Skip to content

Commit 1cb5a03

Browse files
frowand and gregkh
authored and committed
of: of_node_get()/of_node_put() nodes held in phandle cache
commit b8a9ac1 upstream. The phandle cache contains struct device_node pointers. The refcount of the pointers was not incremented while in the cache, allowing use after free error after kfree() of the node. Add the proper increment and decrement of the use count. Fixes: 0b3ce78 ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()") Cc: stable@vger.kernel.org # v4.17+ Signed-off-by: Frank Rowand <frank.rowand@sony.com> Signed-off-by: Rob Herring <robh@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent daa67bd commit 1cb5a03

1 file changed

Lines changed: 46 additions & 24 deletions

File tree

drivers/of/base.c

Lines changed: 46 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -115,9 +115,6 @@ int __weak of_node_to_nid(struct device_node *np)
115115
}
116116
#endif
117117

118-
static struct device_node **phandle_cache;
119-
static u32 phandle_cache_mask;
120-
121118
/*
122119
* Assumptions behind phandle_cache implementation:
123120
* - phandle property values are in a contiguous range of 1..n
@@ -126,6 +123,44 @@ static u32 phandle_cache_mask;
126123
* - the phandle lookup overhead reduction provided by the cache
127124
* will likely be less
128125
*/
126+
127+
static struct device_node **phandle_cache;
128+
static u32 phandle_cache_mask;
129+
130+
/*
131+
* Caller must hold devtree_lock.
132+
*/
133+
static void __of_free_phandle_cache(void)
134+
{
135+
u32 cache_entries = phandle_cache_mask + 1;
136+
u32 k;
137+
138+
if (!phandle_cache)
139+
return;
140+
141+
for (k = 0; k < cache_entries; k++)
142+
of_node_put(phandle_cache[k]);
143+
144+
kfree(phandle_cache);
145+
phandle_cache = NULL;
146+
}
147+
148+
int of_free_phandle_cache(void)
149+
{
150+
unsigned long flags;
151+
152+
raw_spin_lock_irqsave(&devtree_lock, flags);
153+
154+
__of_free_phandle_cache();
155+
156+
raw_spin_unlock_irqrestore(&devtree_lock, flags);
157+
158+
return 0;
159+
}
160+
#if !defined(CONFIG_MODULES)
161+
late_initcall_sync(of_free_phandle_cache);
162+
#endif
163+
129164
void of_populate_phandle_cache(void)
130165
{
131166
unsigned long flags;
@@ -135,8 +170,7 @@ void of_populate_phandle_cache(void)
135170

136171
raw_spin_lock_irqsave(&devtree_lock, flags);
137172

138-
kfree(phandle_cache);
139-
phandle_cache = NULL;
173+
__of_free_phandle_cache();
140174

141175
for_each_of_allnodes(np)
142176
if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -154,30 +188,15 @@ void of_populate_phandle_cache(void)
154188
goto out;
155189

156190
for_each_of_allnodes(np)
157-
if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
191+
if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
192+
of_node_get(np);
158193
phandle_cache[np->phandle & phandle_cache_mask] = np;
194+
}
159195

160196
out:
161197
raw_spin_unlock_irqrestore(&devtree_lock, flags);
162198
}
163199

164-
int of_free_phandle_cache(void)
165-
{
166-
unsigned long flags;
167-
168-
raw_spin_lock_irqsave(&devtree_lock, flags);
169-
170-
kfree(phandle_cache);
171-
phandle_cache = NULL;
172-
173-
raw_spin_unlock_irqrestore(&devtree_lock, flags);
174-
175-
return 0;
176-
}
177-
#if !defined(CONFIG_MODULES)
178-
late_initcall_sync(of_free_phandle_cache);
179-
#endif
180-
181200
void __init of_core_init(void)
182201
{
183202
struct device_node *np;
@@ -1155,8 +1174,11 @@ struct device_node *of_find_node_by_phandle(phandle handle)
11551174
if (!np) {
11561175
for_each_of_allnodes(np)
11571176
if (np->phandle == handle) {
1158-
if (phandle_cache)
1177+
if (phandle_cache) {
1178+
/* will put when removed from cache */
1179+
of_node_get(np);
11591180
phandle_cache[masked_handle] = np;
1181+
}
11601182
break;
11611183
}
11621184
}

0 commit comments

Comments
 (0)