
Commit 074fd02

tohojo authored and gregkh committed
xdp: Fix cleanup on map free for devmap_hash map type
[ Upstream commit 071cdec ]

Tetsuo pointed out that it was not only the device unregister hook that
was broken for devmap_hash types, it was also cleanup on map free. So
better fix this as well.

While we're at it, there's no reason to allocate the netdev_map array
for DEVMAP_HASH, so skip that and adjust the cost accordingly.

Fixes: 6f9d451 ("xdp: Add devmap_hash map type for looking up devices by hashed index")
Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20191121133612.430414-1-toke@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 62ac16b commit 074fd02

1 file changed: kernel/bpf/devmap.c (46 additions, 28 deletions)
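The heart of the fix is the new DEVMAP_HASH branch in dev_map_free(): walk every hash bucket and release each entry with a "safe" iteration, since entries are unlinked and freed mid-walk. As a rough illustration of that teardown pattern, here is a minimal, self-contained userspace C sketch; the node/table types, the insert() helper, and the fixed N_BUCKETS are hypothetical stand-ins for the kernel's hlist machinery and the dtab fields, not kernel API.

#include <stdio.h>
#include <stdlib.h>

#define N_BUCKETS 4     /* stand-in for dtab->n_buckets (always a power of two) */

struct node {
        int ifindex;            /* stand-in for the payload of a bpf_dtab_netdev */
        struct node *next;
};

/* stand-in for dtab->dev_index_head */
static struct node *table[N_BUCKETS];

/* Bucket selection mirrors dev_map_index_hash(): idx & (n_buckets - 1). */
static void insert(int idx, int ifindex)
{
        struct node *n = malloc(sizeof(*n));

        n->ifindex = ifindex;
        n->next = table[idx & (N_BUCKETS - 1)];
        table[idx & (N_BUCKETS - 1)] = n;
}

/* Analogue of the DEVMAP_HASH branch the commit adds to dev_map_free():
 * visit every bucket and walk it with a "safe" iteration that saves the
 * next pointer before freeing, so destroying the current entry cannot
 * derail the walk. */
static void table_free(void)
{
        int i;

        for (i = 0; i < N_BUCKETS; i++) {
                struct node *n, *next;

                for (n = table[i]; n; n = next) {
                        next = n->next; /* like hlist_for_each_entry_safe() */
                        free(n);        /* kernel also drops bulkq + dev ref here */
                }
                table[i] = NULL;
        }
}

int main(void)
{
        insert(1, 10);
        insert(5, 20);  /* collides with idx 1 in a 4-bucket table */
        insert(2, 30);
        table_free();   /* every entry freed, including collision chains */
        puts("all entries freed");
        return 0;
}

The kernel code in the diff below does the same per-bucket safe walk with hlist_for_each_entry_safe(), and for each entry also unlinks it with hlist_del_rcu(), frees its per-CPU bulk queue, and drops the device reference before kfree().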
@@ -74,7 +74,7 @@ struct bpf_dtab_netdev {
 
 struct bpf_dtab {
 	struct bpf_map map;
-	struct bpf_dtab_netdev **netdev_map;
+	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
 	struct list_head __percpu *flush_list;
 	struct list_head list;
 
@@ -101,6 +101,12 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries)
 	return hash;
 }
 
+static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
+						    int idx)
+{
+	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
+}
+
 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 {
 	int err, cpu;
@@ -120,15 +126,16 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 	bpf_map_init_from_attr(&dtab->map, attr);
 
 	/* make sure page count doesn't overflow */
-	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
-	cost += sizeof(struct list_head) * num_possible_cpus();
+	cost = (u64) sizeof(struct list_head) * num_possible_cpus();
 
 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
 
 		if (!dtab->n_buckets) /* Overflow check */
 			return -EINVAL;
 		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
+	} else {
+		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
 	}
 
 	/* if map size is larger than memlock limit, reject it */
@@ -143,24 +150,22 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 	for_each_possible_cpu(cpu)
 		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
 
-	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
-					      sizeof(struct bpf_dtab_netdev *),
-					      dtab->map.numa_node);
-	if (!dtab->netdev_map)
-		goto free_percpu;
-
 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
 		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
 		if (!dtab->dev_index_head)
-			goto free_map_area;
+			goto free_percpu;
 
 		spin_lock_init(&dtab->index_lock);
+	} else {
+		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+						      sizeof(struct bpf_dtab_netdev *),
+						      dtab->map.numa_node);
+		if (!dtab->netdev_map)
+			goto free_percpu;
 	}
 
 	return 0;
 
-free_map_area:
-	bpf_map_area_free(dtab->netdev_map);
 free_percpu:
 	free_percpu(dtab->flush_list);
 free_charge:
@@ -228,21 +233,40 @@ static void dev_map_free(struct bpf_map *map)
 		cond_resched();
 	}
 
-	for (i = 0; i < dtab->map.max_entries; i++) {
-		struct bpf_dtab_netdev *dev;
+	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+		for (i = 0; i < dtab->n_buckets; i++) {
+			struct bpf_dtab_netdev *dev;
+			struct hlist_head *head;
+			struct hlist_node *next;
 
-		dev = dtab->netdev_map[i];
-		if (!dev)
-			continue;
+			head = dev_map_index_hash(dtab, i);
 
-		free_percpu(dev->bulkq);
-		dev_put(dev->dev);
-		kfree(dev);
+			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+				hlist_del_rcu(&dev->index_hlist);
+				free_percpu(dev->bulkq);
+				dev_put(dev->dev);
+				kfree(dev);
+			}
+		}
+
+		kfree(dtab->dev_index_head);
+	} else {
+		for (i = 0; i < dtab->map.max_entries; i++) {
+			struct bpf_dtab_netdev *dev;
+
+			dev = dtab->netdev_map[i];
+			if (!dev)
+				continue;
+
+			free_percpu(dev->bulkq);
+			dev_put(dev->dev);
+			kfree(dev);
+		}
+
+		bpf_map_area_free(dtab->netdev_map);
 	}
 
 	free_percpu(dtab->flush_list);
-	bpf_map_area_free(dtab->netdev_map);
-	kfree(dtab->dev_index_head);
 	kfree(dtab);
 }
 
@@ -263,12 +287,6 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
-static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
-						    int idx)
-{
-	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
-}
-
 struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
