Skip to content

Commit 422baf6

Browse files
borkmann authored and gregkh committed
bpf: fix mlock precharge on arraymaps
[ upstream commit 9c2d63b ] syzkaller recently triggered OOM during percpu map allocation; while there is work in progress by Dennis Zhou to add __GFP_NORETRY semantics for percpu allocator under pressure, there seems also a missing bpf_map_precharge_memlock() check in array map allocation. Given today the actual bpf_map_charge_memlock() happens after the find_and_alloc_map() in syscall path, the bpf_map_precharge_memlock() is there to bail out early before we go and do the map setup work when we find that we hit the limits anyway. Therefore add this for array map as well. Fixes: 6c90598 ("bpf: pre-allocate hash map elements") Fixes: a10423b ("bpf: introduce BPF_MAP_TYPE_PERCPU_ARRAY map") Reported-by: syzbot+adb03f3f0bb57ce3acda@syzkaller.appspotmail.com Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Dennis Zhou <dennisszhou@gmail.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 816cfeb commit 422baf6

1 file changed

Lines changed: 18 additions & 11 deletions

File tree

kernel/bpf/arraymap.c

Lines changed: 18 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,9 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
4848
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
4949
u32 elem_size, index_mask, max_entries;
5050
bool unpriv = !capable(CAP_SYS_ADMIN);
51+
u64 cost, array_size, mask64;
5152
struct bpf_array *array;
52-
u64 array_size, mask64;
53+
int ret;
5354

5455
/* check sanity of attributes */
5556
if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -92,8 +93,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
9293
array_size += (u64) max_entries * elem_size;
9394

9495
/* make sure there is no u32 overflow later in round_up() */
95-
if (array_size >= U32_MAX - PAGE_SIZE)
96+
cost = array_size;
97+
if (cost >= U32_MAX - PAGE_SIZE)
9698
return ERR_PTR(-ENOMEM);
99+
if (percpu) {
100+
cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
101+
if (cost >= U32_MAX - PAGE_SIZE)
102+
return ERR_PTR(-ENOMEM);
103+
}
104+
cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
105+
106+
ret = bpf_map_precharge_memlock(cost);
107+
if (ret < 0)
108+
return ERR_PTR(ret);
97109

98110
/* allocate all map elements and zero-initialize them */
99111
array = bpf_map_area_alloc(array_size);
@@ -108,20 +120,15 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
108120
array->map.value_size = attr->value_size;
109121
array->map.max_entries = attr->max_entries;
110122
array->map.map_flags = attr->map_flags;
123+
array->map.pages = cost;
111124
array->elem_size = elem_size;
112125

113-
if (!percpu)
114-
goto out;
115-
116-
array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
117-
118-
if (array_size >= U32_MAX - PAGE_SIZE ||
119-
elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
126+
if (percpu &&
127+
(elem_size > PCPU_MIN_UNIT_SIZE ||
128+
bpf_array_alloc_percpu(array))) {
120129
bpf_map_area_free(array);
121130
return ERR_PTR(-ENOMEM);
122131
}
123-
out:
124-
array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
125132

126133
return &array->map;
127134
}

0 commit comments

Comments (0)