author	Roman Gushchin <guro@fb.com>	2019-05-29 18:03:59 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2019-05-31 16:52:56 -0700
commit	c85d69135a9175c50a823d04d62d932312d037b3 (patch)
tree	edd6ec707ebbf68a89fc1c3fc2b2d06364978ad3	/kernel/bpf/hashtab.c
parent	b936ca643ade11f265fa10e5fb71c20d9c5243f1 (diff)
bpf: move memory size checks to bpf_map_charge_init()
Most bpf map types do similar checks and bytes-to-pages conversion during memory allocation and charging. Let's unify these checks by moving them into bpf_map_charge_init().

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
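For context, after this change bpf_map_charge_init() receives the raw byte size and performs the overflow check and bytes-to-pages conversion itself, so callers such as htab_map_alloc() no longer open-code them. Below is a minimal sketch of that shape; the struct bpf_map_memory fields (pages, user) and the bpf_charge_memlock() helper are taken from the surrounding patch series and are assumptions here, not a verbatim copy of the final code:

	/* Sketch only: would live next to the other charging helpers in
	 * kernel/bpf/syscall.c. Consolidates the "page count doesn't
	 * overflow" check and the round_up(size, PAGE_SIZE) conversion
	 * that each map type previously duplicated.
	 */
	int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
	{
		struct user_struct *user;
		u32 pages;
		int ret;

		/* reject sizes whose page count would not fit in u32 */
		if (size >= U32_MAX - PAGE_SIZE)
			return -E2BIG;

		pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;

		user = get_current_user();
		ret = bpf_charge_memlock(user, pages);	/* memlock rlimit check */
		if (ret) {
			free_uid(user);
			return ret;
		}

		mem->pages = pages;
		mem->user = user;
		return 0;
	}

With this in place, the hashtab caller below simply passes its computed cost in bytes and checks the return value.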
Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r--	kernel/bpf/hashtab.c	7
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b0bdc7b040ad..d92e05d9979b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -360,13 +360,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	else
 		cost += (u64) htab->elem_size * num_possible_cpus();
 
-	if (cost >= U32_MAX - PAGE_SIZE)
-		/* make sure page count doesn't overflow */
-		goto free_htab;
-
 	/* if map size is larger than memlock limit, reject it */
-	err = bpf_map_charge_init(&htab->map.memory,
-				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	err = bpf_map_charge_init(&htab->map.memory, cost);
 	if (err)
 		goto free_htab;
 