author    David S. Miller <davem@davemloft.net>  2018-02-21 15:37:37 -0500
committer David S. Miller <davem@davemloft.net>  2018-02-21 15:37:37 -0500
commit    bf006d18b74172c3562486b5e354b42cb5bcb261 (patch)
tree      a7437cb2d9b04240d29325c384ad198c74406563
parent    6c4df17c7a529d460448cc8284b95a4ada37e3a3 (diff)
parent    b1a2ce825737b0165cc08e6f98f8c0ea1affdd60 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:
====================
pull-request: bpf 2018-02-20
The following pull-request contains BPF updates for your *net* tree.
The main changes are:
1) Fix a memory leak in the LPM trie's map_free() callback, where the
trie structure itself had never been freed since the initial
implementation. A synchronize_rcu() is also needed in order to wait for
outstanding programs accessing the trie to complete, from Yonghong.
2) Fix sock_map_alloc()'s error path so that it correctly propagates
the -EINVAL error for too large allocation requests. The bug was only
recently introduced when fixing close hooks via the ULP layer, fix
from Eric.
3) Do not use GFP_ATOMIC in __cpu_map_entry_alloc(). The reason is that
this no longer works with the recent __ptr_ring_init_queue_alloc()
conversion to kvmalloc_array(), where that GFP flag is invalid in case
of a fallback to vmalloc(), from Jason.
4) Fix two recent syzkaller warnings: i) fix bpf_prog_array_copy_to_user()
when a prog query with a large number of ids was performed, where we
would otherwise trigger a warning from the allocator side, ii) fix a
missing mlock precharge on arraymaps, from Daniel.
5) Two fixes for bpftool in order to avoid breaking JSON output when used
in batch mode, from Quentin.
6) Move a pr_debug() in libbpf so that it no longer references an
otherwise uninitialized variable in bpf_program__reloc_text(), from Jeremy.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  kernel/bpf/arraymap.c    | 28
-rw-r--r--  kernel/bpf/core.c        |  2
-rw-r--r--  kernel/bpf/cpumap.c      |  2
-rw-r--r--  kernel/bpf/lpm_trie.c    | 11
-rw-r--r--  kernel/bpf/sockmap.c     |  3
-rw-r--r--  kernel/trace/bpf_trace.c |  2
-rw-r--r--  tools/bpf/bpftool/main.c |  2
-rw-r--r--  tools/bpf/bpftool/prog.c |  3
-rw-r--r--  tools/lib/bpf/libbpf.c   |  5
9 files changed, 36 insertions(+), 22 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index b1f66480135b..a364c408f25a 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -73,11 +73,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +109,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +132,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 29ca9208dcfa..d315b393abdd 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 	 * so always copy 'cnt' prog_ids to the user.
 	 * In a rare race the user will see zero prog_ids
 	 */
-	ids = kcalloc(cnt, sizeof(u32), GFP_USER);
+	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
 	if (!ids)
 		return -ENOMEM;
 	rcu_read_lock();
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index fbfdada6caee..a4bb0b34375a 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
 static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 						       int map_id)
 {
-	gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
 	int numa, err;
 
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 7b469d10d0e9..a75e02c961b5 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
 	struct lpm_trie_node __rcu **slot;
 	struct lpm_trie_node *node;
 
-	raw_spin_lock(&trie->lock);
+	/* Wait for outstanding programs to complete
+	 * update/lookup/delete/get_next_key and free the trie.
+	 */
+	synchronize_rcu();
 
 	/* Always start at the root and walk down to a node that has no
 	 * children. Then free that node, nullify its reference in the parent
@@ -569,7 +572,7 @@ static void trie_free(struct bpf_map *map)
 		node = rcu_dereference_protected(*slot,
 						 lockdep_is_held(&trie->lock));
 		if (!node)
-			goto unlock;
+			goto out;
 
 		if (rcu_access_pointer(node->child[0])) {
 			slot = &node->child[0];
@@ -587,8 +590,8 @@ static void trie_free(struct bpf_map *map)
 		}
 	}
 
-unlock:
-	raw_spin_unlock(&trie->lock);
+out:
+	kfree(trie);
 }
 
 static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 48c33417d13c..a927e89dad6e 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
-	int err = -EINVAL;
 	u64 cost;
+	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+	err = -EINVAL;
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_stab;
 
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index fc2838ac8b78..c0a9e310d715 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 		return -EINVAL;
 	if (copy_from_user(&query, uquery, sizeof(query)))
 		return -EFAULT;
+	if (query.ids_len > BPF_TRACE_MAX_PROGS)
+		return -E2BIG;
 
 	mutex_lock(&bpf_event_mutex);
 	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 3a0396d87c42..185acfa229b5 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -244,7 +244,7 @@ static int do_batch(int argc, char **argv)
 	}
 
 	if (errno && errno != ENOENT) {
-		perror("reading batch file failed");
+		p_err("reading batch file failed: %s", strerror(errno));
 		err = -1;
 	} else {
 		p_info("processed %d lines", lines);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index e8e2baaf93c2..e549e329be82 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -774,6 +774,9 @@ static int do_dump(int argc, char **argv)
 				n < 0 ? strerror(errno) : "short write");
 			goto err_free;
 		}
+
+		if (json_output)
+			jsonw_null(json_wtr);
 	} else {
 		if (member_len == &info.jited_prog_len) {
 			const char *name = NULL;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 97073d649c1a..5bbbf285af74 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1060,11 +1060,12 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
 		prog->insns = new_insn;
 		prog->main_prog_cnt = prog->insns_cnt;
 		prog->insns_cnt = new_cnt;
+		pr_debug("added %zd insn from %s to prog %s\n",
+			 text->insns_cnt, text->section_name,
+			 prog->section_name);
 	}
 	insn = &prog->insns[relo->insn_idx];
 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
-	pr_debug("added %zd insn from %s to prog %s\n",
-		 text->insns_cnt, text->section_name, prog->section_name);
 	return 0;
 }
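
The lpm_trie hunks above replace the lock-protected teardown with an RCU-deferred one: by the time map_free() runs, no new readers can appear, so a single synchronize_rcu() is enough to flush the readers still in flight before the structure is torn down without taking the lock. A minimal, self-contained sketch of that pattern, using hypothetical demo_trie/demo_node types rather than the real lpm_trie structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical stand-ins for the real lpm_trie structures. */
struct demo_node {
	struct demo_node __rcu	*child[2];
};

struct demo_trie {
	struct demo_node __rcu	*root;
};

/* Sketch of the deferred-free shape used by trie_free() above. */
static void demo_trie_free(struct demo_trie *trie)
{
	struct demo_node __rcu **slot;
	struct demo_node *node;

	/* No new readers can start by now; wait for those in flight. */
	synchronize_rcu();

	/* Walk down to a childless node, free it, clear its parent slot,
	 * and restart from the root until the trie is empty.
	 */
	for (;;) {
		slot = &trie->root;

		for (;;) {
			node = rcu_dereference_protected(*slot, 1);
			if (!node)
				goto out;

			if (rcu_access_pointer(node->child[0])) {
				slot = &node->child[0];
				continue;
			}
			if (rcu_access_pointer(node->child[1])) {
				slot = &node->child[1];
				continue;
			}

			kfree(node);
			RCU_INIT_POINTER(*slot, NULL);
			break;
		}
	}

out:
	kfree(trie);
}

The ordering is the point: synchronize_rcu() must come before the first kfree(), otherwise a reader still walking the trie could touch freed nodes.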
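
The bpf_trace.c and core.c hunks above are two halves of the same hardening for user-sized requests: cap the user-supplied count before it reaches the allocator, and pass __GFP_NOWARN so a large allocation that still fails returns -ENOMEM instead of tripping an allocator warning that user space (or syzkaller) can provoke. A rough sketch of that shape under assumed names (DEMO_MAX_IDS and demo_copy_ids_to_user() are made up for illustration, and the sketch copies zeroed ids rather than real program ids):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical bound, standing in for BPF_TRACE_MAX_PROGS. */
#define DEMO_MAX_IDS	64

static int demo_copy_ids_to_user(u32 __user *uids, u32 ids_len)
{
	u32 *ids;
	int ret = 0;

	/* Reject oversized requests before doing any allocation. */
	if (ids_len > DEMO_MAX_IDS)
		return -E2BIG;

	/* __GFP_NOWARN: if the allocation still fails, the caller just
	 * sees -ENOMEM; user space cannot trigger an allocator splat.
	 */
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;

	/* The real code fills ids from the prog array under RCU. */
	if (copy_to_user(uids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}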
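
The arraymap hunks move all of the cost accounting, including the per-CPU portion, in front of the allocation so the memlock limit is charged before any memory is handed out. A simplified sketch of that ordering (demo_array_cost_pages() is a made-up helper; the real code also accounts the struct bpf_array header and the index-masking overhead, omitted here):

#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static int demo_array_cost_pages(u32 max_entries, u32 value_size,
				 bool percpu, u32 *pages)
{
	u32 elem_size = round_up(value_size, 8);
	u64 cost;

	/* Cost of the flat element area, checked against u32 overflow
	 * before it is later passed through round_up().
	 */
	cost = (u64)max_entries * elem_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return -ENOMEM;

	/* Per-CPU arrays additionally hold one value per possible CPU. */
	if (percpu) {
		cost += (u64)max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return -ENOMEM;
	}

	*pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* Charge RLIMIT_MEMLOCK up front: if this fails, nothing has
	 * been allocated yet and there is nothing to unwind.
	 */
	return bpf_map_precharge_memlock(*pages);
}

Precharging before bpf_map_area_alloc() and bpf_array_alloc_percpu() means an over-limit request is rejected before any large allocation is even attempted.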
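
The one-line cpumap change follows from how kvmalloc-style allocations behave: they may fall back to vmalloc(), which can sleep, so the GFP mask passed down must be a sleepable one, and GFP_ATOMIC is not valid for that fallback path. A tiny sketch of the intended usage (demo_alloc_queue() is a made-up wrapper, not the cpumap code):

#include <linux/mm.h>
#include <linux/slab.h>

/* Allocate a pointer queue the way a ptr_ring sized via kvmalloc_array()
 * would: GFP_KERNEL so the vmalloc() fallback is allowed to sleep,
 * __GFP_NOWARN so an oversized request fails quietly.
 */
static void *demo_alloc_queue(size_t entries)
{
	return kvmalloc_array(entries, sizeof(void *),
			      GFP_KERNEL | __GFP_NOWARN);
}

static void demo_free_queue(void *queue)
{
	/* kvfree() handles both the kmalloc and the vmalloc case. */
	kvfree(queue);
}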