author    David S. Miller <davem@davemloft.net>  2018-10-26 21:41:49 -0700
committer David S. Miller <davem@davemloft.net>  2018-10-26 21:41:49 -0700
commit    6788fac82001ed1944b5da976bcec4a7b9accf51
tree      b003137f9b8cfceac884409216f61cbb2a2f4ddd
parent    345671ea0f9258f410eb057b9ced9cefbbe5dc78
parent    d8fd9e106fbc291167ebb675ad69234597d0fd98
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2018-10-27

The following pull request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a TOCTOU race in BTF header validation, from Martin and Wenwen.

2) Fix the devmap interface comparison in the notifier call, which was
   neglecting the netns, from Taehee.

3) Several fixes in various places, for example, correcting direct packet
   access and helper function availability, from Daniel.

4) Fix the BPF kselftest config fragment to include af_xdp and sockmap,
   from Naresh.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
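The TOCTOU fix in 1) follows the usual copy-then-validate pattern: read the
user buffer into kernel memory exactly once, then do every check on the
kernel copy. A minimal sketch of that pattern, not the actual btf.c code
(parse_copy_first and validate_and_parse are illustrative names):

	/* Illustrative only: snapshot the user buffer once, then parse
	 * the kernel-side copy. A concurrent userspace writer can no
	 * longer change a length field between the bounds check and its
	 * later use, because user memory is never re-read.
	 */
	static int parse_copy_first(void __user *ubuf, u32 size)
	{
		void *kbuf;
		int err;

		kbuf = kvmalloc(size, GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		if (copy_from_user(kbuf, ubuf, size)) {	/* the single copy */
			kvfree(kbuf);
			return -EFAULT;
		}
		err = validate_and_parse(kbuf, size);	/* hypothetical helper;
							 * touches kbuf only */
		kvfree(kbuf);
		return err;
	}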
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/btf.c              | 58
-rw-r--r--  kernel/bpf/core.c             | 49
-rw-r--r--  kernel/bpf/devmap.c           |  3
-rw-r--r--  kernel/bpf/helpers.c          |  2
-rw-r--r--  kernel/bpf/queue_stack_maps.c |  2
-rw-r--r--  kernel/bpf/verifier.c         | 13
6 files changed, 84 insertions(+), 43 deletions(-)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 378cef70341c..ee4c82667d65 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -2067,56 +2067,47 @@ static int btf_check_sec_info(struct btf_verifier_env *env,
return 0;
}
-static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data,
- u32 btf_data_size)
+static int btf_parse_hdr(struct btf_verifier_env *env)
{
+ u32 hdr_len, hdr_copy, btf_data_size;
const struct btf_header *hdr;
- u32 hdr_len, hdr_copy;
- /*
- * Minimal part of the "struct btf_header" that
- * contains the hdr_len.
- */
- struct btf_min_header {
- u16 magic;
- u8 version;
- u8 flags;
- u32 hdr_len;
- } __user *min_hdr;
struct btf *btf;
int err;
btf = env->btf;
- min_hdr = btf_data;
+ btf_data_size = btf->data_size;
- if (btf_data_size < sizeof(*min_hdr)) {
+ if (btf_data_size <
+ offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
btf_verifier_log(env, "hdr_len not found");
return -EINVAL;
}
- if (get_user(hdr_len, &min_hdr->hdr_len))
- return -EFAULT;
-
+ hdr = btf->data;
+ hdr_len = hdr->hdr_len;
if (btf_data_size < hdr_len) {
btf_verifier_log(env, "btf_header not found");
return -EINVAL;
}
- err = bpf_check_uarg_tail_zero(btf_data, sizeof(btf->hdr), hdr_len);
- if (err) {
- if (err == -E2BIG)
- btf_verifier_log(env, "Unsupported btf_header");
- return err;
+ /* Ensure the unsupported header fields are zero */
+ if (hdr_len > sizeof(btf->hdr)) {
+ u8 *expected_zero = btf->data + sizeof(btf->hdr);
+ u8 *end = btf->data + hdr_len;
+
+ for (; expected_zero < end; expected_zero++) {
+ if (*expected_zero) {
+ btf_verifier_log(env, "Unsupported btf_header");
+ return -E2BIG;
+ }
+ }
}
hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
- if (copy_from_user(&btf->hdr, btf_data, hdr_copy))
- return -EFAULT;
+ memcpy(&btf->hdr, btf->data, hdr_copy);
hdr = &btf->hdr;
- if (hdr->hdr_len != hdr_len)
- return -EINVAL;
-
btf_verifier_log_hdr(env, btf_data_size);
if (hdr->magic != BTF_MAGIC) {
@@ -2186,10 +2177,6 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
}
env->btf = btf;
- err = btf_parse_hdr(env, btf_data, btf_data_size);
- if (err)
- goto errout;
-
data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
if (!data) {
err = -ENOMEM;
@@ -2198,13 +2185,18 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
btf->data = data;
btf->data_size = btf_data_size;
- btf->nohdr_data = btf->data + btf->hdr.hdr_len;
if (copy_from_user(data, btf_data, btf_data_size)) {
err = -EFAULT;
goto errout;
}
+ err = btf_parse_hdr(env);
+ if (err)
+ goto errout;
+
+ btf->nohdr_data = btf->data + btf->hdr.hdr_len;
+
err = btf_parse_str_sec(env);
if (err)
goto errout;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7c7eeea8cffc..6377225b2082 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -365,10 +365,13 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
}
#ifdef CONFIG_BPF_JIT
+# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
+
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden __read_mostly;
int bpf_jit_kallsyms __read_mostly;
+int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT;
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -577,27 +580,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
return ret;
}
+static atomic_long_t bpf_jit_current;
+
+#if defined(MODULES_VADDR)
+static int __init bpf_jit_charge_init(void)
+{
+ /* Only used as heuristic here to derive limit. */
+ bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
+ PAGE_SIZE), INT_MAX);
+ return 0;
+}
+pure_initcall(bpf_jit_charge_init);
+#endif
+
+static int bpf_jit_charge_modmem(u32 pages)
+{
+ if (atomic_long_add_return(pages, &bpf_jit_current) >
+ (bpf_jit_limit >> PAGE_SHIFT)) {
+ if (!capable(CAP_SYS_ADMIN)) {
+ atomic_long_sub(pages, &bpf_jit_current);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static void bpf_jit_uncharge_modmem(u32 pages)
+{
+ atomic_long_sub(pages, &bpf_jit_current);
+}
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
struct bpf_binary_header *hdr;
- unsigned int size, hole, start;
+ u32 size, hole, start, pages;
/* Most of BPF filters are really small, but if some of them
* fill a page, allow at least 128 extra bytes to insert a
* random section of illegal instructions.
*/
size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
+ pages = size / PAGE_SIZE;
+
+ if (bpf_jit_charge_modmem(pages))
+ return NULL;
hdr = module_alloc(size);
- if (hdr == NULL)
+ if (!hdr) {
+ bpf_jit_uncharge_modmem(pages);
return NULL;
+ }
/* Fill space with illegal/arch-dep instructions. */
bpf_fill_ill_insns(hdr, size);
- hdr->pages = size / PAGE_SIZE;
+ hdr->pages = pages;
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
PAGE_SIZE - sizeof(*hdr));
start = (get_random_int() % hole) & ~(alignment - 1);
@@ -610,7 +650,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
+ u32 pages = hdr->pages;
+
module_memfree(hdr);
+ bpf_jit_uncharge_modmem(pages);
}
/* This symbol is only overridden by archs that have different
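Worked numbers for the new bpf_jit_limit knob, assuming 4 KiB pages
(PAGE_SIZE = 4096, PAGE_SHIFT = 12; per-arch values differ):

	/* BPF_JIT_LIMIT_DEFAULT = 4096 * 40000 = 163840000 bytes (~156 MiB)
	 * bpf_jit_limit >> PAGE_SHIFT = 40000 pages
	 *
	 * bpf_jit_charge_modmem() lets bpf_jit_current grow past that
	 * page count only for CAP_SYS_ADMIN callers; everyone else has
	 * the charge rolled back and gets -EPERM. On architectures that
	 * define MODULES_VADDR, the initcall above instead derives the
	 * cap as a quarter of the module area, rounded up to a page and
	 * clamped to INT_MAX.
	 */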
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 141710b82a6c..191b79948424 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -512,8 +512,7 @@ static int dev_map_notification(struct notifier_block *notifier,
struct bpf_dtab_netdev *dev, *odev;
dev = READ_ONCE(dtab->netdev_map[i]);
- if (!dev ||
- dev->dev->ifindex != netdev->ifindex)
+ if (!dev || netdev != dev->dev)
continue;
odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
if (dev == odev)
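Why pointer identity rather than ifindex in the hunk above: an ifindex is
only unique within one network namespace, so the old check could match an
unrelated device from another netns. A hedged illustration (device names
and index values invented):

	/* Two devices in different netns may share an ifindex, but never
	 * a struct net_device pointer:
	 *
	 *   netns A: eth0, ifindex 3  ->  struct net_device *a
	 *   netns B: eth0, ifindex 3  ->  struct net_device *b   (a != b)
	 *
	 * old: dev->dev->ifindex == netdev->ifindex -> true for both a, b
	 * new: netdev == dev->dev                   -> true only for the
	 *                                              device that raised
	 *                                              the notifier event
	 */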
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index ab0d5e3f9892..a74972b07e74 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -99,7 +99,6 @@ BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
const struct bpf_func_proto bpf_map_pop_elem_proto = {
.func = bpf_map_pop_elem,
.gpl_only = false,
- .pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
@@ -113,7 +112,6 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
const struct bpf_func_proto bpf_map_peek_elem_proto = {
.func = bpf_map_pop_elem,
.gpl_only = false,
- .pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 12a93fb37449..8bbd72d3a121 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -122,6 +122,7 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
raw_spin_lock_irqsave(&qs->lock, flags);
if (queue_stack_map_is_empty(qs)) {
+ memset(value, 0, qs->map.value_size);
err = -ENOENT;
goto out;
}
@@ -151,6 +152,7 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
raw_spin_lock_irqsave(&qs->lock, flags);
if (queue_stack_map_is_empty(qs)) {
+ memset(value, 0, qs->map.value_size);
err = -ENOENT;
goto out;
}
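The two memset() additions matter because the value argument of these
helpers is declared ARG_PTR_TO_UNINIT_MAP_VALUE (see the protos in
helpers.c above): the verifier lets the BPF program read the buffer after
the call regardless of the return value, so the empty-map path must still
fill it, or the program could observe whatever data was there before. A
sketch from the program's side (map name and value size are hypothetical):

	/* Illustrative BPF program fragment. */
	char val[64];				/* hypothetical value_size */
	long err = bpf_map_pop_elem(&queue_map, val);
	/* Reading val here is verifier-legal even when err == -ENOENT;
	 * the new memset() guarantees it holds zeroes rather than stale
	 * contents from a previous use of the buffer. */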
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 98fa0be35370..171a2c88e77d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1387,21 +1387,24 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
enum bpf_access_type t)
{
switch (env->prog->type) {
+ /* Program types only with direct read access go here! */
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
case BPF_PROG_TYPE_SK_REUSEPORT:
- /* dst_input() and dst_output() can't write for now */
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ case BPF_PROG_TYPE_CGROUP_SKB:
if (t == BPF_WRITE)
return false;
/* fallthrough */
+
+ /* Program types with direct read + write access go here! */
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_SK_MSG:
- case BPF_PROG_TYPE_FLOW_DISSECTOR:
if (meta)
return meta->pkt_access;
@@ -5706,7 +5709,11 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
bool is_narrower_load;
u32 target_size;
- if (ops->gen_prologue) {
+ if (ops->gen_prologue || env->seen_direct_write) {
+ if (!ops->gen_prologue) {
+ verbose(env, "bpf verifier is misconfigured\n");
+ return -EINVAL;
+ }
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
if (cnt >= ARRAY_SIZE(insn_buf)) {