author	David S. Miller <davem@davemloft.net>	2017-08-19 21:56:34 -0700
committer	David S. Miller <davem@davemloft.net>	2017-08-19 21:56:34 -0700
commit	228498596c44041c710f5a633904205bc1cd9177 (patch)
tree	859903eeb8d19cdfeb44fac4c72e594058534a56
parent	06d0a11f6e4a1f576937758f7fbbbe8ad398e0ef (diff)
parent	7b0c2a0508b90fce79d3782b2e55d0e8bf6a283e (diff)
Merge branch 'BPF-inline-improvements'
Daniel Borkmann says:

====================
BPF inline improvements

First one makes htab inlining more robust wrt future jits and second one
inlines map in map lookups through map_gen_lookup() callback.

v1 -> v2:
  - BITS_PER_LONG guard in patch 1
  - BPF_EMIT_CALL is on __htab_map_lookup_elem
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
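For context, the call being inlined is the map-in-map pattern on the BPF
program side: the first bpf_map_lookup_elem() returns the inner map, the
second one queries it. A minimal sketch follows (the outer map, here named
outer_map, is hypothetical and assumed to be a BPF_MAP_TYPE_ARRAY_OF_MAPS
or BPF_MAP_TYPE_HASH_OF_MAPS created and populated from userspace; map
definitions are elided):

	SEC("socket")
	int lookup_prog(struct __sk_buff *skb)
	{
		u32 outer_key = 0, inner_key = 0;
		void *inner_map;
		u32 *value;

		/* This outer lookup is what map_gen_lookup() now inlines
		 * when the eBPF JIT is enabled. */
		inner_map = bpf_map_lookup_elem(&outer_map, &outer_key);
		if (!inner_map)
			return 0;

		/* The second lookup is dispatched against the inner map. */
		value = bpf_map_lookup_elem(inner_map, &inner_key);
		return value ? *value : 0;
	}

With this series, the outer lookup is rewritten by the verifier into the
instruction sequences added below instead of going through a helper call.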
-rw-r--r--	kernel/bpf/arraymap.c	26
-rw-r--r--	kernel/bpf/hashtab.c	17
-rw-r--r--	kernel/bpf/verifier.c	6
3 files changed, 48 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 96e9c5c1dfc9..98c0f00c3f5e 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -606,6 +606,31 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
 	return READ_ONCE(*inner_map);
 }
 
+static u32 array_of_map_gen_lookup(struct bpf_map *map,
+				   struct bpf_insn *insn_buf)
+{
+	u32 elem_size = round_up(map->value_size, 8);
+	struct bpf_insn *insn = insn_buf;
+	const int ret = BPF_REG_0;
+	const int map_ptr = BPF_REG_1;
+	const int index = BPF_REG_2;
+
+	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
+	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
+	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+	if (is_power_of_2(elem_size))
+		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
+	else
+		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
+	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
+	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
+	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
+	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+	*insn++ = BPF_MOV64_IMM(ret, 0);
+
+	return insn - insn_buf;
+}
+
 const struct bpf_map_ops array_of_maps_map_ops = {
 	.map_alloc = array_of_map_alloc,
 	.map_free = array_of_map_free,
@@ -615,4 +640,5 @@ const struct bpf_map_ops array_of_maps_map_ops = {
 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
+	.map_gen_lookup = array_of_map_gen_lookup,
 };
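Read as C, the sequence emitted by array_of_map_gen_lookup() above is
roughly the following (illustrative sketch, not kernel code; on entry R1
holds the map pointer and R2 the key pointer, and R0 carries the result;
both the bounds-check miss and a NULL slot funnel into ret = 0 through
the trailing JEQ/JA/MOV triple):

	/* Sketch: assumes the bpf_array union where value[] and ptrs[]
	 * alias, and elem_size == 8 so each slot holds one inner-map
	 * pointer. */
	static void *array_of_map_lookup_inlined(struct bpf_array *array,
						 u32 *key)
	{
		u32 index = *key;			/* BPF_LDX_MEM(BPF_W)    */

		if (index >= array->map.max_entries)	/* BPF_JMP_IMM(BPF_JGE)  */
			return NULL;			/* BPF_MOV64_IMM(ret, 0) */
		/* shift/multiply + add + BPF_LDX_MEM(BPF_DW) */
		return READ_ONCE(array->ptrs[index]);
	}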
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 47ae748c3a49..ae822de4a90a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1322,6 +1322,22 @@ static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
 	return READ_ONCE(*inner_map);
 }
 
+static u32 htab_of_map_gen_lookup(struct bpf_map *map,
+				  struct bpf_insn *insn_buf)
+{
+	struct bpf_insn *insn = insn_buf;
+	const int ret = BPF_REG_0;
+
+	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
+	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
+	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
+				offsetof(struct htab_elem, key) +
+				round_up(map->key_size, 8));
+	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
+
+	return insn - insn_buf;
+}
+
 static void htab_of_map_free(struct bpf_map *map)
 {
 	bpf_map_meta_free(map->inner_map_meta);
@@ -1337,4 +1353,5 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
+	.map_gen_lookup = htab_of_map_gen_lookup,
 };
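The htab variant keeps the hash walk out of line: as the v2 note says,
BPF_EMIT_CALL is on __htab_map_lookup_elem, which returns a struct
htab_elem pointer (or NULL), and only the element-to-value arithmetic is
inlined. A rough C reading of the sequence above (sketch, not kernel
code):

	static void *htab_of_map_lookup_inlined(struct bpf_map *map, void *key)
	{
		struct htab_elem *l = __htab_map_lookup_elem(map, key);

		if (!l)			/* BPF_JMP_IMM(BPF_JEQ, ret, 0, 2) */
			return NULL;	/* R0 is already 0 */
		/* The value area sits after the key, padded to 8 bytes; for
		 * a map-in-map it holds the inner bpf_map pointer. */
		return *(void **)((void *)l + offsetof(struct htab_elem, key) +
				  round_up(map->key_size, 8));
	}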
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4f6e7eb42ba0..e42c096ba20d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4160,7 +4160,11 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			continue;
 		}
 
-		if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) {
+		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
+		 * handlers are currently limited to 64 bit only.
+		 */
+		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
+		    insn->imm == BPF_FUNC_map_lookup_elem) {
 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
 			if (map_ptr == BPF_MAP_PTR_POISON ||
 			    !map_ptr->ops->map_gen_lookup)
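The hunk ends inside the guard; the code that follows (outside the diff
context, so not shown here) is the consumer of map_gen_lookup().
Paraphrased as a sketch, with the exact lines and names only assumed from
this era of fixup_bpf_calls(): the verifier asks the map type to emit its
inline lookup sequence into a scratch buffer, then patches it over the
single helper-call instruction:

	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	u32 cnt;

	/* Emit the map type's inline lookup sequence. */
	cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
	if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf))
		return -EINVAL;

	/* Splice the cnt emitted insns in place of the call insn. */
	new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
	if (!new_prog)
		return -ENOMEM;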