path: root/arch/arm64
author	David S. Miller <davem@davemloft.net>	2018-05-16 22:47:11 -0400
committer	David S. Miller <davem@davemloft.net>	2018-05-16 22:47:11 -0400
commit	b9f672af148bf7a08a6031743156faffd58dbc7e (patch)
tree	4e3a384636147f0fd31ec01cc267a51bdab7cbb5 /arch/arm64
parent	8e725f7caafb8e820e05707fe9853023375438cf (diff)
parent	e23afe5e7cba89cd0744c5218eda1b3553455c17 (diff)
download	linux-b9f672af148bf7a08a6031743156faffd58dbc7e.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2018-05-17

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Provide a new BPF helper for doing a FIB and neighbor lookup in the kernel tables from an XDP or tc BPF program. The helper provides a fast-path for forwarding packets. The API supports IPv4, IPv6 and MPLS protocols, but currently IPv4 and IPv6 are implemented in this initial work, from David (Ahern).

2) Just a tiny diff but a huge feature enabled for the nfp driver by extending the BPF offload beyond a pure host processing offload. Offloaded XDP programs are allowed to set the RX queue index, thus opening the door for defining a fully programmable RSS/n-tuple filter replacement. Once BPF has already decided on a queue, the device data-path will skip the conventional RSS processing completely, from Jakub.

3) The original sockmap implementation was array based, similar to devmap. However, unlike devmap, where an ifindex has a 1:1 mapping into the map, there are use cases with sockets that need to be referenced using longer keys. Hence, a sockhash map is added, reusing as much of the sockmap code as possible, from John.

4) Introduce BTF ID. The ID is allocated through an IDR, similar to BPF maps and progs. It also makes BTF accessible to user space via BPF_BTF_GET_FD_BY_ID and adds exposure of the BTF data through BPF_OBJ_GET_INFO_BY_FD, from Martin.

5) Enable BPF stackmap with build_id also in NMI context. Due to the up_read() of current->mm->mmap_sem, build_id cannot normally be parsed there. This work defers the up_read() via a per-cpu irq_work so that at least limited support can be enabled, from Song.

6) Various BPF JIT follow-up cleanups and fixups after the LD_ABS/LD_IND JIT conversion, as well as implementation of an optimized 32/64 bit immediate load in the arm64 JIT that allows reducing the number of emitted instructions; tested real-world programs were shrinking by three percent, from Daniel.

7) Add an ifindex parameter to the libbpf loader in order to enable BPF offload support. Right now only iproute2 can load offloaded BPF, and this will also enable libbpf for direct integration into other applications, from David (Beckett).

8) Convert the plain text documentation under Documentation/bpf/ into RST format since this is the appropriate standard the kernel is moving to for all documentation. Also add an overview README.rst, from Jesper.

9) Add the __printf verification attribute to the bpf_verifier_vlog() helper. Though it uses a va_list, we can still allow gcc to check the format string, from Mathieu. (A small sketch of this attribute pattern follows right after this message.)

10) Fix a bash reference in the BPF selftests' Makefile. The '|& ...' construct is a bash 4.0+ feature which is not guaranteed to be available when calling out to the shell, therefore use a more portable variant, from Joe.

11) Fix a 64 bit division in xdp_umem_reg() by using div_u64() instead of relying on the gcc built-in, from Björn.

12) Fix a sock hashmap kmalloc warning reported by syzbot when an overly large key size is used in the hashmap, causing overflows in htab->elem_size. Reject bogus attr->key_size early in sock_hash_alloc(), from Yonghong.

13) Ensure in the BPF selftests that when urandom_read is being linked, --build-id is always enabled so that test_stacktrace_build_id[_nmi] won't fail, from Alexei.

14) Add bitsperlong.h as well as errno.h uapi headers into the tools header infrastructure, which point to one of the arch specific uapi headers. This was needed in order to fix a build error on some systems for the BPF selftests, from Sirio.

15) Allow short options to be used in the xdp_monitor BPF sample code. Also a bpf.h tools uapi header sync in order to fix a selftest build failure. Both from Prashant.

16) More formally clarify the meaning of ID in the direct packet access section of the BPF documentation, from Wang.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
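The __printf() annotation referenced in 9) is the kernel's wrapper around the compiler's format attribute. Below is a minimal userspace sketch of the same pattern, not the kernel code itself; the function names and log levels are made up for illustration. Passing 0 as the second index is the documented way to annotate a function that receives its arguments as a va_list.

/* Sketch only: __attribute__((format(printf, ...))) on a va_list-taking
 * logger, mirroring what the kernel's __printf(n, 0) expands to. */
#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

/* With 0 as the last index there are no variadic arguments to check; the
 * compiler only validates the format string itself. Callers of the variadic
 * wrapper below are covered by its own __printf(2, 3) annotation. */
static __printf(2, 0) void vlog(int level, const char *fmt, va_list args)
{
	printf("[%d] ", level);
	vprintf(fmt, args);
}

static __printf(2, 3) void log_msg(int level, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vlog(level, fmt, args);
	va_end(args);
}

int main(void)
{
	/* gcc/clang will warn here if the format string and arguments disagree. */
	log_msg(1, "loaded %d programs\n", 3);
	return 0;
}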
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/net/bpf_jit_comp.c	115
1 file changed, 69 insertions(+), 46 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 0b40c8fb0706..a6fdaea07c63 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -21,7 +21,6 @@
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
-#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
@@ -80,23 +79,66 @@ static inline void emit(const u32 insn, struct jit_ctx *ctx)
ctx->idx++;
}
+static inline void emit_a64_mov_i(const int is64, const int reg,
+ const s32 val, struct jit_ctx *ctx)
+{
+ u16 hi = val >> 16;
+ u16 lo = val & 0xffff;
+
+ if (hi & 0x8000) {
+ if (hi == 0xffff) {
+ emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
+ } else {
+ emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
+ if (lo != 0xffff)
+ emit(A64_MOVK(is64, reg, lo, 0), ctx);
+ }
+ } else {
+ emit(A64_MOVZ(is64, reg, lo, 0), ctx);
+ if (hi)
+ emit(A64_MOVK(is64, reg, hi, 16), ctx);
+ }
+}
+
+static int i64_i16_blocks(const u64 val, bool inverse)
+{
+ return (((val >> 0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
+ (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
+ (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
+ (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
+}
+
static inline void emit_a64_mov_i64(const int reg, const u64 val,
struct jit_ctx *ctx)
{
- u64 tmp = val;
- int shift = 0;
-
- emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
- tmp >>= 16;
- shift += 16;
- while (tmp) {
- if (tmp & 0xffff)
- emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
- tmp >>= 16;
- shift += 16;
+ u64 nrm_tmp = val, rev_tmp = ~val;
+ bool inverse;
+ int shift;
+
+ if (!(nrm_tmp >> 32))
+ return emit_a64_mov_i(0, reg, (u32)val, ctx);
+
+ inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
+ shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
+ (fls64(nrm_tmp) - 1)), 16), 0);
+ if (inverse)
+ emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
+ else
+ emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
+ shift -= 16;
+ while (shift >= 0) {
+ if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
+ emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
+ shift -= 16;
}
}
+/*
+ * This is an unoptimized 64 immediate emission used for BPF to BPF call
+ * addresses. It will always do a full 64 bit decomposition as otherwise
+ * more complexity in the last extra pass is required since we previously
+ * reserved 4 instructions for the address.
+ */
static inline void emit_addr_mov_i64(const int reg, const u64 val,
struct jit_ctx *ctx)
{
@@ -111,26 +153,6 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
}
}
-static inline void emit_a64_mov_i(const int is64, const int reg,
- const s32 val, struct jit_ctx *ctx)
-{
- u16 hi = val >> 16;
- u16 lo = val & 0xffff;
-
- if (hi & 0x8000) {
- if (hi == 0xffff) {
- emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
- } else {
- emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
- emit(A64_MOVK(is64, reg, lo, 0), ctx);
- }
- } else {
- emit(A64_MOVZ(is64, reg, lo, 0), ctx);
- if (hi)
- emit(A64_MOVK(is64, reg, hi, 16), ctx);
- }
-}
-
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
const struct jit_ctx *ctx)
{
@@ -163,7 +185,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
/* Tail call offset to jump into */
#define PROLOGUE_OFFSET 7
-static int build_prologue(struct jit_ctx *ctx)
+static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
const struct bpf_prog *prog = ctx->prog;
const u8 r6 = bpf2a64[BPF_REG_6];
@@ -188,7 +210,7 @@ static int build_prologue(struct jit_ctx *ctx)
* | ... | BPF prog stack
* | |
* +-----+ <= (BPF_FP - prog->aux->stack_depth)
- * |RSVD | JIT scratchpad
+ * |RSVD | padding
* current A64_SP => +-----+ <= (BPF_FP - ctx->stack_size)
* | |
* | ... | Function call stack
@@ -210,19 +232,19 @@ static int build_prologue(struct jit_ctx *ctx)
/* Set up BPF prog stack base register */
emit(A64_MOV(1, fp, A64_SP), ctx);
- /* Initialize tail_call_cnt */
- emit(A64_MOVZ(1, tcc, 0, 0), ctx);
+ if (!ebpf_from_cbpf) {
+ /* Initialize tail_call_cnt */
+ emit(A64_MOVZ(1, tcc, 0, 0), ctx);
- cur_offset = ctx->idx - idx0;
- if (cur_offset != PROLOGUE_OFFSET) {
- pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
- cur_offset, PROLOGUE_OFFSET);
- return -1;
+ cur_offset = ctx->idx - idx0;
+ if (cur_offset != PROLOGUE_OFFSET) {
+ pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
+ cur_offset, PROLOGUE_OFFSET);
+ return -1;
+ }
}
- /* 4 byte extra for skb_copy_bits buffer */
- ctx->stack_size = prog->aux->stack_depth + 4;
- ctx->stack_size = STACK_ALIGN(ctx->stack_size);
+ ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth);
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
@@ -786,6 +808,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
struct arm64_jit_data *jit_data;
+ bool was_classic = bpf_prog_was_classic(prog);
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
@@ -840,7 +863,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- if (build_prologue(&ctx)) {
+ if (build_prologue(&ctx, was_classic)) {
prog = orig_prog;
goto out_off;
}
@@ -863,7 +886,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
skip_init_ctx:
ctx.idx = 0;
- build_prologue(&ctx);
+ build_prologue(&ctx, was_classic);
if (build_body(&ctx)) {
bpf_jit_binary_free(header);
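For reference, the selection logic of the new emit_a64_mov_i64() above can be exercised outside the kernel. The following is a minimal userspace sketch of the 16-bit block-counting heuristic only; the sample constants and the printed instruction estimates are illustrative, and the real JIT additionally routes 32-bit values through emit_a64_mov_i() and emits actual A64 opcodes.

/*
 * Sketch of the block-counting heuristic: count how many 16-bit chunks of a
 * 64-bit immediate differ from 0x0000 (MOVZ/MOVK path) and from 0xffff
 * (MOVN/MOVK path), then pick whichever path needs fewer instructions.
 */
#include <stdint.h>
#include <stdio.h>

static int i64_i16_blocks(uint64_t val, int inverse)
{
	uint64_t fill = inverse ? 0xffff : 0x0000;

	return (((val >>  0) & 0xffff) != fill) +
	       (((val >> 16) & 0xffff) != fill) +
	       (((val >> 32) & 0xffff) != fill) +
	       (((val >> 48) & 0xffff) != fill);
}

int main(void)
{
	/* Sample immediates chosen for illustration only. */
	const uint64_t samples[] = {
		0xffffffffffff1234ULL,	/* mostly ones: MOVN wins */
		0x0000123400000000ULL,	/* mostly zeros: MOVZ wins */
		0x0123456789abcdefULL,	/* worst case: 4 instructions either way */
	};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t v = samples[i];
		int zero_blocks = i64_i16_blocks(v, 0);	/* chunks != 0x0000 */
		int ones_blocks = i64_i16_blocks(v, 1);	/* chunks != 0xffff */
		int inverse = ones_blocks < zero_blocks;
		/* One MOVZ or MOVN, plus one MOVK per remaining differing chunk. */
		int insns = inverse ? ones_blocks : zero_blocks;

		if (insns == 0)
			insns = 1;	/* all-zero/all-ones still needs one insn */

		printf("0x%016llx -> %s path, ~%d instruction(s)\n",
		       (unsigned long long)v,
		       inverse ? "MOVN" : "MOVZ", insns);
	}
	return 0;
}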