From 6c6874f401e5a0caab3b6a0663169e1fb5e930bb Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Tue, 25 Jun 2019 09:56:31 -0700
Subject: tools: bpftool: use correct argument in cgroup errors

cgroup code tries to use argv[0] as the cgroup path, but if opening it
fails, it uses argv[1] to report the error.

Fixes: 5ccda64d38cc ("bpftool: implement cgroup bpf operations")
Signed-off-by: Jakub Kicinski
Reviewed-by: Quentin Monnet
Acked-by: Roman Gushchin
Signed-off-by: Daniel Borkmann
---
 tools/bpf/bpftool/cgroup.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'tools')

diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 73ec8ea33fb4..a13fb7265d1a 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -168,7 +168,7 @@ static int do_show(int argc, char **argv)

 	cgroup_fd = open(argv[0], O_RDONLY);
 	if (cgroup_fd < 0) {
-		p_err("can't open cgroup %s", argv[1]);
+		p_err("can't open cgroup %s", argv[0]);
 		goto exit;
 	}

@@ -356,7 +356,7 @@ static int do_attach(int argc, char **argv)

 	cgroup_fd = open(argv[0], O_RDONLY);
 	if (cgroup_fd < 0) {
-		p_err("can't open cgroup %s", argv[1]);
+		p_err("can't open cgroup %s", argv[0]);
 		goto exit;
 	}

@@ -414,7 +414,7 @@ static int do_detach(int argc, char **argv)

 	cgroup_fd = open(argv[0], O_RDONLY);
 	if (cgroup_fd < 0) {
-		p_err("can't open cgroup %s", argv[1]);
+		p_err("can't open cgroup %s", argv[0]);
 		goto exit;
 	}

--
cgit v1.2.3

From 0472301a28f6cf53a6bc5783e48a2d0bbff4682f Mon Sep 17 00:00:00 2001
From: Baruch Siach
Date: Fri, 28 Jun 2019 07:08:45 +0300
Subject: bpf: fix uapi bpf_prog_info fields alignment

Merge commit 1c8c5a9d38f60 ("Merge
git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next") undid the
fix from commit 36f9814a494 ("bpf: fix uapi hole for 32 bit compat
applications") by taking the gpl_compatible 1-bit field definition from
commit b85fab0e67b162 ("bpf: Add gpl_compatible flag to struct
bpf_prog_info") as is. That breaks architectures with 16-bit alignment
like m68k. Add a 31-bit pad after gpl_compatible to restore the
alignment of the following fields.

Thanks to Dmitry V. Levin for his analysis of this bug's history.

Signed-off-by: Baruch Siach
Acked-by: Song Liu
Cc: Jiri Olsa
Cc: Daniel Borkmann
Cc: Geert Uytterhoeven
Cc: Linus Torvalds
Signed-off-by: Daniel Borkmann
---
 include/uapi/linux/bpf.h       | 1 +
 tools/include/uapi/linux/bpf.h | 1 +
 2 files changed, 2 insertions(+)

(limited to 'tools')

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a8b823c30b43..29a5bc3d5c66 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3143,6 +3143,7 @@ struct bpf_prog_info {
 	char name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
 	__u32 gpl_compatible:1;
+	__u32 :31; /* alignment pad */
 	__u64 netns_dev;
 	__u64 netns_ino;
 	__u32 nr_jited_ksyms;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index a8b823c30b43..29a5bc3d5c66 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3143,6 +3143,7 @@ struct bpf_prog_info {
 	char name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
 	__u32 gpl_compatible:1;
+	__u32 :31; /* alignment pad */
 	__u64 netns_dev;
 	__u64 netns_ino;
 	__u32 nr_jited_ksyms;
--
cgit v1.2.3

From ac8786c72eba67dfc8ae751a75c586289a1b9b1b Mon Sep 17 00:00:00 2001
From: Luke Nelson
Date: Fri, 28 Jun 2019 22:57:51 -0700
Subject: selftests: bpf: add tests for shifts by zero

There are currently no tests for ALU64 shift operations when the shift
amount is 0. This adds 6 new tests to make sure they are equivalent to
a no-op. The x32 JIT had bugs of this kind that could have been caught
by these tests.

Cc: Xi Wang
Signed-off-by: Luke Nelson
Signed-off-by: Daniel Borkmann
---
 tools/testing/selftests/bpf/verifier/basic_instr.c | 85 ++++++++++++++++++++++
 1 file changed, 85 insertions(+)

(limited to 'tools')

diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
index ed91a7b9a456..071dbc889e8c 100644
--- a/tools/testing/selftests/bpf/verifier/basic_instr.c
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -90,6 +90,91 @@
 	},
 	.result = ACCEPT,
 },
+{
+	"lsh64 by 0 imm",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 1),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"rsh64 by 0 imm",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 0),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"arsh64 by 0 imm",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 0),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"lsh64 by 0 reg",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 1),
+	BPF_LD_IMM64(BPF_REG_2, 0),
+	BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"rsh64 by 0 reg",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+	BPF_LD_IMM64(BPF_REG_3, 0),
+	BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_3),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
+{
+	"arsh64 by 0 reg",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_0, 1),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+	BPF_LD_IMM64(BPF_REG_3, 0),
+	BPF_ALU64_REG(BPF_ARSH, BPF_REG_1, BPF_REG_3),
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 1,
+},
 {
 	"invalid 64-bit BPF_END",
 	.insns = {
--
cgit v1.2.3

From 11aca65ec4db09527d3e9b6b41a0615b7da4386b Mon Sep 17 00:00:00 2001
From: Jiri Benc
Date: Tue, 2 Jul 2019 19:40:31 +0200
Subject: selftests: bpf: fix inlines in test_lwt_seg6local

Selftests are reporting this failure in test_lwt_seg6local.sh:

  + ip netns exec ns2 ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o sec encap_srh dev veth2
  Error fetching program/map!
  Failed to parse eBPF program: Operation not permitted

The problem is that __attribute__((always_inline)) alone is not enough
to prevent clang from inserting those functions in .text. In that case,
.text is not marked as relocatable.

See the output of objdump -h test_lwt_seg6local.o:

  Idx Name          Size      VMA               LMA               File off  Algn
    0 .text         00003530  0000000000000000  0000000000000000  00000040  2**3
                    CONTENTS, ALLOC, LOAD, READONLY, CODE

This causes the iproute bpf loader to fail in bpf_fetch_prog_sec:
bpf_has_call_data returns true but bpf_fetch_prog_relo fails as there's
no relocatable .text section in the file.

To fix this, convert to 'static __always_inline'.

v2: Use 'static __always_inline' instead of
    'static inline __attribute__((always_inline))'

Fixes: c99a84eac026 ("selftests/bpf: test for seg6local End.BPF action")
Signed-off-by: Jiri Benc
Acked-by: Yonghong Song
Signed-off-by: Daniel Borkmann
---
 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'tools')

diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 0575751bc1bc..e2f6ed0a583d 100644
--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
@@ -61,7 +61,7 @@ struct sr6_tlv_t {
 	unsigned char value[0];
 } BPF_PACKET_HEADER;

-__attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
 {
 	void *cursor, *data_end;
 	struct ip6_srh_t *srh;
@@ -95,7 +95,7 @@ __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
 	return srh;
 }

-__attribute__((always_inline))
+static __always_inline
 int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
 		   uint32_t old_pad, uint32_t pad_off)
 {
@@ -125,7 +125,7 @@ int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
 	return 0;
 }

-__attribute__((always_inline))
+static __always_inline
 int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
 			  uint32_t *tlv_off, uint32_t *pad_size,
 			  uint32_t *pad_off)
@@ -184,7 +184,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
 	return 0;
 }

-__attribute__((always_inline))
+static __always_inline
 int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
 	    struct sr6_tlv_t *itlv, uint8_t tlv_size)
 {
@@ -228,7 +228,7 @@ int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
 	return update_tlv_pad(skb, new_pad, pad_size, pad_off);
 }

-__attribute__((always_inline))
+static __always_inline
 int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
 	       uint32_t tlv_off)
 {
@@ -266,7 +266,7 @@ int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
 	return update_tlv_pad(skb, new_pad, pad_size, pad_off);
 }

-__attribute__((always_inline))
+static __always_inline
 int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
 {
 	int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +
--
cgit v1.2.3
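
Note on the last patch: as its commit message explains, __attribute__((always_inline))
alone does not stop clang from also emitting a standalone copy of the helper into
.text, which the iproute2 loader then cannot handle; adding "static" lets the
compiler drop that copy. A minimal sketch of the resulting pattern for a BPF C
helper is below. The helper name and body are made up purely for illustration,
and __always_inline is assumed to expand to roughly
'inline __attribute__((always_inline))' as in the kernel's compiler headers.

  #include <linux/types.h>

  #ifndef __always_inline
  #define __always_inline inline __attribute__((always_inline))
  #endif

  /* Marking the helper "static" as well as always_inline means the compiler
   * does not have to keep an out-of-line copy, so no stray, non-relocatable
   * function body is left in the .text section of the compiled BPF object.
   */
  static __always_inline __u32 clamp_len(__u32 len, __u32 max)
  {
          return len > max ? max : len;
  }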