author     Ingo Molnar <mingo@kernel.org>    2017-08-10 12:02:26 +0200
committer  Ingo Molnar <mingo@kernel.org>    2017-08-10 12:02:26 +0200
commit     1ccb2f4e8e435a53bb378e8b092087f091754aa6
tree       ba120509947138915e06ad39cca5f065bd99f42f /tools
parent     c3a3800fe46f00ceeeb181cc07cc4fdaed4574f1
parent     9b231d9f47c6114d317ce28cff92a74ad80547f5
download   linux-1ccb2f4e8e435a53bb378e8b092087f091754aa6.tar.bz2
Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r--  tools/build/feature/test-bpf.c                     2
-rwxr-xr-x  tools/kvm/kvm_stat/kvm_stat                       22
-rw-r--r--  tools/lib/bpf/bpf.c                                3
-rw-r--r--  tools/testing/selftests/bpf/test_pkt_md_access.c  11
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c           8
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c       47
6 files changed, 76 insertions, 17 deletions
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index 7598361ef1f1..da2172ff9662 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -11,6 +11,8 @@
 # define __NR_bpf 280
 # elif defined(__sparc__)
 # define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
 # else
 # error __NR_bpf not defined. libbpf does not support your arch.
 # endif
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index dd8f00cfb8b4..32283d88701a 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -474,7 +474,7 @@ class Provider(object):
     @staticmethod
     def is_field_wanted(fields_filter, field):
         """Indicate whether field is valid according to fields_filter."""
-        if not fields_filter:
+        if not fields_filter or fields_filter == "help":
             return True
         return re.match(fields_filter, field) is not None

@@ -1413,8 +1413,8 @@ performance.

 Requirements:
 - Access to:
-    /sys/kernel/debug/kvm
-    /sys/kernel/debug/trace/events/*
+    %s
+    %s/events/*
     /proc/pid/task
 - /proc/sys/kernel/perf_event_paranoid < 1 if user has no
   CAP_SYS_ADMIN and perf events are used.
@@ -1434,7 +1434,7 @@ Interactive Commands:
    s     set update interval
    x     toggle reporting of stats for individual child trace events
 Press any other key to refresh statistics immediately.
-"""
+""" % (PATH_DEBUGFS_KVM, PATH_DEBUGFS_TRACING)

 class PlainHelpFormatter(optparse.IndentedHelpFormatter):
     def format_description(self, description):
@@ -1496,7 +1496,8 @@ Press any other key to refresh statistics immediately.
                          action='store',
                          default=DEFAULT_REGEX,
                          dest='fields',
-                         help='fields to display (regex)',
+                         help='''fields to display (regex)
+                                 "-f help" for a list of available events''',
                          )
     optparser.add_option('-p', '--pid',
                          action='store',
@@ -1559,6 +1560,17 @@ def main():

     stats = Stats(options)

+    if options.fields == "help":
+        event_list = "\n"
+        s = stats.get()
+        for key in s.keys():
+            if key.find('(') != -1:
+                key = key[0:key.find('(')]
+            if event_list.find('\n' + key + '\n') == -1:
+                event_list += key + '\n'
+        sys.stdout.write(event_list)
+        return ""
+
     if options.log:
         log(stats)
     elif not options.once:
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 412a7c82995a..e5bbb090bf88 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -39,6 +39,8 @@
 # define __NR_bpf 280
 # elif defined(__sparc__)
 # define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
 # else
 # error __NR_bpf not defined. libbpf does not support your arch.
 # endif
@@ -314,7 +316,6 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
 	int err;

 	bzero(&attr, sizeof(attr));
-	bzero(info, *info_len);
 	attr.info.bpf_fd = prog_fd;
 	attr.info.info_len = *info_len;
 	attr.info.info = ptr_to_u64(info);
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c
index 71729d47eb85..7956302ecdf2 100644
--- a/tools/testing/selftests/bpf/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/test_pkt_md_access.c
@@ -12,12 +12,23 @@

 int _version SEC("version") = 1;

+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define TEST_FIELD(TYPE, FIELD, MASK)					\
 	{								\
 		TYPE tmp = *(volatile TYPE *)&skb->FIELD;		\
 		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))	\
 			return TC_ACT_SHOT;				\
 	}
+#else
+#define TEST_FIELD_OFFSET(a, b)	((sizeof(a) - sizeof(b)) / sizeof(b))
+#define TEST_FIELD(TYPE, FIELD, MASK)					\
+	{								\
+		TYPE tmp = *((volatile TYPE *)&skb->FIELD +		\
+			      TEST_FIELD_OFFSET(skb->FIELD, TYPE));	\
+		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))	\
+			return TC_ACT_SHOT;				\
+	}
+#endif

 SEC("test1")
 int process(struct __sk_buff *skb)
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 5855cd3d3d45..1f7dd35551b9 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -340,6 +340,7 @@ static void test_bpf_obj_id(void)

 		/* Check getting prog info */
 		info_len = sizeof(struct bpf_prog_info) * 2;
+		bzero(&prog_infos[i], info_len);
 		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
 		prog_infos[i].jited_prog_len = sizeof(jited_insns);
 		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
@@ -369,6 +370,7 @@ static void test_bpf_obj_id(void)

 		/* Check getting map info */
 		info_len = sizeof(struct bpf_map_info) * 2;
+		bzero(&map_infos[i], info_len);
 		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
 					     &info_len);
 		if (CHECK(err ||
@@ -394,7 +396,7 @@ static void test_bpf_obj_id(void)
 	nr_id_found = 0;
 	next_id = 0;
 	while (!bpf_prog_get_next_id(next_id, &next_id)) {
-		struct bpf_prog_info prog_info;
+		struct bpf_prog_info prog_info = {};
 		int prog_fd;

 		info_len = sizeof(prog_info);
@@ -418,6 +420,8 @@ static void test_bpf_obj_id(void)
 		nr_id_found++;

 		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+		prog_infos[i].jited_prog_insns = 0;
+		prog_infos[i].xlated_prog_insns = 0;
 		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
 		      memcmp(&prog_info, &prog_infos[i], info_len),
 		      "get-prog-info(next_id->fd)",
@@ -436,7 +440,7 @@ static void test_bpf_obj_id(void)
 	nr_id_found = 0;
 	next_id = 0;
 	while (!bpf_map_get_next_id(next_id, &next_id)) {
-		struct bpf_map_info map_info;
+		struct bpf_map_info map_info = {};
 		int map_fd;

 		info_len = sizeof(map_info);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index af7d173910f4..d3ed7324105e 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,7 @@
  * License as published by the Free Software Foundation.
  */

+#include <endian.h>
 #include <asm/types.h>
 #include <linux/types.h>
 #include <stdint.h>
@@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash byte load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash)),
 #else
@@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash byte load not permitted 3",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash half load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash)),
 #else
@@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash half load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash) + 2),
 #else
@@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period byte load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period half load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period word load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
 		"check skb->data half load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, data)),
 #else
@@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
 		"check skb->tc_classid half load not permitted for lwt prog",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, tc_classid)),
 #else
@@ -5980,6 +5981,34 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.result_unpriv = REJECT,
 	},
+	{
+		"subtraction bounds (map value)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
+			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
 };

 static int probe_filter_length(const struct bpf_insn *fp)