From d0cabbb021bee5c4b831a0235af9534ad07f8d3d Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Thu, 10 May 2018 10:24:40 -0700
Subject: tools: bpf: move the event reading loop to libbpf

There are two copies of event reading loop - in bpftool and
trace_helpers "library". Consolidate them and move the code to libbpf.
Return codes from trace_helpers are kept, but renamed to include LIBBPF
prefix.

Suggested-by: Alexei Starovoitov
Signed-off-by: Jakub Kicinski
Reviewed-by: Quentin Monnet
Signed-off-by: Daniel Borkmann
---
 tools/bpf/bpftool/map_perf_ring.c | 66 +++++++++------------------------------
 1 file changed, 15 insertions(+), 51 deletions(-)

(limited to 'tools/bpf')

diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index 9ae4bb8a2cad..1832100d1b27 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -50,14 +50,15 @@ static void int_exit(int signo)
 	stop = true;
 }
 
-static void
-print_bpf_output(struct event_ring_info *ring, struct perf_event_sample *e)
+static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv)
 {
+	struct event_ring_info *ring = priv;
+	struct perf_event_sample *e = event;
 	struct {
 		struct perf_event_header header;
 		__u64 id;
 		__u64 lost;
-	} *lost = (void *)e;
+	} *lost = event;
 
 	if (json_output) {
 		jsonw_start_object(json_wtr);
@@ -96,60 +97,23 @@ print_bpf_output(struct event_ring_info *ring, struct perf_event_sample *e)
 				e->header.type, e->header.size);
 		}
 	}
+
+	return LIBBPF_PERF_EVENT_CONT;
 }
 
 static void
 perf_event_read(struct event_ring_info *ring, void **buf, size_t *buf_len)
 {
-	volatile struct perf_event_mmap_page *header = ring->mem;
-	__u64 buffer_size = MMAP_PAGE_CNT * get_page_size();
-	__u64 data_tail = header->data_tail;
-	__u64 data_head = header->data_head;
-	void *base, *begin, *end;
-
-	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
-	if (data_head == data_tail)
-		return;
-
-	base = ((char *)header) + get_page_size();
-
-	begin = base + data_tail % buffer_size;
-	end = base + data_head % buffer_size;
-
-	while (begin != end) {
-		struct perf_event_sample *e;
-
-		e = begin;
-		if (begin + e->header.size > base + buffer_size) {
-			long len = base + buffer_size - begin;
-
-			if (*buf_len < e->header.size) {
-				free(*buf);
-				*buf = malloc(e->header.size);
-				if (!*buf) {
-					fprintf(stderr,
-						"can't allocate memory");
-					stop = true;
-					return;
-				}
-				*buf_len = e->header.size;
-			}
-
-			memcpy(*buf, begin, len);
-			memcpy(*buf + len, base, e->header.size - len);
-			e = (void *)*buf;
-			begin = base + e->header.size - len;
-		} else if (begin + e->header.size == base + buffer_size) {
-			begin = base;
-		} else {
-			begin += e->header.size;
-		}
-
-		print_bpf_output(ring, e);
+	enum bpf_perf_event_ret ret;
+
+	ret = bpf_perf_event_read_simple(ring->mem,
+					 MMAP_PAGE_CNT * get_page_size(),
+					 get_page_size(), buf, buf_len,
+					 print_bpf_output, ring);
+	if (ret != LIBBPF_PERF_EVENT_CONT) {
+		fprintf(stderr, "perf read loop failed with %d\n", ret);
+		stop = true;
 	}
-
-	__sync_synchronize(); /* smp_mb() */
-	header->data_tail = data_head;
 }
 
 static int perf_mmap_size(void)
-- 
cgit v1.2.3
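
For context, below is a minimal sketch of how a caller outside bpftool might use the
consolidated helper. It assumes the interface exactly as shown in this patch: a callback
of type enum bpf_perf_event_ret (*)(void *event, void *priv), the LIBBPF_PERF_EVENT_CONT
return code, and bpf_perf_event_read_simple(mem, data_size, page_size, &buf, &buf_len,
fn, priv). The struct my_ring layout, the handle_event/drain_ring names and the
<bpf/libbpf.h> include path are illustrative assumptions, not part of the libbpf API,
and later libbpf versions changed this helper's signature, so the sketch matches this
era of libbpf only.

/*
 * Usage sketch (assumption: libbpf as of this patch; my_ring, handle_event
 * and drain_ring are illustrative names, not libbpf API).
 */
#include <stdio.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>		/* assumed include path, e.g. built with -Itools/lib */

struct my_ring {
	void *mem;		/* mmap()ed ring: header page followed by data pages */
	size_t data_size;	/* size of the data area, i.e. n_data_pages * page_size */
	size_t page_size;	/* page size used for the mapping */
	unsigned long samples;	/* running count of PERF_RECORD_SAMPLE records */
};

/* Invoked once per record; returning LIBBPF_PERF_EVENT_CONT keeps the walk going. */
static enum bpf_perf_event_ret handle_event(void *event, void *priv)
{
	struct perf_event_header *hdr = event;
	struct my_ring *ring = priv;

	if (hdr->type == PERF_RECORD_SAMPLE)
		ring->samples++;

	return LIBBPF_PERF_EVENT_CONT;
}

/* Drain whatever is currently queued in one ring; returns 0 on success. */
static int drain_ring(struct my_ring *ring)
{
	static void *copy_buf;	/* scratch buffer the helper reallocates for
				 * records that wrap around the data area end */
	static size_t copy_len;
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(ring->mem, ring->data_size,
					 ring->page_size, &copy_buf, &copy_len,
					 handle_event, ring);
	if (ret != LIBBPF_PERF_EVENT_CONT) {
		fprintf(stderr, "perf read loop failed with %d\n", ret);
		return -1;
	}
	return 0;
}

As in the bpftool call above, the size argument covers the data pages only
(MMAP_PAGE_CNT * get_page_size() in bpftool); the header page holding data_head and
data_tail is skipped inside the helper using the separate page_size argument.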