author     Jesper Dangaard Brouer <brouer@redhat.com>    2019-06-18 15:05:58 +0200
committer  David S. Miller <davem@davemloft.net>         2019-06-19 11:23:13 -0400
commit     f033b688c1ede5ec78c9a718fa9f0b374049bc31 (patch)
tree       541546c9e967a1d4c6be385231980637149599be /net/core
parent     d956a048cd3fc1ba154101a1a50fb37950081ff6 (diff)
download   linux-f033b688c1ede5ec78c9a718fa9f0b374049bc31.tar.bz2
xdp: add tracepoints for XDP mem
These tracepoints make it easier to troubleshoot XDP mem id disconnect.
The xdp:mem_disconnect tracepoint cannot be replaced via kprobe. It is
placed at the last stable place for the pointer to struct xdp_mem_allocator,
just before it's scheduled for RCU removal. It also extracts info on
'safe_to_remove' and 'force'.
Detailed info about in-flight pages is not available at this layer. The next
patch will add the tracepoints needed at the page_pool layer for this.
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
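The trace_mem_connect, trace_mem_disconnect and trace_mem_return_failed calls added in the diff below are backed by event definitions in include/trace/events/xdp.h (note the new #include), which falls outside the net/core diffstat shown on this page. Purely as an illustrative sketch of what such an event declaration looks like, and not the actual definition from that header, a mem_disconnect event carrying the fields the commit message mentions could be declared roughly as follows; the exact field selection and format string are assumptions, though mem.id, mem.type and disconnect_cnt are all members of the struct xdp_mem_allocator removed from xdp.c in this patch:

/* Illustrative sketch only: the real event definitions live in
 * include/trace/events/xdp.h and are not part of this net/core diff.
 * Field choice below is an assumption based on the commit message and
 * on the struct xdp_mem_allocator members visible in the diff.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xdp

#if !defined(_TRACE_XDP_MEM_SKETCH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XDP_MEM_SKETCH_H

#include <linux/tracepoint.h>
#include <net/xdp_priv.h>	/* struct xdp_mem_allocator */

TRACE_EVENT(mem_disconnect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 bool safe_to_remove, bool force),

	TP_ARGS(xa, safe_to_remove, force),

	TP_STRUCT__entry(
		__field(u32,	mem_id)
		__field(u32,	mem_type)
		__field(bool,	safe_to_remove)
		__field(bool,	force)
		__field(int,	disconnect_cnt)
	),

	TP_fast_assign(
		__entry->mem_id		= xa->mem.id;
		__entry->mem_type	= xa->mem.type;
		__entry->safe_to_remove	= safe_to_remove;
		__entry->force		= force;
		__entry->disconnect_cnt	= xa->disconnect_cnt;
	),

	TP_printk("mem_id=%d mem_type=%d safe_to_remove=%d force=%d disconnect_cnt=%d",
		  __entry->mem_id, __entry->mem_type,
		  __entry->safe_to_remove, __entry->force,
		  __entry->disconnect_cnt)
);

#endif /* _TRACE_XDP_MEM_SKETCH_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

Once such an event is compiled in, it shows up under /sys/kernel/tracing/events/xdp/ like any other tracepoint.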
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/xdp.c  21
1 file changed, 5 insertions, 16 deletions
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 622c81dc7ba8..b29d7b513a18 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -14,6 +14,8 @@
 #include <net/page_pool.h>
 
 #include <net/xdp.h>
+#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
+#include <trace/events/xdp.h>
 
 #define REG_STATE_NEW		0x0
 #define REG_STATE_REGISTERED	0x1
@@ -29,21 +31,6 @@ static int mem_id_next = MEM_ID_MIN;
 static bool mem_id_init; /* false */
 static struct rhashtable *mem_id_ht;
 
-struct xdp_mem_allocator {
-	struct xdp_mem_info mem;
-	union {
-		void *allocator;
-		struct page_pool *page_pool;
-		struct zero_copy_allocator *zc_alloc;
-	};
-	struct rhash_head node;
-	struct rcu_head rcu;
-	struct delayed_work defer_wq;
-	unsigned long defer_start;
-	unsigned long defer_warn;
-	int disconnect_cnt;
-};
-
 static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
 {
 	const u32 *k = data;
@@ -117,7 +104,7 @@ bool __mem_id_disconnect(int id, bool force)
 	if (xa->mem.type == MEM_TYPE_PAGE_POOL)
 		safe_to_remove = page_pool_request_shutdown(xa->page_pool);
 
-	/* TODO: Tracepoint will be added here in next-patch */
+	trace_mem_disconnect(xa, safe_to_remove, force);
 
 	if ((safe_to_remove || force) &&
 	    !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
@@ -385,6 +372,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
 
 	mutex_unlock(&mem_id_lock);
 
+	trace_mem_connect(xdp_alloc, xdp_rxq);
 	return 0;
 err:
 	mutex_unlock(&mem_id_lock);
@@ -417,6 +405,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		} else {
 			/* Hopefully stack show who to blame for late return */
 			WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
+			trace_mem_return_failed(mem, page);
 			put_page(page);
 		}
 		rcu_read_unlock();
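For troubleshooting an actual mem id disconnect, these events are consumed like any other tracepoint. The sketch below is not part of the patch; it simply enables xdp:mem_disconnect through tracefs and streams the formatted output, assuming tracefs is mounted at /sys/kernel/tracing (older setups expose the same files under /sys/kernel/debug/tracing) and the process has the needed privileges:

/* Illustrative only: enable the xdp:mem_disconnect tracepoint via tracefs
 * and stream formatted events from trace_pipe. Paths assume tracefs is
 * mounted at /sys/kernel/tracing; adjust if it lives under debugfs.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/tracing/events/xdp/mem_disconnect/enable";
	const char *trace_pipe_path = "/sys/kernel/tracing/trace_pipe";
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(enable, O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable xdp:mem_disconnect");
		return 1;
	}
	close(fd);

	fd = open(trace_pipe_path, O_RDONLY);	/* read() blocks until events arrive */
	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* one formatted event per line */
	}
	close(fd);
	return 0;
}

A perf session (perf record -e xdp:mem_disconnect) or a bpftrace one-liner would serve equally well.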