author     Jesper Dangaard Brouer <brouer@redhat.com>    2019-12-27 18:13:24 +0100
committer  David S. Miller <davem@davemloft.net>         2020-01-02 15:37:52 -0800
commit     f13fc10785bc04f83fe0d7730beb8ff4be563a20 (patch)
tree       9284b79bba0aa682bb0019f1165a5fb50c6edace /net/core/page_pool.c
parent     44768decb7c0a6c7c01995dbacc864f3921b6dac (diff)
page_pool: help compiler remove code in case CONFIG_NUMA=n
When the kernel is compiled without NUMA support, the page_pool NUMA
config setting (pool->p.nid) doesn't make any practical sense, but the
compiler cannot see that it can remove those code paths.

This patch avoids reading the pool->p.nid setting when !CONFIG_NUMA, in
the allocation and NUMA-check code, which helps the compiler see the
optimisation potential. The update code is left intact to keep the API
the same.

 $ ./scripts/bloat-o-meter net/core/page_pool.o-numa-enabled \
                           net/core/page_pool.o-numa-disabled
 add/remove: 0/0 grow/shrink: 0/3 up/down: 0/-113 (-113)
 Function                         old     new   delta
 page_pool_create                 401     398      -3
 __page_pool_alloc_pages_slow     439     426     -13
 page_pool_refill_alloc_cache     425     328     -97
 Total: Before=3611, After=3498, chg -3.13%

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
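To see why hiding the pool->p.nid read is enough for the compiler, here
is a minimal standalone C sketch of the refill-path logic. The stub
definitions (NUMA_NO_NODE, numa_mem_id(), struct page_pool_params) are
simplified assumptions for illustration, not the kernel's real headers;
only the #ifdef structure mirrors the patch.

	/* Simplified sketch -- stubs are assumptions, not kernel code. */
	#define NUMA_NO_NODE	(-1)

	#ifdef CONFIG_NUMA
	extern int numa_mem_id(void);	/* runtime lookup: not a constant */
	#else
	/* Single-node build: only node 0 exists, so this is a constant. */
	static inline int numa_mem_id(void) { return 0; }
	#endif

	struct page_pool_params { int nid; };

	static inline int pick_pref_nid(const struct page_pool_params *p)
	{
	#ifdef CONFIG_NUMA
		/* p->nid must be read at runtime; both branches stay live. */
		return (p->nid == NUMA_NO_NODE) ? numa_mem_id() : p->nid;
	#else
		/* numa_mem_id() folds to 0 here, so the whole function
		 * reduces to "return 0" and the p->nid load disappears.
		 */
		return numa_mem_id();
	#endif
	}

With !CONFIG_NUMA the ternary never appears in the generated code, which
is exactly the shrinkage the bloat-o-meter output above measures.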
Diffstat (limited to 'net/core/page_pool.c')
-rw-r--r--	net/core/page_pool.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 515bfb97fda1..9b7cbe35df37 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -113,7 +113,12 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
 	/* Softirq guarantee CPU and thus NUMA node is stable. This,
 	 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
 	 */
+#ifdef CONFIG_NUMA
 	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
+#else
+	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
+	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
+#endif
 
 	/* Slower-path: Get pages from locked ring queue */
 	spin_lock(&r->consumer_lock);
@@ -200,7 +205,11 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	 */
 
 	/* Cache was empty, do real allocation */
+#ifdef CONFIG_NUMA
 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
+#else
+	page = alloc_pages(gfp, pool->p.order);
+#endif
 	if (!page)
 		return NULL;
 
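For the allocation-side hunk, a similar sketch shows why the
node-agnostic call helps. The prototypes below are stand-ins for the
real mm API declarations, and struct pool / pool_alloc_slow() are
hypothetical names for illustration: alloc_pages_node() takes the node
id as an argument, so the caller must load pool->p.nid even on a
single-node build, whereas alloc_pages() never touches it.

	/* Sketch with stand-in prototypes; not the real mm declarations. */
	typedef unsigned int gfp_t;
	struct page;

	struct page *alloc_pages(gfp_t gfp, unsigned int order);
	struct page *alloc_pages_node(int nid, gfp_t gfp, unsigned int order);

	struct pool { struct { int nid; unsigned int order; } p; };

	static struct page *pool_alloc_slow(struct pool *pool, gfp_t gfp)
	{
	#ifdef CONFIG_NUMA
		/* The configured node matters: pass pool->p.nid through. */
		return alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	#else
		/* Single-node build: the node-agnostic call avoids the
		 * pool->p.nid load the compiler could not remove on its own.
		 */
		return alloc_pages(gfp, pool->p.order);
	#endif
	}

Keeping the #else branch free of any pool->p.nid reference, rather than
passing a constant node id, is what lets the dead load vanish while the
pool's nid-update API stays unchanged for NUMA builds.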