author    Alexander Lobakin <alobakin@pm.me>        2021-02-13 14:12:02 +0000
committer David S. Miller <davem@davemloft.net>     2021-02-13 14:32:03 -0800
commit    fec6e49b63989657bc4076dad99fa51d5ece34da
tree      98df77201dfd70c97cca49ddf73e0ea0aa7b7109   /net/core/skbuff.c
parent    f9d6725bf44a5b9412b5da07e3467100fe2af236
skbuff: remove __kfree_skb_flush()
This function isn't really needed: the NAPI skb queue gets bulk-freed anyway once there is no more room, and an early flush may even reduce the efficiency of the bulk operations. It will be needed even less once the skb cache is also reused on the allocation path, so remove it and lighten network softirqs a bit.

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexander Lobakin <alobakin@pm.me>
Signed-off-by: David S. Miller <davem@davemloft.net>
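For context, the bulk-free behaviour the commit message relies on lives in the deferred-free path of this same file: each freed skb head is stashed in the per-CPU napi_alloc_cache, and the whole batch is handed to kmem_cache_free_bulk() in one call as soon as the cache fills, so a separate flush at softirq exit adds little. Below is a simplified sketch of that logic, not verbatim kernel code; the identifiers (napi_alloc_cache, NAPI_SKB_CACHE_SIZE, skb_release_all, skbuff_head_cache) are taken from net/core/skbuff.c of this period.

/* Simplified sketch of the deferred-free path in net/core/skbuff.c:
 * skbs are collected per CPU and bulk-freed once the cache is full,
 * which is why no explicit flush at the end of the softirq is needed.
 */
static inline void _kfree_skb_defer(struct sk_buff *skb)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        /* drop skb->head and run any destructors for the packet */
        skb_release_all(skb);

        /* record the skb head in the CPU-local cache */
        nc->skb_cache[nc->skb_count++] = skb;

        /* flush skb_cache once it is filled */
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
                kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
                                     nc->skb_cache);
                nc->skb_count = 0;
        }
}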
Diffstat (limited to 'net/core/skbuff.c')
 net/core/skbuff.c | 12 ------------
 1 file changed, 12 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1c6f6ef70339..4be2bb969535 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -838,18 +838,6 @@ void __consume_stateless_skb(struct sk_buff *skb)
 	kfree_skbmem(skb);
 }
 
-void __kfree_skb_flush(void)
-{
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-
-	/* flush skb_cache if containing objects */
-	if (nc->skb_count) {
-		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
-				     nc->skb_cache);
-		nc->skb_count = 0;
-	}
-}
-
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);