Diffstat (limited to 'net/core/skbuff.c')
 -rw-r--r--  net/core/skbuff.c | 71
 1 file changed, 39 insertions(+), 32 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d124306b81fd..368f65c15e4f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -160,8 +160,8 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
* @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
+ * tail room of at least size bytes. The object has a reference count
+ * of one. The return is the buffer. On a failure the return is %NULL.
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
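
The rewording above reflects that __alloc_skb rounds its allocation up (cache-line alignment plus whatever the slab actually hands back), so the usable tail room can exceed the requested size. A minimal userspace sketch of the cache-line rounding, assuming the usual 64-byte line; DATA_ALIGN is a stand-in for the kernel's SKB_DATA_ALIGN and the numbers are illustrative only:

#include <stdio.h>

#define SMP_CACHE_BYTES 64
/* Round a request up to a whole number of cache lines, as the kernel's
 * SKB_DATA_ALIGN() does; the surplus becomes extra tail room. */
#define DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))

int main(void)
{
	unsigned int size = 100;                /* requested tail room        */
	unsigned int backed = DATA_ALIGN(size); /* what the buffer really has */

	printf("requested %u bytes, backed by %u, %u bytes of extra tail room\n",
	       size, backed, backed - size);
	return 0;
}
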
@@ -296,9 +296,12 @@ EXPORT_SYMBOL(build_skb);
struct netdev_alloc_cache {
struct page *page;
unsigned int offset;
+ unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
+
/**
* netdev_alloc_frag - allocate a page fragment
* @fragsz: fragment size
@@ -317,17 +320,26 @@ void *netdev_alloc_frag(unsigned int fragsz)
if (unlikely(!nc->page)) {
refill:
nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ if (unlikely(!nc->page))
+ goto end;
+recycle:
+ atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
+ nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
nc->offset = 0;
}
- if (likely(nc->page)) {
- if (nc->offset + fragsz > PAGE_SIZE) {
- put_page(nc->page);
- goto refill;
- }
- data = page_address(nc->page) + nc->offset;
- nc->offset += fragsz;
- get_page(nc->page);
+
+ if (nc->offset + fragsz > PAGE_SIZE) {
+ /* avoid unnecessary locked operations if possible */
+ if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
+ atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
+ goto recycle;
+ goto refill;
}
+
+ data = page_address(nc->page) + nc->offset;
+ nc->offset += fragsz;
+ nc->pagecnt_bias--;
+end:
local_irq_restore(flags);
return data;
}
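
The rewritten allocator above uses biased reference counting: a freshly allocated (or recycled) page is charged NETDEV_PAGECNT_BIAS references up front, and each fragment handed out consumes one of those prepaid references by decrementing the local pagecnt_bias, so the fast path needs no get_page()/put_page() atomics. Only when the page fills up is the real _count reconciled against the unspent bias to decide whether the page can be reused in place. A rough userspace model of the same idea, using C11 atomics in place of page->_count; struct shared_buf, struct frag_cache, frag_alloc and buf_put are invented names for this sketch:

#include <stdatomic.h>
#include <stdlib.h>

#define BUF_SIZE 4096
#define BIAS     (BUF_SIZE / 64)	/* cf. PAGE_SIZE / SMP_CACHE_BYTES */

/* Stand-in for struct page: a buffer plus its reference count. */
struct shared_buf {
	atomic_uint   refcnt;
	unsigned char data[BUF_SIZE];
};

/* Stand-in for the per-CPU netdev_alloc_cache. */
struct frag_cache {
	struct shared_buf *buf;
	unsigned int       offset;	/* next unused byte in buf               */
	unsigned int       bias;	/* prepaid references not yet handed out */
};

/* put_page() analogue: the last reference frees the buffer. */
static void buf_put(struct shared_buf *b)
{
	if (atomic_fetch_sub(&b->refcnt, 1) == 1)
		free(b);
}

/* Carve a fragment out of the cached buffer, paying for references in
 * bulk (BIAS at a time) instead of one atomic op per fragment. */
static void *frag_alloc(struct frag_cache *c, unsigned int fragsz)
{
	if (!c->buf) {
refill:
		c->buf = malloc(sizeof(*c->buf));
		if (!c->buf)
			return NULL;
recycle:
		atomic_store(&c->buf->refcnt, BIAS);
		c->bias = BIAS;
		c->offset = 0;
	}

	if (c->offset + fragsz > BUF_SIZE) {
		/* If the real count equals the unspent bias, no fragment is
		 * still held elsewhere and the buffer can be reused as-is;
		 * otherwise drop our prepaid references and start over (the
		 * remaining holders free the old buffer via buf_put()). */
		if (atomic_load(&c->buf->refcnt) == c->bias ||
		    atomic_fetch_sub(&c->buf->refcnt, c->bias) == c->bias)
			goto recycle;
		goto refill;
	}

	c->offset += fragsz;
	c->bias--;	/* one prepaid reference now belongs to the caller */
	return c->buf->data + c->offset - fragsz;
}

A consumer that obtained a fragment from frag_alloc() later drops it with buf_put(); the kernel gets back to the owning page from the fragment address via virt_to_page(), which this flat model elides.
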
@@ -713,7 +725,8 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
}
EXPORT_SYMBOL_GPL(skb_morph);
-/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+/**
+ * skb_copy_ubufs - copy userspace skb frags buffers to kernel
* @skb: the skb to modify
* @gfp_mask: allocation priority
*
@@ -738,7 +751,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
u8 *vaddr;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- page = alloc_page(GFP_ATOMIC);
+ page = alloc_page(gfp_mask);
if (!page) {
while (head) {
struct page *next = (struct page *)head->private;
@@ -756,22 +769,22 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
}
/* skb frags release userspace buffers */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ for (i = 0; i < num_frags; i++)
skb_frag_unref(skb, i);
uarg->callback(uarg);
/* skb frags point to kernel buffers */
- for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
- __skb_fill_page_desc(skb, i-1, head, 0,
- skb_shinfo(skb)->frags[i - 1].size);
+ for (i = num_frags - 1; i >= 0; i--) {
+ __skb_fill_page_desc(skb, i, head, 0,
+ skb_shinfo(skb)->frags[i].size);
head = (struct page *)head->private;
}
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
return 0;
}
-
+EXPORT_SYMBOL_GPL(skb_copy_ubufs);
/**
* skb_clone - duplicate an sk_buff
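
skb_copy_ubufs() strings the pages it allocates into a LIFO chain through the otherwise unused page->private field; that chain is what the error path unwinds and what the rewritten fill loop walks from the highest frag index down (head always refers to the page allocated last). A small userspace illustration of the same chain-and-unwind idiom; struct blk and alloc_chain are invented for the example:

#include <stdlib.h>

/* A block with a spare pointer, playing the role of page->private. */
struct blk {
	struct blk *private;
	char        data[256];
};

/* Allocate n blocks, pushing each onto a LIFO chain; on failure free
 * whatever was already chained, exactly like the error path above. */
static struct blk *alloc_chain(int n)
{
	struct blk *head = NULL;
	int i;

	for (i = 0; i < n; i++) {
		struct blk *b = malloc(sizeof(*b));

		if (!b) {
			while (head) {
				struct blk *next = head->private;

				free(head);
				head = next;
			}
			return NULL;
		}
		b->private = head;	/* push: newest block becomes the head */
		head = b;
	}
	/* head now refers to block n-1, so consumers walk the chain from the
	 * last index down, as the descriptor fill loop in the hunk does. */
	return head;
}
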
@@ -791,10 +804,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *n;
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask))
- return NULL;
- }
+ if (skb_orphan_frags(skb, gfp_mask))
+ return NULL;
n = skb + 1;
if (skb->fclone == SKB_FCLONE_ORIG &&
@@ -914,12 +925,10 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
if (skb_shinfo(skb)->nr_frags) {
int i;
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask)) {
- kfree_skb(n);
- n = NULL;
- goto out;
- }
+ if (skb_orphan_frags(skb, gfp_mask)) {
+ kfree_skb(n);
+ n = NULL;
+ goto out;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -992,10 +1001,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
*/
if (skb_cloned(skb)) {
/* copy this zero copy skb frags */
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask))
- goto nofrags;
- }
+ if (skb_orphan_frags(skb, gfp_mask))
+ goto nofrags;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_ref(skb, i);
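
The three hunks above (skb_clone(), __pskb_copy(), pskb_expand_head()) replace the same open-coded pattern, a SKBTX_DEV_ZEROCOPY test followed by skb_copy_ubufs(), with a single skb_orphan_frags() helper declared in include/linux/skbuff.h. Judging purely from the code it replaces here, the helper amounts to something like the sketch below; treat it as inferred from these call sites rather than quoted:

/* Sketch only, reconstructed from the call sites above: give the skb
 * private copies of any userspace (zerocopy) fragments, or report that
 * this could not be done. */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

Centralizing the check keeps each caller from reimplementing the zerocopy test and lets future callers reuse the same error handling.
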
@@ -2614,7 +2621,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
EXPORT_SYMBOL(skb_find_text);
/**
- * skb_append_datato_frags: - append the user data to a skb
+ * skb_append_datato_frags - append the user data to a skb
* @sk: sock structure
* @skb: skb structure to be appended with user data.
* @getfrag: call back function to be used for getting the user data