author		Reshetova, Elena <elena.reshetova@intel.com>	2017-06-30 13:07:59 +0300
committer	David S. Miller <davem@davemloft.net>	2017-07-01 07:39:08 -0700
commit		2638595afccf6554bfe55268ff9b2d3ac3dff2e6 (patch)
tree		958d39026bc292e6048824012fdcb46f3a70d7e9
parent		633547973ffc32fd2c815639d4675e1531f0896f (diff)
net: convert sk_buff_fclones.fclone_ref from atomic_t to refcount_t
The refcount_t type and its corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows us to avoid accidental reference-counter overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
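For readers unfamiliar with refcount_t: unlike a raw atomic_t, its increment saturates at a ceiling instead of wrapping past the maximum back toward zero, so a leaked reference can no longer wrap the counter into a state where a still-referenced object looks free. The userspace sketch below models those semantics with C11 atomics; struct refcount here is a simplified illustration of the idea, not the kernel's actual implementation (which additionally WARNs on misuse such as increment-from-zero).

	#include <limits.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified model of refcount_t: saturates instead of wrapping. */
	struct refcount { atomic_uint refs; };

	#define REFCOUNT_SATURATED UINT_MAX

	static void refcount_set(struct refcount *r, unsigned int n)
	{
		atomic_store(&r->refs, n);
	}

	static unsigned int refcount_read(struct refcount *r)
	{
		return atomic_load(&r->refs);
	}

	static void refcount_inc(struct refcount *r)
	{
		unsigned int old = atomic_load(&r->refs);

		do {
			if (old == REFCOUNT_SATURATED)
				return;	/* stick at the ceiling, never wrap to 0 */
		} while (!atomic_compare_exchange_weak(&r->refs, &old, old + 1));
	}

	static bool refcount_dec_and_test(struct refcount *r)
	{
		unsigned int old = atomic_load(&r->refs);

		do {
			if (old == REFCOUNT_SATURATED)
				return false;	/* a saturated object is never freed */
			if (old == 0)
				return false;	/* underflow; real refcount_t would WARN */
		} while (!atomic_compare_exchange_weak(&r->refs, &old, old - 1));

		return old == 1;	/* true only for the final put */
	}

	int main(void)
	{
		struct refcount ref;

		refcount_set(&ref, 1);
		refcount_inc(&ref);
		printf("refs after inc: %u\n", refcount_read(&ref));	/* 2 */
		printf("final put? %d\n", refcount_dec_and_test(&ref));	/* 0 */
		printf("final put? %d\n", refcount_dec_and_test(&ref));	/* 1 */
		return 0;
	}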
-rw-r--r--	include/linux/skbuff.h	4
-rw-r--r--	net/core/skbuff.c	10
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 90cbd86152da..d0b9f3846eab 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -915,7 +915,7 @@ struct sk_buff_fclones {
 
 	struct sk_buff	skb2;
 
-	atomic_t	fclone_ref;
+	refcount_t	fclone_ref;
 };
 
 /**
@@ -935,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
 	fclones = container_of(skb, struct sk_buff_fclones, skb1);
 
 	return skb->fclone == SKB_FCLONE_ORIG &&
-	       atomic_read(&fclones->fclone_ref) > 1 &&
+	       refcount_read(&fclones->fclone_ref) > 1 &&
 	       fclones->skb2.sk == sk;
 }
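Aside: fclone_ref counts live users of the skb1/skb2 pair that __alloc_skb() carves out of skbuff_fclone_cache, so a value above 1 in skb_fclone_busy() means the companion clone is still in flight. In terms of the counter modeled in the sketch above (a hypothetical illustration, not kernel code):

	/* Hypothetical: the pair is busy iff the clone still holds a reference. */
	bool pair_busy = refcount_read(&fclone_ref) > 1;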
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 45dc6620dd74..659dfc0494c5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
 		skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(&fclones->fclone_ref, 1);
+		refcount_set(&fclones->fclone_ref, 1);
 
 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 	}
@@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		 * This test would have no chance to be true for the clone,
 		 * while here, branch prediction will be good.
 		 */
-		if (atomic_read(&fclones->fclone_ref) == 1)
+		if (refcount_read(&fclones->fclone_ref) == 1)
 			goto fastpath;
 		break;
@@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 		break;
 	}
-	if (!atomic_dec_and_test(&fclones->fclone_ref))
+	if (!refcount_dec_and_test(&fclones->fclone_ref))
 		return;
 fastpath:
 	kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		return NULL;
 
 	if (skb->fclone == SKB_FCLONE_ORIG &&
-	    atomic_read(&fclones->fclone_ref) == 1) {
+	    refcount_read(&fclones->fclone_ref) == 1) {
 		n = &fclones->skb2;
-		atomic_set(&fclones->fclone_ref, 2);
+		refcount_set(&fclones->fclone_ref, 2);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
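Taken together, the hunks above walk fclone_ref through one lifecycle. The trace below reuses the modeled struct refcount from the first sketch; fclone_ref and the kernel functions named in comments mirror the patch, while free_pair() is a made-up stand-in:

	struct refcount fclone_ref;

	/* __alloc_skb(): the fclone pair starts with only skb1 live. */
	refcount_set(&fclone_ref, 1);

	/* skb_clone() fast path: hand out skb2 only while we hold the sole reference. */
	if (refcount_read(&fclone_ref) == 1)
		refcount_set(&fclone_ref, 2);	/* skb1 and skb2 both live now */

	/* kfree_skbmem(): only the last put releases the pair. */
	if (refcount_dec_and_test(&fclone_ref))
		free_pair();	/* stands in for kmem_cache_free(skbuff_fclone_cache, fclones) */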