author | Eric Dumazet <eric.dumazet@gmail.com> | 2009-07-16 14:03:40 +0200
---|---|---
committer | Patrick McHardy <kaber@trash.net> | 2009-07-16 14:03:40 +0200
commit | 941297f443f871b8c3372feccf27a8733f6ce9e9 (patch) |
tree | c98b7e30bf14255cfd87a3d8d929f26f35764d21 /net |
parent | aa6a03eb0ae859c1371555ef381de4c96ca1e4e6 (diff) |
download | linux-941297f443f871b8c3372feccf27a8733f6ce9e9.tar.bz2 |
netfilter: nf_conntrack: nf_conntrack_alloc() fixes
When a slab cache uses SLAB_DESTROY_BY_RCU, we must be careful when allocating
objects, since the slab allocator may return a freed object that is still being
used by lockless readers.
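For context, here is a minimal sketch of how such a cache is created, roughly as done for the conntrack cache in nf_conntrack_init() of this era (the function name `conntrack_cache_sketch` and the cache name are illustrative, not from the patch):

```c
#include <linux/slab.h>
#include <net/netfilter/nf_conntrack.h>

/* Sketch: a cache created with SLAB_DESTROY_BY_RCU. With this flag a
 * freed object may be reused for a new allocation immediately, but the
 * underlying slab page is only returned to the page allocator after an
 * RCU grace period -- so a lockless reader may still be traversing an
 * object at the moment it is freed and handed out again. */
static int __init conntrack_cache_sketch(void)
{
	struct kmem_cache *cachep;

	cachep = kmem_cache_create("nf_conntrack_sketch",
				   sizeof(struct nf_conn), 0,
				   SLAB_DESTROY_BY_RCU, NULL);
	if (!cachep)
		return -ENOMEM;
	return 0;
}
```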
In particular, nf_conntrack RCU lookups rely on ct->tuplehash[xxx].hnnode.next
always being valid (i.e. containing either a valid 'nulls' value or a valid
pointer to the next object in the hash chain).
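To see why, here is a simplified, hypothetical sketch of the lockless lookup loop, modeled on __nf_conntrack_find() in this tree (the function name `find_sketch` is made up, and the hash_conntrack() call and restart logic are abbreviated):

```c
#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>

/* Sketch (not part of this patch) of the lockless lookup that depends
 * on hnnode.next staying valid at all times. */
static struct nf_conntrack_tuple_hash *
find_sketch(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash = hash_conntrack(tuple);

begin:
	/* Follows hnnode.next with no lock held. A NULL next pointer,
	 * as left behind by kmem_cache_zalloc(), would be dereferenced
	 * right here; only a real pointer or a 'nulls' marker is safe. */
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple))
			return h;
	}
	/* The chain ended on a 'nulls' marker; if it is not the marker
	 * of our bucket, the object was moved to another chain while we
	 * walked it, so restart the lookup. */
	if (get_nulls_value(n) != hash)
		goto begin;
	return NULL;
}
```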
kmem_cache_zalloc() initializes the object with zeroes, but NULL is not a valid
value for ct->tuplehash[xxx].hnnode.next.
The fix is to call kmem_cache_alloc() and do the (partial) zeroing ourselves.
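The zeroing can be partial because of where tuplehash[] sits in the object; an abridged view of struct nf_conn's layout in this tree (member order recalled from this era's headers, illustrative only):

```c
/* Abridged, illustrative layout of struct nf_conn. The patch's memset()
 * starts at &ct->tuplehash[IP_CT_DIR_MAX] -- one element past the end
 * of the array, a legal address to compute -- and clears from there to
 * the end of the object. Everything in front of that point is instead
 * set up field by field. */
struct nf_conn {
	struct nf_conntrack ct_general;	/* refcount: atomic_set() last      */
	spinlock_t lock;		/* spin_lock_init() by hand         */
	struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
					/* tuples assigned, pprev = NULL,
					 * hnnode.next left untouched       */
	unsigned long status;		/* memset() clears from here on     */
	/* ... remaining members elided ... */
};
```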
As spotted by Patrick, we also need to make sure the lookup keys are committed
to memory before setting the refcount to 1, or a lockless reader could get a
reference to the old version of the object; its key re-check could then falsely
succeed.
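The smp_wmb() added below pairs with the reader's refcount acquisition and key re-check. A hypothetical sketch of that reader side, modeled on nf_conntrack_find_get() of this era (the function name `find_get_sketch` is made up; control flow abbreviated):

```c
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>

/* Sketch of the lockless reader the new smp_wmb() pairs with. */
static struct nf_conn *
find_get_sketch(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL;

	rcu_read_lock();
begin:
	h = __nf_conntrack_find(net, tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* A zero refcount means the object is being freed. */
		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) {
			ct = NULL;
		/* The entry may have been freed and reused for another
		 * flow between the lookup and the refcount bump, so
		 * re-check the key. The writer's smp_wmb() ensures a
		 * nonzero refcount implies the new keys are visible,
		 * so an object still carrying old keys cannot pass. */
		} else if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
			nf_ct_put(ct);
			ct = NULL;
			goto begin;
		}
	}
	rcu_read_unlock();
	return ct;
}
```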
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/netfilter/nf_conntrack_core.c | 21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7508f11c5b39..b5869b9574b0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -561,23 +561,38 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 		}
 	}
 
-	ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
+	/*
+	 * Do not use kmem_cache_zalloc(), as this cache uses
+	 * SLAB_DESTROY_BY_RCU.
+	 */
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
 	if (ct == NULL) {
 		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
 		atomic_dec(&net->ct.count);
 		return ERR_PTR(-ENOMEM);
 	}
-
+	/*
+	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
+	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
+	 */
+	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
+	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
 	spin_lock_init(&ct->lock);
-	atomic_set(&ct->ct_general.use, 1);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
 #ifdef CONFIG_NET_NS
 	ct->ct_net = net;
 #endif
 
+	/*
+	 * changes to lookup keys must be done before setting refcnt to 1
+	 */
+	smp_wmb();
+	atomic_set(&ct->ct_general.use, 1);
 	return ct;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);