author     Kirill Tkhai <ktkhai@virtuozzo.com>       2018-01-16 12:31:41 +0300
committer  David S. Miller <davem@davemloft.net>     2018-01-17 15:42:35 -0500
commit     0c06bea919f3289368a023d1a62a1bc319617fa3 (patch)
tree       d15b03fefc6edd450cb18c995d4948d8e6675e07 /net/core/net_namespace.c
parent     a29ae44c7aa3efa224ecc7325e4b6da2363c6b5f (diff)
download   linux-0c06bea919f3289368a023d1a62a1bc319617fa3.tar.bz2
net: Fix possible race in peernet2id_alloc()
peernet2id_alloc() is racy without rtnl_lock(), as refcount_read(&peer->count)
under net->nsid_lock does not guarantee that peer is alive:

	rcu_read_lock()
	peernet2id_alloc()                        ..
	  spin_lock_bh(&net->nsid_lock)           ..
	  refcount_read(&peer->count) (!= 0)      ..
	                                          ..   put_net()
	                                          ..     cleanup_net()
	                                          ..       for_each_net(tmp)
	                                          ..         spin_lock_bh(&tmp->nsid_lock)
	                                          ..         __peernet2id(tmp, net) == -1
	                                          ..             ..
	                                          ..             ..
	  __peernet2id_alloc(alloc == true)       ..             ..
	rcu_read_unlock()                         ..             ..
	                                          ..       synchronize_rcu()
	                                          ..       kmem_cache_free(net)

After the above situation, net::netns_ids contains an id pointing to freed
memory, and any further dereference via that id will operate on this freed
memory.

Currently, peernet2id_alloc() is used under rtnl_lock() everywhere except
ovs_vport_cmd_fill_info(), and this race can't occur. But peernet2id_alloc()
is a generic interface, and it is better to fix it before someone really
starts to use it in the wrong context.

v2: Don't place refcount_read(&net->count) under net->nsid_lock,
    as suggested by Eric W. Biederman <ebiederm@xmission.com>
v3: Rebase on top of net-next

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
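[Editor's note] To make the unsafe calling context concrete, below is a minimal,
hypothetical caller in the spirit of the ovs_vport_cmd_fill_info() case mentioned
above: it reaches peernet2id_alloc() holding only rcu_read_lock(), with neither
rtnl_lock() nor a reference on the peer, which is exactly the window shown in the
diagram. The function name and its second argument are illustrative, not from the
tree.

#include <linux/rcupdate.h>
#include <net/net_namespace.h>

/* Illustrative only: a caller that relies solely on RCU to pin the peer. */
static int report_peer_nsid(struct net *net, struct net *peer_on_rcu_list)
{
	int id;

	rcu_read_lock();
	/*
	 * No rtnl_lock() and no refcount held on peer_on_rcu_list here, so
	 * before the fix below, cleanup_net() could free the peer while its
	 * id was being inserted into net->netns_ids.
	 */
	id = peernet2id_alloc(net, peer_on_rcu_list);
	rcu_read_unlock();

	return id;
}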
Diffstat (limited to 'net/core/net_namespace.c')
-rw-r--r--   net/core/net_namespace.c   13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2213d45fcafd..3c77d84ad60d 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -221,17 +221,26 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
-	bool alloc;
+	bool alloc = false, alive = false;
 	int id;
 
 	if (refcount_read(&net->count) == 0)
 		return NETNSA_NSID_NOT_ASSIGNED;
 	spin_lock_bh(&net->nsid_lock);
-	alloc = refcount_read(&peer->count) == 0 ? false : true;
+	/*
+	 * When peer is obtained from RCU lists, we may race with
+	 * its cleanup. Check whether it's alive, and this guarantees
+	 * we never hash a peer back to net->netns_ids, after it has
+	 * just been idr_remove()'d from there in cleanup_net().
+	 */
+	if (maybe_get_net(peer))
+		alive = alloc = true;
 	id = __peernet2id_alloc(net, peer, &alloc);
 	spin_unlock_bh(&net->nsid_lock);
 	if (alloc && id >= 0)
 		rtnl_net_notifyid(net, RTM_NEWNSID, id);
+	if (alive)
+		put_net(peer);
 	return id;
 }
 EXPORT_SYMBOL_GPL(peernet2id_alloc);
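
[Editor's note] The fix hinges on maybe_get_net(), which takes a reference only
while the namespace refcount is still nonzero. A paraphrased sketch of its
behavior, modeled on include/net/net_namespace.h of this era (exact definition
may differ across kernel versions), looks like this:

#include <linux/refcount.h>
#include <net/net_namespace.h>

/* Paraphrased sketch: succeed only if the namespace has not started dying. */
static inline struct net *maybe_get_net_sketch(struct net *net)
{
	/*
	 * refcount_inc_not_zero() fails once the refcount has dropped to zero,
	 * i.e. once cleanup_net() owns the namespace, so the caller never
	 * treats a dying netns as alive.
	 */
	if (!refcount_inc_not_zero(&net->count))
		net = NULL;
	return net;
}

With that check done under net->nsid_lock, a dying peer leaves alloc == false,
so __peernet2id_alloc() only looks up an existing id and never inserts the dying
peer back into net->netns_ids; the temporary reference is dropped via put_net()
once the id has been resolved.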