author      Vlad Buslov <vladbu@mellanox.com>        2019-03-06 16:22:12 +0200
committer   David S. Miller <davem@davemloft.net>    2019-03-06 10:52:16 -0800
commit      ecb3dea400d3beaf611ce76ac7a51d4230492cf2 (patch)
tree        6f3617886d27ffc510099e7a167d2606c2707085 /net/sched
parent      a10674bf2406afc2554f9c7d31b2dc65d6a27fd9 (diff)
download    linux-ecb3dea400d3beaf611ce76ac7a51d4230492cf2.tar.bz2
net: sched: flower: insert new filter to idr after setting its mask
When adding a new filter to the flower classifier, fl_change() inserts it
into handle_idr before initializing the filter's extensions and assigning it
a mask. Normally this ordering doesn't matter because all flower classifier
ops callbacks assume rtnl lock protection. However, when the filter has an
action whose kernel module is not yet loaded, the rtnl lock is released
before the call to request_module(). During this window the filter can be
accessed by a concurrent task before its initialization is completed, which
can lead to a crash.
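For context, the unlocked window is opened by the action-init path itself:
when the requested action kind has no ops registered yet, rtnl is dropped
around the module load. A paraphrased sketch of that spot in
tcf_action_init_1() (net/sched/act_api.c); this is a simplification for
illustration, not the verbatim source:

	a_o = tc_lookup_action_n(act_name);
#ifdef CONFIG_MODULES
	if (a_o == NULL) {
		rtnl_unlock();          /* fnew is already visible in handle_idr */
		request_module("act_%s", act_name);
		rtnl_lock();            /* a concurrent dump may have run here */
		a_o = tc_lookup_action_n(act_name);
	}
#endif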
Example case of NULL pointer dereference in concurrent dump:
Task 1                                Task 2

tc_new_tfilter()
 fl_change()
  idr_alloc_u32(fnew)
  fl_set_parms()
   tcf_exts_validate()
    tcf_action_init()
     tcf_action_init_1()
      rtnl_unlock()
      request_module()
      ...                             rtnl_lock()
                                      tc_dump_tfilter()
                                       tcf_chain_dump()
                                        fl_walk()
                                         idr_get_next_ul()
                                         tcf_node_dump()
                                          tcf_fill_node()
                                           fl_dump()
                                            mask = &f->mask->key; <- NULL ptr
      rtnl_lock()
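The dump side can see the half-initialized filter because idr_alloc_u32()
has already published fnew in head->handle_idr, and fl_walk() iterates
exactly that idr. A simplified, paraphrased sketch of the reader side (not
the verbatim cls_flower.c of this kernel):

	/* fl_walk(): every filter present in handle_idr is handed to the
	 * dump callback, even if fl_check_assign_mask() has not assigned
	 * its ->mask yet.
	 */
	while ((f = idr_get_next_ul(&head->handle_idr, &id))) {
		if (arg->fn(tp, f, arg) < 0) {  /* ends up in fl_dump() */
			arg->stop = 1;
			break;
		}
		arg->count++;
		id++;
	}

	/* fl_dump() then dereferences f->mask unconditionally: */
	key = &f->key;
	mask = &f->mask->key;                   /* NULL pointer dereference */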
Extension initialization and mask assignment don't depend on fnew->handle,
which is allocated by idr_alloc_u32(). Move the idr allocation code after
action creation and mask assignment in fl_change() to prevent concurrent
access to a not yet fully initialized filter while the rtnl lock is released
to load the action module.
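In control-flow terms, the patched fl_change() publishes the handle only once
the actions and the mask are in place, and the error unwind order is swapped
to match. A condensed view of the resulting flow (only the !handle branch
shown, error labels as in the diff below):

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
			   tp->chain->tmplt_priv, extack);  /* may drop rtnl */
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, fnew, fold, mask); /* sets fnew->mask */
	if (err)
		goto errout;

	err = idr_alloc_u32(&head->handle_idr, fnew, &handle,  /* publish last */
			    INT_MAX, GFP_KERNEL);
	if (err)
		goto errout_mask;
	fnew->handle = handle;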
Fixes: 01683a146999 ("net: sched: refactor flower walk to iterate over idr")
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/cls_flower.c   43
1 file changed, 22 insertions, 21 deletions
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 27300a3e76c7..c04247b403ed 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1348,46 +1348,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (err < 0)
 		goto errout;
 
-	if (!handle) {
-		handle = 1;
-		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-				    INT_MAX, GFP_KERNEL);
-	} else if (!fold) {
-		/* user specifies a handle and it doesn't exist */
-		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-				    handle, GFP_KERNEL);
-	}
-	if (err)
-		goto errout;
-	fnew->handle = handle;
-
 	if (tb[TCA_FLOWER_FLAGS]) {
 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 
 		if (!tc_flags_valid(fnew->flags)) {
 			err = -EINVAL;
-			goto errout_idr;
+			goto errout;
 		}
 	}
 
 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
 			   tp->chain->tmplt_priv, extack);
 	if (err)
-		goto errout_idr;
+		goto errout;
 
 	err = fl_check_assign_mask(head, fnew, fold, mask);
 	if (err)
-		goto errout_idr;
+		goto errout;
+
+	if (!handle) {
+		handle = 1;
+		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+				    INT_MAX, GFP_KERNEL);
+	} else if (!fold) {
+		/* user specifies a handle and it doesn't exist */
+		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+				    handle, GFP_KERNEL);
+	}
+	if (err)
+		goto errout_mask;
+	fnew->handle = handle;
 
 	if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
 		err = -EEXIST;
-		goto errout_mask;
+		goto errout_idr;
 	}
 
 	err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
 				     fnew->mask->filter_ht_params);
 	if (err)
-		goto errout_mask;
+		goto errout_idr;
 
 	if (!tc_skip_hw(fnew->flags)) {
 		err = fl_hw_replace_filter(tp, fnew, extack);
@@ -1426,12 +1426,13 @@ errout_mask_ht:
 	rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
 			       fnew->mask->filter_ht_params);
 
-errout_mask:
-	fl_mask_put(head, fnew->mask, false);
-
 errout_idr:
 	if (!fold)
 		idr_remove(&head->handle_idr, fnew->handle);
+
+errout_mask:
+	fl_mask_put(head, fnew->mask, false);
+
 errout:
 	tcf_exts_destroy(&fnew->exts);
 	kfree(fnew);