author      Vlad Buslov <vladbu@mellanox.com>      2018-08-10 20:51:45 +0300
committer   David S. Miller <davem@davemloft.net>  2018-08-11 12:37:09 -0700
commit      ff25276de997f41197ebab91935627c249a30fc4 (patch)
tree        6de06ae2ad225b18e3326b3c693ee596249a7dca /net
parent      54d0d423a48aa0e61bb39665d20376ba7b940535 (diff)
download    linux-ff25276de997f41197ebab91935627c249a30fc4.tar.bz2
net: sched: act_ipt: remove dependency on rtnl lock
Use tcf spinlock to protect ipt action private data from concurrent
modification during dump. Ipt init already takes tcf spinlock when
modifying ipt state.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/sched/act_ipt.c | 3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 0dc787a57798..e149f0e66cb6 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -288,6 +288,7 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
* for foolproof you need to not assume this
*/
+ spin_lock_bh(&ipt->tcf_lock);
t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
if (unlikely(!t))
goto nla_put_failure;
@@ -307,10 +308,12 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&ipt->tcf_lock);
kfree(t);
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&ipt->tcf_lock);
nlmsg_trim(skb, b);
kfree(t);
return -1;
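
For context, below is a condensed sketch of the dump path with this change applied: the action's private target data is copied with kmemdup() and the netlink attributes are filled while ipt->tcf_lock is held, so a concurrent tcf_ipt_init() cannot replace the data mid-dump. The function name tcf_ipt_dump_sketch and the elided attribute-filling step are simplifications for illustration, not the exact upstream function.

/*
 * Condensed sketch (not the exact upstream code) of the locking pattern
 * in tcf_ipt_dump() after this patch: snapshot and dump the action's
 * private data entirely under ipt->tcf_lock.
 */
static int tcf_ipt_dump_sketch(struct sk_buff *skb, struct tcf_ipt *ipt)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct xt_entry_target *t;

	spin_lock_bh(&ipt->tcf_lock);

	/* GFP_ATOMIC: we must not sleep while holding the spinlock */
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	/* ... fill the TCA_IPT_* attributes from ipt's private data ... */

	spin_unlock_bh(&ipt->tcf_lock);
	kfree(t);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ipt->tcf_lock);
	nlmsg_trim(skb, b);	/* drop any partially written attributes */
	kfree(t);		/* kfree(NULL) is a no-op, so this is safe */
	return -1;
}

The _bh variant matches how the same lock is used elsewhere in the action: it is also taken from the packet-processing path, which runs in softirq context, while dump runs in process context, so bottom halves are disabled for the duration of the critical section.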