author    John Hurley <john.hurley@netronome.com>  2019-12-05 17:03:35 +0000
committer David S. Miller <davem@davemloft.net>    2019-12-06 20:45:09 -0800
commit    25a443f74bcff2c4d506a39eae62fc15ad7c618a (patch)
tree      babfe8574b84081e05e01343889868507159fc73 /net/sched/cls_api.c
parent    dbad3408896c3c5722ec9cda065468b3df16c5bf (diff)
net: sched: allow indirect blocks to bind to clsact in TC
When a device is bound to a clsact qdisc, bind events are triggered to
registered drivers for both ingress and egress. However, if a driver
registers to such a device using the indirect block routines then it is
assumed that it is only interested in ingress offload and so only replays
ingress bind/unbind messages.

The NFP driver supports the offload of some egress filters when
registering to a block with qdisc of type clsact. However, on unregister,
if the block is still active, it will not receive an unbind egress
notification which can prevent proper cleanup of other registered
callbacks.

Modify the indirect block callback command in TC to send messages of
ingress and/or egress bind depending on the qdisc in use. NFP currently
supports egress offload for TC flower offload so the changes are only
added to TC.

Fixes: 4d12ba42787b ("nfp: flower: allow offloading of matches on 'internal' ports")
Signed-off-by: John Hurley <john.hurley@netronome.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
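For a driver on the receiving end, the visible change is that an indirect
block callback registered against a clsact device can now be invoked with the
egress binder type as well. The sketch below is a hypothetical consumer, not
code from this patch: foo_indr_setup_tc_cb() and foo_setup_block() are made-up
names, and only the binder types and the flow_block_offload/TC_SETUP_BLOCK
plumbing come from the APIs this patch touches.

/* Hypothetical driver-side indirect block callback; foo_* names are
 * illustrative only.  It follows the flow_indr_block_bind_cb_t shape
 * used by the TC code in this patch.
 */
static int foo_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
				enum tc_setup_type type, void *type_data)
{
	struct flow_block_offload *bo = type_data;

	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;

	switch (bo->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		/* After this patch, bind and unbind replays arrive for
		 * the egress side too, so egress callbacks can be
		 * cleaned up properly on unregister.
		 */
		return foo_setup_block(netdev, cb_priv, bo);
	default:
		return -EOPNOTSUPP;
	}
}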
Diffstat (limited to 'net/sched/cls_api.c')
-rw-r--r--  net/sched/cls_api.c  52
1 file changed, 33 insertions, 19 deletions
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 75b48083614e..3c335fd9bcb0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -626,15 +626,15 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
static int tcf_block_setup(struct tcf_block *block,
struct flow_block_offload *bo);
-static void tc_indr_block_ing_cmd(struct net_device *dev,
- struct tcf_block *block,
- flow_indr_block_bind_cb_t *cb,
- void *cb_priv,
- enum flow_block_command command)
+static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
+ flow_indr_block_bind_cb_t *cb, void *cb_priv,
+ enum flow_block_command command, bool ingress)
{
struct flow_block_offload bo = {
.command = command,
- .binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+ .binder_type = ingress ?
+ FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
+ FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
.net = dev_net(dev),
.block_shared = tcf_block_non_null_shared(block),
};
@@ -652,9 +652,10 @@ static void tc_indr_block_ing_cmd(struct net_device *dev,
up_write(&block->cb_lock);
}
-static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
+static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
const struct Qdisc_class_ops *cops;
+ const struct Qdisc_ops *ops;
struct Qdisc *qdisc;
if (!dev_ingress_queue(dev))
@@ -664,24 +665,37 @@ static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
if (!qdisc)
return NULL;
- cops = qdisc->ops->cl_ops;
+ ops = qdisc->ops;
+ if (!ops)
+ return NULL;
+
+ if (!ingress && !strcmp("ingress", ops->id))
+ return NULL;
+
+ cops = ops->cl_ops;
if (!cops)
return NULL;
if (!cops->tcf_block)
return NULL;
- return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
+ return cops->tcf_block(qdisc,
+ ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
+ NULL);
}
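For readability, here is roughly how the renamed helper reads once the hunks
above are applied. This is a reconstruction from the diff alone; the qdisc
lookup between the two hunks is not part of this patch, so it is elided below.

static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	/* ... qdisc lookup elided; that context is not shown in this diff ... */

	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	/* A pure "ingress" qdisc has no egress block to return. */
	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}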
-static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb,
- void *cb_priv,
- enum flow_block_command command)
+static void tc_indr_block_get_and_cmd(struct net_device *dev,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv,
+ enum flow_block_command command)
{
- struct tcf_block *block = tc_dev_ingress_block(dev);
+ struct tcf_block *block;
+
+ block = tc_dev_block(dev, true);
+	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

-	tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
+ block = tc_dev_block(dev, false);
+ tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}
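Applied, the replay helper walks both directions, so a registering or
unregistering driver sees an ingress and an egress notification for a
clsact-bound device. Reconstructed directly from the hunk above:

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	/* Replay the bind/unbind for the ingress side... */
	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	/* ...and, new with this patch, for the egress side too. */
	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}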
static void tc_indr_block_call(struct tcf_block *block,
@@ -3626,9 +3640,9 @@ static struct pernet_operations tcf_net_ops = {
.size = sizeof(struct tcf_net),
};
-static struct flow_indr_block_entry block_ing_entry = {
- .cb = tc_indr_block_get_and_ing_cmd,
- .list = LIST_HEAD_INIT(block_ing_entry.list),
+static struct flow_indr_block_entry block_entry = {
+ .cb = tc_indr_block_get_and_cmd,
+ .list = LIST_HEAD_INIT(block_entry.list),
};
static int __init tc_filter_init(void)
@@ -3643,7 +3657,7 @@ static int __init tc_filter_init(void)
if (err)
goto err_register_pernet_subsys;
- flow_indr_add_block_cb(&block_ing_entry);
+ flow_indr_add_block_cb(&block_entry);
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
RTNL_FLAG_DOIT_UNLOCKED);