commit 208e0b17329cd802088c79f399835d51b9710df8
tree 8129f3ba6e564ceaa4ac6132cb56c7e9ffd1c811
parent 25a39f7f975c3c26a0052fbf9b59201c06744332
parent 52be9a7cde1fd26e43a01ac06d5c2558c563a7cb
author David S. Miller <davem@davemloft.net> 2018-01-22 16:30:30 -0500
committer David S. Miller <davem@davemloft.net> 2018-01-22 16:30:30 -0500
Merge branch 'net-sched-add-extack-support-for-cls-offloads'
Jakub Kicinski says:
====================
net: sched: add extack support for cls offloads
I've dropped the tests from the series because test_offloads.py changes
will conflict with bpf-next patches. I will send four more patches with
tests once bpf-next is merged back, hopefully still making it into 4.16 :)
v4:
- rebase on top of Alex's changes.
---
Quentin says:
This series aims to improve the user experience when eBPF hardware offload
hits an error path at load time. In particular, it introduces netlink
extended ack (extack) support in the nfp driver.
To that end, the extack pointer is passed through the `change()` operation
of the existing classifiers (patches 1 to 6). It is then used for TC offload
in the nfp driver (patch 8) and in netdevsim (patch 9, with a selftest in
patch 10). Patch 7 adds a core helper that sets an extack message when TC
offload is disabled on the net device.
For completeness, extack is propagated for the classifiers other than
cls_bpf, but it is up to the drivers to make use of it. (A hedged
driver-side sketch of this pattern follows the sign-off below.)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
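
For context only (not part of the series): the sketch below illustrates how a
driver's tc block callback can consume the extack pointer that
tc_cls_common_offload_init() now stores in cls->common.extack. The callback
name foo_setup_tc_block_cb and the exact error condition are hypothetical;
the real implementation added by this series lives in the nfp diff below.

/*
 * Hedged sketch, not taken from the patches: a hypothetical driver's
 * tc block callback reports a failure reason through the extack
 * carried in the offload descriptor. Only the extack plumbing is what
 * the series adds; everything named "foo" is made up.
 */
#include <linux/netlink.h>
#include <net/pkt_cls.h>

static int foo_setup_tc_block_cb(enum tc_setup_type type,
				 void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;

	if (type != TC_SETUP_CLSBPF) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only BPF classifier offload is supported");
		return -EOPNOTSUPP;
	}

	/* Program the hardware here; on success the extack stays unset. */
	return 0;
}

With a callback like this, a filter added with skip_sw that fails offload
should surface the driver's message to userspace (an extack-aware tc prints
it as an "Error: ..." line) instead of only a bare error code.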
 drivers/net/ethernet/netronome/nfp/bpf/main.c        | 35
 drivers/net/ethernet/netronome/nfp/bpf/main.h        |  2
 drivers/net/ethernet/netronome/nfp/bpf/offload.c     | 24
 drivers/net/ethernet/netronome/nfp/nfp_app.h         |  9
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c  |  2
 include/net/pkt_cls.h                                | 16
 net/sched/cls_bpf.c                                  | 18
 net/sched/cls_flower.c                               | 12
 net/sched/cls_matchall.c                             | 10
 net/sched/cls_u32.c                                  | 18
 10 files changed, 95 insertions(+), 51 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 4ee11bf2aed7..b3206855535a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -54,7 +54,7 @@ static bool nfp_net_ebpf_capable(struct nfp_net *nn)
 
 static int
 nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
-		    struct bpf_prog *prog)
+		    struct bpf_prog *prog, struct netlink_ext_ack *extack)
 {
 	bool running, xdp_running;
 	int ret;
@@ -70,10 +70,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 	if (prog && running && !xdp_running)
 		return -EBUSY;
 
-	ret = nfp_net_bpf_offload(nn, prog, running);
+	ret = nfp_net_bpf_offload(nn, prog, running, extack);
 	/* Stop offload if replace not possible */
 	if (ret && prog)
-		nfp_bpf_xdp_offload(app, nn, NULL);
+		nfp_bpf_xdp_offload(app, nn, NULL, extack);
 
 	nn->dp.bpf_offload_xdp = prog && !ret;
 	return ret;
@@ -125,17 +125,31 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 	struct nfp_bpf_vnic *bv;
 	int err;
 
-	if (type != TC_SETUP_CLSBPF ||
-	    !tc_can_offload(nn->dp.netdev) ||
-	    !nfp_net_ebpf_capable(nn) ||
-	    cls_bpf->common.protocol != htons(ETH_P_ALL) ||
-	    cls_bpf->common.chain_index)
+	if (type != TC_SETUP_CLSBPF) {
+		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+				   "only offload of BPF classifiers supported");
+		return -EOPNOTSUPP;
+	}
+	if (!tc_can_offload_extack(nn->dp.netdev, cls_bpf->common.extack))
+		return -EOPNOTSUPP;
+	if (!nfp_net_ebpf_capable(nn)) {
+		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+				   "NFP firmware does not support eBPF offload");
+		return -EOPNOTSUPP;
+	}
+	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+				   "only ETH_P_ALL supported as filter protocol");
+		return -EOPNOTSUPP;
+	}
+	if (cls_bpf->common.chain_index)
 		return -EOPNOTSUPP;
 
 	/* Only support TC direct action */
 	if (!cls_bpf->exts_integrated ||
 	    tcf_exts_has_actions(cls_bpf->exts)) {
-		nn_err(nn, "only direct action with no legacy actions supported\n");
+		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
+				   "only direct action with no legacy actions supported");
 		return -EOPNOTSUPP;
 	}
 
@@ -152,7 +166,8 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 		return 0;
 	}
 
-	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
+				  cls_bpf->common.extack);
 	if (err)
 		return err;
 
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index c476bca15ba4..424fe8338105 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -335,7 +335,7 @@ struct nfp_net;
 int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
 		struct netdev_bpf *bpf);
 int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
-			bool old_prog);
+			bool old_prog, struct netlink_ext_ack *extack);
 
 struct nfp_insn_meta *
 nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 1a357aacc444..0a7732385469 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -281,7 +281,9 @@ int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
 	}
 }
 
-static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
+static int
+nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
+		 struct netlink_ext_ack *extack)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 	unsigned int max_mtu;
@@ -291,7 +293,7 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
 
 	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
 	if (max_mtu < nn->dp.netdev->mtu) {
-		nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
 		return -EOPNOTSUPP;
 	}
 
@@ -313,7 +315,8 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
 	/* Load up the JITed code */
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
 	if (err)
-		nn_err(nn, "FW command error while loading BPF: %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "FW command error while loading BPF");
 
 	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
 			 DMA_TO_DEVICE);
@@ -322,7 +325,8 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
 	return err;
 }
 
-static void nfp_net_bpf_start(struct nfp_net *nn)
+static void
+nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
 {
 	int err;
 
@@ -331,7 +335,8 @@ static void nfp_net_bpf_start(struct nfp_net *nn)
 	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
 	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 	if (err)
-		nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "FW command error while enabling BPF");
 }
 
 static int nfp_net_bpf_stop(struct nfp_net *nn)
@@ -346,7 +351,7 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
 }
 
 int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
-			bool old_prog)
+			bool old_prog, struct netlink_ext_ack *extack)
 {
 	int err;
 
@@ -364,7 +369,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 
 		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
 		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
-			nn_err(nn, "FW does not support live reload\n");
+			NL_SET_ERR_MSG_MOD(extack,
+					   "FW does not support live reload");
 			return -EBUSY;
 		}
 	}
@@ -376,12 +382,12 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 	if (old_prog && !prog)
 		return nfp_net_bpf_stop(nn);
 
-	err = nfp_net_bpf_load(nn, prog);
+	err = nfp_net_bpf_load(nn, prog, extack);
 	if (err)
 		return err;
 
 	if (!old_prog)
-		nfp_net_bpf_start(nn);
+		nfp_net_bpf_start(nn, extack);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 7e474df90598..437964afa8ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -43,6 +43,7 @@
 struct bpf_prog;
 struct net_device;
 struct netdev_bpf;
+struct netlink_ext_ack;
 struct pci_dev;
 struct sk_buff;
 struct sk_buff;
@@ -138,7 +139,8 @@ struct nfp_app_type {
 	int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
 		   struct netdev_bpf *xdp);
 	int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
-			   struct bpf_prog *prog);
+			   struct bpf_prog *prog,
+			   struct netlink_ext_ack *extack);
 
 	int (*sriov_enable)(struct nfp_app *app, int num_vfs);
 	void (*sriov_disable)(struct nfp_app *app);
@@ -324,11 +326,12 @@ static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
 }
 
 static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
-				      struct bpf_prog *prog)
+				      struct bpf_prog *prog,
+				      struct netlink_ext_ack *extack)
 {
 	if (!app || !app->type->xdp_offload)
 		return -EOPNOTSUPP;
-	return app->type->xdp_offload(app, nn, prog);
+	return app->type->xdp_offload(app, nn, prog, extack);
 }
 
 static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index cdf52421eaca..c0fd351c86b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3403,7 +3403,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
 	if (err)
 		return err;
 
-	err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
+	err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
 	if (err && flags & XDP_FLAGS_HW_MODE)
 		return err;
 
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 2e4b8e436d25..2f8f16a4d88e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -602,15 +602,18 @@ struct tc_cls_common_offload {
 	u32 chain_index;
 	__be16 protocol;
 	u32 prio;
+	struct netlink_ext_ack *extack;
 };
 
 static inline void
 tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
-			   const struct tcf_proto *tp)
+			   const struct tcf_proto *tp,
+			   struct netlink_ext_ack *extack)
 {
 	cls_common->chain_index = tp->chain->index;
 	cls_common->protocol = tp->protocol;
 	cls_common->prio = tp->prio;
+	cls_common->extack = extack;
 }
 
 struct tc_cls_u32_knode {
@@ -653,6 +656,17 @@ static inline bool tc_can_offload(const struct net_device *dev)
 	return dev->features & NETIF_F_HW_TC;
 }
 
+static inline bool tc_can_offload_extack(const struct net_device *dev,
+					 struct netlink_ext_ack *extack)
+{
+	bool can = tc_can_offload(dev);
+
+	if (!can)
+		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
+
+	return can;
+}
+
 static inline bool tc_skip_hw(u32 flags)
 {
 	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 988ad45d78b8..c11e0fe23a17 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -147,7 +147,8 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
 }
 
 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-			       struct cls_bpf_prog *oldprog)
+			       struct cls_bpf_prog *oldprog,
+			       struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_bpf_offload cls_bpf = {};
@@ -158,7 +159,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	skip_sw = prog && tc_skip_sw(prog->gen_flags);
 	obj = prog ?: oldprog;
 
-	tc_cls_common_offload_init(&cls_bpf.common, tp);
+	tc_cls_common_offload_init(&cls_bpf.common, tp, extack);
 	cls_bpf.command = TC_CLSBPF_OFFLOAD;
 	cls_bpf.exts = &obj->exts;
 	cls_bpf.prog = prog ? prog->filter : NULL;
@@ -173,7 +174,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
 	if (prog) {
 		if (err < 0) {
-			cls_bpf_offload_cmd(tp, oldprog, prog);
+			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
 			return err;
 		} else if (err > 0) {
 			tcf_block_offload_inc(block, &prog->gen_flags);
@@ -192,7 +193,8 @@ static u32 cls_bpf_flags(u32 flags)
 }
 
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-			   struct cls_bpf_prog *oldprog)
+			   struct cls_bpf_prog *oldprog,
+			   struct netlink_ext_ack *extack)
 {
 	if (prog && oldprog &&
 	    cls_bpf_flags(prog->gen_flags) !=
@@ -206,7 +208,7 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	if (!prog && !oldprog)
 		return 0;
 
-	return cls_bpf_offload_cmd(tp, prog, oldprog);
+	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
 }
 
 static void cls_bpf_stop_offload(struct tcf_proto *tp,
@@ -214,7 +216,7 @@ static void cls_bpf_stop_offload(struct tcf_proto *tp,
 {
 	int err;
 
-	err = cls_bpf_offload_cmd(tp, NULL, prog);
+	err = cls_bpf_offload_cmd(tp, NULL, prog, NULL);
 	if (err)
 		pr_err("Stopping hardware offload failed: %d\n", err);
 }
@@ -225,7 +227,7 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_bpf_offload cls_bpf = {};
 
-	tc_cls_common_offload_init(&cls_bpf.common, tp);
+	tc_cls_common_offload_init(&cls_bpf.common, tp, NULL);
 	cls_bpf.command = TC_CLSBPF_STATS;
 	cls_bpf.exts = &prog->exts;
 	cls_bpf.prog = prog->filter;
@@ -514,7 +516,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 	if (ret < 0)
 		goto errout_idr;
 
-	ret = cls_bpf_offload(tp, prog, oldprog);
+	ret = cls_bpf_offload(tp, prog, oldprog, extack);
 	if (ret)
 		goto errout_parms;
 
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index c6ac4a612c4a..727c10378f37 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -223,7 +223,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
 	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
 
-	tc_cls_common_offload_init(&cls_flower.common, tp);
+	tc_cls_common_offload_init(&cls_flower.common, tp, NULL);
 	cls_flower.command = TC_CLSFLOWER_DESTROY;
 	cls_flower.cookie = (unsigned long) f;
 
@@ -235,14 +235,15 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
 static int fl_hw_replace_filter(struct tcf_proto *tp,
 				struct flow_dissector *dissector,
 				struct fl_flow_key *mask,
-				struct cls_fl_filter *f)
+				struct cls_fl_filter *f,
+				struct netlink_ext_ack *extack)
 {
 	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
 	bool skip_sw = tc_skip_sw(f->flags);
 	int err;
 
-	tc_cls_common_offload_init(&cls_flower.common, tp);
+	tc_cls_common_offload_init(&cls_flower.common, tp, extack);
 	cls_flower.command = TC_CLSFLOWER_REPLACE;
 	cls_flower.cookie = (unsigned long) f;
 	cls_flower.dissector = dissector;
@@ -271,7 +272,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
 
-	tc_cls_common_offload_init(&cls_flower.common, tp);
+	tc_cls_common_offload_init(&cls_flower.common, tp, NULL);
 	cls_flower.command = TC_CLSFLOWER_STATS;
 	cls_flower.cookie = (unsigned long) f;
 	cls_flower.exts = &f->exts;
@@ -943,7 +944,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		err = fl_hw_replace_filter(tp,
 					   &head->dissector,
 					   &mask.key,
-					   fnew);
+					   fnew,
+					   extack);
 		if (err)
 			goto errout_idr;
 	}
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index f67d3d7fcf40..d990d2a52c6d 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -76,7 +76,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
 	struct tc_cls_matchall_offload cls_mall = {};
 	struct tcf_block *block = tp->chain->block;
 
-	tc_cls_common_offload_init(&cls_mall.common, tp);
+	tc_cls_common_offload_init(&cls_mall.common, tp, NULL);
 	cls_mall.command = TC_CLSMATCHALL_DESTROY;
 	cls_mall.cookie = cookie;
 
@@ -86,14 +86,15 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
 				  struct cls_mall_head *head,
-				  unsigned long cookie)
+				  unsigned long cookie,
+				  struct netlink_ext_ack *extack)
 {
 	struct tc_cls_matchall_offload cls_mall = {};
 	struct tcf_block *block = tp->chain->block;
 	bool skip_sw = tc_skip_sw(head->flags);
 	int err;
 
-	tc_cls_common_offload_init(&cls_mall.common, tp);
+	tc_cls_common_offload_init(&cls_mall.common, tp, extack);
 	cls_mall.command = TC_CLSMATCHALL_REPLACE;
 	cls_mall.exts = &head->exts;
 	cls_mall.cookie = cookie;
@@ -205,7 +206,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 		goto err_set_parms;
 
 	if (!tc_skip_hw(new->flags)) {
-		err = mall_replace_hw_filter(tp, new, (unsigned long)new);
+		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
+					     extack);
 		if (err)
 			goto err_replace_hw_filter;
 	}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 57113e936155..7030240f8826 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -491,7 +491,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
-	tc_cls_common_offload_init(&cls_u32.common, tp);
+	tc_cls_common_offload_init(&cls_u32.common, tp, NULL);
 	cls_u32.command = TC_CLSU32_DELETE_HNODE;
 	cls_u32.hnode.divisor = h->divisor;
 	cls_u32.hnode.handle = h->handle;
@@ -501,7 +501,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 }
 
 static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
-				u32 flags)
+				u32 flags, struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
@@ -509,7 +509,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 	bool offloaded = false;
 	int err;
 
-	tc_cls_common_offload_init(&cls_u32.common, tp);
+	tc_cls_common_offload_init(&cls_u32.common, tp, extack);
 	cls_u32.command = TC_CLSU32_NEW_HNODE;
 	cls_u32.hnode.divisor = h->divisor;
 	cls_u32.hnode.handle = h->handle;
@@ -534,7 +534,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
-	tc_cls_common_offload_init(&cls_u32.common, tp);
+	tc_cls_common_offload_init(&cls_u32.common, tp, NULL);
 	cls_u32.command = TC_CLSU32_DELETE_KNODE;
 	cls_u32.knode.handle = n->handle;
 
@@ -543,14 +543,14 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
-				u32 flags)
+				u32 flags, struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 	bool skip_sw = tc_skip_sw(flags);
 	int err;
 
-	tc_cls_common_offload_init(&cls_u32.common, tp);
+	tc_cls_common_offload_init(&cls_u32.common, tp, extack);
 	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 	cls_u32.knode.handle = n->handle;
 	cls_u32.knode.fshift = n->fshift;
@@ -965,7 +965,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 			return err;
 		}
 
-		err = u32_replace_hw_knode(tp, new, flags);
+		err = u32_replace_hw_knode(tp, new, flags, extack);
 		if (err) {
 			u32_destroy_key(tp, new, false);
 			return err;
@@ -1016,7 +1016,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		ht->prio = tp->prio;
 		idr_init(&ht->handle_idr);
 
-		err = u32_replace_hw_hnode(tp, ht, flags);
+		err = u32_replace_hw_hnode(tp, ht, flags, extack);
 		if (err) {
 			idr_remove_ext(&tp_c->handle_idr, handle);
 			kfree(ht);
@@ -1122,7 +1122,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 		struct tc_u_knode __rcu **ins;
 		struct tc_u_knode *pins;
 
-		err = u32_replace_hw_knode(tp, n, flags);
+		err = u32_replace_hw_knode(tp, n, flags, extack);
 		if (err)
 			goto errhw;
 