author		John Hurley <john.hurley@netronome.com>	2018-06-25 14:30:09 -0700
committer	David S. Miller <davem@davemloft.net>	2018-06-26 23:21:33 +0900
commit		7e916b76805f11c1686a43ab5ead9a9b1a0a5945 (patch)
tree		546e10b17f4ec1c035b0c00fa288b9b86ad20954 /net/sched/cls_bpf.c
parent		530d995123fe647d28566d81ff9562fe6cbaff94 (diff)
download	linux-7e916b76805f11c1686a43ab5ead9a9b1a0a5945.tar.bz2
net: sched: cls_bpf: implement offload tcf_proto_op
Add the offload tcf_proto_op in cls_bpf to generate an offload message
for each bpf prog in the given tcf_proto. Call the specified callback with
this new offload message. The function only returns an error if the
callback rejects adding a 'hardware only' prog.

A prog contains a flag to indicate if it is in hardware or not. To
ensure the offload function properly maintains this flag, keep a
reference counter for the number of instances of the prog that are in
hardware. Only update the flag when this counter changes from or to 0.

Signed-off-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
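[Note: the tc_cls_offload_cnt_update() helper called from cls_bpf_reoffload() below is introduced elsewhere in this series, not in this diff. A minimal sketch of the counting rule described above ("only update the flag when this counter changes from or to 0"), assuming the existing tcf_block_offload_inc()/tcf_block_offload_dec() helpers also used in this file; not the verbatim kernel code:]

/* Sketch: bump or drop the per-prog hardware instance count and flip
 * the in-hardware state only on 0 <-> 1 transitions of the counter.
 */
static inline void
tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
			  u32 *flags, bool add)
{
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags); /* sets in-HW flag */
		(*cnt)++;
	} else {
		(*cnt)--;
		if (!*cnt)
			tcf_block_offload_dec(block, flags); /* clears in-HW flag */
	}
}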
Diffstat (limited to 'net/sched/cls_bpf.c')
-rw-r--r--	net/sched/cls_bpf.c	39
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 1aa7f6511065..66e0ac9811f9 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -43,6 +43,7 @@ struct cls_bpf_prog {
 	struct tcf_result res;
 	bool exts_integrated;
 	u32 gen_flags;
+	unsigned int in_hw_count;
 	struct tcf_exts exts;
 	u32 handle;
 	u16 bpf_num_ops;
@@ -174,6 +175,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
 			return err;
 		} else if (err > 0) {
+			prog->in_hw_count = err;
 			tcf_block_offload_inc(block, &prog->gen_flags);
 		}
 	}
@@ -652,6 +654,42 @@ skip:
 	}
 }
 
+static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+			     void *cb_priv, struct netlink_ext_ack *extack)
+{
+	struct cls_bpf_head *head = rtnl_dereference(tp->root);
+	struct tcf_block *block = tp->chain->block;
+	struct tc_cls_bpf_offload cls_bpf = {};
+	struct cls_bpf_prog *prog;
+	int err;
+
+	list_for_each_entry(prog, &head->plist, link) {
+		if (tc_skip_hw(prog->gen_flags))
+			continue;
+
+		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
+					   extack);
+		cls_bpf.command = TC_CLSBPF_OFFLOAD;
+		cls_bpf.exts = &prog->exts;
+		cls_bpf.prog = add ? prog->filter : NULL;
+		cls_bpf.oldprog = add ? NULL : prog->filter;
+		cls_bpf.name = prog->bpf_name;
+		cls_bpf.exts_integrated = prog->exts_integrated;
+
+		err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv);
+		if (err) {
+			if (add && tc_skip_sw(prog->gen_flags))
+				return err;
+			continue;
+		}
+
+		tc_cls_offload_cnt_update(block, &prog->in_hw_count,
+					  &prog->gen_flags, add);
+	}
+
+	return 0;
+}
+
 static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
 	.kind		=	"bpf",
 	.owner		=	THIS_MODULE,
@@ -662,6 +700,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
 	.change		=	cls_bpf_change,
 	.delete		=	cls_bpf_delete,
 	.walk		=	cls_bpf_walk,
+	.reoffload	=	cls_bpf_reoffload,
 	.dump		=	cls_bpf_dump,
 	.bind_class	=	cls_bpf_bind_class,
 };