author		Alexei Starovoitov <ast@plumgrid.com>	2014-07-30 20:34:16 -0700
committer	David S. Miller <davem@davemloft.net>	2014-08-02 15:03:58 -0700
commit		7ae457c1e5b45a1b826fad9d62b32191d2bdcfdb (patch)
tree		dcb1aba57530e6c9426a81758173ca146ffafcaf /arch/mips/net
parent		8fb575ca396bc31d9fa99c26336e2432b41d1bfc (diff)
net: filter: split 'struct sk_filter' into socket and bpf parts
clean up names related to socket filtering and bpf in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

and

struct bpf_prog {
	u32			jited:1,
				len:31;
	struct sock_fprog_kern	*orig_prog;
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
		struct work_struct	work;
	};
};

so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases

split SK_RUN_FILTER macro into:
SK_RUN_FILTER to be used with 'struct sk_filter *' and
BPF_PROG_RUN to be used with 'struct bpf_prog *'

__sk_filter_release(struct sk_filter *) gains
__bpf_prog_release(struct bpf_prog *) helper function

also perform related renames for the functions that work
with 'struct bpf_prog *', since they're on the same lines:

sk_filter_size -> bpf_prog_size
sk_filter_select_runtime -> bpf_prog_select_runtime
sk_filter_free -> bpf_prog_free
sk_unattached_filter_create -> bpf_prog_create
sk_unattached_filter_destroy -> bpf_prog_destroy
sk_store_orig_filter -> bpf_prog_store_orig_filter
sk_release_orig_filter -> bpf_release_orig_filter
__sk_migrate_filter -> bpf_migrate_filter
__sk_prepare_filter -> bpf_prepare_filter

API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
which is used by sockets, tun, af_packet

API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
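As a rough illustration of the renamed 'unattached' API described above (this sketch is not part of the patch; the example_* names and the trivial accept-all program are hypothetical), a subsystem that previously used sk_unattached_filter_create()/SK_RUN_FILTER() would now do roughly:

	#include <linux/kernel.h>
	#include <linux/filter.h>
	#include <linux/skbuff.h>

	/* Hypothetical holder for the compiled program. */
	static struct bpf_prog *example_prog;

	static int example_setup(void)
	{
		/* One-instruction classic BPF filter: accept every packet. */
		struct sock_filter insns[] = {
			BPF_STMT(BPF_RET | BPF_K, 0xffff),
		};
		struct sock_fprog_kern fprog = {
			.len	= ARRAY_SIZE(insns),
			.filter	= insns,
		};

		/* formerly sk_unattached_filter_create(); the insns are copied */
		return bpf_prog_create(&example_prog, &fprog);
	}

	static unsigned int example_run(struct sk_buff *skb)
	{
		/* formerly SK_RUN_FILTER() on a 'struct sk_filter *' */
		return BPF_PROG_RUN(example_prog, skb);
	}

	static void example_teardown(void)
	{
		/* formerly sk_unattached_filter_destroy() */
		bpf_prog_destroy(example_prog);
	}

The socket-attached path (sk_attach_filter()/sk_detach_filter() plus SK_RUN_FILTER() on the wrapping 'struct sk_filter') is unchanged by this series.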
Diffstat (limited to 'arch/mips/net')
-rw-r--r--	arch/mips/net/bpf_jit.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index b87390a56a2f..05a56619ece2 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -131,7 +131,7 @@
* @target: Memory location for the compiled filter
*/
struct jit_ctx {
- const struct sk_filter *skf;
+ const struct bpf_prog *skf;
unsigned int prologue_bytes;
u32 idx;
u32 flags;
@@ -789,7 +789,7 @@ static int pkt_type_offset(void)
static int build_body(struct jit_ctx *ctx)
{
void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
- const struct sk_filter *prog = ctx->skf;
+ const struct bpf_prog *prog = ctx->skf;
const struct sock_filter *inst;
unsigned int i, off, load_order, condt;
u32 k, b_off __maybe_unused;
@@ -1369,7 +1369,7 @@ jmp_cmp:
int bpf_jit_enable __read_mostly;
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
{
struct jit_ctx ctx;
unsigned int alloc_size, tmp_idx;
@@ -1423,7 +1423,7 @@ out:
kfree(ctx.offsets);
}
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
module_free(NULL, fp->bpf_func);