author	David Ahern <dsa@cumulusnetworks.com>	2016-12-01 08:48:03 -0800
committer	David S. Miller <davem@davemloft.net>	2016-12-02 13:44:56 -0500
commit	b2cd12574aa3e1625f471ff57cde7f628a18a46b (patch)
tree	f0ab0871e5673a32bde0b5c393e8925765522118 /include
parent	7f7bf1606fa8fa0e3aecdeac0ba8005f2a0fbdef (diff)
bpf: Refactor cgroups code in prep for new type
Code move and rename only; no functional change intended.

Signed-off-by: David Ahern <dsa@cumulusnetworks.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
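
To show the call pattern the rename preserves, here is a minimal, userspace-only sketch of the guarded statement-expression wrapper. The stub struct definitions, the always-pass __cgroup_bpf_run_filter_skb() stub, and main() are hypothetical stand-ins, not kernel code; only the macro's shape follows the patched header.

/*
 * Sketch of the wrapper pattern touched by this patch: a GNU
 * statement-expression macro that calls the skb filter only when
 * cgroup_bpf_enabled is set.  Compile with gcc/clang (GNU C).
 */
#include <stdio.h>

struct sock { int dummy; };
struct sk_buff { struct sock *sk; };

enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
};

static int cgroup_bpf_enabled;

/* Hypothetical stub standing in for the kernel function; 0 means "pass". */
static int __cgroup_bpf_run_filter_skb(struct sock *sk, struct sk_buff *skb,
				       enum bpf_attach_type type)
{
	(void)sk; (void)skb; (void)type;
	return 0;
}

/* Same shape as the renamed ingress wrapper in the patched header. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		\
						BPF_CGROUP_INET_INGRESS); \
	__ret;								\
})

int main(void)
{
	struct sock sk = { 0 };
	struct sk_buff skb = { .sk = &sk };

	cgroup_bpf_enabled = 1;
	/* A caller checks the wrapper's result and drops the skb on error. */
	if (BPF_CGROUP_RUN_PROG_INET_INGRESS(&sk, &skb))
		printf("drop\n");
	else
		printf("pass\n");
	return 0;
}

Per the commit title, the _skb suffix frees the __cgroup_bpf_run_filter_*() namespace for the additional program type this series prepares for, without changing how existing callers invoke the wrappers.
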
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bpf-cgroup.h	46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 0cf1adfadd2d..af2ca8b432c0 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -36,31 +36,31 @@ void cgroup_bpf_update(struct cgroup *cgrp,
struct bpf_prog *prog,
enum bpf_attach_type type);
-int __cgroup_bpf_run_filter(struct sock *sk,
- struct sk_buff *skb,
- enum bpf_attach_type type);
-
-/* Wrappers for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled. */
-#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) \
-({ \
- int __ret = 0; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter(sk, skb, \
- BPF_CGROUP_INET_INGRESS); \
- \
- __ret; \
+int __cgroup_bpf_run_filter_skb(struct sock *sk,
+ struct sk_buff *skb,
+ enum bpf_attach_type type);
+
+/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) \
+ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
+ BPF_CGROUP_INET_INGRESS); \
+ \
+ __ret; \
})
-#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) \
-({ \
- int __ret = 0; \
- if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
- typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk)) \
- __ret = __cgroup_bpf_run_filter(__sk, skb, \
- BPF_CGROUP_INET_EGRESS); \
- } \
- __ret; \
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ typeof(sk) __sk = sk_to_full_sk(sk); \
+ if (sk_fullsock(__sk)) \
+ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
+ BPF_CGROUP_INET_EGRESS); \
+ } \
+ __ret; \
})
#else