path: root/include
author	David S. Miller <davem@davemloft.net>	2019-02-01 15:28:07 -0800
committer	David S. Miller <davem@davemloft.net>	2019-02-01 15:28:07 -0800
commit	e7b816415e031bf7879ffd234c5e4f4fcec13a74 (patch)
tree	7610edf8a5b800399bb92772a0309aba12a2ed74 /include
parent	9b1f19d810e92d6cdc68455fbc22d9f961a58ce1 (diff)
parent	f01c2803873e83ea5f1b160c3169ed6018704be8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2019-01-31

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) disable preemption in sender side of socket filters, from Alexei.

2) fix two potential deadlocks in syscall bpf lookup and prog_register,
   from Martin and Alexei.

3) fix BTF to allow typedef on func_proto, from Yonghong.

4) two bpftool fixes, from Jiri and Paolo.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
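For context on change 1): BPF_PROG_RUN() may touch per-CPU state (per-CPU
maps, scratch buffers), which is only safe while the task cannot be
preempted and migrated to another CPU mid-program. Below is a minimal
sketch of the hazard, assuming a hypothetical per-CPU scratch area; only
DEFINE_PER_CPU(), this_cpu_ptr() and the preempt_*() helpers are real
kernel APIs here, the rest is illustrative:

#include <linux/types.h>
#include <linux/percpu.h>	/* DEFINE_PER_CPU(), this_cpu_ptr() */
#include <linux/preempt.h>	/* preempt_disable(), preempt_enable() */

/* Hypothetical example: a per-CPU scratch buffer is only coherent while
 * the task stays pinned to one CPU. Without the preempt_disable() /
 * preempt_enable() pair, preemption could migrate the task so that it
 * ends up updating a *different* CPU's copy of the data.
 */
struct bpf_scratch {
	u64 accum;
};
static DEFINE_PER_CPU(struct bpf_scratch, bpf_scratch);

static void update_scratch(u64 delta)
{
	struct bpf_scratch *s;

	preempt_disable();		/* pin the task to this CPU */
	s = this_cpu_ptr(&bpf_scratch);	/* pointer valid only while pinned */
	s->accum += delta;
	preempt_enable();
}

The patch below applies this same pattern around the socket-filter
program invocations in include/linux/filter.h.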
Diffstat (limited to 'include')
-rw-r--r--	include/linux/filter.h	21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ad106d845b22..e532fcc6e4b5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
 	return qdisc_skb_cb(skb)->data;
 }
 
-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
-				       struct sk_buff *skb)
+static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
+					 struct sk_buff *skb)
 {
 	u8 *cb_data = bpf_skb_cb(skb);
 	u8 cb_saved[BPF_SKB_CB_LEN];
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
 	return res;
 }
 
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+				       struct sk_buff *skb)
+{
+	u32 res;
+
+	preempt_disable();
+	res = __bpf_prog_run_save_cb(prog, skb);
+	preempt_enable();
+	return res;
+}
+
 static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 					struct sk_buff *skb)
 {
 	u8 *cb_data = bpf_skb_cb(skb);
+	u32 res;
 
 	if (unlikely(prog->cb_access))
 		memset(cb_data, 0, BPF_SKB_CB_LEN);
 
-	return BPF_PROG_RUN(prog, skb);
+	preempt_disable();
+	res = BPF_PROG_RUN(prog, skb);
+	preempt_enable();
+	return res;
 }
 
 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
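
For reference, a hypothetical call site using the wrapped helper. The
preempt_disable()/preempt_enable() pair now lives inside
bpf_prog_run_save_cb() itself, so callers need no changes of their own;
the function below is illustrative and not part of the patch:

#include <linux/errno.h>
#include <linux/filter.h>	/* bpf_prog_run_save_cb(), struct bpf_prog */
#include <linux/skbuff.h>

/* Hypothetical caller: run a socket filter over an skb. Preemption
 * handling is internal to bpf_prog_run_save_cb() after this change,
 * so the call site stays a single line.
 */
static int example_run_skb_filter(const struct bpf_prog *prog,
				  struct sk_buff *skb)
{
	u32 verdict = bpf_prog_run_save_cb(prog, skb);

	return verdict == 0 ? -EPERM : 0;	/* verdict 0 means drop */
}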