path: root/kernel/bpf
author		David S. Miller <davem@davemloft.net>	2021-03-25 16:30:46 -0700
committer	David S. Miller <davem@davemloft.net>	2021-03-25 16:30:46 -0700
commit		241949e488f38a192f2359dbb21d80e08173eb60 (patch)
tree		5a8975683f2ccadfbba7ebeb55db482e605581bb /kernel/bpf
parent		efd13b71a3fa31413f8d15342e01d44b60b0a432 (diff)
parent		e2c69f3a5b4edfbcade2c38862c1839fc371c5d5 (diff)
download	linux-241949e488f38a192f2359dbb21d80e08173eb60.tar.bz2
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-03-24

The following pull-request contains BPF updates for your *net-next* tree.

We've added 37 non-merge commits during the last 15 day(s) which contain
a total of 65 files changed, 3200 insertions(+), 738 deletions(-).

The main changes are:

1) Static linking of multiple BPF ELF files, from Andrii.

2) Move drop error path to devmap for XDP_REDIRECT, from Lorenzo.

3) Spelling fixes from various folks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/bpf_lsm.c		 4
-rw-r--r--	kernel/bpf/bpf_task_storage.c	 2
-rw-r--r--	kernel/bpf/devmap.c		30
-rw-r--r--	kernel/bpf/hashtab.c		 2
-rw-r--r--	kernel/bpf/verifier.c		 1
5 files changed, 16 insertions(+), 23 deletions(-)
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 9829f381b51c..f211506218ea 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -67,7 +67,7 @@ BPF_CALL_2(bpf_bprm_opts_set, struct linux_binprm *, bprm, u64, flags)
BTF_ID_LIST_SINGLE(bpf_bprm_opts_set_btf_ids, struct, linux_binprm)
-const static struct bpf_func_proto bpf_bprm_opts_set_proto = {
+static const struct bpf_func_proto bpf_bprm_opts_set_proto = {
.func = bpf_bprm_opts_set,
.gpl_only = false,
.ret_type = RET_INTEGER,
@@ -88,7 +88,7 @@ static bool bpf_ima_inode_hash_allowed(const struct bpf_prog *prog)
BTF_ID_LIST_SINGLE(bpf_ima_inode_hash_btf_ids, struct, inode)
-const static struct bpf_func_proto bpf_ima_inode_hash_proto = {
+static const struct bpf_func_proto bpf_ima_inode_hash_proto = {
.func = bpf_ima_inode_hash,
.gpl_only = false,
.ret_type = RET_INTEGER,
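The two bpf_lsm.c hunks above only swap the order of the declaration specifiers. As a standalone illustration (a made-up struct, not the bpf_func_proto definitions above): the C standard treats a storage-class specifier that is not first in a declaration as obsolescent, and GCC's -Wold-style-declaration warning (enabled by -Wextra) flags it, which is why the kernel spells it "static const".

	/* Illustration only -- not kernel code. */
	struct proto_example {
		int gpl_only;
		int ret_type;
	};

	const static struct proto_example old_order = { 0, 1 };	/* warns with -Wold-style-declaration */
	static const struct proto_example new_order = { 0, 1 };	/* canonical: storage class first */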
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index fd3c74ef608e..3ce75758d394 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -20,7 +20,7 @@
DEFINE_BPF_STORAGE_CACHE(task_cache);
-DEFINE_PER_CPU(int, bpf_task_storage_busy);
+static DEFINE_PER_CPU(int, bpf_task_storage_busy);
static void bpf_task_storage_lock(void)
{
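The bpf_task_storage.c hunk only narrows the linkage of the per-CPU busy counter: it is touched solely through file-local helpers such as the bpf_task_storage_lock() shown in the context above, so the variable itself has no reason to be globally visible. A minimal sketch of that pattern, with the helper bodies assumed for illustration rather than copied from the file:

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	/* static: only this translation unit ever touches the counter. */
	static DEFINE_PER_CPU(int, bpf_task_storage_busy);

	static void bpf_task_storage_lock(void)
	{
		/* Assumed body, for illustration. */
		migrate_disable();
		this_cpu_inc(bpf_task_storage_busy);
	}

	static void bpf_task_storage_unlock(void)
	{
		this_cpu_dec(bpf_task_storage_busy);
		migrate_enable();
	}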
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 7a5ad7331c3b..aa516472ce46 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -329,7 +329,7 @@ bool dev_map_can_have_prog(struct bpf_map *map)
static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
struct net_device *dev = bq->dev;
- int sent = 0, drops = 0, err = 0;
+ int sent = 0, err = 0;
int i;
if (unlikely(!bq->count))
@@ -343,29 +343,23 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
if (sent < 0) {
+ /* If ndo_xdp_xmit fails with an errno, no frames have
+ * been xmit'ed.
+ */
err = sent;
sent = 0;
- goto error;
}
- drops = bq->count - sent;
-out:
- bq->count = 0;
- trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
- bq->dev_rx = NULL;
- __list_del_clearprev(&bq->flush_node);
- return;
-error:
- /* If ndo_xdp_xmit fails with an errno, no frames have been
- * xmit'ed and it's our responsibility to them free all.
+ /* If not all frames have been transmitted, it is our
+ * responsibility to free them
*/
- for (i = 0; i < bq->count; i++) {
- struct xdp_frame *xdpf = bq->q[i];
+ for (i = sent; unlikely(i < bq->count); i++)
+ xdp_return_frame_rx_napi(bq->q[i]);
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
- goto out;
+ trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
+ bq->dev_rx = NULL;
+ bq->count = 0;
+ __list_del_clearprev(&bq->flush_node);
}
/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
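Read back-to-back, the '+' and context lines of the two devmap.c hunks give the following post-patch body for bq_xmit_all(); the per-frame prefetch loop between the hunks is unchanged and elided here, and the surrounding kernel headers and types are assumed:

	static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
	{
		struct net_device *dev = bq->dev;
		int sent = 0, err = 0;
		int i;

		if (unlikely(!bq->count))
			return;

		/* ... per-frame prefetching, unchanged by this patch, elided ... */

		sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
		if (sent < 0) {
			/* If ndo_xdp_xmit fails with an errno, no frames have
			 * been xmit'ed.
			 */
			err = sent;
			sent = 0;
		}

		/* If not all frames have been transmitted, it is our
		 * responsibility to free them
		 */
		for (i = sent; unlikely(i < bq->count); i++)
			xdp_return_frame_rx_napi(bq->q[i]);

		trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
		bq->dev_rx = NULL;
		bq->count = 0;
		__list_del_clearprev(&bq->flush_node);
	}

The net effect is a single exit path: a driver error is folded into the partial-send case, every unsent frame from index 'sent' onward is returned, and the tracepoint reports drops as bq->count - sent instead of a separately maintained counter.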
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 330d721dd2af..d7ebb12ffffc 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -31,7 +31,7 @@
/*
* The bucket lock has two protection scopes:
*
- * 1) Serializing concurrent operations from BPF programs on differrent
+ * 1) Serializing concurrent operations from BPF programs on different
* CPUs
*
* 2) Serializing concurrent operations from BPF programs and sys_bpf()
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 999bf36ffeb1..210169c25ead 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11912,7 +11912,6 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
- struct bpf_insn insn_buf[16];
struct bpf_insn *patch = &insn_buf[0];
bool issrc, isneg;
u32 off_reg;
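The verifier.c hunk drops a block-scope insn_buf[16] that shadowed the buffer already declared at function scope in do_misc_fixups(); 'patch' points at a valid buffer either way, so the inner declaration was pure redundancy. A standalone sketch of the shadowing with simplified types (not verifier code):

	struct insn { unsigned char code; };

	static void fixup_sketch(int need_patch)
	{
		struct insn insn_buf[16];		/* function-scope buffer */

		if (need_patch) {
			struct insn insn_buf[16];	/* redundant: shadows the outer array (-Wshadow) */
			struct insn *patch = &insn_buf[0];

			patch->code = 0;
		}

		/* The rest of the function keeps using the function-scope buffer. */
		insn_buf[0].code = 0;
	}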