author     Hari Bathini <hbathini@linux.ibm.com>   2022-06-10 21:25:51 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>   2022-06-29 19:37:08 +1000
commit     aea7ef8a82c0ea13ff20b65ff2edf8a38a17eda8 (patch)
tree       c16b3569e05c976e22859f082a136d5bf070f18c /arch/powerpc/net
parent     1e82dfaa7819f03f0b0022be7ca15bbc83090da1 (diff)
download   linux-aea7ef8a82c0ea13ff20b65ff2edf8a38a17eda8.tar.bz2
powerpc/bpf/32: add support for BPF_ATOMIC bitwise operations
Add ppc32 JIT support for the BPF atomic operations atomic_and, atomic_or,
atomic_xor, atomic_fetch_add, atomic_fetch_and, atomic_fetch_or and
atomic_fetch_xor.

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> (ppc64le)
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220610155552.25892-5-hbathini@linux.ibm.com
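As an illustration only (not part of this patch), a BPF program written in C reaches
these instructions through the compiler's __sync_* builtins; with a recent clang
targeting BPF, the builtins below are expected to lower to the 32-bit BPF_ATOMIC
operations this patch teaches the ppc32 JIT to handle. The map and section names are
hypothetical.

/* Illustrative sketch only; not taken from the patch. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);            /* 32-bit value -> BPF_W atomics */
} flag_map SEC(".maps");                 /* hypothetical map name */

SEC("xdp")
int set_flag(struct xdp_md *ctx)
{
	__u32 key = 0;
	__u32 *val = bpf_map_lookup_elem(&flag_map, &key);

	if (val) {
		/* expected to lower to BPF_ATOMIC | BPF_OR | BPF_FETCH */
		__u32 old = __sync_fetch_and_or(val, 0x1);

		if (old & 0x2)
			/* expected to lower to BPF_ATOMIC | BPF_AND (| BPF_FETCH) */
			__sync_fetch_and_and(val, ~(__u32)0x2);
	}
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";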
Diffstat (limited to 'arch/powerpc/net')
-rw-r--r--  arch/powerpc/net/bpf_jit_comp32.c | 53
1 file changed, 41 insertions(+), 12 deletions(-)
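For orientation before the diff: the atomic operations below are emitted as a
load-reserve/store-conditional (lwarx/stwcx.) retry loop. A rough C equivalent of
what the JIT generates for a 32-bit fetch-or might look like the following sketch
(illustrative only; the function name is hypothetical, and the real JIT emits
machine code directly, staging the old value in BPF_REG_AX for the BPF_FETCH
variants):

/* Sketch of the ll/sc retry loop, 32-bit PowerPC inline asm. */
static inline unsigned int atomic32_fetch_or(unsigned int *p, unsigned int mask)
{
	unsigned int old, tmp;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3\n"	/* load word and reserve                   */
"	or	%1,%0,%4\n"	/* apply the bitwise op                    */
"	stwcx.	%1,0,%3\n"	/* store iff the reservation still holds   */
"	bne-	1b\n"		/* reservation lost: retry from lwarx      */
	: "=&r" (old), "=&r" (tmp), "+m" (*p)
	: "r" (p), "r" (mask)
	: "cc", "memory");

	return old;		/* BPF_FETCH: the value seen before the op */
}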
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index e46ed1e8c6ca..28dc6a1a8f2f 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -294,6 +294,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
u32 dst_reg_h = dst_reg - 1;
u32 src_reg = bpf_to_ppc(insn[i].src_reg);
u32 src_reg_h = src_reg - 1;
+ u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
u32 tmp_reg = bpf_to_ppc(TMP_REG);
u32 size = BPF_SIZE(code);
s16 off = insn[i].off;
@@ -798,25 +799,53 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* BPF_STX ATOMIC (atomic ops)
*/
case BPF_STX | BPF_ATOMIC | BPF_W:
- if (imm != BPF_ADD) {
- pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
- code, i);
- return -ENOTSUPP;
- }
-
- /* *(u32 *)(dst + off) += src */
-
bpf_set_seen_register(ctx, tmp_reg);
+ bpf_set_seen_register(ctx, ax_reg);
+
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
+ tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
- /* add value from src_reg into this */
- EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
- /* store result back */
+
+ /* Save old value in BPF_REG_AX */
+ if (imm & BPF_FETCH)
+ EMIT(PPC_RAW_MR(ax_reg, _R0));
+
+ switch (imm) {
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+ EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
+ break;
+ case BPF_AND:
+ case BPF_AND | BPF_FETCH:
+ EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
+ break;
+ case BPF_OR:
+ case BPF_OR | BPF_FETCH:
+ EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
+ break;
+ case BPF_XOR:
+ case BPF_XOR | BPF_FETCH:
+ EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
+ break;
+ default:
+ pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -EOPNOTSUPP;
+ }
+
+ /* store new value */
EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
/* we're done if this succeeded */
- PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
+ PPC_BCC_SHORT(COND_NE, tmp_idx);
+
+ /* For the BPF_FETCH variant, get old data into src_reg */
+ if (imm & BPF_FETCH) {
+ EMIT(PPC_RAW_MR(src_reg, ax_reg));
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(src_reg_h, 0));
+ }
break;
case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */