path: root/tools/testing/selftests
author    Eduard Zingerman <eddyz87@gmail.com>    2022-12-09 15:57:33 +0200
committer Alexei Starovoitov <ast@kernel.org>     2022-12-10 13:36:22 -0800
commit    efd6286ff74a2fa2b45ed070d344cc0822b8ea6e (patch)
tree      de4c47b1ac47deceb055aef7fbab79f30dabebc0 /tools/testing/selftests
parent    2026f2062df860e5d282ffd4962ea5d5ed53dc51 (diff)
download  linux-efd6286ff74a2fa2b45ed070d344cc0822b8ea6e.tar.bz2
selftests/bpf: test case for relaxed pruning of active_lock.id
Check that verifier.c:states_equal() uses check_ids() to match consistent
active_lock/map_value configurations. This allows pruning of states with
active spin locks even if the numerical values of the active_lock ids do
not match across the compared states.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20221209135733.28851-8-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
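For context, the matching rule the commit message relies on can be illustrated
with a small, self-contained C sketch. This is not the verifier.c
implementation: the helper name sketch_check_ids(), the struct id_pair layout
and ID_MAP_SIZE are assumptions made purely for the illustration; the real
check_ids() lives in kernel/bpf/verifier.c.

    #include <stdbool.h>
    #include <stdint.h>

    #define ID_MAP_SIZE 64  /* arbitrary capacity, chosen for this sketch */

    struct id_pair {
            uint32_t old;
            uint32_t cur;
    };

    /* Two ids are considered equivalent if the old -> cur mapping stays
     * consistent for the duration of one state comparison, regardless of
     * the numerical values involved.
     */
    static bool sketch_check_ids(uint32_t old_id, uint32_t cur_id,
                                 struct id_pair *idmap)
    {
            unsigned int i;

            for (i = 0; i < ID_MAP_SIZE; i++) {
                    if (!idmap[i].old) {
                            /* old_id not seen yet: record the mapping */
                            idmap[i].old = old_id;
                            idmap[i].cur = cur_id;
                            return true;
                    }
                    if (idmap[i].old == old_id)
                            return idmap[i].cur == cur_id;
            }
            /* ran out of slots: conservatively report a mismatch */
            return false;
    }

Under such a rule, two states that hold the spin lock through pointers with
different numerical ids can still be matched, as long as the ids map onto each
other consistently; this is exactly what the test added below exercises at the
second visit to the unlock instruction.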
Diffstat (limited to 'tools/testing/selftests')
-rw-r--r--  tools/testing/selftests/bpf/verifier/spin_lock.c | 75
1 file changed, 75 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/bpf/verifier/spin_lock.c b/tools/testing/selftests/bpf/verifier/spin_lock.c
index 0a8dcfc37fc6..eaf114f07e2e 100644
--- a/tools/testing/selftests/bpf/verifier/spin_lock.c
+++ b/tools/testing/selftests/bpf/verifier/spin_lock.c
@@ -370,3 +370,78 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = BPF_F_TEST_STATE_FREQ,
},
+/* Make sure that regsafe() compares ids for spin lock records using
+ * check_ids():
+ * 1: r9 = map_lookup_elem(...) ; r9.id == 1
+ * 2: r8 = map_lookup_elem(...) ; r8.id == 2
+ * 3: r7 = ktime_get_ns()
+ * 4: r6 = ktime_get_ns()
+ * 5: if r6 > r7 goto <9>
+ * 6: spin_lock(r8)
+ * 7: r9 = r8
+ * 8: goto <10>
+ * 9: spin_lock(r9)
+ * 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active,
+ * ; second visit to (10) should be considered safe
+ * ; if check_ids() is used.
+ * 11: exit(0)
+ */
+{
+ "spin_lock: regsafe() check_ids() similar id mappings",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ /* r9 = map_lookup_elem(...) */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 24),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
+ /* r8 = map_lookup_elem(...) */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1,
+ 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 18),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ /* r7 = ktime_get_ns() */
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ /* r6 = ktime_get_ns() */
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* if r6 > r7 goto +5 ; no new information about the state is derived from
+ * ; this check, thus produced verifier states differ
+ * ; only in 'insn_idx'
+ * spin_lock(r8)
+ * r9 = r8
+ * goto unlock
+ */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
+ BPF_JMP_A(3),
+ /* spin_lock(r9) */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_EMIT_CALL(BPF_FUNC_spin_lock),
+ /* spin_unlock(r9) */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_EMIT_CALL(BPF_FUNC_spin_unlock),
+ /* exit(0) */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3, 10 },
+ .result = VERBOSE_ACCEPT,
+ .errstr = "28: safe",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = BPF_F_TEST_STATE_FREQ,
+},
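To see how the test maps onto the sketch given after the commit message, the
id pattern from the program comment can be replayed: at the spin_unlock (line
10 of the pseudo-code, instruction 28 in the verifier log) one visit holds the
lock through id 1 and the other through id 2 (the exploration order does not
matter here). The snippet below reuses the hypothetical sketch_check_ids() and
struct id_pair definitions from that sketch and is not part of the kernel or
of the selftest.

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            struct id_pair idmap[ID_MAP_SIZE];

            memset(idmap, 0, sizeof(idmap));
            /* old (already verified) state: r9.id == 1; current state: r9.id == 2 */
            assert(sketch_check_ids(1, 2, idmap));
            /* the same old id must keep mapping to the same current id ... */
            assert(sketch_check_ids(1, 2, idmap));
            /* ... so remapping it to a different id counts as a mismatch */
            assert(!sketch_check_ids(1, 3, idmap));
            return 0;
    }

Because the mapping { old 1 -> cur 2 } stays consistent, the second visit can
be declared safe ("28: safe" in the expected log) even though the lock ids
differ numerically.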