selftests/bpf: validate __xlated same way as __jited
Both __xlated and __jited work with disassembly,
so it is logical for both to operate in a similar manner.

This commit updates __xlated macro handling in test_loader.c by making
it expect matches on sequential lines, the same way __jited operates.
For example:

    __xlated("1: *(u64 *)(r10 -16) = r1")      ;; matched on line N
    __xlated("3: r0 = &(void __percpu *)(r0)") ;; matched on line N+1

Also:

    __xlated("1: *(u64 *)(r10 -16) = r1")      ;; matched on line N
    __xlated("...")                            ;; not matched
    __xlated("3: r0 = &(void __percpu *)(r0)") ;; mantched on any
                                               ;; line >= N

Signed-off-by: Eduard Zingerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
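
For illustration only, here is a minimal, self-contained sketch of the matching
semantics described above. It is not the test_loader.c implementation; the
function name match_disasm and the substring-based comparison are assumptions
made for the example. Each expectation must match the disassembly line
immediately following the previous match, unless a "..." entry lets the next
expectation match any later line.

    #include <stdbool.h>
    #include <string.h>

    /* Illustrative only: not the actual test_loader.c matcher. */
    static bool match_disasm(const char **disasm, int disasm_cnt,
                             const char **expect, int expect_cnt)
    {
        bool anywhere = true;   /* the first expectation may match any line */
        int d = 0;

        for (int e = 0; e < expect_cnt; e++) {
            if (strcmp(expect[e], "...") == 0) {
                /* "..." relaxes the sequential requirement for the next entry */
                anywhere = true;
                continue;
            }
            if (anywhere) {
                /* search forward for any later matching line */
                while (d < disasm_cnt && !strstr(disasm[d], expect[e]))
                    d++;
            }
            if (d == disasm_cnt || !strstr(disasm[d], expect[e]))
                return false;   /* sequential match failed */
            d++;                /* this line is consumed */
            anywhere = false;   /* next entry must match line d */
        }
        return true;
    }

    int main(void)
    {
        const char *disasm[] = {
            "0: r1 = 1",
            "1: *(u64 *)(r10 -16) = r1",
            "2: r0 = 42",
            "3: r0 = &(void __percpu *)(r0)",
        };
        const char *expect[] = {
            "1: *(u64 *)(r10 -16) = r1",
            "...",
            "3: r0 = &(void __percpu *)(r0)",
        };

        /* returns 0: "..." lets the second pattern skip line 2 */
        return match_disasm(disasm, 4, expect, 3) ? 0 : 1;
    }

Compiled standalone, the example returns 0 because the "..." entry allows the
second pattern to match a later line instead of line 2.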
eddyz87 authored and Alexei Starovoitov committed Aug 21, 2024
1 parent e5bdd6a commit a038eac
Showing 2 changed files with 57 additions and 4 deletions.
53 changes: 51 additions & 2 deletions tools/testing/selftests/bpf/progs/verifier_nocsr.c
@@ -78,6 +78,7 @@ __naked void canary_arm64_riscv64(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("3: exit")
__success
__naked void canary_zero_spills(void)
@@ -94,7 +95,9 @@ SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 16")
__xlated("1: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r2 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern1(void)
@@ -113,7 +116,9 @@ __naked void wrong_reg_in_pattern1(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r6")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r6 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern2(void)
@@ -132,7 +137,9 @@ __naked void wrong_reg_in_pattern2(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r0")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r0 = *(u64 *)(r10 -16)")
__success
__naked void wrong_reg_in_pattern3(void)
@@ -151,7 +158,9 @@ __naked void wrong_reg_in_pattern3(void)
SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u64 *)(r2 -16) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u64 *)(r10 -16)")
__success
__naked void wrong_base_in_pattern(void)
@@ -171,7 +180,9 @@ __naked void wrong_base_in_pattern(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r2 = 1")
__success
__naked void wrong_insn_in_pattern(void)
@@ -191,7 +202,9 @@ __naked void wrong_insn_in_pattern(void)
SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u64 *)(r10 -8)")
__success
__naked void wrong_off_in_pattern1(void)
@@ -211,7 +224,9 @@ __naked void wrong_off_in_pattern1(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u32 *)(r10 -4) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u32 *)(r10 -4)")
__success
__naked void wrong_off_in_pattern2(void)
@@ -230,7 +245,9 @@ __naked void wrong_off_in_pattern2(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u32 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u32 *)(r10 -16)")
__success
__naked void wrong_size_in_pattern(void)
@@ -249,7 +266,9 @@ __naked void wrong_size_in_pattern(void)
SEC("raw_tp")
__arch_x86_64
__xlated("2: *(u32 *)(r10 -8) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u32 *)(r10 -8)")
__success
__naked void partial_pattern(void)
@@ -275,11 +294,15 @@ __xlated("1: r2 = 2")
/* not patched, spills for -8, -16 not removed */
__xlated("2: *(u64 *)(r10 -8) = r1")
__xlated("3: *(u64 *)(r10 -16) = r2")
__xlated("...")
__xlated("5: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("7: r2 = *(u64 *)(r10 -16)")
__xlated("8: r1 = *(u64 *)(r10 -8)")
/* patched, spills for -24, -32 removed */
__xlated("...")
__xlated("10: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("12: exit")
__success
__naked void min_stack_offset(void)
@@ -308,7 +331,9 @@ __naked void min_stack_offset(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_fixed_read(void)
@@ -330,7 +355,9 @@ __naked void bad_fixed_read(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_fixed_write(void)
@@ -352,7 +379,9 @@ __naked void bad_fixed_write(void)
SEC("raw_tp")
__arch_x86_64
__xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: r1 = *(u64 *)(r10 -16)")
__success
__naked void bad_varying_read(void)
@@ -379,7 +408,9 @@ __naked void bad_varying_read(void)
SEC("raw_tp")
__arch_x86_64
__xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: r1 = *(u64 *)(r10 -16)")
__success
__naked void bad_varying_write(void)
@@ -406,7 +437,9 @@ __naked void bad_varying_write(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_write_in_subprog(void)
@@ -438,7 +471,9 @@ __naked static void bad_write_in_subprog_aux(void)
SEC("raw_tp")
__arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__success
__naked void bad_helper_write(void)
@@ -466,13 +501,19 @@ SEC("raw_tp")
__arch_x86_64
/* main, not patched */
__xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)")
__xlated("...")
__xlated("9: call pc+1")
__xlated("...")
__xlated("10: exit")
/* subprogram, patched */
__xlated("11: r1 = 1")
__xlated("...")
__xlated("13: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("15: exit")
__success
__naked void invalidate_one_subprog(void)
@@ -510,12 +551,16 @@ SEC("raw_tp")
__arch_x86_64
/* main */
__xlated("0: r1 = 1")
__xlated("...")
__xlated("2: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("4: call pc+1")
__xlated("5: exit")
/* subprogram */
__xlated("6: r1 = 1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: *(u64 *)(r10 -16) = r1")
__xlated("11: exit")
__success
@@ -576,7 +621,9 @@ __log_level(4) __msg("stack depth 16")
/* may_goto counter at -16 */
__xlated("0: *(u64 *)(r10 -16) =")
__xlated("1: r1 = 1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
/* may_goto expansion starts */
__xlated("5: r11 = *(u64 *)(r10 -16)")
__xlated("6: if r11 == 0x0 goto pc+3")
@@ -623,13 +670,15 @@ __xlated("5: r0 = *(u32 *)(r0 +0)")
__xlated("6: r2 =")
__xlated("7: r3 = 0")
__xlated("8: r4 = 0")
__xlated("...")
/* ... part of the inlined bpf_loop */
__xlated("12: *(u64 *)(r10 -32) = r6")
__xlated("13: *(u64 *)(r10 -24) = r7")
__xlated("14: *(u64 *)(r10 -16) = r8")
/* ... */
__xlated("...")
__xlated("21: call pc+8") /* dummy_loop_callback */
/* ... last insns of the bpf_loop_interaction1 */
__xlated("...")
__xlated("28: r0 = 0")
__xlated("29: exit")
/* dummy_loop_callback */
@@ -670,7 +719,7 @@ __xlated("5: r0 = *(u32 *)(r0 +0)")
__xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("7: call")
__xlated("8: r1 = *(u64 *)(r10 -16)")
/* ... */
__xlated("...")
/* ... part of the inlined bpf_loop */
__xlated("15: *(u64 *)(r10 -40) = r6")
__xlated("16: *(u64 *)(r10 -32) = r7")
8 changes: 6 additions & 2 deletions tools/testing/selftests/bpf/test_loader.c
@@ -365,6 +365,8 @@ static int parse_test_spec(struct test_loader *tester,
const char *description = NULL;
bool has_unpriv_result = false;
bool has_unpriv_retval = false;
bool unpriv_xlated_on_next_line = true;
bool xlated_on_next_line = true;
bool unpriv_jit_on_next_line;
bool jit_on_next_line;
bool collect_jit = false;
@@ -461,12 +463,14 @@ static int parse_test_spec(struct test_loader *tester,
spec->mode_mask |= UNPRIV;
}
} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) {
err = push_msg(msg, &spec->priv.expect_xlated);
err = push_disasm_msg(msg, &xlated_on_next_line,
&spec->priv.expect_xlated);
if (err)
goto cleanup;
spec->mode_mask |= PRIV;
} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) {
err = push_msg(msg, &spec->unpriv.expect_xlated);
err = push_disasm_msg(msg, &unpriv_xlated_on_next_line,
&spec->unpriv.expect_xlated);
if (err)
goto cleanup;
spec->mode_mask |= UNPRIV;