Skip to content

Commit 2e00b29

Browse files
aspsk authored and Kernel Patches Daemon committed
selftests/bpf: add C-level selftests for indirect jumps
Add C-level selftests for indirect jumps to validate LLVM and libbpf functionality. The tests are intentionally disabled, to be run locally by developers, but will not make the CI red. Signed-off-by: Anton Protopopov <[email protected]>
1 parent 8bf52a8 commit 2e00b29

File tree

3 files changed

+590
-1
lines changed

3 files changed

+590
-1
lines changed

tools/testing/selftests/bpf/Makefile

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -454,7 +454,9 @@ BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
454454
-I$(abspath $(OUTPUT)/../usr/include) \
455455
-std=gnu11 \
456456
-fno-strict-aliasing \
457-
-Wno-compare-distinct-pointer-types
457+
-Wno-compare-distinct-pointer-types \
458+
-Wno-initializer-overrides \
459+
#
458460
# TODO: enable me -Wsign-compare
459461

460462
CLANG_CFLAGS = $(CLANG_SYS_INCLUDES)
Lines changed: 185 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,185 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
3+
#include <test_progs.h>
4+
5+
#include <linux/if_ether.h>
6+
#include <linux/in.h>
7+
#include <linux/ip.h>
8+
#include <linux/ipv6.h>
9+
#include <linux/in6.h>
10+
#include <linux/udp.h>
11+
#include <linux/tcp.h>
12+
13+
#include <sys/syscall.h>
14+
#include <bpf/bpf.h>
15+
16+
#include "bpf_gotox.skel.h"
17+
18+
/* Disable tests for now, as CI runs with LLVM-20 */
19+
#if 0
20+
/*
 * Execute @prog once via BPF_PROG_TEST_RUN, feeding it the caller-supplied
 * context buffer, and assert that the syscall itself succeeded.
 */
static void __test_run(struct bpf_program *prog, void *ctx_in, size_t ctx_size_in)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.ctx_in = ctx_in,
		.ctx_size_in = ctx_size_in,
	);
	int prog_fd = bpf_program__fd(prog);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	ASSERT_OK(err, "test_run_opts err");
}
32+
33+
static void check_simple(struct bpf_gotox *skel,
34+
struct bpf_program *prog,
35+
__u64 ctx_in,
36+
__u64 expected)
37+
{
38+
skel->bss->ret_user = 0;
39+
40+
__test_run(prog, &ctx_in, sizeof(ctx_in));
41+
42+
if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
43+
return;
44+
}
45+
46+
static void check_simple_fentry(struct bpf_gotox *skel,
47+
struct bpf_program *prog,
48+
__u64 ctx_in,
49+
__u64 expected)
50+
{
51+
skel->bss->in_user = ctx_in;
52+
skel->bss->ret_user = 0;
53+
54+
/* trigger */
55+
usleep(1);
56+
57+
if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
58+
return;
59+
}
60+
61+
/* validate that for two loads of the same jump table libbpf generates only one map */
62+
static void check_one_map_two_jumps(struct bpf_gotox *skel)
63+
{
64+
struct bpf_prog_info prog_info;
65+
struct bpf_map_info map_info;
66+
__u32 len;
67+
__u32 map_ids[16];
68+
int prog_fd, map_fd;
69+
int ret;
70+
int i;
71+
bool seen = false;
72+
73+
memset(&prog_info, 0, sizeof(prog_info));
74+
prog_info.map_ids = (long)map_ids;
75+
prog_info.nr_map_ids = ARRAY_SIZE(map_ids);
76+
prog_fd = bpf_program__fd(skel->progs.one_map_two_jumps);
77+
if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd(one_map_two_jumps)"))
78+
return;
79+
80+
len = sizeof(prog_info);
81+
ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &len);
82+
if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(prog_fd)"))
83+
return;
84+
85+
for (i = 0; i < prog_info.nr_map_ids; i++) {
86+
map_fd = bpf_map_get_fd_by_id(map_ids[i]);
87+
if (!ASSERT_GE(map_fd, 0, "bpf_program__fd(one_map_two_jumps)"))
88+
return;
89+
90+
len = sizeof(map_info);
91+
memset(&map_info, 0, len);
92+
ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &len);
93+
if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(map_fd)")) {
94+
close(map_fd);
95+
return;
96+
}
97+
98+
if (map_info.type == BPF_MAP_TYPE_INSN_ARRAY) {
99+
if (!ASSERT_EQ(seen, false, "more than one INSN_ARRAY map")) {
100+
close(map_fd);
101+
return;
102+
}
103+
seen = true;
104+
}
105+
close(map_fd);
106+
}
107+
108+
ASSERT_EQ(seen, true, "no INSN_ARRAY map");
109+
}
110+
111+
static void check_gotox_skel(struct bpf_gotox *skel)
112+
{
113+
int i;
114+
__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
115+
__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
116+
__u64 out2[] = {103, 104, 107, 205, 115, 1019, 1019};
117+
__u64 in3[] = {0, 11, 27, 31, 22, 45, 99};
118+
__u64 out3[] = {2, 3, 4, 5, 19, 19, 19};
119+
__u64 in4[] = {0, 1, 2, 3, 4, 5, 77};
120+
__u64 out4[] = {12, 15, 7 , 15, 12, 15, 15};
121+
122+
for (i = 0; i < ARRAY_SIZE(in); i++)
123+
check_simple(skel, skel->progs.simple_test, in[i], out[i]);
124+
125+
for (i = 0; i < ARRAY_SIZE(in); i++)
126+
check_simple(skel, skel->progs.simple_test2, in[i], out[i]);
127+
128+
for (i = 0; i < ARRAY_SIZE(in); i++)
129+
check_simple(skel, skel->progs.two_switches, in[i], out2[i]);
130+
131+
if (0) for (i = 0; i < ARRAY_SIZE(in); i++)
132+
check_simple(skel, skel->progs.big_jump_table, in3[i], out3[i]);
133+
134+
if (0) for (i = 0; i < ARRAY_SIZE(in); i++)
135+
check_simple(skel, skel->progs.one_jump_two_maps, in4[i], out4[i]);
136+
137+
for (i = 0; i < ARRAY_SIZE(in); i++)
138+
check_simple(skel, skel->progs.use_static_global1, in[i], out[i]);
139+
140+
for (i = 0; i < ARRAY_SIZE(in); i++)
141+
check_simple(skel, skel->progs.use_static_global2, in[i], out[i]);
142+
143+
for (i = 0; i < ARRAY_SIZE(in); i++)
144+
check_simple(skel, skel->progs.use_nonstatic_global1, in[i], out[i]);
145+
146+
for (i = 0; i < ARRAY_SIZE(in); i++)
147+
check_simple(skel, skel->progs.use_nonstatic_global2, in[i], out[i]);
148+
149+
bpf_program__attach(skel->progs.simple_test_other_sec);
150+
for (i = 0; i < ARRAY_SIZE(in); i++)
151+
check_simple_fentry(skel, skel->progs.simple_test_other_sec, in[i], out[i]);
152+
153+
bpf_program__attach(skel->progs.use_static_global_other_sec);
154+
for (i = 0; i < ARRAY_SIZE(in); i++)
155+
check_simple_fentry(skel, skel->progs.use_static_global_other_sec, in[i], out[i]);
156+
157+
bpf_program__attach(skel->progs.use_nonstatic_global_other_sec);
158+
for (i = 0; i < ARRAY_SIZE(in); i++)
159+
check_simple_fentry(skel, skel->progs.use_nonstatic_global_other_sec, in[i], out[i]);
160+
161+
if (0) check_one_map_two_jumps(skel);
162+
}
163+
164+
void test_bpf_gotox(void)
165+
{
166+
struct bpf_gotox *skel;
167+
int ret;
168+
169+
skel = bpf_gotox__open();
170+
if (!ASSERT_NEQ(skel, NULL, "bpf_gotox__open"))
171+
return;
172+
173+
ret = bpf_gotox__load(skel);
174+
if (!ASSERT_OK(ret, "bpf_gotox__load"))
175+
return;
176+
177+
check_gotox_skel(skel);
178+
179+
bpf_gotox__destroy(skel);
180+
}
181+
#else
182+
/*
 * No-op stub: the real tests above are compiled out (#if 0) because CI
 * runs with LLVM-20, which lacks the indirect-jump support they need.
 * Keeping the symbol defined lets the test harness link unchanged.
 */
void test_bpf_gotox(void)
{
}
185+
#endif

0 commit comments

Comments
 (0)