sync: release 0.18.2 #2157

Merged · 45 commits · Nov 20, 2024

Commits
9136650
fix(modern): check `cred` field is not NULL before the access
Andreagit97 Oct 16, 2024
e214eb3
cleanup: move `sched_p_exec` tail calls
Andreagit97 Nov 6, 2024
e9a8ced
cleanup: move `sched_p_fork` tail calls
Andreagit97 Nov 6, 2024
00a6a39
cleanup: move `hotplug` logic into sys_exit
Andreagit97 Nov 6, 2024
dabbaf7
cleanup: simplify dropping logic for tracepoint
Andreagit97 Nov 6, 2024
70033a4
update: set sched_switch as UF_ALWAYS_DROP
Andreagit97 Nov 6, 2024
cc7c36c
cleanup: rename some methods
Andreagit97 Nov 6, 2024
38cceec
cleanup: don't initialize enums if not needed
Andreagit97 Nov 6, 2024
27e12c9
cleanup: remove `ctx`
Andreagit97 Nov 6, 2024
070cee3
cleanup(tests): improve test stability
Andreagit97 Nov 7, 2024
8279c1c
chore(ci): bumped perf unit tests threshold to 5% (relative) slowdown.
FedeDP Sep 16, 2024
377d260
chore(test/libsinsp_e2e,ci): port libsinsp_e2e tests to use python3.
FedeDP Sep 17, 2024
e4eea8b
new(ci): add a zig build job plus a composite action to setup zig.
FedeDP Sep 20, 2024
8b369a2
chore(ci): check linked glibc version on zig.
FedeDP Sep 23, 2024
768d1b1
chore(ci): show diff on failed format
Molter73 Oct 1, 2024
c4c124a
clean up and update ci
cpanato Oct 9, 2024
cfe3dae
chore(deps): Bump the actions group with 5 updates
dependabot[bot] Oct 9, 2024
0ade1df
chore(deps): Bump codecov/codecov-action
dependabot[bot] Oct 9, 2024
8cdf9f0
chore(deps): Bump dorny/paths-filter from 2.11.1 to 3.0.2
dependabot[bot] Oct 9, 2024
adc1630
chore(deps): Bump actions/setup-python from 4.7.1 to 5.2.0
dependabot[bot] Oct 9, 2024
406c6ad
chore(deps): Bump actions/upload-pages-artifact from 2.0.0 to 3.0.1
dependabot[bot] Oct 9, 2024
45a63b0
chore(deps): Bump peter-evans/create-pull-request from 5.0.2 to 7.0.5
dependabot[bot] Oct 9, 2024
0d1ab87
chore(deps): Bump actions/deploy-pages from 2.0.4 to 4.0.5
dependabot[bot] Oct 9, 2024
a3b7545
chore(deps): Bump actions/upload-artifact from 3.1.3 to 4.4.2
dependabot[bot] Oct 9, 2024
697c0b2
chore(deps): Bump actions/checkout from 3.6.0 to 4.2.1
dependabot[bot] Oct 10, 2024
66d9dcb
fix(ci): fix kernel testing action by using proper tag name.
FedeDP Oct 10, 2024
eaa816d
fix(ci): fixed build-scap-open-w-extern-bpf-skeleton: we do not rely …
FedeDP Oct 10, 2024
d0c4f58
chore(ci): bump actions/download-artifact to latest release (v4.1.8).
FedeDP Oct 10, 2024
8d752c8
chore(deps): Bump the actions group with 2 updates
dependabot[bot] Oct 14, 2024
0fb3d13
new(ci): run latest-kernel CI against arm64 too.
FedeDP Oct 16, 2024
aa6951e
fix(ci): fixed latest-kernel CI usage of steps/jobs outputs.
FedeDP Oct 17, 2024
f101f1e
fix(ci): fixed create-comment-kernel-testing workflow when kernel-tes…
FedeDP Oct 17, 2024
12d3a8b
fix(ci): fixed create-comment-perf workflow when perf CI does not run.
FedeDP Oct 17, 2024
e9168ca
fix(ci): use `process.exit();` to leave node script in github/action-…
FedeDP Oct 21, 2024
9387467
fix(ci): create-comment workflows array length check.
FedeDP Oct 21, 2024
a152203
chore(deps): Bump uraimo/run-on-arch-action in the actions group
dependabot[bot] Oct 21, 2024
1a50aba
chore(ci): keep zig development version alive by using actions/cache.
FedeDP Sep 20, 2024
ef5a56d
chore(ci): bump the zig version to latest.
FedeDP Sep 23, 2024
8a2665b
chore(ci): bump zig version and improve install-zig action.
FedeDP Oct 22, 2024
3ff318d
fix(ci): fixed create-comment-kernel-testing typo.
FedeDP Oct 22, 2024
d0c5297
chore(ci): increase timeout to 5 minutes for driverkit build in latest…
FedeDP Oct 22, 2024
2a67310
update(ci): use cncf provided self hosted runners for arm64.
FedeDP Sep 13, 2024
70f0538
chore(deps): Bump the actions group with 3 updates
dependabot[bot] Oct 29, 2024
8e0c741
chore(deps): Bump softprops/action-gh-release from 1 to 2
dependabot[bot] Oct 9, 2024
89cb0d3
chore(deps): Bump softprops/action-gh-release in the actions group
dependabot[bot] Nov 11, 2024
2 changes: 1 addition & 1 deletion driver/bpf/probe.c
@@ -254,7 +254,7 @@ BPF_PROBE("sched/", sched_switch, sched_switch_args) {

evt_type = PPME_SCHEDSWITCH_6_E;

call_filler(ctx, ctx, evt_type, 0, -1);
call_filler(ctx, ctx, evt_type, UF_ALWAYS_DROP, -1);
return 0;
}

2 changes: 1 addition & 1 deletion driver/main.c
@@ -2393,7 +2393,7 @@ TRACEPOINT_PROBE(sched_switch_probe,
* handler calling printk() and potentially deadlocking the system.
*/
record_event_all_consumers(PPME_SCHEDSWITCH_6_E,
UF_USED | UF_ATOMIC,
UF_ALWAYS_DROP | UF_ATOMIC,
&event_data,
KMOD_PROG_SCHED_SWITCH);
}
5 changes: 0 additions & 5 deletions driver/modern_bpf/helpers/base/maps_getters.h
@@ -85,11 +85,6 @@ static __always_inline uint8_t maps__64bit_sampling_syscall_table(uint32_t sysca
return g_64bit_sampling_syscall_table[syscall_id & (SYSCALL_TABLE_SIZE - 1)];
}

static __always_inline uint8_t maps__64bit_sampling_tracepoint_table(uint32_t event_id) {
return g_64bit_sampling_tracepoint_table[event_id < PPM_EVENT_MAX ? event_id
: PPM_EVENT_MAX - 1];
}

/*=============================== SAMPLING TABLES ===========================*/

/*=============================== SYSCALL-64 INTERESTING TABLE ===========================*/
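
As an aside, the syscall sampling getter kept above bounds its array index with a power-of-two mask rather than a branch. A minimal standalone sketch of that idiom (the TABLE_SIZE value of 512 is an illustrative assumption; only the masking pattern is taken from the code above):

#include <stdint.h>

/* Illustration only: with a power-of-two TABLE_SIZE, `id & (TABLE_SIZE - 1)`
 * clamps any id into [0, TABLE_SIZE - 1] without a branch, which also gives
 * the BPF verifier a provable bound on the array index. */
#define TABLE_SIZE 512

static inline uint8_t table_lookup(const uint8_t *table, uint32_t id) {
    return table[id & (TABLE_SIZE - 1)];
}
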
20 changes: 10 additions & 10 deletions driver/modern_bpf/helpers/extract/extract_from_kernel.h
@@ -374,15 +374,15 @@ static __always_inline uint64_t extract__capability(struct task_struct *task,

switch(capability_type) {
case CAP_INHERITABLE:
READ_TASK_FIELD_INTO(&cap_struct, task, cred, cap_inheritable);
BPF_CORE_READ_INTO(&cap_struct, task, cred, cap_inheritable);
break;

case CAP_PERMITTED:
READ_TASK_FIELD_INTO(&cap_struct, task, cred, cap_permitted);
BPF_CORE_READ_INTO(&cap_struct, task, cred, cap_permitted);
break;

case CAP_EFFECTIVE:
READ_TASK_FIELD_INTO(&cap_struct, task, cred, cap_effective);
BPF_CORE_READ_INTO(&cap_struct, task, cred, cap_effective);
break;

default:
@@ -729,7 +729,7 @@ static __always_inline unsigned long extract__clone_flags(struct task_struct *ta
*/
static __always_inline void extract__euid(struct task_struct *task, uint32_t *euid) {
*euid = UINT32_MAX;
READ_TASK_FIELD_INTO(euid, task, cred, euid.val);
BPF_CORE_READ_INTO(euid, task, cred, euid.val);
}

/**
@@ -739,7 +739,7 @@ static __always_inline void extract__euid(struct task_struct *task, uint32_t *eu
* @param egid return value by reference
*/
static __always_inline void extract__egid(struct task_struct *task, uint32_t *egid) {
READ_TASK_FIELD_INTO(egid, task, cred, egid.val);
BPF_CORE_READ_INTO(egid, task, cred, egid.val);
}

/////////////////////////
@@ -885,7 +885,7 @@ static __always_inline uint32_t bpf_map_id_up(struct uid_gid_map *map, uint32_t

static __always_inline bool groups_search(struct task_struct *task, uint32_t grp) {
struct group_info *group_info = NULL;
READ_TASK_FIELD_INTO(&group_info, task, cred, group_info);
BPF_CORE_READ_INTO(&group_info, task, cred, group_info);
if(!group_info) {
return false;
}
@@ -934,8 +934,8 @@ static __always_inline bool extract__exe_writable(struct task_struct *task, stru

uint32_t fsuid;
uint32_t fsgid;
READ_TASK_FIELD_INTO(&fsuid, task, cred, fsuid.val);
READ_TASK_FIELD_INTO(&fsgid, task, cred, fsgid.val);
BPF_CORE_READ_INTO(&fsuid, task, cred, fsuid.val);
BPF_CORE_READ_INTO(&fsgid, task, cred, fsgid.val);

/* HAS_UNMAPPED_ID() */
if(i_uid == -1 || i_gid == -1) {
@@ -978,15 +978,15 @@ static __always_inline bool extract__exe_writable(struct task_struct *task, stru
}

struct user_namespace *ns;
READ_TASK_FIELD_INTO(&ns, task, cred, user_ns);
BPF_CORE_READ_INTO(&ns, task, cred, user_ns);
if(ns == NULL) {
return false;
}
bool kuid_mapped = bpf_map_id_up(&ns->uid_map, i_uid) != (uint32_t)-1;
bool kgid_mapped = bpf_map_id_up(&ns->gid_map, i_gid) != (uint32_t)-1;

kernel_cap_t cap_struct = {0};
READ_TASK_FIELD_INTO(&cap_struct, task, cred, cap_effective);
BPF_CORE_READ_INTO(&cap_struct, task, cred, cap_effective);
// Kernel 6.3 changed the kernel_cap_struct type from uint32_t[2] to uint64_t.
// Luckily enough, it also changed field name from cap to val.
if(bpf_core_field_exists(((struct kernel_cap_struct *)0)->cap)) {
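
The READ_TASK_FIELD_INTO → BPF_CORE_READ_INTO switch above is what implements the "check `cred` field is not NULL before the access" fix from the first commit: BPF_CORE_READ_INTO expands to a chain of CO-RE probe reads, so a NULL intermediate pointer makes the read fail cleanly instead of being dereferenced. A rough sketch of what one of the calls boils down to (not the literal macro expansion):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* Sketch of BPF_CORE_READ_INTO(euid, task, cred, euid.val): every hop in the
 * chain is a CO-RE probe read, so a NULL `cred` makes the second read fail
 * (zero-filling the destination) rather than dereferencing a NULL pointer. */
static __always_inline void sketch_read_euid(struct task_struct *task, uint32_t *euid) {
    const struct cred *cred = NULL;
    /* hop 1: read the `cred` pointer out of the task_struct */
    bpf_core_read(&cred, sizeof(cred), &task->cred);
    /* hop 2: read euid.val through `cred`; no direct dereference of `cred` */
    bpf_core_read(euid, sizeof(*euid), &cred->euid.val);
}
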
23 changes: 4 additions & 19 deletions driver/modern_bpf/helpers/interfaces/attached_programs.h
@@ -10,17 +10,11 @@

#include <helpers/base/maps_getters.h>

/* This enum is used to tell if we are considering a syscall or a tracepoint */
enum intrumentation_type {
MODERN_BPF_SYSCALL = 0,
MODERN_BPF_TRACEPOINT = 1,
};

/* The sampling logic is used by all BPF programs attached to the kernel.
* We treat the syscalls tracepoints in a dedicated way because they could generate
* more than one event (1 for each syscall) for this reason we need a dedicated table.
*/
static __always_inline bool sampling_logic(void* ctx, uint32_t id, enum intrumentation_type type) {
static __always_inline bool sampling_logic(void* ctx, uint32_t id) {
/* If dropping mode is not enabled we don't perform any sampling
* false: means don't drop the syscall
* true: means drop the syscall
@@ -29,16 +23,7 @@ static __always_inline bool sampling_logic(void* ctx, uint32_t id, enum intrumen
return false;
}

uint8_t sampling_flag = 0;

/* If we have a syscall we use the sampling_syscall_table otherwise
* with tracepoints we use the sampling_tracepoint_table.
*/
if(type == MODERN_BPF_SYSCALL) {
sampling_flag = maps__64bit_sampling_syscall_table(id);
} else {
sampling_flag = maps__64bit_sampling_tracepoint_table(id);
}
uint8_t sampling_flag = maps__64bit_sampling_syscall_table(id);

if(sampling_flag == UF_NEVER_DROP) {
return false;
@@ -59,15 +44,15 @@
* an iteration we will synchronize again the next time the logic is enabled.
*/
maps__set_is_dropping(true);
bpf_tail_call(ctx, &extra_event_prog_tail_table, T1_DROP_E);
bpf_tail_call(ctx, &extra_syscall_calls, T1_DROP_E);
bpf_printk("unable to tail call into 'drop_e' prog");
}
return true;
}

if(maps__get_is_dropping()) {
maps__set_is_dropping(false);
bpf_tail_call(ctx, &extra_event_prog_tail_table, T1_DROP_X);
bpf_tail_call(ctx, &extra_syscall_calls, T1_DROP_X);
bpf_printk("unable to tail call into 'drop_x' prog");
}

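Putting the pieces together, the syscall-only sampling_logic now reduces to the decision flow below. This is a hedged sketch, not the verbatim function: the dropping-mode check is assumed to use maps__get_dropping_mode() (as the page-fault programs later in this PR do), should_drop_by_ratio() is a hypothetical stand-in for the elided ratio branch, and the drop_e/drop_x tail calls shown above are omitted.

/* Hedged sketch of the per-syscall drop decision; `true` means drop. */
static __always_inline bool sampling_decision_sketch(uint32_t syscall_id) {
    if(!maps__get_dropping_mode()) {
        return false; /* sampling disabled: keep every event */
    }

    uint8_t flag = maps__64bit_sampling_syscall_table(syscall_id);
    if(flag == UF_NEVER_DROP) {
        return false; /* always keep */
    }
    if(flag == UF_ALWAYS_DROP) {
        return true; /* always drop, e.g. sched_switch after this PR */
    }

    /* UF_NONE: keep or drop according to the configured sampling ratio. */
    return should_drop_by_ratio(); /* hypothetical helper, not in the tree */
}
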
7 changes: 3 additions & 4 deletions driver/modern_bpf/helpers/store/auxmap_store_params.h
@@ -117,13 +117,12 @@ static __always_inline void auxmap__finalize_event_header(struct auxiliary_map *
* of events sent to userspace, otherwise we increment the dropped events.
*
* @param auxmap pointer to the auxmap in which we have already written the entire event.
* @param ctx BPF prog context
*/
static __always_inline void auxmap__submit_event(struct auxiliary_map *auxmap, void *ctx) {
static __always_inline void auxmap__submit_event(struct auxiliary_map *auxmap) {
struct ringbuf_map *rb = maps__get_ringbuf_map();
if(!rb) {
bpf_tail_call(ctx, &extra_event_prog_tail_table, T1_HOTPLUG_E);
bpf_printk("failed to tail call into the 'hotplug' prog");
// this should never happen because we check it in sys_enter/sys_exit
bpf_printk("FAILURE: unable to obtain the ring buffer");
return;
}

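For orientation, this is how the auxmap helpers are meant to be chained in an event-producing program after this change (a sketch mirroring the sched_proc_exit program later in this PR; the parameter-storing step is left as a comment since those helpers are not shown in this hunk, and the usual vmlinux.h / libbpf includes are omitted):

#include <helpers/store/auxmap_store_params.h>

SEC("tp_btf/sched_process_exit")
int BPF_PROG(example_producer, struct task_struct *task) {
    struct auxiliary_map *auxmap = auxmap__get();
    if(!auxmap) {
        return 0;
    }

    /* ... preload the event header and store the event parameters into the
     * auxmap here, using the store helpers defined in this file ... */

    auxmap__finalize_event_header(auxmap);
    auxmap__submit_event(auxmap); /* the `ctx` argument is gone after this PR */
    return 0;
}
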
6 changes: 2 additions & 4 deletions driver/modern_bpf/helpers/store/ringbuf_store_params.h
@@ -90,18 +90,16 @@ struct ringbuf_struct {
* to know the event dimension at compile time.
*
* @param ringbuf pointer to the `ringbuf_struct`
* @param ctx BPF prog context
* @param event_size exact size of the fixed-size event
* @return `1` in case of success, `0` in case of failure.
*/
static __always_inline uint32_t ringbuf__reserve_space(struct ringbuf_struct *ringbuf,
void *ctx,
uint32_t event_size,
uint16_t event_type) {
struct ringbuf_map *rb = maps__get_ringbuf_map();
if(!rb) {
bpf_tail_call(ctx, &extra_event_prog_tail_table, T1_HOTPLUG_E);
bpf_printk("failed to tail call into the 'hotplug' prog");
// this should never happen because we check it in sys_enter/sys_exit
bpf_printk("FAILURE: unable to obtain the ring buffer");
return 0;
}

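Under the hood these fixed-size helpers sit on top of the kernel ring-buffer API. A simplified, hedged sketch of the mechanism (a single global ring buffer instead of the per-CPU maps the driver actually selects via maps__get_ringbuf_map(), and an illustrative event size):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Illustrative single ring buffer; the real driver keeps one per CPU. */
struct {
    __uint(type, BPF_MAP_TYPE_RINGBUF);
    __uint(max_entries, 256 * 1024);
} example_rb SEC(".maps");

#define EXAMPLE_EVENT_SIZE 64 /* must be a compile-time constant, as noted above */

static __always_inline int emit_fixed_size_event(void) {
    void *space = bpf_ringbuf_reserve(&example_rb, EXAMPLE_EVENT_SIZE, 0);
    if(!space) {
        return 0; /* buffer full: the event is counted as dropped */
    }
    /* ... write the event header and fixed-size parameters into `space` ... */
    bpf_ringbuf_submit(space, 0);
    return 1;
}
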
13 changes: 2 additions & 11 deletions driver/modern_bpf/maps/maps.h
@@ -65,15 +65,6 @@ __weak bool g_64bit_interesting_syscalls_table[SYSCALL_TABLE_SIZE];
*/
__weak uint8_t g_64bit_sampling_syscall_table[SYSCALL_TABLE_SIZE];

/**
* @brief Given the tracepoint enum returns:
* - `UF_NEVER_DROP` if the syscall must not be dropped in the sampling logic.
* - `UF_ALWAYS_DROP` if the syscall must always be dropped in the sampling logic.
* - `UF_NONE` if we drop the syscall depends on the sampling ratio.
*/
/// TOOD: we need to change the dimension! we need to create a dedicated enum for tracepoints!
__weak uint8_t g_64bit_sampling_tracepoint_table[PPM_EVENT_MAX];

/**
* @brief Given the syscall id on 32-bit x86 arch returns
* its x64 value. Used to support ia32 syscall emulation.
@@ -131,15 +122,15 @@ struct {
* programs directly attached in the kernel (like page_faults,
* context_switch, ...) and by syscall_events (like
* ppme_syscall_execveat_x, ...).
* Given a predefined tail-code (`extra_event_prog_code`), it calls
* Given a predefined tail-code (`extra_syscall_codes`), it calls
* the right bpf program.
*/
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, TAIL_EXTRA_EVENT_PROG_MAX);
__type(key, uint32_t);
__type(value, uint32_t);
} extra_event_prog_tail_table __weak SEC(".maps");
} extra_syscall_calls __weak SEC(".maps");

/*=============================== BPF_MAP_TYPE_PROG_ARRAY ===============================*/

@@ -49,7 +49,7 @@ int BPF_PROG(sys_enter, struct pt_regs *regs, long syscall_id) {
return 0;
}

if(sampling_logic(ctx, syscall_id, MODERN_BPF_SYSCALL)) {
if(sampling_logic(ctx, syscall_id)) {
return 0;
}

@@ -63,14 +63,24 @@ int BPF_PROG(sys_exit, struct pt_regs *regs, long ret) {
return 0;
}

if(sampling_logic(ctx, syscall_id, MODERN_BPF_SYSCALL)) {
if(sampling_logic(ctx, syscall_id)) {
return 0;
}

if(maps__get_drop_failed() && ret < 0) {
return 0;
}

// If we cannot find a ring buffer for this CPU we probably have a hotplug event. It's ok to
// check only in the exit path since we will always have at least one exit syscall enabled. If
// we change our architecture we may need to update this logic.
struct ringbuf_map *rb = maps__get_ringbuf_map();
if(!rb) {
bpf_tail_call(ctx, &extra_syscall_calls, T1_HOTPLUG_E);
bpf_printk("failed to tail call into the 'hotplug' prog");
return 0;
}

bpf_tail_call(ctx, &syscall_exit_tail_table, syscall_id);

return 0;
@@ -16,12 +16,13 @@
#ifdef CAPTURE_PAGE_FAULTS
SEC("tp_btf/page_fault_kernel")
int BPF_PROG(pf_kernel, unsigned long address, struct pt_regs *regs, unsigned long error_code) {
if(sampling_logic(ctx, PPME_PAGE_FAULT_E, MODERN_BPF_TRACEPOINT)) {
// In case of dropping mode we don't want this kind of events.
if(maps__get_dropping_mode()) {
return 0;
}

struct ringbuf_struct ringbuf;
if(!ringbuf__reserve_space(&ringbuf, ctx, PAGE_FAULT_SIZE, PPME_PAGE_FAULT_E)) {
if(!ringbuf__reserve_space(&ringbuf, PAGE_FAULT_SIZE, PPME_PAGE_FAULT_E)) {
return 0;
}

@@ -16,12 +16,13 @@
#ifdef CAPTURE_PAGE_FAULTS
SEC("tp_btf/page_fault_user")
int BPF_PROG(pf_user, unsigned long address, struct pt_regs *regs, unsigned long error_code) {
if(sampling_logic(ctx, PPME_PAGE_FAULT_E, MODERN_BPF_TRACEPOINT)) {
// In case of dropping mode we don't want this kind of events.
if(maps__get_dropping_mode()) {
return 0;
}

struct ringbuf_struct ringbuf;
if(!ringbuf__reserve_space(&ringbuf, ctx, PAGE_FAULT_SIZE, PPME_PAGE_FAULT_E)) {
if(!ringbuf__reserve_space(&ringbuf, PAGE_FAULT_SIZE, PPME_PAGE_FAULT_E)) {
return 0;
}

@@ -13,6 +13,37 @@
* struct linux_binprm *bprm)
*/
#ifdef CAPTURE_SCHED_PROC_EXEC

enum extra_sched_proc_exec_codes {
T1_SCHED_PROC_EXEC,
T2_SCHED_PROC_EXEC,
// add more codes here.
T_SCHED_PROC_EXEC_MAX,
};

/*
* FORWARD DECLARATIONS:
* See the `BPF_PROG` macro in libbpf `libbpf/src/bpf_tracing.h`
* #define BPF_PROG(name, args...) \
* name(unsigned long long *ctx); \
*/
int t1_sched_p_exec(unsigned long long *ctx);
int t2_sched_p_exec(unsigned long long *ctx);

struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, T_SCHED_PROC_EXEC_MAX);
__uint(key_size, sizeof(__u32));
__array(values, int(void *));
} extra_sched_proc_exec_calls SEC(".maps") = {
.values =
{
[T1_SCHED_PROC_EXEC] = (void *)&t1_sched_p_exec,
[T2_SCHED_PROC_EXEC] = (void *)&t2_sched_p_exec,
// add more tail calls here.
},
};

/* choose a short name for bpftool debugging */
SEC("tp_btf/sched_process_exec")
int BPF_PROG(sched_p_exec, struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm) {
@@ -114,7 +145,7 @@ int BPF_PROG(sched_p_exec, struct task_struct *p, pid_t old_pid, struct linux_bi

/*=============================== COLLECT PARAMETERS ===========================*/

bpf_tail_call(ctx, &extra_event_prog_tail_table, T1_SCHED_PROC_EXEC);
bpf_tail_call(ctx, &extra_sched_proc_exec_calls, T1_SCHED_PROC_EXEC);
return 0;
}

@@ -234,11 +265,11 @@ int BPF_PROG(t1_sched_p_exec, struct task_struct *p, pid_t old_pid, struct linux

/*=============================== COLLECT PARAMETERS ===========================*/

bpf_tail_call(ctx, &extra_event_prog_tail_table, T2_SCHED_PROC_EXEC);
bpf_tail_call(ctx, &extra_sched_proc_exec_calls, T2_SCHED_PROC_EXEC);
return 0;
}

SEC("tp_btf/sys_exit")
SEC("tp_btf/sched_process_exec")
int BPF_PROG(t2_sched_p_exec, struct pt_regs *regs, long ret) {
struct auxiliary_map *auxmap = auxmap__get();
if(!auxmap) {
@@ -261,7 +292,7 @@ int BPF_PROG(t2_sched_p_exec, struct pt_regs *regs, long ret) {

auxmap__finalize_event_header(auxmap);

auxmap__submit_event(auxmap, ctx);
auxmap__submit_event(auxmap);
return 0;
}

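The forward-declaration comment earlier in this file is the heart of the new per-tracepoint tail-call tables: since BPF_PROG(name, ...) ends up defining a function called name that takes an unsigned long long *ctx, the tail-call targets can be forward-declared and wired into a BPF_MAP_TYPE_PROG_ARRAY at load time through the __array(values, ...) initializer, with no userspace fill-in step. A condensed sketch of the same pattern with illustrative names:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

enum example_codes {
    EXAMPLE_STEP_1,
    EXAMPLE_STEP_MAX,
};

/* Forward declaration matching what BPF_PROG(example_step_1, ...) defines. */
int example_step_1(unsigned long long *ctx);

struct {
    __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
    __uint(max_entries, EXAMPLE_STEP_MAX);
    __uint(key_size, sizeof(__u32));
    __array(values, int(void *));
} example_calls SEC(".maps") = {
    .values = {
        [EXAMPLE_STEP_1] = (void *)&example_step_1,
    },
};

SEC("tp_btf/sched_process_exec")
int BPF_PROG(example_entry, struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm) {
    /* Jump to the next step; execution falls through only if the call fails,
     * which is why the real programs log with bpf_printk() after this line. */
    bpf_tail_call(ctx, &example_calls, EXAMPLE_STEP_1);
    return 0;
}

/* Tail-call targets must share the caller's program type; the same attach
 * point is used here, mirroring the SEC fix for t2_sched_p_exec above. */
SEC("tp_btf/sched_process_exec")
int BPF_PROG(example_step_1, struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm) {
    return 0;
}
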
@@ -201,7 +201,7 @@ int BPF_PROG(sched_proc_exit, struct task_struct *task) {

auxmap__finalize_event_header(auxmap);

auxmap__submit_event(auxmap, ctx);
auxmap__submit_event(auxmap);

return 0;
}