From 141e298cf82a06cceaff9952a1d43cb5ef7c9d86 Mon Sep 17 00:00:00 2001
From: gray
Date: Fri, 8 Nov 2024 17:51:16 +0800
Subject: [PATCH 1/2] draft

Signed-off-by: gray
---
 bpf/kprobe_pwru.c       | 26 +++++++++++++++++++++++++-
 internal/pwru/config.go |  4 ++++
 internal/pwru/kprobe.go | 16 ++++++++++++++--
 internal/pwru/types.go  |  2 ++
 internal/pwru/utils.go  | 14 +++++++++-----
 main.go                 |  4 ++--
 6 files changed, 56 insertions(+), 10 deletions(-)

diff --git a/bpf/kprobe_pwru.c b/bpf/kprobe_pwru.c
index c91d0a87..3e9ec293 100644
--- a/bpf/kprobe_pwru.c
+++ b/bpf/kprobe_pwru.c
@@ -82,6 +82,7 @@ struct event_t {
 	u64 ts;
 	u64 print_skb_id;
 	u64 print_shinfo_id;
+	//u64 print_bpf_map_id;
 	struct skb_meta meta;
 	struct tuple tuple;
 	s64 print_stack_id;
@@ -143,7 +144,8 @@ struct config {
 	u8 output_shinfo: 1;
 	u8 output_stack: 1;
 	u8 output_caller: 1;
-	u8 output_unused: 2;
+	u8 output_bpf_map: 1;
+	u8 output_unused: 1;
 	u8 is_set: 1;
 	u8 track_skb: 1;
 	u8 track_skb_by_stackid: 1;
@@ -390,6 +392,21 @@ set_shinfo_btf(struct sk_buff *skb, u64 *event_id) {
 	bpf_map_update_elem(&print_shinfo_map, event_id, &v, BPF_ANY);
 }
 
+static __always_inline void
+set_bpf_map(struct pt_regs *ctx, u64 cookie, u64 *event_id) {
+	if (cookie == 1)
+		bpf_printk("bpf_map_lookup/delete");
+	else if (cookie == 2)
+		bpf_printk("bpf_map_update");
+
+	struct bpf_map *map = (struct bpf_map *)PT_REGS_PARM1(ctx);
+
+	char name[16];
+	BPF_CORE_READ_STR_INTO(&name, map, name);
+	bpf_printk(" name=%s key_size=%ld value_size=%ld\n", &name, BPF_CORE_READ(map, key_size), BPF_CORE_READ(map, value_size));
+	// TODO@gray: print/collect key and value hex
+}
+
 static __always_inline u64
 get_tracing_fp(void)
 {
@@ -521,6 +538,13 @@ kprobe_skb(struct sk_buff *skb, struct pt_regs *ctx, const bool has_get_func_ip,
 	if (CFG.output_caller)
 		bpf_probe_read_kernel(&event.caller_addr, sizeof(event.caller_addr), (void *)PT_REGS_SP(ctx));
 
+	if (CFG.output_bpf_map) {
+		// TODO@gray: kernel>=5.15
+		__u64 cookie = bpf_get_attach_cookie(ctx);
+		if (cookie)
+			set_bpf_map(ctx, cookie, NULL);
+	}
+
 	bpf_map_push_elem(&events, &event, BPF_EXIST);
 
 	return BPF_OK;
diff --git a/internal/pwru/config.go b/internal/pwru/config.go
index 1f7d8b23..6ada5fd6 100644
--- a/internal/pwru/config.go
+++ b/internal/pwru/config.go
@@ -22,6 +22,7 @@ const (
 	OutputShinfoMask
 	OutputStackMask
 	OutputCallerMask
+	OutputBpfMapMask
 )
 
 const (
@@ -69,6 +70,9 @@ func GetConfig(flags *Flags) (cfg FilterCfg, err error) {
 	if flags.OutputCaller {
 		cfg.OutputFlags |= OutputCallerMask
 	}
+	if flags.OutputBpfMap {
+		cfg.OutputFlags |= OutputBpfMapMask
+	}
 	if flags.FilterTrackSkb {
 		cfg.FilterFlags |= TrackSkbMask
 	}
diff --git a/internal/pwru/kprobe.go b/internal/pwru/kprobe.go
index 3fbc2a54..fb4a0beb 100644
--- a/internal/pwru/kprobe.go
+++ b/internal/pwru/kprobe.go
@@ -16,6 +16,7 @@ import (
 
 	"github.com/cheggaaa/pb/v3"
 	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/btf"
 	"github.com/cilium/ebpf/link"
 	"golang.org/x/sync/errgroup"
 )
@@ -247,7 +248,7 @@ func NewKprober(ctx context.Context, funcs Funcs, coll *ebpf.Collection, a2n Add
 	return &k
 }
 
-func NewNonSkbFuncsKprober(nonSkbFuncs []string, funcs Funcs, coll *ebpf.Collection) *kprober {
+func NewNonSkbFuncsKprober(nonSkbFuncs []string, funcs Funcs, bpfMapFuncs map[string]*btf.FuncProto, coll *ebpf.Collection) *kprober {
 	slices.Sort(nonSkbFuncs)
 	nonSkbFuncs = slices.Compact(nonSkbFuncs)
 
@@ -264,7 +265,18 @@ func NewNonSkbFuncsKprober(nonSkbFuncs []string, funcs Funcs, coll *ebpf.Collect
 			continue
 		}
 
-		kp, err := link.Kprobe(fn, coll.Programs["kprobe_skb_by_stackid"], nil)
+		var cookie uint64
+		if proto, ok := bpfMapFuncs[fn]; ok {
+			cookie = 1 // bpf_map_lookup/delete: helper takes only a map key
+			for _, p := range proto.Params {
+				if p.Name == "value" {
+					cookie = 2 // bpf_map_update: helper takes a value as well
+				}
+			}
+		}
+
+		opts := &link.KprobeOptions{Cookie: cookie}
+		kp, err := link.Kprobe(fn, coll.Programs["kprobe_skb_by_stackid"], opts)
 		if err != nil {
 			if errors.Is(err, os.ErrNotExist) {
 				continue
diff --git a/internal/pwru/types.go b/internal/pwru/types.go
index b6276487..f238789b 100644
--- a/internal/pwru/types.go
+++ b/internal/pwru/types.go
@@ -45,6 +45,7 @@ type Flags struct {
 	OutputShinfo     bool
 	OutputStack      bool
 	OutputCaller     bool
+	OutputBpfMap     bool
 	OutputLimitLines uint64
 	OutputFile       string
 	OutputJson       bool
@@ -81,6 +82,7 @@ func (f *Flags) SetFlags() {
 	flag.BoolVar(&f.OutputShinfo, "output-skb-shared-info", false, "print skb shared info")
 	flag.BoolVar(&f.OutputStack, "output-stack", false, "print stack")
 	flag.BoolVar(&f.OutputCaller, "output-caller", false, "print caller function name")
+	flag.BoolVar(&f.OutputBpfMap, "output-bpf-map", false, "print bpf helper arguments related to bpf maps")
 	flag.Uint64Var(&f.OutputLimitLines, "output-limit-lines", 0, "exit the program after the number of events has been received/printed")
 	flag.StringVar(&f.OutputFile, "output-file", "", "write traces to file")
 
diff --git a/internal/pwru/utils.go b/internal/pwru/utils.go
index eacae941..7ed2e29a 100644
--- a/internal/pwru/utils.go
+++ b/internal/pwru/utils.go
@@ -42,8 +42,9 @@ func getAvailableFilterFunctions() (map[string]struct{}, error) {
 	return availableFuncs, nil
 }
 
-func GetFuncs(pattern string, spec *btf.Spec, kmods []string, kprobeMulti bool) (Funcs, error) {
+func GetFuncs(pattern string, spec *btf.Spec, kmods []string, kprobeMulti, bpfMap bool) (Funcs, map[string]*btf.FuncProto, error) {
 	funcs := Funcs{}
+	bpfMapFuncs := make(map[string]*btf.FuncProto)
 
 	type iterator struct {
 		kmod string
@@ -52,7 +53,7 @@ func GetFuncs(pattern string, spec *btf.Spec, kmods []string, kprobeMulti bool)
 
 	reg, err := regexp.Compile(pattern)
 	if err != nil {
-		return nil, fmt.Errorf("failed to compile regular expression %v", err)
+		return nil, nil, fmt.Errorf("failed to compile regular expression: %v", err)
 	}
 
 	var availableFuncs map[string]struct{}
@@ -68,13 +69,13 @@ func GetFuncs(pattern string, spec *btf.Spec, kmods []string, kprobeMulti bool)
 		path := filepath.Join("/sys/kernel/btf", module)
 		f, err := os.Open(path)
 		if err != nil {
-			return nil, fmt.Errorf("failed to open %s: %v", path, err)
+			return nil, nil, fmt.Errorf("failed to open %s: %v", path, err)
 		}
 		defer f.Close()
 
 		modSpec, err := btf.LoadSplitSpecFromReader(f, spec)
 		if err != nil {
-			return nil, fmt.Errorf("failed to load %s btf: %v", module, err)
+			return nil, nil, fmt.Errorf("failed to load %s btf: %v", module, err)
 		}
 		iters = append(iters, iterator{module, modSpec.Iterate()})
 	}
@@ -108,6 +109,9 @@ func GetFuncs(pattern string, spec *btf.Spec, kmods []string, kprobeMulti bool)
 		for _, p := range fnProto.Params {
 			if ptr, ok := p.Type.(*btf.Pointer); ok {
 				if strct, ok := ptr.Target.(*btf.Struct); ok {
+					if bpfMap && strct.Name == "bpf_map" {
+						bpfMapFuncs[fnName] = fnProto
+					}
 					if strct.Name == "sk_buff" && i <= 5 {
 						name := fnName
 						if kprobeMulti && it.kmod != "" {
@@ -123,7 +127,7 @@ func GetFuncs(pattern string, spec *btf.Spec, kmods []string, kprobeMulti bool)
 			}
 		}
 	}
-	return funcs, nil
+	return funcs, bpfMapFuncs, nil
 }
 
 func GetFuncsByPos(funcs Funcs) map[int][]string {
diff --git a/main.go b/main.go
index fde15017..c9b68e01 100644
--- a/main.go
+++ b/main.go
@@ -98,7 +98,7 @@ func main() {
 		useKprobeMulti = true
 	}
 
-	funcs, err := pwru.GetFuncs(flags.FilterFunc, btfSpec, flags.KMods, useKprobeMulti)
+	funcs, bpfMapFuncs, err := pwru.GetFuncs(flags.FilterFunc, btfSpec, flags.KMods, useKprobeMulti, flags.OutputBpfMap)
 	if err != nil {
 		log.Fatalf("Failed to get skb-accepting functions: %s", err)
 	}
@@ -265,7 +265,7 @@ func main() {
 	}
 
 	if nonSkbFuncs := flags.FilterNonSkbFuncs; len(nonSkbFuncs) != 0 {
-		k := pwru.NewNonSkbFuncsKprober(nonSkbFuncs, funcs, coll)
+		k := pwru.NewNonSkbFuncsKprober(nonSkbFuncs, funcs, bpfMapFuncs, coll)
 		defer k.DetachKprobes()
 	}

From e6de8f9c20815b457c5999c875c05f2fe1f0a4db Mon Sep 17 00:00:00 2001
From: gray
Date: Mon, 2 Dec 2024 17:37:01 +0800
Subject: [PATCH 2/2] bpf: add kprobe/kretprobe for bpf_map_lookup/update/delete

---
 bpf/kprobe_pwru.c | 162 ++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 150 insertions(+), 12 deletions(-)

diff --git a/bpf/kprobe_pwru.c b/bpf/kprobe_pwru.c
index 3e9ec293..c234b30d 100644
--- a/bpf/kprobe_pwru.c
+++ b/bpf/kprobe_pwru.c
@@ -82,7 +82,7 @@ struct event_t {
 	u64 ts;
 	u64 print_skb_id;
 	u64 print_shinfo_id;
-	//u64 print_bpf_map_id;
+	u64 print_bpfmap_id;
 	struct skb_meta meta;
 	struct tuple tuple;
 	s64 print_stack_id;
@@ -144,8 +144,7 @@ struct config {
 	u8 output_shinfo: 1;
 	u8 output_stack: 1;
 	u8 output_caller: 1;
-	u8 output_bpf_map: 1;
-	u8 output_unused: 1;
+	u8 output_unused: 2;
 	u8 is_set: 1;
 	u8 track_skb: 1;
 	u8 track_skb_by_stackid: 1;
@@ -166,6 +165,13 @@ struct {
 	__uint(value_size, MAX_STACK_DEPTH * sizeof(u64));
 } print_stack_map SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, u32);
+	__type(value, struct event_t);
+} event_stash SEC(".maps");
+
 struct print_skb_value {
 	u32 len;
 	char str[PRINT_SKB_STR_SIZE];
@@ -199,6 +205,28 @@ struct {
 	__type(value, struct print_shinfo_value);
 } print_shinfo_map SEC(".maps");
 
+struct print_bpfmap_value {
+	u32 id;
+	char name[16];
+	u32 key_size;
+	u32 value_size;
+	u8 key[128];
+	u8 value[128];
+} __attribute__((packed));
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, u32);
+	__type(value, u32);
+} print_bpfmap_id_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u64);
+	__type(value, struct print_bpfmap_value);
+} print_bpfmap_map SEC(".maps");
+
 static __always_inline u32
 get_netns(struct sk_buff *skb) {
 	u32 netns = BPF_CORE_READ(skb, dev, nd_net.net, ns.inum);
@@ -517,6 +545,7 @@ handle_everything(struct sk_buff *skb, void *ctx, struct event_t *event, u64 *_s
 		bpf_map_update_elem(&skb_stackid, &skb, &stackid, BPF_ANY);
 	}
 
+	event->skb_addr = (u64) skb;
 	event->pid = bpf_get_current_pid_tgid() >> 32;
 	event->ts = bpf_ktime_get_ns();
 	event->cpu_id = bpf_get_smp_processor_id();
@@ -531,20 +560,12 @@ kprobe_skb(struct sk_buff *skb, struct pt_regs *ctx, const bool has_get_func_ip,
 	if (!handle_everything(skb, ctx, &event, _stackid, true))
 		return BPF_OK;
 
-	event.skb_addr = (u64) skb;
 	event.addr = has_get_func_ip ? bpf_get_func_ip(ctx) : PT_REGS_IP(ctx);
 	event.param_second = PT_REGS_PARM2(ctx);
 	event.param_third = PT_REGS_PARM3(ctx);
 	if (CFG.output_caller)
 		bpf_probe_read_kernel(&event.caller_addr, sizeof(event.caller_addr), (void *)PT_REGS_SP(ctx));
 
-	if (CFG.output_bpf_map) {
-		// TODO@gray: kernel>=5.15
-		__u64 cookie = bpf_get_attach_cookie(ctx);
-		if (cookie)
-			set_bpf_map(ctx, cookie, NULL);
-	}
-
 	bpf_map_push_elem(&events, &event, BPF_EXIST);
 
 	return BPF_OK;
@@ -631,7 +652,6 @@ int BPF_PROG(fentry_tc, struct sk_buff *skb) {
 	if (!handle_everything(skb, ctx, &event, NULL, false))
 		return BPF_OK;
 
-	event.skb_addr = (u64) skb;
 	event.addr = BPF_PROG_ADDR;
 	event.type = EVENT_TYPE_TC;
 	bpf_map_push_elem(&events, &event, BPF_EXIST);
@@ -781,4 +801,122 @@ int kretprobe_veth_convert_skb_to_xdp_buff(struct pt_regs *ctx) {
 	return BPF_OK;
 }
 
+static __always_inline void
+set_common_bpfmap_info(struct pt_regs *ctx, u64 *event_id,
+		       struct print_bpfmap_value *map_value) {
+	*event_id = sync_fetch_and_add(&print_bpfmap_id_map);
+
+	struct bpf_map *map = (struct bpf_map *)PT_REGS_PARM1(ctx);
+	BPF_CORE_READ_INTO(&map_value->id, map, id);
+	BPF_CORE_READ_STR_INTO(&map_value->name, map, name);
+	BPF_CORE_READ_INTO(&map_value->key_size, map, key_size);
+	BPF_CORE_READ_INTO(&map_value->value_size, map, value_size);
+	bpf_probe_read_kernel(&map_value->key, sizeof(map_value->key), (void *)PT_REGS_PARM2(ctx));
+}
+
+SEC("kprobe/bpf_map_lookup")
+int kprobe_bpf_map_lookup(struct pt_regs *ctx) {
+	u64 stackid = get_stackid(ctx, true);
+
+	struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
+	if (skb && *skb) {
+		struct event_t event = {};
+
+		event.addr = PT_REGS_IP(ctx);
+		if (!handle_everything(*skb, ctx, &event, &stackid, true))
+			return BPF_OK;
+
+		if (CFG.output_caller)
+			bpf_probe_read_kernel(&event.caller_addr,
+					      sizeof(event.caller_addr),
+					      (void *)PT_REGS_SP(ctx));
+
+		struct print_bpfmap_value map_value = {};
+		set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &map_value);
+
+		bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &map_value, BPF_ANY);
+		bpf_map_update_elem(&event_stash, &ZERO, &event, BPF_ANY);
+	}
+
+	return BPF_OK;
+}
+
+SEC("kretprobe/bpf_map_lookup")
+int kretprobe_bpf_map_lookup(struct pt_regs *ctx) {
+	struct event_t *event = bpf_map_lookup_elem(&event_stash, &ZERO);
+	if (!event || !event->skb_addr)
+		return BPF_OK;
+
+	struct print_bpfmap_value *map_value = bpf_map_lookup_elem(&print_bpfmap_map,
+								   &event->print_bpfmap_id);
+	if (!map_value)
+		return BPF_OK;
+
+	bpf_probe_read_kernel(&map_value->value,
+			      sizeof(map_value->value),
+			      (void *)PT_REGS_RC(ctx));
+
+	bpf_map_push_elem(&events, event, BPF_EXIST);
+	event->skb_addr = 0;	// consume the stash so unrelated returns don't re-push it
+	return BPF_OK;
+}
+
+SEC("kprobe/bpf_map_update")
+int kprobe_bpf_map_update(struct pt_regs *ctx) {
+	u64 stackid = get_stackid(ctx, true);
+
+	struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
+	if (skb && *skb) {
+		struct event_t event = {};
+
+		event.addr = PT_REGS_IP(ctx);
+		if (!handle_everything(*skb, ctx, &event, &stackid, true))
+			return BPF_OK;
+
+		if (CFG.output_caller)
+			bpf_probe_read_kernel(&event.caller_addr,
+					      sizeof(event.caller_addr),
+					      (void *)PT_REGS_SP(ctx));
+
+		// static: keeps this ~284-byte struct off the 512-byte BPF stack
+		static struct print_bpfmap_value map_value = {};
+		set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &map_value);
+
+		bpf_probe_read_kernel(&map_value.value,
+				      sizeof(map_value.value),
+				      (void *)PT_REGS_PARM3(ctx));
+		bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &map_value, BPF_ANY);
+		bpf_map_push_elem(&events, &event, BPF_EXIST);
+	}
+
+	return BPF_OK;
+}
+
+SEC("kprobe/bpf_map_delete")
+int kprobe_bpf_map_delete(struct pt_regs *ctx) {
+	u64 stackid = get_stackid(ctx, true);
+
+	struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
+	if (skb && *skb) {
+		struct event_t event = {};
+
+		event.addr = PT_REGS_IP(ctx);
+		if (!handle_everything(*skb, ctx, &event, &stackid, true))
+			return BPF_OK;
+
+		if (CFG.output_caller)
+			bpf_probe_read_kernel(&event.caller_addr,
+					      sizeof(event.caller_addr),
+					      (void *)PT_REGS_SP(ctx));
+
+		static struct print_bpfmap_value map_value = {};
+		set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &map_value);
+
+		bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &map_value, BPF_ANY);
+		bpf_map_push_elem(&events, &event, BPF_EXIST);
+	}
+
+	return BPF_OK;
+}
+
 char __license[] SEC("license") = "Dual BSD/GPL";
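--
Note: the series adds the BPF-side print_bpfmap_value record and its maps, but
does not yet extend pwru's Go output path to decode and print them. As a
minimal sketch of that missing piece, assuming the map is exposed to userspace
under its section name "print_bpfmap_map" (all Go identifiers below are
hypothetical, not part of this series; the struct mirrors the packed 284-byte
C layout, which Go reproduces here without implicit padding):

	package pwru

	import (
		"fmt"
		"strings"

		"github.com/cilium/ebpf"
	)

	// printBpfmapValue mirrors struct print_bpfmap_value in bpf/kprobe_pwru.c.
	type printBpfmapValue struct {
		ID        uint32
		Name      [16]byte
		KeySize   uint32
		ValueSize uint32
		Key       [128]byte
		Value     [128]byte
	}

	// getBpfmapValue fetches the record stashed under an event's
	// print_bpfmap_id, the same way print_skb/print_shinfo ids are resolved.
	func getBpfmapValue(coll *ebpf.Collection, eventID uint64) (*printBpfmapValue, error) {
		m, ok := coll.Maps["print_bpfmap_map"]
		if !ok {
			return nil, fmt.Errorf("print_bpfmap_map not found in collection")
		}
		var v printBpfmapValue
		if err := m.Lookup(&eventID, &v); err != nil {
			return nil, fmt.Errorf("failed to lookup print_bpfmap_map: %w", err)
		}
		return &v, nil
	}

	// String renders the record; only the first KeySize/ValueSize bytes of
	// the fixed 128-byte buffers are meaningful. min requires Go 1.21+.
	func (v *printBpfmapValue) String() string {
		name := strings.TrimRight(string(v.Name[:]), "\x00")
		k := min(int(v.KeySize), len(v.Key))
		val := min(int(v.ValueSize), len(v.Value))
		return fmt.Sprintf("map=%s id=%d key=%x value=%x", name, v.ID, v.Key[:k], v.Value[:val])
	}

Since bpf_map_lookup's value is only known on return, the BPF side stashes the
half-filled event in event_stash and pushes it from the kretprobe; userspace
needs no special handling for that and simply sees one event carrying a valid
print_bpfmap_id.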