From 059997a1f575757513eeb84b7797aaa3dbf5cf85 Mon Sep 17 00:00:00 2001 From: gray Date: Mon, 3 Jun 2024 16:38:50 +0800 Subject: [PATCH] bpf: Differentiate skb tracking reasons By differentiating the skb track reason (filter/skb/stackid), we can avoid a mismatching issue that leads to confusing output. I saw an issue when --filter-track-skb and --filter-track-skb-by-stackid are used together: 1. At func1, an skb matches the pcap filter; we then fill the map stackid_skb[stackid] = skb for --filter-track-skb-by-stackid; 2. At func2, the skb arg doesn't match the pcap filter, but its stackid can be found in map stackid_skb. We therefore store the map entry skb_addresses[skb] = true for --filter-track-skb; 3. For some reason, this skb never gets freed by kfree_skbmem(), so pwru keeps matching "wrong" packets due to a permanent entry in map skb_addresses for --filter-track-skb; This patch mitigates these risks by not storing a new entry in skb_addresses if the skb is tracked by stackid. This doesn't harm the original purpose of --filter-track-skb-by-stackid, which is designed for non-skb function tracing on the same stack. On the other hand, calling bpf_map_update_elem() only for specific track reasons improves performance a little bit. For example, if an skb is already in map skb_addresses, now we don't need to put it in the map again. 
Signed-off-by: gray --- bpf/kprobe_pwru.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/bpf/kprobe_pwru.c b/bpf/kprobe_pwru.c index 2e6b8845..43896598 100644 --- a/bpf/kprobe_pwru.c +++ b/bpf/kprobe_pwru.c @@ -18,6 +18,12 @@ const static bool TRUE = true; volatile const static __u64 BPF_PROG_ADDR = 0; +enum { + TRACKED_BY_FILTER = (1 << 0), + TRACKED_BY_SKB = (1 << 1), + TRACKED_BY_STACKID = (1 << 2), +}; + union addr { u32 v4addr; struct { @@ -375,7 +381,7 @@ set_output(void *ctx, struct sk_buff *skb, struct event_t *event) { static __noinline bool handle_everything(struct sk_buff *skb, void *ctx, struct event_t *event, u64 *_stackid) { - bool tracked_by_addr = false; + u8 tracked_by; u64 skb_addr = (u64) skb; u64 stackid; @@ -384,27 +390,31 @@ handle_everything(struct sk_buff *skb, void *ctx, struct event_t *event, u64 *_s if (cfg->is_set) { if (cfg->track_skb && bpf_map_lookup_elem(&skb_addresses, &skb_addr)) { - tracked_by_addr = true; + tracked_by = _stackid ? TRACKED_BY_STACKID : TRACKED_BY_SKB; goto cont; } if (cfg->track_skb_by_stackid && bpf_map_lookup_elem(&stackid_skb, &stackid)) { + tracked_by = TRACKED_BY_STACKID; goto cont; } - if (!filter(skb)) { - return false; + if (filter(skb)) { + tracked_by = TRACKED_BY_FILTER; + goto cont; } + return false; + cont: set_output(ctx, skb, event); } - if (cfg->track_skb && !tracked_by_addr) { + if (cfg->track_skb && tracked_by == TRACKED_BY_FILTER) { bpf_map_update_elem(&skb_addresses, &skb_addr, &TRUE, BPF_ANY); } - if (cfg->track_skb_by_stackid) { + if (cfg->track_skb_by_stackid && tracked_by != TRACKED_BY_STACKID) { u64 *old_stackid = bpf_map_lookup_elem(&skb_stackid, &skb); if (old_stackid && *old_stackid != stackid) { bpf_map_delete_elem(&stackid_skb, old_stackid);