From bfd14412f9ad935cabc55a54836546b11e2423c7 Mon Sep 17 00:00:00 2001
From: Tao Chen
Date: Mon, 20 Oct 2025 01:01:17 +0800
Subject: [PATCH 1/2] perf: Use extern perf_callchain_entry for get_perf_callchain

From the BPF stack map side, we want to use our own buffer to avoid an
unnecessary copy, so let the caller pass an external entry directly.
BPF will use this in the next patch.

Signed-off-by: Tao Chen
---
 include/linux/perf_event.h |  4 ++--
 kernel/bpf/stackmap.c      |  4 ++--
 kernel/events/callchain.c  | 13 +++++++++----
 kernel/events/core.c       |  2 +-
 4 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fd1d91017b99b..b144da7d80394 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1719,8 +1719,8 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark);
+get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
+		   bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4d53cdd1374cf..94e46b7f34077 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -314,7 +314,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	if (max_depth > sysctl_perf_event_max_stack)
 		max_depth = sysctl_perf_event_max_stack;
 
-	trace = get_perf_callchain(regs, kernel, user, max_depth,
-				   false, false);
+	trace = get_perf_callchain(regs, NULL, kernel, user, max_depth,
+				   false, false);
 
 	if (unlikely(!trace))
@@ -451,7 +451,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	else if (kernel && task)
 		trace = get_callchain_entry_for_task(task, max_depth);
 	else
-		trace = get_perf_callchain(regs, kernel, user, max_depth,
-					   crosstask, false);
+		trace = get_perf_callchain(regs, NULL, kernel, user, max_depth,
+					   crosstask, false);
 
 	if (unlikely(!trace) || trace->nr < skip) {
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 808c0d7a31faf..851e8f9d026f5 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -217,8 +217,8 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
 }
 
 struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark)
+get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
+		   bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
@@ -228,7 +228,11 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 	if (crosstask && user && !kernel)
 		return NULL;
 
-	entry = get_callchain_entry(&rctx);
+	if (external_entry)
+		entry = external_entry;
+	else
+		entry = get_callchain_entry(&rctx);
+
 	if (!entry)
 		return NULL;
 
@@ -260,7 +264,8 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 	}
 
 exit_put:
-	put_callchain_entry(rctx);
+	if (!external_entry)
+		put_callchain_entry(rctx);
 
 	return entry;
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7541f6f85fcb0..5d8e146003ac3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8217,7 +8217,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!kernel && !user)
 		return &__empty_callchain;
 
-	callchain = get_perf_callchain(regs, kernel, user,
-				       max_stack, crosstask, true);
+	callchain = get_perf_callchain(regs, NULL, kernel, user,
+				       max_stack, crosstask, true);
 	return callchain ?: &__empty_callchain;
 }

From 83c1fb877fd1eabff618ba8ffea2fa3caa9d546f Mon Sep 17 00:00:00 2001
From: Tao Chen
Date: Mon, 20 Oct 2025 01:01:18 +0800
Subject: [PATCH 2/2] bpf: Use per-cpu BPF callchain entry to save callchain

As Alexei noted, the entry returned by get_perf_callchain() may be
reused if a task is preempted after the BPF program enters the
migrate-disabled state. Drawing on the per-CPU design of
bpf_bprintf_buffers, per-CPU BPF callchain entries are used here.

Signed-off-by: Tao Chen
---
 kernel/bpf/stackmap.c | 98 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 74 insertions(+), 24 deletions(-)

diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 94e46b7f34077..3513077c57d5a 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -31,6 +31,52 @@ struct bpf_stack_map {
 	struct stack_map_bucket *buckets[] __counted_by(n_buckets);
 };
 
+struct bpf_perf_callchain_entry {
+	u64 nr;
+	u64 ip[PERF_MAX_STACK_DEPTH];
+};
+
+#define MAX_PERF_CALLCHAIN_PREEMPT 3
+static DEFINE_PER_CPU(struct bpf_perf_callchain_entry[MAX_PERF_CALLCHAIN_PREEMPT],
+		      bpf_perf_callchain_entries);
+static DEFINE_PER_CPU(int, bpf_perf_callchain_preempt_cnt);
+
+static int bpf_get_perf_callchain_or_entry(struct perf_callchain_entry **entry,
+					   struct pt_regs *regs, bool kernel,
+					   bool user, u32 max_stack, bool crosstask,
+					   bool add_mark, bool get_callchain)
+{
+	struct bpf_perf_callchain_entry *bpf_entry;
+	struct perf_callchain_entry *perf_entry;
+	int preempt_cnt;
+
+	preempt_cnt = this_cpu_inc_return(bpf_perf_callchain_preempt_cnt);
+	if (WARN_ON_ONCE(preempt_cnt > MAX_PERF_CALLCHAIN_PREEMPT)) {
+		this_cpu_dec(bpf_perf_callchain_preempt_cnt);
+		return -EBUSY;
+	}
+
+	bpf_entry = this_cpu_ptr(&bpf_perf_callchain_entries[preempt_cnt - 1]);
+	if (!get_callchain) {
+		*entry = (struct perf_callchain_entry *)bpf_entry;
+		return 0;
+	}
+
+	perf_entry = get_perf_callchain(regs, (struct perf_callchain_entry *)bpf_entry,
+					kernel, user, max_stack,
+					crosstask, add_mark);
+	*entry = perf_entry;
+
+	return 0;
+}
+
+static void bpf_put_perf_callchain(void)
+{
+	if (WARN_ON_ONCE(this_cpu_read(bpf_perf_callchain_preempt_cnt) == 0))
+		return;
+	this_cpu_dec(bpf_perf_callchain_preempt_cnt);
+}
+
 static inline bool stack_map_use_build_id(struct bpf_map *map)
 {
 	return (map->map_flags & BPF_F_STACK_BUILD_ID);
@@ -192,11 +238,11 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 {
 #ifdef CONFIG_STACKTRACE
 	struct perf_callchain_entry *entry;
-	int rctx;
-
-	entry = get_callchain_entry(&rctx);
+	int ret;
 
-	if (!entry)
+	ret = bpf_get_perf_callchain_or_entry(&entry, NULL, false, false, 0, false, false,
+					      false);
+	if (ret)
 		return NULL;
 
 	entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
@@ -216,7 +262,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 		to[i] = (u64)(from[i]);
 	}
 
-	put_callchain_entry(rctx);
+	bpf_put_perf_callchain();
 
 	return entry;
 #else /* CONFIG_STACKTRACE */
@@ -305,6 +351,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	bool user = flags & BPF_F_USER_STACK;
 	struct perf_callchain_entry *trace;
 	bool kernel = !user;
+	int err;
 
 	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
@@ -314,14 +361,15 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	if (max_depth > sysctl_perf_event_max_stack)
 		max_depth = sysctl_perf_event_max_stack;
 
-	trace = get_perf_callchain(regs, NULL, kernel, user, max_depth,
-				   false, false);
+	err = bpf_get_perf_callchain_or_entry(&trace, regs, kernel, user, max_depth,
+					      false, false, true);
+	if (err)
+		return err;
 
-	if (unlikely(!trace))
-		/* couldn't fetch the stack trace */
-		return -EFAULT;
+	err = __bpf_get_stackid(map, trace, flags);
+	bpf_put_perf_callchain();
 
-	return __bpf_get_stackid(map, trace, flags);
+	return err;
 }
 
 const struct bpf_func_proto bpf_get_stackid_proto = {
@@ -443,20 +491,23 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	if (sysctl_perf_event_max_stack < max_depth)
 		max_depth = sysctl_perf_event_max_stack;
 
-	if (may_fault)
-		rcu_read_lock(); /* need RCU for perf's callchain below */
-
 	if (trace_in)
 		trace = trace_in;
-	else if (kernel && task)
+	else if (kernel && task) {
 		trace = get_callchain_entry_for_task(task, max_depth);
-	else
-		trace = get_perf_callchain(regs, NULL, kernel, user, max_depth,
-					   crosstask, false);
+	} else {
+		err = bpf_get_perf_callchain_or_entry(&trace, regs, kernel, user, max_depth,
+						      false, false, true);
+		if (err)
+			return err;
+	}
+
+	if (unlikely(!trace))
+		goto err_fault;
 
-	if (unlikely(!trace) || trace->nr < skip) {
-		if (may_fault)
-			rcu_read_unlock();
+	if (trace->nr < skip) {
+		if (!trace_in)
+			bpf_put_perf_callchain();
 		goto err_fault;
 	}
@@ -475,9 +526,8 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 		memcpy(buf, ips, copy_len);
 	}
 
-	/* trace/ips should not be dereferenced after this point */
-	if (may_fault)
-		rcu_read_unlock();
+	if (!trace_in)
+		bpf_put_perf_callchain();
 
 	if (user_build_id)
 		stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
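
For readers who want the gist of patch 2 without the kernel plumbing, here is a
minimal userspace sketch of the bounded-buffer pattern it borrows from
bpf_bprintf_buffers: a nesting counter hands out one preallocated slot per
nested user and refuses service once the limit is hit. The thread-local
variables standing in for DEFINE_PER_CPU, and all names below, are illustrative
assumptions rather than kernel APIs.

/* build with: cc -std=c11 sketch.c */
#include <stdio.h>
#include <stdint.h>

#define MAX_NESTING 3     /* mirrors MAX_PERF_CALLCHAIN_PREEMPT */
#define MAX_DEPTH   127

struct callchain_buf {
	uint64_t nr;
	uint64_t ip[MAX_DEPTH];
};

/* stand-in for the per-CPU arrays: one set of slots per thread */
static _Thread_local struct callchain_buf bufs[MAX_NESTING];
static _Thread_local int nesting;

/* hand out a private slot, or NULL once the nesting limit is reached */
static struct callchain_buf *get_buf(void)
{
	if (nesting >= MAX_NESTING)
		return NULL;            /* corresponds to the -EBUSY path */
	return &bufs[nesting++];
}

/* release the most recently handed-out slot */
static void put_buf(void)
{
	if (nesting > 0)
		nesting--;
}

int main(void)
{
	struct callchain_buf *a = get_buf();    /* outermost user */
	struct callchain_buf *b = get_buf();    /* e.g. a nested/preempting user */
	struct callchain_buf *c = get_buf();
	struct callchain_buf *d = get_buf();    /* fourth nested user: refused */

	printf("a=%p b=%p c=%p d=%p\n", (void *)a, (void *)b, (void *)c, (void *)d);

	put_buf();
	put_buf();
	put_buf();
	return 0;
}

In the patch itself the same role is played by bpf_perf_callchain_entries[]
plus bpf_perf_callchain_preempt_cnt, with this_cpu_inc_return()/this_cpu_dec()
maintaining the counter and -EBUSY returned when more than three users nest on
one CPU.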