[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240813223014.1a5093ede1a5046aaedea34a@kernel.org>
Date: Tue, 13 Aug 2024 22:30:14 +0900
From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
To: Andrii Nakryiko <andrii@...nel.org>
Cc: linux-trace-kernel@...r.kernel.org, rostedt@...dmis.org,
peterz@...radead.org, oleg@...hat.com, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org, jolsa@...nel.org
Subject: Re: [PATCH v2] uprobes: make trace_uprobe->nhit counter a per-CPU
one
On Fri, 9 Aug 2024 12:23:57 -0700
Andrii Nakryiko <andrii@...nel.org> wrote:
> trace_uprobe->nhit counter is not incremented atomically, so its value
> is questionable when a uprobe is hit on multiple CPUs simultaneously.
>
> Also, doing this shared counter increment across many CPUs causes heavy
> cache line bouncing, limiting uprobe/uretprobe performance scaling with
> number of CPUs.
>
> Solve both problems by making this a per-CPU counter.
>
This looks good to me. I would like to pick this to linux-trace/probes/for-next.
> @@ -62,7 +63,7 @@ struct trace_uprobe {
> struct uprobe *uprobe;
BTW, what is this change? I couldn't cleanly apply this to the v6.11-rc3.
Which tree are you working on? (Did I miss something?)
Thanks,
> unsigned long offset;
> unsigned long ref_ctr_offset;
> - unsigned long nhit;
> + unsigned long __percpu *nhits;
> struct trace_probe tp;
> };
>
> @@ -337,6 +338,12 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
> if (!tu)
> return ERR_PTR(-ENOMEM);
>
> + tu->nhits = alloc_percpu(unsigned long);
> + if (!tu->nhits) {
> + ret = -ENOMEM;
> + goto error;
> + }
> +
> ret = trace_probe_init(&tu->tp, event, group, true, nargs);
> if (ret < 0)
> goto error;
> @@ -349,6 +356,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
> return tu;
>
> error:
> + free_percpu(tu->nhits);
> kfree(tu);
>
> return ERR_PTR(ret);
> @@ -362,6 +370,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
> path_put(&tu->path);
> trace_probe_cleanup(&tu->tp);
> kfree(tu->filename);
> + free_percpu(tu->nhits);
> kfree(tu);
> }
>
> @@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
> {
> struct dyn_event *ev = v;
> struct trace_uprobe *tu;
> + unsigned long nhits;
> + int cpu;
>
> if (!is_trace_uprobe(ev))
> return 0;
>
> tu = to_trace_uprobe(ev);
> +
> + nhits = 0;
> + for_each_possible_cpu(cpu) {
> + nhits += READ_ONCE(*per_cpu_ptr(tu->nhits, cpu));
> + }
> +
> seq_printf(m, " %s %-44s %15lu\n", tu->filename,
> - trace_probe_name(&tu->tp), tu->nhit);
> + trace_probe_name(&tu->tp), nhits);
> return 0;
> }
>
> @@ -1507,7 +1524,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
> int ret = 0;
>
> tu = container_of(con, struct trace_uprobe, consumer);
> - tu->nhit++;
> +
> + this_cpu_inc(*tu->nhits);
>
> udd.tu = tu;
> udd.bp_addr = instruction_pointer(regs);
> --
> 2.43.5
>
--
Masami Hiramatsu (Google) <mhiramat@...nel.org>
Powered by blists - more mailing lists