Message-ID: <CAEf4Bzb+LT2nTTjVXi3ATu9AsYSxZJr2XzegA09Cm8izNG=grg@mail.gmail.com>
Date: Wed, 23 Apr 2025 10:42:43 -0700
From: Andrii Nakryiko <andrii.nakryiko@...il.com>
To: Jiri Olsa <jolsa@...nel.org>
Cc: Oleg Nesterov <oleg@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
Andrii Nakryiko <andrii@...nel.org>, bpf@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org, x86@...nel.org,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>, Hao Luo <haoluo@...gle.com>,
Steven Rostedt <rostedt@...dmis.org>, Masami Hiramatsu <mhiramat@...nel.org>,
Alan Maguire <alan.maguire@...cle.com>, David Laight <David.Laight@...lab.com>,
Thomas Weißschuh <thomas@...ch.de>,
Ingo Molnar <mingo@...nel.org>
Subject: Re: [PATCH perf/core 15/22] selftests/bpf: Add hit/attach/detach race
optimized uprobe test
On Mon, Apr 21, 2025 at 2:47 PM Jiri Olsa <jolsa@...nel.org> wrote:
>
> Adding a test that makes sure parallel execution of the uprobe and
> attach/detach of an optimized uprobe on it works properly.
>
> Signed-off-by: Jiri Olsa <jolsa@...nel.org>
> ---
> .../selftests/bpf/prog_tests/uprobe_syscall.c | 74 +++++++++++++++++++
> 1 file changed, 74 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
> index 16effe0bca1d..57ef1207c3f5 100644
> --- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
> +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
> @@ -619,6 +619,78 @@ static void test_uretprobe_shadow_stack(void)
> ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
> }
>
> +static volatile bool race_stop;
> +
> +static void *worker_trigger(void *arg)
> +{
> + unsigned long rounds = 0;
> +
> + while (!race_stop) {
> + uprobe_test();
> + rounds++;
> + }
> +
> + printf("tid %d trigger rounds: %lu\n", gettid(), rounds);
> + return NULL;
> +}
> +
> +static void *worker_attach(void *arg)
> +{
> + struct uprobe_syscall_executed *skel;
> + unsigned long rounds = 0, offset;
> +
> + offset = get_uprobe_offset(&uprobe_test);
> + if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
> + return NULL;
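btw, offset is declared unsigned long here, so this ASSERT_GE() can
never fail. IIRC get_uprobe_offset() returns ssize_t, so something
along these lines would actually catch a lookup failure (untested
sketch):

	ssize_t offset;
	unsigned long rounds = 0;

	offset = get_uprobe_offset(&uprobe_test);
	if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
		return NULL;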
> +
> + skel = uprobe_syscall_executed__open_and_load();
> + if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
> + return NULL;
> +
> + while (!race_stop) {
> + skel->links.test_uprobe = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe,
> + 0, "/proc/self/exe", offset, NULL);
> + if (!ASSERT_OK_PTR(skel->links.test_uprobe, "bpf_program__attach_uprobe_opts"))
> + break;
> +
> + bpf_link__destroy(skel->links.test_uprobe);
> + skel->links.test_uprobe = NULL;
> + rounds++;
> + }
> +
> + printf("tid %d attach rounds: %lu hits: %d\n", gettid(), rounds, skel->bss->executed);
> + uprobe_syscall_executed__destroy(skel);
> + return NULL;
> +}
> +
> +static void test_uprobe_race(void)
> +{
> + int err, i, nr_threads;
> + pthread_t *threads;
> +
> + nr_threads = libbpf_num_possible_cpus();
> + if (!ASSERT_GE(nr_threads, 0, "libbpf_num_possible_cpus"))
I hope there are strictly more than zero CPUs... ;)
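ASSERT_GT() would actually enforce that, e.g. (untested):

	if (!ASSERT_GT(nr_threads, 0, "libbpf_num_possible_cpus"))
		return;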
> + return;
> +
> + threads = malloc(sizeof(*threads) * nr_threads);
> + if (!ASSERT_OK_PTR(threads, "malloc"))
> + return;
> +
> + for (i = 0; i < nr_threads; i++) {
> + err = pthread_create(&threads[i], NULL, i % 2 ? worker_trigger : worker_attach,
> + NULL);
What happens when there is just one CPU?
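With a single CPU nr_threads == 1, so the only thread you create is
worker_attach (i == 0), nothing ever calls uprobe_test(), and the test
just sleeps for 4 seconds without exercising the race at all. Maybe
clamp it so there is at least one worker of each kind, something like
(untested):

	/* make sure we get at least one trigger and one attach worker */
	if (nr_threads < 2)
		nr_threads = 2;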
> + if (!ASSERT_OK(err, "pthread_create"))
> + goto cleanup;
> + }
> +
> + sleep(4);
> +
> +cleanup:
> + race_stop = true;
> + for (nr_threads = i, i = 0; i < nr_threads; i++)
> + pthread_join(threads[i], NULL);
> +}
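nit: threads is never freed, so a free() after the join loop would be
nice, e.g.:

	for (nr_threads = i, i = 0; i < nr_threads; i++)
		pthread_join(threads[i], NULL);
	free(threads);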
> +
> static void __test_uprobe_syscall(void)
> {
> if (test__start_subtest("uretprobe_regs_equal"))
> @@ -637,6 +709,8 @@ static void __test_uprobe_syscall(void)
> test_uprobe_session();
> if (test__start_subtest("uprobe_usdt"))
> test_uprobe_usdt();
> + if (test__start_subtest("uprobe_race"))
> + test_uprobe_race();
> }
> #else
> static void __test_uprobe_syscall(void)
> --
> 2.49.0
>