[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <z5vpeaow6kyr4uamfqlev7yxbfpr333ngws6tgjnuyjqaznfcr@vn4ihodpiwhz>
Date: Fri, 10 Oct 2025 04:40:40 +0530
From: Brahmajit Das <listout@...tout.xyz>
To: d0fdced7-a9a5-473e-991f-4f5e4c13f616@...ux.dev
Cc: yonghong.song@...ux.dev, andrii@...nel.org, ast@...nel.org,
bpf@...r.kernel.org, chandna.linuxkernel@...il.com, daniel@...earbox.net,
david.hunter.linux@...il.com, haoluo@...gle.com, john.fastabend@...il.com, jolsa@...nel.org,
khalid@...nel.org, martin.lau@...ux.dev, netdev@...r.kernel.org,
skhan@...uxfoundation.org, song@...nel.org,
syzbot+1f1fbecb9413cdbfbef8@...kaller.appspotmail.com
Subject: Re: [PATCH] bpf: test_run: Fix timer mode initialization to
NO_MIGRATE mode
On 10.10.2025 04:20, Brahmajit Das wrote:
> Yonghong Song,
>
> > So I suspect that we can remove NO_PREEMPT/NO_MIGRATE in test_run.c
> > and use migrate_disable()/migrate_enable() universally.
> Would something like this work?
>
Or we can do something like this to completely remove
NO_PREEMPT/NO_MIGRATE.
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -29,7 +29,6 @@
#include <trace/events/bpf_test_run.h>
struct bpf_test_timer {
- enum { NO_PREEMPT, NO_MIGRATE } mode;
u32 i;
u64 time_start, time_spent;
};
@@ -38,10 +37,7 @@ static void bpf_test_timer_enter(struct bpf_test_timer *t)
__acquires(rcu)
{
rcu_read_lock();
- if (t->mode == NO_PREEMPT)
- preempt_disable();
- else
- migrate_disable();
+ migrate_disable();
t->time_start = ktime_get_ns();
}
@@ -51,10 +47,7 @@ static void bpf_test_timer_leave(struct bpf_test_timer *t)
{
t->time_start = 0;
- if (t->mode == NO_PREEMPT)
- preempt_enable();
- else
- migrate_enable();
+ migrate_enable();
rcu_read_unlock();
}
@@ -374,7 +367,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
{
struct xdp_test_data xdp = { .batch_size = batch_size };
- struct bpf_test_timer t = { .mode = NO_MIGRATE };
+ struct bpf_test_timer t = {};
int ret;
if (!repeat)
@@ -404,7 +397,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
struct bpf_prog_array_item item = {.prog = prog};
struct bpf_run_ctx *old_ctx;
struct bpf_cg_run_ctx run_ctx;
- struct bpf_test_timer t = { NO_MIGRATE };
+ struct bpf_test_timer t = {};
enum bpf_cgroup_storage_type stype;
int ret;
@@ -1377,7 +1370,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
- struct bpf_test_timer t = { NO_PREEMPT };
+ struct bpf_test_timer t = {};
u32 size = kattr->test.data_size_in;
struct bpf_flow_dissector ctx = {};
u32 repeat = kattr->test.repeat;
@@ -1445,7 +1438,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
- struct bpf_test_timer t = { NO_PREEMPT };
+ struct bpf_test_timer t = {};
struct bpf_prog_array *progs = NULL;
struct bpf_sk_lookup_kern ctx = {};
u32 repeat = kattr->test.repeat;
This is basically an RFC. I had posted a patch, but wasn't aware that work
was already going on.
--
Regards,
listout
Powered by blists - more mailing lists