Message-Id: <20230518211751.3492982-2-shr@devkernel.io>
Date: Thu, 18 May 2023 14:17:45 -0700
From: Stefan Roesch <shr@...kernel.io>
To: io-uring@...r.kernel.org,
kernel-team@...com
Cc: shr@...kernel.io,
axboe@...nel.dk,
ammarfaizi2@...weeb.org,
netdev@...r.kernel.org,
kuba@...nel.org,
olivier@...llion01.com
Subject: [PATCH v13 1/7] net: split off __napi_busy_poll from napi_busy_loop
This splits off the core of the napi_busy_loop() function into its own
helper, __napi_busy_poll(). This is done in preparation for an additional
busy-poll entry point that does not itself take rcu_read_lock(); the new
function is introduced in the next patch.
Signed-off-by: Stefan Roesch <shr@...kernel.io>
---
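With the poll step and its state factored out into napi_busy_poll_ctx and
__napi_busy_poll(), a loop variant that runs entirely under a caller-held
rcu_read_lock() becomes a small wrapper. Below is a rough sketch of what
such a variant could look like; the name napi_busy_loop_rcu and its exact
signature are illustrative only, not the final API from the next patch:

	/* Sketch only: the caller is expected to hold rcu_read_lock(),
	 * so the loop neither takes the lock nor drops it to reschedule.
	 */
	void napi_busy_loop_rcu(unsigned int napi_id,
				bool (*loop_end)(void *, unsigned long),
				void *loop_end_arg, bool prefer_busy_poll,
				u16 budget)
	{
		unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
		struct napi_busy_poll_ctx ctx = {};

		WARN_ON_ONCE(!rcu_read_lock_held());

		ctx.napi = napi_by_id(napi_id);
		if (!ctx.napi)
			return;

		preempt_disable();
		for (;;) {
			__napi_busy_poll(&ctx, prefer_busy_poll, budget);
			if (!loop_end || loop_end(loop_end_arg, start_time))
				break;
			cpu_relax();
		}
		if (ctx.napi_poll)
			busy_poll_stop(ctx.napi, ctx.have_poll_lock,
				       prefer_busy_poll, budget);
		preempt_enable();
	}

Since cond_resched() is not allowed under rcu_read_lock(), such a sketch
would have to drop the need_resched()/restart handling that
napi_busy_loop() does; bounding the loop would be up to the caller's
loop_end callback.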
net/core/dev.c | 99 ++++++++++++++++++++++++++++----------------------
1 file changed, 56 insertions(+), 43 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 253584777101..f4677aa20f84 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6166,66 +6166,79 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool
local_bh_enable();
}
+struct napi_busy_poll_ctx {
+ struct napi_struct *napi;
+ int (*napi_poll)(struct napi_struct *napi, int budget);
+ void *have_poll_lock;
+};
+
+static inline void __napi_busy_poll(struct napi_busy_poll_ctx *ctx,
+ bool prefer_busy_poll, u16 budget)
+{
+ struct napi_struct *napi = ctx->napi;
+ int work = 0;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ local_bh_disable();
+ if (!ctx->napi_poll) {
+ unsigned long val = READ_ONCE(napi->state);
+
+ /* If multiple threads are competing for this napi,
+ * we avoid dirtying napi->state as much as we can.
+ */
+ if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
+ NAPIF_STATE_IN_BUSY_POLL)) {
+ if (prefer_busy_poll)
+ set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
+ goto count;
+ }
+ if (cmpxchg(&napi->state, val,
+ val | NAPIF_STATE_IN_BUSY_POLL |
+ NAPIF_STATE_SCHED) != val) {
+ if (prefer_busy_poll)
+ set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
+ goto count;
+ }
+ ctx->have_poll_lock = netpoll_poll_lock(napi);
+ ctx->napi_poll = napi->poll;
+ }
+ work = ctx->napi_poll(napi, budget);
+ trace_napi_poll(napi, work, budget);
+ gro_normal_list(napi);
+
+count:
+ if (work > 0)
+ __NET_ADD_STATS(dev_net(napi->dev),
+ LINUX_MIB_BUSYPOLLRXPACKETS, work);
+ local_bh_enable();
+}
void napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{
unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
- int (*napi_poll)(struct napi_struct *napi, int budget);
- void *have_poll_lock = NULL;
- struct napi_struct *napi;
+ struct napi_busy_poll_ctx ctx = {};
restart:
- napi_poll = NULL;
+ ctx.napi_poll = NULL;
rcu_read_lock();
- napi = napi_by_id(napi_id);
- if (!napi)
+ ctx.napi = napi_by_id(napi_id);
+ if (!ctx.napi)
goto out;
preempt_disable();
for (;;) {
- int work = 0;
-
- local_bh_disable();
- if (!napi_poll) {
- unsigned long val = READ_ONCE(napi->state);
-
- /* If multiple threads are competing for this napi,
- * we avoid dirtying napi->state as much as we can.
- */
- if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
- NAPIF_STATE_IN_BUSY_POLL)) {
- if (prefer_busy_poll)
- set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
- goto count;
- }
- if (cmpxchg(&napi->state, val,
- val | NAPIF_STATE_IN_BUSY_POLL |
- NAPIF_STATE_SCHED) != val) {
- if (prefer_busy_poll)
- set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
- goto count;
- }
- have_poll_lock = netpoll_poll_lock(napi);
- napi_poll = napi->poll;
- }
- work = napi_poll(napi, budget);
- trace_napi_poll(napi, work, budget);
- gro_normal_list(napi);
-count:
- if (work > 0)
- __NET_ADD_STATS(dev_net(napi->dev),
- LINUX_MIB_BUSYPOLLRXPACKETS, work);
- local_bh_enable();
+ __napi_busy_poll(&ctx, prefer_busy_poll, budget);
if (!loop_end || loop_end(loop_end_arg, start_time))
break;
if (unlikely(need_resched())) {
- if (napi_poll)
- busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
+ if (ctx.napi_poll)
+ busy_poll_stop(ctx.napi, ctx.have_poll_lock, prefer_busy_poll, budget);
preempt_enable();
rcu_read_unlock();
cond_resched();
@@ -6235,8 +6248,8 @@ void napi_busy_loop(unsigned int napi_id,
}
cpu_relax();
}
- if (napi_poll)
- busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
+ if (ctx.napi_poll)
+ busy_poll_stop(ctx.napi, ctx.have_poll_lock, prefer_busy_poll, budget);
preempt_enable();
out:
rcu_read_unlock();
--
2.39.1