Message-Id: <20200914172453.1833883-5-weiwan@google.com>
Date: Mon, 14 Sep 2020 10:24:51 -0700
From: Wei Wang <weiwan@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, netdev@...r.kernel.org
Cc: Jakub Kicinski <kuba@...nel.org>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
Hannes Frederic Sowa <hannes@...essinduktion.org>,
Felix Fietkau <nbd@....name>, Wei Wang <weiwan@...gle.com>
Subject: [RFC PATCH net-next 4/6] net: modify kthread handler to use __napi_poll()
From: Jakub Kicinski <kuba@...nel.org>
The current kthread handler calls napi_poll() and has to pass a dummy
repoll list to the function, which seems redundant. The new kthread
handler calls the newly proposed __napi_poll() and respects
napi->weight as before. If a repoll is needed, cond_resched() is
called first to give other tasks a chance to run before repolling.
This change was proposed by Jakub Kicinski <kuba@...nel.org> on top of
the previous patch.
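
For context, the kthread handler relies on the usual NAPI budget
contract: __napi_poll() invokes the driver's ->poll() callback with
napi->weight as the budget, and a return value equal to that budget is
what makes it report repoll. A rough, hypothetical driver sketch of
that contract (the mydrv_* names are made up for illustration and are
not part of this patch):

    #include <linux/netdevice.h>

    /* Hypothetical driver state; mydrv_* names are illustrative only. */
    struct mydrv_ring {
            struct napi_struct napi;
            /* rx descriptor ring, stats, etc. */
    };

    static int mydrv_rx_clean(struct mydrv_ring *ring, int budget);  /* assumed helper */
    static void mydrv_enable_irq(struct mydrv_ring *ring);           /* assumed helper */

    static int mydrv_poll(struct napi_struct *napi, int budget)
    {
            struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
            int work_done;

            /* __napi_poll() passes napi->weight as the budget; process
             * at most that many packets.
             */
            work_done = mydrv_rx_clean(ring, budget);

            if (work_done < budget) {
                    /* Ring drained: finish polling and re-arm the interrupt. */
                    napi_complete_done(napi, work_done);
                    mydrv_enable_irq(ring);
            }

            /* Returning work_done == budget leaves NAPI_STATE_SCHED set,
             * so __napi_poll() reports repoll and napi_threaded_poll()
             * will cond_resched() and poll again.
             */
            return work_done;
    }
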
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
Signed-off-by: Wei Wang <weiwan@...gle.com>
---
net/core/dev.c | 62 +++++++++++++++++++-------------------------------
1 file changed, 24 insertions(+), 38 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index bc2a7681b239..be676c21bdc4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6763,6 +6763,15 @@ static int __napi_poll(struct napi_struct *n, bool *repoll)
gro_normal_list(n);
+ /* Some drivers may have called napi_schedule
+ * prior to exhausting their budget.
+ */
+ if (unlikely(!list_empty(&n->poll_list))) {
+ pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
+ n->dev ? n->dev->name : "backlog");
+ return work;
+ }
+
*repoll = true;
return work;
@@ -6783,15 +6792,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
if (!do_repoll)
goto out_unlock;
- /* Some drivers may have called napi_schedule
- * prior to exhausting their budget.
- */
- if (unlikely(!list_empty(&n->poll_list))) {
- pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
- n->dev ? n->dev->name : "backlog");
- goto out_unlock;
- }
-
list_add_tail(&n->poll_list, repoll);
out_unlock:
@@ -6820,40 +6820,26 @@ static int napi_thread_wait(struct napi_struct *napi)
static int napi_threaded_poll(void *data)
{
struct napi_struct *napi = data;
+ void *have;
while (!napi_thread_wait(napi)) {
- struct list_head dummy_repoll;
- int budget = netdev_budget;
- unsigned long time_limit;
- bool again = true;
+ for (;;) {
+ bool repoll = false;
- INIT_LIST_HEAD(&dummy_repoll);
- local_bh_disable();
- time_limit = jiffies + 2;
- do {
- /* ensure that the poll list is not empty */
- if (list_empty(&dummy_repoll))
- list_add(&napi->poll_list, &dummy_repoll);
-
- budget -= napi_poll(napi, &dummy_repoll);
- if (unlikely(budget <= 0 ||
- time_after_eq(jiffies, time_limit))) {
- cond_resched();
-
- /* refresh the budget */
- budget = netdev_budget;
- __kfree_skb_flush();
- time_limit = jiffies + 2;
- }
+ local_bh_disable();
- if (napi_disable_pending(napi))
- again = false;
- else if (!test_bit(NAPI_STATE_SCHED, &napi->state))
- again = false;
- } while (again);
+ have = netpoll_poll_lock(napi);
+ __napi_poll(napi, &repoll);
+ netpoll_poll_unlock(have);
- __kfree_skb_flush();
- local_bh_enable();
+ __kfree_skb_flush();
+ local_bh_enable();
+
+ if (!repoll)
+ break;
+
+ cond_resched();
+ }
}
return 0;
}
--
2.28.0.618.gf4bc123cb7-goog