Message-Id: <20200930192140.4192859-5-weiwan@google.com>
Date: Wed, 30 Sep 2020 12:21:39 -0700
From: Wei Wang <weiwan@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, netdev@...r.kernel.org
Cc: Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Hannes Frederic Sowa <hannes@...essinduktion.org>,
Paolo Abeni <pabeni@...hat.com>, Felix Fietkau <nbd@....name>,
Wei Wang <weiwan@...gle.com>
Subject: [PATCH net-next 4/5] net: modify kthread handler to use __napi_poll()
From: Jakub Kicinski <kuba@...nel.org>
The current kthread handler calls napi_poll() and has to pass a dummy
repoll list to the function, which seems redundant. The new kthread
handler calls the newly introduced __napi_poll() and respects
napi->weight as before. If a repoll is needed, cond_resched() is called
first to give other tasks a chance to run before repolling.
This change was proposed by Jakub Kicinski <kuba@...nel.org> on top of
the previous patch.
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
Signed-off-by: Wei Wang <weiwan@...gle.com>
---
net/core/dev.c | 62 +++++++++++++++++++-------------------------------
1 file changed, 24 insertions(+), 38 deletions(-)
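
For reviewers' convenience, this is how napi_threaded_poll() reads after
the patch, reconstructed from the hunks below (a reference sketch only,
not a substitute for the diff):

    static int napi_threaded_poll(void *data)
    {
            struct napi_struct *napi = data;
            void *have;

            while (!napi_thread_wait(napi)) {
                    for (;;) {
                            bool repoll = false;

                            /* one __napi_poll() pass with BH disabled */
                            local_bh_disable();

                            have = netpoll_poll_lock(napi);
                            __napi_poll(napi, &repoll);
                            netpoll_poll_unlock(have);

                            __kfree_skb_flush();
                            local_bh_enable();

                            if (!repoll)
                                    break;

                            /* let other tasks run before repolling */
                            cond_resched();
                    }
            }
            return 0;
    }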
diff --git a/net/core/dev.c b/net/core/dev.c
index c82522262ca8..b4f33e442b5e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6827,6 +6827,15 @@ static int __napi_poll(struct napi_struct *n, bool *repoll)
gro_normal_list(n);
+ /* Some drivers may have called napi_schedule
+ * prior to exhausting their budget.
+ */
+ if (unlikely(!list_empty(&n->poll_list))) {
+ pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
+ n->dev ? n->dev->name : "backlog");
+ return work;
+ }
+
*repoll = true;
return work;
@@ -6847,15 +6856,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
if (!do_repoll)
goto out_unlock;
- /* Some drivers may have called napi_schedule
- * prior to exhausting their budget.
- */
- if (unlikely(!list_empty(&n->poll_list))) {
- pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
- n->dev ? n->dev->name : "backlog");
- goto out_unlock;
- }
-
list_add_tail(&n->poll_list, repoll);
out_unlock:
@@ -6884,40 +6884,26 @@ static int napi_thread_wait(struct napi_struct *napi)
static int napi_threaded_poll(void *data)
{
struct napi_struct *napi = data;
+ void *have;
while (!napi_thread_wait(napi)) {
- struct list_head dummy_repoll;
- int budget = netdev_budget;
- unsigned long time_limit;
- bool again = true;
+ for (;;) {
+ bool repoll = false;
- INIT_LIST_HEAD(&dummy_repoll);
- local_bh_disable();
- time_limit = jiffies + 2;
- do {
- /* ensure that the poll list is not empty */
- if (list_empty(&dummy_repoll))
- list_add(&napi->poll_list, &dummy_repoll);
-
- budget -= napi_poll(napi, &dummy_repoll);
- if (unlikely(budget <= 0 ||
- time_after_eq(jiffies, time_limit))) {
- cond_resched();
-
- /* refresh the budget */
- budget = netdev_budget;
- __kfree_skb_flush();
- time_limit = jiffies + 2;
- }
+ local_bh_disable();
- if (napi_disable_pending(napi))
- again = false;
- else if (!test_bit(NAPI_STATE_SCHED, &napi->state))
- again = false;
- } while (again);
+ have = netpoll_poll_lock(napi);
+ __napi_poll(napi, &repoll);
+ netpoll_poll_unlock(have);
- __kfree_skb_flush();
- local_bh_enable();
+ __kfree_skb_flush();
+ local_bh_enable();
+
+ if (!repoll)
+ break;
+
+ cond_resched();
+ }
}
return 0;
}
--
2.28.0.709.gb0816b6eb0-goog