Message-Id: <20210311114935.11379-4-mgorman@techsingularity.net>
Date: Thu, 11 Mar 2021 11:49:33 +0000
From: Mel Gorman <mgorman@...hsingularity.net>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Chuck Lever <chuck.lever@...cle.com>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Christoph Hellwig <hch@...radead.org>,
LKML <linux-kernel@...r.kernel.org>,
Linux-Net <netdev@...r.kernel.org>,
Linux-MM <linux-mm@...ck.org>,
Linux-NFS <linux-nfs@...r.kernel.org>,
Mel Gorman <mgorman@...hsingularity.net>
Subject: [PATCH 3/5] SUNRPC: Refresh rq_pages using a bulk page allocator

From: Chuck Lever <chuck.lever@...cle.com>

Reduce the rate at which nfsd threads hammer on the page allocator.
This improves throughput scalability by enabling the threads to run
more independently of each other.

Signed-off-by: Chuck Lever <chuck.lever@...cle.com>
Signed-off-by: Mel Gorman <mgorman@...hsingularity.net>
---
net/sunrpc/svc_xprt.c | 43 +++++++++++++++++++++++++++++++------------
1 file changed, 31 insertions(+), 12 deletions(-)
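
For review purposes only (this note is not part of the commit
message), the refill pattern the new code follows is roughly the
sketch below. It assumes the list-based alloc_pages_bulk() API added
earlier in this series, which may return fewer pages than requested;
the refill_array() helper is hypothetical and exists only to
illustrate the shape of the loop:

	/* Illustrative only: bulk-refill the NULL slots of a page array */
	static int refill_array(struct page **pages, int nr)
	{
		unsigned long needed = 0;
		struct page *page;
		LIST_HEAD(list);
		int i;

		/* Count the empty slots so only the shortfall is requested */
		for (i = 0; i < nr; i++)
			if (!pages[i])
				needed++;
		if (!needed)
			return 0;

		/* One call replaces up to 'needed' alloc_page() round trips */
		alloc_pages_bulk(GFP_KERNEL, needed, &list);

		/* Drain whatever the allocator returned into the empty slots */
		for (i = 0; i < nr; i++) {
			if (pages[i])
				continue;
			page = list_first_entry_or_null(&list, struct page, lru);
			if (!page)
				return -ENOMEM; /* short list; caller may retry */
			list_del(&page->lru);
			pages[i] = page;
		}
		return 0;
	}

On a short return the patch keeps the pages already transferred
(needed is decremented as each slot is filled), sleeps briefly unless
the thread was signalled or asked to stop, and retries only for the
remaining shortfall, as the empty_list/retry labels in the hunks
below show.
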
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cfa7e4776d0e..38a8d6283801 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -642,11 +642,12 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 static int svc_alloc_arg(struct svc_rqst *rqstp)
 {
 	struct svc_serv *serv = rqstp->rq_server;
+	unsigned long needed;
 	struct xdr_buf *arg;
+	struct page *page;
 	int pages;
 	int i;
 
-	/* now allocate needed pages. If we get a failure, sleep briefly */
 	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
 	if (pages > RPCSVC_MAXPAGES) {
 		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
@@ -654,19 +655,28 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 		/* use as many pages as possible */
 		pages = RPCSVC_MAXPAGES;
 	}
-	for (i = 0; i < pages ; i++)
-		while (rqstp->rq_pages[i] == NULL) {
-			struct page *p = alloc_page(GFP_KERNEL);
-			if (!p) {
-				set_current_state(TASK_INTERRUPTIBLE);
-				if (signalled() || kthread_should_stop()) {
-					set_current_state(TASK_RUNNING);
-					return -EINTR;
-				}
-				schedule_timeout(msecs_to_jiffies(500));
+
+	for (needed = 0, i = 0; i < pages ; i++)
+		if (!rqstp->rq_pages[i])
+			needed++;
+	if (needed) {
+		LIST_HEAD(list);
+
+retry:
+		alloc_pages_bulk(GFP_KERNEL, needed, &list);
+		for (i = 0; i < pages; i++) {
+			if (!rqstp->rq_pages[i]) {
+				page = list_first_entry_or_null(&list,
+								struct page,
+								lru);
+				if (unlikely(!page))
+					goto empty_list;
+				list_del(&page->lru);
+				rqstp->rq_pages[i] = page;
+				needed--;
 			}
-			rqstp->rq_pages[i] = p;
 		}
+	}
 	rqstp->rq_page_end = &rqstp->rq_pages[pages];
 	rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */
 
@@ -681,6 +691,15 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 	arg->len = (pages-1)*PAGE_SIZE;
 	arg->tail[0].iov_len = 0;
 	return 0;
+
+empty_list:
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signalled() || kthread_should_stop()) {
+		set_current_state(TASK_RUNNING);
+		return -EINTR;
+	}
+	schedule_timeout(msecs_to_jiffies(500));
+	goto retry;
 }
 
 static bool
--
2.26.2