Message-ID: <1689259687-5231-1-git-send-email-haiyangz@microsoft.com>
Date: Thu, 13 Jul 2023 14:48:45 +0000
From: Haiyang Zhang <haiyangz@...rosoft.com>
To: "linux-hyperv@...r.kernel.org" <linux-hyperv@...r.kernel.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>
CC: Haiyang Zhang <haiyangz@...rosoft.com>, Dexuan Cui <decui@...rosoft.com>,
KY Srinivasan <kys@...rosoft.com>, Paul Rosswurm <paulros@...rosoft.com>,
"olaf@...fle.de" <olaf@...fle.de>, "vkuznets@...hat.com"
<vkuznets@...hat.com>, "davem@...emloft.net" <davem@...emloft.net>,
"wei.liu@...nel.org" <wei.liu@...nel.org>, "edumazet@...gle.com"
<edumazet@...gle.com>, "kuba@...nel.org" <kuba@...nel.org>,
"pabeni@...hat.com" <pabeni@...hat.com>, "leon@...nel.org" <leon@...nel.org>,
Long Li <longli@...rosoft.com>, "ssengar@...ux.microsoft.com"
<ssengar@...ux.microsoft.com>, "linux-rdma@...r.kernel.org"
<linux-rdma@...r.kernel.org>, "daniel@...earbox.net" <daniel@...earbox.net>,
"john.fastabend@...il.com" <john.fastabend@...il.com>, "bpf@...r.kernel.org"
<bpf@...r.kernel.org>, "ast@...nel.org" <ast@...nel.org>, Ajay Sharma
<sharmaajay@...rosoft.com>, "hawk@...nel.org" <hawk@...nel.org>,
"tglx@...utronix.de" <tglx@...utronix.de>, "shradhagupta@...ux.microsoft.com"
<shradhagupta@...ux.microsoft.com>, "linux-kernel@...r.kernel.org"
<linux-kernel@...r.kernel.org>
Subject: [PATCH net-next] net: mana: Add page pool for RX buffers

Add a page pool for RX buffers, to speed up the buffer recycle cycle and
reduce CPU usage.

Take an extra reference on each page after allocation, so that after the
upper layers put the page, it is still held by the pool and can be reused
as an RX buffer without allocating a new page.

Signed-off-by: Haiyang Zhang <haiyangz@...rosoft.com>
---
 drivers/net/ethernet/microsoft/mana/mana_en.c | 73 ++++++++++++++++++-
 include/net/mana/mana.h                       |  5 ++
 2 files changed, 77 insertions(+), 1 deletion(-)
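
For reviewers, a simplified, commented sketch of the recycle scheme (the
name pool_get is illustrative; the real implementation is
mana_get_page_from_pool in the diff below). Each pool slot permanently
holds one reference on its page, so a refcount of 1 means the pool is the
sole owner and the page can be handed out again:

	/* Simplified sketch of the recycle path; not part of the patch. */
	static struct page *pool_get(struct mana_rxq *rxq)
	{
		/* Advance the round-robin slot index, wrapping at pool size. */
		int i = (rxq->pl_last + 1) % MANA_POOL_SIZE;
		struct page *page = rxq->pool[i];

		rxq->pl_last = i;

		if (page_ref_count(page) == 1) {
			/* Only the pool's reference is left: the upper layers
			 * are done with this page, so reuse it. Take one more
			 * reference for the new RX buffer user.
			 */
			get_page(page);
			return page;
		}

		/* The page is still in use upstack. Allocate a fresh page,
		 * drop the pool's reference on the busy one, and park the
		 * new page (with the pool's extra reference) in the slot.
		 */
		page = dev_alloc_page();
		if (page) {
			put_page(rxq->pool[i]);
			get_page(page);
			rxq->pool[i] = page;
		}
		return page;
	}
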
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index a499e460594b..6444a8e47852 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1507,6 +1507,34 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
 	return;
 }
 
+static struct page *mana_get_page_from_pool(struct mana_rxq *rxq)
+{
+	struct page *page;
+	int i;
+
+	i = rxq->pl_last + 1;
+	if (i >= MANA_POOL_SIZE)
+		i = 0;
+
+	rxq->pl_last = i;
+
+	page = rxq->pool[i];
+	if (page_ref_count(page) == 1) {
+		get_page(page);
+		return page;
+	}
+
+	page = dev_alloc_page();
+	if (page) {
+		put_page(rxq->pool[i]);
+
+		get_page(page);
+		rxq->pool[i] = page;
+	}
+
+	return page;
+}
+
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 			     dma_addr_t *da, bool is_napi)
 {
@@ -1533,7 +1561,7 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 			return NULL;
 		}
 	} else {
-		page = dev_alloc_page();
+		page = mana_get_page_from_pool(rxq);
 		if (!page)
 			return NULL;
 
@@ -1873,6 +1901,21 @@ static int mana_create_txq(struct mana_port_context *apc,
 	return err;
 }
 
+static void mana_release_rxq_pool(struct mana_rxq *rxq)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < MANA_POOL_SIZE; i++) {
+		page = rxq->pool[i];
+
+		if (page)
+			put_page(page);
+
+		rxq->pool[i] = NULL;
+	}
+}
+
 static void mana_destroy_rxq(struct mana_port_context *apc,
 			     struct mana_rxq *rxq, bool validate_state)
 
@@ -1917,6 +1960,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 		rx_oob->buf_va = NULL;
 	}
 
+	mana_release_rxq_pool(rxq);
+
 	if (rxq->gdma_rq)
 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
 
@@ -2008,6 +2053,27 @@ static int mana_push_wqe(struct mana_rxq *rxq)
 	return 0;
 }
 
+static int mana_alloc_rxq_pool(struct mana_rxq *rxq)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < MANA_POOL_SIZE; i++) {
+		page = dev_alloc_page();
+		if (!page)
+			goto err;
+
+		rxq->pool[i] = page;
+	}
+
+	return 0;
+
+err:
+	mana_release_rxq_pool(rxq);
+
+	return -ENOMEM;
+}
+
 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 					u32 rxq_idx, struct mana_eq *eq,
 					struct net_device *ndev)
@@ -2029,6 +2095,11 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	if (!rxq)
 		return NULL;
 
+	if (mana_alloc_rxq_pool(rxq)) {
+		kfree(rxq);
+		return NULL;
+	}
+
 	rxq->ndev = ndev;
 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
 	rxq->rxq_idx = rxq_idx;
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 024ad8ddb27e..8f1f09f9e4ab 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -297,6 +297,8 @@ struct mana_recv_buf_oob {
 
 #define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
 
+#define MANA_POOL_SIZE (RX_BUFFERS_PER_QUEUE * 2)
+
 struct mana_rxq {
 	struct gdma_queue *gdma_rq;
 	/* Cache the gdma receive queue id */
@@ -330,6 +332,9 @@ struct mana_rxq {
 	bool xdp_flush;
 	int xdp_rc; /* XDP redirect return code */
 
+	struct page *pool[MANA_POOL_SIZE];
+	int pl_last;
+
 	/* MUST BE THE LAST MEMBER:
 	 * Each receive buffer has an associated mana_recv_buf_oob.
 	 */
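
A note on sizing (reviewer commentary, not part of the patch):
MANA_POOL_SIZE is twice RX_BUFFERS_PER_QUEUE, so even when every posted
RX buffer is still held by the upper layers, as many free slots again can
remain before the dev_alloc_page() fallback path is taken. Assuming the
driver's RX_BUFFERS_PER_QUEUE of 512 and 4 KiB pages, the pool pins at
most 2 * 512 * 4 KiB = 4 MiB per RX queue.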
--
2.25.1