Message-Id: <1643421818-14259-4-git-send-email-haiyangz@microsoft.com>
Date: Fri, 28 Jan 2022 18:03:38 -0800
From: Haiyang Zhang <haiyangz@...rosoft.com>
To: linux-hyperv@...r.kernel.org, netdev@...r.kernel.org
Cc: haiyangz@...rosoft.com, decui@...rosoft.com, kys@...rosoft.com,
sthemmin@...rosoft.com, paulros@...rosoft.com,
shacharr@...rosoft.com, olaf@...fle.de, vkuznets@...hat.com,
davem@...emloft.net, linux-kernel@...r.kernel.org
Subject: [PATCH net-next 3/3] net: mana: Reuse XDP dropped page

When the XDP program drops a packet, save its page in the RX queue and
reuse it for the next receive buffer refill, instead of freeing it and
allocating a new one. This saves page allocation overhead in the RX
path.
Signed-off-by: Haiyang Zhang <haiyangz@...rosoft.com>
---
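Note: the change amounts to a one-slot page recycle cache per RX queue.
A page dropped by the XDP program is parked in rxq->xdp_save_page by
mana_rx_skb(), and mana_process_rx_cqe() consumes it right afterwards
when it refills the same receive WQE, so the slot never needs to hold
more than one page; the WARN_ON_ONCE fires if that assumption is ever
broken. As a rough sketch of the pattern only (the struct and helper
names below are invented for this note and are not part of the driver;
assumes the usual kernel headers for struct page, gfp_t, alloc_page()
and WARN_ON_ONCE()):

	/* One-slot page cache: park a dropped page, reuse it on refill. */
	struct one_slot_page_cache {
		struct page *saved;		/* at most one parked page */
	};

	static void cache_park(struct one_slot_page_cache *c, struct page *page)
	{
		WARN_ON_ONCE(c->saved);		/* a refill must run between drops */
		c->saved = page;
	}

	static struct page *cache_get(struct one_slot_page_cache *c, gfp_t gfp)
	{
		struct page *page = c->saved;

		if (page)
			c->saved = NULL;	/* reuse the parked page */
		else
			page = alloc_page(gfp);	/* fall back to the allocator */

		return page;
	}

In the patch below, the XDP_DROP path in mana_rx_skb() plays the role
of cache_park(), and the refill in mana_process_rx_cqe() plays the role
of cache_get().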
 drivers/net/ethernet/microsoft/mana/mana.h    |  1 +
 drivers/net/ethernet/microsoft/mana/mana_en.c | 15 +++++++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index 8ead960f898d..d36405af9432 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -310,6 +310,7 @@ struct mana_rxq {
 
 	struct bpf_prog __rcu *bpf_prog;
 	struct xdp_rxq_info xdp_rxq;
+	struct page *xdp_save_page;
 
 	/* MUST BE THE LAST MEMBER:
 	 * Each receive buffer has an associated mana_recv_buf_oob.
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 12067bf5b7d6..69e791e6abc4 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1059,7 +1059,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
 	u64_stats_update_end(&rx_stats->syncp);
 
 drop:
-	free_page((unsigned long)buf_va);
+	WARN_ON_ONCE(rxq->xdp_save_page);
+	rxq->xdp_save_page = virt_to_page(buf_va);
+
 	++ndev->stats.rx_dropped;
 
 	return;
@@ -1116,7 +1118,13 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	rxbuf_oob = &rxq->rx_oobs[curr];
 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-	new_page = alloc_page(GFP_ATOMIC);
+	/* Reuse XDP dropped page if available */
+	if (rxq->xdp_save_page) {
+		new_page = rxq->xdp_save_page;
+		rxq->xdp_save_page = NULL;
+	} else {
+		new_page = alloc_page(GFP_ATOMIC);
+	}
 
 	if (new_page) {
 		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
@@ -1403,6 +1411,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 	mana_deinit_cq(apc, &rxq->rx_cq);
 
+	if (rxq->xdp_save_page)
+		__free_page(rxq->xdp_save_page);
+
 	for (i = 0; i < rxq->num_rx_buf; i++) {
 		rx_oob = &rxq->rx_oobs[i];
--
2.25.1