Message-ID: <20240729183038.1959-2-eladwf@gmail.com>
Date: Mon, 29 Jul 2024 21:29:54 +0300
From: Elad Yifee <eladwf@...il.com>
To: Felix Fietkau <nbd@....name>,
Sean Wang <sean.wang@...iatek.com>,
Mark Lee <Mark-MC.Lee@...iatek.com>,
Lorenzo Bianconi <lorenzo@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Matthias Brugger <matthias.bgg@...il.com>,
AngeloGioacchino Del Regno <angelogioacchino.delregno@...labora.com>,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-mediatek@...ts.infradead.org
Cc: Elad Yifee <eladwf@...il.com>,
Daniel Golle <daniel@...rotopia.org>,
Joe Damato <jdamato@...tly.com>
Subject: [PATCH net-next v2 1/2] net: ethernet: mtk_eth_soc: use prefetch methods

Utilize kernel prefetch methods for faster cache line access.
This change boosts driver performance, allowing the CPU to
handle about 5% more packets/sec.

Signed-off-by: Elad Yifee <eladwf@...il.com>
---
Changes in v2:
- use net_prefetchw as suggested by Joe Damato
- add (NET_SKB_PAD + eth->ip_align) offset to the prefetched data
- use eth->ip_align instead of NET_IP_ALIGN, as it could be 0
  depending on the platform (a short usage sketch follows below)
---
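For reviewers: the helpers this patch relies on live in
include/linux/netdevice.h. Quoting from memory (worth double-checking
against the tree), they wrap prefetch()/prefetchw() and also touch the
following cache line when L1_CACHE_BYTES < 128, so a single call covers
an Ethernet header plus the start of the payload:

	static inline void net_prefetch(void *p)
	{
		prefetch(p);
	#if L1_CACHE_BYTES < 128
		prefetch((u8 *)p + L1_CACHE_BYTES);
	#endif
	}

	static inline void net_prefetchw(void *p)
	{
		prefetchw(p);
	#if L1_CACHE_BYTES < 128
		prefetchw((u8 *)p + L1_CACHE_BYTES);
	#endif
	}
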
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
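
Also for reviewers, a minimal sketch of the pattern this patch applies
on the non-XDP RX path. The rx_ring/rx_desc types, the DONE bit, and
rx_poll_sketch() are hypothetical scaffolding, not driver code; only
the placement of prefetch()/net_prefetch()/net_prefetchw() mirrors the
patch:

	#include <linux/netdevice.h>
	#include <linux/prefetch.h>
	#include <linux/skbuff.h>

	/* Hypothetical RX bookkeeping, for illustration only. */
	struct rx_desc { u32 status; };
	struct rx_ring { struct rx_desc *desc; void **data; int next; };

	static void rx_poll_sketch(struct rx_ring *ring,
				   unsigned int frag_size,
				   unsigned int ip_align)
	{
		struct rx_desc *rxd = &ring->desc[ring->next];
		void *data = ring->data[ring->next];
		struct sk_buff *skb;

		/* Warm the descriptor before its status word is read. */
		prefetch(rxd);
		if (!(READ_ONCE(rxd->status) & BIT(31))) /* hypothetical DONE bit */
			return;

		/* Read-prefetch the headers at their final offset ... */
		net_prefetch(data + NET_SKB_PAD + ip_align);
		skb = build_skb(data, frag_size);
		if (unlikely(!skb))
			return;

		/* ... and write-prefetch skb->data before headers are written. */
		net_prefetchw(skb->data);
		skb_reserve(skb, NET_SKB_PAD + ip_align);
		/* skb would be filled and handed to the stack here. */
	}
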
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 16ca427cf4c3..4d0052dbe3f4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1963,6 +1963,7 @@ static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
 	if (!prog)
 		goto out;
 
+	net_prefetchw(xdp->data_hard_start);
 	act = bpf_prog_run_xdp(prog, xdp);
 	switch (act) {
 	case XDP_PASS:
@@ -2038,6 +2039,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 
 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
 		rxd = ring->dma + idx * eth->soc->rx.desc_size;
+		prefetch(rxd);
 		data = ring->data[idx];
 
 		if (!mtk_rx_get_desc(eth, &trxd, rxd))
@@ -2105,6 +2107,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			if (ret != XDP_PASS)
 				goto skip_rx;
 
+			net_prefetch(xdp.data_meta);
 			skb = build_skb(data, PAGE_SIZE);
 			if (unlikely(!skb)) {
 				page_pool_put_full_page(ring->page_pool,
@@ -2113,6 +2116,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 				goto skip_rx;
 			}
 
+			net_prefetchw(skb->data);
 			skb_reserve(skb, xdp.data - xdp.data_hard_start);
 			skb_put(skb, xdp.data_end - xdp.data);
 			skb_mark_for_recycle(skb);
@@ -2143,6 +2147,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
 					 ring->buf_size, DMA_FROM_DEVICE);
 
+			net_prefetch(data + NET_SKB_PAD + eth->ip_align);
 			skb = build_skb(data, ring->frag_size);
 			if (unlikely(!skb)) {
 				netdev->stats.rx_dropped++;
@@ -2150,7 +2155,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 				goto skip_rx;
 			}
 
-			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+			net_prefetchw(skb->data);
+			skb_reserve(skb, NET_SKB_PAD + eth->ip_align);
 			skb_put(skb, pktlen);
 		}
 
--
2.45.2