Message-ID: <ZEGmHe2pyxwWiYRL@boxer>
Date: Thu, 20 Apr 2023 22:52:45 +0200
From: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
To: Horatiu Vultur <horatiu.vultur@...rochip.com>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<bpf@...r.kernel.org>, <davem@...emloft.net>,
<edumazet@...gle.com>, <kuba@...nel.org>, <pabeni@...hat.com>,
<daniel@...earbox.net>, <hawk@...nel.org>,
<john.fastabend@...il.com>, <richardcochran@...il.com>,
<UNGLinuxDriver@...rochip.com>, <alexandr.lobakin@...el.com>
Subject: Re: [PATCH net-next] net: lan966x: Don't use xdp_frame when action
is XDP_TX
On Thu, Apr 20, 2023 at 02:11:52PM +0200, Horatiu Vultur wrote:
The 'net: ' prefix in the patch subject seems excessive to me
> When the action of an xdp program was XDP_TX, lan966x was creating
> a xdp_frame and use this one to send the frame back. But it is also
> possible to send back the frame without needing a xdp_frame, because
> it possible to send it back using the page.
s/it/it is
> And then once the frame is transmitted is possible to use directly
> page_pool_recycle_direct as lan966x is using page pools.
> This would save some CPU usage on this path.
I remember this optimization gave me a noticeable perf improvement; would
you mind sharing the improvement in % on your side?
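For anyone skimming the thread, the essence of the change, hand-simplified
from the hunks below (not the literal driver code):

	/* before: XDP_TX had to materialize an xdp_frame first */
	xdpf = xdp_convert_buff_to_frame(&xdp);
	if (!xdpf)
		return FDMA_DROP;
	lan966x_fdma_xmit_xdpf(port, xdpf, page, false);
	/* ...and on TX completion: */
	xdp_return_frame_rx_napi(dcb_buf->data.xdpf);

	/* after: hand the page straight to the TX path */
	lan966x_fdma_xmit_xdpf(port, NULL, page,
			       data_len - IFH_LEN_BYTES, false);
	/* ...and on TX completion, recycle directly into the pool: */
	page_pool_recycle_direct(rx->page_pool, dcb_buf->data.page);

The xdp_frame conversion and the generic frame-return path are skipped
entirely, which is where the CPU savings should come from.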
>
> Signed-off-by: Horatiu Vultur <horatiu.vultur@...rochip.com>
> ---
> .../ethernet/microchip/lan966x/lan966x_fdma.c | 35 +++++++++++--------
> .../ethernet/microchip/lan966x/lan966x_main.h | 2 ++
> .../ethernet/microchip/lan966x/lan966x_xdp.c | 11 +++---
> 3 files changed, 27 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
> index 2ed76bb61a731..7947259e67e4e 100644
> --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
> +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
> @@ -390,6 +390,7 @@ static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
> static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
> {
> struct lan966x_tx *tx = &lan966x->tx;
> + struct lan966x_rx *rx = &lan966x->rx;
> struct lan966x_tx_dcb_buf *dcb_buf;
> struct xdp_frame_bulk bq;
> struct lan966x_db *db;
> @@ -432,7 +433,8 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
> if (dcb_buf->xdp_ndo)
> xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
> else
> - xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
> + page_pool_recycle_direct(rx->page_pool,
> + dcb_buf->data.page);
> }
>
> clear = true;
> @@ -702,6 +704,7 @@ static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
> int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
> struct xdp_frame *xdpf,
> struct page *page,
> + u32 len,
Agreed with Olek regarding the argument count reduction here; roughly what
I have in mind is sketched below.
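(Type and field names here are made up, purely to illustrate.) Instead of
growing the parameter list, the two callers could fill a small descriptor:

	struct lan966x_xdp_tx_req {
		struct xdp_frame *xdpf;	/* set on the ndo_xdp_xmit path */
		struct page *page;	/* set on the XDP_TX path */
		u32 len;
		bool dma_map;
	};

	int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
				   struct lan966x_xdp_tx_req *req);

Or simply two thin helpers, one per caller, sharing the DCB setup
internally.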
> bool dma_map)
> {
> struct lan966x *lan966x = port->lan966x;
> @@ -722,6 +725,15 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
> goto out;
> }
>
> + /* Fill up the buffer */
> + next_dcb_buf = &tx->dcbs_buf[next_to_use];
> + next_dcb_buf->use_skb = false;
> + next_dcb_buf->xdp_ndo = dma_map;
a bit misleading that xdp_ndo is a bool :P
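Purely illustrative (this enum does not exist in the driver), but something
along these lines would be self-documenting instead of overloading a bool:

	enum lan966x_xdp_tx_src {
		LAN966X_XDP_TX_PAGE,	/* XDP_TX: page from our own pool */
		LAN966X_XDP_TX_NDO,	/* ndo_xdp_xmit: needs dma map/unmap */
	};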
> + next_dcb_buf->len = len + IFH_LEN_BYTES;
> + next_dcb_buf->used = true;
> + next_dcb_buf->ptp = false;
> + next_dcb_buf->dev = port->dev;
> +
> /* Generate new IFH */
> if (dma_map) {
> if (xdpf->headroom < IFH_LEN_BYTES) {
> @@ -736,16 +748,18 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
>
> dma_addr = dma_map_single(lan966x->dev,
> xdpf->data - IFH_LEN_BYTES,
> - xdpf->len + IFH_LEN_BYTES,
> + len + IFH_LEN_BYTES,
> DMA_TO_DEVICE);
> if (dma_mapping_error(lan966x->dev, dma_addr)) {
> ret = NETDEV_TX_OK;
> goto out;
> }
>
> + next_dcb_buf->data.xdpf = xdpf;
> +
> /* Setup next dcb */
> lan966x_fdma_tx_setup_dcb(tx, next_to_use,
> - xdpf->len + IFH_LEN_BYTES,
> + len + IFH_LEN_BYTES,
> dma_addr);
> } else {
> ifh = page_address(page) + XDP_PACKET_HEADROOM;
> @@ -756,25 +770,18 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
> dma_addr = page_pool_get_dma_addr(page);
> dma_sync_single_for_device(lan966x->dev,
> dma_addr + XDP_PACKET_HEADROOM,
> - xdpf->len + IFH_LEN_BYTES,
> + len + IFH_LEN_BYTES,
> DMA_TO_DEVICE);
>
> + next_dcb_buf->data.page = page;
> +
> /* Setup next dcb */
> lan966x_fdma_tx_setup_dcb(tx, next_to_use,
> - xdpf->len + IFH_LEN_BYTES,
> + len + IFH_LEN_BYTES,
> dma_addr + XDP_PACKET_HEADROOM);
> }
>
> - /* Fill up the buffer */
> - next_dcb_buf = &tx->dcbs_buf[next_to_use];
> - next_dcb_buf->use_skb = false;
> - next_dcb_buf->data.xdpf = xdpf;
> - next_dcb_buf->xdp_ndo = dma_map;
> - next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
> next_dcb_buf->dma_addr = dma_addr;
> - next_dcb_buf->used = true;
> - next_dcb_buf->ptp = false;
> - next_dcb_buf->dev = port->dev;
>
> /* Start the transmission */
> lan966x_fdma_tx_start(tx, next_to_use);
> diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
> index 851afb0166b19..59da35a2c93d4 100644
> --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
> +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
> @@ -243,6 +243,7 @@ struct lan966x_tx_dcb_buf {
> union {
> struct sk_buff *skb;
> struct xdp_frame *xdpf;
> + struct page *page;
> } data;
> u32 len;
> u32 used : 1;
> @@ -544,6 +545,7 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
> int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
> struct xdp_frame *frame,
> struct page *page,
> + u32 len,
> bool dma_map);
> int lan966x_fdma_change_mtu(struct lan966x *lan966x);
> void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
> diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
> index 2e6f486ec67d7..a8ad1f4e431cb 100644
> --- a/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
> +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c
> @@ -62,7 +62,7 @@ int lan966x_xdp_xmit(struct net_device *dev,
> struct xdp_frame *xdpf = frames[i];
> int err;
>
> - err = lan966x_fdma_xmit_xdpf(port, xdpf, NULL, true);
> + err = lan966x_fdma_xmit_xdpf(port, xdpf, NULL, xdpf->len, true);
> if (err)
> break;
>
> @@ -76,7 +76,6 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
> {
> struct bpf_prog *xdp_prog = port->xdp_prog;
> struct lan966x *lan966x = port->lan966x;
> - struct xdp_frame *xdpf;
> struct xdp_buff xdp;
> u32 act;
>
> @@ -90,11 +89,9 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
> case XDP_PASS:
> return FDMA_PASS;
> case XDP_TX:
> - xdpf = xdp_convert_buff_to_frame(&xdp);
> - if (!xdpf)
> - return FDMA_DROP;
> -
> - return lan966x_fdma_xmit_xdpf(port, xdpf, page, false) ?
> + return lan966x_fdma_xmit_xdpf(port, NULL, page,
> + data_len - IFH_LEN_BYTES,
> + false) ?
> FDMA_DROP : FDMA_TX;
> case XDP_REDIRECT:
> if (xdp_do_redirect(port->dev, &xdp, xdp_prog))
> --
> 2.38.0
>