Message-ID: <464fd490273bf0df9f0725a69aa1c890705d0513.camel@gmail.com>
Date:   Tue, 10 Jan 2023 09:29:39 -0800
From:   Alexander H Duyck <alexander.duyck@...il.com>
To:     Gerhard Engleder <gerhard@...leder-embedded.com>,
        netdev@...r.kernel.org
Cc:     davem@...emloft.net, kuba@...nel.org, edumazet@...gle.com,
        pabeni@...hat.com, Saeed Mahameed <saeed@...nel.org>
Subject: Re: [PATCH net-next v4 08/10] tsnep: Add RX queue info for XDP
 support

On Mon, 2023-01-09 at 20:15 +0100, Gerhard Engleder wrote:
> Register xdp_rxq_info with page_pool memory model. This is needed for
> XDP buffer handling.
> 
> Signed-off-by: Gerhard Engleder <gerhard@...leder-embedded.com>
> Reviewed-by: Saeed Mahameed <saeed@...nel.org>
> ---
>  drivers/net/ethernet/engleder/tsnep.h      |  2 ++
>  drivers/net/ethernet/engleder/tsnep_main.c | 39 ++++++++++++++++------
>  2 files changed, 31 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
> index 855738d31d73..2268ff793edf 100644
> --- a/drivers/net/ethernet/engleder/tsnep.h
> +++ b/drivers/net/ethernet/engleder/tsnep.h
> @@ -134,6 +134,8 @@ struct tsnep_rx {
>  	u32 dropped;
>  	u32 multicast;
>  	u32 alloc_failed;
> +
> +	struct xdp_rxq_info xdp_rxq;
>  };
>  

Rather than placing it in your Rx queue structure it might be better to
look at placing it in the tsnep_queue struct. The fact is this is more
closely associated with the napi struct than with your Rx ring, so
moving it there might make more sense since you can avoid a bunch of
extra overhead from having to plumb the napi info down to the ring.

Basically what it comes down to is that it is much easier to access
queue[i].rx than it is for the rx to get access to queue[i].napi.
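
Roughly what I have in mind (just a sketch, only the fields relevant
here are shown):

struct tsnep_queue {
	struct tsnep_adapter *adapter;

	struct tsnep_tx *tx;
	struct tsnep_rx *rx;

	struct napi_struct napi;
	/* lives next to the napi instance it is registered against */
	struct xdp_rxq_info xdp_rxq;

	...
};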

>  struct tsnep_queue {
> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
> index 0c9669edb2dd..451ad1849b9d 100644
> --- a/drivers/net/ethernet/engleder/tsnep_main.c
> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
> @@ -792,6 +792,9 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>  		entry->page = NULL;
>  	}
>  
> +	if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
> +		xdp_rxq_info_unreg(&rx->xdp_rxq);
> +
>  	if (rx->page_pool)
>  		page_pool_destroy(rx->page_pool);
>  
> @@ -807,7 +810,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>  	}
>  }
>  
> -static int tsnep_rx_ring_init(struct tsnep_rx *rx)
> +static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
>  {
>  	struct device *dmadev = rx->adapter->dmadev;
>  	struct tsnep_rx_entry *entry;
> @@ -850,6 +853,15 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
>  		goto failed;
>  	}
>  
> +	retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
> +				  rx->queue_index, napi_id);
> +	if (retval)
> +		goto failed;
> +	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
> +					    rx->page_pool);
> +	if (retval)
> +		goto failed;
> +
>  	for (i = 0; i < TSNEP_RING_SIZE; i++) {
>  		entry = &rx->entry[i];
>  		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
> @@ -1104,7 +1116,8 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
>  }
>  
>  static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
> -			 int queue_index, struct tsnep_rx *rx)
> +			 unsigned int napi_id, int queue_index,
> +			 struct tsnep_rx *rx)
>  {
>  	dma_addr_t dma;
>  	int retval;
> @@ -1118,7 +1131,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
>  	else
>  		rx->offset = TSNEP_SKB_PAD;
>  
> -	retval = tsnep_rx_ring_init(rx);
> +	retval = tsnep_rx_ring_init(rx, napi_id);
>  	if (retval)
>  		return retval;
>  
> @@ -1245,14 +1258,19 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
>  static int tsnep_netdev_open(struct net_device *netdev)
>  {
>  	struct tsnep_adapter *adapter = netdev_priv(netdev);
> -	int i;
> -	void __iomem *addr;
>  	int tx_queue_index = 0;
>  	int rx_queue_index = 0;
> -	int retval;
> +	unsigned int napi_id;
> +	void __iomem *addr;
> +	int i, retval;
>  
>  	for (i = 0; i < adapter->num_queues; i++) {
>  		adapter->queue[i].adapter = adapter;
> +
> +		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
> +			       tsnep_poll);
> +		napi_id = adapter->queue[i].napi.napi_id;
> +

This is a good example of what I am referring to.
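
With the xdp_rxq in tsnep_queue the registration could then sit right
next to the napi setup instead of being buried in the ring init,
something like (untested, error handling mostly omitted):

	netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
		       tsnep_poll);

	if (adapter->queue[i].rx) {
		retval = xdp_rxq_info_reg(&adapter->queue[i].xdp_rxq,
					  adapter->netdev, rx_queue_index,
					  adapter->queue[i].napi.napi_id);
		if (retval)
			goto failed;
	}

and the napi_id argument you are threading through tsnep_rx_open() and
tsnep_rx_ring_init() goes away entirely.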

>  		if (adapter->queue[i].tx) {
>  			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
>  			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
> @@ -1263,7 +1281,7 @@ static int tsnep_netdev_open(struct net_device *netdev)
>  		}
>  		if (adapter->queue[i].rx) {
>  			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
> -			retval = tsnep_rx_open(adapter, addr,
> +			retval = tsnep_rx_open(adapter, addr, napi_id,
>  					       rx_queue_index,
>  					       adapter->queue[i].rx);
>  			if (retval)
> @@ -1295,8 +1313,6 @@ static int tsnep_netdev_open(struct net_device *netdev)
>  		goto phy_failed;
>  
>  	for (i = 0; i < adapter->num_queues; i++) {
> -		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
> -			       tsnep_poll);
>  		napi_enable(&adapter->queue[i].napi);
>  
>  		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);

What you could do here is pull all the napi_add/napi_enable and
xdp_rxq_info setup into one function that handles all the enabling,
see the sketch below.
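
Something along these lines, roughly (untested, tsnep_queue_open is
just a name I am making up here):

static int tsnep_queue_open(struct tsnep_adapter *adapter,
			    struct tsnep_queue *queue)
{
	struct tsnep_rx *rx = queue->rx;
	int retval;

	netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);

	if (rx) {
		/* the mem model registration needs the page pool, so
		 * this has to run after tsnep_rx_open() has set it up
		 */
		retval = xdp_rxq_info_reg(&queue->xdp_rxq,
					  adapter->netdev,
					  rx->queue_index,
					  queue->napi.napi_id);
		if (retval)
			goto failed;
		retval = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
						    MEM_TYPE_PAGE_POOL,
						    rx->page_pool);
		if (retval)
			goto failed;
	}

	napi_enable(&queue->napi);

	return 0;

failed:
	if (xdp_rxq_info_is_reg(&queue->xdp_rxq))
		xdp_rxq_info_unreg(&queue->xdp_rxq);
	netif_napi_del(&queue->napi);
	return retval;
}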

> @@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
>  			tsnep_rx_close(adapter->queue[i].rx);
>  		if (adapter->queue[i].tx)
>  			tsnep_tx_close(adapter->queue[i].tx);
> +
> +		netif_napi_del(&adapter->queue[i].napi);
>  	}
>  	return retval;
>  }
> @@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
>  		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
>  
>  		napi_disable(&adapter->queue[i].napi);
> -		netif_napi_del(&adapter->queue[i].napi);
>  
>  		tsnep_free_irq(&adapter->queue[i], i == 0);
>  

Likewise here you could take care of all the same items in one
function, with the page pool only being freed after you have already
unregistered the xdp_rxq and deleted the napi instance.
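
i.e. something like (again untested, name made up):

static void tsnep_queue_close(struct tsnep_queue *queue)
{
	napi_disable(&queue->napi);

	/* unregister the xdp_rxq before tsnep_rx_ring_cleanup()
	 * destroys the page pool backing it
	 */
	if (queue->rx && xdp_rxq_info_is_reg(&queue->xdp_rxq))
		xdp_rxq_info_unreg(&queue->xdp_rxq);

	netif_napi_del(&queue->napi);
}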

> @@ -1343,6 +1360,8 @@ static int tsnep_netdev_close(struct net_device *netdev)
>  			tsnep_rx_close(adapter->queue[i].rx);
>  		if (adapter->queue[i].tx)
>  			tsnep_tx_close(adapter->queue[i].tx);
> +
> +		netif_napi_del(&adapter->queue[i].napi);
>  	}
>  
>  	return 0;
