lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d2ac4a60-9375-df80-4f00-7eae72c72291@linux.ibm.com>
Date:   Wed, 1 Jul 2020 16:50:50 -0500
From:   Thomas Falcon <tlfalcon@...ux.ibm.com>
To:     Cristobal Forno <cforno12@...ux.ibm.com>, netdev@...r.kernel.org
Subject: Re: [PATCH] ibmvnic: store RX and TX subCRQ handle array in
 ibmvnic_sub_crq_queue struct

On 7/1/20 4:25 PM, Cristobal Forno wrote:
> Currently the driver reads RX and TX subCRQ handle array directly from
> a DMA-mapped buffer address when it needs to make a H_SEND_SUBCRQ
> hcall. This patch stores that information in the ibmvnic_sub_crq_queue
> structure instead of reading from the buffer received at login.
>   

Hi, thank you for the submission. I think it would be better, however, 
if each subCRQ structure had a member denoting its respective handle 
rather than a pointer to the handle array. This would allow us to 
discard the login_rsp buffer later when it is no longer needed.

Tom

> Signed-off-by: Cristobal Forno <cforno12@...ux.ibm.com>
> ---
>   drivers/net/ethernet/ibm/ibmvnic.c | 27 ++++++++++++++++++++-------
>   drivers/net/ethernet/ibm/ibmvnic.h |  1 +
>   2 files changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
> index 0fd7eae25fe9..ca0d88aab6da 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.c
> +++ b/drivers/net/ethernet/ibm/ibmvnic.c
> @@ -305,6 +305,7 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
>   static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>   			      struct ibmvnic_rx_pool *pool)
>   {
> +	u64 *handle_array = adapter->rx_scrq[pool->index]->handle_array;
>   	int count = pool->size - atomic_read(&pool->available);
>   	struct device *dev = &adapter->vdev->dev;
>   	int buffers_added = 0;
> @@ -314,7 +315,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>   	unsigned int offset;
>   	dma_addr_t dma_addr;
>   	unsigned char *dst;
> -	u64 *handle_array;
>   	int shift = 0;
>   	int index;
>   	int i;
> @@ -322,10 +322,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
>   	if (!pool->active)
>   		return;
>   
> -	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
> -				      be32_to_cpu(adapter->login_rsp_buf->
> -				      off_rxadd_subcrqs));
> -
>   	for (i = 0; i < count; ++i) {
>   		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
>   		if (!skb) {
> @@ -1553,8 +1549,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
>   
>   	tx_scrq = adapter->tx_scrq[queue_num];
>   	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
> -	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
> -		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
> +	handle_array = tx_scrq->handle_array;
>   
>   	index = tx_pool->free_map[tx_pool->consumer_index];
>   
> @@ -4292,6 +4287,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
>   	struct net_device *netdev = adapter->netdev;
>   	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
>   	struct ibmvnic_login_buffer *login = adapter->login_buf;
> +	int num_tx_pools;
> +	int num_rx_pools;
>   	int i;
>   
>   	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
> @@ -4326,6 +4323,22 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
>   		ibmvnic_remove(adapter->vdev);
>   		return -EIO;
>   	}
> +
> +	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
> +	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
> +
> +	for (i = 0; i < num_tx_pools; i++)
> +		adapter->tx_scrq[i]->handle_array =
> +			(u64 *)((u8 *)(adapter->login_rsp_buf) +
> +				be32_to_cpu(adapter->login_rsp_buf->
> +					    off_txsubm_subcrqs));
> +
> +	for (i = 0; i < num_rx_pools; i++)
> +		adapter->rx_scrq[i]->handle_array =
> +			(u64 *)((u8 *)(adapter->login_rsp_buf) +
> +				be32_to_cpu(adapter->login_rsp_buf->
> +					    off_rxadd_subcrqs));
> +
>   	release_login_buffer(adapter);
>   	complete(&adapter->init_done);
>   
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
> index f8416e1d4cf0..e51c72d1e357 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.h
> +++ b/drivers/net/ethernet/ibm/ibmvnic.h
> @@ -875,6 +875,7 @@ struct ibmvnic_sub_crq_queue {
>   	struct ibmvnic_adapter *adapter;
>   	atomic_t used;
>   	char name[32];
> +	u64 *handle_array;
>   };
>   
>   struct ibmvnic_long_term_buff {

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ