Message-ID: <20240105105059.GS31813@kernel.org>
Date: Fri, 5 Jan 2024 10:50:59 +0000
From: Simon Horman <horms@...nel.org>
To: Shinas Rasheed <srasheed@...vell.com>
Cc: netdev@...r.kernel.org, linux-kernel@...r.kernel.org, hgani@...vell.com,
	vimleshk@...vell.com, sedara@...vell.com, egallen@...hat.com,
	mschmidt@...hat.com, pabeni@...hat.com, kuba@...nel.org,
	wizhao@...hat.com, kheib@...hat.com, konguyen@...hat.com,
	Veerasenareddy Burru <vburru@...vell.com>,
	Satananda Burla <sburla@...vell.com>,
	"David S. Miller" <davem@...emloft.net>,
	Eric Dumazet <edumazet@...gle.com>
Subject: Re: [PATCH net-next v2 4/8] octeon_ep_vf: add Tx/Rx ring resource
 setup and cleanup

On Sat, Dec 23, 2023 at 05:39:56AM -0800, Shinas Rasheed wrote:
> Implement Tx/Rx ring resource allocation and cleanup.
> 
> Signed-off-by: Shinas Rasheed <srasheed@...vell.com>

Hi Shinas,

Some minor feedback from my side.

...

> diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c

...

> +/**
> + * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
> + *
> + * @oq: Octeon Rx queue data structure.
> + *
> + * Return: 0, if successfully filled receive buffers for all descriptors.
> + *         -1, if failed to allocate a buffer or failed to map for DMA.

I think it is more idiomatic to use well-known error codes
in kernel code. In this case, perhaps -ENOMEM.

Likewise elsewhere in this patch.
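
Perhaps something like this (untested):

	 * Return: 0, if successfully filled receive buffers for all descriptors.
	 *         -ENOMEM, if failed to allocate a buffer or failed to map for DMA.

and, at the end of the unwind path:

	return -ENOMEM;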

> + */
> +static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
> +{
> +	struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
> +	struct page *page;
> +	u32 i;
> +
> +	for (i = 0; i < oq->max_count; i++) {
> +		page = dev_alloc_page();
> +		if (unlikely(!page)) {
> +			dev_err(oq->dev, "Rx buffer alloc failed\n");
> +			goto rx_buf_alloc_err;
> +		}
> +		desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
> +						       PAGE_SIZE,
> +						       DMA_FROM_DEVICE);
> +		if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
> +			dev_err(oq->dev,
> +				"OQ-%d buffer alloc: DMA mapping error!\n",
> +				oq->q_no);
> +			put_page(page);
> +			goto dma_map_err;

nit: I think put_page() can be moved to the dma_map_err label.
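
That is, something like this (untested), so the current page is
released in the unwind path before the loop unmaps and frees the
earlier entries:

	if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
		dev_err(oq->dev,
			"OQ-%d buffer alloc: DMA mapping error!\n",
			oq->q_no);
		goto dma_map_err;
	}
	...
dma_map_err:
	put_page(page);
rx_buf_alloc_err:
	while (i) {
		...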

> +		}
> +		oq->buff_info[i].page = page;
> +	}
> +
> +	return 0;
> +
> +dma_map_err:
> +rx_buf_alloc_err:
> +	while (i) {
> +		i--;
> +		dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
> +		put_page(oq->buff_info[i].page);
> +		oq->buff_info[i].page = NULL;
> +	}
> +
> +	return -1;
> +}
> +
> +/**
> + * octep_vf_setup_oq() - Setup a Rx queue.
> + *
> + * @oct: Octeon device private data structure.
> + * @q_no: Rx queue number to be setup.
> + *
> + * Allocate resources for a Rx queue.
> + */
> +static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
> +{
> +	struct octep_vf_oq *oq;
> +	u32 desc_ring_size;
> +
> +	oq = vzalloc(sizeof(*oq));
> +	if (!oq)
> +		goto create_oq_fail;
> +	oct->oq[q_no] = oq;
> +
> +	oq->octep_vf_dev = oct;
> +	oq->netdev = oct->netdev;
> +	oq->dev = &oct->pdev->dev;
> +	oq->q_no = q_no;
> +	oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
> +	oq->ring_size_mask = oq->max_count - 1;
> +	oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
> +	oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
> +
> +	/* When the hardware/firmware supports additional capabilities,
> +	 * an additional header is filled in by Octeon after the length
> +	 * field in Rx packets. This header contains additional packet
> +	 * information.
> +	 */
> +	if (oct->fw_info.rx_ol_flags)
> +		oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
> +
> +	oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
> +
> +	desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
> +	oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
> +					   &oq->desc_ring_dma, GFP_KERNEL);
> +
> +	if (unlikely(!oq->desc_ring)) {
> +		dev_err(oq->dev,
> +			"Failed to allocate DMA memory for OQ-%d !!\n", q_no);
> +		goto desc_dma_alloc_err;
> +	}
> +
> +	oq->buff_info = (struct octep_vf_rx_buffer *)
> +			vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);

nit: there is no need to cast a void pointer.

	oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);

> +	if (unlikely(!oq->buff_info)) {
> +		dev_err(&oct->pdev->dev,
> +			"Failed to allocate buffer info for OQ-%d\n", q_no);
> +		goto buf_list_err;
> +	}
> +
> +	if (octep_vf_oq_fill_ring_buffers(oq))
> +		goto oq_fill_buff_err;
> +
> +	octep_vf_oq_reset_indices(oq);
> +	oct->hw_ops.setup_oq_regs(oct, q_no);
> +	oct->num_oqs++;
> +
> +	return 0;
> +
> +oq_fill_buff_err:
> +	vfree(oq->buff_info);
> +	oq->buff_info = NULL;
> +buf_list_err:
> +	dma_free_coherent(oq->dev, desc_ring_size,
> +			  oq->desc_ring, oq->desc_ring_dma);
> +	oq->desc_ring = NULL;
> +desc_dma_alloc_err:
> +	vfree(oq);
> +	oct->oq[q_no] = NULL;
> +create_oq_fail:
> +	return -1;
> +}

...

> +/**
> + * octep_vf_free_oq() - Free Rx queue resources.
> + *
> + * @oq: Octeon Rx queue data structure.
> + *
> + * Free all resources of a Rx queue.
> + */
> +static int octep_vf_free_oq(struct octep_vf_oq *oq)
> +{
> +	struct octep_vf_device *oct = oq->octep_vf_dev;
> +	int q_no = oq->q_no;
> +
> +	octep_vf_oq_free_ring_buffers(oq);
> +
> +	if (oq->buff_info)
> +		vfree(oq->buff_info);

nit: there is no need to check for NULL as vfree() can handle
     a NULL argument.
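
i.e., simply:

	vfree(oq->buff_info);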

> +
> +	if (oq->desc_ring)
> +		dma_free_coherent(oq->dev,
> +				  oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
> +				  oq->desc_ring, oq->desc_ring_dma);
> +
> +	vfree(oq);
> +	oct->oq[q_no] = NULL;
> +	oct->num_oqs--;
> +	return 0;
> +}

...

> +/**
> + * octep_vf_free_iq() - Free Tx queue resources.
> + *
> + * @iq: Octeon Tx queue data structure.
> + *
> + * Free all the resources allocated for a Tx queue.
> + */
> +static void octep_vf_free_iq(struct octep_vf_iq *iq)
> +{
> +	struct octep_vf_device *oct = iq->octep_vf_dev;
> +	u64 desc_ring_size, sglist_size;
> +	int q_no = iq->q_no;
> +
> +	desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
> +
> +	if (iq->buff_info)
> +		vfree(iq->buff_info);

Ditto.
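
Likewise:

	vfree(iq->buff_info);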

> +
> +	if (iq->desc_ring)
> +		dma_free_coherent(iq->dev, desc_ring_size,
> +				  iq->desc_ring, iq->desc_ring_dma);
> +
> +	sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
> +		      CFG_GET_IQ_NUM_DESC(oct->conf);
> +	if (iq->sglist)
> +		dma_free_coherent(iq->dev, sglist_size,
> +				  iq->sglist, iq->sglist_dma);
> +
> +	vfree(iq);
> +	oct->iq[q_no] = NULL;
> +	oct->num_iqs--;
>  }

...
