Message-ID: <cb1ef2d3-4750-40d0-85f9-df6a8ed3ec22@intel.com>
Date: Fri, 27 Jun 2025 15:10:21 +0200
From: Alexander Lobakin <aleksander.lobakin@...el.com>
To: Joshua Hay <joshua.a.hay@...el.com>
CC: <intel-wired-lan@...ts.osuosl.org>, <netdev@...r.kernel.org>, Luigi Rizzo
	<lrizzo@...gle.com>, Brian Vazquez <brianvv@...gle.com>, Madhu Chittim
	<madhu.chittim@...el.com>
Subject: Re: [Intel-wired-lan] [PATCH net 3/5] idpf: replace flow scheduling
 buffer ring with buffer pool

From: Joshua Hay <joshua.a.hay@...el.com>
Date: Wed, 25 Jun 2025 09:11:54 -0700

> Replace the TxQ buffer ring with one large pool/array of buffers (only
> for flow scheduling). The completion tag passed to HW through the

[...]

> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> index cdecf558d7ec..25eea632a966 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> @@ -13,6 +13,7 @@ struct idpf_tx_stash {
>  	struct libeth_sqe buf;
>  };
>  
> +#define idpf_tx_buf_next(buf)  (*(u32 *)&(buf)->priv)

Align it with the next line, i.e. use two tabs instead of two spaces.
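I.e. something like (whitespace change only):

#define idpf_tx_buf_next(buf)		(*(u32 *)&(buf)->priv)

so that the expansion lines up with idpf_tx_buf_compl_tag() below.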

>  #define idpf_tx_buf_compl_tag(buf)	(*(u32 *)&(buf)->priv)
>  LIBETH_SQE_CHECK_PRIV(u32);
>  
> @@ -91,7 +92,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
>  		return;
>  
>  	/* Free all the Tx buffer sk_buffs */
> -	for (i = 0; i < txq->desc_count; i++)
> +	for (i = 0; i < txq->buf_pool_size; i++)
>  		libeth_tx_complete(&txq->tx_buf[i], &cp);
>  
>  	kfree(txq->tx_buf);
> @@ -205,7 +206,11 @@ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
>  	/* Allocate book keeping buffers only. Buffers to be supplied to HW
>  	 * are allocated by kernel network stack and received as part of skb
>  	 */
> -	buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
> +	if (idpf_queue_has(FLOW_SCH_EN, tx_q))
> +		tx_q->buf_pool_size = U16_MAX;

3.2 MB per queue... OTOH, one Rx queue with 512 descriptors eats 2.1 MB,
so it's not that bad.
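For reference, that's U16_MAX * sizeof(struct idpf_tx_buf); assuming the
sqe is ~50 bytes, 65535 * 50 B ~= 3.2 MB.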

> +	else
> +		tx_q->buf_pool_size = tx_q->desc_count;
> +	buf_size = sizeof(struct idpf_tx_buf) * tx_q->buf_pool_size;

array_size() would work if you really want it, but the proper way would
be to replace the kzalloc() below with kcalloc().
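I.e. something like (untested):

	tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
			       GFP_KERNEL);

kcalloc() gives you the multiplication overflow check for free, which is
the whole point of array_size() anyway.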

>  	tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
>  	if (!tx_q->tx_buf)
>  		return -ENOMEM;

[...]

> +static bool idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u16 buf_id,

Just use u32 for function arguments and on-stack variables.
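I.e.:

static bool idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
			       struct libeth_sq_napi_stats *cleaned,
			       int budget)

u16 args and locals don't save anything and may only force the compiler
to emit extra truncation instructions.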

> +			       struct libeth_sq_napi_stats *cleaned,
> +			       int budget)
>  {
> -	u16 idx = compl_tag & txq->compl_tag_bufid_m;
> +	u16 idx = buf_id & txq->compl_tag_bufid_m;
>  	struct idpf_tx_buf *tx_buf = NULL;
>  	struct libeth_cq_pp cp = {
>  		.dev	= txq->dev,

[...]

>  	if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
>  		if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
>  						      &tx_params.compl_tag)))
>  			return idpf_tx_drop_skb(tx_q, skb);
> +		buf_id = tx_params.compl_tag;

So this tx_params field needs to be renamed, as its name no longer
reflects its purpose.
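E.g. something like (name just a suggestion):

		if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
						      &tx_params.buf_id)))

since it now carries a buffer ID rather than a completion tag.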

>  
>  		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
>  		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;

Thanks,
Olek
