Message-ID: <a3caf088-3a47-4036-85b6-906141f6b17f@nvidia.com>
Date: Fri, 13 Jun 2025 15:03:54 -0700
From: Fenghua Yu <fenghuay@...dia.com>
To: Yi Sun <yi.sun@...el.com>, dave.jiang@...el.com,
 vinicius.gomes@...el.com, dmaengine@...r.kernel.org,
 linux-kernel@...r.kernel.org
Cc: gordon.jin@...el.com, anil.s.keshavamurthy@...el.com,
 philip.lantz@...el.com
Subject: Re: [PATCH 2/2] dmaengine: idxd: Add Max SGL Size Support for DSA3.0

Hi, Yi,

On 6/13/25 09:18, Yi Sun wrote:
> Certain DSA 3.0 opcodes, such as Gather copy and Gather reduce requires max
s/reduce requires/reduce, require/
> SGL configured for workqueues prior to support these opcodes.
s/prior to support/prior to supporting/
>
> Configure the maximum scatter-gather list (SGL) size for workqueues during
> setup on the supported HW. Application can then properly handle the SGL
> size without explicitly setting it.
>
> Signed-off-by: Yi Sun <yi.sun@...el.com>
> Co-developed-by: Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>
> Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>
>
> diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
> index 5cf419fe6b46..1c10b030bea7 100644
> --- a/drivers/dma/idxd/device.c
> +++ b/drivers/dma/idxd/device.c
> @@ -375,6 +375,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
>   	memset(wq->name, 0, WQ_NAME_SIZE);
>   	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
>   	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
> +	idxd_wq_set_init_max_sgl_size(idxd, wq);
>   	if (wq->opcap_bmap)
>   		bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
>   }
> @@ -974,6 +975,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
>   	/* bytes 12-15 */
>   	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
>   	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
> +	if (idxd_sgl_supported(idxd))
> +		wq->wqcfg->max_sgl_shift = ilog2(wq->max_sgl_size);
>   
>   	/* bytes 32-63 */
>   	if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
> @@ -1152,6 +1155,8 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
>   
>   	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
>   	idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
> +	if (idxd_sgl_supported(idxd))
> +		wq->max_sgl_size = 1U << wq->wqcfg->max_sgl_shift;
>   
>   	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
>   		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
> diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
> index cc0a3fe1c957..fe5af50b58a4 100644
> --- a/drivers/dma/idxd/idxd.h
> +++ b/drivers/dma/idxd/idxd.h
> @@ -227,6 +227,7 @@ struct idxd_wq {
>   	char name[WQ_NAME_SIZE + 1];
>   	u64 max_xfer_bytes;
>   	u32 max_batch_size;
> +	u32 max_sgl_size;
>   
>   	/* Lock to protect upasid_xa access. */
>   	struct mutex uc_lock;
> @@ -348,6 +349,7 @@ struct idxd_device {
>   
>   	u64 max_xfer_bytes;
>   	u32 max_batch_size;
> +	u32 max_sgl_size;
>   	int max_groups;
>   	int max_engines;
>   	int max_rdbufs;
> @@ -692,6 +694,20 @@ static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
>   		wq->max_batch_size = max_batch_size;
>   }
>   
> +static bool idxd_sgl_supported(struct idxd_device *idxd)
> +{
> +	return idxd->hw.dsacap0.sgl_formats &&
> +	       idxd->data->type == IDXD_TYPE_DSA &&
> +	       idxd->hw.version >= DEVICE_VERSION_3;
> +}

This is not safe on DSA 1 or 2: the first check reads 
idxd->hw.dsacap0.sgl_formats, which is not a valid field on DSA 1 and 2.

You need to change the order to this for safety:

+	return idxd->data->type == IDXD_TYPE_DSA &&
+	       idxd->hw.version >= DEVICE_VERSION_3 &&
+	       idxd->hw.dsacap0.sgl_formats;
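
For reference, a minimal sketch of the complete helper with the checks 
reordered as above (untested; struct, field and macro names taken from 
this patch):

static bool idxd_sgl_supported(struct idxd_device *idxd)
{
	/*
	 * Check the device type and version first so that
	 * dsacap0.sgl_formats is only evaluated on DSA 3.0+,
	 * where that field is defined. && short-circuits, so the
	 * field is never consulted on DSA 1 or 2.
	 */
	return idxd->data->type == IDXD_TYPE_DSA &&
	       idxd->hw.version >= DEVICE_VERSION_3 &&
	       idxd->hw.dsacap0.sgl_formats;
}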

> +
> +static inline void idxd_wq_set_init_max_sgl_size(struct idxd_device *idxd,
> +						 struct idxd_wq *wq)
> +{
> +	if (idxd_sgl_supported(idxd))
> +		wq->max_sgl_size = 1U << idxd->hw.dsacap0.max_sgl_shift;
> +}
> +
>   static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
>   						  u32 max_batch_shift)
>   {
> diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
> index cc8203320d40..f37a7d7b537a 100644
> --- a/drivers/dma/idxd/init.c
> +++ b/drivers/dma/idxd/init.c
> @@ -217,6 +217,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
>   		init_completion(&wq->wq_resurrect);
>   		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
>   		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
> +		idxd_wq_set_init_max_sgl_size(idxd, wq);
>   		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
>   		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
>   		if (!wq->wqcfg) {
> @@ -585,6 +586,10 @@ static void idxd_read_caps(struct idxd_device *idxd)
>   	idxd->hw.dsacap0.bits = ioread64(idxd->reg_base + IDXD_DSACAP0_OFFSET);
>   	idxd->hw.dsacap1.bits = ioread64(idxd->reg_base + IDXD_DSACAP1_OFFSET);
>   	idxd->hw.dsacap2.bits = ioread64(idxd->reg_base + IDXD_DSACAP2_OFFSET);
> +	if (idxd_sgl_supported(idxd)) {
> +		idxd->max_sgl_size = 1U << idxd->hw.dsacap0.max_sgl_shift;
> +		dev_dbg(dev, "max sgl size: %u\n", idxd->max_sgl_size);
> +	}
>   
>   	/* read iaa cap */
>   	if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
> diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
> index 45485ecd7bb6..0401cfc95f27 100644
> --- a/drivers/dma/idxd/registers.h
> +++ b/drivers/dma/idxd/registers.h
> @@ -385,7 +385,8 @@ union wqcfg {
>   		/* bytes 12-15 */
>   		u32 max_xfer_shift:5;
>   		u32 max_batch_shift:4;
> -		u32 rsvd4:23;
> +		u32 max_sgl_shift:4;
> +		u32 rsvd4:19;
>   
>   		/* bytes 16-19 */
>   		u16 occupancy_inth;
> @@ -585,6 +586,15 @@ union evl_status_reg {
>   
>   #define IDXD_DSACAP0_OFFSET		0x180
>   union dsacap0_reg {
> +	struct {
> +		u64 max_sgl_shift:4;
> +		u64 max_gr_block_shift:4;
> +		u64 ops_inter_domain:7;
> +		u64 rsvd1:17;
> +		u64 sgl_formats:16;
> +		u64 max_sg_process:8;
> +		u64 rsvd2:8;
> +	};

Ah. The fields are defined here. I would suggest defining the fields in 
patch 1 because:

1. A reviewer (like me) may get confused when reviewing patch 1, where 
dsacap0 is defined as a union but doesn't have any fields.

2. There are fields other than max_sgl_shift. Those fields are 
irrelevant to this patch and would be better defined in patch 1.
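
To illustrate points 1 and 2, this is roughly how the union could look 
in patch 1 (field names and widths copied from the hunk above), so that 
this patch only starts using max_sgl_shift:

union dsacap0_reg {
	struct {
		u64 max_sgl_shift:4;
		u64 max_gr_block_shift:4;
		u64 ops_inter_domain:7;
		u64 rsvd1:17;
		u64 sgl_formats:16;
		u64 max_sg_process:8;
		u64 rsvd2:8;
	};
	u64 bits;
};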

>   	u64 bits;
>   };
>   

Thanks.

-Fenghua

