Message-ID: <3a981eac-6036-4a77-aed1-61fcc6c9c2f5@amd.com>
Date: Thu, 14 Aug 2025 20:43:43 +0000
From: "Limonciello, Mario" <Mario.Limonciello@....com>
To: "Hou, Lizhi" <lizhi.hou@....com>, "ogabbay@...nel.org"
	<ogabbay@...nel.org>, "quic_jhugo@...cinc.com" <quic_jhugo@...cinc.com>,
	"jacek.lawrynowicz@...ux.intel.com" <jacek.lawrynowicz@...ux.intel.com>,
	"dri-devel@...ts.freedesktop.org" <dri-devel@...ts.freedesktop.org>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>, "Zhen, Max"
	<Max.Zhen@....com>, "Santan, Sonal" <sonal.santan@....com>
Subject: Re: [PATCH V1] accel/amdxdna: Add a function to walk hardware
 contexts

On 8/14/25 3:29 PM, Lizhi Hou wrote:
> The code for walking the hardware contexts created by a process is
> duplicated in multiple spots. Add a function, amdxdna_hwctx_walk(), and
> use it in all of those spots.
> 
> hwctx_srcu and dev_lock are sufficient to protect the hardware context
> list, so remove hwctx_lock.
> 
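
To summarize the new scheme for other reviewers: amdxdna_hwctx_walk()
takes the hwctx_srcu read lock, iterates the client's context xarray, and
stops at the first callback that returns non-zero, propagating that value
back to the caller. Call sites then boil down to something like this (the
callback name and body are only illustrative):

	static int my_hwctx_cb(struct amdxdna_hwctx *hwctx, void *arg)
	{
		/* per-context work; a non-zero return aborts the walk */
		return 0;
	}

	ret = amdxdna_hwctx_walk(client, NULL, my_hwctx_cb);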
> Signed-off-by: Lizhi Hou <lizhi.hou@....com>
> ---
>   drivers/accel/amdxdna/aie2_ctx.c        | 38 ++++++-----
>   drivers/accel/amdxdna/aie2_message.c    | 21 +++---
>   drivers/accel/amdxdna/aie2_pci.c        | 86 +++++++++++--------------
>   drivers/accel/amdxdna/amdxdna_ctx.c     | 26 ++++++--
>   drivers/accel/amdxdna/amdxdna_ctx.h     |  8 +--
>   drivers/accel/amdxdna/amdxdna_pci_drv.c |  7 +-
>   drivers/accel/amdxdna/amdxdna_pci_drv.h |  2 -
>   7 files changed, 97 insertions(+), 91 deletions(-)
> 
> diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
> index 910ffb7051f4..b76877179b62 100644
> --- a/drivers/accel/amdxdna/aie2_ctx.c
> +++ b/drivers/accel/amdxdna/aie2_ctx.c
> @@ -133,11 +133,20 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
>   	dma_fence_put(fence);
>   }
>   
> +static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
> +{
> +	struct amdxdna_dev *xdna = hwctx->client->xdna;
> +
> +	aie2_hwctx_wait_for_idle(hwctx);

This is an existing problem, but shouldn't aie2_hwctx_wait_for_idle() be 
checking the return code of dma_fence_wait_timeout()?

Otherwise you can end up in a case where you put() the fence prematurely.
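
Untested sketch of what I mean; the fence lookup is a placeholder for
however the function currently obtains the fence, and the timeout value
is arbitrary:

	static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
	{
		struct dma_fence *fence;
		long ret;

		/* placeholder for the existing fence lookup */
		fence = get_last_out_fence(hwctx);
		if (!fence)
			return;

		ret = dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
		if (ret <= 0) {
			/* 0 means the wait timed out, negative is an error;
			 * either way the last command may still be in flight.
			 */
			XDNA_ERR(hwctx->client->xdna,
				 "Wait for idle failed, ret %ld", ret);
		}
		dma_fence_put(fence);
	}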

> +	aie2_hwctx_stop(xdna, hwctx, NULL);
> +	aie2_hwctx_status_shift_stop(hwctx);
> +
> +	return 0;
> +}
> +
>   void aie2_hwctx_suspend(struct amdxdna_client *client)
>   {
>   	struct amdxdna_dev *xdna = client->xdna;
> -	struct amdxdna_hwctx *hwctx;
> -	unsigned long hwctx_id;
>   
>   	/*
>   	 * Command timeout is unlikely. But if it happens, it doesn't
> @@ -145,19 +154,22 @@ void aie2_hwctx_suspend(struct amdxdna_client *client)
>   	 * and abort all commands.
>   	 */
>   	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
> -	guard(mutex)(&client->hwctx_lock);
> -	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
> -		aie2_hwctx_wait_for_idle(hwctx);
> -		aie2_hwctx_stop(xdna, hwctx, NULL);
> -		aie2_hwctx_status_shift_stop(hwctx);
> -	}
> +	amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
> +}
> +
> +static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
> +{
> +	struct amdxdna_dev *xdna = hwctx->client->xdna;
> +
> +	aie2_hwctx_status_restore(hwctx);
> +	aie2_hwctx_restart(xdna, hwctx);
> +
> +	return 0;

return aie2_hwctx_restart(xdna, hwctx);
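
That way amdxdna_hwctx_walk() would stop at the first context that fails
to restart and propagate the error to the walk's caller, instead of
carrying on as if nothing happened. (Assuming aie2_hwctx_restart()
returns an int; if it's void today, its return type would need to change
as well.)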

>   }
>   
>   void aie2_hwctx_resume(struct amdxdna_client *client)
>   {
>   	struct amdxdna_dev *xdna = client->xdna;
> -	struct amdxdna_hwctx *hwctx;
> -	unsigned long hwctx_id;
>   
>   	/*
>   	 * The resume path cannot guarantee that mailbox channel can be
> @@ -165,11 +177,7 @@ void aie2_hwctx_resume(struct amdxdna_client *client)
>   	 * mailbox channel, error will return.
>   	 */
>   	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
> -	guard(mutex)(&client->hwctx_lock);
> -	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
> -		aie2_hwctx_status_restore(hwctx);
> -		aie2_hwctx_restart(xdna, hwctx);
> -	}
> +	amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
>   }
>   
>   static void
> diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
> index 82412eec9a4b..9caad083543d 100644
> --- a/drivers/accel/amdxdna/aie2_message.c
> +++ b/drivers/accel/amdxdna/aie2_message.c
> @@ -290,18 +290,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6
>   	return 0;
>   }
>   
> +static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
> +{
> +	u32 *bitmap = arg;
> +
> +	*bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);
> +
> +	return 0;
> +}
> +
>   int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
>   		      u32 size, u32 *cols_filled)
>   {
>   	DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
>   	struct amdxdna_dev *xdna = ndev->xdna;
>   	struct amdxdna_client *client;
> -	struct amdxdna_hwctx *hwctx;
> -	unsigned long hwctx_id;
>   	dma_addr_t dma_addr;
>   	u32 aie_bitmap = 0;
>   	u8 *buff_addr;
> -	int ret, idx;
> +	int ret;
>   
>   	buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
>   					  DMA_FROM_DEVICE, GFP_KERNEL);
> @@ -309,12 +316,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
>   		return -ENOMEM;
>   
>   	/* Go through each hardware context and mark the AIE columns that are active */
> -	list_for_each_entry(client, &xdna->client_list, node) {
> -		idx = srcu_read_lock(&client->hwctx_srcu);
> -		amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
> -			aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
> -		srcu_read_unlock(&client->hwctx_srcu, idx);
> -	}
> +	list_for_each_entry(client, &xdna->client_list, node)
> +		amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);
>   
>   	*cols_filled = 0;
>   	req.dump_buff_addr = dma_addr;
> diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
> index 6fc3191c3097..b3ae03d05fb0 100644
> --- a/drivers/accel/amdxdna/aie2_pci.c
> +++ b/drivers/accel/amdxdna/aie2_pci.c
> @@ -10,6 +10,7 @@
>   #include <drm/drm_managed.h>
>   #include <drm/drm_print.h>
>   #include <drm/gpu_scheduler.h>
> +#include <linux/cleanup.h>
>   #include <linux/errno.h>
>   #include <linux/firmware.h>
>   #include <linux/iommu.h>
> @@ -779,65 +780,56 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client,
>   	return ret;
>   }
>   
> +static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
> +{
> +	struct amdxdna_drm_query_hwctx __user *buf, *tmp __free(kfree) = NULL;
> +	struct amdxdna_drm_get_info *get_info_args = arg;
> +
> +	if (get_info_args->buffer_size < sizeof(*tmp))
> +		return -EINVAL;
> +
> +	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
> +	if (!tmp)
> +		return -ENOMEM;
> +
> +	tmp->pid = hwctx->client->pid;
> +	tmp->context_id = hwctx->id;
> +	tmp->start_col = hwctx->start_col;
> +	tmp->num_col = hwctx->num_col;
> +	tmp->command_submissions = hwctx->priv->seq;
> +	tmp->command_completions = hwctx->priv->completed;
> +
> +	buf = u64_to_user_ptr(get_info_args->buffer);
> +
> +	if (copy_to_user(buf, tmp, sizeof(*tmp)))
> +		return -EFAULT;
> +
> +	get_info_args->buffer += sizeof(*tmp);
> +	get_info_args->buffer_size -= sizeof(*tmp);
> +
> +	return 0;
> +}
> +
>   static int aie2_get_hwctx_status(struct amdxdna_client *client,
>   				 struct amdxdna_drm_get_info *args)
>   {
> -	struct amdxdna_drm_query_hwctx __user *buf;
>   	struct amdxdna_dev *xdna = client->xdna;
> -	struct amdxdna_drm_query_hwctx *tmp;
> +	struct amdxdna_drm_get_info info_args;
>   	struct amdxdna_client *tmp_client;
> -	struct amdxdna_hwctx *hwctx;
> -	unsigned long hwctx_id;
> -	bool overflow = false;
> -	u32 req_bytes = 0;
> -	u32 hw_i = 0;
> -	int ret = 0;
> -	int idx;
> +	int ret;
>   
>   	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
>   
> -	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
> -	if (!tmp)
> -		return -ENOMEM;
> +	info_args.buffer = args->buffer;
> +	info_args.buffer_size = args->buffer_size;
>   
> -	buf = u64_to_user_ptr(args->buffer);
>   	list_for_each_entry(tmp_client, &xdna->client_list, node) {
> -		idx = srcu_read_lock(&tmp_client->hwctx_srcu);
> -		amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
> -			req_bytes += sizeof(*tmp);
> -			if (args->buffer_size < req_bytes) {
> -				/* Continue iterating to get the required size */
> -				overflow = true;
> -				continue;
> -			}
> -
> -			memset(tmp, 0, sizeof(*tmp));
> -			tmp->pid = tmp_client->pid;
> -			tmp->context_id = hwctx->id;
> -			tmp->start_col = hwctx->start_col;
> -			tmp->num_col = hwctx->num_col;
> -			tmp->command_submissions = hwctx->priv->seq;
> -			tmp->command_completions = hwctx->priv->completed;
> -
> -			if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
> -				ret = -EFAULT;
> -				srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
> -				goto out;
> -			}
> -			hw_i++;
> -		}
> -		srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
> -	}
> -
> -	if (overflow) {
> -		XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
> -			 args->buffer_size, req_bytes);
> -		ret = -EINVAL;
> +		ret = amdxdna_hwctx_walk(tmp_client, &info_args, aie2_hwctx_status_cb);
> +		if (ret)
> +			break;
>   	}
>   
> -out:
> -	kfree(tmp);
> -	args->buffer_size = req_bytes;
> +	args->buffer_size = (u32)(info_args.buffer - args->buffer);
>   	return ret;
>   }
>   
> diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
> index b47a7f8e9017..4bfe4ef20550 100644
> --- a/drivers/accel/amdxdna/amdxdna_ctx.c
> +++ b/drivers/accel/amdxdna/amdxdna_ctx.c
> @@ -68,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
>   	synchronize_srcu(ss);
>   
>   	/* At this point, user is not able to submit new commands */
> -	mutex_lock(&xdna->dev_lock);
>   	xdna->dev_info->ops->hwctx_fini(hwctx);
> -	mutex_unlock(&xdna->dev_lock);
>   
>   	kfree(hwctx->name);
>   	kfree(hwctx);
>   }
>   
> +int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
> +		       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
> +{
> +	struct amdxdna_hwctx *hwctx;
> +	unsigned long hwctx_id;
> +	int ret = 0, idx;
> +
> +	idx = srcu_read_lock(&client->hwctx_srcu);
> +	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
> +		ret = walk(hwctx, arg);
> +		if (ret)
> +			break;
> +	}
> +	srcu_read_unlock(&client->hwctx_srcu, idx);
> +
> +	return ret;
> +}
> +
>   void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
>   {
>   	struct amdxdna_cmd *cmd = abo->mem.kva;
> @@ -126,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
>   	struct amdxdna_hwctx *hwctx;
>   	unsigned long hwctx_id;
>   
> -	mutex_lock(&client->hwctx_lock);
>   	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
>   		XDNA_DBG(client->xdna, "PID %d close HW context %d",
>   			 client->pid, hwctx->id);
>   		xa_erase(&client->hwctx_xa, hwctx->id);
> -		mutex_unlock(&client->hwctx_lock);
>   		amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
> -		mutex_lock(&client->hwctx_lock);
>   	}
> -	mutex_unlock(&client->hwctx_lock);
>   }
>   
>   int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
> @@ -225,6 +237,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
>   	if (!drm_dev_enter(dev, &idx))
>   		return -ENODEV;
>   
> +	mutex_lock(&xdna->dev_lock);
>   	hwctx = xa_erase(&client->hwctx_xa, args->handle);
>   	if (!hwctx) {
>   		ret = -EINVAL;
> @@ -241,6 +254,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
>   
>   	XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
>   out:
> +	mutex_unlock(&xdna->dev_lock);
>   	drm_dev_exit(idx);
>   	return ret;
>   }
> diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
> index c652229547a3..7cd7a55936f0 100644
> --- a/drivers/accel/amdxdna/amdxdna_ctx.h
> +++ b/drivers/accel/amdxdna/amdxdna_ctx.h
> @@ -139,14 +139,10 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
>   void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
>   int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
>   
> -static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
> -{
> -	return GENMASK(hwctx->start_col + hwctx->num_col - 1,
> -		       hwctx->start_col);
> -}
> -
>   void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
>   void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
> +int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
> +		       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
>   
>   int amdxdna_cmd_submit(struct amdxdna_client *client,
>   		       u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
> diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
> index fbca94183f96..8ef5e4f27f5e 100644
> --- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
> +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
> @@ -81,7 +81,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
>   		ret = -ENODEV;
>   		goto unbind_sva;
>   	}
> -	mutex_init(&client->hwctx_lock);
>   	init_srcu_struct(&client->hwctx_srcu);
>   	xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
>   	mutex_init(&client->mm_lock);
> @@ -116,7 +115,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
>   
>   	xa_destroy(&client->hwctx_xa);
>   	cleanup_srcu_struct(&client->hwctx_srcu);
> -	mutex_destroy(&client->hwctx_lock);
>   	mutex_destroy(&client->mm_lock);
>   	if (client->dev_heap)
>   		drm_gem_object_put(to_gobj(client->dev_heap));
> @@ -142,8 +140,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id)
>   
>   	mutex_lock(&xdna->dev_lock);
>   	list_del_init(&client->node);
> -	mutex_unlock(&xdna->dev_lock);
>   	amdxdna_hwctx_remove_all(client);
> +	mutex_unlock(&xdna->dev_lock);
>   
>   	drm_dev_exit(idx);
>   	return 0;
> @@ -330,11 +328,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
>   					  struct amdxdna_client, node);
>   	while (client) {
>   		list_del_init(&client->node);
> -		mutex_unlock(&xdna->dev_lock);
> -
>   		amdxdna_hwctx_remove_all(client);
>   
> -		mutex_lock(&xdna->dev_lock);
>   		client = list_first_entry_or_null(&xdna->client_list,
>   						  struct amdxdna_client, node);
>   	}
> diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
> index 40bbb3c06320..b6b3b424d1d5 100644
> --- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
> +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
> @@ -116,8 +116,6 @@ struct amdxdna_device_id {
>   struct amdxdna_client {
>   	struct list_head		node;
>   	pid_t				pid;
> -	struct mutex			hwctx_lock; /* protect hwctx */
> -	/* do NOT wait this srcu when hwctx_lock is held */
>   	struct srcu_struct		hwctx_srcu;
>   	struct xarray			hwctx_xa;
>   	u32				next_hwctxid;
