Message-ID: <20cdfe29-403c-4093-9aad-55639a53a0c5@amd.com>
Date: Wed, 11 Feb 2026 16:05:50 -0600
From: Mario Limonciello <mario.limonciello@....com>
To: Lizhi Hou <lizhi.hou@....com>, ogabbay@...nel.org,
 quic_jhugo@...cinc.com, dri-devel@...ts.freedesktop.org,
 maciej.falkowski@...ux.intel.com
Cc: linux-kernel@...r.kernel.org, max.zhen@....com, sonal.santan@....com
Subject: Re: [PATCH V2] accel/amdxdna: Fix deadlock for suspend and resume

On 2/11/26 3:59 PM, Lizhi Hou wrote:
> 
> On 2/11/26 13:54, Mario Limonciello wrote:
>> On 2/11/26 2:46 PM, Lizhi Hou wrote:
>>> When an application issues a query IOCTL while auto suspend is running,
>>> a deadlock can occur. The query path holds dev_lock and then calls
>>> pm_runtime_resume_and_get(), which waits for the ongoing suspend to
>>> complete. Meanwhile, the suspend callback attempts to acquire dev_lock
>>> and blocks, resulting in a deadlock.
>>>
>>> Fix this by releasing dev_lock before calling pm_runtime_resume_and_get()
>>> and reacquiring it after the call completes. Also acquire dev_lock in the
>>> resume callback to keep the locking consistent.
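
A rough sketch of how the two paths described above can interleave before this
change; the struct, lock, and PM helper names follow the driver, but
query_path()/suspend_path() and their bodies are simplified illustrations
rather than the actual code:

  /* Hypothetical IOCTL query path: takes dev_lock, then resumes the device. */
  static int query_path(struct amdxdna_dev *xdna, struct device *dev)
  {
          int ret;

          mutex_lock(&xdna->dev_lock);
          ret = pm_runtime_resume_and_get(dev);  /* waits for the in-flight suspend */
          if (!ret)
                  pm_runtime_put_autosuspend(dev);
          mutex_unlock(&xdna->dev_lock);
          return ret;
  }

  /* Hypothetical pre-patch runtime-suspend path: also needs dev_lock. */
  static int suspend_path(struct amdxdna_dev *xdna)
  {
          guard(mutex)(&xdna->dev_lock);  /* blocks while the IOCTL path holds it */
          /* ... quiesce hardware contexts ... */
          return 0;
  }

Each side ends up waiting on the other; the fix below breaks the cycle by
dropping dev_lock around pm_runtime_resume_and_get() and taking dev_lock
inside the PM callbacks instead.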
>>>
>>> Fixes: 063db451832b ("accel/amdxdna: Enhance runtime power management")
>>> Signed-off-by: Lizhi Hou <lizhi.hou@....com>
>>> ---
>>>   drivers/accel/amdxdna/aie2_ctx.c    |  4 ++--
>>>   drivers/accel/amdxdna/aie2_pci.c    |  7 +++----
>>>   drivers/accel/amdxdna/aie2_pm.c     |  2 +-
>>>   drivers/accel/amdxdna/amdxdna_ctx.c | 19 +++++++------------
>>>   drivers/accel/amdxdna/amdxdna_pm.c  |  2 ++
>>>   drivers/accel/amdxdna/amdxdna_pm.h  | 11 +++++++++++
>>>   6 files changed, 26 insertions(+), 19 deletions(-)
>>>
>>> diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
>>> index 37d05f2e986f..58e146172b61 100644
>>> --- a/drivers/accel/amdxdna/aie2_ctx.c
>>> +++ b/drivers/accel/amdxdna/aie2_ctx.c
>>> @@ -629,7 +629,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
>>>           goto free_entity;
>>>       }
>>>   -    ret = amdxdna_pm_resume_get(xdna);
>>> +    ret = amdxdna_pm_resume_get_locked(xdna);
>>>       if (ret)
>>>           goto free_col_list;
>>>   @@ -760,7 +760,7 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
>>>       if (!hwctx->cus)
>>>           return -ENOMEM;
>>>   -    ret = amdxdna_pm_resume_get(xdna);
>>> +    ret = amdxdna_pm_resume_get_locked(xdna);
>>>       if (ret)
>>>           goto free_cus;
>>>   diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
>>> index f70ccf0f3c01..5b326e4610e6 100644
>>> --- a/drivers/accel/amdxdna/aie2_pci.c
>>> +++ b/drivers/accel/amdxdna/aie2_pci.c
>>> @@ -451,7 +451,6 @@ static int aie2_hw_suspend(struct amdxdna_dev *xdna)
>>>   {
>>>       struct amdxdna_client *client;
>>>   -    guard(mutex)(&xdna->dev_lock);
>>>       list_for_each_entry(client, &xdna->client_list, node)
>>>           aie2_hwctx_suspend(client);
>>>   @@ -951,7 +950,7 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
>>>       if (!drm_dev_enter(&xdna->ddev, &idx))
>>>           return -ENODEV;
>>>   -    ret = amdxdna_pm_resume_get(xdna);
>>> +    ret = amdxdna_pm_resume_get_locked(xdna);
>>>       if (ret)
>>>           goto dev_exit;
>>>   @@ -1044,7 +1043,7 @@ static int aie2_get_array(struct amdxdna_client *client,
>>>       if (!drm_dev_enter(&xdna->ddev, &idx))
>>>           return -ENODEV;
>>>   -    ret = amdxdna_pm_resume_get(xdna);
>>> +    ret = amdxdna_pm_resume_get_locked(xdna);
>>>       if (ret)
>>>           goto dev_exit;
>>>   @@ -1134,7 +1133,7 @@ static int aie2_set_state(struct amdxdna_client *client,
>>>       if (!drm_dev_enter(&xdna->ddev, &idx))
>>>           return -ENODEV;
>>>   -    ret = amdxdna_pm_resume_get(xdna);
>>> +    ret = amdxdna_pm_resume_get_locked(xdna);
>>>       if (ret)
>>>           goto dev_exit;
>>>   diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
>>> index 579b8be13b18..29bd4403a94d 100644
>>> --- a/drivers/accel/amdxdna/aie2_pm.c
>>> +++ b/drivers/accel/amdxdna/aie2_pm.c
>>> @@ -31,7 +31,7 @@ int aie2_pm_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
>>>   {
>>>       int ret;
>>>   -    ret = amdxdna_pm_resume_get(ndev->xdna);
>>> +    ret = amdxdna_pm_resume_get_locked(ndev->xdna);
>>>       if (ret)
>>>           return ret;
>>>   diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
>>> index d17aef89a0ad..db3aa26fb55f 100644
>>> --- a/drivers/accel/amdxdna/amdxdna_ctx.c
>>> +++ b/drivers/accel/amdxdna/amdxdna_ctx.c
>>> @@ -266,9 +266,9 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
>>>       struct amdxdna_drm_config_hwctx *args = data;
>>>       struct amdxdna_dev *xdna = to_xdna_dev(dev);
>>>       struct amdxdna_hwctx *hwctx;
>>> -    int ret, idx;
>>>       u32 buf_size;
>>>       void *buf;
>>> +    int ret;
>>>       u64 val;
>>>         if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
>>> @@ -310,20 +310,17 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
>>>           return -EINVAL;
>>>       }
>>>   -    mutex_lock(&xdna->dev_lock);
>>> -    idx = srcu_read_lock(&client->hwctx_srcu);
>>> +    guard(mutex)(&xdna->dev_lock);
>>>       hwctx = xa_load(&client->hwctx_xa, args->handle);
>>>       if (!hwctx) {
>>>         XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
>>>           ret = -EINVAL;
>>> -        goto unlock_srcu;
>>> +        goto free_buf;
>>>       }
>>>         ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
>>>   -unlock_srcu:
>>> -    srcu_read_unlock(&client->hwctx_srcu, idx);
>>> -    mutex_unlock(&xdna->dev_lock);
>>> +free_buf:
>>>       kfree(buf);
>>>       return ret;
>>>   }
>>> @@ -334,7 +331,7 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
>>>       struct amdxdna_hwctx *hwctx;
>>>       struct amdxdna_gem_obj *abo;
>>>       struct drm_gem_object *gobj;
>>> -    int ret, idx;
>>> +    int ret;
>>>         if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
>>>           return -EOPNOTSUPP;
>>> @@ -345,17 +342,15 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
>>>         abo = to_xdna_obj(gobj);
>>>       guard(mutex)(&xdna->dev_lock);
>>> -    idx = srcu_read_lock(&client->hwctx_srcu);
>>>       hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
>>>       if (!hwctx) {
>>>           ret = -EINVAL;
>>> -        goto unlock_srcu;
>>> +        goto put_obj;
>>>       }
>>>         ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
>>>   -unlock_srcu:
>>> -    srcu_read_unlock(&client->hwctx_srcu, idx);
>>> +put_obj:
>>>       drm_gem_object_put(gobj);
>>>       return ret;
>>>   }
>>> diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c
>>> index d024d480521c..b1fafddd7ad5 100644
>>> --- a/drivers/accel/amdxdna/amdxdna_pm.c
>>> +++ b/drivers/accel/amdxdna/amdxdna_pm.c
>>> @@ -16,6 +16,7 @@ int amdxdna_pm_suspend(struct device *dev)
>>>       struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
>>>       int ret = -EOPNOTSUPP;
>>>   +    guard(mutex)(&xdna->dev_lock);
>>>       if (xdna->dev_info->ops->suspend)
>>>           ret = xdna->dev_info->ops->suspend(xdna);
>>>   @@ -28,6 +29,7 @@ int amdxdna_pm_resume(struct device *dev)
>>>       struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
>>>       int ret = -EOPNOTSUPP;
>>>   +    guard(mutex)(&xdna->dev_lock);
>>>       if (xdna->dev_info->ops->resume)
>>>           ret = xdna->dev_info->ops->resume(xdna);
>>>   diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h
>>> index 77b2d6e45570..3d26b973e0e3 100644
>>> --- a/drivers/accel/amdxdna/amdxdna_pm.h
>>> +++ b/drivers/accel/amdxdna/amdxdna_pm.h
>>> @@ -15,4 +15,15 @@ void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
>>>   void amdxdna_pm_init(struct amdxdna_dev *xdna);
>>>   void amdxdna_pm_fini(struct amdxdna_dev *xdna);
>>>   +static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna)
>>> +{
>>> +    int ret;
>>> +
>>> +    mutex_unlock(&xdna->dev_lock);
>>> +    ret = amdxdna_pm_resume_get(xdna);
>>> +    mutex_lock(&xdna->dev_lock);
>>> +
>>> +    return ret;
>>
>> Like the other code you added in this patch, couldn't this be 
>> simplified with a guard(mutex)?
> 
> This unlocks before resume_get, then locks again after. guard(mutex) might
> not help here.
> 
Oh right; it's reversed.  Agreed!
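
(For reference, the asymmetry: guard(mutex)() takes the lock on entry and
releases it automatically at end of scope, while the helper needs the opposite
order, so it spells the sequence out, as in the hunk above:)

  mutex_unlock(&xdna->dev_lock);      /* let a pending runtime suspend finish */
  ret = amdxdna_pm_resume_get(xdna);  /* may wait for that suspend */
  mutex_lock(&xdna->dev_lock);        /* retake the lock for the caller */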

Reviewed-by: Mario Limonciello (AMD) <superm1@...nel.org>

> Lizhi
> 
>>
>>> +}
>>> +
>>>   #endif /* _AMDXDNA_PM_H_ */
>>

