Message-ID: <4b711688-9a82-03f1-844c-cf1557892679@nvidia.com>
Date: Wed, 15 Sep 2021 14:34:19 +0800
From: Mark Zhang <markzhang@...dia.com>
To: Jason Gunthorpe <jgg@...dia.com>, Tao Liu <thomas.liu@...oud.cn>
CC: <dledford@...hat.com>, <leon@...nel.org>,
<haakon.bugge@...cle.com>, <shayd@...dia.com>,
<avihaih@...dia.com>, <linux-rdma@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <thomas.liu@...oud.com>
Subject: Re: [PATCH] RDMA/cma: Fix listener leak in rdma_cma_listen_on_all()
failure
On 9/15/2021 3:54 AM, Jason Gunthorpe wrote:
> On Mon, Sep 13, 2021 at 05:33:44PM +0800, Tao Liu wrote:
>> When rdma_cma_listen_on_all() fails, it only destroys the listener that
>> caused the error, not the ones already added to listen_list, and the cm
>> state falls back to RDMA_CM_ADDR_BOUND.
>>
>> When the user destroys the id, those leftover listeners are never
>> destroyed, and the process gets stuck:
>>
>> task:rping state:D stack: 0 pid:19605 ppid: 47036 flags:0x00000084
>> Call Trace:
>> __schedule+0x29a/0x780
>> ? free_unref_page_commit+0x9b/0x110
>> schedule+0x3c/0xa0
>> schedule_timeout+0x215/0x2b0
>> ? __flush_work+0x19e/0x1e0
>> wait_for_completion+0x8d/0xf0
>> _destroy_id+0x144/0x210 [rdma_cm]
>> ucma_close_id+0x2b/0x40 [rdma_ucm]
>> __destroy_id+0x93/0x2c0 [rdma_ucm]
>> ? __xa_erase+0x4a/0xa0
>> ucma_destroy_id+0x9a/0x120 [rdma_ucm]
>> ucma_write+0xb8/0x130 [rdma_ucm]
>> vfs_write+0xb4/0x250
>> ksys_write+0xb5/0xd0
>> ? syscall_trace_enter.isra.19+0x123/0x190
>> do_syscall_64+0x33/0x40
>> entry_SYSCALL_64_after_hwframe+0x44/0xa9
>>
>> Fixes: c80a0c52d85c ("RDMA/cma: Add missing error handling of listen_id")
>> Signed-off-by: Tao Liu <thomas.liu@...oud.cn>
>> Reviewed-by: Leon Romanovsky <leonro@...dia.com>
>> drivers/infiniband/core/cma.c | 22 +++++++++++++++-------
>> 1 file changed, 15 insertions(+), 7 deletions(-)
>
> I'd like to see a bit more than this, so I reworked the patch slightly
> into the version below. It is in for-rc, so let me know if it got busted
> up. Thanks
>
> From a17a1faf5d3e2e19a75397dfd740dbde06f054c3 Mon Sep 17 00:00:00 2001
> From: Tao Liu <thomas.liu@...oud.cn>
> Date: Mon, 13 Sep 2021 17:33:44 +0800
> Subject: [PATCH] RDMA/cma: Fix listener leak in rdma_cma_listen_on_all()
> failure
>
> If cma_listen_on_all() fails it leaves the per-device IDs still on the
> listen_list, but the state falls back to RDMA_CM_ADDR_BOUND instead of
> staying at RDMA_CM_LISTEN.
>
> When the cmid is eventually destroyed, cma_cancel_listens() is not called
> due to the wrong state; the per-device IDs are still holding the refcount,
> preventing the ID from being destroyed and thus deadlocking:
>
> task:rping state:D stack: 0 pid:19605 ppid: 47036 flags:0x00000084
> Call Trace:
> __schedule+0x29a/0x780
> ? free_unref_page_commit+0x9b/0x110
> schedule+0x3c/0xa0
> schedule_timeout+0x215/0x2b0
> ? __flush_work+0x19e/0x1e0
> wait_for_completion+0x8d/0xf0
> _destroy_id+0x144/0x210 [rdma_cm]
> ucma_close_id+0x2b/0x40 [rdma_ucm]
> __destroy_id+0x93/0x2c0 [rdma_ucm]
> ? __xa_erase+0x4a/0xa0
> ucma_destroy_id+0x9a/0x120 [rdma_ucm]
> ucma_write+0xb8/0x130 [rdma_ucm]
> vfs_write+0xb4/0x250
> ksys_write+0xb5/0xd0
> ? syscall_trace_enter.isra.19+0x123/0x190
> do_syscall_64+0x33/0x40
> entry_SYSCALL_64_after_hwframe+0x44/0xa9
>
> Ensure that cma_listen_on_all() atomically unwinds its actions under the
> lock on error, and reorganize how destroy_id works so that it is directly
> sensitive to the listen list, not indirectly through the state and some
> other random collection of variables.
>
> Fixes: c80a0c52d85c ("RDMA/cma: Add missing error handling of listen_id")
> Link: https://lore.kernel.org/r/20210913093344.17230-1-thomas.liu@ucloud.cn
> Signed-off-by: Tao Liu <thomas.liu@...oud.cn>
> Reviewed-by: Leon Romanovsky <leonro@...dia.com>
> Signed-off-by: Jason Gunthorpe <jgg@...dia.com>
> ---
> drivers/infiniband/core/cma.c | 28 ++++++++++++++++++++--------
> 1 file changed, 20 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
> index 86ee3b01b3ee47..be6beee1dd4c5e 100644
> --- a/drivers/infiniband/core/cma.c
> +++ b/drivers/infiniband/core/cma.c
> @@ -1746,16 +1746,17 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
> }
> }
>
> -static void cma_cancel_listens(struct rdma_id_private *id_priv)
> +static void _cma_cancel_listens(struct rdma_id_private *id_priv)
> {
> struct rdma_id_private *dev_id_priv;
>
> + lockdep_assert_held(&lock);
> +
> /*
> * Remove from listen_any_list to prevent added devices from spawning
> * additional listen requests.
> */
> - mutex_lock(&lock);
> - list_del(&id_priv->list);
> + list_del_init(&id_priv->list);
>
> while (!list_empty(&id_priv->listen_list)) {
> dev_id_priv = list_entry(id_priv->listen_list.next,
> @@ -1768,6 +1769,20 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
> rdma_destroy_id(&dev_id_priv->id);
> mutex_lock(&lock);
> }
> +}
> +
> +static void cma_cancel_listens(struct rdma_id_private *id_priv)
> +{
> + /*
> + * During _destroy_id() it is not possible for this value to transition
> +	 * from empty to !empty, so test it outside the lock to avoid taking a
> +	 * global lock on every destroy. Only listen-all cases will have
> +	 * something to do.
> + */
> + if (list_empty(&id_priv->list))
> + return;
> + mutex_lock(&lock);
> + _cma_cancel_listens(id_priv);
> mutex_unlock(&lock);
> }
>
> @@ -1781,10 +1796,6 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
> case RDMA_CM_ROUTE_QUERY:
> cma_cancel_route(id_priv);
> break;
> - case RDMA_CM_LISTEN:
> - if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
> - cma_cancel_listens(id_priv);
> - break;
If this case is removed, is this code path still good?

  cma_add_one()/cma_remove_one()
    -> cma_process_remove()
       -> cma_send_device_removal_put()
          -> cma_cancel_operation()
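
To make the question concrete: after this patch cma_cancel_operation() ends up
shaped roughly like the sketch below (heavily reduced, other cases elided, not
the exact cma.c code). A path that only goes through cma_cancel_operation(),
like the device-removal chain above, no longer reaches cma_cancel_listens(),
since listen cleanup now happens only from _destroy_id():

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		/* ... unchanged ... */
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	/* The RDMA_CM_LISTEN case is gone, so no cma_cancel_listens() here. */
	default:
		break;
	}
}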
> default:
> break;
> }
> @@ -1855,6 +1866,7 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
> static void _destroy_id(struct rdma_id_private *id_priv,
> enum rdma_cm_state state)
> {
> + cma_cancel_listens(id_priv);
> cma_cancel_operation(id_priv, state);
>
> rdma_restrack_del(&id_priv->res);
> @@ -2579,7 +2591,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
> return 0;
>
> err_listen:
> - list_del(&id_priv->list);
> + _cma_cancel_listens(id_priv);
> mutex_unlock(&lock);
> if (to_destroy)
> rdma_destroy_id(&to_destroy->id);
> --
> 2.33.0
>
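For reference, with the rework the error unwind in cma_listen_on_all() ends up
roughly like the sketch below (reduced for illustration, not the exact code):
every per-device listener already added is torn down under the lock via
_cma_cancel_listens() before the error is returned, so nothing is left on
listen_list holding a refcount on the parent ID.

static int cma_listen_on_all(struct rdma_id_private *id_priv)
{
	/* May be handed back by cma_listen_on_dev() to destroy outside the lock */
	struct rdma_id_private *to_destroy = NULL;
	struct cma_device *cma_dev;
	int ret;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
		if (ret)
			goto err_listen;
	}
	mutex_unlock(&lock);
	return 0;

err_listen:
	/* Unwind every per-device listener already added, under the lock */
	_cma_cancel_listens(id_priv);
	mutex_unlock(&lock);
	if (to_destroy)
		rdma_destroy_id(&to_destroy->id);
	return ret;
}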