Message-ID: <717f4fb6-9523-6358-eb10-94fc08a1e1b2@rock-chips.com>
Date: Tue, 9 Aug 2022 09:12:45 +0800
From: Chen Jeffy <jeffy.chen@...k-chips.com>
To: Christian König <christian.koenig@....com>,
Daniel Vetter <daniel.vetter@...ll.ch>
Cc: Andy Yan <andy.yan@...k-chips.com>,
Jianqun Xu <jay.xu@...k-chips.com>,
Maxime Ripard <mripard@...nel.org>,
Sumit Semwal <sumit.semwal@...aro.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
linux-kernel@...r.kernel.org, dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org, David Airlie <airlied@...ux.ie>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
linux-media@...r.kernel.org, Daniel Vetter <daniel@...ll.ch>
Subject: Re: [PATCH] drm/gem: Fix GEM handle release errors
Hi Christian,
Sorry, I've already sent a v2; please check that one.
On 8/9 (Tuesday) 2:05, Christian König wrote:
>
>
> Am 02.08.22 um 13:33 schrieb Jeffy Chen:
>> Currently we are assuming a one-to-one mapping between dmabuf and
>> handle when releasing GEM handles.
>>
>> But that is not always true, since we create extra handles for the
>> GEM object in cases like gem_open() and getfb{,2}().
>>
>> A similar issue was reported at:
>> https://lore.kernel.org/all/20211105083308.392156-1-jay.xu@rock-chips.com/
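
(A concrete illustration -- my own untested sketch, not from the patch:
two handles in one DRM file end up referring to the same GEM object,
while the prime bookkeeping ties the dma_buf to only one of them.)

    #include <stdint.h>
    #include <xf86drm.h>

    static void two_handles_one_object(int fd, uint32_t handle)
    {
        struct drm_gem_flink flink = { .handle = handle };
        struct drm_gem_open open_arg = { 0 };
        int dmabuf_fd;

        /* Handle 1 exports a dma_buf; drm_prime records the
         * (dma_buf, handle) pair for this file. */
        drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &dmabuf_fd);

        /* Handle 2 names the same object via flink/open, i.e. the
         * gem_open() case from the commit message. */
        drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        open_arg.name = flink.name;
        drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);

        /* Releasing open_arg.handle would previously drop the prime
         * entry that actually belongs to 'handle'. */
    }
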
>>
>> Another problem is that drm_gem_remove_prime_handles() currently only
>> removes the handle to the exported dmabuf (gem_obj->dma_buf), so the
>> imported ones would leak:
>> WARNING: CPU: 2 PID: 236 at drivers/gpu/drm/drm_prime.c:228
>> drm_prime_destroy_file_private+0x18/0x24
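
(That warning is the WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs))
sanity check in drm_prime_destroy_file_private(): a prime entry left
behind for an imported dma_buf trips it when the file closes.)
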
>>
>> Let's fix these by using the handle to find the exact mapping to remove.
>>
>> Signed-off-by: Jeffy Chen <jeffy.chen@...k-chips.com>
>> ---
>>
>> drivers/gpu/drm/drm_gem.c | 17 +----------------
>> drivers/gpu/drm/drm_internal.h | 4 ++--
>> drivers/gpu/drm/drm_prime.c | 16 ++++++++++------
>> 3 files changed, 13 insertions(+), 24 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
>> index eb0c2d041f13..ed39da383570 100644
>> --- a/drivers/gpu/drm/drm_gem.c
>> +++ b/drivers/gpu/drm/drm_gem.c
>> @@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
>> }
>> EXPORT_SYMBOL(drm_gem_private_object_init);
>> -static void
>> -drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
>> -{
>> - /*
>> - * Note: obj->dma_buf can't disappear as long as we still hold a
>> - * handle reference in obj->handle_count.
>> - */
>> - mutex_lock(&filp->prime.lock);
>> - if (obj->dma_buf) {
>> - drm_prime_remove_buf_handle_locked(&filp->prime,
>> -     obj->dma_buf);
>> - }
>> - mutex_unlock(&filp->prime.lock);
>> -}
>> -
>> /**
>> * drm_gem_object_handle_free - release resources bound to userspace handles
>> * @obj: GEM object to clean up.
>> @@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
>> if (obj->funcs->close)
>> obj->funcs->close(obj, file_priv);
>> - drm_gem_remove_prime_handles(obj, file_priv);
>> + drm_prime_remove_buf_handle(&file_priv->prime, id);
>> drm_vma_node_revoke(&obj->vma_node, file_priv);
>> drm_gem_object_handle_put_unlocked(obj);
>> diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
>> index 1fbbc19f1ac0..7bb98e6a446d 100644
>> --- a/drivers/gpu/drm/drm_internal.h
>> +++ b/drivers/gpu/drm/drm_internal.h
>> @@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
>> void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
>> void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
>> -void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
>> -     struct dma_buf *dma_buf);
>> +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
>> +     uint32_t handle);
>> /* drm_drv.c */
>> struct drm_minor *drm_minor_acquire(unsigned int minor_id);
>> diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
>> index e3f09f18110c..c28518ab62d0 100644
>> --- a/drivers/gpu/drm/drm_prime.c
>> +++ b/drivers/gpu/drm/drm_prime.c
>> @@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
>> return -ENOENT;
>> }
>> -void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
>> -     struct dma_buf *dma_buf)
>> +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
>> +     uint32_t handle)
>> {
>> struct rb_node *rb;
>> + mutex_lock(&prime_fpriv->lock);
>> +
>> rb = prime_fpriv->dmabufs.rb_node;
>> while (rb) {
>> struct drm_prime_member *member;
>> member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
>> - if (member->dma_buf == dma_buf) {
>> + if (member->handle == handle) {
>> rb_erase(&member->handle_rb, &prime_fpriv->handles);
>> rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
>> - dma_buf_put(dma_buf);
>> + dma_buf_put(member->dma_buf);
>> kfree(member);
>> - return;
>> - } else if (member->dma_buf < dma_buf) {
>> + break;
>> + } else if (member->handle < handle) {
>
> Just to make it clear once more: this change is completely broken.
>
> The rb tree is indexed by the dma_buf object, not by the handle.
>
> Regards,
> Christian.
>
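Right -- the dmabufs rbtree is ordered by dma_buf pointer, so descending
it while comparing member->handle can branch the wrong way and miss the
node entirely. A fix along these lines instead searches the tree that is
actually keyed by handle (untested sketch using the existing
prime_fpriv->handles rbtree; the actual v2 may differ):

    rb = prime_fpriv->handles.rb_node;
    while (rb) {
        struct drm_prime_member *member;

        member = rb_entry(rb, struct drm_prime_member, handle_rb);
        if (member->handle == handle) {
            /* Drop the entry from both lookup trees, then release
             * the dma_buf reference held by the prime table. */
            rb_erase(&member->handle_rb, &prime_fpriv->handles);
            rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

            dma_buf_put(member->dma_buf);
            kfree(member);
            break;
        } else if (member->handle < handle) {
            rb = rb->rb_right;
        } else {
            rb = rb->rb_left;
        }
    }
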
>> rb = rb->rb_right;
>> } else {
>> rb = rb->rb_left;
>> }
>> }
>> +
>> + mutex_unlock(&prime_fpriv->lock);
>> }
>> void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
>
>