Message-ID: <fe549bf7-a704-abda-ac1d-221a96de344d@suse.de>
Date: Fri, 17 Feb 2023 13:28:47 +0100
From: Thomas Zimmermann <tzimmermann@...e.de>
To: Dmitry Osipenko <dmitry.osipenko@...labora.com>,
David Airlie <airlied@...il.com>,
Gerd Hoffmann <kraxel@...hat.com>,
Gurchetan Singh <gurchetansingh@...omium.org>,
Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
Daniel Almeida <daniel.almeida@...labora.com>,
Gustavo Padovan <gustavo.padovan@...labora.com>,
Daniel Stone <daniel@...ishbar.org>,
Tomeu Vizoso <tomeu.vizoso@...labora.com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Rob Clark <robdclark@...il.com>,
Sumit Semwal <sumit.semwal@...aro.org>,
Christian König <christian.koenig@....com>,
Qiang Yu <yuq825@...il.com>,
Steven Price <steven.price@....com>,
Alyssa Rosenzweig <alyssa.rosenzweig@...labora.com>,
Rob Herring <robh@...nel.org>, Sean Paul <sean@...rly.run>,
Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
Abhinav Kumar <quic_abhinavk@...cinc.com>
Cc: dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
kernel@...labora.com, virtualization@...ts.linux-foundation.org
Subject: Re: [PATCH v10 05/11] drm/shmem: Switch to use drm_* debug helpers

On 08.01.23 22:04, Dmitry Osipenko wrote:
> Ease debugging of a multi-GPU system by using the drm_WARN_*() and
> drm_dbg_kms() helpers, which print out the name of the DRM device
> corresponding to the shmem GEM.
>
> Suggested-by: Thomas Zimmermann <tzimmermann@...e.de>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
Reviewed-by: Thomas Zimmermann <tzimmermann@...e.de>
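
As a quick illustration for readers who haven't used the device-aware
helpers yet (a hypothetical snippet, not part of this patch;
example_check() and the "busy" condition are made up for the example):
the calls behave the same, but the drm_* variants prefix their output
with the DRM device's name, which is what makes the messages
attributable on a multi-GPU system.

	#include <drm/drm_gem.h>	/* struct drm_gem_object */
	#include <drm/drm_print.h>	/* drm_WARN_ON(), drm_dbg_kms() */

	static void example_check(struct drm_gem_object *obj, bool busy)
	{
		/* Old style: no hint which device the message belongs to. */
		WARN_ON(busy);
		DRM_DEBUG_KMS("object of size %zu still busy\n", obj->size);

		/*
		 * Device-aware style: the output carries the DRM device's
		 * name, so the offending GPU is immediately identifiable.
		 */
		drm_WARN_ON(obj->dev, busy);
		drm_dbg_kms(obj->dev, "object of size %zu still busy\n",
			    obj->size);
	}
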
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 38 +++++++++++++++-----------
> 1 file changed, 22 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index f21f47737817..5006f7da7f2d 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
> {
> struct drm_gem_object *obj = &shmem->base;
>
> - WARN_ON(shmem->vmap_use_count);
> + drm_WARN_ON(obj->dev, shmem->vmap_use_count);
>
> if (obj->import_attach) {
> drm_prime_gem_destroy(obj, shmem->sgt);
> @@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
> drm_gem_shmem_put_pages(shmem);
> }
>
> - WARN_ON(shmem->pages_use_count);
> + drm_WARN_ON(obj->dev, shmem->pages_use_count);
>
> drm_gem_object_release(obj);
> mutex_destroy(&shmem->pages_lock);
> @@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
>
> pages = drm_gem_get_pages(obj);
> if (IS_ERR(pages)) {
> - DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
> + drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
> + PTR_ERR(pages));
> shmem->pages_use_count = 0;
> return PTR_ERR(pages);
> }
> @@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
> */
> int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
> {
> + struct drm_gem_object *obj = &shmem->base;
> int ret;
>
> - WARN_ON(shmem->base.import_attach);
> + drm_WARN_ON(obj->dev, obj->import_attach);
>
> ret = mutex_lock_interruptible(&shmem->pages_lock);
> if (ret)
> @@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
> {
> struct drm_gem_object *obj = &shmem->base;
>
> - if (WARN_ON_ONCE(!shmem->pages_use_count))
> + if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
> return;
>
> if (--shmem->pages_use_count > 0)
> @@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
> */
> int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
> {
> - WARN_ON(shmem->base.import_attach);
> + struct drm_gem_object *obj = &shmem->base;
> +
> + drm_WARN_ON(obj->dev, obj->import_attach);
>
> return drm_gem_shmem_get_pages(shmem);
> }
> @@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
> */
> void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
> {
> - WARN_ON(shmem->base.import_attach);
> + struct drm_gem_object *obj = &shmem->base;
> +
> + drm_WARN_ON(obj->dev, obj->import_attach);
>
> drm_gem_shmem_put_pages(shmem);
> }
> @@ -303,7 +309,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
> if (obj->import_attach) {
> ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
> if (!ret) {
> - if (WARN_ON(map->is_iomem)) {
> + if (drm_WARN_ON(obj->dev, map->is_iomem)) {
> dma_buf_vunmap(obj->import_attach->dmabuf, map);
> ret = -EIO;
> goto err_put_pages;
> @@ -328,7 +334,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
> }
>
> if (ret) {
> - DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
> + drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
> goto err_put_pages;
> }
>
> @@ -378,7 +384,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
> {
> struct drm_gem_object *obj = &shmem->base;
>
> - if (WARN_ON_ONCE(!shmem->vmap_use_count))
> + if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
> return;
>
> if (--shmem->vmap_use_count > 0)
> @@ -463,7 +469,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
> struct drm_gem_object *obj = &shmem->base;
> struct drm_device *dev = obj->dev;
>
> - WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
> + drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
>
> dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
> sg_free_table(shmem->sgt);
> @@ -555,7 +561,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> mutex_lock(&shmem->pages_lock);
>
> if (page_offset >= num_pages ||
> - WARN_ON_ONCE(!shmem->pages) ||
> + drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
> shmem->madv < 0) {
> ret = VM_FAULT_SIGBUS;
> } else {
> @@ -574,7 +580,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
> struct drm_gem_object *obj = vma->vm_private_data;
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>
> - WARN_ON(shmem->base.import_attach);
> + drm_WARN_ON(obj->dev, obj->import_attach);
>
> mutex_lock(&shmem->pages_lock);
>
> @@ -583,7 +589,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
> * mmap'd, vm_open() just grabs an additional reference for the new
> * mm the vma is getting copied into (ie. on fork()).
> */
> - if (!WARN_ON_ONCE(!shmem->pages_use_count))
> + if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
> shmem->pages_use_count++;
>
> mutex_unlock(&shmem->pages_lock);
> @@ -677,7 +683,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
> {
> struct drm_gem_object *obj = &shmem->base;
>
> - WARN_ON(shmem->base.import_attach);
> + drm_WARN_ON(obj->dev, obj->import_attach);
>
> return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
> }
> @@ -708,7 +714,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
> if (shmem->sgt)
> return shmem->sgt;
>
> - WARN_ON(obj->import_attach);
> + drm_WARN_ON(obj->dev, obj->import_attach);
>
> ret = drm_gem_shmem_get_pages(shmem);
> if (ret)
--
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Managing Director (Geschäftsführer): Ivo Totev