Message-ID: <20250225142849.4dcec919@collabora.com>
Date: Tue, 25 Feb 2025 14:28:49 +0100
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Adrián Larumbe <adrian.larumbe@...labora.com>
Cc: dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org, Steven
Price <steven.price@....com>, Rob Herring <robh@...nel.org>, Maarten
Lankhorst <maarten.lankhorst@...ux.intel.com>, Maxime Ripard
<mripard@...nel.org>, Thomas Zimmermann <tzimmermann@...e.de>, David Airlie
<airlied@...il.com>, Simona Vetter <simona@...ll.ch>, kernel@...labora.com
Subject: Re: [RFC PATCH 4/7] drm/shmem: Introduce the notion of sparse
objects
On Tue, 18 Feb 2025 23:25:34 +0000
Adrián Larumbe <adrian.larumbe@...labora.com> wrote:
> Sparse DRM objects will store their backing pages in an xarray, to avoid the
> overhead of preallocating a huge struct page pointer array when only a very
> small range of indices might be assigned.
>
> For now, only the definition of a sparse object as a union alternative to a
> 'dense' object is provided, with functions that exploit it being part of later
> commits.
>
> Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 42 +++++++++++++++++++++++---
> include/drm/drm_gem_shmem_helper.h | 18 ++++++++++-
> 2 files changed, 54 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 5ab351409312..d63e42be2d72 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -10,6 +10,7 @@
> #include <linux/shmem_fs.h>
> #include <linux/slab.h>
> #include <linux/vmalloc.h>
> +#include <linux/xarray.h>
>
> #ifdef CONFIG_X86
> #include <asm/set_memory.h>
> @@ -50,7 +51,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
>
> static struct drm_gem_shmem_object *
> __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
> - struct vfsmount *gemfs)
> + bool sparse, struct vfsmount *gemfs)
> {
> struct drm_gem_shmem_object *shmem;
> struct drm_gem_object *obj;
> @@ -90,6 +91,11 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
>
> INIT_LIST_HEAD(&shmem->madv_list);
>
> + if (unlikely(sparse))
> + xa_init_flags(&shmem->xapages, XA_FLAGS_ALLOC);
> +
> + shmem->sparse = sparse;
Looks like the only caller passing sparse=true is
drm_gem_shmem_create_sparse(), and the sparse property isn't used for
the rest of the gem_shmem object initialization. Maybe we could move
that code to drm_gem_shmem_create_sparse() instead of modifying the
prototype of __drm_gem_shmem_create().
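Something along these lines (untested sketch, just to illustrate the
idea):

	struct drm_gem_shmem_object *
	drm_gem_shmem_create_sparse(struct drm_device *dev, size_t size)
	{
		struct drm_gem_shmem_object *shmem;

		/* __drm_gem_shmem_create() keeps its current prototype. */
		shmem = __drm_gem_shmem_create(dev, size, false, NULL);
		if (IS_ERR(shmem))
			return shmem;

		/* Sparse-specific init lives in the sparse constructor. */
		xa_init_flags(&shmem->xapages, XA_FLAGS_ALLOC);
		shmem->sparse = true;

		return shmem;
	}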
> +
> if (!private) {
> /*
> * Our buffers are kept pinned, so allocating them
> @@ -124,10 +130,16 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
> */
> struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> {
> - return __drm_gem_shmem_create(dev, size, false, NULL);
> + return __drm_gem_shmem_create(dev, size, false, false, NULL);
> }
> EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
>
> +struct drm_gem_shmem_object *drm_gem_shmem_create_sparse(struct drm_device *dev, size_t size)
> +{
> + return __drm_gem_shmem_create(dev, size, false, true, NULL);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_create_sparse);
> +
> /**
> * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
> * given mountpoint
> @@ -145,7 +157,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
> size_t size,
> struct vfsmount *gemfs)
> {
> - return __drm_gem_shmem_create(dev, size, false, gemfs);
> + return __drm_gem_shmem_create(dev, size, false, false, gemfs);
> }
> EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
>
> @@ -173,7 +185,9 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
> sg_free_table(shmem->sgt);
> kfree(shmem->sgt);
> }
> - if (shmem->pages)
> +
> + if ((!shmem->sparse && shmem->pages) ||
> + (shmem->sparse && !xa_empty(&shmem->xapages)))
> drm_gem_shmem_put_pages(shmem);
Can we let drm_gem_shmem_put_pages() do the xa_empty() check?
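Something like this at the top of drm_gem_shmem_put_pages() (untested),
so drm_gem_shmem_free() can call it unconditionally:

	/* Nothing to release if no pages were ever allocated. */
	if (shmem->sparse ? xa_empty(&shmem->xapages) : !shmem->pages)
		return;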
>
> drm_WARN_ON(obj->dev, shmem->pages_use_count);
> @@ -191,11 +205,19 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
> struct drm_gem_object *obj = &shmem->base;
> struct page **pages;
>
> + if (drm_WARN_ON(obj->dev, shmem->sparse))
> + return -EINVAL;
> +
> dma_resv_assert_held(shmem->base.resv);
>
> if (shmem->pages_use_count++ > 0)
> return 0;
>
> + /* We only allow increasing the user count in the case of
> + sparse shmem objects with some backed pages for now */
> + if (shmem->sparse && xa_empty(&shmem->xapages))
> + return -EINVAL;
You'll never enter this branch because you return -EINVAL early when
sparse==true.
> +
> pages = drm_gem_get_pages(obj);
> if (IS_ERR(pages)) {
> drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
> @@ -541,6 +563,8 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> struct page *page;
> pgoff_t page_offset;
>
> + drm_WARN_ON(obj->dev, shmem->sparse);
For all those WARN_ON()s you add, I would add a comment explaining why
you don't expect sparse objects to enter this path. In this case, it
has to do with the fact that sparse GEMs are not mmap-able (yet?).
And if you don't want to populate on-demand, you should probably
return VM_FAULT_SIGBUS here, even if that's not expected.
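Untested sketch of what I have in mind:

	/*
	 * Sparse GEMs are not mmap-able (yet), so we should never fault
	 * on one. Refuse the fault explicitly instead of touching
	 * shmem->pages.
	 */
	if (drm_WARN_ON(obj->dev, shmem->sparse))
		return VM_FAULT_SIGBUS;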
> +
> /* We don't use vmf->pgoff since that has the fake offset */
> page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
>
> @@ -567,6 +591,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>
> drm_WARN_ON(obj->dev, obj->import_attach);
> + drm_WARN_ON(obj->dev, shmem->sparse);
>
> dma_resv_lock(shmem->base.resv, NULL);
>
> @@ -666,6 +691,9 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
> if (shmem->base.import_attach)
> return;
>
> + if (drm_WARN_ON(shmem->base.dev, shmem->sparse))
> + return;
> +
We probably want to print some of these in case we're dealing with a
sparse GEM object.
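For instance (untested, the exact set of fields to print is up to you):

	if (shmem->sparse) {
		drm_printf_indent(p, indent, "sparse=yes\n");
		drm_printf_indent(p, indent, "pages_use_count=%u\n",
				  shmem->pages_use_count);
		return;
	}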
> drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
> drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
> @@ -691,6 +719,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
> struct drm_gem_object *obj = &shmem->base;
>
> drm_WARN_ON(obj->dev, obj->import_attach);
> + drm_WARN_ON(obj->dev, shmem->sparse);
>
> return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
> }
> @@ -702,6 +731,9 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
> int ret;
> struct sg_table *sgt;
>
> + if (drm_WARN_ON(obj->dev, shmem->sparse))
> + return ERR_PTR(-EINVAL);
> +
> if (shmem->sgt)
> return shmem->sgt;
>
> @@ -787,7 +819,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
> size_t size = PAGE_ALIGN(attach->dmabuf->size);
> struct drm_gem_shmem_object *shmem;
>
> - shmem = __drm_gem_shmem_create(dev, size, true, NULL);
> + shmem = __drm_gem_shmem_create(dev, size, true, false, NULL);
> if (IS_ERR(shmem))
> return ERR_CAST(shmem);
>
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index d22e3fb53631..902039cfc4ce 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -6,6 +6,7 @@
> #include <linux/fs.h>
> #include <linux/mm.h>
> #include <linux/mutex.h>
> +#include <linux/xarray.h>
>
> #include <drm/drm_file.h>
> #include <drm/drm_gem.h>
> @@ -29,7 +30,11 @@ struct drm_gem_shmem_object {
> /**
> * @pages: Page table
> */
> - struct page **pages;
> + union {
> +
> + struct page **pages;
> + struct xarray xapages;
> + };
>
> /**
> * @pages_use_count:
> @@ -91,6 +96,11 @@ struct drm_gem_shmem_object {
> * @map_wc: map object write-combined (instead of using shmem defaults).
> */
> bool map_wc : 1;
> +
> + /**
> + * @sparse: the object's virtual memory space is only partially backed by pages
I would drop the "virtual memory space" part. sparse just means the
object might be partially backed by physical memory, and that memory
will be allocated on-demand (most likely on-GPU-demand, but there's
nothing preventing us from doing it on-CPU-demand, as long as the fault
handler knows the allocation granularity).
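Suggested wording, feel free to tweak:

	/**
	 * @sparse: Object might only be partially backed by physical
	 * memory, with pages allocated on-demand and tracked in @xapages
	 * instead of @pages.
	 */
	bool sparse : 1;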
> + */
> + bool sparse : 1;
> };
>
> #define to_drm_gem_shmem_obj(obj) \
> @@ -229,6 +239,9 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
> {
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>
> + if (shmem->sparse)
> + return -EACCES;
> +
> return drm_gem_shmem_vmap(shmem, map);
Do we need a WARN_ON() in drm_gem_shmem_vunmap()?
> }
>
> @@ -263,6 +276,9 @@ static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct v
> {
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>
> + if (shmem->sparse)
> + return -EACCES;
> +
> return drm_gem_shmem_mmap(shmem, vma);
> }
>
I guess we also need to fail in drm_gem_shmem_get_sg_table() if this is
a sparse object.
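Probably something like this (untested), mirroring what you already do
in drm_gem_shmem_get_pages_sgt_locked():

	if (drm_WARN_ON(obj->dev, shmem->sparse))
		return ERR_PTR(-EINVAL);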