[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250401104508.684dddaf@collabora.com>
Date: Tue, 1 Apr 2025 10:45:08 +0200
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Adrián Larumbe <adrian.larumbe@...labora.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>, Maarten Lankhorst
<maarten.lankhorst@...ux.intel.com>, Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>, David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>, Rob Herring <robh@...nel.org>, Steven
Price <steven.price@....com>, Liviu Dudau <liviu.dudau@....com>,
kernel@...labora.com, linux-kernel@...r.kernel.org,
dri-devel@...ts.freedesktop.org
Subject: Re: [RFC PATCH v2 2/6] drm/shmem: Introduce the notion of sparse
objects
On Wed, 26 Mar 2025 02:14:22 +0000
Adrián Larumbe <adrian.larumbe@...labora.com> wrote:
> Sparse DRM objects will store their backing pages in an xarray, to avoid
> the overhead of preallocating a huge struct page pointer array when only
> a very small range of indices might be assigned.
>
> For now, only the definition of a sparse object as a union alternative
> to a 'dense' object is provided, with functions that exploit it being
> part of later commits.
>
> Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 68 +++++++++++++++++++++++++-
> include/drm/drm_gem_shmem_helper.h | 23 ++++++++-
> 2 files changed, 88 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index d99dee67353a..5f75eb1230f6 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -128,6 +128,31 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
> }
> EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
>
> +/**
> + * drm_gem_shmem_create_sparse - Allocate a sparse object with the given size
> + * @dev: DRM device
> + * @size: Size of the sparse object to allocate
> + *
> + * This function creates a sparse shmem GEM object.
> + *
> + * Returns:
> + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> + * error code on failure.
> + */
> +struct drm_gem_shmem_object *drm_gem_shmem_create_sparse(struct drm_device *dev, size_t size)
> +{
> + struct drm_gem_shmem_object *shmem =
> + __drm_gem_shmem_create(dev, size, false, NULL);
> +
> + if (!IS_ERR(shmem)) {
> + shmem->sparse = true;
> + xa_init_flags(&shmem->xapages, XA_FLAGS_ALLOC);
> + }
> +
> + return shmem;
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_create_sparse);
> +
> /**
> * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
> * given mountpoint
> @@ -173,8 +198,8 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
> sg_free_table(shmem->sgt);
> kfree(shmem->sgt);
> }
> - if (shmem->pages)
> - drm_gem_shmem_put_pages(shmem);
> +
> + drm_gem_shmem_put_pages(shmem);
>
> drm_WARN_ON(obj->dev, shmem->pages_use_count);
>
> @@ -196,6 +221,12 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
> if (shmem->pages_use_count++ > 0)
> return 0;
>
> + /* We only allow increasing the user count in the case of
> + * sparse shmem objects with some backed pages for now
> + */
> + if (shmem->sparse && xa_empty(&shmem->xapages))
> + return -EINVAL;
> +
> pages = drm_gem_get_pages(obj);
> if (IS_ERR(pages)) {
> drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
> @@ -231,6 +262,14 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
>
> dma_resv_assert_held(shmem->base.resv);
>
> + if (!shmem->sparse) {
> + if (!shmem->pages)
> + return;
> + } else {
> + /* Not implemented yet */
> + return;
> + }
> +
> if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
> return;
>
> @@ -404,8 +443,15 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
> {
> struct drm_gem_object *obj = &shmem->base;
>
> + if (shmem->sparse) {
> + drm_err(obj->dev, "UM unmapping of sparse shmem objects not implemented\n");
> + return;
> + }
> +
> if (drm_gem_is_imported(obj)) {
> dma_buf_vunmap(obj->dma_buf, map);
> + } else if (obj->import_attach) {
> + dma_buf_vunmap(obj->import_attach->dmabuf, map);
> } else {
> dma_resv_assert_held(shmem->base.resv);
>
> @@ -541,6 +587,12 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> struct page *page;
> pgoff_t page_offset;
>
> + /* TODO: Implement UM mapping of sparse shmem objects */
> + if (drm_WARN_ON(obj->dev, shmem->sparse)) {
> + drm_err(obj->dev, "UM mapping of sparse shmem objects not implemented\n");
> + return VM_FAULT_SIGBUS;
> + }
> +
> /* We don't use vmf->pgoff since that has the fake offset */
> page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
>
> @@ -566,8 +618,14 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
> struct drm_gem_object *obj = vma->vm_private_data;
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>
> + /* TODO: Implement UM mapping of sparse shmem objects */
> + if (drm_WARN_ON(obj->dev, shmem->sparse))
> + return;
> +
> drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
>
> + drm_WARN_ON(obj->dev, obj->import_attach);
> +
> dma_resv_lock(shmem->base.resv, NULL);
>
> /*
> @@ -690,6 +748,9 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
> {
> struct drm_gem_object *obj = &shmem->base;
>
> + if (drm_WARN_ON(obj->dev, shmem->sparse))
> + return ERR_PTR(-EINVAL);
> +
> drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
>
> return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
> @@ -702,6 +763,9 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
> int ret;
> struct sg_table *sgt;
>
> + if (drm_WARN_ON(obj->dev, shmem->sparse))
> + return ERR_PTR(-EINVAL);
> +
> if (shmem->sgt)
> return shmem->sgt;
>
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index cef5a6b5a4d6..00e47512b30f 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -6,6 +6,7 @@
> #include <linux/fs.h>
> #include <linux/mm.h>
> #include <linux/mutex.h>
> +#include <linux/xarray.h>
>
> #include <drm/drm_file.h>
> #include <drm/drm_gem.h>
> @@ -29,7 +30,10 @@ struct drm_gem_shmem_object {
> /**
> * @pages: Page table
> */
> - struct page **pages;
> + union {
> + struct page **pages;
> + struct xarray xapages;
> + };
I've played with this code, and I must admit I'm not a huge fan of this
union. It's just super easy to forget an
if (is_xarray) do_this else do_that
in one of the paths, and end up accessing the wrong type without even
noticing (or noticing late).
I'd rather go for a drm_gem_shmem_sparse_backing object, and have an
optional pointer to this sparse_backing object in drm_gem_shmem_object.
I actually tried this approach here [1], and it seems to work.
>
> /**
> * @pages_use_count:
> @@ -91,12 +95,18 @@ struct drm_gem_shmem_object {
> * @map_wc: map object write-combined (instead of using shmem defaults).
> */
> bool map_wc : 1;
> +
> + /**
> + * @sparse: the object is only partially backed by pages
> + */
> + bool sparse : 1;
> };
>
> #define to_drm_gem_shmem_obj(obj) \
> container_of(obj, struct drm_gem_shmem_object, base)
>
> struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
> +struct drm_gem_shmem_object *drm_gem_shmem_create_sparse(struct drm_device *dev, size_t size);
> struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
> size_t size,
> struct vfsmount *gemfs);
> @@ -210,6 +220,10 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_
> {
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>
> + /* Use the specific sparse shmem get_sg_table function instead */
> + if (WARN_ON(shmem->sparse))
> + return ERR_PTR(-EINVAL);
> +
> return drm_gem_shmem_get_sg_table(shmem);
> }
>
> @@ -229,6 +243,10 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
> {
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>
> + /* TODO: Implement kernel mapping of sparse shmem objects */
> + if (WARN_ON(shmem->sparse))
> + return -EACCES;
> +
Okay, this is where things start to get messy. Sparse objects support a
subset of the features supported by regular shmem objects, which
not only makes the code error-prone, but makes it hard for people to
guess what a vmap/get_pages/sgt/... implementation should do. I think
we should define it right now, and IMO, the simplest is to just assume
that sparse objects, when operated as regular objects, provide the same
functionality. That is:
- vmap will populate all pages, pin them, and vmap them
- get_pages will populate all pages
- get_sgt will populate all pages and instantiate an sgt covering the
whole GEM
- ...
Of course, this means that sparse objects are pointless when operated as
regular objects, but it makes them safe to use, and if we want to
discourage people from calling vmap/get_pages/... on a sparse object, we can
always add a drm_warn() in those places.
> return drm_gem_shmem_vmap(shmem, map);
> }
>
> @@ -263,6 +281,9 @@ static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct v
> {
> struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>
> + if (shmem->sparse)
> + return -EACCES;
> +
> return drm_gem_shmem_mmap(shmem, vma);
> }
>
[1]https://gitlab.freedesktop.org/panfrost/linux/-/merge_requests/16/diffs?commit_id=ec08f6c7a728bc6bfc8031ab5fd67ffe92453726
Powered by blists - more mailing lists