Message-ID: <20250228053650.393646-8-honglei1.huang@amd.com>
Date: Fri, 28 Feb 2025 13:36:50 +0800
From: Honglei Huang <honglei1.huang@....com>
To: David Airlie <airlied@...hat.com>, Gerd Hoffmann <kraxel@...hat.com>,
Gurchetan Singh <gurchetansingh@...omium.org>, Chia-I Wu <olvaffe@...il.com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>, Maxime Ripard
<mripard@...nel.org>, Thomas Zimmermann <tzimmermann@...e.de>, Simona Vetter
<simona@...ll.ch>, Rob Clark <robdclark@...il.com>, Huang Rui
<ray.huang@....com>
CC: <dri-devel@...ts.freedesktop.org>, <virtualization@...ts.linux.dev>,
<linux-kernel@...r.kernel.org>, Demi Marie Obenour <demiobenour@...il.com>,
Dmitry Osipenko <dmitry.osipenko@...labora.com>, Honglei Huang
<Honglei1.Huang@....com>
Subject: [PATCH v1 7/7] drm/virtio: implement userptr: add mmu notifier
From: Honglei Huang <Honglei1.Huang@....com>
Add an MMU notifier. This has two benefits:
- The UMD does not need to manage the userptrs; it just allocates and
  frees user space memory, and with the MMU notifier the userptrs can
  be managed by the kernel.
- A performance improvement of 20%~30%. With the MMU notifier, a UMD
  like OpenCL can achieve 98% of bare-metal performance in some
  benchmarks such as Geekbench and CLpeak.
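For context: the implementation below is built on the kernel's
mmu_interval_notifier API. A minimal sketch of that registration
pattern (the demo_* names are illustrative only, not part of this
patch):

  #include <linux/mmu_notifier.h>

  static bool demo_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
  {
          /* Called whenever any part of the watched range is unmapped
           * or remapped; record the new sequence and defer real work. */
          mmu_interval_set_seq(mni, cur_seq);
          return true;
  }

  static const struct mmu_interval_notifier_ops demo_ops = {
          .invalidate = demo_invalidate,
  };

  /* Watch `length` bytes of the user address space at `start`. */
  static int demo_watch(struct mmu_interval_notifier *mni,
                        struct mm_struct *mm,
                        unsigned long start, unsigned long length)
  {
          return mmu_interval_notifier_insert(mni, mm, start, length,
                                              &demo_ops);
  }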
Signed-off-by: Honglei Huang <Honglei1.Huang@....com>
---
drivers/gpu/drm/virtio/virtgpu_drv.h | 47 ++-
drivers/gpu/drm/virtio/virtgpu_ioctl.c | 4 +-
drivers/gpu/drm/virtio/virtgpu_kms.c | 2 +
drivers/gpu/drm/virtio/virtgpu_userptr.c | 423 ++++++++++++++++++++++-
4 files changed, 469 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index fa5dd46e3732..6fa6dd9d1738 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -42,6 +42,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/virtgpu_drm.h>
+#include <linux/mmu_notifier.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
@@ -121,9 +122,33 @@ struct virtio_gpu_object_userptr_ops {
int (*get_pages)(struct virtio_gpu_object_userptr *userptr);
void (*put_pages)(struct virtio_gpu_object_userptr *userptr);
void (*release)(struct virtio_gpu_object_userptr *userptr);
- int (*insert)(struct virtio_gpu_object_userptr *userptr, struct virtio_gpu_fpriv *fpriv);
- int (*remove)(struct virtio_gpu_object_userptr *userptr, struct virtio_gpu_fpriv *fpriv);
+ int (*insert)(struct virtio_gpu_object_userptr *userptr,
+ struct virtio_gpu_fpriv *fpriv);
+ int (*remove)(struct virtio_gpu_object_userptr *userptr,
+ struct virtio_gpu_fpriv *fpriv);
+ bool (*valid)(struct virtio_gpu_object_userptr *userptr);
+ void (*notifier_init)(struct virtio_gpu_object_userptr *userptr,
+ struct mm_struct *mm);
+ int (*notifier_add)(struct virtio_gpu_object_userptr *userptr,
+ unsigned long start, unsigned long length);
+ void (*notifier_remove)(struct virtio_gpu_object_userptr *userptr);
+ int (*split)(struct virtio_gpu_object_userptr *userptr,
+ unsigned long start, unsigned long last,
+ struct virtio_gpu_object_userptr **pnew);
+ void (*evict)(struct virtio_gpu_object_userptr *userptr);
+ void (*update)(struct virtio_gpu_object_userptr *userptr);
+ struct virtio_gpu_object_userptr *(*split_new)(
+ struct virtio_gpu_object_userptr *userptr, unsigned long start,
+ unsigned long last);
};
+
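+/*
+ * Deferred operations on a userptr, queued from the MMU notifier
+ * callback and executed later from the fpriv's userptr_work handler.
+ */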
+enum userptr_work_list_ops {
+ USERPTR_OP_NULL,
+ USERPTR_OP_UNMAP,
+ USERPTR_OP_UPDATE,
+ USERPTR_OP_EVICT,
+};
+
struct virtio_gpu_object_userptr {
struct virtio_gpu_object base;
const struct virtio_gpu_object_userptr_ops *ops;
@@ -142,6 +167,16 @@ struct virtio_gpu_object_userptr {
struct sg_table *sgt;
struct interval_tree_node it_node;
+
+#ifdef CONFIG_MMU_NOTIFIER
+ struct list_head work_list;
+ enum userptr_work_list_ops op;
+ atomic_t in_release;
+ struct mm_struct *mm;
+ uint64_t notifier_start;
+ uint64_t notifier_last;
+ struct mmu_interval_notifier notifier;
+#endif
};
#define to_virtio_gpu_shmem(virtio_gpu_object) \
@@ -317,6 +352,12 @@ struct virtio_gpu_fpriv {
bool explicit_debug_name;
struct rb_root_cached userptrs_tree;
struct mutex userptrs_tree_lock;
+
+#ifdef CONFIG_MMU_NOTIFIER
+ struct work_struct userptr_work;
+ struct list_head userptr_work_list;
+ spinlock_t userptr_work_list_lock;
+#endif
};
/* virtgpu_ioctl.c */
@@ -536,4 +577,6 @@ bool virtio_gpu_is_userptr(struct virtio_gpu_object *bo);
void virtio_gpu_userptr_interval_tree_init(struct virtio_gpu_fpriv *vfpriv);
void virtio_gpu_userptr_set_handle(struct virtio_gpu_object *qobj,
uint32_t handle);
+uint32_t virtio_gpu_userptr_get_handle(struct virtio_gpu_object *qobj);
+void virtio_gpu_userptr_list_work_init(struct virtio_gpu_fpriv *vfpriv);
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index ad1ac8d0eadf..14326fd8fee9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -697,8 +697,10 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
}
}
- if (vfpriv->context_init & VIRTIO_GPU_CAPSET_HSAKMT)
+ if (vfpriv->context_init & VIRTIO_GPU_CAPSET_HSAKMT) {
+ virtio_gpu_userptr_list_work_init(vfpriv);
virtio_gpu_userptr_interval_tree_init(vfpriv);
+ }
virtio_gpu_create_context_locked(vgdev, vfpriv);
virtio_gpu_notify(vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 3d5158caef46..3dc44eb16fb8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -345,6 +345,8 @@ void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
return;
if (vfpriv->context_created) {
+ if (vfpriv->context_init & VIRTIO_GPU_CAPSET_HSAKMT)
+ flush_work(&vfpriv->userptr_work);
virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
virtio_gpu_notify(vgdev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_userptr.c b/drivers/gpu/drm/virtio/virtgpu_userptr.c
index 03398c3b9f30..10264227f3e7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_userptr.c
+++ b/drivers/gpu/drm/virtio/virtgpu_userptr.c
@@ -3,6 +3,7 @@
#include <linux/mm.h>
#include <linux/pid.h>
#include <linux/vmalloc.h>
+#include <linux/mmu_notifier.h>
#include "virtgpu_drv.h"
#include "drm/drm_gem.h"
@@ -13,21 +14,422 @@ virtio_gpu_userptr_get_sg_table(struct drm_gem_object *obj);
static int virtio_gpu_userptr_insert(struct virtio_gpu_object_userptr *userptr,
struct virtio_gpu_fpriv *vfpriv)
{
+ int ret;
+
if (!userptr->ops->insert)
return -EINVAL;
- return userptr->ops->insert(userptr, vfpriv);
+ ret = userptr->ops->insert(userptr, vfpriv);
+ if (ret)
+ return ret;
+
+	if (userptr->ops->notifier_add) {
+		ret = userptr->ops->notifier_add(userptr, userptr->start,
+						 userptr->last -
+						 userptr->start + 1UL);
+		/* Don't leave a stale tree entry behind on failure. */
+		if (ret && userptr->ops->remove)
+			userptr->ops->remove(userptr, vfpriv);
+	}
+
+ return ret;
}
static int virtio_gpu_userptr_remove(struct virtio_gpu_object_userptr *userptr,
struct virtio_gpu_fpriv *vfpriv)
{
+ int ret;
+
if (!userptr->ops->remove)
return -EINVAL;
- return userptr->ops->remove(userptr, vfpriv);
+ ret = userptr->ops->remove(userptr, vfpriv);
+ if (ret)
+ return ret;
+
+ if (userptr->ops->notifier_remove)
+ userptr->ops->notifier_remove(userptr);
+
+ return ret;
+}
+
+static bool virtio_gpu_userptr_valid(struct virtio_gpu_object_userptr *userptr)
+{
+ if (userptr->ops->valid)
+ return userptr->ops->valid(userptr);
+
+ return true;
+}
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+static bool
+virtio_gpu_userptr_invalidate(struct mmu_interval_notifier *mn,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
+
+static const struct mmu_interval_notifier_ops virtio_gpu_userptr_mn_ops = {
+ .invalidate = virtio_gpu_userptr_invalidate,
+};
+
+static int
+virtio_gpu_userptr_add_notifier(struct virtio_gpu_object_userptr *userptr,
+ unsigned long start, unsigned long length)
+{
+ if (!start || !length)
+ return -EINVAL;
+
+ return mmu_interval_notifier_insert(&userptr->notifier, userptr->mm,
+ start, length,
+ &virtio_gpu_userptr_mn_ops);
+}
+
+static void
+virtio_gpu_userptr_remove_notifier(struct virtio_gpu_object_userptr *userptr)
+{
+ mmu_interval_notifier_remove(&userptr->notifier);
+}
+
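+/* Drop the GEM handle; the final unref releases the userptr. */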
+static void virtio_gpu_userptr_unmap(struct virtio_gpu_object_userptr *userptr)
+{
+ drm_gem_handle_delete(userptr->file, userptr->bo_handle);
+}
+
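+/*
+ * Re-sync the notifier and the interval tree with the (possibly
+ * shrunk) [userptr->start, userptr->last] range.
+ */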
+static void virtio_gpu_userptr_update_notifier_and_interval_tree(
+ struct virtio_gpu_object_userptr *userptr)
+{
+ unsigned long start = userptr->notifier.interval_tree.start;
+ unsigned long last = userptr->notifier.interval_tree.last;
+
+ if (userptr->start == start && userptr->last == last)
+ return;
+
+ if (start != 0 && last != 0)
+ virtio_gpu_userptr_remove(userptr, userptr->file->driver_priv);
+
+ virtio_gpu_userptr_insert(userptr, userptr->file->driver_priv);
+	userptr->op = USERPTR_OP_NULL;
}
+static int virtio_gpu_userptr_split(struct virtio_gpu_object_userptr *userptr,
+ unsigned long valid_start,
+ unsigned long valid_last,
+ struct virtio_gpu_object_userptr **new)
+{
+ uint64_t old_start = userptr->start;
+ uint64_t old_last = userptr->last;
+
+ if (old_start != valid_start && old_last != valid_last)
+ return -EINVAL;
+ if (valid_start < old_start || valid_last > old_last)
+ return -EINVAL;
+
+ if (userptr->ops->split_new)
+ *new = userptr->ops->split_new(userptr, valid_start,
+ valid_last);
+
+ userptr->start = valid_start;
+ userptr->last = valid_last;
+
+ return 0;
+}
+
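+/*
+ * Shrink the userptr so that it no longer overlaps the invalidated
+ * [mn_start, mn_last] range; ->split() may hand the remainder to a
+ * new userptr object.
+ */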
+static void
+virtio_gpu_userptr_update_split(struct virtio_gpu_object_userptr *userptr,
+ unsigned long mn_start, unsigned long mn_last)
+{
+ struct virtio_gpu_object_userptr *head;
+ struct virtio_gpu_object_userptr *tail;
+
+ if (!userptr->ops->split)
+ return;
+ if (userptr->op == USERPTR_OP_UNMAP)
+ return;
+
+ if (mn_start > userptr->last || mn_last < userptr->start)
+ return;
+
+ head = tail = userptr;
+ if (mn_start > userptr->start)
+ userptr->ops->split(userptr, userptr->start, mn_start - 1UL,
+ &tail);
+ else if (mn_last < userptr->last)
+ userptr->ops->split(userptr, mn_last + 1UL, userptr->last,
+ &head);
+}
+
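+/*
+ * Queue a deferred op for the work handler.  A userptr that is
+ * already queued keeps its place, and a pending UNMAP is never
+ * downgraded to another op.
+ */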
+static void
+virtio_gpu_userptr_add_list_work(struct virtio_gpu_object_userptr *userptr,
+ int op)
+{
+ struct virtio_gpu_fpriv *vfpriv = userptr->file->driver_priv;
+
+ spin_lock(&vfpriv->userptr_work_list_lock);
+
+ if (!list_empty(&userptr->work_list)) {
+ if (op != USERPTR_OP_NULL && userptr->op != USERPTR_OP_UNMAP)
+ userptr->op = op;
+ } else {
+ userptr->op = op;
+ list_add_tail(&userptr->work_list, &vfpriv->userptr_work_list);
+ }
+
+ spin_unlock(&vfpriv->userptr_work_list_lock);
+}
+
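+/* Resolve the pfn currently mapped at addr within vma. */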
+static int virtio_gpu_follow_pfn(struct vm_area_struct *vma, uint64_t addr,
+ unsigned long *pfn)
+{
+ struct follow_pfnmap_args args = { .vma = vma, .address = addr };
+
+ if (follow_pfnmap_start(&args))
+ return -EINVAL;
+
+ *pfn = args.pfn;
+ follow_pfnmap_end(&args);
+
+ return 0;
+}
+
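+/*
+ * Check that every page of [start, end) in vma still maps to the
+ * same pfn that backs userptr->pages[].
+ */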
+static int virtio_gpu_userptr_check(struct virtio_gpu_object_userptr *userptr,
+ struct vm_area_struct *vma, uint64_t start,
+ uint64_t end)
+{
+ uint64_t addr;
+ int ret;
+ unsigned long pfn;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ ret = virtio_gpu_follow_pfn(vma, addr, &pfn);
+ if (ret)
+ return -EINVAL;
+
+ if (page_to_pfn(userptr->pages[(addr - userptr->start) >>
+ PAGE_SHIFT]) != pfn)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
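+/* Walk all VMAs covering the notifier range and validate each part. */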
+static int
+virtio_gpu_userptr_check_range(struct virtio_gpu_object_userptr *userptr,
+ uint64_t notifier_start, uint64_t notifier_last)
+{
+ uint64_t start, end, addr;
+ int r = 0;
+
+ start = notifier_start;
+	end = notifier_last + PAGE_SIZE;
+
+ for (addr = start; !r && addr < end;) {
+ struct vm_area_struct *vma;
+ uint64_t next = 0;
+
+ vma = vma_lookup(userptr->mm, addr);
+
+ if (vma) {
+ next = min(vma->vm_end, end);
+			r = virtio_gpu_userptr_check(userptr, vma, addr, next);
+ if (r)
+ break;
+ } else {
+ r = -EFAULT;
+ break;
+ }
+
+ addr = next;
+ }
+
+ return r;
+}
+
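+/*
+ * If [start, last] covers the whole userptr, queue it for unmap;
+ * otherwise split off the invalidated part and queue an update.
+ */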
+static void
+virtio_gpu_update_or_remove_userptr(struct virtio_gpu_object_userptr *userptr,
+ unsigned long start, unsigned long last)
+{
+	if (userptr->start >= start && userptr->last <= last) {
+ if (atomic_xchg(&userptr->in_release, 1) == 0) {
+ virtio_gpu_userptr_add_list_work(userptr,
+ USERPTR_OP_UNMAP);
+ }
+ } else {
+ virtio_gpu_userptr_update_split(userptr, start, last);
+ virtio_gpu_userptr_add_list_work(userptr, USERPTR_OP_UPDATE);
+ }
+}
+
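+/*
+ * Deferred eviction: re-validate the invalidated range and only
+ * update or unmap the userptr if its pages actually changed.
+ */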
+static void virtio_gpu_userptr_evict(struct virtio_gpu_object_userptr *userptr)
+{
+ if (!userptr->notifier_start || !userptr->notifier_last)
+ return;
+
+ if (userptr->notifier_start < userptr->start ||
+ userptr->notifier_last > userptr->last)
+ return;
+
+ if (virtio_gpu_userptr_check_range(userptr, userptr->notifier_start,
+ userptr->notifier_last)) {
+ virtio_gpu_update_or_remove_userptr(
+ userptr, userptr->notifier_start,
+			userptr->notifier_last + PAGE_SIZE - 1UL);
+ }
+
+ userptr->notifier_start = 0;
+ userptr->notifier_last = 0;
+}
+
+static void
+virtio_gpu_userptr_handle_list_work(struct virtio_gpu_object_userptr *userptr)
+{
+ switch (userptr->op) {
+ case USERPTR_OP_NULL:
+ break;
+ case USERPTR_OP_UNMAP:
+ virtio_gpu_userptr_unmap(userptr);
+ break;
+ case USERPTR_OP_UPDATE:
+ if (userptr->ops->update)
+ userptr->ops->update(userptr);
+ break;
+ case USERPTR_OP_EVICT:
+ if (userptr->ops->evict)
+ userptr->ops->evict(userptr);
+ break;
+ default:
+ break;
+ }
+}
+
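+/*
+ * Work handler: drain the deferred-op list.  Each op runs under
+ * mmap_write_lock and the interval-tree lock.
+ */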
+static void virtio_gpu_userptr_invalidate_work(struct work_struct *work)
+{
+ struct virtio_gpu_fpriv *vfpriv;
+ struct virtio_gpu_object_userptr *userptr;
+
+ vfpriv = container_of(work, struct virtio_gpu_fpriv, userptr_work);
+
+ spin_lock(&vfpriv->userptr_work_list_lock);
+ while (!list_empty(&vfpriv->userptr_work_list)) {
+ userptr = list_first_entry(&vfpriv->userptr_work_list,
+ struct virtio_gpu_object_userptr,
+ work_list);
+ spin_unlock(&vfpriv->userptr_work_list_lock);
+
+ mmap_write_lock(userptr->mm);
+
+ spin_lock(&vfpriv->userptr_work_list_lock);
+ list_del_init(&userptr->work_list);
+ spin_unlock(&vfpriv->userptr_work_list_lock);
+
+ mutex_lock(&vfpriv->userptrs_tree_lock);
+
+ virtio_gpu_userptr_handle_list_work(userptr);
+
+ mutex_unlock(&vfpriv->userptrs_tree_lock);
+ mmap_write_unlock(userptr->mm);
+
+ spin_lock(&vfpriv->userptr_work_list_lock);
+ }
+ spin_unlock(&vfpriv->userptr_work_list_lock);
+}
+
+void virtio_gpu_userptr_list_work_init(struct virtio_gpu_fpriv *vfpriv)
+{
+ INIT_WORK(&vfpriv->userptr_work, virtio_gpu_userptr_invalidate_work);
+ INIT_LIST_HEAD(&vfpriv->userptr_work_list);
+ spin_lock_init(&vfpriv->userptr_work_list_lock);
+}
+
+static void
+virtio_gpu_userptr_schedule_list_work(struct virtio_gpu_fpriv *vfpriv)
+{
+ spin_lock(&vfpriv->userptr_work_list_lock);
+ if (!list_empty(&vfpriv->userptr_work_list))
+ schedule_work(&vfpriv->userptr_work);
+ spin_unlock(&vfpriv->userptr_work_list_lock);
+}
+
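+/*
+ * MMU interval notifier callback.  No heavy work may be done here:
+ * record the invalidated range, queue a deferred op and kick the
+ * work handler.
+ */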
+static bool
+virtio_gpu_userptr_invalidate(struct mmu_interval_notifier *mn,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct virtio_gpu_object_userptr *userptr;
+ unsigned long start;
+ unsigned long last;
+
+ if (range->event == MMU_NOTIFY_RELEASE)
+ return true;
+ if (!mmget_not_zero(mn->mm))
+ return true;
+
+ start = mn->interval_tree.start;
+ last = mn->interval_tree.last;
+ start = (max(start, range->start) >> PAGE_SHIFT) << PAGE_SHIFT;
+ last = (min(last, range->end - 1UL) >> PAGE_SHIFT) << PAGE_SHIFT;
+
+ userptr = container_of(mn, struct virtio_gpu_object_userptr, notifier);
+ userptr->mm = mn->mm;
+
+ mutex_lock(&userptr->lock);
+ mmu_interval_set_seq(mn, cur_seq);
+
+ if (userptr->op != USERPTR_OP_UNMAP) {
+ switch (range->event) {
+ case MMU_NOTIFY_UNMAP:
+ virtio_gpu_update_or_remove_userptr(
+ userptr, start,
+				last + PAGE_SIZE - 1UL);
+ break;
+ default:
+ userptr->notifier_start = start;
+ userptr->notifier_last = last;
+ virtio_gpu_userptr_add_list_work(userptr,
+ USERPTR_OP_EVICT);
+ break;
+ }
+ }
+
+ virtio_gpu_userptr_schedule_list_work(userptr->file->driver_priv);
+
+ mutex_unlock(&userptr->lock);
+ mmput(mn->mm);
+ return true;
+}
+
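+/*
+ * Flush the work handler until the deferred-op list stays empty so
+ * that a new userptr is not created while invalidations are pending.
+ */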
+static void
+virtio_gpu_userptr_lock_and_flush_work(struct virtio_gpu_fpriv *vfpriv)
+{
+	do {
+		flush_work(&vfpriv->userptr_work);
+	} while (!list_empty(&vfpriv->userptr_work_list));
+}
+
+static bool virtio_gpu_userptr_valid_with_notifier(
+ struct virtio_gpu_object_userptr *userptr)
+{
+	return !atomic_read(&userptr->in_release) &&
+	       userptr->op == USERPTR_OP_NULL;
+}
+
+static void
+virtio_gpu_userptr_notifier_init(struct virtio_gpu_object_userptr *userptr,
+ struct mm_struct *mm)
+{
+ userptr->notifier_start = 0;
+ userptr->notifier_last = 0;
+ atomic_set(&userptr->in_release, 0);
+ INIT_LIST_HEAD(&userptr->work_list);
+ mutex_init(&userptr->lock);
+ userptr->mm = mm;
+}
+
+#else
+static void
+virtio_gpu_userptr_lock_and_flush_work(struct virtio_gpu_fpriv *vfpriv)
+{
+}
+
+void virtio_gpu_userptr_list_work_init(struct virtio_gpu_fpriv *vfpriv)
+{
+}
+#endif /* CONFIG_MMU_NOTIFIER */
+
+
static uint64_t virtio_gpu_userptr_get_offset(struct virtio_gpu_object *qobj,
uint64_t addr)
{
@@ -52,7 +454,8 @@ virtio_gpu_userptr_from_addr_range(struct virtio_gpu_fpriv *vfpriv,
userptr = container_of(node, struct virtio_gpu_object_userptr,
it_node);
- if (start >= userptr->start && last <= userptr->last) {
+ if (start >= userptr->start && last <= userptr->last &&
+ virtio_gpu_userptr_valid(userptr)) {
ret = userptr;
return ret;
}
@@ -92,7 +495,6 @@ void virtio_gpu_userptr_set_handle(struct virtio_gpu_object *qobj,
uint32_t handle)
{
struct virtio_gpu_object_userptr *userptr = to_virtio_gpu_userptr(qobj);
-
userptr->bo_handle = handle;
}
@@ -254,6 +656,9 @@ virtio_gpu_userptr_init(struct drm_device *dev, struct drm_file *file,
obj = &userptr->base.base.base;
obj->funcs = &virtio_gpu_userptr_funcs;
+ if (userptr->ops->notifier_init)
+ userptr->ops->notifier_init(userptr, current->mm);
+
drm_gem_private_object_init(dev, obj, aligned_size);
ret = virtio_gpu_resource_id_get(userptr->vgdev,
@@ -268,6 +673,15 @@ static const struct virtio_gpu_object_userptr_ops virtio_gpu_userptr_ops = {
.release = virtio_gpu_userptr_release,
.insert = virtio_gpu_userptr_insert_interval_tree,
.remove = virtio_gpu_userptr_remove_interval_tree,
+#ifdef CONFIG_MMU_NOTIFIER
+ .valid = virtio_gpu_userptr_valid_with_notifier,
+ .notifier_init = virtio_gpu_userptr_notifier_init,
+ .notifier_add = virtio_gpu_userptr_add_notifier,
+ .notifier_remove = virtio_gpu_userptr_remove_notifier,
+ .split = virtio_gpu_userptr_split,
+ .update = virtio_gpu_userptr_update_notifier_and_interval_tree,
+ .evict = virtio_gpu_userptr_evict,
+#endif
};
int virtio_gpu_userptr_create(struct virtio_gpu_device *vgdev,
@@ -290,6 +704,7 @@ int virtio_gpu_userptr_create(struct virtio_gpu_device *vgdev,
params->size))
return -EFAULT;
+ virtio_gpu_userptr_lock_and_flush_work(vfpriv);
mutex_lock(&vfpriv->userptrs_tree_lock);
userptr = virtio_gpu_userptr_from_addr_range(
--
2.34.1