Message-Id: <20230404131734.45943-1-sgarzare@redhat.com>
Date: Tue, 4 Apr 2023 15:17:34 +0200
From: Stefano Garzarella <sgarzare@...hat.com>
To: virtualization@...ts.linux-foundation.org
Cc: stefanha@...hat.com,
Andrey Zhadchenko <andrey.zhadchenko@...tuozzo.com>,
kvm@...r.kernel.org, eperezma@...hat.com,
"Michael S. Tsirkin" <mst@...hat.com>,
linux-kernel@...r.kernel.org, Jason Wang <jasowang@...hat.com>,
netdev@...r.kernel.org, Stefano Garzarella <sgarzare@...hat.com>
Subject: [PATCH v5 9/9] vdpa_sim: add support for user VA
The new "use_va" module parameter (default: true) is used in
vdpa_alloc_device() to inform the vDPA framework that the device
supports VA.
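
For illustration, a parent driver advertises VA support through the
last argument of the vdpa_alloc_device() wrapper; a minimal sketch,
assuming the current signature of that macro (everything except the
macro itself and `use_va` is a hypothetical placeholder):

  /* The trailing `use_va` argument tells the vDPA core whether the
   * device accesses buffers through user VA instead of translated
   * physical addresses. `vdpa` must be the first member of the
   * hypothetical struct my_parent_dev. */
  struct my_parent_dev *p;

  p = vdpa_alloc_device(struct my_parent_dev, vdpa, NULL,
                        &my_config_ops, 1, 1, "my-dev", use_va);
  if (IS_ERR(p))
          return PTR_ERR(p);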

vringh is initialized to use VA only when "use_va" is true and the
user's mm has been bound, that is, only when the bus supports user VA
(e.g. vhost-vdpa).
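
For context, the bus side is expected to drive these callbacks; a
sketch of how a bus like vhost-vdpa could forward the owner's mm to
the device (illustrative, assuming the `use_va` flag on struct
vdpa_device and the bind_mm op added earlier in this series; not
necessarily the exact vhost-vdpa code):

  static int vhost_vdpa_bind_mm(struct vhost_vdpa *v)
  {
          struct vdpa_device *vdpa = v->vdpa;
          const struct vdpa_config_ops *ops = vdpa->config;

          /* Only meaningful for devices that declared use_va. */
          if (!vdpa->use_va || !ops->bind_mm)
                  return 0;

          return ops->bind_mm(vdpa, v->vdev.mm);
  }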

The vdpasim_mm_work_fn work is used to serialize binding to a new
address space when the .bind_mm callback is invoked, and unbinding
when the .unbind_mm callback is invoked.
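
In other words, the mm switch runs on the same kthread worker as the
data path, so queuing a one-shot work item and flushing it is enough
to serialize the switch against any in-flight work (condensed from
the hunks below):

  struct vdpasim_mm_work mm_work = {
          .vdpasim    = vdpasim,
          .mm_to_bind = mm,       /* NULL on .unbind_mm */
  };

  kthread_init_work(&mm_work.work, vdpasim_mm_work_fn);
  kthread_queue_work(vdpasim->worker, &mm_work.work);
  /* When the flush returns, no data-path work can still be
   * running on the old address space. */
  kthread_flush_work(&mm_work.work);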

Call mmget_not_zero()/kthread_use_mm() inside the worker function
to pin the address space only as long as needed, following the
documentation of mmget() in include/linux/sched/mm.h:

  * Never use this function to pin this address space for an
  * unbounded/indefinite amount of time.
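
The resulting pattern in the worker is a scoped pin around each run
(an annotated condensation of vdpasim_work_fn() from the diff below):

  struct mm_struct *mm = vdpasim->mm_bound;

  if (use_va && mm) {
          /* mmget_not_zero() fails if the owner is already exiting
           * (mm_users has dropped to zero): nothing to do then. */
          if (!mmget_not_zero(mm))
                  return;
          kthread_use_mm(mm);     /* user VA is now dereferenceable */
  }

  vdpasim->dev_attr.work_fn(vdpasim);

  if (use_va && mm) {
          kthread_unuse_mm(mm);
          mmput(mm);              /* hold the pin only for this run */
  }
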
Acked-by: Jason Wang <jasowang@...hat.com>
Signed-off-by: Stefano Garzarella <sgarzare@...hat.com>
---
Notes:
v4:
- checked `use_va` in vdpasim_work_fn() [Jason]
- removed the `va_enabled` variable, since it was only used in the if
  condition

v3:
- called mmget_not_zero() before kthread_use_mm() [Jason]
  As the documentation of mmget() in include/linux/sched/mm.h says:
    * Never use this function to pin this address space for an
    * unbounded/indefinite amount of time.
  I moved mmget_not_zero/kthread_use_mm inside the worker function;
  this way we pin the address space only as long as needed.
  This is similar to what vfio_iommu_type1_dma_rw_chunk() does in
  drivers/vfio/vfio_iommu_type1.c
- simplified the mm bind/unbind [Jason]
- renamed vdpasim_worker_change_mm_sync() [Jason]
- fixed the commit message (s/default: false/default: true)

v2:
- `use_va` set to true by default [Eugenio]
- supported the new unbind_mm callback [Jason]
- removed the unbind_mm call in vdpasim_do_reset() [Jason]
- avoided releasing the lock while calling kthread_flush_work(), since
  we are now using a mutex to protect the device state

 drivers/vdpa/vdpa_sim/vdpa_sim.h |  1 +
 drivers/vdpa/vdpa_sim/vdpa_sim.c | 97 +++++++++++++++++++++++++++++---
 2 files changed, 90 insertions(+), 8 deletions(-)

diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 4774292fba8c..3a42887d05d9 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -59,6 +59,7 @@ struct vdpasim {
struct vdpasim_virtqueue *vqs;
struct kthread_worker *worker;
struct kthread_work work;
+ struct mm_struct *mm_bound;
struct vdpasim_dev_attr dev_attr;
/* mutex to synchronize virtqueue state */
struct mutex mutex;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 2b2e439a66f7..2c706bb18897 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -35,10 +35,44 @@ module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
"Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
+static bool use_va = true;
+module_param(use_va, bool, 0444);
+MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");
+
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0
+struct vdpasim_mm_work {
+ struct kthread_work work;
+ struct vdpasim *vdpasim;
+ struct mm_struct *mm_to_bind;
+ int ret;
+};
+
+static void vdpasim_mm_work_fn(struct kthread_work *work)
+{
+ struct vdpasim_mm_work *mm_work =
+ container_of(work, struct vdpasim_mm_work, work);
+ struct vdpasim *vdpasim = mm_work->vdpasim;
+
+ mm_work->ret = 0;
+
+ //TODO: should we attach the cgroup of the mm owner?
+ vdpasim->mm_bound = mm_work->mm_to_bind;
+}
+
+static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
+ struct vdpasim_mm_work *mm_work)
+{
+ struct kthread_work *work = &mm_work->work;
+
+ kthread_init_work(work, vdpasim_mm_work_fn);
+ kthread_queue_work(vdpasim->worker, work);
+
+ kthread_flush_work(work);
+}
+
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
return container_of(vdpa, struct vdpasim, vdpa);
@@ -59,13 +93,20 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
uint16_t last_avail_idx = vq->vring.last_avail_idx;
-
- vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, true,
- (struct vring_desc *)(uintptr_t)vq->desc_addr,
- (struct vring_avail *)
- (uintptr_t)vq->driver_addr,
- (struct vring_used *)
- (uintptr_t)vq->device_addr);
+ struct vring_desc *desc = (struct vring_desc *)
+ (uintptr_t)vq->desc_addr;
+ struct vring_avail *avail = (struct vring_avail *)
+ (uintptr_t)vq->driver_addr;
+ struct vring_used *used = (struct vring_used *)
+ (uintptr_t)vq->device_addr;
+
+ if (use_va && vdpasim->mm_bound) {
+ vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
+ true, desc, avail, used);
+ } else {
+ vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
+ true, desc, avail, used);
+ }
vq->vring.last_avail_idx = last_avail_idx;
@@ -130,8 +171,20 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops;
static void vdpasim_work_fn(struct kthread_work *work)
{
struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ struct mm_struct *mm = vdpasim->mm_bound;
+
+ if (use_va && mm) {
+ if (!mmget_not_zero(mm))
+ return;
+ kthread_use_mm(mm);
+ }
vdpasim->dev_attr.work_fn(vdpasim);
+
+ if (use_va && mm) {
+ kthread_unuse_mm(mm);
+ mmput(mm);
+ }
}
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
@@ -162,7 +215,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
vdpa = __vdpa_alloc_device(NULL, ops,
dev_attr->ngroups, dev_attr->nas,
dev_attr->alloc_size,
- dev_attr->name, false);
+ dev_attr->name, use_va);
if (IS_ERR(vdpa)) {
ret = PTR_ERR(vdpa);
goto err_alloc;
@@ -582,6 +635,30 @@ static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
return ret;
}
+static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_mm_work mm_work;
+
+ mm_work.vdpasim = vdpasim;
+ mm_work.mm_to_bind = mm;
+
+ vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
+
+ return mm_work.ret;
+}
+
+static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_mm_work mm_work;
+
+ mm_work.vdpasim = vdpasim;
+ mm_work.mm_to_bind = NULL;
+
+ vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
+}
+
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
u64 iova, u64 size,
u64 pa, u32 perm, void *opaque)
@@ -678,6 +755,8 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_group_asid = vdpasim_set_group_asid,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
+ .bind_mm = vdpasim_bind_mm,
+ .unbind_mm = vdpasim_unbind_mm,
.free = vdpasim_free,
};
@@ -712,6 +791,8 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_iova_range = vdpasim_get_iova_range,
.set_group_asid = vdpasim_set_group_asid,
.set_map = vdpasim_set_map,
+ .bind_mm = vdpasim_bind_mm,
+ .unbind_mm = vdpasim_unbind_mm,
.free = vdpasim_free,
};
--
2.39.2