Message-ID: <20230613172054.3959700-21-quic_eberman@quicinc.com>
Date: Tue, 13 Jun 2023 10:20:48 -0700
From: Elliot Berman <quic_eberman@...cinc.com>
To: Alex Elder <elder@...aro.org>,
Srinivas Kandagatla <srinivas.kandagatla@...aro.org>,
Elliot Berman <quic_eberman@...cinc.com>,
Prakruthi Deepak Heragu <quic_pheragu@...cinc.com>
CC: Murali Nalajala <quic_mnalajal@...cinc.com>,
Trilok Soni <quic_tsoni@...cinc.com>,
Srivatsa Vaddagiri <quic_svaddagi@...cinc.com>,
Carl van Schaik <quic_cvanscha@...cinc.com>,
Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
Bjorn Andersson <andersson@...nel.org>,
"Konrad Dybcio" <konrad.dybcio@...aro.org>,
Arnd Bergmann <arnd@...db.de>,
"Greg Kroah-Hartman" <gregkh@...uxfoundation.org>,
Rob Herring <robh+dt@...nel.org>,
Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>,
Conor Dooley <conor+dt@...nel.org>,
Jonathan Corbet <corbet@....net>,
Bagas Sanjaya <bagasdotme@...il.com>,
Will Deacon <will@...nel.org>, Andy Gross <agross@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Jassi Brar <jassisinghbrar@...il.com>,
<linux-arm-msm@...r.kernel.org>, <devicetree@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <linux-doc@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>
Subject: [PATCH v14 20/25] virt: gunyah: Add IO handlers

Add a framework for VM functions to handle stage-2 write faults from Gunyah
guest virtual machines. Each IO handler covers a range of guest addresses to
which it applies. Optionally, a handler may apply only when the value
written matches the IO handler's data value.
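
For example, a VM function could register a write handler that fires only
when the guest stores a particular value to a doorbell register. A minimal
sketch (all of the my_doorbell* names are hypothetical and not part of this
patch):

    static int my_doorbell_write(struct gh_vm_io_handler *io_dev, u64 addr,
                                 u32 len, u64 data)
    {
            /* Runs under the VM's mmio_handler_lock (read side) */
            return 0;
    }

    static struct gh_vm_io_handler_ops my_doorbell_ops = {
            .write = my_doorbell_write,
    };

    static struct gh_vm_io_handler my_doorbell = {
            .addr = 0x1000,         /* guest physical address to watch */
            .len = sizeof(u32),     /* nonzero and <= sizeof(data) for datamatch */
            .datamatch = true,      /* only fire when the guest writes .data */
            .data = 1,
            .ops = &my_doorbell_ops,
    };

    /* in the VM function's bind path: */
    ret = gh_vm_add_io_handler(ghvm, &my_doorbell);
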
Reviewed-by: Alex Elder <elder@...aro.org>
Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@...cinc.com>
Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@...cinc.com>
Signed-off-by: Elliot Berman <quic_eberman@...cinc.com>
---
drivers/virt/gunyah/vm_mgr.c | 104 ++++++++++++++++++++++++++++++++++
drivers/virt/gunyah/vm_mgr.h | 4 ++
include/linux/gunyah_vm_mgr.h | 25 ++++++++
3 files changed, 133 insertions(+)
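
Note for reviewers (below the cut, so git-am drops it): gh_vm_mmio_write()
returns -ENODEV when no registered handler matches the faulting
address/length/value, which lets a caller, e.g. a vCPU stage-2 fault
handler later in this series, fall back to another exit path. Roughly (the
caller below is a sketch, not code from this series):

    static bool gh_handle_mmio_write(struct gh_vm *ghvm, u64 addr,
                                     u32 len, u64 data)
    {
            /* -ENODEV means no io handler claimed this write */
            return gh_vm_mmio_write(ghvm, addr, len, data) != -ENODEV;
    }
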
diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
index d6aa1731148ae..28da1f95e49ec 100644
--- a/drivers/virt/gunyah/vm_mgr.c
+++ b/drivers/virt/gunyah/vm_mgr.c
@@ -276,6 +276,108 @@ static void gh_vm_clean_resources(struct gh_vm *ghvm)
 	mutex_unlock(&ghvm->resources_lock);
 }
 
+static int _gh_vm_io_handler_compare(const struct rb_node *node, const struct rb_node *parent)
+{
+	struct gh_vm_io_handler *n = container_of(node, struct gh_vm_io_handler, node);
+	struct gh_vm_io_handler *p = container_of(parent, struct gh_vm_io_handler, node);
+
+	if (n->addr < p->addr)
+		return -1;
+	if (n->addr > p->addr)
+		return 1;
+	if ((n->len && !p->len) || (!n->len && p->len))
+		return 0;
+	if (n->len < p->len)
+		return -1;
+	if (n->len > p->len)
+		return 1;
+	/*
+	 * If only one of the io handlers has datamatch, treat them as
+	 * equal for comparison purposes: the one without datamatch
+	 * matches any written value and so covers the one that does.
+	 */
+	if (n->datamatch != p->datamatch)
+		return 0;
+	if (n->data < p->data)
+		return -1;
+	if (n->data > p->data)
+		return 1;
+	return 0;
+}
+
+static int gh_vm_io_handler_compare(struct rb_node *node, const struct rb_node *parent)
+{
+	return _gh_vm_io_handler_compare(node, parent);
+}
+
+static int gh_vm_io_handler_find(const void *key, const struct rb_node *node)
+{
+	const struct gh_vm_io_handler *k = key;
+
+	return _gh_vm_io_handler_compare(&k->node, node);
+}
+
+static struct gh_vm_io_handler *gh_vm_mgr_find_io_hdlr(struct gh_vm *ghvm, u64 addr,
+							u64 len, u64 data)
+{
+	struct gh_vm_io_handler key = {
+		.addr = addr,
+		.len = len,
+		.datamatch = true,
+		.data = data,
+	};
+	struct rb_node *node;
+
+	node = rb_find(&key, &ghvm->mmio_handler_root, gh_vm_io_handler_find);
+	if (!node)
+		return NULL;
+
+	return container_of(node, struct gh_vm_io_handler, node);
+}
+
+int gh_vm_mmio_write(struct gh_vm *ghvm, u64 addr, u32 len, u64 data)
+{
+	struct gh_vm_io_handler *io_hdlr = NULL;
+	int ret;
+
+	down_read(&ghvm->mmio_handler_lock);
+	io_hdlr = gh_vm_mgr_find_io_hdlr(ghvm, addr, len, data);
+	if (!io_hdlr || !io_hdlr->ops || !io_hdlr->ops->write) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = io_hdlr->ops->write(io_hdlr, addr, len, data);
+
+out:
+	up_read(&ghvm->mmio_handler_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gh_vm_mmio_write);
+
+int gh_vm_add_io_handler(struct gh_vm *ghvm, struct gh_vm_io_handler *io_hdlr)
+{
+	struct rb_node *found;
+
+	if (io_hdlr->datamatch && (!io_hdlr->len || io_hdlr->len > sizeof(io_hdlr->data)))
+		return -EINVAL;
+
+	down_write(&ghvm->mmio_handler_lock);
+	found = rb_find_add(&io_hdlr->node, &ghvm->mmio_handler_root, gh_vm_io_handler_compare);
+	up_write(&ghvm->mmio_handler_lock);
+
+	return found ? -EEXIST : 0;
+}
+EXPORT_SYMBOL_GPL(gh_vm_add_io_handler);
+
+void gh_vm_remove_io_handler(struct gh_vm *ghvm, struct gh_vm_io_handler *io_hdlr)
+{
+	down_write(&ghvm->mmio_handler_lock);
+	rb_erase(&io_hdlr->node, &ghvm->mmio_handler_root);
+	up_write(&ghvm->mmio_handler_lock);
+}
+EXPORT_SYMBOL_GPL(gh_vm_remove_io_handler);
+
 static int gh_vm_rm_notification_status(struct gh_vm *ghvm, void *data)
 {
 	struct gh_rm_vm_status_payload *payload = data;
@@ -361,6 +463,8 @@ static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
 	mutex_init(&ghvm->resources_lock);
 	INIT_LIST_HEAD(&ghvm->resources);
 	INIT_LIST_HEAD(&ghvm->resource_tickets);
+	init_rwsem(&ghvm->mmio_handler_lock);
+	ghvm->mmio_handler_root = RB_ROOT;
 	INIT_LIST_HEAD(&ghvm->functions);
 
 	ghvm->vm_status = GH_RM_VM_STATUS_NO_STATE;
diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
index e5e0c92d4cb12..3fc0f91dfd1a9 100644
--- a/drivers/virt/gunyah/vm_mgr.h
+++ b/drivers/virt/gunyah/vm_mgr.h
@@ -56,10 +56,14 @@ struct gh_vm {
 	struct mutex resources_lock;
 	struct list_head resources;
 	struct list_head resource_tickets;
+	struct rb_root mmio_handler_root;
+	struct rw_semaphore mmio_handler_lock;
 };
 
 int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region);
 void gh_vm_mem_reclaim(struct gh_vm *ghvm);
 struct gh_vm_mem *gh_vm_mem_find_by_addr(struct gh_vm *ghvm, u64 guest_phys_addr, u32 size);
 
+int gh_vm_mmio_write(struct gh_vm *ghvm, u64 addr, u32 len, u64 data);
+
 #endif
diff --git a/include/linux/gunyah_vm_mgr.h b/include/linux/gunyah_vm_mgr.h
index af97fec9e2ef5..527e946243698 100644
--- a/include/linux/gunyah_vm_mgr.h
+++ b/include/linux/gunyah_vm_mgr.h
@@ -125,4 +125,29 @@ struct gh_vm_resource_ticket {
 int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket);
 void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket);
 
+/*
+ * gh_vm_io_handler describes an io device: the guest address and length
+ * it responds to, and the ops used to handle accesses to it.
+ */
+struct gh_vm_io_handler {
+	struct rb_node node;
+	u64 addr;
+
+	bool datamatch;
+	u8 len;
+	u64 data;
+	struct gh_vm_io_handler_ops *ops;
+};
+
+/*
+ * gh_vm_io_handler_ops contains the function pointers for an io device.
+ */
+struct gh_vm_io_handler_ops {
+	int (*read)(struct gh_vm_io_handler *io_dev, u64 addr, u32 len, u64 data);
+	int (*write)(struct gh_vm_io_handler *io_dev, u64 addr, u32 len, u64 data);
+};
+
+int gh_vm_add_io_handler(struct gh_vm *ghvm, struct gh_vm_io_handler *io_dev);
+void gh_vm_remove_io_handler(struct gh_vm *ghvm, struct gh_vm_io_handler *io_dev);
+
 #endif
--
2.40.0