Message-Id: <20201019061803.13298-15-shuo.a.liu@intel.com>
Date: Mon, 19 Oct 2020 14:18:00 +0800
From: shuo.a.liu@...el.com
To: linux-kernel@...r.kernel.org, x86@...nel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
"H . Peter Anvin" <hpa@...or.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Yu Wang <yu1.wang@...el.com>,
Reinette Chatre <reinette.chatre@...el.com>,
Shuo Liu <shuo.a.liu@...el.com>,
Zhi Wang <zhi.a.wang@...el.com>,
Zhenyu Wang <zhenyuw@...ux.intel.com>
Subject: [PATCH v5 14/17] virt: acrn: Introduce I/O ranges operation interfaces
From: Shuo Liu <shuo.a.liu@...el.com>
An I/O request of a User VM, which is constructed by the hypervisor, is
distributed by the ACRN Hypervisor Service Module to the I/O client
whose address range covers the address of the I/O request.
Each I/O client maintains a list of address ranges. Introduce
acrn_ioreq_range_{add,del}() to manage these address ranges.
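
A minimal usage sketch (illustrative only, not part of this patch): an
in-kernel I/O client emulating a legacy COM1 UART could register and
release its port range with the new interfaces as below. The helper
names, the port range and the assumption that "client" was created
earlier with acrn_ioreq_client_create() are hypothetical.

  /*
   * Illustrative sketch: "client" is assumed to come from
   * acrn_ioreq_client_create(); the COM1 range 0x3f8-0x3ff and the
   * helper names are examples, not part of this patch.
   */
  static int sample_add_uart_range(struct acrn_ioreq_client *client)
  {
          /* Route COM1 port I/O requests of the User VM to this client. */
          return acrn_ioreq_range_add(client, ACRN_IOREQ_TYPE_PORTIO,
                                      0x3f8, 0x3ff);
  }

  static void sample_del_uart_range(struct acrn_ioreq_client *client)
  {
          /* Stop monitoring the range before the client is destroyed. */
          acrn_ioreq_range_del(client, ACRN_IOREQ_TYPE_PORTIO,
                               0x3f8, 0x3ff);
  }

The _bh lock variants used by the new helpers presumably pair with a
read-side lookup in the I/O request dispatch path, which is outside
this patch.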
Signed-off-by: Shuo Liu <shuo.a.liu@...el.com>
Reviewed-by: Reinette Chatre <reinette.chatre@...el.com>
Cc: Zhi Wang <zhi.a.wang@...el.com>
Cc: Zhenyu Wang <zhenyuw@...ux.intel.com>
Cc: Yu Wang <yu1.wang@...el.com>
Cc: Reinette Chatre <reinette.chatre@...el.com>
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/virt/acrn/acrn_drv.h | 4 +++
drivers/virt/acrn/ioreq.c | 60 ++++++++++++++++++++++++++++++++++++
2 files changed, 64 insertions(+)
diff --git a/drivers/virt/acrn/acrn_drv.h b/drivers/virt/acrn/acrn_drv.h
index 701c83319115..5b824fa1ee57 100644
--- a/drivers/virt/acrn/acrn_drv.h
+++ b/drivers/virt/acrn/acrn_drv.h
@@ -199,6 +199,10 @@ struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
void *data, bool is_default,
const char *name);
void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client);
+int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end);
+void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end);
int acrn_msi_inject(struct acrn_vm *vm, u64 msi_addr, u64 msi_data);
diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c
index b2dbac205078..aa21c8ccc63a 100644
--- a/drivers/virt/acrn/ioreq.c
+++ b/drivers/virt/acrn/ioreq.c
@@ -101,6 +101,66 @@ int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
return ret;
}
+/**
+ * acrn_ioreq_range_add() - Add an iorange monitored by an ioreq client
+ * @client: The ioreq client
+ * @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
+ * @start: Start address of iorange
+ * @end: End address of iorange
+ *
+ * Return: 0 on success, <0 on error
+ */
+int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end)
+{
+ struct acrn_ioreq_range *range;
+
+ if (end < start) {
+ dev_err(acrn_dev.this_device,
+ "Invalid IO range [0x%llx,0x%llx]\n", start, end);
+ return -EINVAL;
+ }
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+
+ range->type = type;
+ range->start = start;
+ range->end = end;
+
+ write_lock_bh(&client->range_lock);
+ list_add(&range->list, &client->range_list);
+ write_unlock_bh(&client->range_lock);
+
+ return 0;
+}
+
+/**
+ * acrn_ioreq_range_del() - Delete an iorange monitored by an ioreq client
+ * @client: The ioreq client
+ * @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
+ * @start: Start address of iorange
+ * @end: End address of iorange
+ */
+void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end)
+{
+ struct acrn_ioreq_range *range;
+
+ write_lock_bh(&client->range_lock);
+ list_for_each_entry(range, &client->range_list, list) {
+ if (type == range->type &&
+ start == range->start &&
+ end == range->end) {
+ list_del(&range->list);
+ kfree(range);
+ break;
+ }
+ }
+ write_unlock_bh(&client->range_lock);
+}
+
/*
* ioreq_task() is the execution entity of handler thread of an I/O client.
* The handler callback of the I/O client is called within the handler thread.
--
2.28.0