Message-Id: <1513711816-2618-25-git-send-email-dongwon.kim@intel.com>
Date: Tue, 19 Dec 2017 11:29:41 -0800
From: Dongwon Kim <dongwon.kim@...el.com>
To: linux-kernel@...r.kernel.org
Cc: dri-devel@...ts.freedesktop.org, xen-devel@...ts.xenproject.org,
mateuszx.potrola@...el.com, dongwon.kim@...el.com
Subject: [RFC PATCH 25/60] hyper_dmabuf: introduced delayed unexport
From: Mateusz Polrola <mateuszx.potrola@...el.com>
To avoid the overhead of re-exporting a DMA_BUF that is requested again
right after it is unexported, a small delay is introduced in the
unexporting process. This delay acts as a probation period: if the same
DMA_BUF is requested to be exported again while the unexport is still
pending, the pending unexport is canceled right away and the buffer can
be reused without going through the full re-export process.
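As an illustration, here is a minimal sketch of the probation-period
pattern (the unexport_work/unexport_scheduled fields mirror the ones
added by this patch; the struct and function names are simplified
placeholders, not the driver's actual symbols):

  #include <linux/workqueue.h>
  #include <linux/jiffies.h>

  /* simplified stand-in for hyper_dmabuf_sgt_info */
  struct buf_entry {
          struct delayed_work unexport_work;   /* deferred unexport */
          bool unexport_scheduled;             /* probation in progress */
  };

  static void do_unexport(struct work_struct *work)
  {
          struct buf_entry *buf =
                  container_of(work, struct buf_entry, unexport_work.work);

          /* the real invalidation/teardown would happen here */
          buf->unexport_scheduled = false;
  }

  /* unexport path: start the probation period instead of tearing down */
  static void schedule_unexport(struct buf_entry *buf, unsigned int delay_ms)
  {
          if (buf->unexport_scheduled)
                  return;

          buf->unexport_scheduled = true;
          INIT_DELAYED_WORK(&buf->unexport_work, do_unexport);
          schedule_delayed_work(&buf->unexport_work,
                                msecs_to_jiffies(delay_ms));
  }

  /*
   * export path: if an unexport is still pending, cancel it so the
   * existing buffer can be reused.  Returns false when the work has
   * already run and a full re-export is required.
   */
  static bool try_reuse(struct buf_entry *buf)
  {
          if (!buf->unexport_scheduled)
                  return true;

          if (!cancel_delayed_work_sync(&buf->unexport_work))
                  return false;

          buf->unexport_scheduled = false;
          return true;
  }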
Additionally, "FIRST EXPORT" message is synchronously transmitted to
the exporter VM (importer VM waits for the response.) to make sure
the buffer is still valid (not unexported) on expoter VM's side before
importer VM starts to use it.
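For illustration, this is a condensed version of the exporter-side
handling that hyper_dmabuf_msg_parse() gains in this patch (the helper
name handle_first_export() is hypothetical; only the status/counter
logic is shown):

  static void handle_first_export(struct hyper_dmabuf_req *req)
  {
          struct hyper_dmabuf_sgt_info *info =
                  hyper_dmabuf_find_exported(req->operands[0]);

          if (!info || !info->valid) {
                  /* buffer is gone or already scheduled for unexport */
                  req->status = HYPER_DMABUF_REQ_ERROR;
                  return;
          }

          /* importer is about to hand out a local fd for this buffer */
          info->importer_exported++;
          req->status = HYPER_DMABUF_REQ_PROCESSED;
  }

The importer treats anything other than HYPER_DMABUF_REQ_PROCESSED in
the synchronous reply as "buffer no longer valid" and fails the
export_fd ioctl instead of mapping a stale buffer.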
"delayed_ms" attribute is added to unexport ioctl, used for hardcoding
delay from userspace.
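For example, userspace could schedule an unexport with a 500 ms
probation period roughly as follows (the device node path and the
IOCTL_HYPER_DMABUF_UNEXPORT request code are assumptions about the
driver's uapi, not defined in this patch):

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>

  #include "hyper_dmabuf_ioctl.h"   /* struct ioctl_hyper_dmabuf_unexport */

  int main(void)
  {
          struct ioctl_hyper_dmabuf_unexport arg = {
                  .hyper_dmabuf_id = 1,   /* id returned by a prior export */
                  .delay_ms = 500,        /* keep the buffer for 500 ms */
          };
          int fd = open("/dev/hyper_dmabuf", O_RDWR);

          if (fd < 0)
                  return 1;

          if (ioctl(fd, IOCTL_HYPER_DMABUF_UNEXPORT, &arg) < 0)
                  perror("unexport");
          else
                  printf("unexport scheduled, status %d\n", arg.status);

          close(fd);
          return 0;
  }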
Signed-off-by: Dongwon Kim <dongwon.kim@...el.com>
---
drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c | 4 +
drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 157 ++++++++++++++-------
drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h | 2 +
drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c | 41 +++---
drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h | 2 +
.../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c | 2 +
6 files changed, 139 insertions(+), 69 deletions(-)
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index d7a35fc..a9bc354 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -341,6 +341,10 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
/* extract pages from sgt */
page_info = hyper_dmabuf_ext_pgs(sgt_info->sgt);
+ if (!page_info) {
+ return NULL;
+ }
+
/* create a new sg_table with extracted pages */
st = hyper_dmabuf_create_sgt(page_info->pages, page_info->frst_ofst,
page_info->last_len, page_info->nents);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index b0f5b5b..018de8c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -115,11 +115,24 @@ static int hyper_dmabuf_export_remote(void *data)
ret = hyper_dmabuf_find_id_exported(dma_buf, export_remote_attr->remote_domain);
sgt_info = hyper_dmabuf_find_exported(ret);
if (ret != -1 && sgt_info->valid) {
+ /*
+ * Check if unexport is already scheduled for that buffer,
+ * if so, try to cancel it. If that fails, the buffer needs
+ * to be re-exported once again.
+ */
+ if (sgt_info->unexport_scheduled) {
+ if (!cancel_delayed_work_sync(&sgt_info->unexport_work)) {
+ dma_buf_put(dma_buf);
+ goto reexport;
+ }
+ sgt_info->unexport_scheduled = 0;
+ }
dma_buf_put(dma_buf);
export_remote_attr->hyper_dmabuf_id = ret;
return 0;
}
+reexport:
attachment = dma_buf_attach(dma_buf, hyper_dmabuf_private.device);
if (!attachment) {
dev_err(hyper_dmabuf_private.device, "Cannot get attachment\n");
@@ -133,7 +146,7 @@ static int hyper_dmabuf_export_remote(void *data)
sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
- sgt_info = kmalloc(sizeof(*sgt_info), GFP_KERNEL);
+ sgt_info = kcalloc(1, sizeof(*sgt_info), GFP_KERNEL);
sgt_info->hyper_dmabuf_id = hyper_dmabuf_get_id();
@@ -141,7 +154,6 @@ static int hyper_dmabuf_export_remote(void *data)
sgt_info->hyper_dmabuf_rdomain = export_remote_attr->remote_domain;
sgt_info->dma_buf = dma_buf;
sgt_info->valid = 1;
- sgt_info->importer_exported = 0;
sgt_info->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
sgt_info->active_attached = kmalloc(sizeof(struct attachment_list), GFP_KERNEL);
@@ -245,8 +257,35 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
/* look for dmabuf for the id */
sgt_info = hyper_dmabuf_find_imported(export_fd_attr->hyper_dmabuf_id);
- if (sgt_info == NULL) /* can't find sgt from the table */
+ if (sgt_info == NULL || !sgt_info->valid) /* can't find sgt in the table or it's no longer valid */
+ return -1;
+
+ sgt_info->num_importers++;
+
+ /* send notification for export_fd to exporter */
+ operand = sgt_info->hyper_dmabuf_id;
+
+ req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+ hyper_dmabuf_create_request(req, HYPER_DMABUF_FIRST_EXPORT, &operand);
+
+ ret = ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, true);
+
+ if (ret < 0) {
+ kfree(req);
+ dev_err(hyper_dmabuf_private.device, "Failed to create sgt or notify exporter\n");
+ sgt_info->num_importers--;
+ return -EINVAL;
+ }
+ kfree(req);
+
+ if (ret == HYPER_DMABUF_REQ_ERROR) {
+ dev_err(hyper_dmabuf_private.device,
+ "Buffer invalid\n");
+ sgt_info->num_importers--;
return -1;
+ } else {
+ dev_dbg(hyper_dmabuf_private.device, "Can import buffer\n");
+ }
dev_dbg(hyper_dmabuf_private.device,
"%s Found buffer gref %d off %d last len %d nents %d domain %d\n", __func__,
@@ -262,86 +301,62 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
sgt_info->nents,
&sgt_info->refs_info);
+ if (!data_pages) {
+ sgt_info->num_importers--;
+ return -EINVAL;
+ }
+
sgt_info->sgt = hyper_dmabuf_create_sgt(data_pages, sgt_info->frst_ofst,
sgt_info->last_len, sgt_info->nents);
}
- /* send notification for export_fd to exporter */
- operand = sgt_info->hyper_dmabuf_id;
-
- req = kcalloc(1, sizeof(*req), GFP_KERNEL);
- hyper_dmabuf_create_request(req, HYPER_DMABUF_FIRST_EXPORT, &operand);
-
- ret = ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, false);
-
- if (!sgt_info->sgt || ret) {
- kfree(req);
- dev_err(hyper_dmabuf_private.device, "Failed to create sgt or notify exporter\n");
- return -EINVAL;
- }
- kfree(req);
-
export_fd_attr->fd = hyper_dmabuf_export_fd(sgt_info, export_fd_attr->flags);
if (export_fd_attr->fd < 0) {
/* fail to get fd */
ret = export_fd_attr->fd;
- } else {
- sgt_info->num_importers++;
}
- dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
- return ret;
+ dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
+ return 0;
}
/* unexport dmabuf from the database and send int req to the source domain
* to unmap it.
*/
-static int hyper_dmabuf_unexport(void *data)
+static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
{
- struct ioctl_hyper_dmabuf_unexport *unexport_attr;
- struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
- struct hyper_dmabuf_sgt_info *sgt_info;
struct hyper_dmabuf_req *req;
+ int hyper_dmabuf_id;
int ret;
+ struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+ struct hyper_dmabuf_sgt_info *sgt_info =
+ container_of(work, struct hyper_dmabuf_sgt_info, unexport_work.work);
- dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
-
- if (!data) {
- dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
- return -EINVAL;
- }
-
- unexport_attr = (struct ioctl_hyper_dmabuf_unexport *)data;
+ if (!sgt_info)
+ return;
- /* find dmabuf in export list */
- sgt_info = hyper_dmabuf_find_exported(unexport_attr->hyper_dmabuf_id);
+ hyper_dmabuf_id = sgt_info->hyper_dmabuf_id;
- /* failed to find corresponding entry in export list */
- if (sgt_info == NULL) {
- unexport_attr->status = -EINVAL;
- return -EFAULT;
- }
+ dev_dbg(hyper_dmabuf_private.device,
+ "Marking buffer %d as invalid\n", hyper_dmabuf_id);
+ /* no longer valid */
+ sgt_info->valid = 0;
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
- hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &unexport_attr->hyper_dmabuf_id);
+ hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &hyper_dmabuf_id);
/* Now send unexport request to remote domain, marking that buffer should not be used anymore */
ret = ops->send_req(sgt_info->hyper_dmabuf_rdomain, req, true);
if (ret < 0) {
- kfree(req);
- return -EFAULT;
+ dev_err(hyper_dmabuf_private.device, "unexport message for buffer %d failed\n", hyper_dmabuf_id);
}
/* free msg */
kfree(req);
-
- dev_dbg(hyper_dmabuf_private.device,
- "Marking buffer %d as invalid\n", unexport_attr->hyper_dmabuf_id);
- /* no longer valid */
- sgt_info->valid = 0;
+ sgt_info->unexport_scheduled = 0;
/*
* Immediately clean-up if it has never been exported by importer
@@ -352,16 +367,52 @@ static int hyper_dmabuf_unexport(void *data)
*/
if (!sgt_info->importer_exported) {
dev_dbg(hyper_dmabuf_private.device,
- "claning up buffer %d completly\n", unexport_attr->hyper_dmabuf_id);
+ "claning up buffer %d completly\n", hyper_dmabuf_id);
hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
- hyper_dmabuf_remove_exported(unexport_attr->hyper_dmabuf_id);
+ hyper_dmabuf_remove_exported(hyper_dmabuf_id);
kfree(sgt_info);
/* register hyper_dmabuf_id to the list for reuse */
- store_reusable_id(unexport_attr->hyper_dmabuf_id);
+ store_reusable_id(hyper_dmabuf_id);
}
+}
+
+/* Schedules unexport of dmabuf.
+ */
+static int hyper_dmabuf_unexport(void *data)
+{
+ struct ioctl_hyper_dmabuf_unexport *unexport_attr;
+ struct hyper_dmabuf_sgt_info *sgt_info;
dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
- return ret;
+
+ if (!data) {
+ dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
+ return -EINVAL;
+ }
+
+ unexport_attr = (struct ioctl_hyper_dmabuf_unexport *)data;
+
+ /* find dmabuf in export list */
+ sgt_info = hyper_dmabuf_find_exported(unexport_attr->hyper_dmabuf_id);
+
+ dev_dbg(hyper_dmabuf_private.device, "scheduling unexport of buffer %d\n", unexport_attr->hyper_dmabuf_id);
+
+ /* failed to find corresponding entry in export list */
+ if (sgt_info == NULL) {
+ unexport_attr->status = -EINVAL;
+ return -EFAULT;
+ }
+
+ if (sgt_info->unexport_scheduled)
+ return 0;
+
+ sgt_info->unexport_scheduled = 1;
+ INIT_DELAYED_WORK(&sgt_info->unexport_work, hyper_dmabuf_delayed_unexport);
+ schedule_delayed_work(&sgt_info->unexport_work,
+ msecs_to_jiffies(unexport_attr->delay_ms));
+
+ dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
+ return 0;
}
static int hyper_dmabuf_query(void *data)
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
index e43a25f..558964c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
@@ -90,6 +90,8 @@ struct ioctl_hyper_dmabuf_unexport {
/* IN parameters */
/* hyper dmabuf id to be unexported */
int hyper_dmabuf_id;
+ /* delay in ms by which unexport processing will be postponed */
+ int delay_ms;
/* OUT parameters */
/* Status of request */
int status;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index c99176ac..dd4bb01 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -112,7 +112,6 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
void cmd_process_work(struct work_struct *work)
{
- struct hyper_dmabuf_sgt_info *sgt_info;
struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
struct cmd_process *proc = container_of(work, struct cmd_process, work);
struct hyper_dmabuf_req *req;
@@ -154,19 +153,6 @@ void cmd_process_work(struct work_struct *work)
hyper_dmabuf_register_imported(imported_sgt_info);
break;
- case HYPER_DMABUF_FIRST_EXPORT:
- /* find a corresponding SGT for the id */
- sgt_info = hyper_dmabuf_find_exported(req->operands[0]);
-
- if (!sgt_info) {
- dev_err(hyper_dmabuf_private.device,
- "critical err: requested sgt_info can't be found %d\n", req->operands[0]);
- break;
- }
-
- sgt_info->importer_exported++;
- break;
-
case HYPER_DMABUF_OPS_TO_REMOTE:
/* notifying dmabuf map/unmap to importer (probably not needed) */
/* for dmabuf synchronization */
@@ -187,6 +173,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
struct cmd_process *proc;
struct hyper_dmabuf_req *temp_req;
struct hyper_dmabuf_imported_sgt_info *sgt_info;
+ struct hyper_dmabuf_sgt_info *exp_sgt_info;
int ret;
if (!req) {
@@ -216,8 +203,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
if (sgt_info) {
/* if anything is still using dma_buf */
- if (sgt_info->dma_buf &&
- dmabuf_refcount(sgt_info->dma_buf) > 0) {
+ if (sgt_info->num_importers) {
/*
* Buffer is still in use, just mark that it should
* not be allowed to export its fd anymore.
@@ -255,6 +241,29 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
return req->command;
}
+ /* synchronous dma_buf_fd export */
+ if (req->command == HYPER_DMABUF_FIRST_EXPORT) {
+ /* find a corresponding SGT for the id */
+ exp_sgt_info = hyper_dmabuf_find_exported(req->operands[0]);
+
+ if (!exp_sgt_info) {
+ dev_err(hyper_dmabuf_private.device,
+ "critical err: requested sgt_info can't be found %d\n", req->operands[0]);
+ req->status = HYPER_DMABUF_REQ_ERROR;
+ } else if (!exp_sgt_info->valid) {
+ dev_dbg(hyper_dmabuf_private.device,
+ "Buffer no longer valid - cannot export\n");
+ req->status = HYPER_DMABUF_REQ_ERROR;
+ } else {
+ dev_dbg(hyper_dmabuf_private.device,
+ "Buffer still valid - can export\n");
+ exp_sgt_info->importer_exported++;
+ req->status = HYPER_DMABUF_REQ_PROCESSED;
+ }
+ return req->command;
+ }
+
+
dev_dbg(hyper_dmabuf_private.device,
"%s: putting request to workqueue\n", __func__);
temp_req = kmalloc(sizeof(*temp_req), GFP_KERNEL);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
index 2a58218..a41fd0a 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
@@ -78,6 +78,8 @@ struct hyper_dmabuf_sgt_info {
bool valid;
int importer_exported; /* exported locally on importer's side */
void *refs_info; /* hypervisor-specific info for the references */
+ struct delayed_work unexport_work;
+ bool unexport_scheduled;
int private[4]; /* device specific info (e.g. image's meta info?) */
};
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index ba6b126..a8cce26 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -551,6 +551,8 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
dev_err(hyper_dmabuf_private.device, "request timed-out\n");
return -EBUSY;
}
+
+ return req_pending.status;
}
return 0;
--
2.7.4