Date:   Tue, 19 Dec 2017 11:29:47 -0800
From:   Dongwon Kim <dongwon.kim@...el.com>
To:     linux-kernel@...r.kernel.org
Cc:     dri-devel@...ts.freedesktop.org, xen-devel@...ts.xenproject.org,
        mateuszx.potrola@...el.com, dongwon.kim@...el.com
Subject: [RFC PATCH 31/60] hyper_dmabuf: built-in compilation option

From: Mateusz Polrola <mateuszx.potrola@...el.com>

Enable the built-in compilation option for the hyper_dmabuf driver.
Also, move backend initialization into open() to remove its dependency
on the kernel boot sequence.

hyper_dmabuf.h is now installed as one of the kernel's standard UAPI
header files.

This patch also adds missing allocation-failure checks and addresses
possible memory leaks in various places.

Signed-off-by: Dongwon Kim <dongwon.kim@...el.com>
---
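The reason init_comm_env() and get_vm_id() move out of module init is
the usual defer-to-first-open pattern. Below is a minimal sketch of that
pattern only; the names backend_ready, backend_lock and backend_start()
are made up for illustration and are not the driver's actual symbols
(the real change is in hyper_dmabuf_open() further down).

#include <linux/fs.h>
#include <linux/mutex.h>

static bool backend_ready;
static DEFINE_MUTEX(backend_lock);

static int backend_start(void); /* hypothetical hypervisor-specific setup */

static int example_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	/* Serialize first-open initialization against concurrent opens. */
	mutex_lock(&backend_lock);
	if (!backend_ready) {
		ret = backend_start(); /* may block on a userspace daemon */
		if (!ret)
			backend_ready = true;
	}
	mutex_unlock(&backend_lock);

	return ret;
}

This way the potentially blocking setup happens on the first open()
instead of stalling kernel boot when the driver is built in.
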
 drivers/xen/hyper_dmabuf/Kconfig                   |   1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c        |  17 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h        |   1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c         |  14 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c        |  13 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c      | 113 +++++++++++++++++----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c       |  15 +++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        |  20 ++++
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c    |  32 +++++-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |   6 ++
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |  15 +++
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c    |   6 ++
 include/uapi/xen/Kbuild                            |   6 ++
 13 files changed, 227 insertions(+), 32 deletions(-)
 create mode 100644 include/uapi/xen/Kbuild
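
The reworked error path in hyper_dmabuf_export_remote() below follows
the usual kernel goto-unwind idiom for a chain of allocations: each
failure jumps to a label that frees only what was allocated before it,
in reverse order. A minimal sketch of the idiom, using made-up names
(a, b, err_free_a) rather than the driver's code:

#include <linux/slab.h>

static int example_alloc_chain(void)
{
	void *a, *b;

	a = kmalloc(32, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kmalloc(32, GFP_KERNEL);
	if (!b)
		goto err_free_a;	/* undo only the earlier allocation */

	/* ... use a and b ... */
	kfree(b);
	kfree(a);
	return 0;

err_free_a:
	kfree(a);
	return -ENOMEM;
}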

diff --git a/drivers/xen/hyper_dmabuf/Kconfig b/drivers/xen/hyper_dmabuf/Kconfig
index 56633a2..185fdf8 100644
--- a/drivers/xen/hyper_dmabuf/Kconfig
+++ b/drivers/xen/hyper_dmabuf/Kconfig
@@ -14,6 +14,7 @@ config HYPER_DMABUF_XEN
 config HYPER_DMABUF_SYSFS
 	bool "Enable sysfs information about hyper DMA buffers"
 	default y
+	depends on HYPER_DMABUF
 	help
 	  Expose information about imported and exported buffers using
 	  hyper_dmabuf driver
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index a12d4dc..92d710e 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -66,6 +66,15 @@ static int __init hyper_dmabuf_drv_init(void)
 #ifdef CONFIG_HYPER_DMABUF_XEN
 	hyper_dmabuf_private.backend_ops = &xen_backend_ops;
 #endif
+	/*
+	 * Defer backend setup to first open call.
+	 * Some hypervisors (e.g. Xen) depend on userspace daemons such as
+	 * xenstored, and xenstore calls issued from the kernel block until
+	 * that daemon has started. If this module is built in, performing
+	 * the setup here would therefore block the entire kernel
+	 * initialization.
+	 */
+	hyper_dmabuf_private.backend_initialized = false;
 
 	dev_info(hyper_dmabuf_private.device,
 		 "initializing database for imported/exported dmabufs\n");
@@ -73,7 +82,6 @@ static int __init hyper_dmabuf_drv_init(void)
 	/* device structure initialization */
 	/* currently only does work-queue initialization */
 	hyper_dmabuf_private.work_queue = create_workqueue("hyper_dmabuf_wqueue");
-	hyper_dmabuf_private.domid = hyper_dmabuf_private.backend_ops->get_vm_id();
 
 	ret = hyper_dmabuf_table_init();
 	if (ret < 0) {
@@ -82,13 +90,6 @@ static int __init hyper_dmabuf_drv_init(void)
 		return ret;
 	}
 
-	ret = hyper_dmabuf_private.backend_ops->init_comm_env();
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"failed to initiailize hypervisor-specific comm env\n");
-		return ret;
-	}
-
 #ifdef CONFIG_HYPER_DMABUF_SYSFS
 	ret = hyper_dmabuf_register_sysfs(hyper_dmabuf_private.device);
 	if (ret < 0) {
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index 8445416..91fda04 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -77,6 +77,7 @@ struct hyper_dmabuf_private {
 	/* backend ops - hypervisor specific */
 	struct hyper_dmabuf_backend_ops *backend_ops;
 	struct mutex lock;
+	bool backend_initialized;
 };
 
 #endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index 35bfdfb..fe95091 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -40,6 +40,13 @@ void store_reusable_id(int id)
 	struct list_reusable_id *new_reusable;
 
 	new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL);
+
+	if (!new_reusable) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return;
+	}
+
 	new_reusable->id = id;
 
 	list_add(&new_reusable->list, &reusable_head->list);
@@ -94,6 +101,13 @@ int hyper_dmabuf_get_id(void)
 	/* first cla to hyper_dmabuf_get_id */
 	if (id == 0) {
 		reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
+
+		if (!reusable_head) {
+			dev_err(hyper_dmabuf_private.device,
+				"No memory left to be allocated\n");
+			return -ENOMEM;
+		}
+
 		reusable_head->id = -1; /* list head have invalid id */
 		INIT_LIST_HEAD(&reusable_head->list);
 		hyper_dmabuf_private.id_queue = reusable_head;
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 5a034ffb..34dfa18 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -270,6 +270,12 @@ inline int hyper_dmabuf_sync_request(int id, int dmabuf_ops)
 
 	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
+	if (!req) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, &operands[0]);
 
 	/* send request and wait for a response */
@@ -366,8 +372,11 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
 	return st;
 
 err_free_sg:
-	sg_free_table(st);
-	kfree(st);
+	if (st) {
+		sg_free_table(st);
+		kfree(st);
+	}
+
 	return NULL;
 }
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index fa700f2..c0048d9 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -115,22 +115,24 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
 	 */
 	ret = hyper_dmabuf_find_id_exported(dma_buf, export_remote_attr->remote_domain);
 	sgt_info = hyper_dmabuf_find_exported(ret);
-	if (ret != -ENOENT && sgt_info->valid) {
-		/*
-		 * Check if unexport is already scheduled for that buffer,
-		 * if so try to cancel it. If that will fail, buffer needs
-		 * to be reexport once again.
-		 */
-		if (sgt_info->unexport_scheduled) {
-			if (!cancel_delayed_work_sync(&sgt_info->unexport_work)) {
-				dma_buf_put(dma_buf);
-				goto reexport;
+	if (ret != -ENOENT && sgt_info != NULL) {
+		if (sgt_info->valid) {
+			/*
+			 * Check if unexport is already scheduled for that buffer;
+			 * if so, try to cancel it. If cancelling fails, the buffer
+			 * needs to be re-exported once again.
+			 */
+			if (sgt_info->unexport_scheduled) {
+				if (!cancel_delayed_work_sync(&sgt_info->unexport_work)) {
+					dma_buf_put(dma_buf);
+					goto reexport;
+				}
+				sgt_info->unexport_scheduled = 0;
 			}
-			sgt_info->unexport_scheduled = 0;
+			dma_buf_put(dma_buf);
+			export_remote_attr->hyper_dmabuf_id = ret;
+			return 0;
 		}
-		dma_buf_put(dma_buf);
-		export_remote_attr->hyper_dmabuf_id = ret;
-		return 0;
 	}
 
 reexport:
@@ -162,9 +164,32 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
 	sgt_info->valid = 1;
 
 	sgt_info->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
+	if (!sgt_info->active_sgts) {
+		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+		ret = -ENOMEM;
+		goto fail_map_active_sgts;
+	}
+
 	sgt_info->active_attached = kmalloc(sizeof(struct attachment_list), GFP_KERNEL);
+	if (!sgt_info->active_attached) {
+		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+		ret = -ENOMEM;
+		goto fail_map_active_attached;
+	}
+
 	sgt_info->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list), GFP_KERNEL);
+	if (!sgt_info->va_kmapped) {
+		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+		ret = -ENOMEM;
+		goto fail_map_va_kmapped;
+	}
+
 	sgt_info->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), GFP_KERNEL);
+	if (!sgt_info->va_vmapped) {
+		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+		ret = -ENOMEM;
+		goto fail_map_va_vmapped;
+	}
 
 	sgt_info->active_sgts->sgt = sgt;
 	sgt_info->active_attached->attach = attachment;
@@ -211,6 +236,11 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
 
 	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
+	if (!req) {
+		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+		goto fail_map_req;
+	}
+
 	/* composing a message to the importer */
 	hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT, &operands[0]);
 
@@ -233,6 +263,8 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
 
 fail_send_request:
 	kfree(req);
+
+fail_map_req:
 	hyper_dmabuf_remove_exported(sgt_info->hyper_dmabuf_id);
 
 fail_export:
@@ -242,10 +274,14 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
 	dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
 	dma_buf_put(sgt_info->dma_buf);
 
-	kfree(sgt_info->active_attached);
-	kfree(sgt_info->active_sgts);
-	kfree(sgt_info->va_kmapped);
 	kfree(sgt_info->va_vmapped);
+fail_map_va_vmapped:
+	kfree(sgt_info->va_kmapped);
+fail_map_va_kmapped:
+	kfree(sgt_info->active_attached);
+fail_map_active_attached:
+	kfree(sgt_info->active_sgts);
+fail_map_active_sgts:
 
 	return ret;
 }
@@ -288,6 +324,13 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
 	dev_dbg(hyper_dmabuf_private.device, "Exporting fd of buffer %d\n", operand);
 
 	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+	if (!req) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD, &operand);
 
 	ret = ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, true);
@@ -381,6 +424,12 @@ static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
 
 	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
+	if (!req) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return;
+	}
+
 	hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &hyper_dmabuf_id);
 
 	/* Now send unexport request to remote domain, marking that buffer should not be used anymore */
@@ -540,6 +589,11 @@ static long hyper_dmabuf_ioctl(struct file *filp,
 	hyper_dmabuf_ioctl_t func;
 	char *kdata;
 
+	if (nr >= ARRAY_SIZE(hyper_dmabuf_ioctls)) {
+		dev_err(hyper_dmabuf_private.device, "invalid ioctl\n");
+		return -EINVAL;
+	}
+
 	ioctl = &hyper_dmabuf_ioctls[nr];
 
 	func = ioctl->func;
@@ -574,11 +628,34 @@ static long hyper_dmabuf_ioctl(struct file *filp,
 
 int hyper_dmabuf_open(struct inode *inode, struct file *filp)
 {
+	int ret = 0;
+
 	/* Do not allow exclusive open */
 	if (filp->f_flags & O_EXCL)
 		return -EBUSY;
 
-	return 0;
+	/*
+	 * Initialize the backend if needed; use a mutex to
+	 * prevent a race when two userspace apps open the
+	 * device at the same time.
+	 */
+	mutex_lock(&hyper_dmabuf_private.lock);
+
+	if (!hyper_dmabuf_private.backend_initialized) {
+		hyper_dmabuf_private.domid = hyper_dmabuf_private.backend_ops->get_vm_id();
+
+		ret = hyper_dmabuf_private.backend_ops->init_comm_env();
+		if (ret < 0) {
+			dev_err(hyper_dmabuf_private.device,
+				"failed to initialize hypervisor-specific comm env\n");
+		} else {
+			hyper_dmabuf_private.backend_initialized = true;
+		}
+	}
+
+	mutex_unlock(&hyper_dmabuf_private.lock);
+
+	return ret;
 }
 
 static void hyper_dmabuf_emergency_release(struct hyper_dmabuf_sgt_info* sgt_info,
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
index c1285eb..90c8c56 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
@@ -34,8 +34,11 @@
 #include <asm/uaccess.h>
 #include <linux/hashtable.h>
 #include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_list.h"
 
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
 DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED);
 DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED);
 
@@ -132,6 +135,12 @@ int hyper_dmabuf_register_exported(struct hyper_dmabuf_sgt_info *info)
 
 	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
+	if (!info_entry) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	info_entry->info = info;
 
 	hash_add(hyper_dmabuf_hash_exported, &info_entry->node,
@@ -146,6 +155,12 @@ int hyper_dmabuf_register_imported(struct hyper_dmabuf_imported_sgt_info* info)
 
 	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
+	if (!info_entry) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	info_entry->info = info;
 
 	hash_add(hyper_dmabuf_hash_imported, &info_entry->node,
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index 3111cdc..5f64261 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -134,6 +134,13 @@ void cmd_process_work(struct work_struct *work)
 		 * operands5~8 : Driver-specific private data (e.g. graphic buffer's meta info)
 		 */
 		imported_sgt_info = kcalloc(1, sizeof(*imported_sgt_info), GFP_KERNEL);
+
+		if (!imported_sgt_info) {
+			dev_err(hyper_dmabuf_private.device,
+				"No memory left to be allocated\n");
+			break;
+		}
+
 		imported_sgt_info->hyper_dmabuf_id = req->operands[0];
 		imported_sgt_info->frst_ofst = req->operands[2];
 		imported_sgt_info->last_len = req->operands[3];
@@ -288,9 +295,22 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
 		"%s: putting request to workqueue\n", __func__);
 	temp_req = kmalloc(sizeof(*temp_req), GFP_KERNEL);
 
+	if (!temp_req) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	memcpy(temp_req, req, sizeof(*temp_req));
 
 	proc = kcalloc(1, sizeof(struct cmd_process), GFP_KERNEL);
+
+	if (!proc) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	proc->rq = temp_req;
 	proc->domid = domid;
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
index 0eded61..2dab833 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -78,6 +78,12 @@ int hyper_dmabuf_remote_sync(int id, int ops)
 	case HYPER_DMABUF_OPS_ATTACH:
 		attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
 
+		if (!attachl) {
+			dev_err(hyper_dmabuf_private.device,
+				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_ATTACH\n");
+			return -ENOMEM;
+		}
+
 		attachl->attach = dma_buf_attach(sgt_info->dma_buf,
 						 hyper_dmabuf_private.device);
 
@@ -85,7 +91,7 @@ int hyper_dmabuf_remote_sync(int id, int ops)
 			kfree(attachl);
 			dev_err(hyper_dmabuf_private.device,
 				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_ATTACH\n");
-			return PTR_ERR(attachl->attach);
+			return -ENOMEM;
 		}
 
 		list_add(&attachl->list, &sgt_info->active_attached->list);
@@ -121,12 +127,19 @@ int hyper_dmabuf_remote_sync(int id, int ops)
 					   struct attachment_list, list);
 
 		sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
+
+		if (!sgtl) {
+			dev_err(hyper_dmabuf_private.device,
+				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
+			return -ENOMEM;
+		}
+
 		sgtl->sgt = dma_buf_map_attachment(attachl->attach, DMA_BIDIRECTIONAL);
 		if (!sgtl->sgt) {
 			kfree(sgtl);
 			dev_err(hyper_dmabuf_private.device,
 				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
-			return PTR_ERR(sgtl->sgt);
+			return -ENOMEM;
 		}
 		list_add(&sgtl->list, &sgt_info->active_sgts->list);
 		break;
@@ -201,6 +214,11 @@ int hyper_dmabuf_remote_sync(int id, int ops)
 	case HYPER_DMABUF_OPS_KMAP_ATOMIC:
 	case HYPER_DMABUF_OPS_KMAP:
 		va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL);
+		if (!va_kmapl) {
+			dev_err(hyper_dmabuf_private.device,
+				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+			return -ENOMEM;
+		}
 
 		/* dummy kmapping of 1 page */
 		if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC)
@@ -212,7 +230,7 @@ int hyper_dmabuf_remote_sync(int id, int ops)
 			kfree(va_kmapl);
 			dev_err(hyper_dmabuf_private.device,
 				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
-			return PTR_ERR(va_kmapl->vaddr);
+			return -ENOMEM;
 		}
 		list_add(&va_kmapl->list, &sgt_info->va_kmapped->list);
 		break;
@@ -255,6 +273,12 @@ int hyper_dmabuf_remote_sync(int id, int ops)
 	case HYPER_DMABUF_OPS_VMAP:
 		va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL);
 
+		if (!va_vmapl) {
+			dev_err(hyper_dmabuf_private.device,
+				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VMAP\n");
+			return -ENOMEM;
+		}
+
 		/* dummy vmapping */
 		va_vmapl->vaddr = dma_buf_vmap(sgt_info->dma_buf);
 
@@ -262,7 +286,7 @@ int hyper_dmabuf_remote_sync(int id, int ops)
 			kfree(va_vmapl);
 			dev_err(hyper_dmabuf_private.device,
 				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VMAP\n");
-			return PTR_ERR(va_vmapl->vaddr);
+			return -ENOMEM;
 		}
 		list_add(&va_vmapl->list, &sgt_info->va_vmapped->list);
 		break;
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index ce9862a..43dd3b6 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -381,6 +381,12 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 
 	ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
 
+	if (!ring_info) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	ring_info->sdomain = domid;
 	ring_info->evtchn = rx_port;
 
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
index 2f469da..4708b49 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
@@ -34,9 +34,12 @@
 #include <asm/uaccess.h>
 #include <linux/hashtable.h>
 #include <xen/grant_table.h>
+#include "../hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_xen_comm_list.h"
 
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
 DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING);
 DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING);
 
@@ -52,6 +55,12 @@ int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info)
 
 	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
+	if (!info_entry) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	info_entry->info = ring_info;
 
 	hash_add(xen_comm_tx_ring_hash, &info_entry->node,
@@ -66,6 +75,12 @@ int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info)
 
 	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
+	if (!info_entry) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
 	info_entry->info = ring_info;
 
 	hash_add(xen_comm_rx_ring_hash, &info_entry->node,
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index 524f75c..c6a2993 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -96,6 +96,12 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
 	lvl2_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs);
 
 	sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
+
+	if (!sh_pages_info) {
+		dev_err(hyper_dmabuf_private.device, "No more space left\n");
+		return -ENOMEM;
+	}
+
 	*refs_info = (void *)sh_pages_info;
 
 	/* share data pages in rw mode*/
diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild
new file mode 100644
index 0000000..bf81f42
--- /dev/null
+++ b/include/uapi/xen/Kbuild
@@ -0,0 +1,6 @@
+# UAPI Header export list
+header-y += evtchn.h
+header-y += gntalloc.h
+header-y += gntdev.h
+header-y += privcmd.h
+header-y += hyper_dmabuf.h
-- 
2.7.4
