Message-Id: <1513711816-2618-49-git-send-email-dongwon.kim@intel.com>
Date:   Tue, 19 Dec 2017 11:30:05 -0800
From:   Dongwon Kim <dongwon.kim@...el.com>
To:     linux-kernel@...r.kernel.org
Cc:     dri-devel@...ts.freedesktop.org, xen-devel@...ts.xenproject.org,
        mateuszx.potrola@...el.com, dongwon.kim@...el.com
Subject: [RFC PATCH 49/60] hyper_dmabuf: general clean-up and fixes

1. The global hyper_dmabuf_private is now a pointer (*hy_drv_priv)
   to a private data structure that is allocated when the driver
   initializes and freed when it exits (a sketch of this pattern
   follows the sign-off).

2. Use shorter variable and type names.

3. Remove unnecessary NULL checks.

4. Event-polling related functions are now compiled only if
   CONFIG_HYPER_DMABUF_EVENT_GEN is enabled (a schematic of the
   gating pattern follows the Makefile diff below).

Signed-off-by: Dongwon Kim <dongwon.kim@...el.com>
---
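For reference, a minimal sketch of the lifetime pattern item 1
describes (illustrative only, not part of the diff; kzalloc() is
shown as the idiomatic equivalent of the kcalloc(1, ...) used in
the patch):

	struct hyper_dmabuf_private *hy_drv_priv;

	static int __init hyper_dmabuf_drv_init(void)
	{
		/* the singleton private data lives on the heap now */
		hy_drv_priv = kzalloc(sizeof(*hy_drv_priv), GFP_KERNEL);
		if (!hy_drv_priv)
			return -ENOMEM;

		/* ... register device, pick backend, init tables ... */
		return 0;
	}

	static void hyper_dmabuf_drv_exit(void)
	{
		/* tear down in reverse order of init; unregister_device()
		 * still dereferences hy_drv_priv->dev, so free it last */
		unregister_device();

		kfree(hy_drv_priv);
		hy_drv_priv = NULL;
	}
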
 drivers/xen/hyper_dmabuf/Makefile                  |   7 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h       |  25 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c        | 164 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h        |  13 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c      |  60 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c         |  16 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c      | 569 ++++++++++-----------
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h      |   2 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c       |  88 ++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h       |  18 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        | 259 +++++-----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h        |  18 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c        | 284 +++++-----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h        |   4 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c      |  58 +--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h      |   4 +-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c    | 170 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   | 123 ++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h   |  10 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h     |  24 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   | 240 +++++----
 .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c  |   6 +-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c    | 147 +++---
 23 files changed, 1144 insertions(+), 1165 deletions(-)
 delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h

diff --git a/drivers/xen/hyper_dmabuf/Makefile b/drivers/xen/hyper_dmabuf/Makefile
index 1cd7a81..a113bfc 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -13,9 +13,12 @@ ifneq ($(KERNELRELEASE),)
 				 hyper_dmabuf_id.o \
 				 hyper_dmabuf_remote_sync.o \
 				 hyper_dmabuf_query.o \
-				 hyper_dmabuf_event.o \
 
-ifeq ($(CONFIG_XEN), y)
+ifeq ($(CONFIG_HYPER_DMABUF_EVENT_GEN), y)
+	$(TARGET_MODULE)-objs += hyper_dmabuf_event.o
+endif
+
+ifeq ($(CONFIG_HYPER_DMABUF_XEN), y)
 	$(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \
 				 xen/hyper_dmabuf_xen_comm_list.o \
 				 xen/hyper_dmabuf_xen_shm.o \
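
Note: the CONFIG_HYPER_DMABUF_EVENT_GEN gating above pairs with
preprocessor gating on the C side. Schematically (illustrative
only; the real hunk is in hyper_dmabuf_drv.c further down):

	static struct file_operations hyper_dmabuf_driver_fops = {
		.owner = THIS_MODULE,
		.open = hyper_dmabuf_open,
		.release = hyper_dmabuf_release,
	#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
		/* read/poll exist only when the event code is built */
		.read = hyper_dmabuf_event_read,
		.poll = hyper_dmabuf_event_poll,
	#endif
		.unlocked_ioctl = hyper_dmabuf_ioctl,
	};
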
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
deleted file mode 100644
index d5125f2..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_conf.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-/* configuration */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index 1c35a59..525ee78 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -36,7 +36,6 @@
 #include <linux/poll.h>
 #include <linux/dma-buf.h>
 #include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_conf.h"
 #include "hyper_dmabuf_ioctl.h"
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_list.h"
@@ -51,13 +50,32 @@ extern struct hyper_dmabuf_backend_ops xen_backend_ops;
 MODULE_LICENSE("GPL and additional rights");
 MODULE_AUTHOR("Intel Corporation");
 
-struct hyper_dmabuf_private hyper_dmabuf_private;
+struct hyper_dmabuf_private *hy_drv_priv;
 
 long hyper_dmabuf_ioctl(struct file *filp,
 			unsigned int cmd, unsigned long param);
 
-void hyper_dmabuf_emergency_release(struct hyper_dmabuf_sgt_info* sgt_info,
-				    void *attr);
+static void hyper_dmabuf_force_free(struct exported_sgt_info* exported,
+			            void *attr)
+{
+	struct ioctl_hyper_dmabuf_unexport unexport_attr;
+	struct file *filp = (struct file*) attr;
+
+	if (!filp || !exported)
+		return;
+
+	if (exported->filp == filp) {
+		dev_dbg(hy_drv_priv->dev,
+			"Forcefully releasing buffer {id:%d key:%d %d %d}\n",
+			 exported->hid.id, exported->hid.rng_key[0],
+			 exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+		unexport_attr.hid = exported->hid;
+		unexport_attr.delay_ms = 0;
+
+		hyper_dmabuf_unexport_ioctl(filp, &unexport_attr);
+	}
+}
 
 int hyper_dmabuf_open(struct inode *inode, struct file *filp)
 {
@@ -72,18 +90,20 @@ int hyper_dmabuf_open(struct inode *inode, struct file *filp)
 
 int hyper_dmabuf_release(struct inode *inode, struct file *filp)
 {
-	hyper_dmabuf_foreach_exported(hyper_dmabuf_emergency_release, filp);
+	hyper_dmabuf_foreach_exported(hyper_dmabuf_force_free, filp);
 
 	return 0;
 }
 
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+
 unsigned int hyper_dmabuf_event_poll(struct file *filp, struct poll_table_struct *wait)
 {
 	unsigned int mask = 0;
 
-	poll_wait(filp, &hyper_dmabuf_private.event_wait, wait);
+	poll_wait(filp, &hy_drv_priv->event_wait, wait);
 
-	if (!list_empty(&hyper_dmabuf_private.event_list))
+	if (!list_empty(&hy_drv_priv->event_list))
 		mask |= POLLIN | POLLRDNORM;
 
 	return mask;
@@ -96,32 +116,32 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
 
 	/* only root can read events */
 	if (!capable(CAP_DAC_OVERRIDE)) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"Only root can read events\n");
 		return -EFAULT;
 	}
 
 	/* make sure user buffer can be written */
 	if (!access_ok(VERIFY_WRITE, buffer, count)) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"User buffer can't be written.\n");
 		return -EFAULT;
 	}
 
-	ret = mutex_lock_interruptible(&hyper_dmabuf_private.event_read_lock);
+	ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock);
 	if (ret)
 		return ret;
 
 	while (1) {
 		struct hyper_dmabuf_event *e = NULL;
 
-		spin_lock_irq(&hyper_dmabuf_private.event_lock);
-		if (!list_empty(&hyper_dmabuf_private.event_list)) {
-			e = list_first_entry(&hyper_dmabuf_private.event_list,
+		spin_lock_irq(&hy_drv_priv->event_lock);
+		if (!list_empty(&hy_drv_priv->event_list)) {
+			e = list_first_entry(&hy_drv_priv->event_list,
 					struct hyper_dmabuf_event, link);
 			list_del(&e->link);
 		}
-		spin_unlock_irq(&hyper_dmabuf_private.event_lock);
+		spin_unlock_irq(&hy_drv_priv->event_lock);
 
 		if (!e) {
 			if (ret)
@@ -131,12 +151,12 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
 				break;
 			}
 
-			mutex_unlock(&hyper_dmabuf_private.event_read_lock);
-			ret = wait_event_interruptible(hyper_dmabuf_private.event_wait,
-						       !list_empty(&hyper_dmabuf_private.event_list));
+			mutex_unlock(&hy_drv_priv->event_read_lock);
+			ret = wait_event_interruptible(hy_drv_priv->event_wait,
+						       !list_empty(&hy_drv_priv->event_list));
 
 			if (ret == 0)
-				ret = mutex_lock_interruptible(&hyper_dmabuf_private.event_read_lock);
+				ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock);
 
 			if (ret)
 				return ret;
@@ -145,9 +165,9 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
 
 			if (length > count - ret) {
 put_back_event:
-				spin_lock_irq(&hyper_dmabuf_private.event_lock);
-				list_add(&e->link, &hyper_dmabuf_private.event_list);
-				spin_unlock_irq(&hyper_dmabuf_private.event_lock);
+				spin_lock_irq(&hy_drv_priv->event_lock);
+				list_add(&e->link, &hy_drv_priv->event_list);
+				spin_unlock_irq(&hy_drv_priv->event_lock);
 				break;
 			}
 
@@ -170,7 +190,7 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
 				/* nullifying hdr of the event in user buffer */
 				if (copy_to_user(buffer + ret, &dummy_hdr,
 						 sizeof(dummy_hdr))) {
-					dev_err(hyper_dmabuf_private.device,
+					dev_err(hy_drv_priv->dev,
 						"failed to nullify invalid hdr already in userspace\n");
 				}
 
@@ -180,23 +200,30 @@ ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
 			}
 
 			ret += e->event_data.hdr.size;
-			hyper_dmabuf_private.curr_num_event--;
+			hy_drv_priv->pending--;
 			kfree(e);
 		}
 	}
 
-	mutex_unlock(&hyper_dmabuf_private.event_read_lock);
+	mutex_unlock(&hy_drv_priv->event_read_lock);
 
 	return ret;
 }
 
+#endif
+
 static struct file_operations hyper_dmabuf_driver_fops =
 {
 	.owner = THIS_MODULE,
 	.open = hyper_dmabuf_open,
 	.release = hyper_dmabuf_release,
+
+/* poll and read interfaces are needed only for event-polling */
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
 	.read = hyper_dmabuf_event_read,
 	.poll = hyper_dmabuf_event_poll,
+#endif
+
 	.unlocked_ioctl = hyper_dmabuf_ioctl,
 };
 
@@ -217,17 +244,17 @@ int register_device(void)
 		return ret;
 	}
 
-	hyper_dmabuf_private.device = hyper_dmabuf_miscdev.this_device;
+	hy_drv_priv->dev = hyper_dmabuf_miscdev.this_device;
 
 	/* TODO: Check if there is a different way to initialize dma mask nicely */
-	dma_coerce_mask_and_coherent(hyper_dmabuf_private.device, DMA_BIT_MASK(64));
+	dma_coerce_mask_and_coherent(hy_drv_priv->dev, DMA_BIT_MASK(64));
 
 	return ret;
 }
 
 void unregister_device(void)
 {
-	dev_info(hyper_dmabuf_private.device,
+	dev_info(hy_drv_priv->dev,
 		"hyper_dmabuf: unregister_device() is called\n");
 
 	misc_deregister(&hyper_dmabuf_miscdev);
@@ -239,9 +266,13 @@ static int __init hyper_dmabuf_drv_init(void)
 
 	printk( KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n");
 
-	mutex_init(&hyper_dmabuf_private.lock);
-	mutex_init(&hyper_dmabuf_private.event_read_lock);
-	spin_lock_init(&hyper_dmabuf_private.event_lock);
+	hy_drv_priv = kzalloc(sizeof(struct hyper_dmabuf_private),
+			      GFP_KERNEL);
+
+	if (!hy_drv_priv) {
+		printk(KERN_ERR "hyper_dmabuf: failed to allocate private data\n");
+		return -ENOMEM;
+	}
 
 	ret = register_device();
 	if (ret < 0) {
@@ -251,64 +282,72 @@ static int __init hyper_dmabuf_drv_init(void)
 /* currently only supports XEN hypervisor */
 
 #ifdef CONFIG_HYPER_DMABUF_XEN
-	hyper_dmabuf_private.backend_ops = &xen_backend_ops;
+	hy_drv_priv->backend_ops = &xen_backend_ops;
 #else
-	hyper_dmabuf_private.backend_ops = NULL;
+	hy_drv_priv->backend_ops = NULL;
 	printk( KERN_ERR "hyper_dmabuf drv currently supports XEN only.\n");
 #endif
 
-	if (hyper_dmabuf_private.backend_ops == NULL) {
+	if (hy_drv_priv->backend_ops == NULL) {
 		printk( KERN_ERR "Hyper_dmabuf: failed to be loaded - no backend found\n");
 		return -1;
 	}
 
-	mutex_lock(&hyper_dmabuf_private.lock);
+	/* initializing mutexes and a spinlock */
+	mutex_init(&hy_drv_priv->lock);
+
+	mutex_lock(&hy_drv_priv->lock);
 
-	hyper_dmabuf_private.backend_initialized = false;
+	hy_drv_priv->initialized = false;
 
-	dev_info(hyper_dmabuf_private.device,
+	dev_info(hy_drv_priv->dev,
 		 "initializing database for imported/exported dmabufs\n");
 
 	/* device structure initialization */
 	/* currently only does work-queue initialization */
-	hyper_dmabuf_private.work_queue = create_workqueue("hyper_dmabuf_wqueue");
+	hy_drv_priv->work_queue = create_workqueue("hyper_dmabuf_wqueue");
 
 	ret = hyper_dmabuf_table_init();
 	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"failed to initialize table for exported/imported entries\n");
 		return ret;
 	}
 
 #ifdef CONFIG_HYPER_DMABUF_SYSFS
-	ret = hyper_dmabuf_register_sysfs(hyper_dmabuf_private.device);
+	ret = hyper_dmabuf_register_sysfs(hy_drv_priv->dev);
 	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"failed to initialize sysfs\n");
 		return ret;
 	}
 #endif
 
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+	mutex_init(&hy_drv_priv->event_read_lock);
+	spin_lock_init(&hy_drv_priv->event_lock);
+
 	/* Initialize event queue */
-	INIT_LIST_HEAD(&hyper_dmabuf_private.event_list);
-	init_waitqueue_head(&hyper_dmabuf_private.event_wait);
+	INIT_LIST_HEAD(&hy_drv_priv->event_list);
+	init_waitqueue_head(&hy_drv_priv->event_wait);
 
-	hyper_dmabuf_private.curr_num_event = 0;
-	hyper_dmabuf_private.exited = false;
+	/* resetting number of pending events */
+	hy_drv_priv->pending = 0;
+#endif
 
-	hyper_dmabuf_private.domid = hyper_dmabuf_private.backend_ops->get_vm_id();
+	hy_drv_priv->domid = hy_drv_priv->backend_ops->get_vm_id();
 
-	ret = hyper_dmabuf_private.backend_ops->init_comm_env();
+	ret = hy_drv_priv->backend_ops->init_comm_env();
 	if (ret < 0) {
-		dev_dbg(hyper_dmabuf_private.device,
+		dev_dbg(hy_drv_priv->dev,
 			"failed to initialize comm-env but it will re-attempt.\n");
 	} else {
-		hyper_dmabuf_private.backend_initialized = true;
+		hy_drv_priv->initialized = true;
 	}
 
-	mutex_unlock(&hyper_dmabuf_private.lock);
+	mutex_unlock(&hy_drv_priv->lock);
 
-	dev_info(hyper_dmabuf_private.device,
+	dev_info(hy_drv_priv->dev,
 		"Finishing up initialization of hyper_dmabuf drv\n");
 
 	/* interrupt for comm should be registered here: */
@@ -318,34 +357,39 @@ static int __init hyper_dmabuf_drv_init(void)
 static void hyper_dmabuf_drv_exit(void)
 {
 #ifdef CONFIG_HYPER_DMABUF_SYSFS
-	hyper_dmabuf_unregister_sysfs(hyper_dmabuf_private.device);
+	hyper_dmabuf_unregister_sysfs(hy_drv_priv->dev);
 #endif
 
-	mutex_lock(&hyper_dmabuf_private.lock);
+	mutex_lock(&hy_drv_priv->lock);
 
 	/* hash tables for export/import entries and ring_infos */
 	hyper_dmabuf_table_destroy();
 
-	hyper_dmabuf_private.backend_ops->destroy_comm();
+	hy_drv_priv->backend_ops->destroy_comm();
 
 	/* destroy workqueue */
-	if (hyper_dmabuf_private.work_queue)
-		destroy_workqueue(hyper_dmabuf_private.work_queue);
+	if (hy_drv_priv->work_queue)
+		destroy_workqueue(hy_drv_priv->work_queue);
 
 	/* destroy id_queue */
-	if (hyper_dmabuf_private.id_queue)
+	if (hy_drv_priv->id_queue)
 		destroy_reusable_list();
 
-	hyper_dmabuf_private.exited = true;
-
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
 	/* clean up event queue */
 	hyper_dmabuf_events_release();
+#endif
 
-	mutex_unlock(&hyper_dmabuf_private.lock);
+	mutex_unlock(&hy_drv_priv->lock);
 
-	dev_info(hyper_dmabuf_private.device,
+	dev_info(hy_drv_priv->dev,
 		 "hyper_dmabuf driver: Exiting\n");
 
 	unregister_device();
+
+	kfree(hy_drv_priv);
+	hy_drv_priv = NULL;
 }
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
index a4acdd9f..2ead41b 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -36,7 +36,7 @@ struct hyper_dmabuf_event {
 };
 
 struct hyper_dmabuf_private {
-        struct device *device;
+        struct device *dev;
 
 	/* VM(domain) id of current VM instance */
 	int domid;
@@ -55,7 +55,7 @@ struct hyper_dmabuf_private {
 	struct mutex lock;
 
 	/* flag that shows whether backend is initialized */
-	bool backend_initialized;
+	bool initialized;
 
         wait_queue_head_t event_wait;
         struct list_head event_list;
@@ -63,10 +63,8 @@ struct hyper_dmabuf_private {
 	spinlock_t event_lock;
 	struct mutex event_read_lock;
 
-	int curr_num_event;
-
-	/* indicate whether the driver is unloaded */
-	bool exited;
+	/* # of pending events */
+	int pending;
 };
 
 struct list_reusable_id {
@@ -108,4 +106,7 @@ struct hyper_dmabuf_backend_ops {
 	int (*send_req)(int, struct hyper_dmabuf_req *, int);
 };
 
+/* exporting global drv private info */
+extern struct hyper_dmabuf_private *hy_drv_priv;
+
 #endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
index 3e1498c..0498cda 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
@@ -32,37 +32,33 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/dma-buf.h>
-#include <xen/grant_table.h>
-#include <asm/xen/page.h>
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_event.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 static void hyper_dmabuf_send_event_locked(struct hyper_dmabuf_event *e)
 {
 	struct hyper_dmabuf_event *oldest;
 
-	assert_spin_locked(&hyper_dmabuf_private.event_lock);
+	assert_spin_locked(&hy_drv_priv->event_lock);
 
 	/* check current number of event then if it hits the max num allowed
 	 * then remove the oldest event in the list */
-	if (hyper_dmabuf_private.curr_num_event > MAX_DEPTH_EVENT_QUEUE - 1) {
-		oldest = list_first_entry(&hyper_dmabuf_private.event_list,
+	if (hy_drv_priv->pending > MAX_DEPTH_EVENT_QUEUE - 1) {
+		oldest = list_first_entry(&hy_drv_priv->event_list,
 				struct hyper_dmabuf_event, link);
 		list_del(&oldest->link);
-		hyper_dmabuf_private.curr_num_event--;
+		hy_drv_priv->pending--;
 		kfree(oldest);
 	}
 
 	list_add_tail(&e->link,
-		      &hyper_dmabuf_private.event_list);
+		      &hy_drv_priv->event_list);
 
-	hyper_dmabuf_private.curr_num_event++;
+	hy_drv_priv->pending++;
 
-	wake_up_interruptible(&hyper_dmabuf_private.event_wait);
+	wake_up_interruptible(&hy_drv_priv->event_wait);
 }
 
 void hyper_dmabuf_events_release()
@@ -70,34 +66,34 @@ void hyper_dmabuf_events_release()
 	struct hyper_dmabuf_event *e, *et;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&hyper_dmabuf_private.event_lock, irqflags);
+	spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
 
-	list_for_each_entry_safe(e, et, &hyper_dmabuf_private.event_list,
+	list_for_each_entry_safe(e, et, &hy_drv_priv->event_list,
 				 link) {
 		list_del(&e->link);
 		kfree(e);
-		hyper_dmabuf_private.curr_num_event--;
+		hy_drv_priv->pending--;
 	}
 
-	if (hyper_dmabuf_private.curr_num_event) {
-		dev_err(hyper_dmabuf_private.device,
+	if (hy_drv_priv->pending) {
+		dev_err(hy_drv_priv->dev,
 			"possible leak on event_list\n");
 	}
 
-	spin_unlock_irqrestore(&hyper_dmabuf_private.event_lock, irqflags);
+	spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags);
 }
 
 int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid)
 {
 	struct hyper_dmabuf_event *e;
-	struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
+	struct imported_sgt_info *imported;
 
 	unsigned long irqflags;
 
-	imported_sgt_info = hyper_dmabuf_find_imported(hid);
+	imported = hyper_dmabuf_find_imported(hid);
 
-	if (!imported_sgt_info) {
-		dev_err(hyper_dmabuf_private.device,
+	if (!imported) {
+		dev_err(hy_drv_priv->dev,
 			"can't find imported_sgt_info in the list\n");
 		return -EINVAL;
 	}
@@ -105,29 +101,29 @@ int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid)
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
 
 	if (!e) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"no space left\n");
 		return -ENOMEM;
 	}
 
 	e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT;
 	e->event_data.hdr.hid = hid;
-	e->event_data.data = (void*)imported_sgt_info->priv;
-	e->event_data.hdr.size = imported_sgt_info->sz_priv;
+	e->event_data.data = (void*)imported->priv;
+	e->event_data.hdr.size = imported->sz_priv;
 
-	spin_lock_irqsave(&hyper_dmabuf_private.event_lock, irqflags);
+	spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
 
 	hyper_dmabuf_send_event_locked(e);
 
-	spin_unlock_irqrestore(&hyper_dmabuf_private.event_lock, irqflags);
+	spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags);
 
-	dev_dbg(hyper_dmabuf_private.device,
-			"event number = %d :", hyper_dmabuf_private.curr_num_event);
+	dev_dbg(hy_drv_priv->dev,
+		"event number = %d :", hy_drv_priv->pending);
 
-	dev_dbg(hyper_dmabuf_private.device,
-			"generating events for {%d, %d, %d, %d}\n",
-			imported_sgt_info->hid.id, imported_sgt_info->hid.rng_key[0],
-			imported_sgt_info->hid.rng_key[1], imported_sgt_info->hid.rng_key[2]);
+	dev_dbg(hy_drv_priv->dev,
+		"generating events for {%d, %d, %d, %d}\n",
+		imported->hid.id, imported->hid.rng_key[0],
+		imported->hid.rng_key[1], imported->hid.rng_key[2]);
 
 	return 0;
 }
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
index cccdc19..e2466c7 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
@@ -33,17 +33,15 @@
 #include "hyper_dmabuf_id.h"
 #include "hyper_dmabuf_msg.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 void store_reusable_hid(hyper_dmabuf_id_t hid)
 {
-	struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
 	struct list_reusable_id *new_reusable;
 
 	new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL);
 
 	if (!new_reusable) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		return;
 	}
@@ -55,7 +53,7 @@ void store_reusable_hid(hyper_dmabuf_id_t hid)
 
 static hyper_dmabuf_id_t retrieve_reusable_hid(void)
 {
-	struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
 	hyper_dmabuf_id_t hid = {-1, {0,0,0}};
 
 	/* check there is reusable id */
@@ -74,7 +72,7 @@ static hyper_dmabuf_id_t retrieve_reusable_hid(void)
 
 void destroy_reusable_list(void)
 {
-	struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
 	struct list_reusable_id *temp_head;
 
 	if (reusable_head) {
@@ -103,14 +101,14 @@ hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
 		reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
 
 		if (!reusable_head) {
-			dev_err(hyper_dmabuf_private.device,
+			dev_err(hy_drv_priv->dev,
 				"No memory left to be allocated\n");
 			return (hyper_dmabuf_id_t){-1, {0,0,0}};
 		}
 
 		reusable_head->hid.id = -1; /* list head has an invalid count */
 		INIT_LIST_HEAD(&reusable_head->list);
-		hyper_dmabuf_private.id_queue = reusable_head;
+		hy_drv_priv->id_queue = reusable_head;
 	}
 
 	hid = retrieve_reusable_hid();
@@ -119,7 +117,7 @@ hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
 	 * and count is less than maximum allowed
 	 */
 	if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX) {
-		hid.id = HYPER_DMABUF_ID_CREATE(hyper_dmabuf_private.domid, count++);
+		hid.id = HYPER_DMABUF_ID_CREATE(hy_drv_priv->domid, count++);
 	}
 
 	/* random data embedded in the id for security */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 15191c2..b328df7 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -45,16 +45,14 @@
 #include "hyper_dmabuf_ops.h"
 #include "hyper_dmabuf_query.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
 {
 	struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+	struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
 	int ret = 0;
 
 	if (!data) {
-		dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
+		dev_err(hy_drv_priv->dev, "user data is NULL\n");
 		return -EINVAL;
 	}
 	tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data;
@@ -67,11 +65,11 @@ static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
 static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data)
 {
 	struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+	struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
 	int ret = 0;
 
 	if (!data) {
-		dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
+		dev_err(hy_drv_priv->dev, "user data is NULL\n");
 		return -EINVAL;
 	}
 
@@ -82,48 +80,48 @@ static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data)
 	return ret;
 }
 
-static int hyper_dmabuf_send_export_msg(struct hyper_dmabuf_sgt_info *sgt_info,
-					struct hyper_dmabuf_pages_info *page_info)
+static int hyper_dmabuf_send_export_msg(struct exported_sgt_info *exported,
+					struct pages_info *pg_info)
 {
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+	struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
 	struct hyper_dmabuf_req *req;
-	int operands[MAX_NUMBER_OF_OPERANDS] = {0};
+	int op[MAX_NUMBER_OF_OPERANDS] = {0};
 	int ret, i;
 
 	/* now create request for importer via ring */
-	operands[0] = sgt_info->hid.id;
+	op[0] = exported->hid.id;
 
 	for (i=0; i<3; i++)
-		operands[i+1] = sgt_info->hid.rng_key[i];
-
-	if (page_info) {
-		operands[4] = page_info->nents;
-		operands[5] = page_info->frst_ofst;
-		operands[6] = page_info->last_len;
-		operands[7] = ops->share_pages (page_info->pages, sgt_info->hyper_dmabuf_rdomain,
-						page_info->nents, &sgt_info->refs_info);
-		if (operands[7] < 0) {
-			dev_err(hyper_dmabuf_private.device, "pages sharing failed\n");
+		op[i+1] = exported->hid.rng_key[i];
+
+	if (pg_info) {
+		op[4] = pg_info->nents;
+		op[5] = pg_info->frst_ofst;
+		op[6] = pg_info->last_len;
+		op[7] = ops->share_pages(pg_info->pgs, exported->rdomid,
+					 pg_info->nents, &exported->refs_info);
+		if (op[7] < 0) {
+			dev_err(hy_drv_priv->dev, "pages sharing failed\n");
 			return -1;
 		}
 	}
 
-	operands[8] = sgt_info->sz_priv;
+	op[8] = exported->sz_priv;
 
 	/* driver/application specific private info */
-	memcpy(&operands[9], sgt_info->priv, operands[8]);
+	memcpy(&op[9], exported->priv, op[8]);
 
 	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
 	if(!req) {
-		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+		dev_err(hy_drv_priv->dev, "no more space left\n");
 		return -1;
 	}
 
 	/* composing a message to the importer */
-	hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT, &operands[0]);
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]);
 
-	ret = ops->send_req(sgt_info->hyper_dmabuf_rdomain, req, true);
+	ret = ops->send_req(exported->rdomid, req, true);
 
 	kfree(req);
 
@@ -132,24 +130,18 @@ static int hyper_dmabuf_send_export_msg(struct hyper_dmabuf_sgt_info *sgt_info,
 
 static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 {
-	struct ioctl_hyper_dmabuf_export_remote *export_remote_attr;
+	struct ioctl_hyper_dmabuf_export_remote *export_remote_attr =
+			(struct ioctl_hyper_dmabuf_export_remote *)data;
 	struct dma_buf *dma_buf;
 	struct dma_buf_attachment *attachment;
 	struct sg_table *sgt;
-	struct hyper_dmabuf_pages_info *page_info;
-	struct hyper_dmabuf_sgt_info *sgt_info;
+	struct pages_info *pg_info;
+	struct exported_sgt_info *exported;
 	hyper_dmabuf_id_t hid;
 	int ret = 0;
 
-	if (!data) {
-		dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
-		return -EINVAL;
-	}
-
-	export_remote_attr = (struct ioctl_hyper_dmabuf_export_remote *)data;
-
-	if (hyper_dmabuf_private.domid == export_remote_attr->remote_domain) {
-		dev_err(hyper_dmabuf_private.device,
+	if (hy_drv_priv->domid == export_remote_attr->remote_domain) {
+		dev_err(hy_drv_priv->dev,
 			"exporting to the same VM is not permitted\n");
 		return -EINVAL;
 	}
@@ -157,7 +149,7 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 	dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd);
 
 	if (IS_ERR(dma_buf)) {
-		dev_err(hyper_dmabuf_private.device,  "Cannot get dma buf\n");
+		dev_err(hy_drv_priv->dev, "Cannot get dma buf\n");
 		return PTR_ERR(dma_buf);
 	}
 
@@ -165,69 +157,79 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 	 * to the same domain and if yes and it's valid sgt_info,
 	 * it returns hyper_dmabuf_id of pre-exported sgt_info
 	 */
-	hid = hyper_dmabuf_find_hid_exported(dma_buf, export_remote_attr->remote_domain);
+	hid = hyper_dmabuf_find_hid_exported(dma_buf,
+					     export_remote_attr->remote_domain);
 	if (hid.id != -1) {
-		sgt_info = hyper_dmabuf_find_exported(hid);
-		if (sgt_info != NULL) {
-			if (sgt_info->valid) {
+		exported = hyper_dmabuf_find_exported(hid);
+		if (exported != NULL) {
+			if (exported->valid) {
 				/*
 				 * Check if unexport is already scheduled for that buffer,
 				 * if so try to cancel it. If that will fail, buffer needs
 				 * to be reexport once again.
 				 */
-				if (sgt_info->unexport_scheduled) {
-					if (!cancel_delayed_work_sync(&sgt_info->unexport_work)) {
+				if (exported->unexport_sched) {
+					if (!cancel_delayed_work_sync(&exported->unexport)) {
 						dma_buf_put(dma_buf);
 						goto reexport;
 					}
-					sgt_info->unexport_scheduled = 0;
+					exported->unexport_sched = false;
 				}
 
 				/* if there's any change in size of private data.
 				 * we reallocate space for private data with new size */
-				if (export_remote_attr->sz_priv != sgt_info->sz_priv) {
-					kfree(sgt_info->priv);
+				if (export_remote_attr->sz_priv != exported->sz_priv) {
+					kfree(exported->priv);
 
 					/* truncating size */
 					if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA) {
-						sgt_info->sz_priv = MAX_SIZE_PRIV_DATA;
+						exported->sz_priv = MAX_SIZE_PRIV_DATA;
 					} else {
-						sgt_info->sz_priv = export_remote_attr->sz_priv;
+						exported->sz_priv = export_remote_attr->sz_priv;
 					}
 
-					sgt_info->priv = kcalloc(1, sgt_info->sz_priv, GFP_KERNEL);
+					exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL);
 
-					if(!sgt_info->priv) {
-						dev_err(hyper_dmabuf_private.device,
-							"Can't reallocate priv because there's no more space left\n");
-						hyper_dmabuf_remove_exported(sgt_info->hid);
-						hyper_dmabuf_cleanup_sgt_info(sgt_info, true);
-						kfree(sgt_info);
+					if(!exported->priv) {
+						dev_err(hy_drv_priv->dev,
+							"no more space left for priv\n");
+						hyper_dmabuf_remove_exported(exported->hid);
+						hyper_dmabuf_cleanup_sgt_info(exported, true);
+						kfree(exported);
+						dma_buf_put(dma_buf);
 						return -ENOMEM;
 					}
 				}
 
 				/* update private data in sgt_info with new ones */
-				copy_from_user(sgt_info->priv, export_remote_attr->priv, sgt_info->sz_priv);
-
-				/* send an export msg for updating priv in importer */
-				ret = hyper_dmabuf_send_export_msg(sgt_info, NULL);
-
-				if (ret < 0) {
-					dev_err(hyper_dmabuf_private.device, "Failed to send a new private data\n");
+				ret = copy_from_user(exported->priv, export_remote_attr->priv,
+						     exported->sz_priv);
+				if (ret) {
+					dev_err(hy_drv_priv->dev,
+						"Failed to load a new private data\n");
+					ret = -EINVAL;
+				} else {
+					/* send an export msg for updating priv in importer */
+					ret = hyper_dmabuf_send_export_msg(exported, NULL);
+
+					if (ret < 0) {
+						dev_err(hy_drv_priv->dev,
+							"Failed to send a new private data\n");
+						ret = -EBUSY;
+					}
 				}
 
 				dma_buf_put(dma_buf);
 				export_remote_attr->hid = hid;
-				return 0;
+				return ret;
 			}
 		}
 	}
 
 reexport:
-	attachment = dma_buf_attach(dma_buf, hyper_dmabuf_private.device);
+	attachment = dma_buf_attach(dma_buf, hy_drv_priv->dev);
 	if (IS_ERR(attachment)) {
-		dev_err(hyper_dmabuf_private.device, "Cannot get attachment\n");
+		dev_err(hy_drv_priv->dev, "Cannot get attachment\n");
 		ret = PTR_ERR(attachment);
 		goto fail_attach;
 	}
@@ -235,154 +237,165 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 	sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
 
 	if (IS_ERR(sgt)) {
-		dev_err(hyper_dmabuf_private.device, "Cannot map attachment\n");
+		dev_err(hy_drv_priv->dev, "Cannot map attachment\n");
 		ret = PTR_ERR(sgt);
 		goto fail_map_attachment;
 	}
 
-	sgt_info = kcalloc(1, sizeof(*sgt_info), GFP_KERNEL);
+	exported = kcalloc(1, sizeof(*exported), GFP_KERNEL);
 
-	if(!sgt_info) {
-		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+	if(!exported) {
+		dev_err(hy_drv_priv->dev, "no more space left\n");
 		ret = -ENOMEM;
 		goto fail_sgt_info_creation;
 	}
 
 	/* possible truncation */
 	if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA) {
-		sgt_info->sz_priv = MAX_SIZE_PRIV_DATA;
+		exported->sz_priv = MAX_SIZE_PRIV_DATA;
 	} else {
-		sgt_info->sz_priv = export_remote_attr->sz_priv;
+		exported->sz_priv = export_remote_attr->sz_priv;
 	}
 
 	/* creating buffer for private data of buffer */
-	if(sgt_info->sz_priv != 0) {
-		sgt_info->priv = kcalloc(1, sgt_info->sz_priv, GFP_KERNEL);
+	if(exported->sz_priv != 0) {
+		exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL);
 
-		if(!sgt_info->priv) {
-			dev_err(hyper_dmabuf_private.device, "no more space left\n");
+		if(!exported->priv) {
+			dev_err(hy_drv_priv->dev, "no more space left\n");
 			ret = -ENOMEM;
 			goto fail_priv_creation;
 		}
 	} else {
-		dev_err(hyper_dmabuf_private.device, "size is 0\n");
+		dev_err(hy_drv_priv->dev, "size is 0\n");
 	}
 
-	sgt_info->hid = hyper_dmabuf_get_hid();
+	exported->hid = hyper_dmabuf_get_hid();
 
 	/* no more exported dmabuf allowed */
-	if(sgt_info->hid.id == -1) {
-		dev_err(hyper_dmabuf_private.device,
+	if(exported->hid.id == -1) {
+		dev_err(hy_drv_priv->dev,
 			"exceeds allowed number of dmabuf to be exported\n");
 		ret = -ENOMEM;
 		goto fail_sgt_info_creation;
 	}
 
-	/* TODO: We might need to consider using port number on event channel? */
-	sgt_info->hyper_dmabuf_rdomain = export_remote_attr->remote_domain;
-	sgt_info->dma_buf = dma_buf;
-	sgt_info->valid = 1;
+	exported->rdomid = export_remote_attr->remote_domain;
+	exported->dma_buf = dma_buf;
+	exported->valid = true;
 
-	sgt_info->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
-	if (!sgt_info->active_sgts) {
-		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+	exported->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
+	if (!exported->active_sgts) {
+		dev_err(hy_drv_priv->dev, "no more space left\n");
 		ret = -ENOMEM;
 		goto fail_map_active_sgts;
 	}
 
-	sgt_info->active_attached = kmalloc(sizeof(struct attachment_list), GFP_KERNEL);
-	if (!sgt_info->active_attached) {
-		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+	exported->active_attached = kmalloc(sizeof(struct attachment_list), GFP_KERNEL);
+	if (!exported->active_attached) {
+		dev_err(hy_drv_priv->dev, "no more space left\n");
 		ret = -ENOMEM;
 		goto fail_map_active_attached;
 	}
 
-	sgt_info->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list), GFP_KERNEL);
-	if (!sgt_info->va_kmapped) {
-		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+	exported->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list), GFP_KERNEL);
+	if (!exported->va_kmapped) {
+		dev_err(hy_drv_priv->dev, "no more space left\n");
 		ret = -ENOMEM;
 		goto fail_map_va_kmapped;
 	}
 
-	sgt_info->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), GFP_KERNEL);
-	if (!sgt_info->va_vmapped) {
-		dev_err(hyper_dmabuf_private.device, "no more space left\n");
+	exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), GFP_KERNEL);
+	if (!exported->va_vmapped) {
+		dev_err(hy_drv_priv->dev, "no more space left\n");
 		ret = -ENOMEM;
 		goto fail_map_va_vmapped;
 	}
 
-	sgt_info->active_sgts->sgt = sgt;
-	sgt_info->active_attached->attach = attachment;
-	sgt_info->va_kmapped->vaddr = NULL;
-	sgt_info->va_vmapped->vaddr = NULL;
+	exported->active_sgts->sgt = sgt;
+	exported->active_attached->attach = attachment;
+	exported->va_kmapped->vaddr = NULL;
+	exported->va_vmapped->vaddr = NULL;
 
 	/* initialize list of sgt, attachment and vaddr for dmabuf sync
 	 * via shadow dma-buf
 	 */
-	INIT_LIST_HEAD(&sgt_info->active_sgts->list);
-	INIT_LIST_HEAD(&sgt_info->active_attached->list);
-	INIT_LIST_HEAD(&sgt_info->va_kmapped->list);
-	INIT_LIST_HEAD(&sgt_info->va_vmapped->list);
+	INIT_LIST_HEAD(&exported->active_sgts->list);
+	INIT_LIST_HEAD(&exported->active_attached->list);
+	INIT_LIST_HEAD(&exported->va_kmapped->list);
+	INIT_LIST_HEAD(&exported->va_vmapped->list);
 
 	/* copy private data to sgt_info */
-	copy_from_user(sgt_info->priv, export_remote_attr->priv, sgt_info->sz_priv);
+	ret = copy_from_user(exported->priv, export_remote_attr->priv,
+			     exported->sz_priv);
 
-	page_info = hyper_dmabuf_ext_pgs(sgt);
-	if (!page_info) {
-		dev_err(hyper_dmabuf_private.device, "failed to construct page_info\n");
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"failed to load private data\n");
+		ret = -EINVAL;
 		goto fail_export;
 	}
 
-	sgt_info->nents = page_info->nents;
+	pg_info = hyper_dmabuf_ext_pgs(sgt);
+	if (!pg_info) {
+		dev_err(hy_drv_priv->dev,
+			"failed to construct pg_info\n");
+		ret = -ENOMEM;
+		goto fail_export;
+	}
+
+	exported->nents = pg_info->nents;
 
 	/* now register it to export list */
-	hyper_dmabuf_register_exported(sgt_info);
+	hyper_dmabuf_register_exported(exported);
 
-	export_remote_attr->hid = sgt_info->hid;
+	export_remote_attr->hid = exported->hid;
 
-	ret = hyper_dmabuf_send_export_msg(sgt_info, page_info);
+	ret = hyper_dmabuf_send_export_msg(exported, pg_info);
 
 	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device, "failed to send out the export request\n");
+		dev_err(hy_drv_priv->dev,
+			"failed to send out the export request\n");
 		goto fail_send_request;
 	}
 
-	/* free page_info */
-	kfree(page_info->pages);
-	kfree(page_info);
+	/* free pg_info */
+	kfree(pg_info->pgs);
+	kfree(pg_info);
 
-	sgt_info->filp = filp;
+	exported->filp = filp;
 
 	return ret;
 
 /* Clean-up if error occurs */
 
 fail_send_request:
-	hyper_dmabuf_remove_exported(sgt_info->hid);
+	hyper_dmabuf_remove_exported(exported->hid);
 
-	/* free page_info */
-	kfree(page_info->pages);
-	kfree(page_info);
+	/* free pg_info */
+	kfree(pg_info->pgs);
+	kfree(pg_info);
 
 fail_export:
-	kfree(sgt_info->va_vmapped);
+	kfree(exported->va_vmapped);
 
 fail_map_va_vmapped:
-	kfree(sgt_info->va_kmapped);
+	kfree(exported->va_kmapped);
 
 fail_map_va_kmapped:
-	kfree(sgt_info->active_attached);
+	kfree(exported->active_attached);
 
 fail_map_active_attached:
-	kfree(sgt_info->active_sgts);
-	kfree(sgt_info->priv);
+	kfree(exported->active_sgts);
+	kfree(exported->priv);
 
 fail_priv_creation:
-	kfree(sgt_info);
+	kfree(exported);
 
 fail_map_active_sgts:
 fail_sgt_info_creation:
-	dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
+	dma_buf_unmap_attachment(attachment, sgt,
+				 DMA_BIDIRECTIONAL);
 
 fail_map_attachment:
 	dma_buf_detach(dma_buf, attachment);
@@ -395,143 +408,136 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
 
 static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
 {
-	struct ioctl_hyper_dmabuf_export_fd *export_fd_attr;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct ioctl_hyper_dmabuf_export_fd *export_fd_attr =
+			(struct ioctl_hyper_dmabuf_export_fd *)data;
+	struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+	struct imported_sgt_info *imported;
 	struct hyper_dmabuf_req *req;
-	struct page **data_pages;
-	int operands[4];
+	struct page **data_pgs;
+	int op[4];
 	int i;
 	int ret = 0;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
-
-	if (!data) {
-		dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
-		return -EINVAL;
-	}
-
-	export_fd_attr = (struct ioctl_hyper_dmabuf_export_fd *)data;
+	dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
 
 	/* look for dmabuf for the id */
-	sgt_info = hyper_dmabuf_find_imported(export_fd_attr->hid);
+	imported = hyper_dmabuf_find_imported(export_fd_attr->hid);
 
 	/* can't find sgt from the table */
-	if (!sgt_info) {
-		dev_err(hyper_dmabuf_private.device, "can't find the entry\n");
+	if (!imported) {
+		dev_err(hy_drv_priv->dev, "can't find the entry\n");
 		return -ENOENT;
 	}
 
-	mutex_lock(&hyper_dmabuf_private.lock);
+	mutex_lock(&hy_drv_priv->lock);
 
-	sgt_info->num_importers++;
+	imported->importers++;
 
 	/* send notification for export_fd to exporter */
-	operands[0] = sgt_info->hid.id;
+	op[0] = imported->hid.id;
 
 	for (i=0; i<3; i++)
-		operands[i+1] = sgt_info->hid.rng_key[i];
+		op[i+1] = imported->hid.rng_key[i];
 
-	dev_dbg(hyper_dmabuf_private.device, "Exporting fd of buffer {id:%d key:%d %d %d}\n",
-		sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-		sgt_info->hid.rng_key[2]);
+	dev_dbg(hy_drv_priv->dev, "Exporting fd of buffer {id:%d key:%d %d %d}\n",
+		imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
+		imported->hid.rng_key[2]);
 
 	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
 	if (!req) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		return -ENOMEM;
 	}
 
-	hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD, &operands[0]);
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]);
 
-	ret = ops->send_req(HYPER_DMABUF_DOM_ID(sgt_info->hid), req, true);
+	ret = ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true);
 
 	if (ret < 0) {
 		/* in case of timeout other end eventually will receive request, so we need to undo it */
-		hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD_FAILED, &operands[0]);
-		ops->send_req(operands[0], req, false);
+		hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED, &op[0]);
+		ops->send_req(op[0], req, false);
 		kfree(req);
-		dev_err(hyper_dmabuf_private.device, "Failed to create sgt or notify exporter\n");
-		sgt_info->num_importers--;
-		mutex_unlock(&hyper_dmabuf_private.lock);
+		dev_err(hy_drv_priv->dev, "Failed to create sgt or notify exporter\n");
+		imported->importers--;
+		mutex_unlock(&hy_drv_priv->lock);
 		return ret;
 	}
 
 	kfree(req);
 
 	if (ret == HYPER_DMABUF_REQ_ERROR) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"Buffer invalid {id:%d key:%d %d %d}, cannot import\n",
-			sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-			sgt_info->hid.rng_key[2]);
+			imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
+			imported->hid.rng_key[2]);
 
-		sgt_info->num_importers--;
-		mutex_unlock(&hyper_dmabuf_private.lock);
+		imported->importers--;
+		mutex_unlock(&hy_drv_priv->lock);
 		return -EINVAL;
 	} else {
-		dev_dbg(hyper_dmabuf_private.device, "Can import buffer {id:%d key:%d %d %d}\n",
-			sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-			sgt_info->hid.rng_key[2]);
+		dev_dbg(hy_drv_priv->dev, "Can import buffer {id:%d key:%d %d %d}\n",
+			imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
+			imported->hid.rng_key[2]);
 
 		ret = 0;
 	}
 
-	dev_dbg(hyper_dmabuf_private.device,
-		  "%s Found buffer gref %d  off %d last len %d nents %d domain %d\n", __func__,
-		  sgt_info->ref_handle, sgt_info->frst_ofst,
-		  sgt_info->last_len, sgt_info->nents,
-		  HYPER_DMABUF_DOM_ID(sgt_info->hid));
+	dev_dbg(hy_drv_priv->dev,
+		  "%s Found buffer gref %d  off %d last len %d nents %d domain %d\n",
+		  __func__, imported->ref_handle, imported->frst_ofst,
+		  imported->last_len, imported->nents, HYPER_DMABUF_DOM_ID(imported->hid));
 
-	if (!sgt_info->sgt) {
-		dev_dbg(hyper_dmabuf_private.device,
+	if (!imported->sgt) {
+		dev_dbg(hy_drv_priv->dev,
 			"%s buffer {id:%d key:%d %d %d} pages not mapped yet\n", __func__,
-			sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-			sgt_info->hid.rng_key[2]);
+			imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
+			imported->hid.rng_key[2]);
 
-		data_pages = ops->map_shared_pages(sgt_info->ref_handle,
-						   HYPER_DMABUF_DOM_ID(sgt_info->hid),
-						   sgt_info->nents,
-						   &sgt_info->refs_info);
+		data_pgs = ops->map_shared_pages(imported->ref_handle,
+						   HYPER_DMABUF_DOM_ID(imported->hid),
+						   imported->nents,
+						   &imported->refs_info);
 
-		if (!data_pages) {
-			dev_err(hyper_dmabuf_private.device,
+		if (!data_pgs) {
+			dev_err(hy_drv_priv->dev,
 				"Cannot map pages of buffer {id:%d key:%d %d %d}\n",
-				sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-				sgt_info->hid.rng_key[2]);
+				imported->hid.id, imported->hid.rng_key[0], imported->hid.rng_key[1],
+				imported->hid.rng_key[2]);
 
-			sgt_info->num_importers--;
+			imported->importers--;
 			req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
 			if (!req) {
-				dev_err(hyper_dmabuf_private.device,
+				dev_err(hy_drv_priv->dev,
 					"No more space left\n");
 				return -ENOMEM;
 			}
 
-			hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD_FAILED, &operands[0]);
-			ops->send_req(HYPER_DMABUF_DOM_ID(sgt_info->hid), req, false);
+			hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED, &op[0]);
+			ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, false);
 			kfree(req);
-			mutex_unlock(&hyper_dmabuf_private.lock);
+			mutex_unlock(&hy_drv_priv->lock);
 			return -EINVAL;
 		}
 
-		sgt_info->sgt = hyper_dmabuf_create_sgt(data_pages, sgt_info->frst_ofst,
-							sgt_info->last_len, sgt_info->nents);
+		imported->sgt = hyper_dmabuf_create_sgt(data_pgs, imported->frst_ofst,
+							imported->last_len, imported->nents);
 
 	}
 
-	export_fd_attr->fd = hyper_dmabuf_export_fd(sgt_info, export_fd_attr->flags);
+	export_fd_attr->fd = hyper_dmabuf_export_fd(imported, export_fd_attr->flags);
 
 	if (export_fd_attr->fd < 0) {
 		/* fail to get fd */
 		ret = export_fd_attr->fd;
 	}
 
-	mutex_unlock(&hyper_dmabuf_private.lock);
+	mutex_unlock(&hy_drv_priv->lock);
 
-	dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
 	return ret;
 }
 
@@ -541,50 +547,51 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
 static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
 {
 	struct hyper_dmabuf_req *req;
+	struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+	struct exported_sgt_info *exported =
+		container_of(work, struct exported_sgt_info, unexport.work);
+	int op[4];
 	int i, ret;
-	int operands[4];
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
-	struct hyper_dmabuf_sgt_info *sgt_info =
-		container_of(work, struct hyper_dmabuf_sgt_info, unexport_work.work);
 
-	if (!sgt_info)
+	if (!exported)
 		return;
 
-	dev_dbg(hyper_dmabuf_private.device,
+	dev_dbg(hy_drv_priv->dev,
 		"Marking buffer {id:%d key:%d %d %d} as invalid\n",
-		sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-		sgt_info->hid.rng_key[2]);
+		exported->hid.id, exported->hid.rng_key[0],
+		exported->hid.rng_key[1], exported->hid.rng_key[2]);
 
 	/* no longer valid */
-	sgt_info->valid = 0;
+	exported->valid = false;
 
 	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
 	if (!req) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		return;
 	}
 
-	operands[0] = sgt_info->hid.id;
+	op[0] = exported->hid.id;
 
 	for (i=0; i<3; i++)
-		operands[i+1] = sgt_info->hid.rng_key[i];
+		op[i+1] = exported->hid.rng_key[i];
 
-	hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &operands[0]);
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &op[0]);
 
-	/* Now send unexport request to remote domain, marking that buffer should not be used anymore */
-	ret = ops->send_req(sgt_info->hyper_dmabuf_rdomain, req, true);
+	/* Now send unexport request to remote domain, marking
+	 * that buffer should not be used anymore */
+	ret = ops->send_req(exported->rdomid, req, true);
 	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"unexport message for buffer {id:%d key:%d %d %d} failed\n",
-			sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-			sgt_info->hid.rng_key[2]);
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
 	}
 
 	/* free msg */
 	kfree(req);
-	sgt_info->unexport_scheduled = 0;
+	exported->unexport_sched = false;
 
 	/*
 	 * Immediately clean-up if it has never been exported by importer
@@ -593,104 +600,94 @@ static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
 	 * is called (importer does this only when there's no
 	 * no consumer of locally exported FDs)
 	 */
-	if (!sgt_info->importer_exported) {
-		dev_dbg(hyper_dmabuf_private.device,
+	if (exported->active == 0) {
+		dev_dbg(hy_drv_priv->dev,
 			"claning up buffer {id:%d key:%d %d %d} completly\n",
-			sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-			sgt_info->hid.rng_key[2]);
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+		hyper_dmabuf_cleanup_sgt_info(exported, false);
+		hyper_dmabuf_remove_exported(exported->hid);
 
-		hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
-		hyper_dmabuf_remove_exported(sgt_info->hid);
 		/* register hyper_dmabuf_id to the list for reuse */
-		store_reusable_hid(sgt_info->hid);
+		store_reusable_hid(exported->hid);
 
-		if (sgt_info->sz_priv > 0 && !sgt_info->priv)
-			kfree(sgt_info->priv);
+		if (exported->sz_priv > 0 && exported->priv)
+			kfree(exported->priv);
 
-		kfree(sgt_info);
+		kfree(exported);
 	}
 }
 
-/* Schedules unexport of dmabuf.
+/* Schedule unexport of dmabuf.
  */
-static int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data)
+int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data)
 {
-	struct ioctl_hyper_dmabuf_unexport *unexport_attr;
-	struct hyper_dmabuf_sgt_info *sgt_info;
+	struct ioctl_hyper_dmabuf_unexport *unexport_attr =
+			(struct ioctl_hyper_dmabuf_unexport *)data;
+	struct exported_sgt_info *exported;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
-
-	if (!data) {
-		dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
-		return -EINVAL;
-	}
-
-	unexport_attr = (struct ioctl_hyper_dmabuf_unexport *)data;
+	dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
 
 	/* find dmabuf in export list */
-	sgt_info = hyper_dmabuf_find_exported(unexport_attr->hid);
+	exported = hyper_dmabuf_find_exported(unexport_attr->hid);
 
-	dev_dbg(hyper_dmabuf_private.device,
+	dev_dbg(hy_drv_priv->dev,
 		"scheduling unexport of buffer {id:%d key:%d %d %d}\n",
 		unexport_attr->hid.id, unexport_attr->hid.rng_key[0],
 		unexport_attr->hid.rng_key[1], unexport_attr->hid.rng_key[2]);
 
 	/* failed to find corresponding entry in export list */
-	if (sgt_info == NULL) {
+	if (exported == NULL) {
 		unexport_attr->status = -ENOENT;
 		return -ENOENT;
 	}
 
-	if (sgt_info->unexport_scheduled)
+	if (exported->unexport_sched)
 		return 0;
 
-	sgt_info->unexport_scheduled = 1;
-	INIT_DELAYED_WORK(&sgt_info->unexport_work, hyper_dmabuf_delayed_unexport);
-	schedule_delayed_work(&sgt_info->unexport_work,
+	exported->unexport_sched = true;
+	INIT_DELAYED_WORK(&exported->unexport,
+			  hyper_dmabuf_delayed_unexport);
+	schedule_delayed_work(&exported->unexport,
 			      msecs_to_jiffies(unexport_attr->delay_ms));
 
-	dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
 	return 0;
 }
 
 static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
 {
-	struct ioctl_hyper_dmabuf_query *query_attr;
-	struct hyper_dmabuf_sgt_info *sgt_info = NULL;
-	struct hyper_dmabuf_imported_sgt_info *imported_sgt_info = NULL;
+	struct ioctl_hyper_dmabuf_query *query_attr =
+			(struct ioctl_hyper_dmabuf_query *)data;
+	struct exported_sgt_info *exported = NULL;
+	struct imported_sgt_info *imported = NULL;
 	int ret = 0;
 
-	if (!data) {
-		dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
-		return -EINVAL;
-	}
-
-	query_attr = (struct ioctl_hyper_dmabuf_query *)data;
-
-	if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hyper_dmabuf_private.domid) {
+	if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hy_drv_priv->domid) {
 		/* query for exported dmabuf */
-		sgt_info = hyper_dmabuf_find_exported(query_attr->hid);
-		if (sgt_info) {
-			ret = hyper_dmabuf_query_exported(sgt_info,
+		exported = hyper_dmabuf_find_exported(query_attr->hid);
+		if (exported) {
+			ret = hyper_dmabuf_query_exported(exported,
 							  query_attr->item, &query_attr->info);
 		} else {
-			dev_err(hyper_dmabuf_private.device,
-				"DMA BUF {id:%d key:%d %d %d} can't be found in the export list\n",
-				query_attr->hid.id, query_attr->hid.rng_key[0], query_attr->hid.rng_key[1],
-				query_attr->hid.rng_key[2]);
+			dev_err(hy_drv_priv->dev,
+				"DMA BUF {id:%d key:%d %d %d} not in the export list\n",
+				query_attr->hid.id, query_attr->hid.rng_key[0],
+				query_attr->hid.rng_key[1], query_attr->hid.rng_key[2]);
 			return -ENOENT;
 		}
 	} else {
 		/* query for imported dmabuf */
-		imported_sgt_info = hyper_dmabuf_find_imported(query_attr->hid);
-		if (imported_sgt_info) {
-			ret = hyper_dmabuf_query_imported(imported_sgt_info,
-							  query_attr->item, &query_attr->info);
+		imported = hyper_dmabuf_find_imported(query_attr->hid);
+		if (imported) {
+			ret = hyper_dmabuf_query_imported(imported, query_attr->item,
+							  &query_attr->info);
 		} else {
-			dev_err(hyper_dmabuf_private.device,
-				"DMA BUF {id:%d key:%d %d %d} can't be found in the imported list\n",
-				query_attr->hid.id, query_attr->hid.rng_key[0], query_attr->hid.rng_key[1],
-				query_attr->hid.rng_key[2]);
+			dev_err(hy_drv_priv->dev,
+				"DMA BUF {id:%d key:%d %d %d} not in the imported list\n",
+				query_attr->hid.id, query_attr->hid.rng_key[0],
+				query_attr->hid.rng_key[1], query_attr->hid.rng_key[2]);
 			return -ENOENT;
 		}
 	}
@@ -698,28 +695,6 @@ static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
 	return ret;
 }
 
-void hyper_dmabuf_emergency_release(struct hyper_dmabuf_sgt_info* sgt_info,
-				    void *attr)
-{
-	struct ioctl_hyper_dmabuf_unexport unexport_attr;
-	struct file *filp = (struct file*) attr;
-
-	if (!filp || !sgt_info)
-		return;
-
-	if (sgt_info->filp == filp) {
-		dev_dbg(hyper_dmabuf_private.device,
-			"Executing emergency release of buffer {id:%d key:%d %d %d}\n",
-			 sgt_info->hid.id, sgt_info->hid.rng_key[0],
-			 sgt_info->hid.rng_key[1], sgt_info->hid.rng_key[2]);
-
-		unexport_attr.hid = sgt_info->hid;
-		unexport_attr.delay_ms = 0;
-
-		hyper_dmabuf_unexport_ioctl(filp, &unexport_attr);
-	}
-}
-
 const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
 	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP, hyper_dmabuf_tx_ch_setup_ioctl, 0),
 	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP, hyper_dmabuf_rx_ch_setup_ioctl, 0),
@@ -739,7 +714,7 @@ long hyper_dmabuf_ioctl(struct file *filp,
 	char *kdata;
 
 	if (nr > ARRAY_SIZE(hyper_dmabuf_ioctls)) {
-		dev_err(hyper_dmabuf_private.device, "invalid ioctl\n");
+		dev_err(hy_drv_priv->dev, "invalid ioctl\n");
 		return -EINVAL;
 	}
 
@@ -748,18 +723,18 @@ long hyper_dmabuf_ioctl(struct file *filp,
 	func = ioctl->func;
 
 	if (unlikely(!func)) {
-		dev_err(hyper_dmabuf_private.device, "no function\n");
+		dev_err(hy_drv_priv->dev, "no function\n");
 		return -EINVAL;
 	}
 
 	kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
 	if (!kdata) {
-		dev_err(hyper_dmabuf_private.device, "no memory\n");
+		dev_err(hy_drv_priv->dev, "no memory\n");
 		return -ENOMEM;
 	}
 
 	if (copy_from_user(kdata, (void __user *)param, _IOC_SIZE(cmd)) != 0) {
-		dev_err(hyper_dmabuf_private.device, "failed to copy from user arguments\n");
+		dev_err(hy_drv_priv->dev, "failed to copy from user arguments\n");
 		ret = -EFAULT;
 		goto ioctl_error;
 	}
@@ -767,7 +742,7 @@ long hyper_dmabuf_ioctl(struct file *filp,
 	ret = func(filp, kdata);
 
 	if (copy_to_user((void __user *)param, kdata, _IOC_SIZE(cmd)) != 0) {
-		dev_err(hyper_dmabuf_private.device, "failed to copy to user arguments\n");
+		dev_err(hy_drv_priv->dev, "failed to copy to user arguments\n");
 		ret = -EFAULT;
 		goto ioctl_error;
 	}
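
A note on the dispatch path above: it is the usual copy-in / dispatch /
copy-out pattern for ioctls whose argument is a by-value struct. Below is a
minimal, illustrative sketch of that pattern -- not part of the patch -- with
a hypothetical my_ioctls[] table standing in for hyper_dmabuf_ioctls[]. The
sketch bounds the table index with >=, since nr == ARRAY_SIZE() would read
one slot past the end of the table.

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

typedef int (*ioctl_fn)(struct file *filp, void *data);

/* hypothetical descriptor table, one handler per ioctl number */
static const struct { ioctl_fn func; } my_ioctls[8];

static long my_ioctl(struct file *filp, unsigned int cmd, unsigned long param)
{
	unsigned int nr = _IOC_NR(cmd);
	ioctl_fn func;
	char *kdata;
	long ret;

	if (nr >= ARRAY_SIZE(my_ioctls))
		return -EINVAL;

	func = my_ioctls[nr].func;
	if (!func)
		return -EINVAL;

	/* bounce the user struct through a kernel copy */
	kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
	if (!kdata)
		return -ENOMEM;

	if (copy_from_user(kdata, (void __user *)param, _IOC_SIZE(cmd))) {
		ret = -EFAULT;
		goto out;
	}

	ret = func(filp, kdata);	/* handler sees only the kernel copy */

	/* write results (e.g. a returned hid) back to userspace */
	if (copy_to_user((void __user *)param, kdata, _IOC_SIZE(cmd)))
		ret = -EFAULT;
out:
	kfree(kdata);
	return ret;
}
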
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
index ebfbb84..3e9470a 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
@@ -42,4 +42,6 @@ struct hyper_dmabuf_ioctl_desc {
 			.name = #ioctl			\
 	}
 
+int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data);
+
 #endif //__HYPER_DMABUF_IOCTL_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
index eaef2c1..1b3745e 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
@@ -39,24 +39,22 @@
 #include "hyper_dmabuf_id.h"
 #include "hyper_dmabuf_event.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED);
 DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED);
 
 #ifdef CONFIG_HYPER_DMABUF_SYSFS
 static ssize_t hyper_dmabuf_imported_show(struct device *drv, struct device_attribute *attr, char *buf)
 {
-	struct hyper_dmabuf_info_entry_imported *info_entry;
+	struct list_entry_imported *info_entry;
 	int bkt;
 	ssize_t count = 0;
 	size_t total = 0;
 
 	hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) {
-		hyper_dmabuf_id_t hid = info_entry->info->hid;
-		int nents = info_entry->info->nents;
-		bool valid = info_entry->info->valid;
-		int num_importers = info_entry->info->num_importers;
+		hyper_dmabuf_id_t hid = info_entry->imported->hid;
+		int nents = info_entry->imported->nents;
+		bool valid = info_entry->imported->valid;
+		int num_importers = info_entry->imported->importers;
 		total += nents;
 		count += scnprintf(buf + count, PAGE_SIZE - count,
 				   "hid:{id:%d keys:%d %d %d}, nents:%d, v:%c, numi:%d\n",
@@ -71,16 +69,16 @@ static ssize_t hyper_dmabuf_imported_show(struct device *drv, struct device_attr
 
 static ssize_t hyper_dmabuf_exported_show(struct device *drv, struct device_attribute *attr, char *buf)
 {
-	struct hyper_dmabuf_info_entry_exported *info_entry;
+	struct list_entry_exported *info_entry;
 	int bkt;
 	ssize_t count = 0;
 	size_t total = 0;
 
 	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) {
-		hyper_dmabuf_id_t hid = info_entry->info->hid;
-		int nents = info_entry->info->nents;
-		bool valid = info_entry->info->valid;
-		int importer_exported = info_entry->info->importer_exported;
+		hyper_dmabuf_id_t hid = info_entry->exported->hid;
+		int nents = info_entry->exported->nents;
+		bool valid = info_entry->exported->valid;
+		int importer_exported = info_entry->exported->active;
 		total += nents;
 		count += scnprintf(buf + count, PAGE_SIZE - count,
 				   "hid:{hid:%d keys:%d %d %d}, nents:%d, v:%c, ie:%d\n",
@@ -135,57 +133,57 @@ int hyper_dmabuf_table_destroy()
 	return 0;
 }
 
-int hyper_dmabuf_register_exported(struct hyper_dmabuf_sgt_info *info)
+int hyper_dmabuf_register_exported(struct exported_sgt_info *exported)
 {
-	struct hyper_dmabuf_info_entry_exported *info_entry;
+	struct list_entry_exported *info_entry;
 
 	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
 	if (!info_entry) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
                         "No memory left to be allocated\n");
 		return -ENOMEM;
 	}
 
-	info_entry->info = info;
+	info_entry->exported = exported;
 
 	hash_add(hyper_dmabuf_hash_exported, &info_entry->node,
-		 info_entry->info->hid.id);
+		 info_entry->exported->hid.id);
 
 	return 0;
 }
 
-int hyper_dmabuf_register_imported(struct hyper_dmabuf_imported_sgt_info* info)
+int hyper_dmabuf_register_imported(struct imported_sgt_info* imported)
 {
-	struct hyper_dmabuf_info_entry_imported *info_entry;
+	struct list_entry_imported *info_entry;
 
 	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
 	if (!info_entry) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
                         "No memory left to be allocated\n");
 		return -ENOMEM;
 	}
 
-	info_entry->info = info;
+	info_entry->imported = imported;
 
 	hash_add(hyper_dmabuf_hash_imported, &info_entry->node,
-		 info_entry->info->hid.id);
+		 info_entry->imported->hid.id);
 
 	return 0;
 }
 
-struct hyper_dmabuf_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid)
+struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid)
 {
-	struct hyper_dmabuf_info_entry_exported *info_entry;
+	struct list_entry_exported *info_entry;
 	int bkt;
 
 	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
 		/* checking hid.id first */
-		if(info_entry->info->hid.id == hid.id) {
+		if(info_entry->exported->hid.id == hid.id) {
 			/* then key is compared */
-			if(hyper_dmabuf_hid_keycomp(info_entry->info->hid, hid))
-				return info_entry->info;
+			if(hyper_dmabuf_hid_keycomp(info_entry->exported->hid, hid))
+				return info_entry->exported;
 			/* a key mismatch means the given HID is invalid, so return NULL */
 			else
 				break;
@@ -197,29 +195,29 @@ struct hyper_dmabuf_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid)
 /* search for a pre-exported sgt and return its id if it exists */
 hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, int domid)
 {
-	struct hyper_dmabuf_info_entry_exported *info_entry;
+	struct list_entry_exported *info_entry;
 	hyper_dmabuf_id_t hid = {-1, {0, 0, 0}};
 	int bkt;
 
 	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
-		if(info_entry->info->dma_buf == dmabuf &&
-		   info_entry->info->hyper_dmabuf_rdomain == domid)
-			return info_entry->info->hid;
+		if(info_entry->exported->dma_buf == dmabuf &&
+		   info_entry->exported->rdomid == domid)
+			return info_entry->exported->hid;
 
 	return hid;
 }
 
-struct hyper_dmabuf_imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid)
+struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid)
 {
-	struct hyper_dmabuf_info_entry_imported *info_entry;
+	struct list_entry_imported *info_entry;
 	int bkt;
 
 	hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
 		/* checking hid.id first */
-		if(info_entry->info->hid.id == hid.id) {
+		if(info_entry->imported->hid.id == hid.id) {
 			/* then key is compared */
-			if(hyper_dmabuf_hid_keycomp(info_entry->info->hid, hid))
-				return info_entry->info;
+			if(hyper_dmabuf_hid_keycomp(info_entry->imported->hid, hid))
+				return info_entry->imported;
 			/* a key mismatch means the given HID is invalid, so return NULL */
 			else {
 				break;
@@ -231,14 +229,14 @@ struct hyper_dmabuf_imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_i
 
 int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid)
 {
-	struct hyper_dmabuf_info_entry_exported *info_entry;
+	struct list_entry_exported *info_entry;
 	int bkt;
 
 	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
 		/* checking hid.id first */
-		if(info_entry->info->hid.id == hid.id) {
+		if(info_entry->exported->hid.id == hid.id) {
 			/* then key is compared */
-			if(hyper_dmabuf_hid_keycomp(info_entry->info->hid, hid)) {
+			if(hyper_dmabuf_hid_keycomp(info_entry->exported->hid, hid)) {
 				hash_del(&info_entry->node);
 				kfree(info_entry);
 				return 0;
@@ -252,14 +250,14 @@ int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid)
 
 int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid)
 {
-	struct hyper_dmabuf_info_entry_imported *info_entry;
+	struct list_entry_imported *info_entry;
 	int bkt;
 
 	hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
 		/* checking hid.id first */
-		if(info_entry->info->hid.id == hid.id) {
+		if(info_entry->imported->hid.id == hid.id) {
 			/* then key is compared */
-			if(hyper_dmabuf_hid_keycomp(info_entry->info->hid, hid)) {
+			if(hyper_dmabuf_hid_keycomp(info_entry->imported->hid, hid)) {
 				hash_del(&info_entry->node);
 				kfree(info_entry);
 				return 0;
@@ -272,15 +270,15 @@ int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid)
 }
 
 void hyper_dmabuf_foreach_exported(
-	void (*func)(struct hyper_dmabuf_sgt_info *, void *attr),
+	void (*func)(struct exported_sgt_info *, void *attr),
 	void *attr)
 {
-	struct hyper_dmabuf_info_entry_exported *info_entry;
+	struct list_entry_exported *info_entry;
 	struct hlist_node *tmp;
 	int bkt;
 
 	hash_for_each_safe(hyper_dmabuf_hash_exported, bkt, tmp,
 			info_entry, node) {
-		func(info_entry->info, attr);
+		func(info_entry->exported, attr);
 	}
 }
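
The lookup helpers above hash only on the cheap integer hid.id and treat the
96-bit rng_key purely as a verification step, so a stale or forged HID whose
id happens to collide still yields NULL. A condensed sketch of that two-stage
check; unlike the code above, it walks just the one bucket via
hash_for_each_possible(), which would be an equivalent but slightly cheaper
form (illustrative only, reusing the types this patch introduces):

#include <linux/hashtable.h>

static struct exported_sgt_info *find_exported_sketch(hyper_dmabuf_id_t hid)
{
	struct list_entry_exported *entry;

	/* stage 1: walk the bucket keyed by hid.id only */
	hash_for_each_possible(hyper_dmabuf_hash_exported, entry, node, hid.id) {
		if (entry->exported->hid.id != hid.id)
			continue;

		/* stage 2: the random key must also match */
		if (hyper_dmabuf_hid_keycomp(entry->exported->hid, hid))
			return entry->exported;

		break;	/* same id but wrong key: the HID is invalid */
	}

	return NULL;
}
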
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h
index 8f64db8..d5c17ef 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h
@@ -32,13 +32,13 @@
 /* number of bits to be used for imported dmabufs hash table */
 #define MAX_ENTRY_IMPORTED 7
 
-struct hyper_dmabuf_info_entry_exported {
-        struct hyper_dmabuf_sgt_info *info;
+struct list_entry_exported {
+        struct exported_sgt_info *exported;
         struct hlist_node node;
 };
 
-struct hyper_dmabuf_info_entry_imported {
-        struct hyper_dmabuf_imported_sgt_info *info;
+struct list_entry_imported {
+        struct imported_sgt_info *imported;
         struct hlist_node node;
 };
 
@@ -46,23 +46,23 @@ int hyper_dmabuf_table_init(void);
 
 int hyper_dmabuf_table_destroy(void);
 
-int hyper_dmabuf_register_exported(struct hyper_dmabuf_sgt_info *info);
+int hyper_dmabuf_register_exported(struct exported_sgt_info *info);
 
 /* search for a pre-exported sgt and return its id if it exists */
 hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, int domid);
 
-int hyper_dmabuf_register_imported(struct hyper_dmabuf_imported_sgt_info* info);
+int hyper_dmabuf_register_imported(struct imported_sgt_info* info);
 
-struct hyper_dmabuf_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid);
+struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid);
 
-struct hyper_dmabuf_imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid);
+struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid);
 
 int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid);
 
 int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid);
 
 void hyper_dmabuf_foreach_exported(
-	void (*func)(struct hyper_dmabuf_sgt_info *, void *attr),
+	void (*func)(struct exported_sgt_info *, void *attr),
 	void *attr);
 
 int hyper_dmabuf_register_sysfs(struct device *dev);
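
One note on the constants in this header: MAX_ENTRY_IMPORTED and
MAX_ENTRY_EXPORTED are bucket-count exponents, not limits on how many buffers
the tables can track. Roughly:

/* DECLARE_HASHTABLE(tbl, bits) boils down to a bucket array: */
struct hlist_head tbl[1 << 7];	/* 7 bits -> 128 buckets */

Entries chain within a bucket, so the tables hold arbitrarily many records;
the bit count only trades memory for expected chain length.
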
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index ec37c3b..907f76e 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -31,7 +31,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
-#include <xen/grant_table.h>
 #include <linux/workqueue.h>
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_drv.h"
@@ -39,58 +38,56 @@
 #include "hyper_dmabuf_event.h"
 #include "hyper_dmabuf_list.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 struct cmd_process {
 	struct work_struct work;
 	struct hyper_dmabuf_req *rq;
 	int domid;
 };
 
-void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
-				 enum hyper_dmabuf_command command, int *operands)
+void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
+			     enum hyper_dmabuf_command cmd, int *op)
 {
 	int i;
 
-	req->status = HYPER_DMABUF_REQ_NOT_RESPONDED;
-	req->command = command;
+	req->stat = HYPER_DMABUF_REQ_NOT_RESPONDED;
+	req->cmd = cmd;
 
-	switch(command) {
+	switch(cmd) {
 	/* as exporter, commands to importer */
 	case HYPER_DMABUF_EXPORT:
 		/* exporting pages for dmabuf */
 		/* command : HYPER_DMABUF_EXPORT,
-		 * operands0~3 : hyper_dmabuf_id
-		 * operands4 : number of pages to be shared
-		 * operands5 : offset of data in the first page
-		 * operands6 : length of data in the last page
-		 * operands7 : top-level reference number for shared pages
-		 * operands8 : size of private data (from operands9)
-		 * operands9 ~ : Driver-specific private data (e.g. graphic buffer's meta info)
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : number of pages to be shared
+		 * op5 : offset of data in the first page
+		 * op6 : length of data in the last page
+		 * op7 : top-level reference number for shared pages
+		 * op8 : size of private data (from op9)
+		 * op9 ~ : Driver-specific private data (e.g. graphics buffer metadata)
 		 */
 
-		memcpy(&req->operands[0], &operands[0], 9 * sizeof(int) + operands[8]);
+		memcpy(&req->op[0], &op[0], 9 * sizeof(int) + op[8]);
 		break;
 
 	case HYPER_DMABUF_NOTIFY_UNEXPORT:
 		/* destroy sg_list for hyper_dmabuf_id on remote side */
 		/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
-		 * operands0~3 : hyper_dmabuf_id_t hid
+		 * op0~3 : hyper_dmabuf_id_t hid
 		 */
 
 		for (i=0; i < 4; i++)
-			req->operands[i] = operands[i];
+			req->op[i] = op[i];
 		break;
 
 	case HYPER_DMABUF_EXPORT_FD:
 	case HYPER_DMABUF_EXPORT_FD_FAILED:
 		/* dmabuf fd is being created on imported side or importing failed */
 		/* command : HYPER_DMABUF_EXPORT_FD or HYPER_DMABUF_EXPORT_FD_FAILED,
-		 * operands0~3 : hyper_dmabuf_id
+		 * op0~3 : hyper_dmabuf_id
 		 */
 
 		for (i=0; i < 4; i++)
-			req->operands[i] = operands[i];
+			req->op[i] = op[i];
 		break;
 
 	case HYPER_DMABUF_OPS_TO_REMOTE:
@@ -103,11 +100,11 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
 		/* notify dmabuf map/unmap to the exporter; map makes the driver do shadow mapping
 		 * or unmapping to stay in sync with the original exporter (e.g. i915) */
 		/* command : HYPER_DMABUF_OPS_TO_SOURCE.
-		 * operands0~3 : hyper_dmabuf_id
-		 * operands4 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
 		 */
 		for (i = 0; i < 5; i++)
-			req->operands[i] = operands[i];
+			req->op[i] = op[i];
 		break;
 
 	default:
@@ -116,9 +113,9 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
 	}
 }
 
-void cmd_process_work(struct work_struct *work)
+static void cmd_process_work(struct work_struct *work)
 {
-	struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
+	struct imported_sgt_info *imported;
 	struct cmd_process *proc = container_of(work, struct cmd_process, work);
 	struct hyper_dmabuf_req *req;
 	int domid;
@@ -127,107 +124,107 @@ void cmd_process_work(struct work_struct *work)
 	req = proc->rq;
 	domid = proc->domid;
 
-	switch (req->command) {
+	switch (req->cmd) {
 	case HYPER_DMABUF_EXPORT:
 		/* exporting pages for dmabuf */
 		/* command : HYPER_DMABUF_EXPORT,
-		 * operands0~3 : hyper_dmabuf_id
-		 * operands4 : number of pages to be shared
-		 * operands5 : offset of data in the first page
-		 * operands6 : length of data in the last page
-		 * operands7 : top-level reference number for shared pages
-		 * operands8 : size of private data (from operands9)
-		 * operands9 ~ : Driver-specific private data (e.g. graphic buffer's meta info)
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : number of pages to be shared
+		 * op5 : offset of data in the first page
+		 * op6 : length of data in the last page
+		 * op7 : top-level reference number for shared pages
+		 * op8 : size of private data (from op9)
+		 * op9 ~ : Driver-specific private data (e.g. graphics buffer metadata)
 		 */
 
 		/* nents == 0 means this message only synchronizes priv data
 		 * for an existing imported_sgt_info, so don't create a new one */
-		if (req->operands[4] == 0) {
-			hyper_dmabuf_id_t exist = {req->operands[0],
-						   {req->operands[1], req->operands[2],
-						    req->operands[3]}};
+		if (req->op[4] == 0) {
+			hyper_dmabuf_id_t exist = {req->op[0],
+						   {req->op[1], req->op[2],
+						   req->op[3]}};
 
-			imported_sgt_info = hyper_dmabuf_find_imported(exist);
+			imported = hyper_dmabuf_find_imported(exist);
 
-			if (!imported_sgt_info) {
-				dev_err(hyper_dmabuf_private.device,
+			if (!imported) {
+				dev_err(hy_drv_priv->dev,
 					"Can't find imported sgt_info from IMPORT_LIST\n");
 				break;
 			}
 
 			/* if size of new private data is different,
 			 * we reallocate it. */
-			if (imported_sgt_info->sz_priv != req->operands[8]) {
-				kfree(imported_sgt_info->priv);
-				imported_sgt_info->sz_priv = req->operands[8];
-				imported_sgt_info->priv = kcalloc(1, req->operands[8], GFP_KERNEL);
-				if (!imported_sgt_info->priv) {
-					dev_err(hyper_dmabuf_private.device,
+			if (imported->sz_priv != req->op[8]) {
+				kfree(imported->priv);
+				imported->sz_priv = req->op[8];
+				imported->priv = kcalloc(1, req->op[8], GFP_KERNEL);
+				if (!imported->priv) {
+					dev_err(hy_drv_priv->dev,
 						"Fail to allocate priv\n");
 
 					/* set it invalid */
-					imported_sgt_info->valid = 0;
+					imported->valid = 0;
 					break;
 				}
 			}
 
 			/* updating priv data */
-			memcpy(imported_sgt_info->priv, &req->operands[9], req->operands[8]);
+			memcpy(imported->priv, &req->op[9], req->op[8]);
 
 #ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
 			/* generating import event */
-			hyper_dmabuf_import_event(imported_sgt_info->hid);
+			hyper_dmabuf_import_event(imported->hid);
 #endif
 
 			break;
 		}
 
-		imported_sgt_info = kcalloc(1, sizeof(*imported_sgt_info), GFP_KERNEL);
+		imported = kcalloc(1, sizeof(*imported), GFP_KERNEL);
 
-		if (!imported_sgt_info) {
-			dev_err(hyper_dmabuf_private.device,
+		if (!imported) {
+			dev_err(hy_drv_priv->dev,
 				"No memory left to be allocated\n");
 			break;
 		}
 
-		imported_sgt_info->sz_priv = req->operands[8];
-		imported_sgt_info->priv = kcalloc(1, req->operands[8], GFP_KERNEL);
+		imported->sz_priv = req->op[8];
+		imported->priv = kcalloc(1, req->op[8], GFP_KERNEL);
 
-		if (!imported_sgt_info->priv) {
-			dev_err(hyper_dmabuf_private.device,
+		if (!imported->priv) {
+			dev_err(hy_drv_priv->dev,
 				"Fail to allocate priv\n");
 
-			kfree(imported_sgt_info);
+			kfree(imported);
 			break;
 		}
 
-		imported_sgt_info->hid.id = req->operands[0];
+		imported->hid.id = req->op[0];
 
 		for (i=0; i<3; i++)
-			imported_sgt_info->hid.rng_key[i] = req->operands[i+1];
+			imported->hid.rng_key[i] = req->op[i+1];
 
-		imported_sgt_info->nents = req->operands[4];
-		imported_sgt_info->frst_ofst = req->operands[5];
-		imported_sgt_info->last_len = req->operands[6];
-		imported_sgt_info->ref_handle = req->operands[7];
+		imported->nents = req->op[4];
+		imported->frst_ofst = req->op[5];
+		imported->last_len = req->op[6];
+		imported->ref_handle = req->op[7];
 
-		dev_dbg(hyper_dmabuf_private.device, "DMABUF was exported\n");
-		dev_dbg(hyper_dmabuf_private.device, "\thid{id:%d key:%d %d %d}\n",
-			req->operands[0], req->operands[1], req->operands[2],
-			req->operands[3]);
-		dev_dbg(hyper_dmabuf_private.device, "\tnents %d\n", req->operands[4]);
-		dev_dbg(hyper_dmabuf_private.device, "\tfirst offset %d\n", req->operands[5]);
-		dev_dbg(hyper_dmabuf_private.device, "\tlast len %d\n", req->operands[6]);
-		dev_dbg(hyper_dmabuf_private.device, "\tgrefid %d\n", req->operands[7]);
+		dev_dbg(hy_drv_priv->dev, "DMABUF was exported\n");
+		dev_dbg(hy_drv_priv->dev, "\thid{id:%d key:%d %d %d}\n",
+			req->op[0], req->op[1], req->op[2],
+			req->op[3]);
+		dev_dbg(hy_drv_priv->dev, "\tnents %d\n", req->op[4]);
+		dev_dbg(hy_drv_priv->dev, "\tfirst offset %d\n", req->op[5]);
+		dev_dbg(hy_drv_priv->dev, "\tlast len %d\n", req->op[6]);
+		dev_dbg(hy_drv_priv->dev, "\tgrefid %d\n", req->op[7]);
 
-		memcpy(imported_sgt_info->priv, &req->operands[9], req->operands[8]);
+		memcpy(imported->priv, &req->op[9], req->op[8]);
 
-		imported_sgt_info->valid = 1;
-		hyper_dmabuf_register_imported(imported_sgt_info);
+		imported->valid = true;
+		hyper_dmabuf_register_imported(imported);
 
 #ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
 		/* generating import event */
-		hyper_dmabuf_import_event(imported_sgt_info->hid);
+		hyper_dmabuf_import_event(imported->hid);
 #endif
 
 		break;
@@ -251,142 +248,142 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
 {
 	struct cmd_process *proc;
 	struct hyper_dmabuf_req *temp_req;
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	struct hyper_dmabuf_sgt_info *exp_sgt_info;
+	struct imported_sgt_info *imported;
+	struct exported_sgt_info *exported;
 	hyper_dmabuf_id_t hid;
 	int ret;
 
 	if (!req) {
-		dev_err(hyper_dmabuf_private.device, "request is NULL\n");
+		dev_err(hy_drv_priv->dev, "request is NULL\n");
 		return -EINVAL;
 	}
 
-	hid.id = req->operands[0];
-	hid.rng_key[0] = req->operands[1];
-	hid.rng_key[1] = req->operands[2];
-	hid.rng_key[2] = req->operands[3];
+	hid.id = req->op[0];
+	hid.rng_key[0] = req->op[1];
+	hid.rng_key[1] = req->op[2];
+	hid.rng_key[2] = req->op[3];
 
-	if ((req->command < HYPER_DMABUF_EXPORT) ||
-		(req->command > HYPER_DMABUF_OPS_TO_SOURCE)) {
-		dev_err(hyper_dmabuf_private.device, "invalid command\n");
+	if ((req->cmd < HYPER_DMABUF_EXPORT) ||
+		(req->cmd > HYPER_DMABUF_OPS_TO_SOURCE)) {
+		dev_err(hy_drv_priv->dev, "invalid command\n");
 		return -EINVAL;
 	}
 
-	req->status = HYPER_DMABUF_REQ_PROCESSED;
+	req->stat = HYPER_DMABUF_REQ_PROCESSED;
 
 	/* HYPER_DMABUF_NOTIFY_UNEXPORT requires an immediate
 	 * follow-up, so it can't be processed in a workqueue
 	 */
-	if (req->command == HYPER_DMABUF_NOTIFY_UNEXPORT) {
+	if (req->cmd == HYPER_DMABUF_NOTIFY_UNEXPORT) {
 		/* destroy sg_list for hyper_dmabuf_id on remote side */
 		/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
-		 * operands0~3 : hyper_dmabuf_id
+		 * op0~3 : hyper_dmabuf_id
 		 */
-		dev_dbg(hyper_dmabuf_private.device,
+		dev_dbg(hy_drv_priv->dev,
 			"%s: processing HYPER_DMABUF_NOTIFY_UNEXPORT\n", __func__);
 
-		sgt_info = hyper_dmabuf_find_imported(hid);
+		imported = hyper_dmabuf_find_imported(hid);
 
-		if (sgt_info) {
+		if (imported) {
 			/* if anything is still using dma_buf */
-			if (sgt_info->num_importers) {
+			if (imported->importers) {
 				/*
 				 * Buffer is still in use, just mark that it should
 				 * not be allowed to export its fd anymore.
 				 */
-				sgt_info->valid = 0;
+				imported->valid = false;
 			} else {
 				/* No one is using buffer, remove it from imported list */
 				hyper_dmabuf_remove_imported(hid);
-				kfree(sgt_info);
+				kfree(imported);
 			}
 		} else {
-			req->status = HYPER_DMABUF_REQ_ERROR;
+			req->stat = HYPER_DMABUF_REQ_ERROR;
 		}
 
-		return req->command;
+		return req->cmd;
 	}
 
 	/* dma buf remote synchronization */
-	if (req->command == HYPER_DMABUF_OPS_TO_SOURCE) {
+	if (req->cmd == HYPER_DMABUF_OPS_TO_SOURCE) {
 		/* notify dmabuf map/unmap to the exporter; map makes the driver do shadow mapping
 		 * or unmapping to stay in sync with the original exporter (e.g. i915) */
 
 		/* command : HYPER_DMABUF_OPS_TO_SOURCE.
-		 * operands0~3 : hyper_dmabuf_id
-		 * operands1 : enum hyper_dmabuf_ops {....}
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : enum hyper_dmabuf_ops {....}
 		 */
-		dev_dbg(hyper_dmabuf_private.device,
+		dev_dbg(hy_drv_priv->dev,
 			"%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__);
 
-		ret = hyper_dmabuf_remote_sync(hid, req->operands[4]);
+		ret = hyper_dmabuf_remote_sync(hid, req->op[4]);
 
 		if (ret)
-			req->status = HYPER_DMABUF_REQ_ERROR;
+			req->stat = HYPER_DMABUF_REQ_ERROR;
 		else
-			req->status = HYPER_DMABUF_REQ_PROCESSED;
+			req->stat = HYPER_DMABUF_REQ_PROCESSED;
 
-		return req->command;
+		return req->cmd;
 	}
 
 	/* synchronous dma_buf_fd export */
-	if (req->command == HYPER_DMABUF_EXPORT_FD) {
+	if (req->cmd == HYPER_DMABUF_EXPORT_FD) {
 		/* find a corresponding SGT for the id */
-		dev_dbg(hyper_dmabuf_private.device,
+		dev_dbg(hy_drv_priv->dev,
 			"Processing HYPER_DMABUF_EXPORT_FD for buffer {id:%d key:%d %d %d}\n",
 			hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
 
-		exp_sgt_info = hyper_dmabuf_find_exported(hid);
+		exported = hyper_dmabuf_find_exported(hid);
 
-		if (!exp_sgt_info) {
-			dev_err(hyper_dmabuf_private.device,
+		if (!exported) {
+			dev_err(hy_drv_priv->dev,
 				"critical err: requested sgt_info can't be found for buffer {id:%d key:%d %d %d}\n",
 				hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
 
-			req->status = HYPER_DMABUF_REQ_ERROR;
-		} else if (!exp_sgt_info->valid) {
-			dev_dbg(hyper_dmabuf_private.device,
+			req->stat = HYPER_DMABUF_REQ_ERROR;
+		} else if (!exported->valid) {
+			dev_dbg(hy_drv_priv->dev,
 				"Buffer no longer valid - cannot export fd for buffer {id:%d key:%d %d %d}\n",
 				hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
 
-			req->status = HYPER_DMABUF_REQ_ERROR;
+			req->stat = HYPER_DMABUF_REQ_ERROR;
 		} else {
-			dev_dbg(hyper_dmabuf_private.device,
+			dev_dbg(hy_drv_priv->dev,
 				"Buffer still valid - can export fd for buffer {id:%d key:%d %d %d}\n",
 				hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
 
-			exp_sgt_info->importer_exported++;
-			req->status = HYPER_DMABUF_REQ_PROCESSED;
+			exported->active++;
+			req->stat = HYPER_DMABUF_REQ_PROCESSED;
 		}
-		return req->command;
+		return req->cmd;
 	}
 
-	if (req->command == HYPER_DMABUF_EXPORT_FD_FAILED) {
-		dev_dbg(hyper_dmabuf_private.device,
+	if (req->cmd == HYPER_DMABUF_EXPORT_FD_FAILED) {
+		dev_dbg(hy_drv_priv->dev,
 			"Processing HYPER_DMABUF_EXPORT_FD_FAILED for buffer {id:%d key:%d %d %d}\n",
 			hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
 
-		exp_sgt_info = hyper_dmabuf_find_exported(hid);
+		exported = hyper_dmabuf_find_exported(hid);
 
-		if (!exp_sgt_info) {
-			dev_err(hyper_dmabuf_private.device,
+		if (!exported) {
+			dev_err(hy_drv_priv->dev,
 				"critical err: requested sgt_info can't be found for buffer {id:%d key:%d %d %d}\n",
 				hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
 
-			req->status = HYPER_DMABUF_REQ_ERROR;
+			req->stat = HYPER_DMABUF_REQ_ERROR;
 		} else {
-			exp_sgt_info->importer_exported--;
-			req->status = HYPER_DMABUF_REQ_PROCESSED;
+			exported->active--;
+			req->stat = HYPER_DMABUF_REQ_PROCESSED;
 		}
-		return req->command;
+		return req->cmd;
 	}
 
-	dev_dbg(hyper_dmabuf_private.device,
+	dev_dbg(hy_drv_priv->dev,
 		"%s: putting request to workqueue\n", __func__);
 	temp_req = kmalloc(sizeof(*temp_req), GFP_KERNEL);
 
 	if (!temp_req) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		return -ENOMEM;
 	}
@@ -396,7 +393,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
 	proc = kcalloc(1, sizeof(struct cmd_process), GFP_KERNEL);
 
 	if (!proc) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		kfree(temp_req);
 		return -ENOMEM;
@@ -407,7 +404,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
 
 	INIT_WORK(&(proc->work), cmd_process_work);
 
-	queue_work(hyper_dmabuf_private.work_queue, &(proc->work));
+	queue_work(hy_drv_priv->work_queue, &(proc->work));
 
-	return req->command;
+	return req->cmd;
 }
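
hyper_dmabuf_msg_parse() above answers NOTIFY_UNEXPORT, OPS_TO_SOURCE and the
EXPORT_FD pair inline, because the sender waits on those, and defers the rest
to the driver workqueue. A trimmed sketch of that deferral step, reusing the
patch's cmd_process/cmd_process_work pair (kmemdup stands in for the copy; the
work handler is assumed to free proc and temp_req once it has run):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

static int defer_request_sketch(int domid, struct hyper_dmabuf_req *req)
{
	struct hyper_dmabuf_req *temp_req;
	struct cmd_process *proc;

	/* the ring slot is reused, so keep a private copy of the request */
	temp_req = kmemdup(req, sizeof(*req), GFP_KERNEL);
	if (!temp_req)
		return -ENOMEM;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		kfree(temp_req);
		return -ENOMEM;
	}

	proc->rq = temp_req;
	proc->domid = domid;

	/* run cmd_process_work() later in driver context */
	INIT_WORK(&proc->work, cmd_process_work);
	queue_work(hy_drv_priv->work_queue, &proc->work);

	return req->cmd;
}
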
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
index 0f6e795..7c694ec 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
@@ -28,17 +28,17 @@
 #define MAX_NUMBER_OF_OPERANDS 64
 
 struct hyper_dmabuf_req {
-	unsigned int request_id;
-	unsigned int status;
-	unsigned int command;
-	unsigned int operands[MAX_NUMBER_OF_OPERANDS];
+	unsigned int req_id;
+	unsigned int stat;
+	unsigned int cmd;
+	unsigned int op[MAX_NUMBER_OF_OPERANDS];
 };
 
 struct hyper_dmabuf_resp {
-	unsigned int response_id;
-	unsigned int status;
-	unsigned int command;
-	unsigned int operands[MAX_NUMBER_OF_OPERANDS];
+	unsigned int resp_id;
+	unsigned int stat;
+	unsigned int cmd;
+	unsigned int op[MAX_NUMBER_OF_OPERANDS];
 };
 
 enum hyper_dmabuf_command {
@@ -75,7 +75,7 @@ enum hyper_dmabuf_req_feedback {
 };
 
 /* create a request packet with given command and operands */
-void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
+void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
 				 enum hyper_dmabuf_command command,
 				 int *operands);
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
index 9313c42..7e73170 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
@@ -32,8 +32,6 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/dma-buf.h>
-#include <xen/grant_table.h>
-#include <asm/xen/page.h>
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_ops.h"
@@ -45,122 +43,111 @@
 #define WAIT_AFTER_SYNC_REQ 0
 #define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
-inline int hyper_dmabuf_sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
+static int hyper_dmabuf_sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
 {
 	struct hyper_dmabuf_req *req;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
-	int operands[5];
+	struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
+	int op[5];
 	int i;
 	int ret;
 
-	operands[0] = hid.id;
+	op[0] = hid.id;
 
 	for (i=0; i<3; i++)
-		operands[i+1] = hid.rng_key[i];
+		op[i+1] = hid.rng_key[i];
 
-	operands[4] = dmabuf_ops;
+	op[4] = dmabuf_ops;
 
 	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
 
 	if (!req) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		return -ENOMEM;
 	}
 
-	hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, &operands[0]);
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]);
 
 	/* send request and wait for a response */
 	ret = ops->send_req(HYPER_DMABUF_DOM_ID(hid), req, WAIT_AFTER_SYNC_REQ);
 
+	if (ret < 0) {
+		dev_dbg(hy_drv_priv->dev,
+			"dmabuf sync request failed:%d\n", req->op[4]);
+	}
+
 	kfree(req);
 
 	return ret;
 }
 
-static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, struct device* dev,
-			struct dma_buf_attachment *attach)
+static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf,
+				   struct device* dev,
+				   struct dma_buf_attachment *attach)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!attach->dmabuf->priv)
 		return -EINVAL;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;
+	imported = (struct imported_sgt_info *)attach->dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_ATTACH);
 
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-		return ret;
-	}
-
-	return 0;
+	return ret;
 }
 
-static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, struct dma_buf_attachment *attach)
+static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf,
+				    struct dma_buf_attachment *attach)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!attach->dmabuf->priv)
 		return;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;
+	imported = (struct imported_sgt_info *)attach->dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_DETACH);
-
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 }
 
 static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachment,
-						enum dma_data_direction dir)
+					     enum dma_data_direction dir)
 {
 	struct sg_table *st;
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	struct hyper_dmabuf_pages_info *page_info;
+	struct imported_sgt_info *imported;
+	struct pages_info *pg_info;
 	int ret;
 
 	if (!attachment->dmabuf->priv)
 		return NULL;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attachment->dmabuf->priv;
+	imported = (struct imported_sgt_info *)attachment->dmabuf->priv;
 
 	/* extract pages from sgt */
-	page_info = hyper_dmabuf_ext_pgs(sgt_info->sgt);
+	pg_info = hyper_dmabuf_ext_pgs(imported->sgt);
 
-	if (!page_info) {
+	if (!pg_info) {
 		return NULL;
 	}
 
 	/* create a new sg_table with extracted pages */
-	st = hyper_dmabuf_create_sgt(page_info->pages, page_info->frst_ofst,
-				page_info->last_len, page_info->nents);
+	st = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst,
+				     pg_info->last_len, pg_info->nents);
 	if (!st)
 		goto err_free_sg;
 
         if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir))
                 goto err_free_sg;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_MAP);
 
-	kfree(page_info->pages);
-	kfree(page_info);
-
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
+	kfree(pg_info->pgs);
+	kfree(pg_info);
 
 	return st;
 
@@ -170,8 +157,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
 		kfree(st);
 	}
 
-	kfree(page_info->pages);
-	kfree(page_info);
+	kfree(pg_info->pgs);
+	kfree(pg_info);
 
 	return NULL;
 }
@@ -180,294 +167,251 @@ static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
 				   struct sg_table *sg,
 				   enum dma_data_direction dir)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!attachment->dmabuf->priv)
 		return;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attachment->dmabuf->priv;
+	imported = (struct imported_sgt_info *)attachment->dmabuf->priv;
 
 	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
 
 	sg_free_table(sg);
 	kfree(sg);
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_UNMAP);
-
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 }
 
 static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+	struct imported_sgt_info *imported;
+	struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
 	int ret;
-	int final_release;
+	int finish;
 
 	if (!dma_buf->priv)
 		return;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dma_buf->priv;
+	imported = (struct imported_sgt_info *)dma_buf->priv;
 
-	if (!dmabuf_refcount(sgt_info->dma_buf)) {
-		sgt_info->dma_buf = NULL;
+	if (!dmabuf_refcount(imported->dma_buf)) {
+		imported->dma_buf = NULL;
 	}
 
-	sgt_info->num_importers--;
+	imported->importers--;
 
-	if (sgt_info->num_importers == 0) {
-		ops->unmap_shared_pages(&sgt_info->refs_info, sgt_info->nents);
+	if (imported->importers == 0) {
+		ops->unmap_shared_pages(&imported->refs_info, imported->nents);
 
-		if (sgt_info->sgt) {
-			sg_free_table(sgt_info->sgt);
-			kfree(sgt_info->sgt);
-			sgt_info->sgt = NULL;
+		if (imported->sgt) {
+			sg_free_table(imported->sgt);
+			kfree(imported->sgt);
+			imported->sgt = NULL;
 		}
 	}
 
-	final_release = sgt_info && !sgt_info->valid &&
-		        !sgt_info->num_importers;
+	finish = imported && !imported->valid &&
+		 !imported->importers;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_RELEASE);
-	if (ret < 0) {
-		dev_warn(hyper_dmabuf_private.device,
-			 "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 
 	/*
 	 * Check if the buffer is still valid and, if not, remove it from the imported list.
 	 * That has to be done after sending the sync request.
 	 */
-	if (final_release) {
-		hyper_dmabuf_remove_imported(sgt_info->hid);
-		kfree(sgt_info);
+	if (finish) {
+		hyper_dmabuf_remove_imported(imported->hid);
+		kfree(imported);
 	}
 }
 
 static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return -EINVAL;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 
 	return ret;
 }
 
 static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return -EINVAL;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_END_CPU_ACCESS);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 
-	return 0;
+	return ret;
 }
 
 static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return NULL;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_KMAP_ATOMIC);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 
 	return NULL; /* for now NULL; need to return the address of the mapped region */
 }
 
 static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum, void *vaddr)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 }
 
 static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return NULL;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_KMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 
-	return NULL; /* for now NULL.. need to return the address of mapped region */
+	/* for now NULL.. need to return the address of mapped region */
+	return NULL;
 }
 
-static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum, void *vaddr)
+static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum,
+				    void *vaddr)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_KUNMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 }
 
 static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return -EINVAL;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_MMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 
 	return ret;
 }
 
 static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return NULL;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_VMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 
 	return NULL;
 }
 
 static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)
 {
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct imported_sgt_info *imported;
 	int ret;
 
 	if (!dmabuf->priv)
 		return;
 
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+	imported = (struct imported_sgt_info *)dmabuf->priv;
 
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+	ret = hyper_dmabuf_sync_request(imported->hid,
 					HYPER_DMABUF_OPS_VUNMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
 }
 
 static const struct dma_buf_ops hyper_dmabuf_ops = {
-		.attach = hyper_dmabuf_ops_attach,
-		.detach = hyper_dmabuf_ops_detach,
-		.map_dma_buf = hyper_dmabuf_ops_map,
-		.unmap_dma_buf = hyper_dmabuf_ops_unmap,
-		.release = hyper_dmabuf_ops_release,
-		.begin_cpu_access = (void*)hyper_dmabuf_ops_begin_cpu_access,
-		.end_cpu_access = (void*)hyper_dmabuf_ops_end_cpu_access,
-		.map_atomic = hyper_dmabuf_ops_kmap_atomic,
-		.unmap_atomic = hyper_dmabuf_ops_kunmap_atomic,
-		.map = hyper_dmabuf_ops_kmap,
-		.unmap = hyper_dmabuf_ops_kunmap,
-		.mmap = hyper_dmabuf_ops_mmap,
-		.vmap = hyper_dmabuf_ops_vmap,
-		.vunmap = hyper_dmabuf_ops_vunmap,
+	.attach = hyper_dmabuf_ops_attach,
+	.detach = hyper_dmabuf_ops_detach,
+	.map_dma_buf = hyper_dmabuf_ops_map,
+	.unmap_dma_buf = hyper_dmabuf_ops_unmap,
+	.release = hyper_dmabuf_ops_release,
+	.begin_cpu_access = hyper_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = hyper_dmabuf_ops_end_cpu_access,
+	.map_atomic = hyper_dmabuf_ops_kmap_atomic,
+	.unmap_atomic = hyper_dmabuf_ops_kunmap_atomic,
+	.map = hyper_dmabuf_ops_kmap,
+	.unmap = hyper_dmabuf_ops_kunmap,
+	.mmap = hyper_dmabuf_ops_mmap,
+	.vmap = hyper_dmabuf_ops_vmap,
+	.vunmap = hyper_dmabuf_ops_vunmap,
 };
 
 /* exporting dmabuf as fd */
-int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags)
+int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags)
 {
 	int fd = -1;
 
 	/* call hyper_dmabuf_export_dma_buf to create the dma_buf,
 	 * then bind an fd handle to it
 	 */
-	hyper_dmabuf_export_dma_buf(dinfo);
+	hyper_dmabuf_export_dma_buf(imported);
 
-	if (dinfo->dma_buf) {
-		fd = dma_buf_fd(dinfo->dma_buf, flags);
+	if (imported->dma_buf) {
+		fd = dma_buf_fd(imported->dma_buf, flags);
 	}
 
 	return fd;
 }
 
-void hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo)
+void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
 	exp_info.ops = &hyper_dmabuf_ops;
 
 	/* multiple of PAGE_SIZE, not considering offset */
-	exp_info.size = dinfo->sgt->nents * PAGE_SIZE;
-	exp_info.flags = /* not sure about flag */0;
-	exp_info.priv = dinfo;
+	exp_info.size = imported->sgt->nents * PAGE_SIZE;
+	exp_info.flags = /* not sure about flag */ 0;
+	exp_info.priv = imported;
 
-	dinfo->dma_buf = dma_buf_export(&exp_info);
+	imported->dma_buf = dma_buf_export(&exp_info);
 }
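
The export path above wraps the reconstructed SGT in a local dma_buf through
the standard export-info API. A minimal sketch of that idiom with the error
checks spelled out (the patch itself leaves dma_buf_export() unchecked; all
exp_info fields other than ops/size/priv stay at their defaults):

#include <linux/dma-buf.h>
#include <linux/err.h>

static int export_as_fd_sketch(struct imported_sgt_info *imported, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *buf;

	exp_info.ops = &hyper_dmabuf_ops;
	/* whole pages only; the first-page offset is not subtracted */
	exp_info.size = imported->sgt->nents * PAGE_SIZE;
	exp_info.priv = imported;

	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	imported->dma_buf = buf;
	return dma_buf_fd(buf, flags);	/* installs and returns an fd */
}
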
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
index 8c06fc6..c5505a4 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
@@ -25,8 +25,8 @@
 #ifndef __HYPER_DMABUF_OPS_H__
 #define __HYPER_DMABUF_OPS_H__
 
-int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags);
+int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags);
 
-void hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo);
+void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported);
 
 #endif /* __HYPER_DMABUF_OPS_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
index 39c9dee..36e888c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
@@ -32,16 +32,12 @@
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_id.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 #define HYPER_DMABUF_SIZE(nents, first_offset, last_len) \
 	((nents)*PAGE_SIZE - (first_offset) - PAGE_SIZE + (last_len))
 
-int hyper_dmabuf_query_exported(struct hyper_dmabuf_sgt_info *sgt_info,
+int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
 				int query, unsigned long* info)
 {
-	int n;
-
 	switch (query)
 	{
 		case HYPER_DMABUF_QUERY_TYPE:
@@ -50,45 +46,46 @@ int hyper_dmabuf_query_exported(struct hyper_dmabuf_sgt_info *sgt_info,
 
 		/* exporting domain of this specific dmabuf */
 		case HYPER_DMABUF_QUERY_EXPORTER:
-			*info = HYPER_DMABUF_DOM_ID(sgt_info->hid);
+			*info = HYPER_DMABUF_DOM_ID(exported->hid);
 			break;
 
 		/* importing domain of this specific dmabuf */
 		case HYPER_DMABUF_QUERY_IMPORTER:
-			*info = sgt_info->hyper_dmabuf_rdomain;
+			*info = exported->rdomid;
 			break;
 
 		/* size of dmabuf in bytes */
 		case HYPER_DMABUF_QUERY_SIZE:
-			*info = sgt_info->dma_buf->size;
+			*info = exported->dma_buf->size;
 			break;
 
 		/* whether the buffer is used by importer */
 		case HYPER_DMABUF_QUERY_BUSY:
-			*info = (sgt_info->importer_exported == 0) ? false : true;
+			*info = (exported->active > 0);
 			break;
 
 		/* whether the buffer is unexported */
 		case HYPER_DMABUF_QUERY_UNEXPORTED:
-			*info = !sgt_info->valid;
+			*info = !exported->valid;
 			break;
 
 		/* whether the buffer is scheduled to be unexported */
 		case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED:
-			*info = !sgt_info->unexport_scheduled;
+			*info = exported->unexport_sched;
 			break;
 
 		/* size of private info attached to buffer */
 		case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
-			*info = sgt_info->sz_priv;
+			*info = exported->sz_priv;
 			break;
 
 		/* copy private info attached to buffer */
 		case HYPER_DMABUF_QUERY_PRIV_INFO:
-			if (sgt_info->sz_priv > 0) {
+			if (exported->sz_priv > 0) {
+				int n;
 				n = copy_to_user((void __user*) *info,
-						sgt_info->priv,
-						sgt_info->sz_priv);
+						exported->priv,
+						exported->sz_priv);
 				if (n != 0)
 					return -EINVAL;
 			}
@@ -102,11 +99,9 @@ int hyper_dmabuf_query_exported(struct hyper_dmabuf_sgt_info *sgt_info,
 }
 
 
-int hyper_dmabuf_query_imported(struct hyper_dmabuf_imported_sgt_info *imported_sgt_info,
+int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
 				int query, unsigned long *info)
 {
-	int n;
-
 	switch (query)
 	{
 		case HYPER_DMABUF_QUERY_TYPE:
@@ -115,50 +110,51 @@ int hyper_dmabuf_query_imported(struct hyper_dmabuf_imported_sgt_info *imported_
 
 		/* exporting domain of this specific dmabuf */
 		case HYPER_DMABUF_QUERY_EXPORTER:
-			*info = HYPER_DMABUF_DOM_ID(imported_sgt_info->hid);
+			*info = HYPER_DMABUF_DOM_ID(imported->hid);
 			break;
 
 		/* importing domain of this specific dmabuf */
 		case HYPER_DMABUF_QUERY_IMPORTER:
-			*info = hyper_dmabuf_private.domid;
+			*info = hy_drv_priv->domid;
 			break;
 
 		/* size of dmabuf in bytes */
 		case HYPER_DMABUF_QUERY_SIZE:
-			if (imported_sgt_info->dma_buf) {
+			if (imported->dma_buf) {
 				/* if local dma_buf is created (if it's ever mapped),
 				 * retrieve it directly from struct dma_buf *
 				 */
-				*info = imported_sgt_info->dma_buf->size;
+				*info = imported->dma_buf->size;
 			} else {
 				/* calculate it from the given nents, frst_ofst and last_len */
-				*info = HYPER_DMABUF_SIZE(imported_sgt_info->nents,
-							  imported_sgt_info->frst_ofst,
-							  imported_sgt_info->last_len);
+				*info = HYPER_DMABUF_SIZE(imported->nents,
+							  imported->frst_ofst,
+							  imported->last_len);
 			}
 			break;
 
 		/* whether the buffer is used or not */
 		case HYPER_DMABUF_QUERY_BUSY:
 			/* checks if it's used by importer */
-			*info = (imported_sgt_info->num_importers > 0) ? true : false;
+			*info = (imported->importers > 0);
 			break;
 
 		/* whether the buffer is unexported */
 		case HYPER_DMABUF_QUERY_UNEXPORTED:
-			*info = !imported_sgt_info->valid;
+			*info = !imported->valid;
 			break;
 		/* size of private info attached to buffer */
 		case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
-			*info = imported_sgt_info->sz_priv;
+			*info = imported->sz_priv;
 			break;
 
 		/* copy private info attached to buffer */
 		case HYPER_DMABUF_QUERY_PRIV_INFO:
-			if (imported_sgt_info->sz_priv > 0) {
+			if (imported->sz_priv > 0) {
+				int n;
 				n = copy_to_user((void __user*) *info,
-						imported_sgt_info->priv,
-						imported_sgt_info->sz_priv);
+						imported->priv,
+						imported->sz_priv);
 				if (n != 0)
 					return -EINVAL;
 			}
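
As a quick sanity check on the HYPER_DMABUF_SIZE() macro defined earlier in
this file: with 4 KiB pages, nents = 3, frst_ofst = 512 and last_len = 1024
give 3*4096 - 512 - 4096 + 1024 = 8704 bytes, i.e. the partial first page
(3584) plus the full middle page (4096) plus the partial last page (1024).
In code form (BUILD_BUG_ON needs function scope, e.g. somewhere in an init
path, and assumes PAGE_SIZE == 4096):

	/* fires at compile time if the size formula ever drifts */
	BUILD_BUG_ON(HYPER_DMABUF_SIZE(3, 512, 1024) != 3584 + 4096 + 1024);
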
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h
index 7bbb322..65ae738 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h
@@ -1,10 +1,10 @@
 #ifndef __HYPER_DMABUF_QUERY_H__
 #define __HYPER_DMABUF_QUERY_H__
 
-int hyper_dmabuf_query_imported(struct hyper_dmabuf_imported_sgt_info *imported_sgt_info,
+int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
 				int query, unsigned long *info);
 
-int hyper_dmabuf_query_exported(struct hyper_dmabuf_sgt_info *sgt_info,
+int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
 				int query, unsigned long *info);
 
 #endif // __HYPER_DMABUF_QUERY_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
index 9004406..01ec98c 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -39,8 +39,6 @@
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_sgl_proc.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 /* Whenever importer does dma operations from remote domain,
  * a notification is sent to the exporter so that exporter
  * issues equivalent dma operation on the original dma buf
@@ -58,7 +56,7 @@ extern struct hyper_dmabuf_private hyper_dmabuf_private;
  */
 int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 {
-	struct hyper_dmabuf_sgt_info *sgt_info;
+	struct exported_sgt_info *exported;
 	struct sgt_list *sgtl;
 	struct attachment_list *attachl;
 	struct kmap_vaddr_list *va_kmapl;
@@ -66,10 +64,10 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 	int ret;
 
 	/* find a corresponding SGT for the id */
-	sgt_info = hyper_dmabuf_find_exported(hid);
+	exported = hyper_dmabuf_find_exported(hid);
 
-	if (!sgt_info) {
-		dev_err(hyper_dmabuf_private.device,
+	if (!exported) {
+		dev_err(hy_drv_priv->dev,
 			"dmabuf remote sync::can't find exported list\n");
 		return -ENOENT;
 	}
@@ -79,84 +77,84 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 		attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
 
 		if (!attachl) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_ATTACH\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_ATTACH\n");
 			return -ENOMEM;
 		}
 
-		attachl->attach = dma_buf_attach(sgt_info->dma_buf,
-						 hyper_dmabuf_private.device);
+		attachl->attach = dma_buf_attach(exported->dma_buf,
+						 hy_drv_priv->dev);
 
 		if (!attachl->attach) {
 			kfree(attachl);
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_ATTACH\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_ATTACH\n");
 			return -ENOMEM;
 		}
 
-		list_add(&attachl->list, &sgt_info->active_attached->list);
+		list_add(&attachl->list, &exported->active_attached->list);
 		break;
 
 	case HYPER_DMABUF_OPS_DETACH:
-		if (list_empty(&sgt_info->active_attached->list)) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_DETACH\n");
-			dev_err(hyper_dmabuf_private.device,
+		if (list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_DETACH\n");
+			dev_err(hy_drv_priv->dev,
 				"no more dmabuf attachment left to be detached\n");
 			return -EFAULT;
 		}
 
-		attachl = list_first_entry(&sgt_info->active_attached->list,
+		attachl = list_first_entry(&exported->active_attached->list,
 					   struct attachment_list, list);
 
-		dma_buf_detach(sgt_info->dma_buf, attachl->attach);
+		dma_buf_detach(exported->dma_buf, attachl->attach);
 		list_del(&attachl->list);
 		kfree(attachl);
 		break;
 
 	case HYPER_DMABUF_OPS_MAP:
-		if (list_empty(&sgt_info->active_attached->list)) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
-			dev_err(hyper_dmabuf_private.device,
+		if (list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
+			dev_err(hy_drv_priv->dev,
 				"no more dmabuf attachment left to be mapped\n");
 			return -EFAULT;
 		}
 
-		attachl = list_first_entry(&sgt_info->active_attached->list,
+		attachl = list_first_entry(&exported->active_attached->list,
 					   struct attachment_list, list);
 
 		sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
 
 		if (!sgtl) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
 			return -ENOMEM;
 		}
 
 		sgtl->sgt = dma_buf_map_attachment(attachl->attach, DMA_BIDIRECTIONAL);
 		if (!sgtl->sgt) {
 			kfree(sgtl);
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
 			return -ENOMEM;
 		}
-		list_add(&sgtl->list, &sgt_info->active_sgts->list);
+		list_add(&sgtl->list, &exported->active_sgts->list);
 		break;
 
 	case HYPER_DMABUF_OPS_UNMAP:
-		if (list_empty(&sgt_info->active_sgts->list) ||
-		    list_empty(&sgt_info->active_attached->list)) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_UNMAP\n");
-			dev_err(hyper_dmabuf_private.device,
+		if (list_empty(&exported->active_sgts->list) ||
+		    list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_UNMAP\n");
+			dev_err(hy_drv_priv->dev,
 				"no more SGT or attachment left to be unmapped\n");
 			return -EFAULT;
 		}
 
-		attachl = list_first_entry(&sgt_info->active_attached->list,
+		attachl = list_first_entry(&exported->active_attached->list,
 					   struct attachment_list, list);
-		sgtl = list_first_entry(&sgt_info->active_sgts->list,
+		sgtl = list_first_entry(&exported->active_sgts->list,
 					struct sgt_list, list);
 
 		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
@@ -166,30 +164,30 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 		break;
 
 	case HYPER_DMABUF_OPS_RELEASE:
-		dev_dbg(hyper_dmabuf_private.device,
+		dev_dbg(hy_drv_priv->dev,
 			"Buffer {id:%d key:%d %d %d} released, references left: %d\n",
-			 sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-			 sgt_info->hid.rng_key[2], sgt_info->importer_exported -1);
+			 exported->hid.id, exported->hid.rng_key[0], exported->hid.rng_key[1],
+			 exported->hid.rng_key[2], exported->active - 1);
 
-                sgt_info->importer_exported--;
+		exported->active--;
 		/* If there are still importers just break; if not, continue with final cleanup */
-		if (sgt_info->importer_exported)
+		if (exported->active)
 			break;
 
 		/*
 		 * Importer just released buffer fd, check if there is any other importer still using it.
 		 * If not and buffer was unexported, clean up shared data and remove that buffer.
 		 */
-		dev_dbg(hyper_dmabuf_private.device,
+		dev_dbg(hy_drv_priv->dev,
 			"Buffer {id:%d key:%d %d %d} final released\n",
-			sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
-			sgt_info->hid.rng_key[2]);
+			exported->hid.id, exported->hid.rng_key[0], exported->hid.rng_key[1],
+			exported->hid.rng_key[2]);
 
-		if (!sgt_info->valid && !sgt_info->importer_exported &&
-		    !sgt_info->unexport_scheduled) {
-			hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
+		if (!exported->valid && !exported->active &&
+		    !exported->unexport_sched) {
+			hyper_dmabuf_cleanup_sgt_info(exported, false);
 			hyper_dmabuf_remove_exported(hid);
-			kfree(sgt_info);
+			kfree(exported);
 			/* store hyper_dmabuf_id in the list for reuse */
 			store_reusable_hid(hid);
 		}
@@ -197,19 +195,19 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 		break;
 
 	case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
-		ret = dma_buf_begin_cpu_access(sgt_info->dma_buf, DMA_BIDIRECTIONAL);
+		ret = dma_buf_begin_cpu_access(exported->dma_buf, DMA_BIDIRECTIONAL);
 		if (ret) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
 			return ret;
 		}
 		break;
 
 	case HYPER_DMABUF_OPS_END_CPU_ACCESS:
-		ret = dma_buf_end_cpu_access(sgt_info->dma_buf, DMA_BIDIRECTIONAL);
+		ret = dma_buf_end_cpu_access(exported->dma_buf, DMA_BIDIRECTIONAL);
 		if (ret) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
 			return ret;
 		}
 		break;
@@ -218,49 +216,49 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 	case HYPER_DMABUF_OPS_KMAP:
 		va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL);
 		if (!va_kmapl) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
 			return -ENOMEM;
 		}
 
 		/* dummy kmapping of 1 page */
 		if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC)
-			va_kmapl->vaddr = dma_buf_kmap_atomic(sgt_info->dma_buf, 1);
+			va_kmapl->vaddr = dma_buf_kmap_atomic(exported->dma_buf, 1);
 		else
-			va_kmapl->vaddr = dma_buf_kmap(sgt_info->dma_buf, 1);
+			va_kmapl->vaddr = dma_buf_kmap(exported->dma_buf, 1);
 
 		if (!va_kmapl->vaddr) {
 			kfree(va_kmapl);
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
 			return -ENOMEM;
 		}
-		list_add(&va_kmapl->list, &sgt_info->va_kmapped->list);
+		list_add(&va_kmapl->list, &exported->va_kmapped->list);
 		break;
 
 	case HYPER_DMABUF_OPS_KUNMAP_ATOMIC:
 	case HYPER_DMABUF_OPS_KUNMAP:
-		if (list_empty(&sgt_info->va_kmapped->list)) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
-			dev_err(hyper_dmabuf_private.device,
+		if (list_empty(&exported->va_kmapped->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			dev_err(hy_drv_priv->dev,
 				"no more dmabuf VA to be freed\n");
 			return -EFAULT;
 		}
 
-		va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
+		va_kmapl = list_first_entry(&exported->va_kmapped->list,
 					    struct kmap_vaddr_list, list);
 		if (!va_kmapl->vaddr) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
 			return PTR_ERR(va_kmapl->vaddr);
 		}
 
 		/* unmapping 1 page */
 		if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC)
-			dma_buf_kunmap_atomic(sgt_info->dma_buf, 1, va_kmapl->vaddr);
+			dma_buf_kunmap_atomic(exported->dma_buf, 1, va_kmapl->vaddr);
 		else
-			dma_buf_kunmap(sgt_info->dma_buf, 1, va_kmapl->vaddr);
+			dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr);
 
 		list_del(&va_kmapl->list);
 		kfree(va_kmapl);
@@ -269,48 +267,48 @@ int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
 	case HYPER_DMABUF_OPS_MMAP:
 		/* currently not supported: looking for a way to create
 		 * a dummy vma */
-		dev_warn(hyper_dmabuf_private.device,
-			 "dmabuf remote sync::sychronized mmap is not supported\n");
+		dev_warn(hy_drv_priv->dev,
+			 "remote sync::synchronized mmap is not supported\n");
 		break;
 
 	case HYPER_DMABUF_OPS_VMAP:
 		va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL);
 
 		if (!va_vmapl) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VMAP\n");
 			return -ENOMEM;
 		}
 
 		/* dummy vmapping */
-		va_vmapl->vaddr = dma_buf_vmap(sgt_info->dma_buf);
+		va_vmapl->vaddr = dma_buf_vmap(exported->dma_buf);
 
 		if (!va_vmapl->vaddr) {
 			kfree(va_vmapl);
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VMAP\n");
 			return -ENOMEM;
 		}
-		list_add(&va_vmapl->list, &sgt_info->va_vmapped->list);
+		list_add(&va_vmapl->list, &exported->va_vmapped->list);
 		break;
 
 	case HYPER_DMABUF_OPS_VUNMAP:
-		if (list_empty(&sgt_info->va_vmapped->list)) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VUNMAP\n");
-			dev_err(hyper_dmabuf_private.device,
+		if (list_empty(&exported->va_vmapped->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
+			dev_err(hy_drv_priv->dev,
 				"no more dmabuf VA to be freed\n");
 			return -EFAULT;
 		}
-		va_vmapl = list_first_entry(&sgt_info->va_vmapped->list,
+		va_vmapl = list_first_entry(&exported->va_vmapped->list,
 					struct vmap_vaddr_list, list);
 		if (!va_vmapl || va_vmapl->vaddr == NULL) {
-			dev_err(hyper_dmabuf_private.device,
-				"dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VUNMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
 			return -EFAULT;
 		}
 
-		dma_buf_vunmap(sgt_info->dma_buf, va_vmapl->vaddr);
+		dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
 
 		list_del(&va_vmapl->list);
 		kfree(va_vmapl);
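
For reference, the RELEASE path above reduces to a refcount-and-flags
check. A minimal sketch using the renamed fields (exported->active,
exported->valid, exported->unexport_sched) and a hypothetical helper
name; illustrative only, not part of the patch:

	/* Sketch: does this RELEASE trigger final cleanup?
	 * True only when the last importer dropped its fd, the buffer
	 * was already unexported (valid == false) and no delayed
	 * unexport work is still pending.
	 */
	static bool release_is_final(struct exported_sgt_info *exported)
	{
		exported->active--;	/* one importer released its fd */

		if (exported->active)	/* other importers remain */
			return false;

		return !exported->valid && !exported->unexport_sched;
	}
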
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
index 691a714..315c354 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
@@ -32,8 +32,6 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/dma-buf.h>
-#include <xen/grant_table.h>
-#include <asm/xen/page.h>
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_sgl_proc.h"
@@ -41,8 +39,6 @@
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_list.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 #define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
 
 int dmabuf_refcount(struct dma_buf *dma_buf)
@@ -66,60 +62,68 @@ static int hyper_dmabuf_get_num_pgs(struct sg_table *sgt)
 	sgl = sgt->sgl;
 
 	length = sgl->length - PAGE_SIZE + sgl->offset;
-	num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE); /* round-up */
+
+	/* round-up */
+	num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE);
 
 	for (i = 1; i < sgt->nents; i++) {
 		sgl = sg_next(sgl);
-		num_pages += ((sgl->length + PAGE_SIZE - 1) / PAGE_SIZE); /* round-up */
+
+		/* round-up */
+		num_pages += ((sgl->length + PAGE_SIZE - 1) /
+			     PAGE_SIZE);
 	}
 
 	return num_pages;
 }
 
 /* extract pages directly from struct sg_table */
-struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
+struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
 {
-	struct hyper_dmabuf_pages_info *pinfo;
+	struct pages_info *pg_info;
 	int i, j, k;
 	int length;
 	struct scatterlist *sgl;
 
-	pinfo = kmalloc(sizeof(*pinfo), GFP_KERNEL);
-	if (!pinfo)
+	pg_info = kmalloc(sizeof(*pg_info), GFP_KERNEL);
+	if (!pg_info)
 		return NULL;
 
-	pinfo->pages = kmalloc(sizeof(struct page *)*hyper_dmabuf_get_num_pgs(sgt), GFP_KERNEL);
-	if (!pinfo->pages) {
-		kfree(pinfo);
+	pg_info->pgs = kmalloc(sizeof(struct page *) *
+			       hyper_dmabuf_get_num_pgs(sgt),
+			       GFP_KERNEL);
+
+	if (!pg_info->pgs) {
+		kfree(pg_info);
 		return NULL;
 	}
 
 	sgl = sgt->sgl;
 
-	pinfo->nents = 1;
-	pinfo->frst_ofst = sgl->offset;
-	pinfo->pages[0] = sg_page(sgl);
+	pg_info->nents = 1;
+	pg_info->frst_ofst = sgl->offset;
+	pg_info->pgs[0] = sg_page(sgl);
 	length = sgl->length - PAGE_SIZE + sgl->offset;
 	i = 1;
 
 	while (length > 0) {
-		pinfo->pages[i] = nth_page(sg_page(sgl), i);
+		pg_info->pgs[i] = nth_page(sg_page(sgl), i);
 		length -= PAGE_SIZE;
-		pinfo->nents++;
+		pg_info->nents++;
 		i++;
 	}
 
 	for (j = 1; j < sgt->nents; j++) {
 		sgl = sg_next(sgl);
-		pinfo->pages[i++] = sg_page(sgl);
+		pg_info->pgs[i++] = sg_page(sgl);
 		length = sgl->length - PAGE_SIZE;
-		pinfo->nents++;
+		pg_info->nents++;
 		k = 1;
 
 		while (length > 0) {
-			pinfo->pages[i++] = nth_page(sg_page(sgl), k++);
+			pg_info->pgs[i++] = nth_page(sg_page(sgl), k++);
 			length -= PAGE_SIZE;
-			pinfo->nents++;
+			pg_info->nents++;
 		}
 	}
 
@@ -127,13 +131,13 @@ struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
 	 * length at that point will be 0 or negative,
 	 * so to calculate last page size just add it to PAGE_SIZE
 	 */
-	pinfo->last_len = PAGE_SIZE + length;
+	pg_info->last_len = PAGE_SIZE + length;
 
-	return pinfo;
+	return pg_info;
 }
 
 /* create sg_table with given pages and other parameters */
-struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
+struct sg_table* hyper_dmabuf_create_sgt(struct page **pgs,
 					 int frst_ofst, int last_len, int nents)
 {
 	struct sg_table *sgt;
@@ -157,31 +161,32 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
 
 	sgl = sgt->sgl;
 
-	sg_set_page(sgl, pages[0], PAGE_SIZE-frst_ofst, frst_ofst);
+	sg_set_page(sgl, pgs[0], PAGE_SIZE-frst_ofst, frst_ofst);
 
 	for (i=1; i<nents-1; i++) {
 		sgl = sg_next(sgl);
-		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
+		sg_set_page(sgl, pgs[i], PAGE_SIZE, 0);
 	}
 
 	if (nents > 1) /* more than one page */ {
 		sgl = sg_next(sgl);
-		sg_set_page(sgl, pages[i], last_len, 0);
+		sg_set_page(sgl, pgs[i], last_len, 0);
 	}
 
 	return sgt;
 }
 
-int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int force)
+int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
+				  int force)
 {
 	struct sgt_list *sgtl;
 	struct attachment_list *attachl;
 	struct kmap_vaddr_list *va_kmapl;
 	struct vmap_vaddr_list *va_vmapl;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+	struct hyper_dmabuf_backend_ops *ops = hy_drv_priv->backend_ops;
 
-	if (!sgt_info) {
-		dev_err(hyper_dmabuf_private.device, "invalid hyper_dmabuf_id\n");
+	if (!exported) {
+		dev_err(hy_drv_priv->dev, "invalid hyper_dmabuf_id\n");
 		return -EINVAL;
 	}
 
@@ -190,35 +195,37 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
 	 * side.
 	 */
 	if (!force &&
-	    sgt_info->importer_exported) {
-		dev_warn(hyper_dmabuf_private.device, "dma-buf is used by importer\n");
+	    exported->active) {
+		dev_warn(hy_drv_priv->dev,
+			 "dma-buf is used by importer\n");
+
 		return -EPERM;
 	}
 
 	/* force == 1 is not recommended */
-	while (!list_empty(&sgt_info->va_kmapped->list)) {
-		va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
+	while (!list_empty(&exported->va_kmapped->list)) {
+		va_kmapl = list_first_entry(&exported->va_kmapped->list,
 					    struct kmap_vaddr_list, list);
 
-		dma_buf_kunmap(sgt_info->dma_buf, 1, va_kmapl->vaddr);
+		dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr);
 		list_del(&va_kmapl->list);
 		kfree(va_kmapl);
 	}
 
-	while (!list_empty(&sgt_info->va_vmapped->list)) {
-		va_vmapl = list_first_entry(&sgt_info->va_vmapped->list,
+	while (!list_empty(&exported->va_vmapped->list)) {
+		va_vmapl = list_first_entry(&exported->va_vmapped->list,
 					    struct vmap_vaddr_list, list);
 
-		dma_buf_vunmap(sgt_info->dma_buf, va_vmapl->vaddr);
+		dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
 		list_del(&va_vmapl->list);
 		kfree(va_vmapl);
 	}
 
-	while (!list_empty(&sgt_info->active_sgts->list)) {
-		attachl = list_first_entry(&sgt_info->active_attached->list,
+	while (!list_empty(&exported->active_sgts->list)) {
+		attachl = list_first_entry(&exported->active_attached->list,
 					   struct attachment_list, list);
 
-		sgtl = list_first_entry(&sgt_info->active_sgts->list,
+		sgtl = list_first_entry(&exported->active_sgts->list,
 					struct sgt_list, list);
 
 		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
@@ -227,35 +234,35 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
 		kfree(sgtl);
 	}
 
-	while (!list_empty(&sgt_info->active_sgts->list)) {
-		attachl = list_first_entry(&sgt_info->active_attached->list,
+	while (!list_empty(&exported->active_attached->list)) {
+		attachl = list_first_entry(&exported->active_attached->list,
 					   struct attachment_list, list);
 
-		dma_buf_detach(sgt_info->dma_buf, attachl->attach);
+		dma_buf_detach(exported->dma_buf, attachl->attach);
 		list_del(&attachl->list);
 		kfree(attachl);
 	}
 
 	/* Start cleanup of buffer in reverse order to exporting */
-	ops->unshare_pages(&sgt_info->refs_info, sgt_info->nents);
+	ops->unshare_pages(&exported->refs_info, exported->nents);
 
 	/* unmap dma-buf */
-	dma_buf_unmap_attachment(sgt_info->active_attached->attach,
-				 sgt_info->active_sgts->sgt,
+	dma_buf_unmap_attachment(exported->active_attached->attach,
+				 exported->active_sgts->sgt,
 				 DMA_BIDIRECTIONAL);
 
 	/* detach dma-buf */
-	dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
+	dma_buf_detach(exported->dma_buf, exported->active_attached->attach);
 
 	/* close connection to dma-buf completely */
-	dma_buf_put(sgt_info->dma_buf);
-	sgt_info->dma_buf = NULL;
-
-	kfree(sgt_info->active_sgts);
-	kfree(sgt_info->active_attached);
-	kfree(sgt_info->va_kmapped);
-	kfree(sgt_info->va_vmapped);
-	kfree(sgt_info->priv);
+	dma_buf_put(exported->dma_buf);
+	exported->dma_buf = NULL;
+
+	kfree(exported->active_sgts);
+	kfree(exported->active_attached);
+	kfree(exported->va_kmapped);
+	kfree(exported->va_vmapped);
+	kfree(exported->priv);
 
 	return 0;
 }
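
The cleanup routine above mirrors the export path in reverse. A
condensed sketch of that ordering, with hypothetical drain_*() helpers
standing in for the while loops (illustrative only):

	/* Sketch: teardown order for an exported buffer */
	static void cleanup_order(struct exported_sgt_info *exported,
				  struct hyper_dmabuf_backend_ops *ops)
	{
		drain_kmaps(exported);		/* dma_buf_kunmap() each entry */
		drain_vmaps(exported);		/* dma_buf_vunmap() each entry */
		drain_sgts(exported);		/* dma_buf_unmap_attachment() each */
		drain_attachments(exported);	/* dma_buf_detach() each */

		/* revoke grant references shared with the remote domain */
		ops->unshare_pages(&exported->refs_info, exported->nents);

		/* undo the exporter's own mapping and attachment, then
		 * drop the last local reference to the dma-buf */
		dma_buf_unmap_attachment(exported->active_attached->attach,
					 exported->active_sgts->sgt,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(exported->dma_buf,
			       exported->active_attached->attach);
		dma_buf_put(exported->dma_buf);
	}
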
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
index 237ccf5..930bade 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
@@ -28,13 +28,15 @@
 int dmabuf_refcount(struct dma_buf *dma_buf);
 
 /* extract pages directly from struct sg_table */
-struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt);
+struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt);
 
 /* create sg_table with given pages and other parameters */
-struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
-                                int frst_ofst, int last_len, int nents);
+struct sg_table* hyper_dmabuf_create_sgt(struct page **pgs,
+					 int frst_ofst, int last_len,
+					 int nents);
 
-int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int force);
+int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
+				  int force);
 
 void hyper_dmabuf_free_sgt(struct sg_table *sgt);
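
Taken together, hyper_dmabuf_ext_pgs() and hyper_dmabuf_create_sgt()
form a round trip: the exporter flattens an sg_table into a page array
plus (frst_ofst, last_len, nents), and the importer rebuilds an
equivalent sg_table from those values. A minimal usage sketch (error
handling trimmed; orig_sgt is assumed to be a valid table):

	struct pages_info *pg_info;
	struct sg_table *rebuilt;

	/* exporter side: flatten */
	pg_info = hyper_dmabuf_ext_pgs(orig_sgt);
	if (!pg_info)
		return -ENOMEM;

	/* ...pg_info->pgs travel to the importer as grant refs... */

	/* importer side: rebuild */
	rebuilt = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst,
					  pg_info->last_len, pg_info->nents);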
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
index 6f929f2..8a612d1 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
@@ -50,24 +50,20 @@ struct vmap_vaddr_list {
 };
 
 /* Exporter builds pages_info before sharing pages */
-struct hyper_dmabuf_pages_info {
+struct pages_info {
         int frst_ofst; /* offset of data in the first page */
         int last_len; /* length of data in the last page */
         int nents; /* # of pages */
-        struct page **pages; /* pages that contains reference numbers of shared pages*/
+        struct page **pgs; /* pages that contain reference numbers of shared pages */
 };
 
 
-/* Both importer and exporter use this structure to point to sg lists
- *
- * Exporter stores references to sgt in a hash table
+/* Exporter stores references to sgt in a hash table
  * Exporter keeps these references for synchronization and tracking purposes
- *
- * Importer use this structure exporting to other drivers in the same domain
  */
-struct hyper_dmabuf_sgt_info {
+struct exported_sgt_info {
         hyper_dmabuf_id_t hid; /* unique id to reference dmabuf in remote domain */
-	int hyper_dmabuf_rdomain; /* domain importing this sgt */
+	int rdomid; /* domain importing this sgt */
 
 	struct dma_buf *dma_buf; /* needed to store this for freeing it later */
 	int nents;
@@ -79,10 +75,10 @@ struct hyper_dmabuf_sgt_info {
 	struct vmap_vaddr_list *va_vmapped;
 
 	bool valid; /* set to 0 once unexported. Needed to prevent further mapping by importer */
-	int importer_exported; /* exported locally on importer's side */
+	int active; /* locally shared on importer's side */
 	void *refs_info; /* hypervisor-specific info for the references */
-	struct delayed_work unexport_work;
-	bool unexport_scheduled;
+	struct delayed_work unexport;
+	bool unexport_sched;
 
 	/* owner of buffer
 	 * TODO: that is naive as buffer may be reused by
@@ -99,7 +95,7 @@ struct hyper_dmabuf_sgt_info {
 /* Importer store references (before mapping) on shared pages
  * Importer store these references in the table and map it in
  * its own memory map once userspace asks for reference for the buffer */
-struct hyper_dmabuf_imported_sgt_info {
+struct imported_sgt_info {
 	hyper_dmabuf_id_t hid; /* unique id for shared dmabuf imported */
 
 	int ref_handle; /* reference number of top level addressing page of shared pages */
@@ -112,7 +108,7 @@ struct hyper_dmabuf_imported_sgt_info {
 
 	void *refs_info;
 	bool valid;
-	int num_importers;
+	int importers;
 
 	size_t sz_priv;
 	char *priv; /* device specific info (e.g. image's meta info?) */
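
Both structures are keyed by hyper_dmabuf_id_t which, as the debug
messages in this patch show, carries an integer id plus a three-word
random key, so identifying a buffer means matching all four values. A
minimal sketch of such a comparison (hypothetical helper; the driver's
actual lookups live behind hyper_dmabuf_list.h):

	static bool hid_equal(hyper_dmabuf_id_t a, hyper_dmabuf_id_t b)
	{
		return a.id == b.id &&
		       a.rng_key[0] == b.rng_key[0] &&
		       a.rng_key[1] == b.rng_key[1] &&
		       a.rng_key[2] == b.rng_key[2];
	}
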
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index 920ecf4..f70b4ea 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -45,8 +45,6 @@ static int export_req_id = 0;
 
 struct hyper_dmabuf_req req_pending = {0};
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 extern int xenstored_ready;
 
 static void xen_get_domid_delayed(struct work_struct *unused);
@@ -62,7 +60,9 @@ static int xen_comm_setup_data_dir(void)
 {
 	char buf[255];
 
-	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", hyper_dmabuf_private.domid);
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf",
+		hy_drv_priv->domid);
+
 	return xenbus_mkdir(XBT_NIL, buf, "");
 }
 
@@ -76,7 +76,9 @@ static int xen_comm_destroy_data_dir(void)
 {
 	char buf[255];
 
-	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", hyper_dmabuf_private.domid);
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf",
+		hy_drv_priv->domid);
+
 	return xenbus_rm(XBT_NIL, buf, "");
 }
 
@@ -91,20 +93,26 @@ static int xen_comm_expose_ring_details(int domid, int rdomid,
 	char buf[255];
 	int ret;
 
-	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", domid, rdomid);
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+		domid, rdomid);
+
 	ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref);
 
 	if (ret) {
-		dev_err(hyper_dmabuf_private.device,
-			"Failed to write xenbus entry %s: %d\n", buf, ret);
+		dev_err(hy_drv_priv->dev,
+			"Failed to write xenbus entry %s: %d\n",
+			buf, ret);
+
 		return ret;
 	}
 
 	ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port);
 
 	if (ret) {
-		dev_err(hyper_dmabuf_private.device,
-			"Failed to write xenbus entry %s: %d\n", buf, ret);
+		dev_err(hy_drv_priv->dev,
+			"Failed to write xenbus entry %s: %d\n",
+			buf, ret);
+
 		return ret;
 	}
 
@@ -114,25 +122,32 @@ static int xen_comm_expose_ring_details(int domid, int rdomid,
 /*
  * Queries details of ring exposed by remote domain.
  */
-static int xen_comm_get_ring_details(int domid, int rdomid, int *grefid, int *port)
+static int xen_comm_get_ring_details(int domid, int rdomid,
+				     int *grefid, int *port)
 {
 	char buf[255];
 	int ret;
 
-	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", rdomid, domid);
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+		rdomid, domid);
+
 	ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid);
 
 	if (ret <= 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"Failed to read xenbus entry %s: %d\n", buf, ret);
+		dev_err(hy_drv_priv->dev,
+			"Failed to read xenbus entry %s: %d\n",
+			buf, ret);
+
 		return ret;
 	}
 
 	ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port);
 
 	if (ret <= 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"Failed to read xenbus entry %s: %d\n", buf, ret);
+		dev_err(hy_drv_priv->dev,
+			"Failed to read xenbus entry %s: %d\n",
+			buf, ret);
+
 		return ret;
 	}
 
@@ -146,9 +161,8 @@ void xen_get_domid_delayed(struct work_struct *unused)
 
 	/* scheduling another if driver is still running
 	 * and xenstore has not been initialized */
-	if (hyper_dmabuf_private.exited == false &&
-	    likely(xenstored_ready == 0)) {
-		dev_dbg(hyper_dmabuf_private.device,
+	if (likely(xenstored_ready == 0)) {
+		dev_dbg(hy_drv_priv->dev,
 			"Xenstore is not quite ready yet. Will retry it in 500ms\n");
 		schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
 	} else {
@@ -163,14 +177,14 @@ void xen_get_domid_delayed(struct work_struct *unused)
 
 		/* try again since -1 is an invalid id for domain
 		 * (but only if driver is still running) */
-		if (hyper_dmabuf_private.exited == false && unlikely(domid == -1)) {
-			dev_dbg(hyper_dmabuf_private.device,
+		if (unlikely(domid == -1)) {
+			dev_dbg(hy_drv_priv->dev,
 				"domid==-1 is invalid. Will retry it in 500ms\n");
 			schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
 		} else {
-			dev_info(hyper_dmabuf_private.device,
+			dev_info(hy_drv_priv->dev,
 				"Successfully retrieved domid from Xenstore:%d\n", domid);
-			hyper_dmabuf_private.domid = domid;
+			hy_drv_priv->domid = domid;
 		}
 	}
 }
@@ -232,28 +246,30 @@ static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
 		return;
 	}
 
-	/* Check if we have importer ring for given remote domain alrady created */
+	/* Check if we have importer ring for given remote domain already
+	 * created */
+
 	ring_info = xen_comm_find_rx_ring(rdom);
 
-	/* Try to query remote domain exporter ring details - if that will
-	 * fail and we have importer ring that means remote domains has cleanup
-	 * its exporter ring, so our importer ring is no longer useful.
+	/* Try to query remote domain exporter ring details - if
+	 * that fails and we have an importer ring, that means the remote
+	 * domain has cleaned up its exporter ring, so our importer ring
+	 * is no longer useful.
 	 *
 	 * If querying details will succeed and we don't have importer ring,
-	 * it means that remote domain has setup it for us and we should connect
-	 * to it.
+	 * it means that remote domain has set it up for us and we should
+	 * connect to it.
 	 */
 
-
-	ret = xen_comm_get_ring_details(hyper_dmabuf_xen_get_domid(), rdom,
-					&grefid, &port);
+	ret = xen_comm_get_ring_details(hyper_dmabuf_xen_get_domid(),
+					rdom, &grefid, &port);
 
 	if (ring_info && ret != 0) {
-		dev_info(hyper_dmabuf_private.device,
+		dev_info(hy_drv_priv->dev,
 			 "Remote exporter closed, cleaninup importer\n");
 		hyper_dmabuf_xen_cleanup_rx_rbuf(rdom);
 	} else if (!ring_info && ret == 0) {
-		dev_info(hyper_dmabuf_private.device,
+		dev_info(hy_drv_priv->dev,
 			 "Registering importer\n");
 		hyper_dmabuf_xen_init_rx_rbuf(rdom);
 	}
@@ -274,7 +290,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 	ring_info = xen_comm_find_tx_ring(domid);
 
 	if (ring_info) {
-		dev_info(hyper_dmabuf_private.device,
+		dev_info(hy_drv_priv->dev,
 			 "tx ring ch to domid = %d already exist\ngref = %d, port = %d\n",
 		ring_info->rdomain, ring_info->gref_ring, ring_info->port);
 		return 0;
@@ -283,7 +299,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 	ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
 
 	if (!ring_info) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No more spae left\n");
 		return -ENOMEM;
 	}
@@ -313,9 +329,9 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 	alloc_unbound.dom = DOMID_SELF;
 	alloc_unbound.remote_dom = domid;
 	ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
-					&alloc_unbound);
+					  &alloc_unbound);
 	if (ret) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"Cannot allocate event channel\n");
 		kfree(ring_info);
 		return -EIO;
@@ -327,7 +343,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 					NULL, (void*) ring_info);
 
 	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"Failed to setup event channel\n");
 		close.port = alloc_unbound.port;
 		HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
@@ -343,7 +359,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 
 	mutex_init(&ring_info->lock);
 
-	dev_dbg(hyper_dmabuf_private.device,
+	dev_dbg(hy_drv_priv->dev,
 		"%s: allocated eventchannel gref %d  port: %d  irq: %d\n",
 		__func__,
 		ring_info->gref_ring,
@@ -364,7 +380,7 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
 	ring_info->watch.node = (const char*) kmalloc(sizeof(char) * 255, GFP_KERNEL);
 
 	if (!ring_info->watch.node) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No more space left\n");
 		kfree(ring_info);
 		return -ENOMEM;
@@ -414,7 +430,8 @@ void hyper_dmabuf_xen_cleanup_tx_rbuf(int domid)
 	if (!rx_ring_info)
 		return;
 
-	BACK_RING_INIT(&(rx_ring_info->ring_back), rx_ring_info->ring_back.sring, PAGE_SIZE);
+	BACK_RING_INIT(&(rx_ring_info->ring_back), rx_ring_info->ring_back.sring,
+		       PAGE_SIZE);
 }
 
 /* importer needs to know about shared page and port numbers for
@@ -436,25 +453,28 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 	ring_info = xen_comm_find_rx_ring(domid);
 
 	if (ring_info) {
-		dev_info(hyper_dmabuf_private.device,
-			 "rx ring ch from domid = %d already exist\n", ring_info->sdomain);
+		dev_info(hy_drv_priv->dev,
+			 "rx ring ch from domid = %d already exists\n",
+			 ring_info->sdomain);
+
 		return 0;
 	}
 
-
 	ret = xen_comm_get_ring_details(hyper_dmabuf_xen_get_domid(), domid,
 					&rx_gref, &rx_port);
 
 	if (ret) {
-		dev_err(hyper_dmabuf_private.device,
-			"Domain %d has not created exporter ring for current domain\n", domid);
+		dev_err(hy_drv_priv->dev,
+			"Domain %d has not created exporter ring for current domain\n",
+			domid);
+
 		return ret;
 	}
 
 	ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
 
 	if (!ring_info) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		return -ENOMEM;
 	}
@@ -465,7 +485,7 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 	map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL);
 
 	if (!map_ops) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		ret = -ENOMEM;
 		goto fail_no_map_ops;
@@ -476,21 +496,23 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 		goto fail_others;
 	}
 
-	gnttab_set_map_op(&map_ops[0], (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)),
+	gnttab_set_map_op(&map_ops[0],
+			  (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)),
 			  GNTMAP_host_map, rx_gref, domid);
 
-	gnttab_set_unmap_op(&ring_info->unmap_op, (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)),
+	gnttab_set_unmap_op(&ring_info->unmap_op,
+			    (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)),
 			    GNTMAP_host_map, -1);
 
 	ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
 	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device, "Cannot map ring\n");
+		dev_err(hy_drv_priv->dev, "Cannot map ring\n");
 		ret = -EFAULT;
 		goto fail_others;
 	}
 
 	if (map_ops[0].status) {
-		dev_err(hyper_dmabuf_private.device, "Ring mapping failed\n");
+		dev_err(hy_drv_priv->dev, "Ring mapping failed\n");
 		ret = -EFAULT;
 		goto fail_others;
 	} else {
@@ -512,7 +534,7 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
 
 	ring_info->irq = ret;
 
-	dev_dbg(hyper_dmabuf_private.device,
+	dev_dbg(hy_drv_priv->dev,
 		"%s: bound to eventchannel port: %d  irq: %d\n", __func__,
 		rx_port,
 		ring_info->irq);
@@ -569,7 +591,9 @@ void hyper_dmabuf_xen_cleanup_rx_rbuf(int domid)
 		return;
 
 	SHARED_RING_INIT(tx_ring_info->ring_front.sring);
-	FRONT_RING_INIT(&(tx_ring_info->ring_front), tx_ring_info->ring_front.sring, PAGE_SIZE);
+	FRONT_RING_INIT(&(tx_ring_info->ring_front),
+			tx_ring_info->ring_front.sring,
+			PAGE_SIZE);
 }
 
 #ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
@@ -587,20 +611,20 @@ static void xen_rx_ch_add_delayed(struct work_struct *unused)
 	char buf[128];
 	int i, dummy;
 
-	dev_dbg(hyper_dmabuf_private.device,
+	dev_dbg(hy_drv_priv->dev,
 		"Scanning new tx channel comming from another domain\n");
 
 	/* check other domains and schedule another work if driver
 	 * is still running and backend is valid
 	 */
-	if (hyper_dmabuf_private.exited == false &&
-	    hyper_dmabuf_private.backend_initialized == true) {
+	if (hy_drv_priv &&
+	    hy_drv_priv->initialized) {
 		for (i = DOMID_SCAN_START; i < DOMID_SCAN_END + 1; i++) {
-			if (i == hyper_dmabuf_private.domid)
+			if (i == hy_drv_priv->domid)
 				continue;
 
-			sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", i,
-				hyper_dmabuf_private.domid);
+			sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+				i, hy_drv_priv->domid);
 
 			ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", &dummy);
 
@@ -611,13 +635,14 @@ static void xen_rx_ch_add_delayed(struct work_struct *unused)
 				ret = hyper_dmabuf_xen_init_rx_rbuf(i);
 
 				if (!ret)
-					dev_info(hyper_dmabuf_private.device,
+					dev_info(hy_drv_priv->dev,
 						 "Finishing up setting up rx channel for domain %d\n", i);
 			}
 		}
 
 		/* check every 10 seconds */
-		schedule_delayed_work(&xen_rx_ch_auto_add_work, msecs_to_jiffies(10000));
+		schedule_delayed_work(&xen_rx_ch_auto_add_work,
+				      msecs_to_jiffies(10000));
 	}
 }
 
@@ -630,21 +655,21 @@ void xen_init_comm_env_delayed(struct work_struct *unused)
 	/* scheduling another work if driver is still running
 	 * and xenstore hasn't been initialized or dom_id hasn't
 	 * been correctly retrieved. */
-	if (hyper_dmabuf_private.exited == false &&
-	    likely(xenstored_ready == 0 ||
-	    hyper_dmabuf_private.domid == -1)) {
-		dev_dbg(hyper_dmabuf_private.device,
-			"Xenstore is not ready yet. Re-try this again in 500ms\n");
-		schedule_delayed_work(&xen_init_comm_env_work, msecs_to_jiffies(500));
+	if (likely(xenstored_ready == 0 ||
+	    hy_drv_priv->domid == -1)) {
+		dev_dbg(hy_drv_priv->dev,
+			"Xenstore not ready. Will re-try in 500ms\n");
+		schedule_delayed_work(&xen_init_comm_env_work,
+				      msecs_to_jiffies(500));
 	} else {
 		ret = xen_comm_setup_data_dir();
 		if (ret < 0) {
-			dev_err(hyper_dmabuf_private.device,
+			dev_err(hy_drv_priv->dev,
 				"Failed to create data dir in Xenstore\n");
 		} else {
-			dev_info(hyper_dmabuf_private.device,
-				"Successfully finished comm env initialization\n");
-			hyper_dmabuf_private.backend_initialized = true;
+			dev_info(hy_drv_priv->dev,
+				"Successfully finished comm env init\n");
+			hy_drv_priv->initialized = true;
 
 #ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
 			xen_rx_ch_add_delayed(NULL);
@@ -659,20 +684,21 @@ int hyper_dmabuf_xen_init_comm_env(void)
 
 	xen_comm_ring_table_init();
 
-	if (unlikely(xenstored_ready == 0 || hyper_dmabuf_private.domid == -1)) {
+	if (unlikely(xenstored_ready == 0 ||
+	    hy_drv_priv->domid == -1)) {
 		xen_init_comm_env_delayed(NULL);
 		return -1;
 	}
 
 	ret = xen_comm_setup_data_dir();
 	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"Failed to create data dir in Xenstore\n");
 	} else {
-		dev_info(hyper_dmabuf_private.device,
+		dev_info(hy_drv_priv->dev,
 			"Successfully finished comm env initialization\n");
 
-		hyper_dmabuf_private.backend_initialized = true;
+		hy_drv_priv->initialized = true;
 	}
 
 	return ret;
@@ -691,7 +717,8 @@ void hyper_dmabuf_xen_destroy_comm(void)
 	xen_comm_destroy_data_dir();
 }
 
-int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
+int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req,
+			      int wait)
 {
 	struct xen_comm_front_ring *ring;
 	struct hyper_dmabuf_req *new_req;
@@ -706,22 +733,21 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
 	/* find a ring info for the channel */
 	ring_info = xen_comm_find_tx_ring(domid);
 	if (!ring_info) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"Can't find ring info for the channel\n");
 		return -ENOENT;
 	}
 
-	mutex_lock(&ring_info->lock);
 
 	ring = &ring_info->ring_front;
 
 	do_gettimeofday(&tv_start);
 
 	while (RING_FULL(ring)) {
-		dev_dbg(hyper_dmabuf_private.device, "RING_FULL\n");
+		dev_dbg(hy_drv_priv->dev, "RING_FULL\n");
 
 		if (timeout == 0) {
-			dev_err(hyper_dmabuf_private.device,
+			dev_err(hy_drv_priv->dev,
 				"Timeout while waiting for an entry in the ring\n");
 			return -EIO;
 		}
@@ -731,15 +757,17 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
 
 	timeout = 1000;
 
+	mutex_lock(&ring_info->lock);
+
 	new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
 	if (!new_req) {
 		mutex_unlock(&ring_info->lock);
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"NULL REQUEST\n");
 		return -EIO;
 	}
 
-	req->request_id = xen_comm_next_req_id();
+	req->req_id = xen_comm_next_req_id();
 
 	/* update req_pending with current request */
 	memcpy(&req_pending, req, sizeof(req_pending));
@@ -756,7 +784,7 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
 
 	if (wait) {
 		while (timeout--) {
-			if (req_pending.status !=
+			if (req_pending.stat !=
 			    HYPER_DMABUF_REQ_NOT_RESPONDED)
 				break;
 			usleep_range(100, 120);
@@ -764,7 +792,7 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
 
 		if (timeout < 0) {
 			mutex_unlock(&ring_info->lock);
-			dev_err(hyper_dmabuf_private.device, "request timed-out\n");
+			dev_err(hy_drv_priv->dev, "request timed-out\n");
 			return -EBUSY;
 		}
 
@@ -781,10 +809,8 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
 		}
 
 		if (tv_diff.tv_sec != 0 && tv_diff.tv_usec > 16000)
-			dev_dbg(hyper_dmabuf_private.device, "send_req:time diff: %ld sec, %ld usec\n",
+			dev_dbg(hy_drv_priv->dev, "send_req:time diff: %ld sec, %ld usec\n",
 				tv_diff.tv_sec, tv_diff.tv_usec);
-
-		return req_pending.status;
 	}
 
 	mutex_unlock(&ring_info->lock);
@@ -808,7 +834,7 @@ static irqreturn_t back_ring_isr(int irq, void *info)
 	ring_info = (struct xen_comm_rx_ring_info *)info;
 	ring = &ring_info->ring_back;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
 
 	do {
 		rc = ring->req_cons;
@@ -828,13 +854,13 @@ static irqreturn_t back_ring_isr(int irq, void *info)
 				 * the requester
 				 */
 				memcpy(&resp, &req, sizeof(resp));
-				memcpy(RING_GET_RESPONSE(ring, ring->rsp_prod_pvt), &resp,
-							sizeof(resp));
+				memcpy(RING_GET_RESPONSE(ring, ring->rsp_prod_pvt),
+							 &resp, sizeof(resp));
 				ring->rsp_prod_pvt++;
 
-				dev_dbg(hyper_dmabuf_private.device,
+				dev_dbg(hy_drv_priv->dev,
 					"sending response to exporter for request id:%d\n",
-					resp.response_id);
+					resp.resp_id);
 
 				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
 
@@ -864,7 +890,7 @@ static irqreturn_t front_ring_isr(int irq, void *info)
 	ring_info = (struct xen_comm_tx_ring_info *)info;
 	ring = &ring_info->ring_front;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
 
 	do {
 		more_to_do = 0;
@@ -876,33 +902,33 @@ static irqreturn_t front_ring_isr(int irq, void *info)
 			 * in the response
 			 */
 
-			dev_dbg(hyper_dmabuf_private.device,
+			dev_dbg(hy_drv_priv->dev,
 				"getting response from importer\n");
 
-			if (req_pending.request_id == resp->response_id) {
-				req_pending.status = resp->status;
+			if (req_pending.req_id == resp->resp_id) {
+				req_pending.stat = resp->stat;
 			}
 
-			if (resp->status == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
+			if (resp->stat == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
 				/* parsing response */
 				ret = hyper_dmabuf_msg_parse(ring_info->rdomain,
 							(struct hyper_dmabuf_req *)resp);
 
 				if (ret < 0) {
-					dev_err(hyper_dmabuf_private.device,
+					dev_err(hy_drv_priv->dev,
 						"getting error while parsing response\n");
 				}
-			} else if (resp->status == HYPER_DMABUF_REQ_PROCESSED) {
+			} else if (resp->stat == HYPER_DMABUF_REQ_PROCESSED) {
 				/* for debugging dma_buf remote synchronization */
-				dev_dbg(hyper_dmabuf_private.device,
-					"original request = 0x%x\n", resp->command);
-				dev_dbg(hyper_dmabuf_private.device,
+				dev_dbg(hy_drv_priv->dev,
+					"original request = 0x%x\n", resp->cmd);
+				dev_dbg(hy_drv_priv->dev,
 					"Just got HYPER_DMABUF_REQ_PROCESSED\n");
-			} else if (resp->status == HYPER_DMABUF_REQ_ERROR) {
+			} else if (resp->stat == HYPER_DMABUF_REQ_ERROR) {
 				/* for debugging dma_buf remote synchronization */
-				dev_dbg(hyper_dmabuf_private.device,
-					"original request = 0x%x\n", resp->command);
-				dev_dbg(hyper_dmabuf_private.device,
+				dev_dbg(hy_drv_priv->dev,
+					"original request = 0x%x\n", resp->cmd);
+				dev_dbg(hy_drv_priv->dev,
 					"Just got HYPER_DMABUF_REQ_ERROR\n");
 			}
 		}
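
The send/response handshake above hinges on id matching: the sender
stamps req->req_id under ring_info->lock, keeps a copy in req_pending,
and front_ring_isr() completes the wait by copying resp->stat back
once resp_id matches. With wait set, the sender then polls up to 1000
times at roughly 100us intervals, i.e. about a 100ms timeout. A
condensed sketch of the pairing, using the field names as renamed by
this patch:

	/* sender, under ring_info->lock */
	req->req_id = xen_comm_next_req_id();
	memcpy(&req_pending, req, sizeof(req_pending));
	/* ...push request, notify remote, poll req_pending.stat... */

	/* front ring ISR, on response arrival */
	if (req_pending.req_id == resp->resp_id)
		req_pending.stat = resp->stat;
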
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
index 4708b49..7a8ec73 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
@@ -38,8 +38,6 @@
 #include "hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_xen_comm_list.h"
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING);
 DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING);
 
@@ -56,7 +54,7 @@ int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info)
 	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
 	if (!info_entry) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		return -ENOMEM;
 	}
@@ -76,7 +74,7 @@ int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info)
 	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
 
 	if (!info_entry) {
-		dev_err(hyper_dmabuf_private.device,
+		dev_err(hy_drv_priv->dev,
 			"No memory left to be allocated\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index 908eda8..424417d 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -36,8 +36,6 @@
 
 #define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
 
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
 /*
  * Creates 2 level page directory structure for referencing shared pages.
  * Top level page is a single page that contains up to 1024 refids that
@@ -98,7 +96,7 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
 	sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
 
 	if (!sh_pages_info) {
-		dev_err(hyper_dmabuf_private.device, "No more space left\n");
+		dev_err(hy_drv_priv->dev, "No more space left\n");
 		return -ENOMEM;
 	}
 
@@ -107,10 +105,10 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
 	/* share data pages in readonly mode for security */
 	for (i=0; i<nents; i++) {
 		lvl2_table[i] = gnttab_grant_foreign_access(domid,
-							    pfn_to_mfn(page_to_pfn(pages[i])),
-							    true /* read-only from remote domain */);
+					pfn_to_mfn(page_to_pfn(pages[i])),
+					true /* read-only from remote domain */);
 		if (lvl2_table[i] == -ENOSPC) {
-			dev_err(hyper_dmabuf_private.device, "No more space left in grant table\n");
+			dev_err(hy_drv_priv->dev, "No more space left in grant table\n");
 
 			/* Unshare all already shared pages for lvl2 */
 			while(i--) {
@@ -124,10 +122,11 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
 	/* Share 2nd level addressing pages in readonly mode*/
 	for (i=0; i< n_lvl2_grefs; i++) {
 		lvl3_table[i] = gnttab_grant_foreign_access(domid,
-							    virt_to_mfn((unsigned long)lvl2_table+i*PAGE_SIZE ),
-							    true);
+					virt_to_mfn((unsigned long)lvl2_table+i*PAGE_SIZE ),
+					true);
+
 		if (lvl3_table[i] == -ENOSPC) {
-			dev_err(hyper_dmabuf_private.device, "No more space left in grant table\n");
+			dev_err(hy_drv_priv->dev, "No more space left in grant table\n");
 
 			/* Unshare all already shared pages for lvl3 */
 			while(i--) {
@@ -147,11 +146,11 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
 
 	/* Share lvl3_table in readonly mode*/
 	lvl3_gref = gnttab_grant_foreign_access(domid,
-						virt_to_mfn((unsigned long)lvl3_table),
-						true);
+			virt_to_mfn((unsigned long)lvl3_table),
+			true);
 
 	if (lvl3_gref == -ENOSPC) {
-		dev_err(hyper_dmabuf_private.device, "No more space left in grant table\n");
+		dev_err(hy_drv_priv->dev, "No more space left in grant table\n");
 
 		/* Unshare all pages for lvl3 */
 		while(i--) {
@@ -178,7 +177,7 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
 	/* Store exported pages refid to be unshared later */
 	sh_pages_info->lvl3_gref = lvl3_gref;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
 	return lvl3_gref;
 
 err_cleanup:
@@ -190,16 +189,17 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
 
 int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
 	struct xen_shared_pages_info *sh_pages_info;
-	int n_lvl2_grefs = (nents/REFS_PER_PAGE + ((nents % REFS_PER_PAGE) ? 1: 0));
+	int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+			    ((nents % REFS_PER_PAGE) ? 1: 0));
 	int i;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
 	sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
 
 	if (sh_pages_info->lvl3_table == NULL ||
 	    sh_pages_info->lvl2_table ==  NULL ||
 	    sh_pages_info->lvl3_gref == -1) {
-		dev_warn(hyper_dmabuf_private.device,
+		dev_warn(hy_drv_priv->dev,
 			 "gref table for hyper_dmabuf already cleaned up\n");
 		return 0;
 	}
@@ -207,7 +207,7 @@ int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
 	/* End foreign access for data pages, but do not free them */
 	for (i = 0; i < nents; i++) {
 		if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i])) {
-			dev_warn(hyper_dmabuf_private.device, "refid not shared !!\n");
+			dev_warn(hy_drv_priv->dev, "refid not shared !!\n");
 		}
 		gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0);
 		gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]);
@@ -216,17 +216,17 @@ int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
 	/* End foreign access for 2nd level addressing pages */
 	for (i = 0; i < n_lvl2_grefs; i++) {
 		if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i])) {
-			dev_warn(hyper_dmabuf_private.device, "refid not shared !!\n");
+			dev_warn(hy_drv_priv->dev, "refid not shared !!\n");
 		}
 		if (!gnttab_end_foreign_access_ref(sh_pages_info->lvl3_table[i], 1)) {
-			dev_warn(hyper_dmabuf_private.device, "refid still in use!!!\n");
+			dev_warn(hy_drv_priv->dev, "refid still in use!!!\n");
 		}
 		gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]);
 	}
 
 	/* End foreign access for top level addressing page */
 	if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref)) {
-		dev_warn(hyper_dmabuf_private.device, "gref not shared !!\n");
+		dev_warn(hy_drv_priv->dev, "gref not shared !!\n");
 	}
 
 	gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1);
@@ -242,7 +242,7 @@ int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
 	kfree(sh_pages_info);
 	sh_pages_info = NULL;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
 	return 0;
 }
 
@@ -270,27 +270,33 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 
 	/* # of grefs in the last page of lvl2 table */
 	int nents_last = (nents - 1) % REFS_PER_PAGE + 1;
-	int n_lvl2_grefs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 1 : 0) -
+	int n_lvl2_grefs = (nents / REFS_PER_PAGE) +
+			   ((nents_last > 0) ? 1 : 0) -
 			   (nents_last == REFS_PER_PAGE);
 	int i, j, k;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
 
 	sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
 	*refs_info = (void *) sh_pages_info;
 
-	lvl2_table_pages = kcalloc(sizeof(struct page*), n_lvl2_grefs, GFP_KERNEL);
+	lvl2_table_pages = kcalloc(sizeof(struct page*), n_lvl2_grefs,
+				   GFP_KERNEL);
+
 	data_pages = kcalloc(sizeof(struct page*), nents, GFP_KERNEL);
 
-	lvl2_map_ops = kcalloc(sizeof(*lvl2_map_ops), n_lvl2_grefs, GFP_KERNEL);
-	lvl2_unmap_ops = kcalloc(sizeof(*lvl2_unmap_ops), n_lvl2_grefs, GFP_KERNEL);
+	lvl2_map_ops = kcalloc(sizeof(*lvl2_map_ops), n_lvl2_grefs,
+			       GFP_KERNEL);
+
+	lvl2_unmap_ops = kcalloc(sizeof(*lvl2_unmap_ops), n_lvl2_grefs,
+				 GFP_KERNEL);
 
 	data_map_ops = kcalloc(sizeof(*data_map_ops), nents, GFP_KERNEL);
 	data_unmap_ops = kcalloc(sizeof(*data_unmap_ops), nents, GFP_KERNEL);
 
 	/* Map top level addressing page */
 	if (gnttab_alloc_pages(1, &lvl3_table_page)) {
-		dev_err(hyper_dmabuf_private.device, "Cannot allocate pages\n");
+		dev_err(hy_drv_priv->dev, "Cannot allocate pages\n");
 		return NULL;
 	}
 
@@ -304,13 +310,16 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 			    GNTMAP_host_map | GNTMAP_readonly, -1);
 
 	if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) {
-		dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed");
+		dev_err(hy_drv_priv->dev,
+			"HYPERVISOR map grant ref failed");
 		return NULL;
 	}
 
 	if (lvl3_map_ops.status) {
-		dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed status = %d",
+		dev_err(hy_drv_priv->dev,
+			"HYPERVISOR map grant ref failed status = %d",
 			lvl3_map_ops.status);
+
 		goto error_cleanup_lvl3;
 	} else {
 		lvl3_unmap_ops.handle = lvl3_map_ops.handle;
@@ -318,35 +327,43 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 
 	/* Map all second level pages */
 	if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) {
-		dev_err(hyper_dmabuf_private.device, "Cannot allocate pages\n");
+		dev_err(hy_drv_priv->dev, "Cannot allocate pages\n");
 		goto error_cleanup_lvl3;
 	}
 
 	for (i = 0; i < n_lvl2_grefs; i++) {
 		lvl2_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
-		gnttab_set_map_op(&lvl2_map_ops[i], (unsigned long)lvl2_table, GNTMAP_host_map | GNTMAP_readonly,
+		gnttab_set_map_op(&lvl2_map_ops[i],
+				  (unsigned long)lvl2_table, GNTMAP_host_map |
+				  GNTMAP_readonly,
 				  lvl3_table[i], domid);
-		gnttab_set_unmap_op(&lvl2_unmap_ops[i], (unsigned long)lvl2_table, GNTMAP_host_map | GNTMAP_readonly, -1);
+		gnttab_set_unmap_op(&lvl2_unmap_ops[i],
+				    (unsigned long)lvl2_table, GNTMAP_host_map |
+				    GNTMAP_readonly, -1);
 	}
 
 	/* Unmap top level page, as it won't be needed any longer */
-	if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL, &lvl3_table_page, 1)) {
-		dev_err(hyper_dmabuf_private.device, "xen: cannot unmap top level page\n");
+	if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL,
+			      &lvl3_table_page, 1)) {
+		dev_err(hy_drv_priv->dev,
+			"xen: cannot unmap top level page\n");
 		return NULL;
 	} else {
 		/* Mark that page was unmapped */
 		lvl3_unmap_ops.handle = -1;
 	}
 
-	if (gnttab_map_refs(lvl2_map_ops, NULL, lvl2_table_pages, n_lvl2_grefs)) {
-		dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed");
+	if (gnttab_map_refs(lvl2_map_ops, NULL,
+			    lvl2_table_pages, n_lvl2_grefs)) {
+		dev_err(hy_drv_priv->dev,
+			"HYPERVISOR map grant ref failed");
 		return NULL;
 	}
 
 	/* Checks if pages were mapped correctly */
 	for (i = 0; i < n_lvl2_grefs; i++) {
 		if (lvl2_map_ops[i].status) {
-			dev_err(hyper_dmabuf_private.device,
+			dev_err(hy_drv_priv->dev,
 				"HYPERVISOR map grant ref failed status = %d",
 				lvl2_map_ops[i].status);
 			goto error_cleanup_lvl2;
@@ -356,7 +373,8 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 	}
 
 	if (gnttab_alloc_pages(nents, data_pages)) {
-		dev_err(hyper_dmabuf_private.device, "Cannot allocate pages\n");
+		dev_err(hy_drv_priv->dev,
+			"Cannot allocate pages\n");
 		goto error_cleanup_lvl2;
 	}
 
@@ -366,13 +384,13 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 		lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
 		for (j = 0; j < REFS_PER_PAGE; j++) {
 			gnttab_set_map_op(&data_map_ops[k],
-					  (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
-					  GNTMAP_host_map | GNTMAP_readonly,
-					  lvl2_table[j], domid);
+				(unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+				GNTMAP_host_map | GNTMAP_readonly,
+				lvl2_table[j], domid);
 
 			gnttab_set_unmap_op(&data_unmap_ops[k],
-					    (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
-					    GNTMAP_host_map | GNTMAP_readonly, -1);
+				(unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+				GNTMAP_host_map | GNTMAP_readonly, -1);
 			k++;
 		}
 	}
@@ -382,25 +400,29 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 
 	for (j = 0; j < nents_last; j++) {
 		gnttab_set_map_op(&data_map_ops[k],
-				  (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
-				  GNTMAP_host_map | GNTMAP_readonly,
-				  lvl2_table[j], domid);
+			(unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+			GNTMAP_host_map | GNTMAP_readonly,
+			lvl2_table[j], domid);
 
 		gnttab_set_unmap_op(&data_unmap_ops[k],
-				    (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
-				    GNTMAP_host_map | GNTMAP_readonly, -1);
+			(unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+			GNTMAP_host_map | GNTMAP_readonly, -1);
 		k++;
 	}
 
-	if (gnttab_map_refs(data_map_ops, NULL, data_pages, nents)) {
-		dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed\n");
+	if (gnttab_map_refs(data_map_ops, NULL,
+			    data_pages, nents)) {
+		dev_err(hy_drv_priv->dev,
+			"HYPERVISOR map grant ref failed\n");
 		return NULL;
 	}
 
 	/* unmapping lvl2 table pages */
-	if (gnttab_unmap_refs(lvl2_unmap_ops, NULL, lvl2_table_pages,
+	if (gnttab_unmap_refs(lvl2_unmap_ops,
+			      NULL, lvl2_table_pages,
 			      n_lvl2_grefs)) {
-		dev_err(hyper_dmabuf_private.device, "Cannot unmap 2nd level refs\n");
+		dev_err(hy_drv_priv->dev,
+			"Cannot unmap 2nd level refs\n");
 		return NULL;
 	} else {
 		/* Mark that pages were unmapped */
@@ -411,7 +433,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 
 	for (i = 0; i < nents; i++) {
 		if (data_map_ops[i].status) {
-			dev_err(hyper_dmabuf_private.device,
+			dev_err(hy_drv_priv->dev,
 				"HYPERVISOR map grant ref failed status = %d\n",
 				data_map_ops[i].status);
 			goto error_cleanup_data;
@@ -431,7 +453,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 	kfree(lvl2_unmap_ops);
 	kfree(data_map_ops);
 
-	dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
 	return data_pages;
 
 error_cleanup_data:
@@ -442,13 +464,14 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 
 error_cleanup_lvl2:
 	if (lvl2_unmap_ops[0].handle != -1)
-		gnttab_unmap_refs(lvl2_unmap_ops, NULL, lvl2_table_pages,
-				  n_lvl2_grefs);
+		gnttab_unmap_refs(lvl2_unmap_ops, NULL,
+				  lvl2_table_pages, n_lvl2_grefs);
 	gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages);
 
 error_cleanup_lvl3:
 	if (lvl3_unmap_ops.handle != -1)
-		gnttab_unmap_refs(&lvl3_unmap_ops, NULL, &lvl3_table_page, 1);
+		gnttab_unmap_refs(&lvl3_unmap_ops, NULL,
+				  &lvl3_table_page, 1);
 	gnttab_free_pages(1, &lvl3_table_page);
 
 	kfree(lvl2_table_pages);
@@ -463,20 +486,20 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
 int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents) {
 	struct xen_shared_pages_info *sh_pages_info;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
 
 	sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
 
 	if (sh_pages_info->unmap_ops == NULL ||
 	    sh_pages_info->data_pages == NULL) {
-		dev_warn(hyper_dmabuf_private.device,
-			 "Imported pages already cleaned up or buffer was not imported yet\n");
+		dev_warn(hy_drv_priv->dev,
+			 "pages already cleaned up or buffer not imported yet\n");
 		return 0;
 	}
 
 	if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL,
 			      sh_pages_info->data_pages, nents) ) {
-		dev_err(hyper_dmabuf_private.device, "Cannot unmap data pages\n");
+		dev_err(hy_drv_priv->dev, "Cannot unmap data pages\n");
 		return -EFAULT;
 	}
 
@@ -489,6 +512,6 @@ int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents) {
 	kfree(sh_pages_info);
 	sh_pages_info = NULL;
 
-	dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
 	return 0;
 }
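
For reference, the sizing arithmetic used throughout this backend:
assuming 4 KiB pages and 32-bit grant_ref_t entries, REFS_PER_PAGE is
1024, so one top-level page addresses up to 1024 second-level pages
and therefore up to 1024 * 1024 data pages (4 GiB) per shared buffer.
A sketch of the recurring round-up computation:

	#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))

	/* second-level pages needed for nents data pages,
	 * i.e. nents / REFS_PER_PAGE rounded up */
	static int lvl2_grefs_needed(int nents)
	{
		return nents / REFS_PER_PAGE +
		       (nents % REFS_PER_PAGE ? 1 : 0);
	}
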
-- 
2.7.4
