[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1513711816-2618-60-git-send-email-dongwon.kim@intel.com>
Date: Tue, 19 Dec 2017 11:30:16 -0800
From: Dongwon Kim <dongwon.kim@...el.com>
To: linux-kernel@...r.kernel.org
Cc: dri-devel@...ts.freedesktop.org, xen-devel@...ts.xenproject.org,
mateuszx.potrola@...el.com, dongwon.kim@...el.com
Subject: [RFC PATCH 60/60] hyper_dmabuf: move hyper_dmabuf to under drivers/dma-buf/
This driver's ultimate goal is to expand the boundary of data
sharing via DMA-BUF across different OSes running on the same
hardware, regardless of which hypervisor is used for the
OS virtualization. So it makes more sense to have its implementation
under drivers/dma-buf.
Signed-off-by: Dongwon Kim <dongwon.kim@...el.com>
---
drivers/dma-buf/hyper_dmabuf/Kconfig | 42 +
drivers/dma-buf/hyper_dmabuf/Makefile | 49 ++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c | 408 +++++++++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h | 118 +++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c | 122 +++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h | 38 +
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c | 133 +++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h | 51 ++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c | 786 +++++++++++++++++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h | 50 ++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c | 293 +++++++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h | 71 ++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c | 414 +++++++++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h | 87 ++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c | 413 +++++++++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h | 32 +
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c | 172 ++++
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h | 10 +
.../hyper_dmabuf/hyper_dmabuf_remote_sync.c | 322 +++++++
.../hyper_dmabuf/hyper_dmabuf_remote_sync.h | 30 +
.../dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c | 255 ++++++
.../dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h | 41 +
drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h | 141 +++
.../xen-backend/hyper_dmabuf_xen_comm.c | 941 +++++++++++++++++++++
.../xen-backend/hyper_dmabuf_xen_comm.h | 78 ++
.../xen-backend/hyper_dmabuf_xen_comm_list.c | 158 ++++
.../xen-backend/hyper_dmabuf_xen_comm_list.h | 67 ++
.../xen-backend/hyper_dmabuf_xen_drv.c | 46 +
.../xen-backend/hyper_dmabuf_xen_drv.h | 53 ++
.../xen-backend/hyper_dmabuf_xen_shm.c | 525 ++++++++++++
.../xen-backend/hyper_dmabuf_xen_shm.h | 46 +
drivers/xen/Kconfig | 2 +-
drivers/xen/Makefile | 2 +-
drivers/xen/hyper_dmabuf/Kconfig | 42 -
drivers/xen/hyper_dmabuf/Makefile | 49 --
drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c | 408 ---------
drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h | 118 ---
drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c | 122 ---
drivers/xen/hyper_dmabuf/hyper_dmabuf_event.h | 38 -
drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c | 133 ---
drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h | 51 --
drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 786 -----------------
drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h | 50 --
drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c | 293 -------
drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h | 71 --
drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c | 414 ---------
drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h | 87 --
drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c | 413 ---------
drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h | 32 -
drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c | 172 ----
drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h | 10 -
.../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c | 322 -------
.../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h | 30 -
drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c | 255 ------
drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h | 41 -
drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h | 141 ---
.../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c | 941 ---------------------
.../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h | 78 --
.../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c | 158 ----
.../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h | 67 --
.../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c | 46 -
.../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h | 53 --
.../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c | 525 ------------
.../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h | 46 -
64 files changed, 5994 insertions(+), 5994 deletions(-)
create mode 100644 drivers/dma-buf/hyper_dmabuf/Kconfig
create mode 100644 drivers/dma-buf/hyper_dmabuf/Makefile
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm_list.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm_list.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_drv.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_drv.h
create mode 100644 drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_shm.c
create mode 100644 drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_shm.h
delete mode 100644 drivers/xen/hyper_dmabuf/Kconfig
delete mode 100644 drivers/xen/hyper_dmabuf/Makefile
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_event.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
delete mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
delete mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
delete mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
delete mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
delete mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
delete mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
delete mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
delete mode 100644 drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h
diff --git a/drivers/dma-buf/hyper_dmabuf/Kconfig b/drivers/dma-buf/hyper_dmabuf/Kconfig
new file mode 100644
index 0000000..5efcd44
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/Kconfig
@@ -0,0 +1,42 @@
+menu "hyper_dmabuf options"
+
+config HYPER_DMABUF
+ tristate "Enables hyper dmabuf driver"
+ default y
+
+config HYPER_DMABUF_XEN
+ bool "Configure hyper_dmabuf for XEN hypervisor"
+ default y
+ depends on HYPER_DMABUF
+ help
+ Configuring hyper_dmabuf driver for XEN hypervisor
+
+config HYPER_DMABUF_SYSFS
+ bool "Enable sysfs information about hyper DMA buffers"
+ default y
+ depends on HYPER_DMABUF
+ help
+ Expose information about imported and exported buffers using
+ hyper_dmabuf driver
+
+config HYPER_DMABUF_EVENT_GEN
+ bool "Enable event-generation and polling operation"
+ default n
+ depends on HYPER_DMABUF
+ help
+ With this config enabled, hyper_dmabuf driver on the importer side
+ generates events and queue those up in the event list whenever a new
+ shared DMA-BUF is available. Events in the list can be retrieved by
+ read operation.
+
+config HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+ bool "Enable automatic rx-ch add with 10 secs interval"
+ default y
+ depends on HYPER_DMABUF && HYPER_DMABUF_XEN
+ help
+ If enabled, driver reads a node in xenstore every 10 seconds
+ to check whether there is any tx comm ch configured by another
+ domain then initialize matched rx comm ch automatically for any
+ existing tx comm chs.
+
+endmenu
diff --git a/drivers/dma-buf/hyper_dmabuf/Makefile b/drivers/dma-buf/hyper_dmabuf/Makefile
new file mode 100644
index 0000000..cce8e69
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/Makefile
@@ -0,0 +1,49 @@
+TARGET_MODULE:=hyper_dmabuf
+
+PLATFORM:=XEN
+
+# If we are running under the kernel build system (kbuild)
+ifneq ($(KERNELRELEASE),)
+ $(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \
+ hyper_dmabuf_ioctl.o \
+ hyper_dmabuf_list.o \
+ hyper_dmabuf_sgl_proc.o \
+ hyper_dmabuf_ops.o \
+ hyper_dmabuf_msg.o \
+ hyper_dmabuf_id.o \
+ hyper_dmabuf_remote_sync.o \
+ hyper_dmabuf_query.o
+
+ifeq ($(CONFIG_HYPER_DMABUF_EVENT_GEN), y)
+ $(TARGET_MODULE)-objs += hyper_dmabuf_event.o
+endif
+
+ifeq ($(CONFIG_HYPER_DMABUF_XEN), y)
+ $(TARGET_MODULE)-objs += xen-backend/hyper_dmabuf_xen_comm.o \
+ xen-backend/hyper_dmabuf_xen_comm_list.o \
+ xen-backend/hyper_dmabuf_xen_shm.o \
+ xen-backend/hyper_dmabuf_xen_drv.o
+endif
+
+obj-$(CONFIG_HYPER_DMABUF) := $(TARGET_MODULE).o
+
+# If we are running without kernel build system
+else
+BUILDSYSTEM_DIR?=../../../
+PWD:=$(shell pwd)
+
+# NOTE(review): recipe lines below must be indented with a hard TAB or
+# make fails with "missing separator" (the mail archive may have
+# stripped the original whitespace -- verify against the real file).
+all :
+	$(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules
+
+clean:
+	$(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) clean
+
+load:
+	insmod ./$(TARGET_MODULE).ko
+
+unload:
+	rmmod ./$(TARGET_MODULE).ko
+
+endif
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c
new file mode 100644
index 0000000..498b06c
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_ioctl.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_event.h"
+
+#ifdef CONFIG_HYPER_DMABUF_XEN
+#include "xen-backend/hyper_dmabuf_xen_drv.h"
+#endif
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Intel Corporation");
+
+struct hyper_dmabuf_private *hy_drv_priv;
+
+/*
+ * force_free - per-entry callback used at file release time.
+ * If @exported was exported through @attr (the struct file being
+ * closed), forcefully unexport it with no delay by reusing the
+ * unexport ioctl path.
+ */
+static void force_free(struct exported_sgt_info *exported,
+ void *attr)
+{
+ struct ioctl_hyper_dmabuf_unexport unexport_attr;
+ struct file *filp = (struct file *)attr;
+
+ if (!filp || !exported)
+ return;
+
+ /* only release buffers owned by the file being closed */
+ if (exported->filp == filp) {
+ dev_dbg(hy_drv_priv->dev,
+ "Forcefully releasing buffer {id:%d key:%d %d %d}\n",
+ exported->hid.id, exported->hid.rng_key[0],
+ exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+ unexport_attr.hid = exported->hid;
+ unexport_attr.delay_ms = 0;
+
+ hyper_dmabuf_unexport_ioctl(filp, &unexport_attr);
+ }
+}
+
+/* open() handler: any number of concurrent opens is allowed,
+ * but exclusive (O_EXCL) open is rejected.
+ */
+static int hyper_dmabuf_open(struct inode *inode, struct file *filp)
+{
+ /* Do not allow exclusive open */
+ return (filp->f_flags & O_EXCL) ? -EBUSY : 0;
+}
+
+/* release() handler: walk every exported entry and force-unexport
+ * those that were exported via this file (see force_free()).
+ */
+static int hyper_dmabuf_release(struct inode *inode, struct file *filp)
+{
+ hyper_dmabuf_foreach_exported(force_free, filp);
+
+ return 0;
+}
+
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+
+/* poll() handler: wake readers when the event list is non-empty.
+ * NOTE(review): event_list is checked without event_lock here --
+ * presumably acceptable for poll (re-checked in read) but confirm.
+ */
+static unsigned int hyper_dmabuf_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ poll_wait(filp, &hy_drv_priv->event_wait, wait);
+
+ if (!list_empty(&hy_drv_priv->event_list))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+/*
+ * read() handler: copies queued events (header + payload) to the user
+ * buffer until the buffer is full or the list is empty.
+ *
+ * The local variable "ret" doubles as the running byte count AND the
+ * error code -- a pattern that appears borrowed from drm_read();
+ * partial reads return the bytes copied so far, and an event that no
+ * longer fits is re-queued at the head via put_back_event.
+ */
+static ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ int ret;
+
+ /* only root can read events */
+ if (!capable(CAP_DAC_OVERRIDE)) {
+ dev_err(hy_drv_priv->dev,
+ "Only root can read events\n");
+ return -EPERM;
+ }
+
+ /* make sure user buffer can be written */
+ /* NOTE(review): two-argument access_ok() replaced the
+ * VERIFY_WRITE form in later kernels -- confirm target version. */
+ if (!access_ok(VERIFY_WRITE, buffer, count)) {
+ dev_err(hy_drv_priv->dev,
+ "User buffer can't be written.\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock);
+ if (ret)
+ return ret;
+
+ while (1) {
+ struct hyper_dmabuf_event *e = NULL;
+
+ /* pop the oldest event, if any */
+ spin_lock_irq(&hy_drv_priv->event_lock);
+ if (!list_empty(&hy_drv_priv->event_list)) {
+ e = list_first_entry(&hy_drv_priv->event_list,
+ struct hyper_dmabuf_event, link);
+ list_del(&e->link);
+ }
+ spin_unlock_irq(&hy_drv_priv->event_lock);
+
+ if (!e) {
+ /* ret != 0 here means some bytes were already
+ * copied -- return the partial count */
+ if (ret)
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ /* drop the read lock while sleeping so other
+ * readers / the producer are not blocked */
+ mutex_unlock(&hy_drv_priv->event_read_lock);
+ ret = wait_event_interruptible(hy_drv_priv->event_wait,
+ !list_empty(&hy_drv_priv->event_list));
+
+ if (ret == 0)
+ ret = mutex_lock_interruptible(
+ &hy_drv_priv->event_read_lock);
+
+ if (ret)
+ return ret;
+ } else {
+ unsigned int length = (sizeof(e->event_data.hdr) +
+ e->event_data.hdr.size);
+
+ /* event no longer fits: push it back and return
+ * what has been copied so far */
+ if (length > count - ret) {
+put_back_event:
+ spin_lock_irq(&hy_drv_priv->event_lock);
+ list_add(&e->link, &hy_drv_priv->event_list);
+ spin_unlock_irq(&hy_drv_priv->event_lock);
+ break;
+ }
+
+ if (copy_to_user(buffer + ret, &e->event_data.hdr,
+ sizeof(e->event_data.hdr))) {
+ if (ret == 0)
+ ret = -EFAULT;
+
+ goto put_back_event;
+ }
+
+ ret += sizeof(e->event_data.hdr);
+
+ if (copy_to_user(buffer + ret, e->event_data.data,
+ e->event_data.hdr.size)) {
+ /* error while copying void *data */
+
+ struct hyper_dmabuf_event_hdr dummy_hdr = {0};
+
+ ret -= sizeof(e->event_data.hdr);
+
+ /* nullifying hdr of the event in user buffer */
+ if (copy_to_user(buffer + ret, &dummy_hdr,
+ sizeof(dummy_hdr))) {
+ dev_err(hy_drv_priv->dev,
+ "failed to nullify invalid hdr already in userspace\n");
+ }
+
+ ret = -EFAULT;
+
+ goto put_back_event;
+ }
+
+ ret += e->event_data.hdr.size;
+ hy_drv_priv->pending--;
+ kfree(e);
+ }
+ }
+
+ mutex_unlock(&hy_drv_priv->event_read_lock);
+
+ return ret;
+}
+
+#endif
+
+/* file_operations for the /dev/hyper_dmabuf misc device */
+static const struct file_operations hyper_dmabuf_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = hyper_dmabuf_open,
+ .release = hyper_dmabuf_release,
+
+/* poll and read interfaces are needed only for event-polling */
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+ .read = hyper_dmabuf_event_read,
+ .poll = hyper_dmabuf_event_poll,
+#endif
+
+ .unlocked_ioctl = hyper_dmabuf_ioctl,
+};
+
+/* misc char device; minor number assigned dynamically */
+static struct miscdevice hyper_dmabuf_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hyper_dmabuf",
+ .fops = &hyper_dmabuf_driver_fops,
+};
+
+/*
+ * register_device - register the hyper_dmabuf misc device and stash
+ * its struct device in hy_drv_priv->dev.  Returns 0 or a negative
+ * errno from misc_register().
+ */
+static int register_device(void)
+{
+ int ret = 0;
+
+ ret = misc_register(&hyper_dmabuf_miscdev);
+
+ if (ret) {
+ /* pr_err instead of raw printk(KERN_ERR ...) */
+ pr_err("hyper_dmabuf: driver can't be registered\n");
+ return ret;
+ }
+
+ hy_drv_priv->dev = hyper_dmabuf_miscdev.this_device;
+
+ /* TODO: Check if there is a different way to initialize dma mask */
+ /* failure here was silently ignored before; log it but keep it
+ * non-fatal to preserve existing behavior */
+ ret = dma_coerce_mask_and_coherent(hy_drv_priv->dev, DMA_BIT_MASK(64));
+ if (ret)
+ dev_warn(hy_drv_priv->dev,
+ "hyper_dmabuf: failed to set 64-bit DMA mask\n");
+
+ return 0;
+}
+
+/*
+ * unregister_device - deregister the misc device.
+ * NOTE: dereferences hy_drv_priv->dev, so it must be called while
+ * hy_drv_priv is still allocated (i.e. before kfree(hy_drv_priv)).
+ */
+static void unregister_device(void)
+{
+ dev_info(hy_drv_priv->dev,
+ "hyper_dmabuf: unregister_device() is called\n");
+
+ misc_deregister(&hyper_dmabuf_miscdev);
+}
+
+/*
+ * Module init: allocate driver private data, register the misc device,
+ * pick the hypervisor backend, set up the export/import tables, sysfs,
+ * the event queue and the backend communication environment.
+ *
+ * Fixes over the original: every failure path now unwinds what was
+ * already set up (the original leaked the registered misc device and,
+ * later, the workqueue, then freed hy_drv_priv while the device stayed
+ * registered -- a use-after-free for any subsequent open/ioctl);
+ * create_workqueue() is checked; bare "return -1" became -ENODEV.
+ */
+static int __init hyper_dmabuf_drv_init(void)
+{
+	int ret;
+
+	printk(KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n");
+
+	/* kzalloc: a single zeroed struct, not an array */
+	hy_drv_priv = kzalloc(sizeof(*hy_drv_priv), GFP_KERNEL);
+	if (!hy_drv_priv)
+		return -ENOMEM;
+
+	ret = register_device();
+	if (ret < 0)
+		goto err_free_priv;
+
+/* currently only supports XEN hypervisor */
+#ifdef CONFIG_HYPER_DMABUF_XEN
+	hy_drv_priv->bknd_ops = &xen_bknd_ops;
+#else
+	hy_drv_priv->bknd_ops = NULL;
+	printk(KERN_ERR "hyper_dmabuf drv currently supports XEN only.\n");
+#endif
+
+	if (hy_drv_priv->bknd_ops == NULL) {
+		printk(KERN_ERR "Hyper_dmabuf: no backend found\n");
+		ret = -ENODEV;
+		goto err_unregister;
+	}
+
+	mutex_init(&hy_drv_priv->lock);
+
+	mutex_lock(&hy_drv_priv->lock);
+
+	hy_drv_priv->initialized = false;
+
+	dev_info(hy_drv_priv->dev,
+		 "initializing database for imported/exported dmabufs\n");
+
+	hy_drv_priv->work_queue = create_workqueue("hyper_dmabuf_wqueue");
+	if (!hy_drv_priv->work_queue) {
+		dev_err(hy_drv_priv->dev,
+			"fail to create workqueue\n");
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = hyper_dmabuf_table_init();
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"fail to init table for exported/imported entries\n");
+		goto err_destroy_wq;
+	}
+
+#ifdef CONFIG_HYPER_DMABUF_SYSFS
+	ret = hyper_dmabuf_register_sysfs(hy_drv_priv->dev);
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"failed to initialize sysfs\n");
+		goto err_destroy_table;
+	}
+#endif
+
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+	mutex_init(&hy_drv_priv->event_read_lock);
+	spin_lock_init(&hy_drv_priv->event_lock);
+
+	/* Initialize event queue */
+	INIT_LIST_HEAD(&hy_drv_priv->event_list);
+	init_waitqueue_head(&hy_drv_priv->event_wait);
+
+	/* resetting number of pending events */
+	hy_drv_priv->pending = 0;
+#endif
+
+	if (hy_drv_priv->bknd_ops->init) {
+		ret = hy_drv_priv->bknd_ops->init();
+
+		if (ret < 0) {
+			dev_dbg(hy_drv_priv->dev,
+				"failed to initialize backend.\n");
+			/* sysfs entries are removed with the device on
+			 * unregister_device() below */
+			goto err_destroy_table;
+		}
+	}
+
+	hy_drv_priv->domid = hy_drv_priv->bknd_ops->get_vm_id();
+
+	/* comm-env failure stays non-fatal: driver loads with
+	 * initialized == false, as before */
+	ret = hy_drv_priv->bknd_ops->init_comm_env();
+	if (ret < 0) {
+		dev_dbg(hy_drv_priv->dev,
+			"failed to initialize comm-env.\n");
+	} else {
+		hy_drv_priv->initialized = true;
+	}
+
+	mutex_unlock(&hy_drv_priv->lock);
+
+	dev_info(hy_drv_priv->dev,
+		 "Finishing up initialization of hyper_dmabuf drv\n");
+
+	/* interrupt for comm should be registered here: */
+	return ret;
+
+err_destroy_table:
+	hyper_dmabuf_table_destroy();
+err_destroy_wq:
+	destroy_workqueue(hy_drv_priv->work_queue);
+err_unlock:
+	mutex_unlock(&hy_drv_priv->lock);
+err_unregister:
+	unregister_device();
+err_free_priv:
+	kfree(hy_drv_priv);
+	return ret;
+}
+
+/*
+ * Module exit: tear down sysfs, tables, backend comm, workqueue, the
+ * id free-list and the event queue, then unregister the device and
+ * free the private data.
+ *
+ * Fix over the original: hy_drv_priv was kfree()d BEFORE
+ * unregister_device(), which dereferences hy_drv_priv->dev -- a
+ * use-after-free.  Unregister first, free last.  Also drops a stray
+ * "};" after the cleanup if-block.
+ */
+static void hyper_dmabuf_drv_exit(void)
+{
+#ifdef CONFIG_HYPER_DMABUF_SYSFS
+	hyper_dmabuf_unregister_sysfs(hy_drv_priv->dev);
+#endif
+
+	mutex_lock(&hy_drv_priv->lock);
+
+	/* hash tables for export/import entries and ring_infos */
+	hyper_dmabuf_table_destroy();
+
+	hy_drv_priv->bknd_ops->destroy_comm();
+
+	if (hy_drv_priv->bknd_ops->cleanup)
+		hy_drv_priv->bknd_ops->cleanup();
+
+	/* destroy workqueue */
+	if (hy_drv_priv->work_queue)
+		destroy_workqueue(hy_drv_priv->work_queue);
+
+	/* destroy id_queue */
+	if (hy_drv_priv->id_queue)
+		hyper_dmabuf_free_hid_list();
+
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+	/* clean up event queue */
+	hyper_dmabuf_events_release();
+#endif
+
+	mutex_unlock(&hy_drv_priv->lock);
+
+	dev_info(hy_drv_priv->dev,
+		 "hyper_dmabuf driver: Exiting\n");
+
+	/* must happen before kfree: unregister_device() uses
+	 * hy_drv_priv->dev */
+	unregister_device();
+
+	kfree(hy_drv_priv);
+}
+
+module_init(hyper_dmabuf_drv_init);
+module_exit(hyper_dmabuf_drv_exit);
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h
new file mode 100644
index 0000000..c2bb3ce
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
+#define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
+
+#include <linux/device.h>
+#include <xen/hyper_dmabuf.h>
+
+struct hyper_dmabuf_req;
+
+/* one queued event plus its link in hy_drv_priv->event_list */
+struct hyper_dmabuf_event {
+ struct hyper_dmabuf_event_data event_data;
+ struct list_head link;
+};
+
+/* driver-global state, allocated once at module init (see hy_drv_priv) */
+struct hyper_dmabuf_private {
+ struct device *dev;
+
+ /* VM(domain) id of current VM instance */
+ int domid;
+
+ /* workqueue dedicated to hyper_dmabuf driver */
+ struct workqueue_struct *work_queue;
+
+ /* list of reusable hyper_dmabuf_ids */
+ struct list_reusable_id *id_queue;
+
+ /* backend ops - hypervisor specific */
+ struct hyper_dmabuf_bknd_ops *bknd_ops;
+
+ /* device global lock */
+ /* TODO: might need a lock per resource (e.g. EXPORT LIST) */
+ struct mutex lock;
+
+ /* flag that shows whether backend is initialized */
+ bool initialized;
+
+ /* NOTE(review): the event fields below are only used when
+ * CONFIG_HYPER_DMABUF_EVENT_GEN is set -- presumably they could
+ * be guarded by that config; confirm before changing the layout.
+ */
+ wait_queue_head_t event_wait;
+ struct list_head event_list;
+
+ spinlock_t event_lock;
+ struct mutex event_read_lock;
+
+ /* # of pending events */
+ int pending;
+};
+
+/* node of the reusable-id free list hanging off id_queue */
+struct list_reusable_id {
+ hyper_dmabuf_id_t hid;
+ struct list_head list;
+};
+
+/* hypervisor-specific backend; all hooks marked optional may be NULL */
+struct hyper_dmabuf_bknd_ops {
+ /* backend initialization routine (optional) */
+ int (*init)(void);
+
+ /* backend cleanup routine (optional) */
+ int (*cleanup)(void);
+
+ /* retrieving id of current virtual machine */
+ int (*get_vm_id)(void);
+
+ /* get pages shared via hypervisor-specific method */
+ int (*share_pages)(struct page **, int, int, void **);
+
+ /* make shared pages unshared via hypervisor specific method */
+ int (*unshare_pages)(void **, int);
+
+ /* map remotely shared pages on importer's side via
+ * hypervisor-specific method
+ */
+ struct page ** (*map_shared_pages)(unsigned long, int, int, void **);
+
+ /* unmap and free shared pages on importer's side via
+ * hypervisor-specific method
+ */
+ int (*unmap_shared_pages)(void **, int);
+
+ /* initialize communication environment */
+ int (*init_comm_env)(void);
+
+ void (*destroy_comm)(void);
+
+ /* upstream ch setup (receiving and responding) */
+ int (*init_rx_ch)(int);
+
+ /* downstream ch setup (transmitting and parsing responses) */
+ int (*init_tx_ch)(int);
+
+ int (*send_req)(int, struct hyper_dmabuf_req *, int);
+};
+
+/* exporting global drv private info */
+extern struct hyper_dmabuf_private *hy_drv_priv;
+
+#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c
new file mode 100644
index 0000000..392ea99
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_event.h"
+
+/*
+ * send_event - append event @e to the tail of the global event list
+ * and wake readers.  Takes ownership of @e.  If the queue already
+ * holds MAX_DEPTH_EVENT_QUEUE events, the oldest one is dropped.
+ */
+static void send_event(struct hyper_dmabuf_event *e)
+{
+ struct hyper_dmabuf_event *oldest;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
+
+ /* check current number of event then if it hits the max num allowed
+ * then remove the oldest event in the list
+ */
+ if (hy_drv_priv->pending > MAX_DEPTH_EVENT_QUEUE - 1) {
+ oldest = list_first_entry(&hy_drv_priv->event_list,
+ struct hyper_dmabuf_event, link);
+ list_del(&oldest->link);
+ hy_drv_priv->pending--;
+ kfree(oldest);
+ }
+
+ list_add_tail(&e->link,
+ &hy_drv_priv->event_list);
+
+ hy_drv_priv->pending++;
+
+ wake_up_interruptible(&hy_drv_priv->event_wait);
+
+ spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags);
+}
+
+/*
+ * hyper_dmabuf_events_release - free every queued event (module exit
+ * path).  Logs an error if the pending counter does not reach zero,
+ * which would indicate a counting leak elsewhere.
+ */
+void hyper_dmabuf_events_release(void)
+{
+ struct hyper_dmabuf_event *e, *et;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
+
+ list_for_each_entry_safe(e, et, &hy_drv_priv->event_list,
+ link) {
+ list_del(&e->link);
+ kfree(e);
+ hy_drv_priv->pending--;
+ }
+
+ if (hy_drv_priv->pending) {
+ dev_err(hy_drv_priv->dev,
+ "possible leak on event_list\n");
+ }
+
+ spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags);
+}
+
+/*
+ * hyper_dmabuf_import_event - queue a HYPER_DMABUF_NEW_IMPORT event
+ * for the buffer identified by @hid.  Returns 0, -EINVAL if @hid is
+ * not in the imported list, or -ENOMEM.
+ *
+ * NOTE(review): event_data.data aliases imported->priv rather than
+ * copying it -- the event payload is only valid while the imported
+ * entry lives; confirm readers cannot outlive it.
+ */
+int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid)
+{
+ struct hyper_dmabuf_event *e;
+ struct imported_sgt_info *imported;
+
+ imported = hyper_dmabuf_find_imported(hid);
+
+ if (!imported) {
+ dev_err(hy_drv_priv->dev,
+ "can't find imported_sgt_info in the list\n");
+ return -EINVAL;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+ if (!e)
+ return -ENOMEM;
+
+ e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT;
+ e->event_data.hdr.hid = hid;
+ e->event_data.data = (void *)imported->priv;
+ e->event_data.hdr.size = imported->sz_priv;
+
+ send_event(e);
+
+ /* NOTE(review): pending is read without event_lock here; debug
+ * output only, but the value may be stale */
+ dev_dbg(hy_drv_priv->dev,
+ "event number = %d :", hy_drv_priv->pending);
+
+ dev_dbg(hy_drv_priv->dev,
+ "generating events for {%d, %d, %d, %d}\n",
+ imported->hid.id, imported->hid.rng_key[0],
+ imported->hid.rng_key[1], imported->hid.rng_key[2]);
+
+ return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h
new file mode 100644
index 0000000..50db04f
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_EVENT_H__
+#define __HYPER_DMABUF_EVENT_H__
+
+/* cap on queued events; send_event() drops the oldest beyond this */
+#define MAX_DEPTH_EVENT_QUEUE 32
+
+enum hyper_dmabuf_event_type {
+ HYPER_DMABUF_NEW_IMPORT = 0x10000,
+};
+
+/* free all queued events (module exit) */
+void hyper_dmabuf_events_release(void);
+
+/* queue a NEW_IMPORT event for @hid; 0 or negative errno */
+int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid);
+
+#endif /* __HYPER_DMABUF_EVENT_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c
new file mode 100644
index 0000000..e67b84a
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_id.h"
+
+/* Push a freed hid onto the driver-wide reusable-id stack.
+ * Allocation failure is tolerated silently: the id is simply lost
+ * for reuse, which is not fatal.
+ */
+void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid)
+{
+	struct list_reusable_id *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return;
+
+	entry->hid = hid;
+	list_add(&entry->list, &hy_drv_priv->id_queue->list);
+}
+
+/* Pop one hid from the reusable-id stack; returns the invalid id
+ * {-1, {0, 0, 0}} when the stack is empty.
+ */
+static hyper_dmabuf_id_t get_reusable_hid(void)
+{
+	struct list_reusable_id *head = hy_drv_priv->id_queue;
+	struct list_reusable_id *entry;
+	hyper_dmabuf_id_t hid = {-1, {0, 0, 0} };
+
+	if (list_empty(&head->list))
+		return hid;
+
+	entry = list_first_entry(&head->list, struct list_reusable_id, list);
+	list_del(&entry->list);
+	hid = entry->hid;
+	kfree(entry);
+
+	return hid;
+}
+
+/* Tear down the reusable-id stack: release every queued entry, then
+ * the list head itself. Safe to call when the queue was never created.
+ */
+void hyper_dmabuf_free_hid_list(void)
+{
+	struct list_reusable_id *head = hy_drv_priv->id_queue;
+	struct list_reusable_id *entry, *tmp;
+
+	if (!head)
+		return;
+
+	/* freeing mem space of all reusable ids in the stack */
+	list_for_each_entry_safe(entry, tmp, &head->list, list) {
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	/* freeing head */
+	kfree(head);
+}
+
+/* Allocate a hyper_dmabuf id: reuse a previously freed one when
+ * available, otherwise mint a new one from the per-domain counter.
+ * Returns an id with .id == -1 when the id space is exhausted or the
+ * reusable queue could not be set up.
+ */
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
+{
+	static int count;
+	hyper_dmabuf_id_t hid;
+	struct list_reusable_id *reusable_head;
+
+	/* first call: set up the reusable-id queue */
+	if (count == 0) {
+		reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
+
+		if (!reusable_head)
+			return (hyper_dmabuf_id_t){-1, {0, 0, 0} };
+
+		/* list head carries an invalid id */
+		reusable_head->hid.id = -1;
+		INIT_LIST_HEAD(&reusable_head->list);
+		hy_drv_priv->id_queue = reusable_head;
+	}
+
+	hid = get_reusable_hid();
+
+	/* creating a new H-ID only if nothing in the reusable id queue
+	 * and count is less than maximum allowed
+	 */
+	if (hid.id == -1) {
+		/* id space exhausted: return the invalid id unkeyed */
+		if (count >= HYPER_DMABUF_ID_MAX)
+			return hid;
+
+		hid.id = HYPER_DMABUF_ID_CREATE(hy_drv_priv->domid, count++);
+	}
+
+	/* random key embedded in the id for security; sized from the
+	 * field itself rather than a magic byte count
+	 */
+	get_random_bytes(hid.rng_key, sizeof(hid.rng_key));
+
+	return hid;
+}
+
+/* Return true when both hids carry identical random keys
+ * (the .id field is deliberately not compared).
+ */
+bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2)
+{
+	int i = 0;
+
+	/* scan until the first mismatching key word */
+	while (i < 3 && hid1.rng_key[i] == hid2.rng_key[i])
+		i++;
+
+	return i == 3;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h
new file mode 100644
index 0000000..ed690f3
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_ID_H__
+#define __HYPER_DMABUF_ID_H__
+
+/* compose a 32-bit hid: top byte is the exporting domain,
+ * low 24 bits are a per-domain counter
+ */
+#define HYPER_DMABUF_ID_CREATE(domid, cnt) \
+	((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
+
+/* extract the exporting domain from a hid */
+#define HYPER_DMABUF_DOM_ID(hid) \
+	(((hid.id) >> 24) & 0xFF)
+
+/* currently maximum number of buffers shared
+ * at any given moment is limited to 1000
+ */
+#define HYPER_DMABUF_ID_MAX 1000
+
+/* adding freed hid to the reusable list */
+void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid);
+
+/* freeing the reusable list */
+void hyper_dmabuf_free_hid_list(void);
+
+/* getting a hid available to use. */
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void);
+
+/* comparing the random keys of two hids (not the id fields) */
+bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2);
+
+#endif /* __HYPER_DMABUF_ID_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
new file mode 100644
index 0000000..ca6edf2
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -0,0 +1,786 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_ioctl.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_sgl_proc.h"
+#include "hyper_dmabuf_ops.h"
+#include "hyper_dmabuf_query.h"
+
+/* IOCTL: set up the transmit channel towards a remote domain. */
+static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
+{
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr = data;
+
+	if (!tx_ch_attr) {
+		dev_err(hy_drv_priv->dev, "user data is NULL\n");
+		return -EINVAL;
+	}
+
+	return bknd_ops->init_tx_ch(tx_ch_attr->remote_domain);
+}
+
+/* IOCTL: set up the receive channel from a source domain. */
+static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data)
+{
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr = data;
+
+	if (!rx_ch_attr) {
+		dev_err(hy_drv_priv->dev, "user data is NULL\n");
+		return -EINVAL;
+	}
+
+	return bknd_ops->init_rx_ch(rx_ch_attr->source_domain);
+}
+
+/* Build and send a HYPER_DMABUF_EXPORT request to the importing domain.
+ *
+ * @exported: exported buffer descriptor (hid, private data, target domain)
+ * @pg_info:  page-sharing info; when non-NULL the pages are shared with
+ *            the remote domain and the sharing reference goes into the
+ *            message; when NULL only hid and private data are (re)sent.
+ *
+ * Returns the backend send status, or a negative errno on failure.
+ */
+static int send_export_msg(struct exported_sgt_info *exported,
+			   struct pages_info *pg_info)
+{
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	struct hyper_dmabuf_req *req;
+	int op[MAX_NUMBER_OF_OPERANDS] = {0};
+	int ret, i;
+
+	/* now create request for importer via ring */
+	op[0] = exported->hid.id;
+
+	for (i = 0; i < 3; i++)
+		op[i+1] = exported->hid.rng_key[i];
+
+	if (pg_info) {
+		op[4] = pg_info->nents;
+		op[5] = pg_info->frst_ofst;
+		op[6] = pg_info->last_len;
+		op[7] = bknd_ops->share_pages(pg_info->pgs, exported->rdomid,
+					 pg_info->nents, &exported->refs_info);
+		if (op[7] < 0) {
+			dev_err(hy_drv_priv->dev, "pages sharing failed\n");
+			return op[7];
+		}
+	}
+
+	op[8] = exported->sz_priv;
+
+	/* driver/application specific private info; skip the copy when
+	 * there is none (exported->priv may be NULL when sz_priv == 0)
+	 */
+	if (op[8] > 0)
+		memcpy(&op[9], exported->priv, op[8]);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+	if (!req)
+		return -ENOMEM;
+
+	/* composing a message to the importer */
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]);
+
+	ret = bknd_ops->send_req(exported->rdomid, req, true);
+
+	kfree(req);
+
+	return ret;
+}
+
+/* Fast path exporting routine in case same buffer is already exported.
+ * In this function, we skip normal exporting process and just update
+ * private data on both VMs (importer and exporter)
+ *
+ * return '1' if reexport is needed, return '0' if succeeds, return
+ * Kernel error code if something goes wrong
+ */
+static int fastpath_export(hyper_dmabuf_id_t hid, int sz_priv, char *priv)
+{
+	int reexport = 1;
+	int ret = 0;
+	struct exported_sgt_info *exported;
+
+	exported = hyper_dmabuf_find_exported(hid);
+
+	/* not in the export list (or no longer valid): caller must take
+	 * the full export path
+	 */
+	if (!exported)
+		return reexport;
+
+	if (exported->valid == false)
+		return reexport;
+
+	/*
+	 * Check if unexport is already scheduled for that buffer,
+	 * if so try to cancel it. If that will fail, buffer needs
+	 * to be reexport once again.
+	 */
+	if (exported->unexport_sched) {
+		if (!cancel_delayed_work_sync(&exported->unexport))
+			return reexport;
+
+		exported->unexport_sched = false;
+	}
+
+	/* if there's any change in size of private data.
+	 * we reallocate space for private data with new size
+	 */
+	if (sz_priv != exported->sz_priv) {
+		kfree(exported->priv);
+
+		/* truncating size */
+		if (sz_priv > MAX_SIZE_PRIV_DATA)
+			exported->sz_priv = MAX_SIZE_PRIV_DATA;
+		else
+			exported->sz_priv = sz_priv;
+
+		exported->priv = kcalloc(1, exported->sz_priv,
+					 GFP_KERNEL);
+
+		/* allocation failed: tear the whole export down, since
+		 * the old private data is already gone. NOTE(review):
+		 * the freed hid is not stored for reuse here, unlike in
+		 * delayed_unexport() — confirm whether that is intended.
+		 */
+		if (!exported->priv) {
+			hyper_dmabuf_remove_exported(exported->hid);
+			hyper_dmabuf_cleanup_sgt_info(exported, true);
+			kfree(exported);
+			return -ENOMEM;
+		}
+	}
+
+	/* update private data in sgt_info with new ones.
+	 * NOTE(review): 'priv' is a userspace pointer consumed by
+	 * copy_from_user but the parameter lacks the __user annotation —
+	 * confirm against the ioctl struct definition (sparse will warn).
+	 */
+	ret = copy_from_user(exported->priv, priv, exported->sz_priv);
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to load a new private data\n");
+		ret = -EINVAL;
+	} else {
+		/* send an export msg for updating priv in importer */
+		ret = send_export_msg(exported, NULL);
+
+		if (ret < 0) {
+			dev_err(hy_drv_priv->dev,
+				"Failed to send a new private data\n");
+			ret = -EBUSY;
+		}
+	}
+
+	return ret;
+}
+
+/* IOCTL: export a local dma-buf to a remote domain.
+ *
+ * If the buffer is already exported to that domain, the fast path only
+ * refreshes the private data. Otherwise the dma-buf is attached and
+ * mapped, an exported_sgt_info descriptor is built and registered, and
+ * an EXPORT message (with shared-page references) is sent to the
+ * remote domain.
+ *
+ * The error-path label chain below releases, in reverse order of
+ * acquisition, everything allocated up to the failure point —
+ * including 'exported' and its private-data buffer, which earlier
+ * label ordering leaked when hid allocation or any of the four
+ * tracking-list allocations failed.
+ */
+static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
+{
+	struct ioctl_hyper_dmabuf_export_remote *export_remote_attr =
+			(struct ioctl_hyper_dmabuf_export_remote *)data;
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+	struct pages_info *pg_info;
+	struct exported_sgt_info *exported;
+	hyper_dmabuf_id_t hid;
+	int ret = 0;
+
+	if (hy_drv_priv->domid == export_remote_attr->remote_domain) {
+		dev_err(hy_drv_priv->dev,
+			"exporting to the same VM is not permitted\n");
+		return -EINVAL;
+	}
+
+	dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd);
+
+	if (IS_ERR(dma_buf)) {
+		dev_err(hy_drv_priv->dev, "Cannot get dma buf\n");
+		return PTR_ERR(dma_buf);
+	}
+
+	/* we check if this specific attachment was already exported
+	 * to the same domain and if yes and it's valid sgt_info,
+	 * it returns hyper_dmabuf_id of pre-exported sgt_info
+	 */
+	hid = hyper_dmabuf_find_hid_exported(dma_buf,
+					     export_remote_attr->remote_domain);
+
+	if (hid.id != -1) {
+		ret = fastpath_export(hid, export_remote_attr->sz_priv,
+				      export_remote_attr->priv);
+
+		/* return if fastpath_export succeeds or
+		 * gets some fatal error
+		 */
+		if (ret <= 0) {
+			dma_buf_put(dma_buf);
+			export_remote_attr->hid = hid;
+			return ret;
+		}
+	}
+
+	attachment = dma_buf_attach(dma_buf, hy_drv_priv->dev);
+	if (IS_ERR(attachment)) {
+		dev_err(hy_drv_priv->dev, "cannot get attachment\n");
+		ret = PTR_ERR(attachment);
+		goto fail_attach;
+	}
+
+	sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
+
+	if (IS_ERR(sgt)) {
+		dev_err(hy_drv_priv->dev, "cannot map attachment\n");
+		ret = PTR_ERR(sgt);
+		goto fail_map_attachment;
+	}
+
+	exported = kzalloc(sizeof(*exported), GFP_KERNEL);
+
+	if (!exported) {
+		ret = -ENOMEM;
+		goto fail_sgt_info_creation;
+	}
+
+	/* possible truncation */
+	if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA)
+		exported->sz_priv = MAX_SIZE_PRIV_DATA;
+	else
+		exported->sz_priv = export_remote_attr->sz_priv;
+
+	/* creating buffer for private data of buffer */
+	if (exported->sz_priv != 0) {
+		exported->priv = kzalloc(exported->sz_priv, GFP_KERNEL);
+
+		if (!exported->priv) {
+			ret = -ENOMEM;
+			goto fail_priv_creation;
+		}
+	} else {
+		dev_err(hy_drv_priv->dev, "size is 0\n");
+	}
+
+	exported->hid = hyper_dmabuf_get_hid();
+
+	/* no more exported dmabuf allowed */
+	if (exported->hid.id == -1) {
+		dev_err(hy_drv_priv->dev,
+			"exceeds allowed number of dmabuf to be exported\n");
+		ret = -ENOMEM;
+		goto fail_hid;
+	}
+
+	exported->rdomid = export_remote_attr->remote_domain;
+	exported->dma_buf = dma_buf;
+	exported->valid = true;
+
+	exported->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
+	if (!exported->active_sgts) {
+		ret = -ENOMEM;
+		goto fail_map_active_sgts;
+	}
+
+	exported->active_attached = kmalloc(sizeof(struct attachment_list),
+					    GFP_KERNEL);
+	if (!exported->active_attached) {
+		ret = -ENOMEM;
+		goto fail_map_active_attached;
+	}
+
+	exported->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list),
+				       GFP_KERNEL);
+	if (!exported->va_kmapped) {
+		ret = -ENOMEM;
+		goto fail_map_va_kmapped;
+	}
+
+	exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list),
+				       GFP_KERNEL);
+	if (!exported->va_vmapped) {
+		ret = -ENOMEM;
+		goto fail_map_va_vmapped;
+	}
+
+	exported->active_sgts->sgt = sgt;
+	exported->active_attached->attach = attachment;
+	exported->va_kmapped->vaddr = NULL;
+	exported->va_vmapped->vaddr = NULL;
+
+	/* initialize list of sgt, attachment and vaddr for dmabuf sync
+	 * via shadow dma-buf
+	 */
+	INIT_LIST_HEAD(&exported->active_sgts->list);
+	INIT_LIST_HEAD(&exported->active_attached->list);
+	INIT_LIST_HEAD(&exported->va_kmapped->list);
+	INIT_LIST_HEAD(&exported->va_vmapped->list);
+
+	/* copy private data to sgt_info */
+	ret = copy_from_user(exported->priv, export_remote_attr->priv,
+			     exported->sz_priv);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"failed to load private data\n");
+		ret = -EINVAL;
+		goto fail_export;
+	}
+
+	pg_info = hyper_dmabuf_ext_pgs(sgt);
+	if (!pg_info) {
+		dev_err(hy_drv_priv->dev,
+			"failed to construct pg_info\n");
+		ret = -ENOMEM;
+		goto fail_export;
+	}
+
+	exported->nents = pg_info->nents;
+
+	/* now register it to export list */
+	hyper_dmabuf_register_exported(exported);
+
+	export_remote_attr->hid = exported->hid;
+
+	ret = send_export_msg(exported, pg_info);
+
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"failed to send out the export request\n");
+		goto fail_send_request;
+	}
+
+	/* free pg_info */
+	kfree(pg_info->pgs);
+	kfree(pg_info);
+
+	exported->filp = filp;
+
+	return ret;
+
+/* Clean-up if error occurs: each label undoes one acquisition and
+ * falls through to the next; kfree(NULL) is a no-op, so zeroed
+ * fields of the kzalloc'd 'exported' are safe to free.
+ */
+
+fail_send_request:
+	hyper_dmabuf_remove_exported(exported->hid);
+
+	/* free pg_info */
+	kfree(pg_info->pgs);
+	kfree(pg_info);
+
+fail_export:
+	kfree(exported->va_vmapped);
+
+fail_map_va_vmapped:
+	kfree(exported->va_kmapped);
+
+fail_map_va_kmapped:
+	kfree(exported->active_attached);
+
+fail_map_active_attached:
+	kfree(exported->active_sgts);
+
+fail_map_active_sgts:
+fail_hid:
+	kfree(exported->priv);
+
+fail_priv_creation:
+	kfree(exported);
+
+fail_sgt_info_creation:
+	dma_buf_unmap_attachment(attachment, sgt,
+				 DMA_BIDIRECTIONAL);
+
+fail_map_attachment:
+	dma_buf_detach(dma_buf, attachment);
+
+fail_attach:
+	dma_buf_put(dma_buf);
+
+	return ret;
+}
+
+/* IOCTL: turn an imported buffer (identified by hid) into a local
+ * dma-buf fd.
+ *
+ * The exporting domain is notified first (EXPORT_FD); on first use the
+ * shared pages are mapped and an sg_table is built; finally the fd is
+ * created. On any failure the exporter is told to roll back via an
+ * EXPORT_FD_FAILED message.
+ */
+static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
+{
+	struct ioctl_hyper_dmabuf_export_fd *export_fd_attr =
+			(struct ioctl_hyper_dmabuf_export_fd *)data;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	struct imported_sgt_info *imported;
+	struct hyper_dmabuf_req *req;
+	struct page **data_pgs;
+	int op[4];
+	int i;
+	int ret = 0;
+
+	dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
+
+	/* look for dmabuf for the id */
+	imported = hyper_dmabuf_find_imported(export_fd_attr->hid);
+
+	/* can't find sgt from the table */
+	if (!imported) {
+		dev_err(hy_drv_priv->dev, "can't find the entry\n");
+		return -ENOENT;
+	}
+
+	mutex_lock(&hy_drv_priv->lock);
+
+	/* counted before messaging the exporter; every failure path
+	 * below decrements it again before unlocking
+	 */
+	imported->importers++;
+
+	/* send notification for export_fd to exporter */
+	op[0] = imported->hid.id;
+
+	for (i = 0; i < 3; i++)
+		op[i+1] = imported->hid.rng_key[i];
+
+	dev_dbg(hy_drv_priv->dev, "Export FD of buffer {id:%d key:%d %d %d}\n",
+		imported->hid.id, imported->hid.rng_key[0],
+		imported->hid.rng_key[1], imported->hid.rng_key[2]);
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+	if (!req) {
+		mutex_unlock(&hy_drv_priv->lock);
+		return -ENOMEM;
+	}
+
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]);
+
+	ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true);
+
+	if (ret < 0) {
+		/* in case of timeout other end eventually will receive request,
+		 * so we need to undo it
+		 */
+		hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED,
+					&op[0]);
+		bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, false);
+		kfree(req);
+		dev_err(hy_drv_priv->dev,
+			"Failed to create sgt or notify exporter\n");
+		imported->importers--;
+		mutex_unlock(&hy_drv_priv->lock);
+		return ret;
+	}
+
+	kfree(req);
+
+	/* NOTE(review): assumes send_req returns the remote status code
+	 * when >= 0 — confirm against the backend implementation
+	 */
+	if (ret == HYPER_DMABUF_REQ_ERROR) {
+		dev_err(hy_drv_priv->dev,
+			"Buffer invalid {id:%d key:%d %d %d}, cannot import\n",
+			imported->hid.id, imported->hid.rng_key[0],
+			imported->hid.rng_key[1], imported->hid.rng_key[2]);
+
+		imported->importers--;
+		mutex_unlock(&hy_drv_priv->lock);
+		return -EINVAL;
+	}
+
+	ret = 0;
+
+	dev_dbg(hy_drv_priv->dev,
+		"Found buffer gref %d off %d\n",
+		imported->ref_handle, imported->frst_ofst);
+
+	dev_dbg(hy_drv_priv->dev,
+		"last len %d nents %d domain %d\n",
+		imported->last_len, imported->nents,
+		HYPER_DMABUF_DOM_ID(imported->hid));
+
+	/* first export on this side: map the shared pages once and cache
+	 * the resulting sg_table in 'imported'
+	 */
+	if (!imported->sgt) {
+		dev_dbg(hy_drv_priv->dev,
+			"buffer {id:%d key:%d %d %d} pages not mapped yet\n",
+			imported->hid.id, imported->hid.rng_key[0],
+			imported->hid.rng_key[1], imported->hid.rng_key[2]);
+
+		data_pgs = bknd_ops->map_shared_pages(imported->ref_handle,
+					HYPER_DMABUF_DOM_ID(imported->hid),
+					imported->nents,
+					&imported->refs_info);
+
+		if (!data_pgs) {
+			dev_err(hy_drv_priv->dev,
+				"can't map pages hid {id:%d key:%d %d %d}\n",
+				imported->hid.id, imported->hid.rng_key[0],
+				imported->hid.rng_key[1],
+				imported->hid.rng_key[2]);
+
+			imported->importers--;
+
+			req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+			if (!req) {
+				mutex_unlock(&hy_drv_priv->lock);
+				return -ENOMEM;
+			}
+
+			hyper_dmabuf_create_req(req,
+						HYPER_DMABUF_EXPORT_FD_FAILED,
+						&op[0]);
+			bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req,
+					   false);
+			kfree(req);
+			mutex_unlock(&hy_drv_priv->lock);
+			return -EINVAL;
+		}
+
+		/* NOTE(review): the result is not checked — imported->sgt
+		 * may still be NULL here; confirm hyper_dmabuf_export_fd()
+		 * tolerates that
+		 */
+		imported->sgt = hyper_dmabuf_create_sgt(data_pgs,
+							imported->frst_ofst,
+							imported->last_len,
+							imported->nents);
+
+	}
+
+	export_fd_attr->fd = hyper_dmabuf_export_fd(imported,
+						    export_fd_attr->flags);
+
+	if (export_fd_attr->fd < 0) {
+		/* fail to get fd */
+		ret = export_fd_attr->fd;
+	}
+
+	mutex_unlock(&hy_drv_priv->lock);
+
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
+	return ret;
+}
+
+/* unexport dmabuf from the database and send int req to the source domain
+ * to unmap it.
+ */
+static void delayed_unexport(struct work_struct *work)
+{
+	struct hyper_dmabuf_req *req;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	struct exported_sgt_info *exported =
+		container_of(work, struct exported_sgt_info, unexport.work);
+	int op[4];
+	int i, ret;
+
+	if (!exported)
+		return;
+
+	dev_dbg(hy_drv_priv->dev,
+		"Marking buffer {id:%d key:%d %d %d} as invalid\n",
+		exported->hid.id, exported->hid.rng_key[0],
+		exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+	/* no longer valid */
+	exported->valid = false;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+	if (!req)
+		return;
+
+	op[0] = exported->hid.id;
+
+	for (i = 0; i < 3; i++)
+		op[i+1] = exported->hid.rng_key[i];
+
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &op[0]);
+
+	/* Now send unexport request to remote domain, marking
+	 * that buffer should not be used anymore
+	 */
+	ret = bknd_ops->send_req(exported->rdomid, req, true);
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"unexport message for buffer {id:%d key:%d %d %d} failed\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+	}
+
+	kfree(req);
+	exported->unexport_sched = false;
+
+	/* Immediately clean-up if it has never been exported by importer
+	 * (so no SGT is constructed on importer).
+	 * clean it up later in remote sync when final release ops
+	 * is called (importer does this only when there's no
+	 * consumer of locally exported FDs)
+	 */
+	if (exported->active == 0) {
+		dev_dbg(hy_drv_priv->dev,
+			"cleaning up buffer {id:%d key:%d %d %d} completely\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+		hyper_dmabuf_cleanup_sgt_info(exported, false);
+		hyper_dmabuf_remove_exported(exported->hid);
+
+		/* register hyper_dmabuf_id to the list for reuse */
+		hyper_dmabuf_store_hid(exported->hid);
+
+		/* free the private-data buffer unconditionally —
+		 * kfree(NULL) is a no-op, so no NULL test is needed
+		 * (the old '&& !exported->priv' test was inverted and
+		 * freed only when priv was NULL, leaking the buffer)
+		 */
+		kfree(exported->priv);
+
+		kfree(exported);
+	}
+}
+
+/* IOCTL: schedule deferred unexport of a shared buffer after
+ * 'delay_ms' milliseconds.
+ */
+int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data)
+{
+	struct ioctl_hyper_dmabuf_unexport *attr = data;
+	struct exported_sgt_info *exported;
+
+	dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
+
+	/* find dmabuf in export list */
+	exported = hyper_dmabuf_find_exported(attr->hid);
+
+	dev_dbg(hy_drv_priv->dev,
+		"scheduling unexport of buffer {id:%d key:%d %d %d}\n",
+		attr->hid.id, attr->hid.rng_key[0],
+		attr->hid.rng_key[1], attr->hid.rng_key[2]);
+
+	/* failed to find corresponding entry in export list */
+	if (!exported) {
+		attr->status = -ENOENT;
+		return -ENOENT;
+	}
+
+	/* already scheduled: nothing to do */
+	if (exported->unexport_sched)
+		return 0;
+
+	exported->unexport_sched = true;
+	INIT_DELAYED_WORK(&exported->unexport, delayed_unexport);
+	schedule_delayed_work(&exported->unexport,
+			      msecs_to_jiffies(attr->delay_ms));
+
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
+	return 0;
+}
+
+/* IOCTL: query properties of a buffer; dispatches to the exported or
+ * imported lookup depending on which domain owns the hid.
+ */
+static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
+{
+	struct ioctl_hyper_dmabuf_query *query_attr = data;
+	struct exported_sgt_info *exported;
+	struct imported_sgt_info *imported;
+
+	if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hy_drv_priv->domid) {
+		/* query for exported dmabuf */
+		exported = hyper_dmabuf_find_exported(query_attr->hid);
+		if (!exported) {
+			dev_err(hy_drv_priv->dev,
+				"hid {id:%d key:%d %d %d} not in exp list\n",
+				query_attr->hid.id,
+				query_attr->hid.rng_key[0],
+				query_attr->hid.rng_key[1],
+				query_attr->hid.rng_key[2]);
+			return -ENOENT;
+		}
+
+		return hyper_dmabuf_query_exported(exported,
+						   query_attr->item,
+						   &query_attr->info);
+	}
+
+	/* query for imported dmabuf */
+	imported = hyper_dmabuf_find_imported(query_attr->hid);
+	if (!imported) {
+		dev_err(hy_drv_priv->dev,
+			"hid {id:%d key:%d %d %d} not in imp list\n",
+			query_attr->hid.id,
+			query_attr->hid.rng_key[0],
+			query_attr->hid.rng_key[1],
+			query_attr->hid.rng_key[2]);
+		return -ENOENT;
+	}
+
+	return hyper_dmabuf_query_imported(imported,
+					   query_attr->item,
+					   &query_attr->info);
+}
+
+/* ioctl dispatch table, indexed by _IOC_NR(cmd) — see hyper_dmabuf_ioctl() */
+const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
+	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP,
+			       hyper_dmabuf_tx_ch_setup_ioctl, 0),
+	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP,
+			       hyper_dmabuf_rx_ch_setup_ioctl, 0),
+	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE,
+			       hyper_dmabuf_export_remote_ioctl, 0),
+	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD,
+			       hyper_dmabuf_export_fd_ioctl, 0),
+	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT,
+			       hyper_dmabuf_unexport_ioctl, 0),
+	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY,
+			       hyper_dmabuf_query_ioctl, 0),
+};
+
+/* Common ioctl entry point: bounds-check the command number, copy the
+ * user argument into a kernel buffer, dispatch via hyper_dmabuf_ioctls[]
+ * and copy the (possibly updated) argument back to userspace.
+ */
+long hyper_dmabuf_ioctl(struct file *filp,
+			unsigned int cmd, unsigned long param)
+{
+	const struct hyper_dmabuf_ioctl_desc *ioctl = NULL;
+	unsigned int nr = _IOC_NR(cmd);
+	int ret;
+	hyper_dmabuf_ioctl_t func;
+	char *kdata;
+
+	/* 'nr' is a table index, so it must be strictly less than the
+	 * number of entries; the previous '>' allowed a one-past-the-end
+	 * read of hyper_dmabuf_ioctls[]
+	 */
+	if (nr >= ARRAY_SIZE(hyper_dmabuf_ioctls)) {
+		dev_err(hy_drv_priv->dev, "invalid ioctl\n");
+		return -EINVAL;
+	}
+
+	ioctl = &hyper_dmabuf_ioctls[nr];
+
+	func = ioctl->func;
+
+	if (unlikely(!func)) {
+		dev_err(hy_drv_priv->dev, "no function\n");
+		return -EINVAL;
+	}
+
+	kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+	if (!kdata)
+		return -ENOMEM;
+
+	if (copy_from_user(kdata, (void __user *)param,
+			   _IOC_SIZE(cmd)) != 0) {
+		dev_err(hy_drv_priv->dev,
+			"failed to copy from user arguments\n");
+		ret = -EFAULT;
+		goto ioctl_error;
+	}
+
+	ret = func(filp, kdata);
+
+	/* copy back even on handler failure so out-parameters such as
+	 * 'status' reach the caller
+	 */
+	if (copy_to_user((void __user *)param, kdata,
+			 _IOC_SIZE(cmd)) != 0) {
+		dev_err(hy_drv_priv->dev,
+			"failed to copy to user arguments\n");
+		ret = -EFAULT;
+	}
+
+ioctl_error:
+	kfree(kdata);
+
+	return ret;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h
new file mode 100644
index 0000000..5991a87
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_IOCTL_H__
+#define __HYPER_DMABUF_IOCTL_H__
+
+/* per-ioctl handler; 'data' is a kernel-space copy of the user argument */
+typedef int (*hyper_dmabuf_ioctl_t)(struct file *filp, void *data);
+
+/* one entry of the ioctl dispatch table */
+struct hyper_dmabuf_ioctl_desc {
+	unsigned int cmd;
+	int flags;
+	hyper_dmabuf_ioctl_t func;
+	const char *name;
+};
+
+/* build a dispatch-table entry indexed by the ioctl number */
+#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags) \
+	[_IOC_NR(ioctl)] = { \
+			.cmd = ioctl, \
+			.func = _func, \
+			.flags = _flags, \
+			.name = #ioctl \
+	}
+
+long hyper_dmabuf_ioctl(struct file *filp,
+			unsigned int cmd, unsigned long param);
+
+int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data);
+
+#endif /* __HYPER_DMABUF_IOCTL_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c
new file mode 100644
index 0000000..bba6d1d
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/hashtable.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_event.h"
+
+DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED);
+DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED);
+
+#ifdef CONFIG_HYPER_DMABUF_SYSFS
+/* sysfs read handler for the "imported" attribute: one line per
+ * imported buffer (full hid, nents, validity flag, importer count),
+ * followed by the total number of shared pages.
+ */
+static ssize_t hyper_dmabuf_imported_show(struct device *drv,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct list_entry_imported *info_entry;
+	int bkt;
+	ssize_t count = 0;
+	size_t total = 0;
+
+	hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) {
+		hyper_dmabuf_id_t hid = info_entry->imported->hid;
+		int nents = info_entry->imported->nents;
+		bool valid = info_entry->imported->valid;
+		int num_importers = info_entry->imported->importers;
+
+		total += nents;
+		count += scnprintf(buf + count, PAGE_SIZE - count,
+				   "hid:{%d %d %d %d}, nent:%d, v:%c, numi:%d\n",
+				   hid.id, hid.rng_key[0], hid.rng_key[1],
+				   hid.rng_key[2], nents, (valid ? 't' : 'f'),
+				   num_importers);
+	}
+	/* 'total' is size_t: %zu is correct on all arches (%lu is wrong
+	 * on 32-bit where size_t is unsigned int)
+	 */
+	count += scnprintf(buf + count, PAGE_SIZE - count,
+			   "total nents: %zu\n", total);
+
+	return count;
+}
+
+/* sysfs read handler for the "exported" attribute: one line per
+ * exported buffer (full hid, nents, validity flag, active importer
+ * count), followed by the total number of shared pages.
+ */
+static ssize_t hyper_dmabuf_exported_show(struct device *drv,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct list_entry_exported *info_entry;
+	int bkt;
+	ssize_t count = 0;
+	size_t total = 0;
+
+	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) {
+		hyper_dmabuf_id_t hid = info_entry->exported->hid;
+		int nents = info_entry->exported->nents;
+		bool valid = info_entry->exported->valid;
+		int importer_exported = info_entry->exported->active;
+
+		total += nents;
+		count += scnprintf(buf + count, PAGE_SIZE - count,
+				   "hid:{%d %d %d %d}, nent:%d, v:%c, ie:%d\n",
+				   hid.id, hid.rng_key[0], hid.rng_key[1],
+				   hid.rng_key[2], nents, (valid ? 't' : 'f'),
+				   importer_exported);
+	}
+	/* 'total' is size_t: %zu is correct on all arches (%lu is wrong
+	 * on 32-bit where size_t is unsigned int)
+	 */
+	count += scnprintf(buf + count, PAGE_SIZE - count,
+			   "total nents: %zu\n", total);
+
+	return count;
+}
+
+static DEVICE_ATTR(imported, 0400, hyper_dmabuf_imported_show, NULL);
+static DEVICE_ATTR(exported, 0400, hyper_dmabuf_exported_show, NULL);
+
+/* Create the "imported" and "exported" sysfs attributes on @dev.
+ *
+ * Returns 0 on success or the device_create_file() error code;
+ * on failure any attribute created so far is removed again.
+ */
+int hyper_dmabuf_register_sysfs(struct device *dev)
+{
+	int err;
+
+	err = device_create_file(dev, &dev_attr_imported);
+	if (err < 0)
+		goto err1;
+	err = device_create_file(dev, &dev_attr_exported);
+	if (err < 0)
+		goto err2;
+
+	return 0;
+err2:
+	device_remove_file(dev, &dev_attr_imported);
+err1:
+	/* propagate the real error code instead of a bare -1 */
+	return err;
+}
+
+/* Remove the "imported"/"exported" sysfs attributes created by
+ * hyper_dmabuf_register_sysfs(). Always returns 0.
+ */
+int hyper_dmabuf_unregister_sysfs(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_imported);
+	device_remove_file(dev, &dev_attr_exported);
+	return 0;
+}
+
+#endif
+
+/* Initialize the imported and exported buffer hash tables.
+ * Always returns 0.
+ */
+int hyper_dmabuf_table_init(void)
+{
+	hash_init(hyper_dmabuf_hash_imported);
+	hash_init(hyper_dmabuf_hash_exported);
+	return 0;
+}
+
+/* Tear down the hash tables. Currently a no-op: any entries still
+ * registered at this point (and the sgt_info they reference) are
+ * leaked here rather than freed.
+ */
+int hyper_dmabuf_table_destroy(void)
+{
+	/* TODO: cleanup hyper_dmabuf_hash_imported
+	 * and hyper_dmabuf_hash_exported
+	 */
+	return 0;
+}
+
+/* Link a newly exported buffer into the exported hash table.
+ * The bucket is chosen by the id part of its hyper_dmabuf id.
+ *
+ * Returns 0 on success, -ENOMEM if the list node cannot be allocated.
+ */
+int hyper_dmabuf_register_exported(struct exported_sgt_info *exported)
+{
+	struct list_entry_exported *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->exported = exported;
+	hash_add(hyper_dmabuf_hash_exported, &entry->node, exported->hid.id);
+
+	return 0;
+}
+
+/* Link a newly imported buffer into the imported hash table.
+ * The bucket is chosen by the id part of its hyper_dmabuf id.
+ *
+ * Returns 0 on success, -ENOMEM if the list node cannot be allocated.
+ */
+int hyper_dmabuf_register_imported(struct imported_sgt_info *imported)
+{
+	struct list_entry_imported *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->imported = imported;
+	hash_add(hyper_dmabuf_hash_imported, &entry->node, imported->hid.id);
+
+	return 0;
+}
+
+/* Look up an exported buffer by full hyper_dmabuf id. The bucket is
+ * found via hid.id; the random key must then also match. An id match
+ * with a key mismatch means the caller's hid is stale or forged, so
+ * NULL is returned without scanning further.
+ */
+struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid)
+{
+	struct list_entry_exported *info_entry;
+	int bkt;
+
+	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
+		/* checking hid.id first */
+		if (info_entry->exported->hid.id == hid.id) {
+			/* then key is compared */
+			if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid,
+						    hid))
+				return info_entry->exported;
+
+			/* if key is unmatched, given HID is invalid,
+			 * so returning NULL
+			 */
+			break;
+		}
+
+	return NULL;
+}
+
+/* search for pre-exported sgt and return id of it if it exist */
+/* search for pre-exported sgt and return id of it if it exist */
+/* Match is on the (dma_buf pointer, remote domain) pair. Returns the
+ * sentinel hid {-1, {0, 0, 0}} when nothing matches; callers test
+ * hid.id < 0 for "not found".
+ */
+hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf,
+						 int domid)
+{
+	struct list_entry_exported *info_entry;
+	hyper_dmabuf_id_t hid = {-1, {0, 0, 0} };
+	int bkt;
+
+	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
+		if (info_entry->exported->dma_buf == dmabuf &&
+		    info_entry->exported->rdomid == domid)
+			return info_entry->exported->hid;
+
+	return hid;
+}
+
+/* Look up an imported buffer by full hyper_dmabuf id; same id-then-key
+ * scheme as hyper_dmabuf_find_exported().
+ */
+struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid)
+{
+	struct list_entry_imported *info_entry;
+	int bkt;
+
+	hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
+		/* checking hid.id first */
+		if (info_entry->imported->hid.id == hid.id) {
+			/* then key is compared */
+			if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid,
+						    hid))
+				return info_entry->imported;
+			/* if key is unmatched, given HID is invalid,
+			 * so returning NULL
+			 */
+			break;
+		}
+
+	return NULL;
+}
+
+/* Unlink and free the hash-table node for the exported buffer matching
+ * @hid (id and key). Only the list node is freed; the exported_sgt_info
+ * itself remains owned by the caller.
+ *
+ * Returns 0 on success, -ENOENT if no full match was found.
+ */
+int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid)
+{
+	struct list_entry_exported *info_entry;
+	int bkt;
+
+	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
+		/* checking hid.id first */
+		if (info_entry->exported->hid.id == hid.id) {
+			/* then key is compared */
+			if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid,
+						    hid)) {
+				hash_del(&info_entry->node);
+				kfree(info_entry);
+				return 0;
+			}
+
+			break;
+		}
+
+	return -ENOENT;
+}
+
+/* Unlink and free the hash-table node for the imported buffer matching
+ * @hid (id and key). Only the list node is freed, not the
+ * imported_sgt_info it points to.
+ *
+ * Returns 0 on success, -ENOENT if no full match was found.
+ */
+int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid)
+{
+	struct list_entry_imported *info_entry;
+	int bkt;
+
+	hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
+		/* checking hid.id first */
+		if (info_entry->imported->hid.id == hid.id) {
+			/* then key is compared */
+			if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid,
+						    hid)) {
+				hash_del(&info_entry->node);
+				kfree(info_entry);
+				return 0;
+			}
+
+			break;
+		}
+
+	return -ENOENT;
+}
+
+/* Invoke @func(exported, @attr) on every exported buffer. Uses the
+ * _safe iterator so @func may remove the current entry from the table
+ * (e.g. via hyper_dmabuf_remove_exported) while iterating.
+ */
+void hyper_dmabuf_foreach_exported(
+	void (*func)(struct exported_sgt_info *, void *attr),
+	void *attr)
+{
+	struct list_entry_exported *info_entry;
+	struct hlist_node *tmp;
+	int bkt;
+
+	hash_for_each_safe(hyper_dmabuf_hash_exported, bkt, tmp,
+			   info_entry, node) {
+		func(info_entry->exported, attr);
+	}
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h
new file mode 100644
index 0000000..f7102f5
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_LIST_H__
+#define __HYPER_DMABUF_LIST_H__
+
+#include "hyper_dmabuf_struct.h"
+
+/* number of bits to be used for exported dmabufs hash table */
+#define MAX_ENTRY_EXPORTED 7
+/* number of bits to be used for imported dmabufs hash table */
+#define MAX_ENTRY_IMPORTED 7
+
+/* hash-table node linking one exported buffer into the exported
+ * table (bucket chosen by hid.id)
+ */
+struct list_entry_exported {
+	struct exported_sgt_info *exported;
+	struct hlist_node node;
+};
+
+/* hash-table node linking one imported buffer into the imported
+ * table (bucket chosen by hid.id)
+ */
+struct list_entry_imported {
+	struct imported_sgt_info *imported;
+	struct hlist_node node;
+};
+
+int hyper_dmabuf_table_init(void);
+
+int hyper_dmabuf_table_destroy(void);
+
+int hyper_dmabuf_register_exported(struct exported_sgt_info *info);
+
+/* search for pre-exported sgt and return id of it if it exist */
+hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf,
+						 int domid);
+
+int hyper_dmabuf_register_imported(struct imported_sgt_info *info);
+
+struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid);
+
+struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid);
+
+int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid);
+
+int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid);
+
+/* apply func to every exported entry; safe against removal by func */
+void hyper_dmabuf_foreach_exported(void (*func)(struct exported_sgt_info *,
+				   void *attr), void *attr);
+
+int hyper_dmabuf_register_sysfs(struct device *dev);
+int hyper_dmabuf_unregister_sysfs(struct device *dev);
+
+#endif /* __HYPER_DMABUF_LIST_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c
new file mode 100644
index 0000000..afc1fd6e
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_remote_sync.h"
+#include "hyper_dmabuf_event.h"
+#include "hyper_dmabuf_list.h"
+
+/* Work item wrapping one received request so it can be processed
+ * asynchronously by cmd_process_work() on hy_drv_priv->work_queue.
+ * Both @rq and the cmd_process itself are kfree'd by the worker.
+ */
+struct cmd_process {
+	struct work_struct work;
+	struct hyper_dmabuf_req *rq;	/* private copy of the request */
+	int domid;			/* domain id of the requester */
+};
+
+/* Fill in @req for command @cmd, copying the command-specific operands
+ * out of @op (layout documented per case below). Unknown commands
+ * leave req->op untouched; req->stat always starts NOT_RESPONDED.
+ *
+ * NOTE(review): for HYPER_DMABUF_EXPORT the copy length depends on
+ * op[8] (private-data size) and is not checked against the size of
+ * req->op (MAX_NUMBER_OF_OPERANDS ints) - callers must guarantee the
+ * bound; confirm and consider clamping here.
+ */
+void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
+			     enum hyper_dmabuf_command cmd, int *op)
+{
+	int i;
+
+	req->stat = HYPER_DMABUF_REQ_NOT_RESPONDED;
+	req->cmd = cmd;
+
+	switch (cmd) {
+	/* as exporter, commands to importer */
+	case HYPER_DMABUF_EXPORT:
+		/* exporting pages for dmabuf */
+		/* command : HYPER_DMABUF_EXPORT,
+		 * op0~op3 : hyper_dmabuf_id
+		 * op4 : number of pages to be shared
+		 * op5 : offset of data in the first page
+		 * op6 : length of data in the last page
+		 * op7 : top-level reference number for shared pages
+		 * op8 : size of private data (from op9)
+		 * op9 ~ : Driver-specific private data
+		 *	   (e.g. graphic buffer's meta info)
+		 */
+
+		memcpy(&req->op[0], &op[0], 9 * sizeof(int) + op[8]);
+		break;
+
+	case HYPER_DMABUF_NOTIFY_UNEXPORT:
+		/* destroy sg_list for hyper_dmabuf_id on remote side */
+		/* command : DMABUF_DESTROY,
+		 * op0~op3 : hyper_dmabuf_id_t hid
+		 */
+
+		for (i = 0; i < 4; i++)
+			req->op[i] = op[i];
+		break;
+
+	case HYPER_DMABUF_EXPORT_FD:
+	case HYPER_DMABUF_EXPORT_FD_FAILED:
+		/* dmabuf fd is being created on imported side or importing
+		 * failed
+		 *
+		 * command : HYPER_DMABUF_EXPORT_FD or
+		 *	     HYPER_DMABUF_EXPORT_FD_FAILED,
+		 * op0~op3 : hyper_dmabuf_id
+		 */
+
+		for (i = 0; i < 4; i++)
+			req->op[i] = op[i];
+		break;
+
+	case HYPER_DMABUF_OPS_TO_REMOTE:
+		/* notifying dmabuf map/unmap to importer (probably not needed)
+		 * for dmabuf synchronization
+		 */
+		break;
+
+	case HYPER_DMABUF_OPS_TO_SOURCE:
+		/* notifying dmabuf map/unmap to exporter, map will make
+		 * the driver to do shadow mapping or unmapping for
+		 * synchronization with original exporter (e.g. i915)
+		 *
+		 * command : DMABUF_OPS_TO_SOURCE.
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
+		 */
+		for (i = 0; i < 5; i++)
+			req->op[i] = op[i];
+		break;
+
+	default:
+		/* no command found */
+		return;
+	}
+}
+
+/* Workqueue handler for requests deferred by hyper_dmabuf_msg_parse().
+ * Currently only HYPER_DMABUF_EXPORT does real work here: it either
+ * updates the private data of an already-known import (op[4] == 0) or
+ * creates and registers a new imported_sgt_info from the request
+ * operands. Frees both the request copy and the work item on exit.
+ */
+static void cmd_process_work(struct work_struct *work)
+{
+	struct imported_sgt_info *imported;
+	struct cmd_process *proc = container_of(work,
+						struct cmd_process, work);
+	struct hyper_dmabuf_req *req;
+	int domid;
+	int i;
+
+	req = proc->rq;
+	domid = proc->domid;
+
+	switch (req->cmd) {
+	case HYPER_DMABUF_EXPORT:
+		/* exporting pages for dmabuf */
+		/* command : HYPER_DMABUF_EXPORT,
+		 * op0~op3 : hyper_dmabuf_id
+		 * op4 : number of pages to be shared
+		 * op5 : offset of data in the first page
+		 * op6 : length of data in the last page
+		 * op7 : top-level reference number for shared pages
+		 * op8 : size of private data (from op9)
+		 * op9 ~ : Driver-specific private data
+		 *	   (e.g. graphic buffer's meta info)
+		 */
+
+		/* if nents == 0, it means it is a message only for
+		 * priv synchronization. for existing imported_sgt_info
+		 * so not creating a new one
+		 */
+		if (req->op[4] == 0) {
+			hyper_dmabuf_id_t exist = {req->op[0],
+						   {req->op[1], req->op[2],
+						   req->op[3] } };
+
+			imported = hyper_dmabuf_find_imported(exist);
+
+			if (!imported) {
+				dev_err(hy_drv_priv->dev,
+					"Can't find imported sgt_info\n");
+				break;
+			}
+
+			/* if size of new private data is different,
+			 * we reallocate it.
+			 */
+			if (imported->sz_priv != req->op[8]) {
+				kfree(imported->priv);
+				imported->sz_priv = req->op[8];
+				imported->priv = kcalloc(1, req->op[8],
+							 GFP_KERNEL);
+				if (!imported->priv) {
+					/* set it invalid */
+					imported->valid = 0;
+					break;
+				}
+			}
+
+			/* updating priv data */
+			memcpy(imported->priv, &req->op[9], req->op[8]);
+
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+			/* generating import event */
+			hyper_dmabuf_import_event(imported->hid);
+#endif
+
+			break;
+		}
+
+		/* new import: build the sgt_info from the operands */
+		imported = kcalloc(1, sizeof(*imported), GFP_KERNEL);
+
+		if (!imported)
+			break;
+
+		imported->sz_priv = req->op[8];
+		imported->priv = kcalloc(1, req->op[8], GFP_KERNEL);
+
+		if (!imported->priv) {
+			kfree(imported);
+			break;
+		}
+
+		imported->hid.id = req->op[0];
+
+		for (i = 0; i < 3; i++)
+			imported->hid.rng_key[i] = req->op[i+1];
+
+		imported->nents = req->op[4];
+		imported->frst_ofst = req->op[5];
+		imported->last_len = req->op[6];
+		imported->ref_handle = req->op[7];
+
+		dev_dbg(hy_drv_priv->dev, "DMABUF was exported\n");
+		dev_dbg(hy_drv_priv->dev, "\thid{id:%d key:%d %d %d}\n",
+			req->op[0], req->op[1], req->op[2],
+			req->op[3]);
+		dev_dbg(hy_drv_priv->dev, "\tnents %d\n", req->op[4]);
+		dev_dbg(hy_drv_priv->dev, "\tfirst offset %d\n", req->op[5]);
+		dev_dbg(hy_drv_priv->dev, "\tlast len %d\n", req->op[6]);
+		dev_dbg(hy_drv_priv->dev, "\tgrefid %d\n", req->op[7]);
+
+		memcpy(imported->priv, &req->op[9], req->op[8]);
+
+		imported->valid = true;
+		hyper_dmabuf_register_imported(imported);
+
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+		/* generating import event */
+		hyper_dmabuf_import_event(imported->hid);
+#endif
+
+		break;
+
+	case HYPER_DMABUF_OPS_TO_REMOTE:
+		/* notifying dmabuf map/unmap to importer
+		 * (probably not needed) for dmabuf synchronization
+		 */
+		break;
+
+	default:
+		/* shouldn't get here */
+		break;
+	}
+
+	kfree(req);
+	kfree(proc);
+}
+
+/* Parse one incoming request from domain @domid and dispatch it.
+ *
+ * Commands needing an immediate answer (NOTIFY_UNEXPORT,
+ * OPS_TO_SOURCE, EXPORT_FD, EXPORT_FD_FAILED) are handled inline and
+ * their result is reported via req->stat; everything else is copied
+ * and queued to cmd_process_work() on hy_drv_priv->work_queue.
+ *
+ * Returns the command code on success (including inline-handled
+ * requests whose stat is REQ_ERROR), -EINVAL for a NULL request or an
+ * out-of-range command, -ENOMEM if the deferred copy cannot be made.
+ */
+int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
+{
+	struct cmd_process *proc;
+	struct hyper_dmabuf_req *temp_req;
+	struct imported_sgt_info *imported;
+	struct exported_sgt_info *exported;
+	hyper_dmabuf_id_t hid;
+	int ret;
+
+	if (!req) {
+		dev_err(hy_drv_priv->dev, "request is NULL\n");
+		return -EINVAL;
+	}
+
+	/* op0~op3 carry the hid for every command that uses one */
+	hid.id = req->op[0];
+	hid.rng_key[0] = req->op[1];
+	hid.rng_key[1] = req->op[2];
+	hid.rng_key[2] = req->op[3];
+
+	if ((req->cmd < HYPER_DMABUF_EXPORT) ||
+	    (req->cmd > HYPER_DMABUF_OPS_TO_SOURCE)) {
+		dev_err(hy_drv_priv->dev, "invalid command\n");
+		return -EINVAL;
+	}
+
+	req->stat = HYPER_DMABUF_REQ_PROCESSED;
+
+	/* HYPER_DMABUF_DESTROY requires immediate
+	 * follow up so can't be processed in workqueue
+	 */
+	if (req->cmd == HYPER_DMABUF_NOTIFY_UNEXPORT) {
+		/* destroy sg_list for hyper_dmabuf_id on remote side */
+		/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
+		 * op0~3 : hyper_dmabuf_id
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"processing HYPER_DMABUF_NOTIFY_UNEXPORT\n");
+
+		imported = hyper_dmabuf_find_imported(hid);
+
+		if (imported) {
+			/* if anything is still using dma_buf */
+			if (imported->importers) {
+				/* Buffer is still in use, just mark that
+				 * it should not be allowed to export its fd
+				 * anymore.
+				 */
+				imported->valid = false;
+			} else {
+				/* No one is using buffer, remove it from
+				 * imported list
+				 */
+				hyper_dmabuf_remove_imported(hid);
+				kfree(imported);
+			}
+		} else {
+			req->stat = HYPER_DMABUF_REQ_ERROR;
+		}
+
+		return req->cmd;
+	}
+
+	/* dma buf remote synchronization */
+	if (req->cmd == HYPER_DMABUF_OPS_TO_SOURCE) {
+		/* notifying dmabuf map/unmap to exporter, map will
+		 * make the driver to do shadow mapping
+		 * or unmapping for synchronization with original
+		 * exporter (e.g. i915)
+		 *
+		 * command : DMABUF_OPS_TO_SOURCE.
+		 * op0~3 : hyper_dmabuf_id
+		 * op1 : enum hyper_dmabuf_ops {....}
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__);
+
+		ret = hyper_dmabuf_remote_sync(hid, req->op[4]);
+
+		if (ret)
+			req->stat = HYPER_DMABUF_REQ_ERROR;
+		else
+			req->stat = HYPER_DMABUF_REQ_PROCESSED;
+
+		return req->cmd;
+	}
+
+	/* synchronous dma_buf_fd export */
+	if (req->cmd == HYPER_DMABUF_EXPORT_FD) {
+		/* find a corresponding SGT for the id */
+		dev_dbg(hy_drv_priv->dev,
+			"HYPER_DMABUF_EXPORT_FD for {id:%d key:%d %d %d}\n",
+			hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+
+		exported = hyper_dmabuf_find_exported(hid);
+
+		if (!exported) {
+			dev_err(hy_drv_priv->dev,
+				"buffer {id:%d key:%d %d %d} not found\n",
+				hid.id, hid.rng_key[0], hid.rng_key[1],
+				hid.rng_key[2]);
+
+			req->stat = HYPER_DMABUF_REQ_ERROR;
+		} else if (!exported->valid) {
+			dev_dbg(hy_drv_priv->dev,
+				"Buffer no longer valid {id:%d key:%d %d %d}\n",
+				hid.id, hid.rng_key[0], hid.rng_key[1],
+				hid.rng_key[2]);
+
+			req->stat = HYPER_DMABUF_REQ_ERROR;
+		} else {
+			dev_dbg(hy_drv_priv->dev,
+				"Buffer still valid {id:%d key:%d %d %d}\n",
+				hid.id, hid.rng_key[0], hid.rng_key[1],
+				hid.rng_key[2]);
+
+			/* count the remote side's new fd reference */
+			exported->active++;
+			req->stat = HYPER_DMABUF_REQ_PROCESSED;
+		}
+		return req->cmd;
+	}
+
+	if (req->cmd == HYPER_DMABUF_EXPORT_FD_FAILED) {
+		dev_dbg(hy_drv_priv->dev,
+			"HYPER_DMABUF_EXPORT_FD_FAILED for {id:%d key:%d %d %d}\n",
+			hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+
+		exported = hyper_dmabuf_find_exported(hid);
+
+		if (!exported) {
+			dev_err(hy_drv_priv->dev,
+				"buffer {id:%d key:%d %d %d} not found\n",
+				hid.id, hid.rng_key[0], hid.rng_key[1],
+				hid.rng_key[2]);
+
+			req->stat = HYPER_DMABUF_REQ_ERROR;
+		} else {
+			/* roll back the reference taken by EXPORT_FD */
+			exported->active--;
+			req->stat = HYPER_DMABUF_REQ_PROCESSED;
+		}
+		return req->cmd;
+	}
+
+	dev_dbg(hy_drv_priv->dev,
+		"%s: putting request to workqueue\n", __func__);
+	temp_req = kmalloc(sizeof(*temp_req), GFP_KERNEL);
+
+	if (!temp_req)
+		return -ENOMEM;
+
+	memcpy(temp_req, req, sizeof(*temp_req));
+
+	proc = kcalloc(1, sizeof(struct cmd_process), GFP_KERNEL);
+
+	if (!proc) {
+		kfree(temp_req);
+		return -ENOMEM;
+	}
+
+	proc->rq = temp_req;
+	proc->domid = domid;
+
+	INIT_WORK(&(proc->work), cmd_process_work);
+
+	queue_work(hy_drv_priv->work_queue, &(proc->work));
+
+	return req->cmd;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h
new file mode 100644
index 0000000..9c8a76b
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_MSG_H__
+#define __HYPER_DMABUF_MSG_H__
+
+#define MAX_NUMBER_OF_OPERANDS 64
+
+/* wire format of a request exchanged between domains; op[] layout is
+ * command-specific (see hyper_dmabuf_create_req)
+ */
+struct hyper_dmabuf_req {
+	unsigned int req_id;
+	unsigned int stat;	/* enum hyper_dmabuf_req_feedback */
+	unsigned int cmd;	/* enum hyper_dmabuf_command */
+	unsigned int op[MAX_NUMBER_OF_OPERANDS];
+};
+
+/* wire format of a response; mirrors struct hyper_dmabuf_req */
+struct hyper_dmabuf_resp {
+	unsigned int resp_id;
+	unsigned int stat;
+	unsigned int cmd;
+	unsigned int op[MAX_NUMBER_OF_OPERANDS];
+};
+
+/* inter-domain commands; range checked in hyper_dmabuf_msg_parse */
+enum hyper_dmabuf_command {
+	HYPER_DMABUF_EXPORT = 0x10,
+	HYPER_DMABUF_EXPORT_FD,
+	HYPER_DMABUF_EXPORT_FD_FAILED,
+	HYPER_DMABUF_NOTIFY_UNEXPORT,
+	HYPER_DMABUF_OPS_TO_REMOTE,
+	HYPER_DMABUF_OPS_TO_SOURCE,
+};
+
+/* dma-buf operation codes carried in op[4] of OPS_TO_SOURCE requests */
+enum hyper_dmabuf_ops {
+	HYPER_DMABUF_OPS_ATTACH = 0x1000,
+	HYPER_DMABUF_OPS_DETACH,
+	HYPER_DMABUF_OPS_MAP,
+	HYPER_DMABUF_OPS_UNMAP,
+	HYPER_DMABUF_OPS_RELEASE,
+	HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS,
+	HYPER_DMABUF_OPS_END_CPU_ACCESS,
+	HYPER_DMABUF_OPS_KMAP_ATOMIC,
+	HYPER_DMABUF_OPS_KUNMAP_ATOMIC,
+	HYPER_DMABUF_OPS_KMAP,
+	HYPER_DMABUF_OPS_KUNMAP,
+	HYPER_DMABUF_OPS_MMAP,
+	HYPER_DMABUF_OPS_VMAP,
+	HYPER_DMABUF_OPS_VUNMAP,
+};
+
+/* request status reported back to the sender via req->stat */
+enum hyper_dmabuf_req_feedback {
+	HYPER_DMABUF_REQ_PROCESSED = 0x100,
+	HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP,
+	HYPER_DMABUF_REQ_ERROR,
+	HYPER_DMABUF_REQ_NOT_RESPONDED
+};
+
+/* create a request packet with given command and operands */
+void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
+			     enum hyper_dmabuf_command command,
+			     int *operands);
+
+/* parse incoming request packet (or response) and take
+ * appropriate actions for those
+ */
+int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req);
+
+#endif // __HYPER_DMABUF_MSG_H__
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c
new file mode 100644
index 0000000..e85f619
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_ops.h"
+#include "hyper_dmabuf_sgl_proc.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_list.h"
+
+#define WAIT_AFTER_SYNC_REQ 0
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+/* Return the refcount of the file backing @dma_buf, or -EINVAL when
+ * there is no buffer or no file attached to it.
+ */
+static int dmabuf_refcount(struct dma_buf *dma_buf)
+{
+	if (!dma_buf || !dma_buf->file)
+		return -EINVAL;
+
+	return file_count(dma_buf->file);
+}
+
+/* Send a HYPER_DMABUF_OPS_TO_SOURCE request for buffer @hid carrying
+ * operation code @dmabuf_ops (enum hyper_dmabuf_ops) to the exporting
+ * domain via the backend, so the exporter can shadow the dma-buf op.
+ *
+ * Returns the backend's send_req() result, or -ENOMEM if the request
+ * cannot be allocated. WAIT_AFTER_SYNC_REQ == 0, so the send does not
+ * block waiting for the remote side.
+ */
+static int sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
+{
+	struct hyper_dmabuf_req *req;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	int op[5];
+	int i;
+	int ret;
+
+	/* op0~op3: hid, op4: the dma-buf operation being mirrored */
+	op[0] = hid.id;
+
+	for (i = 0; i < 3; i++)
+		op[i+1] = hid.rng_key[i];
+
+	op[4] = dmabuf_ops;
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+	if (!req)
+		return -ENOMEM;
+
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]);
+
+	/* send request and wait for a response */
+	ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(hid), req,
+				 WAIT_AFTER_SYNC_REQ);
+
+	if (ret < 0) {
+		dev_dbg(hy_drv_priv->dev,
+			"dmabuf sync request failed:%d\n", req->op[4]);
+	}
+
+	kfree(req);
+
+	return ret;
+}
+
+/* dma_buf_ops.attach: mirror the attach to the exporting domain. */
+static int hyper_dmabuf_ops_attach(struct dma_buf *dmabuf,
+				   struct device *dev,
+				   struct dma_buf_attachment *attach)
+{
+	struct imported_sgt_info *imported = attach->dmabuf->priv;
+
+	if (!imported)
+		return -EINVAL;
+
+	return sync_request(imported->hid, HYPER_DMABUF_OPS_ATTACH);
+}
+
+/* dma_buf_ops.detach: mirror the detach to the exporting domain.
+ * Best effort - the sync result is ignored since detach cannot fail.
+ */
+static void hyper_dmabuf_ops_detach(struct dma_buf *dmabuf,
+				    struct dma_buf_attachment *attach)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!attach->dmabuf->priv)
+		return;
+
+	imported = (struct imported_sgt_info *)attach->dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_DETACH);
+}
+
+/* dma_buf_ops.map_dma_buf: build a fresh sg_table from the pages of
+ * the imported sgt, DMA-map it for the attaching device, and notify
+ * the exporting domain. Returns NULL on any failure (allocation,
+ * sg_table creation, or dma_map_sg).
+ */
+static struct sg_table *hyper_dmabuf_ops_map(
+				struct dma_buf_attachment *attachment,
+				enum dma_data_direction dir)
+{
+	struct sg_table *st;
+	struct imported_sgt_info *imported;
+	struct pages_info *pg_info;
+	int ret;
+
+	if (!attachment->dmabuf->priv)
+		return NULL;
+
+	imported = (struct imported_sgt_info *)attachment->dmabuf->priv;
+
+	/* extract pages from sgt */
+	pg_info = hyper_dmabuf_ext_pgs(imported->sgt);
+
+	if (!pg_info)
+		return NULL;
+
+	/* create a new sg_table with extracted pages */
+	st = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst,
+				     pg_info->last_len, pg_info->nents);
+	if (!st)
+		goto err_free_sg;
+
+	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir))
+		goto err_free_sg;
+
+	/* sync result intentionally ignored: mapping already succeeded */
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MAP);
+
+	kfree(pg_info->pgs);
+	kfree(pg_info);
+
+	return st;
+
+err_free_sg:
+	/* st may be NULL when hyper_dmabuf_create_sgt() failed */
+	if (st) {
+		sg_free_table(st);
+		kfree(st);
+	}
+
+	kfree(pg_info->pgs);
+	kfree(pg_info);
+
+	return NULL;
+}
+
+/* dma_buf_ops.unmap_dma_buf: undo hyper_dmabuf_ops_map - DMA-unmap,
+ * free the sg_table, then notify the exporting domain (best effort).
+ */
+static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
+				   struct sg_table *sg,
+				   enum dma_data_direction dir)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!attachment->dmabuf->priv)
+		return;
+
+	imported = (struct imported_sgt_info *)attachment->dmabuf->priv;
+
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+
+	sg_free_table(sg);
+	kfree(sg);
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_UNMAP);
+}
+
+/* dma_buf_ops.release: drop one importer reference. When the last
+ * importer goes away the shared pages are unmapped and the local sgt
+ * freed; if the buffer was also unexported remotely (valid == false)
+ * the imported_sgt_info itself is removed and freed - but only after
+ * the RELEASE sync request has been sent, which needs imported->hid.
+ */
+static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
+{
+	struct imported_sgt_info *imported;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	int ret;
+	int finish;
+
+	if (!dma_buf->priv)
+		return;
+
+	imported = (struct imported_sgt_info *)dma_buf->priv;
+
+	if (!dmabuf_refcount(imported->dma_buf))
+		imported->dma_buf = NULL;
+
+	imported->importers--;
+
+	if (imported->importers == 0) {
+		bknd_ops->unmap_shared_pages(&imported->refs_info,
+					     imported->nents);
+
+		if (imported->sgt) {
+			sg_free_table(imported->sgt);
+			kfree(imported->sgt);
+			imported->sgt = NULL;
+		}
+	}
+
+	/* NOTE(review): the leading 'imported &&' is redundant -
+	 * imported was already dereferenced above
+	 */
+	finish = imported && !imported->valid &&
+		 !imported->importers;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_RELEASE);
+
+	/*
+	 * Check if buffer is still valid and if not remove it
+	 * from imported list. That has to be done after sending
+	 * sync request
+	 */
+	if (finish) {
+		hyper_dmabuf_remove_imported(imported->hid);
+		kfree(imported);
+	}
+}
+
+/* dma_buf_ops.begin_cpu_access: forward to the exporting domain and
+ * propagate the sync result to the caller.
+ */
+static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf,
+					     enum dma_data_direction dir)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return -EINVAL;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
+
+	return ret;
+}
+
+/* dma_buf_ops.end_cpu_access: forward to the exporting domain and
+ * propagate the sync result, mirroring begin_cpu_access (the original
+ * computed ret but unconditionally returned 0).
+ */
+static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf,
+					   enum dma_data_direction dir)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return -EINVAL;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_END_CPU_ACCESS);
+
+	return ret;
+}
+
+/* dma_buf_ops.map_atomic: only notifies the exporter; no local kernel
+ * mapping is created yet, so NULL is always returned.
+ */
+static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf,
+					  unsigned long pgnum)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return NULL;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP_ATOMIC);
+
+	/* TODO: NULL for now. Need to return the addr of mapped region */
+	return NULL;
+}
+
+/* dma_buf_ops.unmap_atomic: notification only (nothing was mapped by
+ * map_atomic); sync result is ignored.
+ */
+static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf,
+					   unsigned long pgnum, void *vaddr)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
+}
+
+/* dma_buf_ops.map: only notifies the exporter; no local kernel
+ * mapping is created yet, so NULL is always returned.
+ */
+static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return NULL;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP);
+
+	/* for now NULL.. need to return the address of mapped region */
+	return NULL;
+}
+
+/* dma_buf_ops.unmap: notification only (nothing was mapped by map);
+ * sync result is ignored.
+ */
+static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum,
+				    void *vaddr)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP);
+}
+
+/* dma_buf_ops.mmap: forwards the notification and returns its result.
+ *
+ * NOTE(review): @vma is never populated here, so a successful return
+ * claims an mmap that maps nothing - presumably intentional for this
+ * RFC stage, but confirm before userspace relies on it.
+ */
+static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf,
+				 struct vm_area_struct *vma)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return -EINVAL;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MMAP);
+
+	return ret;
+}
+
+/* dma_buf_ops.vmap: only notifies the exporter; no vmap is performed
+ * locally, so NULL is always returned.
+ */
+static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return NULL;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_VMAP);
+
+	return NULL;
+}
+
+/* dma_buf_ops.vunmap: notification only (vmap mapped nothing);
+ * sync result is ignored.
+ */
+static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!dmabuf->priv)
+		return;
+
+	imported = (struct imported_sgt_info *)dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_VUNMAP);
+}
+
+/* dma_buf_ops for buffers re-exported on the importing side. All
+ * handlers match the dma_buf_ops prototypes, so no (void *) casts:
+ * casting function pointers through void * is undefined behavior and
+ * would hide any future prototype mismatch from the compiler.
+ */
+static const struct dma_buf_ops hyper_dmabuf_ops = {
+	.attach = hyper_dmabuf_ops_attach,
+	.detach = hyper_dmabuf_ops_detach,
+	.map_dma_buf = hyper_dmabuf_ops_map,
+	.unmap_dma_buf = hyper_dmabuf_ops_unmap,
+	.release = hyper_dmabuf_ops_release,
+	.begin_cpu_access = hyper_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = hyper_dmabuf_ops_end_cpu_access,
+	.map_atomic = hyper_dmabuf_ops_kmap_atomic,
+	.unmap_atomic = hyper_dmabuf_ops_kunmap_atomic,
+	.map = hyper_dmabuf_ops_kmap,
+	.unmap = hyper_dmabuf_ops_kunmap,
+	.mmap = hyper_dmabuf_ops_mmap,
+	.vmap = hyper_dmabuf_ops_vmap,
+	.vunmap = hyper_dmabuf_ops_vunmap,
+};
+
+/* exporting dmabuf as fd */
+/* Re-export the imported buffer as a local dma-buf and install an fd
+ * for it with @flags. Returns the fd, or -1 if the dma-buf could not
+ * be created.
+ */
+int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags)
+{
+	int fd = -1;
+
+	/* call hyper_dmabuf_export_dmabuf and create
+	 * and bind a handle for it then release
+	 */
+	hyper_dmabuf_export_dma_buf(imported);
+
+	if (imported->dma_buf)
+		fd = dma_buf_fd(imported->dma_buf, flags);
+
+	return fd;
+}
+
+/* Create a local dma-buf wrapping @imported, backed by
+ * hyper_dmabuf_ops, and store it in imported->dma_buf.
+ * On failure imported->dma_buf is left NULL, which is what callers
+ * (e.g. hyper_dmabuf_export_fd) test for.
+ */
+void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	exp_info.ops = &hyper_dmabuf_ops;
+
+	/* multiple of PAGE_SIZE, not considering offset */
+	exp_info.size = imported->sgt->nents * PAGE_SIZE;
+	exp_info.flags = /* not sure about flag */ 0;
+	exp_info.priv = imported;
+
+	imported->dma_buf = dma_buf_export(&exp_info);
+
+	/* dma_buf_export() returns ERR_PTR() on failure, which callers
+	 * would mistake for a valid buffer; normalize it to NULL
+	 */
+	if (IS_ERR(imported->dma_buf))
+		imported->dma_buf = NULL;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h
new file mode 100644
index 0000000..c5505a4
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_OPS_H__
+#define __HYPER_DMABUF_OPS_H__
+
+int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags);
+
+void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported);
+
+#endif /* __HYPER_DMABUF_OPS_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c
new file mode 100644
index 0000000..1f2f56b
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/uaccess.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_id.h"
+
+/* total byte size of a shared buffer: full pages minus the offset
+ * into the first page, with the last page counted at its real length
+ */
+#define HYPER_DMABUF_SIZE(nents, first_offset, last_len) \
+	((nents)*PAGE_SIZE - (first_offset) - PAGE_SIZE + (last_len))
+
+/* Answer one HYPER_DMABUF_QUERY_* request for a locally exported
+ * buffer. *info receives the result; for ..._PRIV_INFO it instead
+ * holds the user pointer the private data is copied to.
+ * Returns 0 on success, -EINVAL on unknown query or failed copy.
+ */
+int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
+				int query, unsigned long *info)
+{
+	switch (query) {
+	case HYPER_DMABUF_QUERY_TYPE:
+		*info = EXPORTED;
+		break;
+
+	/* exporting domain of this specific dmabuf*/
+	case HYPER_DMABUF_QUERY_EXPORTER:
+		*info = HYPER_DMABUF_DOM_ID(exported->hid);
+		break;
+
+	/* importing domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_IMPORTER:
+		*info = exported->rdomid;
+		break;
+
+	/* size of dmabuf in byte */
+	case HYPER_DMABUF_QUERY_SIZE:
+		*info = exported->dma_buf->size;
+		break;
+
+	/* whether the buffer is used by importer */
+	case HYPER_DMABUF_QUERY_BUSY:
+		*info = (exported->active > 0);
+		break;
+
+	/* whether the buffer is unexported */
+	case HYPER_DMABUF_QUERY_UNEXPORTED:
+		*info = !exported->valid;
+		break;
+
+	/* whether the buffer is scheduled to be unexported */
+	case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED:
+		/* fixed: was '!exported->unexport_sched', which reported
+		 * the inverse of what this query asks for
+		 */
+		*info = exported->unexport_sched;
+		break;
+
+	/* size of private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
+		*info = exported->sz_priv;
+		break;
+
+	/* copy private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO:
+		if (exported->sz_priv > 0) {
+			int n;
+
+			n = copy_to_user((void __user *) *info,
+					 exported->priv,
+					 exported->sz_priv);
+			if (n != 0)
+				return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+/* Answer one HYPER_DMABUF_QUERY_* request for an imported buffer.
+ * *info receives the result; for ..._PRIV_INFO it instead holds the
+ * user pointer the private data is copied to.
+ * Returns 0 on success, -EINVAL on unknown query or failed copy.
+ */
+int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
+				int query, unsigned long *info)
+{
+	switch (query) {
+	case HYPER_DMABUF_QUERY_TYPE:
+		*info = IMPORTED;
+		break;
+
+	/* exporting domain of this specific dmabuf*/
+	case HYPER_DMABUF_QUERY_EXPORTER:
+		*info = HYPER_DMABUF_DOM_ID(imported->hid);
+		break;
+
+	/* importing domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_IMPORTER:
+		*info = hy_drv_priv->domid;
+		break;
+
+	/* size of dmabuf in byte */
+	case HYPER_DMABUF_QUERY_SIZE:
+		if (imported->dma_buf) {
+			/* if local dma_buf is created (if it's
+			 * ever mapped), retrieve it directly
+			 * from struct dma_buf *
+			 */
+			*info = imported->dma_buf->size;
+		} else {
+			/* calculate it from given nents, frst_ofst
+			 * and last_len
+			 */
+			*info = HYPER_DMABUF_SIZE(imported->nents,
+						  imported->frst_ofst,
+						  imported->last_len);
+		}
+		break;
+
+	/* whether the buffer is used or not */
+	case HYPER_DMABUF_QUERY_BUSY:
+		/* checks if it's used by importer */
+		*info = (imported->importers > 0);
+		break;
+
+	/* whether the buffer is unexported */
+	case HYPER_DMABUF_QUERY_UNEXPORTED:
+		*info = !imported->valid;
+		break;
+
+	/* size of private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
+		*info = imported->sz_priv;
+		break;
+
+	/* copy private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO:
+		if (imported->sz_priv > 0) {
+			int n;
+
+			n = copy_to_user((void __user *)*info,
+					 imported->priv,
+					 imported->sz_priv);
+			if (n != 0)
+				return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h
new file mode 100644
index 0000000..65ae738
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h
@@ -0,0 +1,10 @@
+#ifndef __HYPER_DMABUF_QUERY_H__
+#define __HYPER_DMABUF_QUERY_H__
+
+int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
+ int query, unsigned long *info);
+
+int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
+ int query, unsigned long *info);
+
+#endif // __HYPER_DMABUF_QUERY_H__
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c
new file mode 100644
index 0000000..a82fd7b
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_sgl_proc.h"
+
+/* Whenever importer does dma operations from remote domain,
+ * a notification is sent to the exporter so that exporter
+ * issues equivalent dma operation on the original dma buf
+ * for indirect synchronization via shadow operations.
+ *
+ * All ptrs and references (e.g struct sg_table*,
+ * struct dma_buf_attachment) created via these operations on
+ * exporter's side are kept in stack (implemented as circular
+ * linked-lists) separately so that those can be re-referenced
+ * later when unmapping operations are invoked to free those.
+ *
+ * The very first element on the bottom of each stack holds
+ * is what is created when initial exporting is issued so it
+ * should not be modified or released by this function.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
+{
+	struct exported_sgt_info *exported;
+	struct sgt_list *sgtl;
+	struct attachment_list *attachl;
+	struct kmap_vaddr_list *va_kmapl;
+	struct vmap_vaddr_list *va_vmapl;
+	int ret;
+
+	/* find a coresponding SGT for the id */
+	exported = hyper_dmabuf_find_exported(hid);
+
+	if (!exported) {
+		dev_err(hy_drv_priv->dev,
+			"dmabuf remote sync::can't find exported list\n");
+		return -ENOENT;
+	}
+
+	switch (ops) {
+	case HYPER_DMABUF_OPS_ATTACH:
+		attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
+
+		if (!attachl)
+			return -ENOMEM;
+
+		attachl->attach = dma_buf_attach(exported->dma_buf,
+						 hy_drv_priv->dev);
+
+		/* dma_buf_attach() reports failure with ERR_PTR(),
+		 * not NULL, so a plain !ptr test would miss it
+		 */
+		if (IS_ERR_OR_NULL(attachl->attach)) {
+			kfree(attachl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_ATTACH\n");
+			return -ENOMEM;
+		}
+
+		list_add(&attachl->list, &exported->active_attached->list);
+		break;
+
+	case HYPER_DMABUF_OPS_DETACH:
+		if (list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_DETACH\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf attachment left to be detached\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		dma_buf_detach(exported->dma_buf, attachl->attach);
+		list_del(&attachl->list);
+		kfree(attachl);
+		break;
+
+	case HYPER_DMABUF_OPS_MAP:
+		if (list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf attachment left to be mapped\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
+
+		if (!sgtl)
+			return -ENOMEM;
+
+		sgtl->sgt = dma_buf_map_attachment(attachl->attach,
+						   DMA_BIDIRECTIONAL);
+		/* dma_buf_map_attachment() also reports failure with
+		 * ERR_PTR(), not NULL
+		 */
+		if (IS_ERR_OR_NULL(sgtl->sgt)) {
+			kfree(sgtl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
+			return -ENOMEM;
+		}
+		list_add(&sgtl->list, &exported->active_sgts->list);
+		break;
+
+	case HYPER_DMABUF_OPS_UNMAP:
+		if (list_empty(&exported->active_sgts->list) ||
+		    list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_UNMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no SGT or attach left to be unmapped\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+		sgtl = list_first_entry(&exported->active_sgts->list,
+					struct sgt_list, list);
+
+		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
+					 DMA_BIDIRECTIONAL);
+		list_del(&sgtl->list);
+		kfree(sgtl);
+		break;
+
+	case HYPER_DMABUF_OPS_RELEASE:
+		dev_dbg(hy_drv_priv->dev,
+			"id:%d key:%d %d %d} released, ref left: %d\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2],
+			exported->active - 1);
+
+		exported->active--;
+
+		/* If there are still importers just break, if no then
+		 * continue with final cleanup
+		 */
+		if (exported->active)
+			break;
+
+		/* Importer just released buffer fd, check if there is
+		 * any other importer still using it.
+		 * If not and buffer was unexported, clean up shared
+		 * data and remove that buffer.
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"Buffer {id:%d key:%d %d %d} final released\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+		if (!exported->valid && !exported->active &&
+		    !exported->unexport_sched) {
+			hyper_dmabuf_cleanup_sgt_info(exported, false);
+			hyper_dmabuf_remove_exported(hid);
+			kfree(exported);
+			/* store hyper_dmabuf_id in the list for reuse */
+			hyper_dmabuf_store_hid(hid);
+		}
+
+		break;
+
+	case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
+		ret = dma_buf_begin_cpu_access(exported->dma_buf,
+					       DMA_BIDIRECTIONAL);
+		if (ret) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
+			return ret;
+		}
+		break;
+
+	case HYPER_DMABUF_OPS_END_CPU_ACCESS:
+		ret = dma_buf_end_cpu_access(exported->dma_buf,
+					     DMA_BIDIRECTIONAL);
+		if (ret) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
+			return ret;
+		}
+		break;
+
+	case HYPER_DMABUF_OPS_KMAP_ATOMIC:
+	case HYPER_DMABUF_OPS_KMAP:
+		va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL);
+		if (!va_kmapl)
+			return -ENOMEM;
+
+		/* dummy kmapping of 1 page */
+		if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC)
+			va_kmapl->vaddr = dma_buf_kmap_atomic(
+						exported->dma_buf, 1);
+		else
+			va_kmapl->vaddr = dma_buf_kmap(
+						exported->dma_buf, 1);
+
+		if (!va_kmapl->vaddr) {
+			kfree(va_kmapl);
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+			return -ENOMEM;
+		}
+		list_add(&va_kmapl->list, &exported->va_kmapped->list);
+		break;
+
+	case HYPER_DMABUF_OPS_KUNMAP_ATOMIC:
+	case HYPER_DMABUF_OPS_KUNMAP:
+		if (list_empty(&exported->va_kmapped->list)) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf VA to be freed\n");
+			return -EFAULT;
+		}
+
+		va_kmapl = list_first_entry(&exported->va_kmapped->list,
+					    struct kmap_vaddr_list, list);
+		if (!va_kmapl->vaddr) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			/* fixed: was 'return PTR_ERR(va_kmapl->vaddr)',
+			 * which is 0 (success) for a NULL vaddr
+			 */
+			return -EINVAL;
+		}
+
+		/* unmapping 1 page */
+		if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC)
+			dma_buf_kunmap_atomic(exported->dma_buf,
+					      1, va_kmapl->vaddr);
+		else
+			dma_buf_kunmap(exported->dma_buf,
+				       1, va_kmapl->vaddr);
+
+		list_del(&va_kmapl->list);
+		kfree(va_kmapl);
+		break;
+
+	case HYPER_DMABUF_OPS_MMAP:
+		/* currently not supported: looking for a way to create
+		 * a dummy vma
+		 */
+		dev_warn(hy_drv_priv->dev,
+			 "remote sync::sychronized mmap is not supported\n");
+		break;
+
+	case HYPER_DMABUF_OPS_VMAP:
+		va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL);
+
+		if (!va_vmapl)
+			return -ENOMEM;
+
+		/* dummy vmapping; dma_buf_vmap() returns NULL on failure */
+		va_vmapl->vaddr = dma_buf_vmap(exported->dma_buf);
+
+		if (!va_vmapl->vaddr) {
+			kfree(va_vmapl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VMAP\n");
+			return -ENOMEM;
+		}
+		list_add(&va_vmapl->list, &exported->va_vmapped->list);
+		break;
+
+	case HYPER_DMABUF_OPS_VUNMAP:
+		if (list_empty(&exported->va_vmapped->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf VA to be freed\n");
+			return -EFAULT;
+		}
+		va_vmapl = list_first_entry(&exported->va_vmapped->list,
+					    struct vmap_vaddr_list, list);
+		if (!va_vmapl || va_vmapl->vaddr == NULL) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
+			return -EFAULT;
+		}
+
+		dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
+
+		list_del(&va_vmapl->list);
+		kfree(va_vmapl);
+		break;
+
+	default:
+		/* program should not get here */
+		break;
+	}
+
+	return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h
new file mode 100644
index 0000000..36638928
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_REMOTE_SYNC_H__
+#define __HYPER_DMABUF_REMOTE_SYNC_H__
+
+int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops);
+
+#endif // __HYPER_DMABUF_REMOTE_SYNC_H__
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
new file mode 100644
index 0000000..d15eb17
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_sgl_proc.h"
+
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+/* return total number of pages referenced by a sgt
+ * for pre-calculation of # of pages behind a given sgt
+ *
+ * The first entry is special-cased: its page count is derived from
+ * its length adjusted by the in-page offset; every later entry is
+ * simply rounded up to whole pages.
+ */
+static int get_num_pgs(struct sg_table *sgt)
+{
+	struct scatterlist *sgl;
+	int length, i;
+	/* at least one page */
+	int num_pages = 1;
+
+	sgl = sgt->sgl;
+
+	/* bytes of the first entry beyond its first page */
+	length = sgl->length - PAGE_SIZE + sgl->offset;
+
+	/* round-up */
+	num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE);
+
+	for (i = 1; i < sgt->nents; i++) {
+		sgl = sg_next(sgl);
+
+		/* round-up */
+		num_pages += ((sgl->length + PAGE_SIZE - 1) /
+			      PAGE_SIZE);
+	}
+
+	return num_pages;
+}
+
+/* extract pages directly from struct sg_table
+ *
+ * Flattens every scatterlist entry into a plain page array and
+ * records the geometry (first-page offset, last-page length, total
+ * page count) needed to reconstruct an sg_table on the other side.
+ * Returns a kmalloc'd pages_info (caller frees) or NULL on OOM.
+ */
+struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
+{
+	struct pages_info *pg_info;
+	int i, j, k;
+	int length;
+	struct scatterlist *sgl;
+
+	pg_info = kmalloc(sizeof(*pg_info), GFP_KERNEL);
+	if (!pg_info)
+		return NULL;
+
+	pg_info->pgs = kmalloc_array(get_num_pgs(sgt),
+				     sizeof(struct page *),
+				     GFP_KERNEL);
+
+	if (!pg_info->pgs) {
+		kfree(pg_info);
+		return NULL;
+	}
+
+	sgl = sgt->sgl;
+
+	pg_info->nents = 1;
+	pg_info->frst_ofst = sgl->offset;
+	pg_info->pgs[0] = sg_page(sgl);
+	/* bytes of the first entry beyond its first page */
+	length = sgl->length - PAGE_SIZE + sgl->offset;
+	i = 1;
+
+	while (length > 0) {
+		pg_info->pgs[i] = nth_page(sg_page(sgl), i);
+		length -= PAGE_SIZE;
+		pg_info->nents++;
+		i++;
+	}
+
+	for (j = 1; j < sgt->nents; j++) {
+		sgl = sg_next(sgl);
+		pg_info->pgs[i++] = sg_page(sgl);
+		length = sgl->length - PAGE_SIZE;
+		pg_info->nents++;
+		k = 1;
+
+		while (length > 0) {
+			pg_info->pgs[i++] = nth_page(sg_page(sgl), k++);
+			length -= PAGE_SIZE;
+			pg_info->nents++;
+		}
+	}
+
+	/*
+	 * length at that point will be 0 or negative,
+	 * so to calculate last page size just add it to PAGE_SIZE
+	 */
+	pg_info->last_len = PAGE_SIZE + length;
+
+	return pg_info;
+}
+
+/* create sg_table with given pages and other parameters
+ *
+ * Inverse of hyper_dmabuf_ext_pgs(): rebuilds an sg_table from a flat
+ * page array, applying frst_ofst to the first page and last_len to
+ * the last. Returns a kmalloc'd sg_table (caller frees with
+ * sg_free_table() + kfree()) or NULL on allocation failure.
+ *
+ * NOTE(review): when nents == 1 the single entry is set to
+ * PAGE_SIZE - frst_ofst and last_len is ignored; per the
+ * HYPER_DMABUF_SIZE formula a one-page buffer should be
+ * last_len - frst_ofst bytes -- confirm callers never pass
+ * last_len < PAGE_SIZE with nents == 1.
+ */
+struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs,
+					 int frst_ofst, int last_len,
+					 int nents)
+{
+	struct sg_table *sgt;
+	struct scatterlist *sgl;
+	int i, ret;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+	if (ret) {
+		/* sgt is known non-NULL here; the redundant check
+		 * around this cleanup was dropped
+		 */
+		sg_free_table(sgt);
+		kfree(sgt);
+		return NULL;
+	}
+
+	sgl = sgt->sgl;
+
+	/* first page: honor the offset into the page */
+	sg_set_page(sgl, pgs[0], PAGE_SIZE-frst_ofst, frst_ofst);
+
+	/* middle pages are full pages */
+	for (i = 1; i < nents-1; i++) {
+		sgl = sg_next(sgl);
+		sg_set_page(sgl, pgs[i], PAGE_SIZE, 0);
+	}
+
+	if (nents > 1) /* more than one page */ {
+		sgl = sg_next(sgl);
+		sg_set_page(sgl, pgs[i], last_len, 0);
+	}
+
+	return sgt;
+}
+
+/* Tear down all shadow state for an exported buffer: leftover kmaps,
+ * vmaps, mappings and attachments created by remote_sync, then the
+ * original mapping/attachment, the backend page shares, and the
+ * dma-buf reference itself.
+ *
+ * Returns 0 on success, -EINVAL for a NULL exported, -EPERM when the
+ * importer is still active and force is not set.
+ */
+int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
+				  int force)
+{
+	struct sgt_list *sgtl;
+	struct attachment_list *attachl;
+	struct kmap_vaddr_list *va_kmapl;
+	struct vmap_vaddr_list *va_vmapl;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+
+	if (!exported) {
+		dev_err(hy_drv_priv->dev, "invalid hyper_dmabuf_id\n");
+		return -EINVAL;
+	}
+
+	/* if force != 1, sgt_info can be released only if
+	 * there's no activity on exported dma-buf on importer
+	 * side.
+	 */
+	if (!force &&
+	    exported->active) {
+		dev_warn(hy_drv_priv->dev,
+			 "dma-buf is used by importer\n");
+
+		return -EPERM;
+	}
+
+	/* force == 1 is not recommended */
+	while (!list_empty(&exported->va_kmapped->list)) {
+		va_kmapl = list_first_entry(&exported->va_kmapped->list,
+					    struct kmap_vaddr_list, list);
+
+		dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr);
+		list_del(&va_kmapl->list);
+		kfree(va_kmapl);
+	}
+
+	while (!list_empty(&exported->va_vmapped->list)) {
+		va_vmapl = list_first_entry(&exported->va_vmapped->list,
+					    struct vmap_vaddr_list, list);
+
+		dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
+		list_del(&va_vmapl->list);
+		kfree(va_vmapl);
+	}
+
+	/* drain leftover shadow mappings */
+	while (!list_empty(&exported->active_sgts->list)) {
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		sgtl = list_first_entry(&exported->active_sgts->list,
+					struct sgt_list, list);
+
+		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
+					 DMA_BIDIRECTIONAL);
+		list_del(&sgtl->list);
+		kfree(sgtl);
+	}
+
+	/* drain leftover shadow attachments.
+	 * fixed: this loop re-tested active_sgts->list (already empty
+	 * after the loop above), so attachments were never detached
+	 * and leaked
+	 */
+	while (!list_empty(&exported->active_attached->list)) {
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		dma_buf_detach(exported->dma_buf, attachl->attach);
+		list_del(&attachl->list);
+		kfree(attachl);
+	}
+
+	/* Start cleanup of buffer in reverse order to exporting */
+	bknd_ops->unshare_pages(&exported->refs_info, exported->nents);
+
+	/* unmap dma-buf */
+	dma_buf_unmap_attachment(exported->active_attached->attach,
+				 exported->active_sgts->sgt,
+				 DMA_BIDIRECTIONAL);
+
+	/* detatch dma-buf */
+	dma_buf_detach(exported->dma_buf, exported->active_attached->attach);
+
+	/* close connection to dma-buf completely */
+	dma_buf_put(exported->dma_buf);
+	exported->dma_buf = NULL;
+
+	kfree(exported->active_sgts);
+	kfree(exported->active_attached);
+	kfree(exported->va_kmapped);
+	kfree(exported->va_vmapped);
+	kfree(exported->priv);
+
+	return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
new file mode 100644
index 0000000..869d982
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_IMP_H__
+#define __HYPER_DMABUF_IMP_H__
+
+/* extract pages directly from struct sg_table */
+struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt);
+
+/* create sg_table with given pages and other parameters */
+struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs,
+ int frst_ofst, int last_len,
+ int nents);
+
+int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
+ int force);
+
+void hyper_dmabuf_free_sgt(struct sg_table *sgt);
+
+#endif /* __HYPER_DMABUF_IMP_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h
new file mode 100644
index 0000000..a11f804
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_STRUCT_H__
+#define __HYPER_DMABUF_STRUCT_H__
+
+/* stack of mapped sgts -- one node per shadow dma_buf_map_attachment()
+ * issued on the exporter's side (see hyper_dmabuf_remote_sync)
+ */
+struct sgt_list {
+	struct sg_table *sgt;
+	struct list_head list;
+};
+
+/* stack of attachments -- one node per shadow dma_buf_attach() */
+struct attachment_list {
+	struct dma_buf_attachment *attach;
+	struct list_head list;
+};
+
+/* stack of vaddr mapped via kmap -- one node per shadow kmap */
+struct kmap_vaddr_list {
+	void *vaddr;
+	struct list_head list;
+};
+
+/* stack of vaddr mapped via vmap -- one node per shadow vmap */
+struct vmap_vaddr_list {
+	void *vaddr;
+	struct list_head list;
+};
+
+/* Exporter builds pages_info before sharing pages;
+ * describes the flattened page array plus geometry needed to
+ * rebuild an sg_table on the importer's side
+ */
+struct pages_info {
+	int frst_ofst;	/* byte offset into the first page */
+	int last_len;	/* valid bytes in the last page */
+	int nents;	/* total number of pages in pgs[] */
+	struct page **pgs;
+};
+
+
+/* Exporter stores references to sgt in a hash table
+ * Exporter keeps these references for synchronization
+ * and tracking purposes
+ */
+struct exported_sgt_info {
+ hyper_dmabuf_id_t hid;
+
+ /* VM ID of importer */
+ int rdomid;
+
+ struct dma_buf *dma_buf;
+ int nents;
+
+ /* list for tracking activities on dma_buf */
+ struct sgt_list *active_sgts;
+ struct attachment_list *active_attached;
+ struct kmap_vaddr_list *va_kmapped;
+ struct vmap_vaddr_list *va_vmapped;
+
+ /* set to 0 when unexported. Importer doesn't
+ * do a new mapping of buffer if valid == false
+ */
+ bool valid;
+
+ /* active == true if the buffer is actively used
+ * (mapped) by importer
+ */
+ int active;
+
+ /* hypervisor specific reference data for shared pages */
+ void *refs_info;
+
+ struct delayed_work unexport;
+ bool unexport_sched;
+
+ /* list for file pointers associated with all user space
+ * application that have exported this same buffer to
+ * another VM. This needs to be tracked to know whether
+ * the buffer can be completely freed.
+ */
+ struct file *filp;
+
+ /* size of private */
+ size_t sz_priv;
+
+ /* private data associated with the exported buffer */
+ char *priv;
+};
+
+/* imported_sgt_info contains information about imported DMA_BUF
+ * this info is kept in IMPORT list and asynchorously retrieved and
+ * used to map DMA_BUF on importer VM's side upon export fd ioctl
+ * request from user-space
+ */
+
+struct imported_sgt_info {
+ hyper_dmabuf_id_t hid; /* unique id for shared dmabuf imported */
+
+ /* hypervisor-specific handle to pages */
+ int ref_handle;
+
+ /* offset and size info of DMA_BUF */
+ int frst_ofst;
+ int last_len;
+ int nents;
+
+ struct dma_buf *dma_buf;
+ struct sg_table *sgt;
+
+ void *refs_info;
+ bool valid;
+ int importers;
+
+ /* size of private */
+ size_t sz_priv;
+
+ /* private data associated with the exported buffer */
+ char *priv;
+};
+
+#endif /* __HYPER_DMABUF_STRUCT_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm.c b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm.c
new file mode 100644
index 0000000..4a073ce
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/xenbus.h>
+#include <asm/xen/page.h>
+#include "hyper_dmabuf_xen_comm.h"
+#include "hyper_dmabuf_xen_comm_list.h"
+#include "../hyper_dmabuf_drv.h"
+
+static int export_req_id;
+
+struct hyper_dmabuf_req req_pending = {0};
+
+static void xen_get_domid_delayed(struct work_struct *unused);
+static void xen_init_comm_env_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(get_vm_id_work, xen_get_domid_delayed);
+static DECLARE_DELAYED_WORK(xen_init_comm_env_work, xen_init_comm_env_delayed);
+
+/* Creates entry in xen store that will keep details of all
+ * exporter rings created by this domain
+ */
+static int xen_comm_setup_data_dir(void)
+{
+	char buf[255];
+
+	/* snprintf instead of sprintf: bound the write to the fixed-size
+	 * on-stack buffer
+	 */
+	snprintf(buf, sizeof(buf), "/local/domain/%d/data/hyper_dmabuf",
+		 hy_drv_priv->domid);
+
+	return xenbus_mkdir(XBT_NIL, buf, "");
+}
+
+/* Removes entry from xenstore with exporter ring details.
+ * Other domains that has connected to any of exporter rings
+ * created by this domain, will be notified about removal of
+ * this entry and will treat that as signal to cleanup importer
+ * rings created for this domain
+ */
+static int xen_comm_destroy_data_dir(void)
+{
+	char buf[255];
+
+	/* snprintf instead of sprintf: bound the write to the fixed-size
+	 * on-stack buffer
+	 */
+	snprintf(buf, sizeof(buf), "/local/domain/%d/data/hyper_dmabuf",
+		 hy_drv_priv->domid);
+
+	return xenbus_rm(XBT_NIL, buf, "");
+}
+
+/* Adds xenstore entries with details of exporter ring created
+ * for given remote domain. It requires special daemon running
+ * in dom0 to make sure that given remote domain will have right
+ * permissions to access that data.
+ */
+static int xen_comm_expose_ring_details(int domid, int rdomid,
+					int gref, int port)
+{
+	char buf[255];
+	int ret;
+
+	/* snprintf instead of sprintf: bound the write to the fixed-size
+	 * on-stack buffer
+	 */
+	snprintf(buf, sizeof(buf), "/local/domain/%d/data/hyper_dmabuf/%d",
+		 domid, rdomid);
+
+	/* publish grant ref of the shared ring page */
+	ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to write xenbus entry %s: %d\n",
+			buf, ret);
+
+		return ret;
+	}
+
+	/* publish event channel port for ring notifications */
+	ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to write xenbus entry %s: %d\n",
+			buf, ret);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Queries details of ring exposed by remote domain.
+ */
+static int xen_comm_get_ring_details(int domid, int rdomid,
+				     int *grefid, int *port)
+{
+	char buf[255];
+	int ret;
+
+	/* snprintf instead of sprintf: bound the write to the fixed-size
+	 * on-stack buffer
+	 */
+	snprintf(buf, sizeof(buf), "/local/domain/%d/data/hyper_dmabuf/%d",
+		 rdomid, domid);
+
+	ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid);
+
+	if (ret <= 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to read xenbus entry %s: %d\n",
+			buf, ret);
+
+		/* ret == 0 means nothing was parsed; returning it verbatim
+		 * would look like success to callers that test for non-zero
+		 */
+		return ret ? ret : -EINVAL;
+	}
+
+	ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port);
+
+	if (ret <= 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to read xenbus entry %s: %d\n",
+			buf, ret);
+
+		return ret ? ret : -EINVAL;
+	}
+
+	/* both reads succeeded (ret > 0 here), so the former
+	 * "return (ret <= 0 ? 1 : 0);" conditional was dead code
+	 */
+	return 0;
+}
+
+/* Delayed work: fetch this domain's id from xenstore once xenstore
+ * becomes available, retrying every 500ms until it succeeds.
+ */
+static void xen_get_domid_delayed(struct work_struct *unused)
+{
+	struct xenbus_transaction xbt;
+	int domid, ret;
+
+	/* scheduling another if driver is still running
+	 * and xenstore has not been initialized
+	 */
+	if (likely(xenstored_ready == 0)) {
+		dev_dbg(hy_drv_priv->dev,
+			"Xenstore is not ready yet. Will retry in 500ms\n");
+		schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
+	} else {
+		xenbus_transaction_start(&xbt);
+
+		/* read our own "domid" node; ret <= 0 means the read failed */
+		ret = xenbus_scanf(xbt, "domid", "", "%d", &domid);
+
+		if (ret <= 0)
+			domid = -1;
+
+		xenbus_transaction_end(xbt, 0);
+
+		/* try again since -1 is an invalid id for domain
+		 * (but only if driver is still running)
+		 */
+		if (unlikely(domid == -1)) {
+			dev_dbg(hy_drv_priv->dev,
+				"domid==-1 is invalid. Will retry it in 500ms\n");
+			schedule_delayed_work(&get_vm_id_work,
+					      msecs_to_jiffies(500));
+		} else {
+			dev_info(hy_drv_priv->dev,
+				 "Successfully retrieved domid from Xenstore:%d\n",
+				 domid);
+			hy_drv_priv->domid = domid;
+		}
+	}
+}
+
+/* Returns this domain's id, or -1 if xenstore is not ready yet
+ * (in which case retrieval is rescheduled via delayed work).
+ */
+int xen_be_get_domid(void)
+{
+	struct xenbus_transaction xbt;
+	int domid;
+
+	if (unlikely(xenstored_ready == 0)) {
+		/* kick off the delayed retry path; caller gets -1 for now */
+		xen_get_domid_delayed(NULL);
+		return -1;
+	}
+
+	xenbus_transaction_start(&xbt);
+
+	/* xenbus_scanf returns # of parsed fields; 0 means read failed */
+	if (!xenbus_scanf(xbt, "domid", "", "%d", &domid))
+		domid = -1;
+
+	xenbus_transaction_end(xbt, 0);
+
+	return domid;
+}
+
+/* hand out the next monotonically increasing request id */
+static int xen_comm_next_req_id(void)
+{
+	return ++export_req_id;
+}
+
+/* For now cache latest rings as global variables TODO: keep them in list*/
+static irqreturn_t front_ring_isr(int irq, void *info);
+static irqreturn_t back_ring_isr(int irq, void *info);
+
+/* Callback function that will be called on any change of xenbus path
+ * being watched. Used for detecting creation/destruction of remote
+ * domain exporter ring.
+ *
+ * When remote domain's exporter ring will be detected, importer ring
+ * on this domain will be created.
+ *
+ * When remote domain's exporter ring destruction will be detected it
+ * will cleanup this domain importer ring.
+ *
+ * Destruction can be caused by unloading module by remote domain or
+ * it's crash/force shutdown.
+ */
+static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
+					 const char *path, const char *token)
+{
+	int rdom, ret;
+	/* int (not uint32_t): xen_comm_get_ring_details takes int *,
+	 * so the previous uint32_t locals produced an incompatible
+	 * pointer-type mismatch in the call below
+	 */
+	int grefid, port;
+	struct xen_comm_rx_ring_info *ring_info;
+
+	/* Check which domain has changed its exporter rings */
+	ret = sscanf(watch->node, "/local/domain/%d/", &rdom);
+	if (ret <= 0)
+		return;
+
+	/* Check if we have importer ring for given remote domain already
+	 * created
+	 */
+	ring_info = xen_comm_find_rx_ring(rdom);
+
+	/* Try to query remote domain exporter ring details - if
+	 * that will fail and we have importer ring that means remote
+	 * domains has cleanup its exporter ring, so our importer ring
+	 * is no longer useful.
+	 *
+	 * If querying details will succeed and we don't have importer ring,
+	 * it means that remote domain has setup it for us and we should
+	 * connect to it.
+	 */
+
+	ret = xen_comm_get_ring_details(xen_be_get_domid(),
+					rdom, &grefid, &port);
+
+	if (ring_info && ret != 0) {
+		dev_info(hy_drv_priv->dev,
+			 "Remote exporter closed, cleaninup importer\n");
+		xen_be_cleanup_rx_rbuf(rdom);
+	} else if (!ring_info && ret == 0) {
+		dev_info(hy_drv_priv->dev,
+			 "Registering importer\n");
+		xen_be_init_rx_rbuf(rdom);
+	}
+}
+
+/* exporter needs to generate info for page sharing */
+int xen_be_init_tx_rbuf(int domid)
+{
+	struct xen_comm_tx_ring_info *ring_info;
+	struct xen_comm_sring *sring;
+	struct evtchn_alloc_unbound alloc_unbound;
+	struct evtchn_close close;
+
+	void *shared_ring;
+	int ret;
+
+	/* check if there's any existing tx channel in the table */
+	ring_info = xen_comm_find_tx_ring(domid);
+
+	if (ring_info) {
+		dev_info(hy_drv_priv->dev,
+			 "tx ring ch to domid = %d already exist\ngref = %d, port = %d\n",
+			 ring_info->rdomain, ring_info->gref_ring, ring_info->port);
+		return 0;
+	}
+
+	ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
+
+	if (!ring_info)
+		return -ENOMEM;
+
+	/* from exporter to importer */
+	shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1);
+	if (!shared_ring) {
+		kfree(ring_info);
+		return -ENOMEM;
+	}
+
+	sring = (struct xen_comm_sring *) shared_ring;
+
+	SHARED_RING_INIT(sring);
+
+	FRONT_RING_INIT(&(ring_info->ring_front), sring, PAGE_SIZE);
+
+	ring_info->gref_ring = gnttab_grant_foreign_access(domid,
+							   virt_to_mfn(shared_ring),
+							   0);
+	if (ring_info->gref_ring < 0) {
+		/* fail to get gref; free the ring pages (they used to be
+		 * leaked on this path)
+		 */
+		free_pages((unsigned long)shared_ring, 1);
+		kfree(ring_info);
+		return -EFAULT;
+	}
+
+	alloc_unbound.dom = DOMID_SELF;
+	alloc_unbound.remote_dom = domid;
+	ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+					  &alloc_unbound);
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Cannot allocate event channel\n");
+		/* revoke the grant; this also frees the ring page */
+		gnttab_end_foreign_access(ring_info->gref_ring, 0,
+					  (unsigned long)shared_ring);
+		kfree(ring_info);
+		return -EIO;
+	}
+
+	/* setting up interrupt */
+	ret = bind_evtchn_to_irqhandler(alloc_unbound.port,
+					front_ring_isr, 0,
+					NULL, (void *) ring_info);
+
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to setup event channel\n");
+		close.port = alloc_unbound.port;
+		HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+		/* pass the ring's virtual address: gnttab_end_foreign_access
+		 * expects a page address here (cf. the cleanup path), not
+		 * the mfn that was passed before
+		 */
+		gnttab_end_foreign_access(ring_info->gref_ring, 0,
+					  (unsigned long)shared_ring);
+		kfree(ring_info);
+		return -EIO;
+	}
+
+	ring_info->rdomain = domid;
+	ring_info->irq = ret;
+	ring_info->port = alloc_unbound.port;
+
+	mutex_init(&ring_info->lock);
+
+	dev_dbg(hy_drv_priv->dev,
+		"%s: allocated eventchannel gref %d port: %d irq: %d\n",
+		__func__,
+		ring_info->gref_ring,
+		ring_info->port,
+		ring_info->irq);
+
+	ret = xen_comm_add_tx_ring(ring_info);
+
+	ret = xen_comm_expose_ring_details(xen_be_get_domid(),
+					   domid,
+					   ring_info->gref_ring,
+					   ring_info->port);
+
+	/* Register watch for remote domain exporter ring.
+	 * When remote domain will setup its exporter ring,
+	 * we will automatically connect our importer ring to it.
+	 */
+	ring_info->watch.callback = remote_dom_exporter_watch_cb;
+	ring_info->watch.node = kmalloc(255, GFP_KERNEL);
+
+	if (!ring_info->watch.node) {
+		/* undo everything done above; this path used to leak the
+		 * ring-list entry, the irq binding and the grant
+		 */
+		xen_comm_remove_tx_ring(domid);
+		unbind_from_irqhandler(ring_info->irq, (void *)ring_info);
+		gnttab_end_foreign_access(ring_info->gref_ring, 0,
+					  (unsigned long)shared_ring);
+		kfree(ring_info);
+		return -ENOMEM;
+	}
+
+	/* bounded write into the 255-byte node buffer allocated above */
+	snprintf((char *)ring_info->watch.node, 255,
+		 "/local/domain/%d/data/hyper_dmabuf/%d/port",
+		 domid, xen_be_get_domid());
+
+	register_xenbus_watch(&ring_info->watch);
+
+	return ret;
+}
+
+/* cleans up exporter ring created for given remote domain */
+void xen_be_cleanup_tx_rbuf(int domid)
+{
+	struct xen_comm_tx_ring_info *ring_info;
+	struct xen_comm_rx_ring_info *rx_ring_info;
+
+	/* check if we at all have exporter ring for given rdomain */
+	ring_info = xen_comm_find_tx_ring(domid);
+
+	if (!ring_info)
+		return;
+
+	/* drop the hash-table entry first so no new lookups find it */
+	xen_comm_remove_tx_ring(domid);
+
+	unregister_xenbus_watch(&ring_info->watch);
+	kfree(ring_info->watch.node);
+
+	/* No need to close communication channel, will be done by
+	 * this function
+	 */
+	unbind_from_irqhandler(ring_info->irq, (void *) ring_info);
+
+	/* No need to free sring page, will be freed by this function
+	 * when other side will end its access
+	 */
+	gnttab_end_foreign_access(ring_info->gref_ring, 0,
+				  (unsigned long) ring_info->ring_front.sring);
+
+	kfree(ring_info);
+
+	/* if an importer ring from the same domain exists, reset it to
+	 * a pristine state so indices don't reference the torn-down peer
+	 */
+	rx_ring_info = xen_comm_find_rx_ring(domid);
+	if (!rx_ring_info)
+		return;
+
+	BACK_RING_INIT(&(rx_ring_info->ring_back),
+		       rx_ring_info->ring_back.sring,
+		       PAGE_SIZE);
+}
+
+/* importer needs to know about shared page and port numbers for
+ * ring buffer and event channel
+ */
+int xen_be_init_rx_rbuf(int domid)
+{
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_sring *sring;
+
+	struct page *shared_ring;
+
+	struct gnttab_map_grant_ref *map_ops;
+
+	int ret;
+	int rx_gref, rx_port;
+
+	/* check if there's existing rx ring channel */
+	ring_info = xen_comm_find_rx_ring(domid);
+
+	if (ring_info) {
+		dev_info(hy_drv_priv->dev,
+			 "rx ring ch from domid = %d already exist\n",
+			 ring_info->sdomain);
+
+		return 0;
+	}
+
+	ret = xen_comm_get_ring_details(xen_be_get_domid(), domid,
+					&rx_gref, &rx_port);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Domain %d has not created exporter ring for current domain\n",
+			domid);
+
+		return ret;
+	}
+
+	ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
+
+	if (!ring_info)
+		return -ENOMEM;
+
+	ring_info->sdomain = domid;
+	ring_info->evtchn = rx_port;
+
+	map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL);
+
+	if (!map_ops) {
+		ret = -ENOMEM;
+		goto fail_no_map_ops;
+	}
+
+	if (gnttab_alloc_pages(1, &shared_ring)) {
+		ret = -ENOMEM;
+		goto fail_others;
+	}
+
+	gnttab_set_map_op(&map_ops[0],
+			  (unsigned long)pfn_to_kaddr(
+					page_to_pfn(shared_ring)),
+			  GNTMAP_host_map, rx_gref, domid);
+
+	gnttab_set_unmap_op(&ring_info->unmap_op,
+			    (unsigned long)pfn_to_kaddr(
+					page_to_pfn(shared_ring)),
+			    GNTMAP_host_map, -1);
+
+	ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev, "Cannot map ring\n");
+		ret = -EFAULT;
+		goto fail_free_pages;
+	}
+
+	if (map_ops[0].status) {
+		dev_err(hy_drv_priv->dev, "Ring mapping failed\n");
+		ret = -EFAULT;
+		goto fail_free_pages;
+	}
+
+	ring_info->unmap_op.handle = map_ops[0].handle;
+
+	kfree(map_ops);
+	/* NULL-out so the error labels below cannot double-free it
+	 * (the old code freed map_ops again when the evtchn bind failed)
+	 */
+	map_ops = NULL;
+
+	sring = (struct xen_comm_sring *)pfn_to_kaddr(page_to_pfn(shared_ring));
+
+	BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE);
+
+	ret = bind_interdomain_evtchn_to_irq(domid, rx_port);
+
+	if (ret < 0) {
+		ret = -EIO;
+		goto fail_unmap;
+	}
+
+	ring_info->irq = ret;
+
+	dev_dbg(hy_drv_priv->dev,
+		"%s: bound to eventchannel port: %d irq: %d\n", __func__,
+		rx_port,
+		ring_info->irq);
+
+	ret = xen_comm_add_rx_ring(ring_info);
+
+	/* Setup communication channel in opposite direction */
+	if (!xen_comm_find_tx_ring(domid))
+		ret = xen_be_init_tx_rbuf(domid);
+
+	ret = request_irq(ring_info->irq,
+			  back_ring_isr, 0,
+			  NULL, (void *)ring_info);
+
+	return ret;
+
+	/* error unwinding, innermost resource first; labels fall through */
+fail_unmap:
+	gnttab_unmap_refs(&ring_info->unmap_op, NULL, &shared_ring, 1);
+
+fail_free_pages:
+	gnttab_free_pages(1, &shared_ring);
+
+fail_others:
+	kfree(map_ops);
+
+fail_no_map_ops:
+	kfree(ring_info);
+
+	return ret;
+}
+
+/* cleans up importer ring created for given source domain */
+void xen_be_cleanup_rx_rbuf(int domid)
+{
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_tx_ring_info *tx_ring_info;
+	struct page *shared_ring;
+
+	/* check if we have importer ring created for given sdomain */
+	ring_info = xen_comm_find_rx_ring(domid);
+
+	if (!ring_info)
+		return;
+
+	/* drop the hash-table entry first so no new lookups find it */
+	xen_comm_remove_rx_ring(domid);
+
+	/* no need to close event channel, will be done by that function */
+	unbind_from_irqhandler(ring_info->irq, (void *)ring_info);
+
+	/* unmapping shared ring page */
+	shared_ring = virt_to_page(ring_info->ring_back.sring);
+	gnttab_unmap_refs(&ring_info->unmap_op, NULL, &shared_ring, 1);
+	gnttab_free_pages(1, &shared_ring);
+
+	kfree(ring_info);
+
+	/* if an exporter ring to the same domain exists, reset it to a
+	 * pristine state so indices don't reference the torn-down peer
+	 */
+	tx_ring_info = xen_comm_find_tx_ring(domid);
+	if (!tx_ring_info)
+		return;
+
+	SHARED_RING_INIT(tx_ring_info->ring_front.sring);
+	FRONT_RING_INIT(&(tx_ring_info->ring_front),
+			tx_ring_info->ring_front.sring,
+			PAGE_SIZE);
+}
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(xen_rx_ch_auto_add_work, xen_rx_ch_add_delayed);
+
+#define DOMID_SCAN_START 1 /* domid = 1 */
+#define DOMID_SCAN_END 10 /* domid = 10 */
+
+/* Periodic (10s) scan of domains [DOMID_SCAN_START, DOMID_SCAN_END]:
+ * for every domain that has exposed a tx ring for us in xenstore but
+ * for which we have no rx ring yet, create the importer ring.
+ */
+static void xen_rx_ch_add_delayed(struct work_struct *unused)
+{
+	int ret;
+	char buf[128];
+	int i, dummy;
+
+	dev_dbg(hy_drv_priv->dev,
+		"Scanning new tx channel comming from another domain\n");
+
+	/* check other domains and schedule another work if driver
+	 * is still running and backend is valid
+	 */
+	if (hy_drv_priv &&
+	    hy_drv_priv->initialized) {
+		for (i = DOMID_SCAN_START; i < DOMID_SCAN_END + 1; i++) {
+			/* skip our own domain */
+			if (i == hy_drv_priv->domid)
+				continue;
+
+			sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+				i, hy_drv_priv->domid);
+
+			/* a readable "port" node means domain i exposed a
+			 * ring for us (value itself is not needed here)
+			 */
+			ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", &dummy);
+
+			if (ret > 0) {
+				if (xen_comm_find_rx_ring(i) != NULL)
+					continue;
+
+				ret = xen_be_init_rx_rbuf(i);
+
+				if (!ret)
+					dev_info(hy_drv_priv->dev,
+						 "Done rx ch init for VM %d\n",
+						 i);
+			}
+		}
+
+		/* check every 10 seconds */
+		schedule_delayed_work(&xen_rx_ch_auto_add_work,
+				      msecs_to_jiffies(10000));
+	}
+}
+
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+
+/* Delayed work: finish communication-environment setup (xenstore data
+ * dir) once xenstore is up and our domid is known, retrying every 500ms.
+ */
+void xen_init_comm_env_delayed(struct work_struct *unused)
+{
+	int ret;
+
+	/* scheduling another work if driver is still running
+	 * and xenstore hasn't been initialized or dom_id hasn't
+	 * been correctly retrieved.
+	 */
+	if (likely(xenstored_ready == 0 ||
+	    hy_drv_priv->domid == -1)) {
+		dev_dbg(hy_drv_priv->dev,
+			"Xenstore not ready Will re-try in 500ms\n");
+		schedule_delayed_work(&xen_init_comm_env_work,
+				      msecs_to_jiffies(500));
+	} else {
+		ret = xen_comm_setup_data_dir();
+		if (ret < 0) {
+			dev_err(hy_drv_priv->dev,
+				"Failed to create data dir in Xenstore\n");
+		} else {
+			dev_info(hy_drv_priv->dev,
+				 "Successfully finished comm env init\n");
+			hy_drv_priv->initialized = true;
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+			/* start the periodic rx-channel auto-discovery */
+			xen_rx_ch_add_delayed(NULL);
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+		}
+	}
+}
+
+/* One-time backend init: set up ring hash tables and the xenstore data
+ * dir. Defers to delayed work (returning -1) if xenstore/domid are not
+ * ready yet.
+ */
+int xen_be_init_comm_env(void)
+{
+	int ret;
+
+	xen_comm_ring_table_init();
+
+	if (unlikely(xenstored_ready == 0 ||
+	    hy_drv_priv->domid == -1)) {
+		/* not ready yet; retry asynchronously */
+		xen_init_comm_env_delayed(NULL);
+		return -1;
+	}
+
+	ret = xen_comm_setup_data_dir();
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to create data dir in Xenstore\n");
+	} else {
+		dev_info(hy_drv_priv->dev,
+			 "Successfully finished comm env initialization\n");
+
+		hy_drv_priv->initialized = true;
+	}
+
+	return ret;
+}
+
+/* cleans up all tx/rx rings */
+static void xen_be_cleanup_all_rbufs(void)
+{
+	/* tear down every exporter ring first, then every importer ring */
+	xen_comm_foreach_tx_ring(xen_be_cleanup_tx_rbuf);
+	xen_comm_foreach_rx_ring(xen_be_cleanup_rx_rbuf);
+}
+
+/* Full backend teardown: all rings, then our xenstore data dir (which
+ * signals remote domains to clean up their importer rings).
+ */
+void xen_be_destroy_comm(void)
+{
+	xen_be_cleanup_all_rbufs();
+	xen_comm_destroy_data_dir();
+}
+
+/* Pushes a request onto the tx ring for @domid and notifies the remote
+ * side; if @wait is set, polls up to ~100ms for the response (matched
+ * via the global req_pending).
+ *
+ * Returns 0 on success, -ENOENT if no channel exists, -EIO on ring
+ * errors/timeouts, -EBUSY if the response never arrived.
+ */
+int xen_be_send_req(int domid, struct hyper_dmabuf_req *req,
+		    int wait)
+{
+	struct xen_comm_front_ring *ring;
+	struct hyper_dmabuf_req *new_req;
+	struct xen_comm_tx_ring_info *ring_info;
+	int notify;
+
+	struct timeval tv_start, tv_end;
+	struct timeval tv_diff;
+
+	int timeout = 1000;
+
+	/* find a ring info for the channel */
+	ring_info = xen_comm_find_tx_ring(domid);
+	if (!ring_info) {
+		dev_err(hy_drv_priv->dev,
+			"Can't find ring info for the channel\n");
+		return -ENOENT;
+	}
+
+	ring = &ring_info->ring_front;
+
+	do_gettimeofday(&tv_start);
+
+	/* NOTE(review): RING_FULL is polled before taking ring_info->lock,
+	 * so a concurrent sender may still fill the ring — confirm callers
+	 * serialize, or move this inside the lock
+	 */
+	while (RING_FULL(ring)) {
+		dev_dbg(hy_drv_priv->dev, "RING_FULL\n");
+
+		if (timeout == 0) {
+			dev_err(hy_drv_priv->dev,
+				"Timeout while waiting for an entry in the ring\n");
+			return -EIO;
+		}
+		usleep_range(100, 120);
+		timeout--;
+	}
+
+	timeout = 1000;
+
+	mutex_lock(&ring_info->lock);
+
+	new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
+	if (!new_req) {
+		mutex_unlock(&ring_info->lock);
+		dev_err(hy_drv_priv->dev,
+			"NULL REQUEST\n");
+		return -EIO;
+	}
+
+	req->req_id = xen_comm_next_req_id();
+
+	/* update req_pending with current request */
+	memcpy(&req_pending, req, sizeof(req_pending));
+
+	/* pass current request to the ring */
+	memcpy(new_req, req, sizeof(*new_req));
+
+	ring->req_prod_pvt++;
+
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+	if (notify)
+		notify_remote_via_irq(ring_info->irq);
+
+	if (wait) {
+		/* timeout ends at -1 only if the loop exhausted with no
+		 * response (front_ring_isr updates req_pending.stat)
+		 */
+		while (timeout--) {
+			if (req_pending.stat !=
+			    HYPER_DMABUF_REQ_NOT_RESPONDED)
+				break;
+			usleep_range(100, 120);
+		}
+
+		if (timeout < 0) {
+			mutex_unlock(&ring_info->lock);
+			dev_err(hy_drv_priv->dev,
+				"request timed-out\n");
+			return -EBUSY;
+		}
+
+		/* fix: the old code unlocked here AND at the bottom of the
+		 * function, unbalancing the mutex on the successful wait
+		 * path; the single unlock below now covers all success paths
+		 */
+		do_gettimeofday(&tv_end);
+
+		/* checking time duration for round-trip of a request
+		 * for debugging
+		 */
+		if (tv_end.tv_usec >= tv_start.tv_usec) {
+			tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec;
+			tv_diff.tv_usec = tv_end.tv_usec-tv_start.tv_usec;
+		} else {
+			tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec-1;
+			tv_diff.tv_usec = tv_end.tv_usec+1000000-
+					  tv_start.tv_usec;
+		}
+
+		/* was "sec != 0 && usec > 16000", which skipped logging
+		 * whole-second round-trips with a small usec remainder;
+		 * log whenever the round-trip exceeded ~16ms
+		 */
+		if (tv_diff.tv_sec != 0 || tv_diff.tv_usec > 16000)
+			dev_dbg(hy_drv_priv->dev,
+				"send_req:time diff: %ld sec, %ld usec\n",
+				tv_diff.tv_sec, tv_diff.tv_usec);
+	}
+
+	mutex_unlock(&ring_info->lock);
+
+	return 0;
+}
+
+/* ISR for handling request */
+static irqreturn_t back_ring_isr(int irq, void *info)
+{
+	RING_IDX rc, rp;
+	struct hyper_dmabuf_req req;
+	struct hyper_dmabuf_resp resp;
+
+	int notify, more_to_do;
+	int ret;
+
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_back_ring *ring;
+
+	ring_info = (struct xen_comm_rx_ring_info *)info;
+	ring = &ring_info->ring_back;
+
+	dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
+
+	do {
+		/* consume all requests published so far */
+		rc = ring->req_cons;
+		rp = ring->sring->req_prod;
+		more_to_do = 0;
+		while (rc != rp) {
+			if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
+				break;
+
+			/* copy out before advancing the consumer index so the
+			 * producer can reuse the slot
+			 */
+			memcpy(&req, RING_GET_REQUEST(ring, rc), sizeof(req));
+			ring->req_cons = ++rc;
+
+			ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req);
+
+			/* ret > 0 means the parsed request wants a response */
+			if (ret > 0) {
+				/* preparing a response for the request and
+				 * send it to the requester
+				 */
+				memcpy(&resp, &req, sizeof(resp));
+				memcpy(RING_GET_RESPONSE(ring,
+							 ring->rsp_prod_pvt),
+				       &resp, sizeof(resp));
+				ring->rsp_prod_pvt++;
+
+				dev_dbg(hy_drv_priv->dev,
+					"responding to exporter for req:%d\n",
+					resp.resp_id);
+
+				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring,
+								     notify);
+
+				if (notify)
+					notify_remote_via_irq(ring_info->irq);
+			}
+
+			RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
+		}
+	} while (more_to_do);
+
+	return IRQ_HANDLED;
+}
+
+/* ISR for handling responses */
+static irqreturn_t front_ring_isr(int irq, void *info)
+{
+	/* front ring only care about response from back */
+	struct hyper_dmabuf_resp *resp;
+	RING_IDX i, rp;
+	int more_to_do, ret;
+
+	struct xen_comm_tx_ring_info *ring_info;
+	struct xen_comm_front_ring *ring;
+
+	ring_info = (struct xen_comm_tx_ring_info *)info;
+	ring = &ring_info->ring_front;
+
+	dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
+
+	do {
+		more_to_do = 0;
+		rp = ring->sring->rsp_prod;
+		/* walk every response published since our last consumption */
+		for (i = ring->rsp_cons; i != rp; i++) {
+			resp = RING_GET_RESPONSE(ring, i);
+
+			/* update pending request's status with what is
+			 * in the response
+			 */
+
+			dev_dbg(hy_drv_priv->dev,
+				"getting response from importer\n");
+
+			/* unblocks a waiter in xen_be_send_req, which polls
+			 * req_pending.stat
+			 */
+			if (req_pending.req_id == resp->resp_id)
+				req_pending.stat = resp->stat;
+
+			if (resp->stat == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
+				/* parsing response */
+				ret = hyper_dmabuf_msg_parse(ring_info->rdomain,
+					(struct hyper_dmabuf_req *)resp);
+
+				if (ret < 0) {
+					dev_err(hy_drv_priv->dev,
+						"err while parsing resp\n");
+				}
+			} else if (resp->stat == HYPER_DMABUF_REQ_PROCESSED) {
+				/* for debugging dma_buf remote synch */
+				dev_dbg(hy_drv_priv->dev,
+					"original request = 0x%x\n", resp->cmd);
+				dev_dbg(hy_drv_priv->dev,
+					"got HYPER_DMABUF_REQ_PROCESSED\n");
+			} else if (resp->stat == HYPER_DMABUF_REQ_ERROR) {
+				/* for debugging dma_buf remote synch */
+				dev_dbg(hy_drv_priv->dev,
+					"original request = 0x%x\n", resp->cmd);
+				dev_dbg(hy_drv_priv->dev,
+					"got HYPER_DMABUF_REQ_ERROR\n");
+			}
+		}
+
+		ring->rsp_cons = i;
+
+		/* standard Xen ring re-check to avoid missing a late event */
+		if (i != ring->req_prod_pvt)
+			RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do);
+		else
+			ring->sring->rsp_event = i+1;
+
+	} while (more_to_do);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm.h b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm.h
new file mode 100644
index 0000000..70a2b70
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_COMM_H__
+#define __HYPER_DMABUF_XEN_COMM_H__
+
+#include "xen/interface/io/ring.h"
+#include "xen/xenbus.h"
+#include "../hyper_dmabuf_msg.h"
+
+extern int xenstored_ready;
+
+DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp);
+
+/* state of one exporter (tx) ring created for a remote domain */
+struct xen_comm_tx_ring_info {
+	struct xen_comm_front_ring ring_front;
+	int rdomain;	/* id of the remote (importer) domain */
+	int gref_ring;	/* grant ref of the shared ring page */
+	int irq;	/* irq bound to the event channel */
+	int port;	/* event channel port number */
+	struct mutex lock;	/* serializes request submission */
+	struct xenbus_watch watch;	/* watch on remote exporter's ring node */
+};
+
+/* state of one importer (rx) ring mapped from a remote domain */
+struct xen_comm_rx_ring_info {
+	int sdomain;	/* id of the source (exporter) domain */
+	int irq;
+	int evtchn;
+	struct xen_comm_back_ring ring_back;
+	struct gnttab_unmap_grant_ref unmap_op;	/* for unmapping shared page */
+};
+
+int xen_be_get_domid(void);
+
+int xen_be_init_comm_env(void);
+
+/* exporter needs to generate info for page sharing */
+int xen_be_init_tx_rbuf(int domid);
+
+/* importer needs to know about shared page and port numbers
+ * for ring buffer and event channel
+ */
+int xen_be_init_rx_rbuf(int domid);
+
+/* cleans up exporter ring created for given domain */
+void xen_be_cleanup_tx_rbuf(int domid);
+
+/* cleans up importer ring created for given domain */
+void xen_be_cleanup_rx_rbuf(int domid);
+
+void xen_be_destroy_comm(void);
+
+/* send request to the remote domain */
+int xen_be_send_req(int domid, struct hyper_dmabuf_req *req,
+ int wait);
+
+#endif /* __HYPER_DMABUF_XEN_COMM_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm_list.c b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm_list.c
new file mode 100644
index 0000000..15023db
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm_list.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/hashtable.h>
+#include <xen/grant_table.h>
+#include "../hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_xen_comm.h"
+#include "hyper_dmabuf_xen_comm_list.h"
+
+DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING);
+DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING);
+
+/* initialize both (independent) ring lookup tables to empty */
+void xen_comm_ring_table_init(void)
+{
+	hash_init(xen_comm_tx_ring_hash);
+	hash_init(xen_comm_rx_ring_hash);
+}
+
+/* register a tx ring in the lookup table, keyed by remote domain id */
+int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info)
+{
+	struct xen_comm_tx_ring_info_entry *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->info = ring_info;
+	hash_add(xen_comm_tx_ring_hash, &entry->node, ring_info->rdomain);
+
+	return 0;
+}
+
+/* register an rx ring in the lookup table, keyed by source domain id */
+int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info)
+{
+	struct xen_comm_rx_ring_info_entry *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->info = ring_info;
+	hash_add(xen_comm_rx_ring_hash, &entry->node, ring_info->sdomain);
+
+	return 0;
+}
+
+/* look up the tx ring for @domid; NULL if none is registered */
+struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid)
+{
+	struct xen_comm_tx_ring_info_entry *entry;
+	int bkt;
+
+	/* scan all buckets; the table is expected to stay small */
+	hash_for_each(xen_comm_tx_ring_hash, bkt, entry, node) {
+		if (entry->info->rdomain == domid)
+			return entry->info;
+	}
+
+	return NULL;
+}
+
+/* look up the rx ring for @domid; NULL if none is registered */
+struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid)
+{
+	struct xen_comm_rx_ring_info_entry *entry;
+	int bkt;
+
+	/* scan all buckets; the table is expected to stay small */
+	hash_for_each(xen_comm_rx_ring_hash, bkt, entry, node) {
+		if (entry->info->sdomain == domid)
+			return entry->info;
+	}
+
+	return NULL;
+}
+
+/* drop the tx-ring table entry for @domid (the wrapped ring_info
+ * itself stays owned by the caller); -ENOENT if not found
+ */
+int xen_comm_remove_tx_ring(int domid)
+{
+	struct xen_comm_tx_ring_info_entry *entry;
+	int bkt;
+
+	hash_for_each(xen_comm_tx_ring_hash, bkt, entry, node) {
+		if (entry->info->rdomain != domid)
+			continue;
+
+		hash_del(&entry->node);
+		kfree(entry);
+		return 0;
+	}
+
+	return -ENOENT;
+}
+
+/* drop the rx-ring table entry for @domid (the wrapped ring_info
+ * itself stays owned by the caller); -ENOENT if not found
+ */
+int xen_comm_remove_rx_ring(int domid)
+{
+	struct xen_comm_rx_ring_info_entry *entry;
+	int bkt;
+
+	hash_for_each(xen_comm_rx_ring_hash, bkt, entry, node) {
+		if (entry->info->sdomain != domid)
+			continue;
+
+		hash_del(&entry->node);
+		kfree(entry);
+		return 0;
+	}
+
+	return -ENOENT;
+}
+
+/* invoke @func on every registered tx ring's remote domain id;
+ * the _safe iterator allows @func to remove the current entry
+ */
+void xen_comm_foreach_tx_ring(void (*func)(int domid))
+{
+	struct xen_comm_tx_ring_info_entry *entry;
+	struct hlist_node *tmp;
+	int bkt;
+
+	hash_for_each_safe(xen_comm_tx_ring_hash, bkt, tmp, entry, node)
+		func(entry->info->rdomain);
+}
+
+/* invoke @func on every registered rx ring's source domain id;
+ * the _safe iterator allows @func to remove the current entry
+ */
+void xen_comm_foreach_rx_ring(void (*func)(int domid))
+{
+	struct xen_comm_rx_ring_info_entry *entry;
+	struct hlist_node *tmp;
+	int bkt;
+
+	hash_for_each_safe(xen_comm_rx_ring_hash, bkt, tmp, entry, node)
+		func(entry->info->sdomain);
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm_list.h b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm_list.h
new file mode 100644
index 0000000..8502fe7
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_comm_list.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_COMM_LIST_H__
+#define __HYPER_DMABUF_XEN_COMM_LIST_H__
+
+/* number of bits to be used for exported dmabufs hash table */
+#define MAX_ENTRY_TX_RING 7
+/* number of bits to be used for imported dmabufs hash table */
+#define MAX_ENTRY_RX_RING 7
+
+struct xen_comm_tx_ring_info_entry {
+ struct xen_comm_tx_ring_info *info;
+ struct hlist_node node;
+};
+
+struct xen_comm_rx_ring_info_entry {
+ struct xen_comm_rx_ring_info *info;
+ struct hlist_node node;
+};
+
+void xen_comm_ring_table_init(void);
+
+int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info);
+
+int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info);
+
+int xen_comm_remove_tx_ring(int domid);
+
+int xen_comm_remove_rx_ring(int domid);
+
+struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid);
+
+struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid);
+
+/* iterates over all exporter rings and calls provided
+ * function for each of them
+ */
+void xen_comm_foreach_tx_ring(void (*func)(int domid));
+
+/* iterates over all importer rings and calls provided
+ * function for each of them
+ */
+void xen_comm_foreach_rx_ring(void (*func)(int domid));
+
+#endif // __HYPER_DMABUF_XEN_COMM_LIST_H__
diff --git a/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_drv.c b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_drv.c
new file mode 100644
index 0000000..14ed3bc
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_drv.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include "../hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_xen_comm.h"
+#include "hyper_dmabuf_xen_shm.h"
+
+struct hyper_dmabuf_bknd_ops xen_bknd_ops = {
+ .init = NULL, /* not needed for xen */
+ .cleanup = NULL, /* not needed for xen */
+ .get_vm_id = xen_be_get_domid,
+ .share_pages = xen_be_share_pages,
+ .unshare_pages = xen_be_unshare_pages,
+ .map_shared_pages = (void *)xen_be_map_shared_pages,
+ .unmap_shared_pages = xen_be_unmap_shared_pages,
+ .init_comm_env = xen_be_init_comm_env,
+ .destroy_comm = xen_be_destroy_comm,
+ .init_rx_ch = xen_be_init_rx_rbuf,
+ .init_tx_ch = xen_be_init_tx_rbuf,
+ .send_req = xen_be_send_req,
+};
diff --git a/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_drv.h b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_drv.h
new file mode 100644
index 0000000..a4902b7
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_drv.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_DRV_H__
+#define __HYPER_DMABUF_XEN_DRV_H__
+#include <xen/interface/grant_table.h>
+
+extern struct hyper_dmabuf_bknd_ops xen_bknd_ops;
+
+/* Main purpose of this structure is to keep
+ * all references created or acquired for sharing
+ * pages with another domain for freeing those later
+ * when unsharing.
+ */
+struct xen_shared_pages_info {
+ /* top level refid */
+ grant_ref_t lvl3_gref;
+
+ /* page of top level addressing, it contains refids of 2nd lvl pages */
+ grant_ref_t *lvl3_table;
+
+ /* table of 2nd level pages, that contains refids to data pages */
+ grant_ref_t *lvl2_table;
+
+ /* unmap ops for mapped pages */
+ struct gnttab_unmap_grant_ref *unmap_ops;
+
+ /* data pages to be unmapped */
+ struct page **data_pages;
+};
+
+#endif // __HYPER_DMABUF_XEN_DRV_H__
diff --git a/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_shm.c b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_shm.c
new file mode 100644
index 0000000..c6a15f1
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_shm.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@...el.com>
+ * Mateusz Polrola <mateuszx.potrola@...el.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <xen/grant_table.h>
+#include <asm/xen/page.h>
+#include "hyper_dmabuf_xen_drv.h"
+#include "../hyper_dmabuf_drv.h"
+
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+/*
+ * Creates 2 level page directory structure for referencing shared pages.
+ * Top level page is a single page that contains up to 1024 refids that
+ * point to 2nd level pages.
+ *
+ * Each 2nd level page contains up to 1024 refids that point to shared
+ * data pages.
+ *
+ * There will always be one top level page and number of 2nd level pages
+ * depends on number of shared data pages.
+ *
+ * 3rd level page 2nd level pages Data pages
+ * +-------------------------+ ┌>+--------------------+ ┌>+------------+
+ * |2nd level page 0 refid |---┘ |Data page 0 refid |-┘ |Data page 0 |
+ * |2nd level page 1 refid |---┐ |Data page 1 refid |-┐ +------------+
+ * | ... | | | .... | |
+ * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └>+------------+
+ * +-------------------------+ | | +--------------------+ |Data page 1 |
+ * | | +------------+
+ * | └>+--------------------+
+ * | |Data page 1024 refid|
+ * | |Data page 1025 refid|
+ * | | ... |
+ * | |Data page 2047 refid|
+ * | +--------------------+
+ * |
+ * | .....
+ * └-->+-----------------------+
+ * |Data page 1047552 refid|
+ * |Data page 1047553 refid|
+ * | ... |
+ * |Data page 1048575 refid|
+ * +-----------------------+
+ *
+ * Using such a 2-level structure it is possible to reference up to 4GB of
+ * shared data using a single refid pointing to the top level page.
+ *
+ * Returns refid of top level page.
+ */
+int xen_be_share_pages(struct page **pages, int domid, int nents,
+ void **refs_info)
+{
+ grant_ref_t lvl3_gref;
+ grant_ref_t *lvl2_table;
+ grant_ref_t *lvl3_table;
+
+ /*
+	 * Calculate number of pages needed for 2nd level addressing:
+ */
+ int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+ ((nents % REFS_PER_PAGE) ? 1 : 0));
+
+ struct xen_shared_pages_info *sh_pages_info;
+ int i;
+
+ lvl3_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, 1);
+ lvl2_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs);
+
+ sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
+
+ if (!sh_pages_info)
+ return -ENOMEM;
+
+ *refs_info = (void *)sh_pages_info;
+
+ /* share data pages in readonly mode for security */
+ for (i = 0; i < nents; i++) {
+ lvl2_table[i] = gnttab_grant_foreign_access(domid,
+ pfn_to_mfn(page_to_pfn(pages[i])),
+ true /* read only */);
+ if (lvl2_table[i] == -ENOSPC) {
+ dev_err(hy_drv_priv->dev,
+ "No more space left in grant table\n");
+
+ /* Unshare all already shared pages for lvl2 */
+ while (i--) {
+ gnttab_end_foreign_access_ref(lvl2_table[i], 0);
+ gnttab_free_grant_reference(lvl2_table[i]);
+ }
+ goto err_cleanup;
+ }
+ }
+
+	/* Share 2nd level addressing pages in readonly mode */
+ for (i = 0; i < n_lvl2_grefs; i++) {
+ lvl3_table[i] = gnttab_grant_foreign_access(domid,
+ virt_to_mfn(
+ (unsigned long)lvl2_table+i*PAGE_SIZE),
+ true);
+
+ if (lvl3_table[i] == -ENOSPC) {
+ dev_err(hy_drv_priv->dev,
+ "No more space left in grant table\n");
+
+ /* Unshare all already shared pages for lvl3 */
+ while (i--) {
+ gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+ gnttab_free_grant_reference(lvl3_table[i]);
+ }
+
+ /* Unshare all pages for lvl2 */
+ while (nents--) {
+ gnttab_end_foreign_access_ref(
+ lvl2_table[nents], 0);
+ gnttab_free_grant_reference(lvl2_table[nents]);
+ }
+
+ goto err_cleanup;
+ }
+ }
+
+	/* Share lvl3_table in readonly mode */
+ lvl3_gref = gnttab_grant_foreign_access(domid,
+ virt_to_mfn((unsigned long)lvl3_table),
+ true);
+
+ if (lvl3_gref == -ENOSPC) {
+ dev_err(hy_drv_priv->dev,
+ "No more space left in grant table\n");
+
+ /* Unshare all pages for lvl3 */
+ while (i--) {
+ gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+ gnttab_free_grant_reference(lvl3_table[i]);
+ }
+
+ /* Unshare all pages for lvl2 */
+ while (nents--) {
+ gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
+ gnttab_free_grant_reference(lvl2_table[nents]);
+ }
+
+ goto err_cleanup;
+ }
+
+ /* Store lvl3_table page to be freed later */
+ sh_pages_info->lvl3_table = lvl3_table;
+
+ /* Store lvl2_table pages to be freed later */
+ sh_pages_info->lvl2_table = lvl2_table;
+
+
+ /* Store exported pages refid to be unshared later */
+ sh_pages_info->lvl3_gref = lvl3_gref;
+
+ dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
+ return lvl3_gref;
+
+err_cleanup:
+ free_pages((unsigned long)lvl2_table, n_lvl2_grefs);
+ free_pages((unsigned long)lvl3_table, 1);
+
+ return -ENOSPC;
+}
+
+int xen_be_unshare_pages(void **refs_info, int nents)
+{
+ struct xen_shared_pages_info *sh_pages_info;
+ int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+ ((nents % REFS_PER_PAGE) ? 1 : 0));
+ int i;
+
+ dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
+ sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
+
+ if (sh_pages_info->lvl3_table == NULL ||
+ sh_pages_info->lvl2_table == NULL ||
+ sh_pages_info->lvl3_gref == -1) {
+ dev_warn(hy_drv_priv->dev,
+ "gref table for hyper_dmabuf already cleaned up\n");
+ return 0;
+ }
+
+ /* End foreign access for data pages, but do not free them */
+ for (i = 0; i < nents; i++) {
+ if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i]))
+ dev_warn(hy_drv_priv->dev, "refid not shared !!\n");
+
+ gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0);
+ gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]);
+ }
+
+ /* End foreign access for 2nd level addressing pages */
+ for (i = 0; i < n_lvl2_grefs; i++) {
+ if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i]))
+ dev_warn(hy_drv_priv->dev, "refid not shared !!\n");
+
+ if (!gnttab_end_foreign_access_ref(
+ sh_pages_info->lvl3_table[i], 1))
+ dev_warn(hy_drv_priv->dev, "refid still in use!!!\n");
+
+ gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]);
+ }
+
+ /* End foreign access for top level addressing page */
+ if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref))
+ dev_warn(hy_drv_priv->dev, "gref not shared !!\n");
+
+ gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1);
+ gnttab_free_grant_reference(sh_pages_info->lvl3_gref);
+
+ /* freeing all pages used for 2 level addressing */
+ free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs);
+ free_pages((unsigned long)sh_pages_info->lvl3_table, 1);
+
+ sh_pages_info->lvl3_gref = -1;
+ sh_pages_info->lvl2_table = NULL;
+ sh_pages_info->lvl3_table = NULL;
+ kfree(sh_pages_info);
+ sh_pages_info = NULL;
+
+ dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
+ return 0;
+}
+
+/* Maps provided top level ref id and then return array of pages
+ * containing data refs.
+ */
+struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid,
+ int nents, void **refs_info)
+{
+ struct page *lvl3_table_page;
+ struct page **lvl2_table_pages;
+ struct page **data_pages;
+ struct xen_shared_pages_info *sh_pages_info;
+
+ grant_ref_t *lvl3_table;
+ grant_ref_t *lvl2_table;
+
+ struct gnttab_map_grant_ref lvl3_map_ops;
+ struct gnttab_unmap_grant_ref lvl3_unmap_ops;
+
+ struct gnttab_map_grant_ref *lvl2_map_ops;
+ struct gnttab_unmap_grant_ref *lvl2_unmap_ops;
+
+ struct gnttab_map_grant_ref *data_map_ops;
+ struct gnttab_unmap_grant_ref *data_unmap_ops;
+
+ /* # of grefs in the last page of lvl2 table */
+ int nents_last = (nents - 1) % REFS_PER_PAGE + 1;
+ int n_lvl2_grefs = (nents / REFS_PER_PAGE) +
+ ((nents_last > 0) ? 1 : 0) -
+ (nents_last == REFS_PER_PAGE);
+ int i, j, k;
+
+ dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
+
+ sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
+ *refs_info = (void *) sh_pages_info;
+
+ lvl2_table_pages = kcalloc(n_lvl2_grefs, sizeof(struct page *),
+ GFP_KERNEL);
+
+ data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
+
+ lvl2_map_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_map_ops),
+ GFP_KERNEL);
+
+ lvl2_unmap_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_unmap_ops),
+ GFP_KERNEL);
+
+ data_map_ops = kcalloc(nents, sizeof(*data_map_ops), GFP_KERNEL);
+ data_unmap_ops = kcalloc(nents, sizeof(*data_unmap_ops), GFP_KERNEL);
+
+ /* Map top level addressing page */
+ if (gnttab_alloc_pages(1, &lvl3_table_page)) {
+ dev_err(hy_drv_priv->dev, "Cannot allocate pages\n");
+ return NULL;
+ }
+
+ lvl3_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl3_table_page));
+
+ gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table,
+ GNTMAP_host_map | GNTMAP_readonly,
+ (grant_ref_t)lvl3_gref, domid);
+
+ gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table,
+ GNTMAP_host_map | GNTMAP_readonly, -1);
+
+ if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) {
+ dev_err(hy_drv_priv->dev,
+ "HYPERVISOR map grant ref failed");
+ return NULL;
+ }
+
+ if (lvl3_map_ops.status) {
+ dev_err(hy_drv_priv->dev,
+ "HYPERVISOR map grant ref failed status = %d",
+ lvl3_map_ops.status);
+
+ goto error_cleanup_lvl3;
+ } else {
+ lvl3_unmap_ops.handle = lvl3_map_ops.handle;
+ }
+
+ /* Map all second level pages */
+ if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) {
+ dev_err(hy_drv_priv->dev, "Cannot allocate pages\n");
+ goto error_cleanup_lvl3;
+ }
+
+ for (i = 0; i < n_lvl2_grefs; i++) {
+ lvl2_table = (grant_ref_t *)pfn_to_kaddr(
+ page_to_pfn(lvl2_table_pages[i]));
+ gnttab_set_map_op(&lvl2_map_ops[i],
+ (unsigned long)lvl2_table, GNTMAP_host_map |
+ GNTMAP_readonly,
+ lvl3_table[i], domid);
+ gnttab_set_unmap_op(&lvl2_unmap_ops[i],
+ (unsigned long)lvl2_table, GNTMAP_host_map |
+ GNTMAP_readonly, -1);
+ }
+
+ /* Unmap top level page, as it won't be needed any longer */
+ if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL,
+ &lvl3_table_page, 1)) {
+ dev_err(hy_drv_priv->dev,
+ "xen: cannot unmap top level page\n");
+ return NULL;
+ }
+
+ /* Mark that page was unmapped */
+ lvl3_unmap_ops.handle = -1;
+
+ if (gnttab_map_refs(lvl2_map_ops, NULL,
+ lvl2_table_pages, n_lvl2_grefs)) {
+ dev_err(hy_drv_priv->dev,
+ "HYPERVISOR map grant ref failed");
+ return NULL;
+ }
+
+ /* Checks if pages were mapped correctly */
+ for (i = 0; i < n_lvl2_grefs; i++) {
+ if (lvl2_map_ops[i].status) {
+ dev_err(hy_drv_priv->dev,
+ "HYPERVISOR map grant ref failed status = %d",
+ lvl2_map_ops[i].status);
+ goto error_cleanup_lvl2;
+ } else {
+ lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle;
+ }
+ }
+
+ if (gnttab_alloc_pages(nents, data_pages)) {
+ dev_err(hy_drv_priv->dev,
+ "Cannot allocate pages\n");
+ goto error_cleanup_lvl2;
+ }
+
+ k = 0;
+
+ for (i = 0; i < n_lvl2_grefs - 1; i++) {
+ lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
+ for (j = 0; j < REFS_PER_PAGE; j++) {
+ gnttab_set_map_op(&data_map_ops[k],
+ (unsigned long)pfn_to_kaddr(
+ page_to_pfn(data_pages[k])),
+ GNTMAP_host_map | GNTMAP_readonly,
+ lvl2_table[j], domid);
+
+ gnttab_set_unmap_op(&data_unmap_ops[k],
+ (unsigned long)pfn_to_kaddr(
+ page_to_pfn(data_pages[k])),
+ GNTMAP_host_map | GNTMAP_readonly, -1);
+ k++;
+ }
+ }
+
+ /* for grefs in the last lvl2 table page */
+ lvl2_table = pfn_to_kaddr(page_to_pfn(
+ lvl2_table_pages[n_lvl2_grefs - 1]));
+
+ for (j = 0; j < nents_last; j++) {
+ gnttab_set_map_op(&data_map_ops[k],
+ (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+ GNTMAP_host_map | GNTMAP_readonly,
+ lvl2_table[j], domid);
+
+ gnttab_set_unmap_op(&data_unmap_ops[k],
+ (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
+ GNTMAP_host_map | GNTMAP_readonly, -1);
+ k++;
+ }
+
+ if (gnttab_map_refs(data_map_ops, NULL,
+ data_pages, nents)) {
+ dev_err(hy_drv_priv->dev,
+ "HYPERVISOR map grant ref failed\n");
+ return NULL;
+ }
+
+ /* unmapping lvl2 table pages */
+ if (gnttab_unmap_refs(lvl2_unmap_ops,
+ NULL, lvl2_table_pages,
+ n_lvl2_grefs)) {
+ dev_err(hy_drv_priv->dev,
+ "Cannot unmap 2nd level refs\n");
+ return NULL;
+ }
+
+ /* Mark that pages were unmapped */
+ for (i = 0; i < n_lvl2_grefs; i++)
+ lvl2_unmap_ops[i].handle = -1;
+
+ for (i = 0; i < nents; i++) {
+ if (data_map_ops[i].status) {
+ dev_err(hy_drv_priv->dev,
+ "HYPERVISOR map grant ref failed status = %d\n",
+ data_map_ops[i].status);
+ goto error_cleanup_data;
+ } else {
+ data_unmap_ops[i].handle = data_map_ops[i].handle;
+ }
+ }
+
+ /* store these references for unmapping in the future */
+ sh_pages_info->unmap_ops = data_unmap_ops;
+ sh_pages_info->data_pages = data_pages;
+
+ gnttab_free_pages(1, &lvl3_table_page);
+ gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages);
+ kfree(lvl2_table_pages);
+ kfree(lvl2_map_ops);
+ kfree(lvl2_unmap_ops);
+ kfree(data_map_ops);
+
+ dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
+ return data_pages;
+
+error_cleanup_data:
+ gnttab_unmap_refs(data_unmap_ops, NULL, data_pages,
+ nents);
+
+ gnttab_free_pages(nents, data_pages);
+
+error_cleanup_lvl2:
+ if (lvl2_unmap_ops[0].handle != -1)
+ gnttab_unmap_refs(lvl2_unmap_ops, NULL,
+ lvl2_table_pages, n_lvl2_grefs);
+ gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages);
+
+error_cleanup_lvl3:
+ if (lvl3_unmap_ops.handle != -1)
+ gnttab_unmap_refs(&lvl3_unmap_ops, NULL,
+ &lvl3_table_page, 1);
+ gnttab_free_pages(1, &lvl3_table_page);
+
+ kfree(lvl2_table_pages);
+ kfree(lvl2_map_ops);
+ kfree(lvl2_unmap_ops);
+ kfree(data_map_ops);
+
+
+ return NULL;
+}
+
+int xen_be_unmap_shared_pages(void **refs_info, int nents)
+{
+ struct xen_shared_pages_info *sh_pages_info;
+
+ dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
+
+ sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
+
+ if (sh_pages_info->unmap_ops == NULL ||
+ sh_pages_info->data_pages == NULL) {
+ dev_warn(hy_drv_priv->dev,
+ "pages already cleaned up or buffer not imported yet\n");
+ return 0;
+ }
+
+ if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL,
+ sh_pages_info->data_pages, nents)) {
+ dev_err(hy_drv_priv->dev, "Cannot unmap data pages\n");
+ return -EFAULT;
+ }
+
+ gnttab_free_pages(nents, sh_pages_info->data_pages);
+
+ kfree(sh_pages_info->data_pages);
+ kfree(sh_pages_info->unmap_ops);
+ sh_pages_info->unmap_ops = NULL;
+ sh_pages_info->data_pages = NULL;
+ kfree(sh_pages_info);
+ sh_pages_info = NULL;
+
+ dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
+ return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_shm.h b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_shm.h
new file mode 100644
index 0000000..d5236b5
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen-backend/hyper_dmabuf_xen_shm.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_SHM_H__
+#define __HYPER_DMABUF_XEN_SHM_H__
+
+/* This collects all reference numbers for 2nd level shared pages and
+ * creates a table with those in the 1st level shared page, then returns the
+ * reference number for this top level table.
+ */
+int xen_be_share_pages(struct page **pages, int domid, int nents,
+ void **refs_info);
+
+int xen_be_unshare_pages(void **refs_info, int nents);
+
+/* Maps provided top level ref id and then return array of pages containing
+ * data refs.
+ */
+struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid,
+ int nents,
+ void **refs_info);
+
+int xen_be_unmap_shared_pages(void **refs_info, int nents);
+
+#endif /* __HYPER_DMABUF_XEN_SHM_H__ */
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b59b0e3..6aa302d 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -321,6 +321,6 @@ config XEN_SYMS
config XEN_HAVE_VPMU
bool
-source "drivers/xen/hyper_dmabuf/Kconfig"
+source "drivers/dma-buf/hyper_dmabuf/Kconfig"
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index a6e253a..ede7082 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_X86) += fallback.o
obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o
obj-y += events/
obj-y += xenbus/
-obj-y += hyper_dmabuf/
+obj-y += ../dma-buf/hyper_dmabuf/
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_features.o := $(nostackp)
diff --git a/drivers/xen/hyper_dmabuf/Kconfig b/drivers/xen/hyper_dmabuf/Kconfig
deleted file mode 100644
index 5efcd44..0000000
--- a/drivers/xen/hyper_dmabuf/Kconfig
+++ /dev/null
@@ -1,42 +0,0 @@
-menu "hyper_dmabuf options"
-
-config HYPER_DMABUF
- tristate "Enables hyper dmabuf driver"
- default y
-
-config HYPER_DMABUF_XEN
- bool "Configure hyper_dmabuf for XEN hypervisor"
- default y
- depends on HYPER_DMABUF
- help
- Configuring hyper_dmabuf driver for XEN hypervisor
-
-config HYPER_DMABUF_SYSFS
- bool "Enable sysfs information about hyper DMA buffers"
- default y
- depends on HYPER_DMABUF
- help
- Expose information about imported and exported buffers using
- hyper_dmabuf driver
-
-config HYPER_DMABUF_EVENT_GEN
- bool "Enable event-generation and polling operation"
- default n
- depends on HYPER_DMABUF
- help
- With this config enabled, hyper_dmabuf driver on the importer side
- generates events and queue those up in the event list whenever a new
- shared DMA-BUF is available. Events in the list can be retrieved by
- read operation.
-
-config HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
- bool "Enable automatic rx-ch add with 10 secs interval"
- default y
- depends on HYPER_DMABUF && HYPER_DMABUF_XEN
- help
- If enabled, driver reads a node in xenstore every 10 seconds
- to check whether there is any tx comm ch configured by another
- domain then initialize matched rx comm ch automatically for any
- existing tx comm chs.
-
-endmenu
diff --git a/drivers/xen/hyper_dmabuf/Makefile b/drivers/xen/hyper_dmabuf/Makefile
deleted file mode 100644
index a113bfc..0000000
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-TARGET_MODULE:=hyper_dmabuf
-
-PLATFORM:=XEN
-
-# If we running by kernel building system
-ifneq ($(KERNELRELEASE),)
- $(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \
- hyper_dmabuf_ioctl.o \
- hyper_dmabuf_list.o \
- hyper_dmabuf_sgl_proc.o \
- hyper_dmabuf_ops.o \
- hyper_dmabuf_msg.o \
- hyper_dmabuf_id.o \
- hyper_dmabuf_remote_sync.o \
- hyper_dmabuf_query.o \
-
-ifeq ($(CONFIG_HYPER_DMABUF_EVENT_GEN), y)
- $(TARGET_MODULE)-objs += hyper_dmabuf_event.o
-endif
-
-ifeq ($(CONFIG_HYPER_DMABUF_XEN), y)
- $(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \
- xen/hyper_dmabuf_xen_comm_list.o \
- xen/hyper_dmabuf_xen_shm.o \
- xen/hyper_dmabuf_xen_drv.o
-endif
-
-obj-$(CONFIG_HYPER_DMABUF) := $(TARGET_MODULE).o
-
-# If we are running without kernel build system
-else
-BUILDSYSTEM_DIR?=../../../
-PWD:=$(shell pwd)
-
-all :
-# run kernel build system to make module
-$(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules
-
-clean:
-# run kernel build system to cleanup in current directory
-$(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) clean
-
-load:
- insmod ./$(TARGET_MODULE).ko
-
-unload:
- rmmod ./$(TARGET_MODULE).ko
-
-endif
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
deleted file mode 100644
index eead4c0..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/uaccess.h>
-#include <linux/poll.h>
-#include <linux/dma-buf.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_ioctl.h"
-#include "hyper_dmabuf_list.h"
-#include "hyper_dmabuf_id.h"
-#include "hyper_dmabuf_event.h"
-
-#ifdef CONFIG_HYPER_DMABUF_XEN
-#include "xen/hyper_dmabuf_xen_drv.h"
-#endif
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Intel Corporation");
-
-struct hyper_dmabuf_private *hy_drv_priv;
-
-static void force_free(struct exported_sgt_info *exported,
- void *attr)
-{
- struct ioctl_hyper_dmabuf_unexport unexport_attr;
- struct file *filp = (struct file *)attr;
-
- if (!filp || !exported)
- return;
-
- if (exported->filp == filp) {
- dev_dbg(hy_drv_priv->dev,
- "Forcefully releasing buffer {id:%d key:%d %d %d}\n",
- exported->hid.id, exported->hid.rng_key[0],
- exported->hid.rng_key[1], exported->hid.rng_key[2]);
-
- unexport_attr.hid = exported->hid;
- unexport_attr.delay_ms = 0;
-
- hyper_dmabuf_unexport_ioctl(filp, &unexport_attr);
- }
-}
-
-static int hyper_dmabuf_open(struct inode *inode, struct file *filp)
-{
- int ret = 0;
-
- /* Do not allow exclusive open */
- if (filp->f_flags & O_EXCL)
- return -EBUSY;
-
- return ret;
-}
-
-static int hyper_dmabuf_release(struct inode *inode, struct file *filp)
-{
- hyper_dmabuf_foreach_exported(force_free, filp);
-
- return 0;
-}
-
-#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
-
-static unsigned int hyper_dmabuf_event_poll(struct file *filp,
- struct poll_table_struct *wait)
-{
- poll_wait(filp, &hy_drv_priv->event_wait, wait);
-
- if (!list_empty(&hy_drv_priv->event_list))
- return POLLIN | POLLRDNORM;
-
- return 0;
-}
-
-static ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- int ret;
-
- /* only root can read events */
- if (!capable(CAP_DAC_OVERRIDE)) {
- dev_err(hy_drv_priv->dev,
- "Only root can read events\n");
- return -EPERM;
- }
-
- /* make sure user buffer can be written */
- if (!access_ok(VERIFY_WRITE, buffer, count)) {
- dev_err(hy_drv_priv->dev,
- "User buffer can't be written.\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock);
- if (ret)
- return ret;
-
- while (1) {
- struct hyper_dmabuf_event *e = NULL;
-
- spin_lock_irq(&hy_drv_priv->event_lock);
- if (!list_empty(&hy_drv_priv->event_list)) {
- e = list_first_entry(&hy_drv_priv->event_list,
- struct hyper_dmabuf_event, link);
- list_del(&e->link);
- }
- spin_unlock_irq(&hy_drv_priv->event_lock);
-
- if (!e) {
- if (ret)
- break;
-
- if (filp->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
-
- mutex_unlock(&hy_drv_priv->event_read_lock);
- ret = wait_event_interruptible(hy_drv_priv->event_wait,
- !list_empty(&hy_drv_priv->event_list));
-
- if (ret == 0)
- ret = mutex_lock_interruptible(
- &hy_drv_priv->event_read_lock);
-
- if (ret)
- return ret;
- } else {
- unsigned int length = (sizeof(e->event_data.hdr) +
- e->event_data.hdr.size);
-
- if (length > count - ret) {
-put_back_event:
- spin_lock_irq(&hy_drv_priv->event_lock);
- list_add(&e->link, &hy_drv_priv->event_list);
- spin_unlock_irq(&hy_drv_priv->event_lock);
- break;
- }
-
- if (copy_to_user(buffer + ret, &e->event_data.hdr,
- sizeof(e->event_data.hdr))) {
- if (ret == 0)
- ret = -EFAULT;
-
- goto put_back_event;
- }
-
- ret += sizeof(e->event_data.hdr);
-
- if (copy_to_user(buffer + ret, e->event_data.data,
- e->event_data.hdr.size)) {
- /* error while copying void *data */
-
- struct hyper_dmabuf_event_hdr dummy_hdr = {0};
-
- ret -= sizeof(e->event_data.hdr);
-
- /* nullifying hdr of the event in user buffer */
- if (copy_to_user(buffer + ret, &dummy_hdr,
- sizeof(dummy_hdr))) {
- dev_err(hy_drv_priv->dev,
- "failed to nullify invalid hdr already in userspace\n");
- }
-
- ret = -EFAULT;
-
- goto put_back_event;
- }
-
- ret += e->event_data.hdr.size;
- hy_drv_priv->pending--;
- kfree(e);
- }
- }
-
- mutex_unlock(&hy_drv_priv->event_read_lock);
-
- return ret;
-}
-
-#endif
-
-static const struct file_operations hyper_dmabuf_driver_fops = {
- .owner = THIS_MODULE,
- .open = hyper_dmabuf_open,
- .release = hyper_dmabuf_release,
-
-/* poll and read interfaces are needed only for event-polling */
-#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
- .read = hyper_dmabuf_event_read,
- .poll = hyper_dmabuf_event_poll,
-#endif
-
- .unlocked_ioctl = hyper_dmabuf_ioctl,
-};
-
-static struct miscdevice hyper_dmabuf_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "hyper_dmabuf",
- .fops = &hyper_dmabuf_driver_fops,
-};
-
-static int register_device(void)
-{
- int ret = 0;
-
- ret = misc_register(&hyper_dmabuf_miscdev);
-
- if (ret) {
- printk(KERN_ERR "hyper_dmabuf: driver can't be registered\n");
- return ret;
- }
-
- hy_drv_priv->dev = hyper_dmabuf_miscdev.this_device;
-
- /* TODO: Check if there is a different way to initialize dma mask */
- dma_coerce_mask_and_coherent(hy_drv_priv->dev, DMA_BIT_MASK(64));
-
- return ret;
-}
-
-static void unregister_device(void)
-{
- dev_info(hy_drv_priv->dev,
- "hyper_dmabuf: unregister_device() is called\n");
-
- misc_deregister(&hyper_dmabuf_miscdev);
-}
-
-static int __init hyper_dmabuf_drv_init(void)
-{
- int ret = 0;
-
- printk(KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n");
-
- hy_drv_priv = kcalloc(1, sizeof(struct hyper_dmabuf_private),
- GFP_KERNEL);
-
- if (!hy_drv_priv)
- return -ENOMEM;
-
- ret = register_device();
- if (ret < 0) {
- kfree(hy_drv_priv);
- return ret;
- }
-
-/* currently only supports XEN hypervisor */
-#ifdef CONFIG_HYPER_DMABUF_XEN
- hy_drv_priv->bknd_ops = &xen_bknd_ops;
-#else
- hy_drv_priv->bknd_ops = NULL;
- printk(KERN_ERR "hyper_dmabuf drv currently supports XEN only.\n");
-#endif
-
- if (hy_drv_priv->bknd_ops == NULL) {
- printk(KERN_ERR "Hyper_dmabuf: no backend found\n");
- kfree(hy_drv_priv);
- return -1;
- }
-
- mutex_init(&hy_drv_priv->lock);
-
- mutex_lock(&hy_drv_priv->lock);
-
- hy_drv_priv->initialized = false;
-
- dev_info(hy_drv_priv->dev,
- "initializing database for imported/exported dmabufs\n");
-
- hy_drv_priv->work_queue = create_workqueue("hyper_dmabuf_wqueue");
-
- ret = hyper_dmabuf_table_init();
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "fail to init table for exported/imported entries\n");
- mutex_unlock(&hy_drv_priv->lock);
- kfree(hy_drv_priv);
- return ret;
- }
-
-#ifdef CONFIG_HYPER_DMABUF_SYSFS
- ret = hyper_dmabuf_register_sysfs(hy_drv_priv->dev);
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "failed to initialize sysfs\n");
- mutex_unlock(&hy_drv_priv->lock);
- kfree(hy_drv_priv);
- return ret;
- }
-#endif
-
-#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
- mutex_init(&hy_drv_priv->event_read_lock);
- spin_lock_init(&hy_drv_priv->event_lock);
-
- /* Initialize event queue */
- INIT_LIST_HEAD(&hy_drv_priv->event_list);
- init_waitqueue_head(&hy_drv_priv->event_wait);
-
- /* resetting number of pending events */
- hy_drv_priv->pending = 0;
-#endif
-
- if (hy_drv_priv->bknd_ops->init) {
- ret = hy_drv_priv->bknd_ops->init();
-
- if (ret < 0) {
- dev_dbg(hy_drv_priv->dev,
- "failed to initialize backend.\n");
- mutex_unlock(&hy_drv_priv->lock);
- kfree(hy_drv_priv);
- return ret;
- }
- }
-
- hy_drv_priv->domid = hy_drv_priv->bknd_ops->get_vm_id();
-
- ret = hy_drv_priv->bknd_ops->init_comm_env();
- if (ret < 0) {
- dev_dbg(hy_drv_priv->dev,
- "failed to initialize comm-env.\n");
- } else {
- hy_drv_priv->initialized = true;
- }
-
- mutex_unlock(&hy_drv_priv->lock);
-
- dev_info(hy_drv_priv->dev,
- "Finishing up initialization of hyper_dmabuf drv\n");
-
- /* interrupt for comm should be registered here: */
- return ret;
-}
-
-static void hyper_dmabuf_drv_exit(void)
-{
-#ifdef CONFIG_HYPER_DMABUF_SYSFS
- hyper_dmabuf_unregister_sysfs(hy_drv_priv->dev);
-#endif
-
- mutex_lock(&hy_drv_priv->lock);
-
- /* hash tables for export/import entries and ring_infos */
- hyper_dmabuf_table_destroy();
-
- hy_drv_priv->bknd_ops->destroy_comm();
-
- if (hy_drv_priv->bknd_ops->cleanup) {
- hy_drv_priv->bknd_ops->cleanup();
- };
-
- /* destroy workqueue */
- if (hy_drv_priv->work_queue)
- destroy_workqueue(hy_drv_priv->work_queue);
-
- /* destroy id_queue */
- if (hy_drv_priv->id_queue)
- hyper_dmabuf_free_hid_list();
-
-#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
- /* clean up event queue */
- hyper_dmabuf_events_release();
-#endif
-
- mutex_unlock(&hy_drv_priv->lock);
-
- dev_info(hy_drv_priv->dev,
- "hyper_dmabuf driver: Exiting\n");
-
- kfree(hy_drv_priv);
-
- unregister_device();
-}
-
-module_init(hyper_dmabuf_drv_init);
-module_exit(hyper_dmabuf_drv_exit);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
deleted file mode 100644
index c2bb3ce..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
-#define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
-
-#include <linux/device.h>
-#include <xen/hyper_dmabuf.h>
-
-struct hyper_dmabuf_req;
-
-struct hyper_dmabuf_event {
- struct hyper_dmabuf_event_data event_data;
- struct list_head link;
-};
-
-struct hyper_dmabuf_private {
- struct device *dev;
-
- /* VM(domain) id of current VM instance */
- int domid;
-
- /* workqueue dedicated to hyper_dmabuf driver */
- struct workqueue_struct *work_queue;
-
- /* list of reusable hyper_dmabuf_ids */
- struct list_reusable_id *id_queue;
-
- /* backend ops - hypervisor specific */
- struct hyper_dmabuf_bknd_ops *bknd_ops;
-
- /* device global lock */
- /* TODO: might need a lock per resource (e.g. EXPORT LIST) */
- struct mutex lock;
-
- /* flag that shows whether backend is initialized */
- bool initialized;
-
- wait_queue_head_t event_wait;
- struct list_head event_list;
-
- spinlock_t event_lock;
- struct mutex event_read_lock;
-
- /* # of pending events */
- int pending;
-};
-
-struct list_reusable_id {
- hyper_dmabuf_id_t hid;
- struct list_head list;
-};
-
-struct hyper_dmabuf_bknd_ops {
- /* backend initialization routine (optional) */
- int (*init)(void);
-
- /* backend cleanup routine (optional) */
- int (*cleanup)(void);
-
- /* retreiving id of current virtual machine */
- int (*get_vm_id)(void);
-
- /* get pages shared via hypervisor-specific method */
- int (*share_pages)(struct page **, int, int, void **);
-
- /* make shared pages unshared via hypervisor specific method */
- int (*unshare_pages)(void **, int);
-
- /* map remotely shared pages on importer's side via
- * hypervisor-specific method
- */
- struct page ** (*map_shared_pages)(unsigned long, int, int, void **);
-
- /* unmap and free shared pages on importer's side via
- * hypervisor-specific method
- */
- int (*unmap_shared_pages)(void **, int);
-
- /* initialize communication environment */
- int (*init_comm_env)(void);
-
- void (*destroy_comm)(void);
-
- /* upstream ch setup (receiving and responding) */
- int (*init_rx_ch)(int);
-
- /* downstream ch setup (transmitting and parsing responses) */
- int (*init_tx_ch)(int);
-
- int (*send_req)(int, struct hyper_dmabuf_req *, int);
-};
-
-/* exporting global drv private info */
-extern struct hyper_dmabuf_private *hy_drv_priv;
-
-#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
deleted file mode 100644
index 392ea99..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_struct.h"
-#include "hyper_dmabuf_list.h"
-#include "hyper_dmabuf_event.h"
-
-static void send_event(struct hyper_dmabuf_event *e)
-{
- struct hyper_dmabuf_event *oldest;
- unsigned long irqflags;
-
- spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
-
- /* check current number of event then if it hits the max num allowed
- * then remove the oldest event in the list
- */
- if (hy_drv_priv->pending > MAX_DEPTH_EVENT_QUEUE - 1) {
- oldest = list_first_entry(&hy_drv_priv->event_list,
- struct hyper_dmabuf_event, link);
- list_del(&oldest->link);
- hy_drv_priv->pending--;
- kfree(oldest);
- }
-
- list_add_tail(&e->link,
- &hy_drv_priv->event_list);
-
- hy_drv_priv->pending++;
-
- wake_up_interruptible(&hy_drv_priv->event_wait);
-
- spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags);
-}
-
-void hyper_dmabuf_events_release(void)
-{
- struct hyper_dmabuf_event *e, *et;
- unsigned long irqflags;
-
- spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
-
- list_for_each_entry_safe(e, et, &hy_drv_priv->event_list,
- link) {
- list_del(&e->link);
- kfree(e);
- hy_drv_priv->pending--;
- }
-
- if (hy_drv_priv->pending) {
- dev_err(hy_drv_priv->dev,
- "possible leak on event_list\n");
- }
-
- spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags);
-}
-
-int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid)
-{
- struct hyper_dmabuf_event *e;
- struct imported_sgt_info *imported;
-
- imported = hyper_dmabuf_find_imported(hid);
-
- if (!imported) {
- dev_err(hy_drv_priv->dev,
- "can't find imported_sgt_info in the list\n");
- return -EINVAL;
- }
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
-
- if (!e)
- return -ENOMEM;
-
- e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT;
- e->event_data.hdr.hid = hid;
- e->event_data.data = (void *)imported->priv;
- e->event_data.hdr.size = imported->sz_priv;
-
- send_event(e);
-
- dev_dbg(hy_drv_priv->dev,
- "event number = %d :", hy_drv_priv->pending);
-
- dev_dbg(hy_drv_priv->dev,
- "generating events for {%d, %d, %d, %d}\n",
- imported->hid.id, imported->hid.rng_key[0],
- imported->hid.rng_key[1], imported->hid.rng_key[2]);
-
- return 0;
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.h
deleted file mode 100644
index 50db04f..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_event.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_EVENT_H__
-#define __HYPER_DMABUF_EVENT_H__
-
-#define MAX_DEPTH_EVENT_QUEUE 32
-
-enum hyper_dmabuf_event_type {
- HYPER_DMABUF_NEW_IMPORT = 0x10000,
-};
-
-void hyper_dmabuf_events_release(void);
-
-int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid);
-
-#endif /* __HYPER_DMABUF_EVENT_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
deleted file mode 100644
index e67b84a..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_id.h"
-
-void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid)
-{
- struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
- struct list_reusable_id *new_reusable;
-
- new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL);
-
- if (!new_reusable)
- return;
-
- new_reusable->hid = hid;
-
- list_add(&new_reusable->list, &reusable_head->list);
-}
-
-static hyper_dmabuf_id_t get_reusable_hid(void)
-{
- struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
- hyper_dmabuf_id_t hid = {-1, {0, 0, 0} };
-
- /* check there is reusable id */
- if (!list_empty(&reusable_head->list)) {
- reusable_head = list_first_entry(&reusable_head->list,
- struct list_reusable_id,
- list);
-
- list_del(&reusable_head->list);
- hid = reusable_head->hid;
- kfree(reusable_head);
- }
-
- return hid;
-}
-
-void hyper_dmabuf_free_hid_list(void)
-{
- struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
- struct list_reusable_id *temp_head;
-
- if (reusable_head) {
- /* freeing mem space all reusable ids in the stack */
- while (!list_empty(&reusable_head->list)) {
- temp_head = list_first_entry(&reusable_head->list,
- struct list_reusable_id,
- list);
- list_del(&temp_head->list);
- kfree(temp_head);
- }
-
- /* freeing head */
- kfree(reusable_head);
- }
-}
-
-hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
-{
- static int count;
- hyper_dmabuf_id_t hid;
- struct list_reusable_id *reusable_head;
-
- /* first call to hyper_dmabuf_get_id */
- if (count == 0) {
- reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
-
- if (!reusable_head)
- return (hyper_dmabuf_id_t){-1, {0, 0, 0} };
-
- /* list head has an invalid count */
- reusable_head->hid.id = -1;
- INIT_LIST_HEAD(&reusable_head->list);
- hy_drv_priv->id_queue = reusable_head;
- }
-
- hid = get_reusable_hid();
-
- /*creating a new H-ID only if nothing in the reusable id queue
- * and count is less than maximum allowed
- */
- if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX)
- hid.id = HYPER_DMABUF_ID_CREATE(hy_drv_priv->domid, count++);
-
- /* random data embedded in the id for security */
- get_random_bytes(&hid.rng_key[0], 12);
-
- return hid;
-}
-
-bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2)
-{
- int i;
-
- /* compare keys */
- for (i = 0; i < 3; i++) {
- if (hid1.rng_key[i] != hid2.rng_key[i])
- return false;
- }
-
- return true;
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h
deleted file mode 100644
index ed690f3..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_ID_H__
-#define __HYPER_DMABUF_ID_H__
-
-#define HYPER_DMABUF_ID_CREATE(domid, cnt) \
- ((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
-
-#define HYPER_DMABUF_DOM_ID(hid) \
- (((hid.id) >> 24) & 0xFF)
-
-/* currently maximum number of buffers shared
- * at any given moment is limited to 1000
- */
-#define HYPER_DMABUF_ID_MAX 1000
-
-/* adding freed hid to the reusable list */
-void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid);
-
-/* freeing the reusasble list */
-void hyper_dmabuf_free_hid_list(void);
-
-/* getting a hid available to use. */
-hyper_dmabuf_id_t hyper_dmabuf_get_hid(void);
-
-/* comparing two different hid */
-bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2);
-
-#endif /*__HYPER_DMABUF_ID_H*/
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
deleted file mode 100644
index ca6edf2..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ /dev/null
@@ -1,786 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_id.h"
-#include "hyper_dmabuf_struct.h"
-#include "hyper_dmabuf_ioctl.h"
-#include "hyper_dmabuf_list.h"
-#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_sgl_proc.h"
-#include "hyper_dmabuf_ops.h"
-#include "hyper_dmabuf_query.h"
-
-static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data)
-{
- struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr;
- struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
- int ret = 0;
-
- if (!data) {
- dev_err(hy_drv_priv->dev, "user data is NULL\n");
- return -EINVAL;
- }
- tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data;
-
- ret = bknd_ops->init_tx_ch(tx_ch_attr->remote_domain);
-
- return ret;
-}
-
-static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data)
-{
- struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr;
- struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
- int ret = 0;
-
- if (!data) {
- dev_err(hy_drv_priv->dev, "user data is NULL\n");
- return -EINVAL;
- }
-
- rx_ch_attr = (struct ioctl_hyper_dmabuf_rx_ch_setup *)data;
-
- ret = bknd_ops->init_rx_ch(rx_ch_attr->source_domain);
-
- return ret;
-}
-
-static int send_export_msg(struct exported_sgt_info *exported,
- struct pages_info *pg_info)
-{
- struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
- struct hyper_dmabuf_req *req;
- int op[MAX_NUMBER_OF_OPERANDS] = {0};
- int ret, i;
-
- /* now create request for importer via ring */
- op[0] = exported->hid.id;
-
- for (i = 0; i < 3; i++)
- op[i+1] = exported->hid.rng_key[i];
-
- if (pg_info) {
- op[4] = pg_info->nents;
- op[5] = pg_info->frst_ofst;
- op[6] = pg_info->last_len;
- op[7] = bknd_ops->share_pages(pg_info->pgs, exported->rdomid,
- pg_info->nents, &exported->refs_info);
- if (op[7] < 0) {
- dev_err(hy_drv_priv->dev, "pages sharing failed\n");
- return op[7];
- }
- }
-
- op[8] = exported->sz_priv;
-
- /* driver/application specific private info */
- memcpy(&op[9], exported->priv, op[8]);
-
- req = kcalloc(1, sizeof(*req), GFP_KERNEL);
-
- if (!req)
- return -ENOMEM;
-
- /* composing a message to the importer */
- hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]);
-
- ret = bknd_ops->send_req(exported->rdomid, req, true);
-
- kfree(req);
-
- return ret;
-}
-
-/* Fast path exporting routine in case same buffer is already exported.
- * In this function, we skip normal exporting process and just update
- * private data on both VMs (importer and exporter)
- *
- * return '1' if reexport is needed, return '0' if succeeds, return
- * Kernel error code if something goes wrong
- */
-static int fastpath_export(hyper_dmabuf_id_t hid, int sz_priv, char *priv)
-{
- int reexport = 1;
- int ret = 0;
- struct exported_sgt_info *exported;
-
- exported = hyper_dmabuf_find_exported(hid);
-
- if (!exported)
- return reexport;
-
- if (exported->valid == false)
- return reexport;
-
- /*
- * Check if unexport is already scheduled for that buffer,
- * if so try to cancel it. If that will fail, buffer needs
- * to be reexport once again.
- */
- if (exported->unexport_sched) {
- if (!cancel_delayed_work_sync(&exported->unexport))
- return reexport;
-
- exported->unexport_sched = false;
- }
-
- /* if there's any change in size of private data.
- * we reallocate space for private data with new size
- */
- if (sz_priv != exported->sz_priv) {
- kfree(exported->priv);
-
- /* truncating size */
- if (sz_priv > MAX_SIZE_PRIV_DATA)
- exported->sz_priv = MAX_SIZE_PRIV_DATA;
- else
- exported->sz_priv = sz_priv;
-
- exported->priv = kcalloc(1, exported->sz_priv,
- GFP_KERNEL);
-
- if (!exported->priv) {
- hyper_dmabuf_remove_exported(exported->hid);
- hyper_dmabuf_cleanup_sgt_info(exported, true);
- kfree(exported);
- return -ENOMEM;
- }
- }
-
- /* update private data in sgt_info with new ones */
- ret = copy_from_user(exported->priv, priv, exported->sz_priv);
- if (ret) {
- dev_err(hy_drv_priv->dev,
- "Failed to load a new private data\n");
- ret = -EINVAL;
- } else {
- /* send an export msg for updating priv in importer */
- ret = send_export_msg(exported, NULL);
-
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "Failed to send a new private data\n");
- ret = -EBUSY;
- }
- }
-
- return ret;
-}
-
-static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
-{
- struct ioctl_hyper_dmabuf_export_remote *export_remote_attr =
- (struct ioctl_hyper_dmabuf_export_remote *)data;
- struct dma_buf *dma_buf;
- struct dma_buf_attachment *attachment;
- struct sg_table *sgt;
- struct pages_info *pg_info;
- struct exported_sgt_info *exported;
- hyper_dmabuf_id_t hid;
- int ret = 0;
-
- if (hy_drv_priv->domid == export_remote_attr->remote_domain) {
- dev_err(hy_drv_priv->dev,
- "exporting to the same VM is not permitted\n");
- return -EINVAL;
- }
-
- dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd);
-
- if (IS_ERR(dma_buf)) {
- dev_err(hy_drv_priv->dev, "Cannot get dma buf\n");
- return PTR_ERR(dma_buf);
- }
-
- /* we check if this specific attachment was already exported
- * to the same domain and if yes and it's valid sgt_info,
- * it returns hyper_dmabuf_id of pre-exported sgt_info
- */
- hid = hyper_dmabuf_find_hid_exported(dma_buf,
- export_remote_attr->remote_domain);
-
- if (hid.id != -1) {
- ret = fastpath_export(hid, export_remote_attr->sz_priv,
- export_remote_attr->priv);
-
- /* return if fastpath_export succeeds or
- * gets some fatal error
- */
- if (ret <= 0) {
- dma_buf_put(dma_buf);
- export_remote_attr->hid = hid;
- return ret;
- }
- }
-
- attachment = dma_buf_attach(dma_buf, hy_drv_priv->dev);
- if (IS_ERR(attachment)) {
- dev_err(hy_drv_priv->dev, "cannot get attachment\n");
- ret = PTR_ERR(attachment);
- goto fail_attach;
- }
-
- sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
-
- if (IS_ERR(sgt)) {
- dev_err(hy_drv_priv->dev, "cannot map attachment\n");
- ret = PTR_ERR(sgt);
- goto fail_map_attachment;
- }
-
- exported = kcalloc(1, sizeof(*exported), GFP_KERNEL);
-
- if (!exported) {
- ret = -ENOMEM;
- goto fail_sgt_info_creation;
- }
-
- /* possible truncation */
- if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA)
- exported->sz_priv = MAX_SIZE_PRIV_DATA;
- else
- exported->sz_priv = export_remote_attr->sz_priv;
-
- /* creating buffer for private data of buffer */
- if (exported->sz_priv != 0) {
- exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL);
-
- if (!exported->priv) {
- ret = -ENOMEM;
- goto fail_priv_creation;
- }
- } else {
- dev_err(hy_drv_priv->dev, "size is 0\n");
- }
-
- exported->hid = hyper_dmabuf_get_hid();
-
- /* no more exported dmabuf allowed */
- if (exported->hid.id == -1) {
- dev_err(hy_drv_priv->dev,
- "exceeds allowed number of dmabuf to be exported\n");
- ret = -ENOMEM;
- goto fail_sgt_info_creation;
- }
-
- exported->rdomid = export_remote_attr->remote_domain;
- exported->dma_buf = dma_buf;
- exported->valid = true;
-
- exported->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
- if (!exported->active_sgts) {
- ret = -ENOMEM;
- goto fail_map_active_sgts;
- }
-
- exported->active_attached = kmalloc(sizeof(struct attachment_list),
- GFP_KERNEL);
- if (!exported->active_attached) {
- ret = -ENOMEM;
- goto fail_map_active_attached;
- }
-
- exported->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list),
- GFP_KERNEL);
- if (!exported->va_kmapped) {
- ret = -ENOMEM;
- goto fail_map_va_kmapped;
- }
-
- exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list),
- GFP_KERNEL);
- if (!exported->va_vmapped) {
- ret = -ENOMEM;
- goto fail_map_va_vmapped;
- }
-
- exported->active_sgts->sgt = sgt;
- exported->active_attached->attach = attachment;
- exported->va_kmapped->vaddr = NULL;
- exported->va_vmapped->vaddr = NULL;
-
- /* initialize list of sgt, attachment and vaddr for dmabuf sync
- * via shadow dma-buf
- */
- INIT_LIST_HEAD(&exported->active_sgts->list);
- INIT_LIST_HEAD(&exported->active_attached->list);
- INIT_LIST_HEAD(&exported->va_kmapped->list);
- INIT_LIST_HEAD(&exported->va_vmapped->list);
-
- /* copy private data to sgt_info */
- ret = copy_from_user(exported->priv, export_remote_attr->priv,
- exported->sz_priv);
-
- if (ret) {
- dev_err(hy_drv_priv->dev,
- "failed to load private data\n");
- ret = -EINVAL;
- goto fail_export;
- }
-
- pg_info = hyper_dmabuf_ext_pgs(sgt);
- if (!pg_info) {
- dev_err(hy_drv_priv->dev,
- "failed to construct pg_info\n");
- ret = -ENOMEM;
- goto fail_export;
- }
-
- exported->nents = pg_info->nents;
-
- /* now register it to export list */
- hyper_dmabuf_register_exported(exported);
-
- export_remote_attr->hid = exported->hid;
-
- ret = send_export_msg(exported, pg_info);
-
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "failed to send out the export request\n");
- goto fail_send_request;
- }
-
- /* free pg_info */
- kfree(pg_info->pgs);
- kfree(pg_info);
-
- exported->filp = filp;
-
- return ret;
-
-/* Clean-up if error occurs */
-
-fail_send_request:
- hyper_dmabuf_remove_exported(exported->hid);
-
- /* free pg_info */
- kfree(pg_info->pgs);
- kfree(pg_info);
-
-fail_export:
- kfree(exported->va_vmapped);
-
-fail_map_va_vmapped:
- kfree(exported->va_kmapped);
-
-fail_map_va_kmapped:
- kfree(exported->active_attached);
-
-fail_map_active_attached:
- kfree(exported->active_sgts);
- kfree(exported->priv);
-
-fail_priv_creation:
- kfree(exported);
-
-fail_map_active_sgts:
-fail_sgt_info_creation:
- dma_buf_unmap_attachment(attachment, sgt,
- DMA_BIDIRECTIONAL);
-
-fail_map_attachment:
- dma_buf_detach(dma_buf, attachment);
-
-fail_attach:
- dma_buf_put(dma_buf);
-
- return ret;
-}
-
-static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
-{
- struct ioctl_hyper_dmabuf_export_fd *export_fd_attr =
- (struct ioctl_hyper_dmabuf_export_fd *)data;
- struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
- struct imported_sgt_info *imported;
- struct hyper_dmabuf_req *req;
- struct page **data_pgs;
- int op[4];
- int i;
- int ret = 0;
-
- dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
-
- /* look for dmabuf for the id */
- imported = hyper_dmabuf_find_imported(export_fd_attr->hid);
-
- /* can't find sgt from the table */
- if (!imported) {
- dev_err(hy_drv_priv->dev, "can't find the entry\n");
- return -ENOENT;
- }
-
- mutex_lock(&hy_drv_priv->lock);
-
- imported->importers++;
-
- /* send notification for export_fd to exporter */
- op[0] = imported->hid.id;
-
- for (i = 0; i < 3; i++)
- op[i+1] = imported->hid.rng_key[i];
-
- dev_dbg(hy_drv_priv->dev, "Export FD of buffer {id:%d key:%d %d %d}\n",
- imported->hid.id, imported->hid.rng_key[0],
- imported->hid.rng_key[1], imported->hid.rng_key[2]);
-
- req = kcalloc(1, sizeof(*req), GFP_KERNEL);
-
- if (!req) {
- mutex_unlock(&hy_drv_priv->lock);
- return -ENOMEM;
- }
-
- hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]);
-
- ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true);
-
- if (ret < 0) {
- /* in case of timeout other end eventually will receive request,
- * so we need to undo it
- */
- hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED,
- &op[0]);
- bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, false);
- kfree(req);
- dev_err(hy_drv_priv->dev,
- "Failed to create sgt or notify exporter\n");
- imported->importers--;
- mutex_unlock(&hy_drv_priv->lock);
- return ret;
- }
-
- kfree(req);
-
- if (ret == HYPER_DMABUF_REQ_ERROR) {
- dev_err(hy_drv_priv->dev,
- "Buffer invalid {id:%d key:%d %d %d}, cannot import\n",
- imported->hid.id, imported->hid.rng_key[0],
- imported->hid.rng_key[1], imported->hid.rng_key[2]);
-
- imported->importers--;
- mutex_unlock(&hy_drv_priv->lock);
- return -EINVAL;
- }
-
- ret = 0;
-
- dev_dbg(hy_drv_priv->dev,
- "Found buffer gref %d off %d\n",
- imported->ref_handle, imported->frst_ofst);
-
- dev_dbg(hy_drv_priv->dev,
- "last len %d nents %d domain %d\n",
- imported->last_len, imported->nents,
- HYPER_DMABUF_DOM_ID(imported->hid));
-
- if (!imported->sgt) {
- dev_dbg(hy_drv_priv->dev,
- "buffer {id:%d key:%d %d %d} pages not mapped yet\n",
- imported->hid.id, imported->hid.rng_key[0],
- imported->hid.rng_key[1], imported->hid.rng_key[2]);
-
- data_pgs = bknd_ops->map_shared_pages(imported->ref_handle,
- HYPER_DMABUF_DOM_ID(imported->hid),
- imported->nents,
- &imported->refs_info);
-
- if (!data_pgs) {
- dev_err(hy_drv_priv->dev,
- "can't map pages hid {id:%d key:%d %d %d}\n",
- imported->hid.id, imported->hid.rng_key[0],
- imported->hid.rng_key[1],
- imported->hid.rng_key[2]);
-
- imported->importers--;
-
- req = kcalloc(1, sizeof(*req), GFP_KERNEL);
-
- if (!req) {
- mutex_unlock(&hy_drv_priv->lock);
- return -ENOMEM;
- }
-
- hyper_dmabuf_create_req(req,
- HYPER_DMABUF_EXPORT_FD_FAILED,
- &op[0]);
- bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req,
- false);
- kfree(req);
- mutex_unlock(&hy_drv_priv->lock);
- return -EINVAL;
- }
-
- imported->sgt = hyper_dmabuf_create_sgt(data_pgs,
- imported->frst_ofst,
- imported->last_len,
- imported->nents);
-
- }
-
- export_fd_attr->fd = hyper_dmabuf_export_fd(imported,
- export_fd_attr->flags);
-
- if (export_fd_attr->fd < 0) {
- /* fail to get fd */
- ret = export_fd_attr->fd;
- }
-
- mutex_unlock(&hy_drv_priv->lock);
-
- dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
- return ret;
-}
-
-/* unexport dmabuf from the database and send int req to the source domain
- * to unmap it.
- */
-static void delayed_unexport(struct work_struct *work)
-{
- struct hyper_dmabuf_req *req;
- struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
- struct exported_sgt_info *exported =
- container_of(work, struct exported_sgt_info, unexport.work);
- int op[4];
- int i, ret;
-
- if (!exported)
- return;
-
- dev_dbg(hy_drv_priv->dev,
- "Marking buffer {id:%d key:%d %d %d} as invalid\n",
- exported->hid.id, exported->hid.rng_key[0],
- exported->hid.rng_key[1], exported->hid.rng_key[2]);
-
- /* no longer valid */
- exported->valid = false;
-
- req = kcalloc(1, sizeof(*req), GFP_KERNEL);
-
- if (!req)
- return;
-
- op[0] = exported->hid.id;
-
- for (i = 0; i < 3; i++)
- op[i+1] = exported->hid.rng_key[i];
-
- hyper_dmabuf_create_req(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &op[0]);
-
- /* Now send unexport request to remote domain, marking
- * that buffer should not be used anymore
- */
- ret = bknd_ops->send_req(exported->rdomid, req, true);
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "unexport message for buffer {id:%d key:%d %d %d} failed\n",
- exported->hid.id, exported->hid.rng_key[0],
- exported->hid.rng_key[1], exported->hid.rng_key[2]);
- }
-
- kfree(req);
- exported->unexport_sched = false;
-
-	/* Immediately clean-up if it has never been exported by importer
-	 * (so no SGT is constructed on importer).
-	 * clean it up later in remote sync when final release ops
-	 * is called (importer does this only when there's no
-	 * consumer of locally exported FDs)
-	 */
- if (exported->active == 0) {
- dev_dbg(hy_drv_priv->dev,
- "claning up buffer {id:%d key:%d %d %d} completly\n",
- exported->hid.id, exported->hid.rng_key[0],
- exported->hid.rng_key[1], exported->hid.rng_key[2]);
-
- hyper_dmabuf_cleanup_sgt_info(exported, false);
- hyper_dmabuf_remove_exported(exported->hid);
-
- /* register hyper_dmabuf_id to the list for reuse */
- hyper_dmabuf_store_hid(exported->hid);
-
- if (exported->sz_priv > 0 && !exported->priv)
- kfree(exported->priv);
-
- kfree(exported);
- }
-}
-
-/* Schedule unexport of dmabuf.
- */
-int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data)
-{
- struct ioctl_hyper_dmabuf_unexport *unexport_attr =
- (struct ioctl_hyper_dmabuf_unexport *)data;
- struct exported_sgt_info *exported;
-
- dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
-
- /* find dmabuf in export list */
- exported = hyper_dmabuf_find_exported(unexport_attr->hid);
-
- dev_dbg(hy_drv_priv->dev,
- "scheduling unexport of buffer {id:%d key:%d %d %d}\n",
- unexport_attr->hid.id, unexport_attr->hid.rng_key[0],
- unexport_attr->hid.rng_key[1], unexport_attr->hid.rng_key[2]);
-
- /* failed to find corresponding entry in export list */
- if (exported == NULL) {
- unexport_attr->status = -ENOENT;
- return -ENOENT;
- }
-
- if (exported->unexport_sched)
- return 0;
-
- exported->unexport_sched = true;
- INIT_DELAYED_WORK(&exported->unexport, delayed_unexport);
- schedule_delayed_work(&exported->unexport,
- msecs_to_jiffies(unexport_attr->delay_ms));
-
- dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
- return 0;
-}
-
-static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
-{
- struct ioctl_hyper_dmabuf_query *query_attr =
- (struct ioctl_hyper_dmabuf_query *)data;
- struct exported_sgt_info *exported = NULL;
- struct imported_sgt_info *imported = NULL;
- int ret = 0;
-
- if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hy_drv_priv->domid) {
- /* query for exported dmabuf */
- exported = hyper_dmabuf_find_exported(query_attr->hid);
- if (exported) {
- ret = hyper_dmabuf_query_exported(exported,
- query_attr->item,
- &query_attr->info);
- } else {
- dev_err(hy_drv_priv->dev,
- "hid {id:%d key:%d %d %d} not in exp list\n",
- query_attr->hid.id,
- query_attr->hid.rng_key[0],
- query_attr->hid.rng_key[1],
- query_attr->hid.rng_key[2]);
- return -ENOENT;
- }
- } else {
- /* query for imported dmabuf */
- imported = hyper_dmabuf_find_imported(query_attr->hid);
- if (imported) {
- ret = hyper_dmabuf_query_imported(imported,
- query_attr->item,
- &query_attr->info);
- } else {
- dev_err(hy_drv_priv->dev,
- "hid {id:%d key:%d %d %d} not in imp list\n",
- query_attr->hid.id,
- query_attr->hid.rng_key[0],
- query_attr->hid.rng_key[1],
- query_attr->hid.rng_key[2]);
- return -ENOENT;
- }
- }
-
- return ret;
-}
-
-const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
- HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP,
- hyper_dmabuf_tx_ch_setup_ioctl, 0),
- HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP,
- hyper_dmabuf_rx_ch_setup_ioctl, 0),
- HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE,
- hyper_dmabuf_export_remote_ioctl, 0),
- HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD,
- hyper_dmabuf_export_fd_ioctl, 0),
- HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT,
- hyper_dmabuf_unexport_ioctl, 0),
- HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY,
- hyper_dmabuf_query_ioctl, 0),
-};
-
-long hyper_dmabuf_ioctl(struct file *filp,
- unsigned int cmd, unsigned long param)
-{
- const struct hyper_dmabuf_ioctl_desc *ioctl = NULL;
- unsigned int nr = _IOC_NR(cmd);
- int ret;
- hyper_dmabuf_ioctl_t func;
- char *kdata;
-
- if (nr > ARRAY_SIZE(hyper_dmabuf_ioctls)) {
- dev_err(hy_drv_priv->dev, "invalid ioctl\n");
- return -EINVAL;
- }
-
- ioctl = &hyper_dmabuf_ioctls[nr];
-
- func = ioctl->func;
-
- if (unlikely(!func)) {
- dev_err(hy_drv_priv->dev, "no function\n");
- return -EINVAL;
- }
-
- kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
- if (!kdata)
- return -ENOMEM;
-
- if (copy_from_user(kdata, (void __user *)param,
- _IOC_SIZE(cmd)) != 0) {
- dev_err(hy_drv_priv->dev,
- "failed to copy from user arguments\n");
- ret = -EFAULT;
- goto ioctl_error;
- }
-
- ret = func(filp, kdata);
-
- if (copy_to_user((void __user *)param, kdata,
- _IOC_SIZE(cmd)) != 0) {
- dev_err(hy_drv_priv->dev,
- "failed to copy to user arguments\n");
- ret = -EFAULT;
- goto ioctl_error;
- }
-
-ioctl_error:
- kfree(kdata);
-
- return ret;
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
deleted file mode 100644
index 5991a87..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_IOCTL_H__
-#define __HYPER_DMABUF_IOCTL_H__
-
-typedef int (*hyper_dmabuf_ioctl_t)(struct file *filp, void *data);
-
-struct hyper_dmabuf_ioctl_desc {
- unsigned int cmd;
- int flags;
- hyper_dmabuf_ioctl_t func;
- const char *name;
-};
-
-#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags) \
- [_IOC_NR(ioctl)] = { \
- .cmd = ioctl, \
- .func = _func, \
- .flags = _flags, \
- .name = #ioctl \
- }
-
-long hyper_dmabuf_ioctl(struct file *filp,
- unsigned int cmd, unsigned long param);
-
-int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data);
-
-#endif //__HYPER_DMABUF_IOCTL_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
deleted file mode 100644
index bba6d1d..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/cdev.h>
-#include <linux/hashtable.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_list.h"
-#include "hyper_dmabuf_id.h"
-#include "hyper_dmabuf_event.h"
-
-DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED);
-DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED);
-
-#ifdef CONFIG_HYPER_DMABUF_SYSFS
-static ssize_t hyper_dmabuf_imported_show(struct device *drv,
- struct device_attribute *attr,
- char *buf)
-{
- struct list_entry_imported *info_entry;
- int bkt;
- ssize_t count = 0;
- size_t total = 0;
-
- hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) {
- hyper_dmabuf_id_t hid = info_entry->imported->hid;
- int nents = info_entry->imported->nents;
- bool valid = info_entry->imported->valid;
- int num_importers = info_entry->imported->importers;
-
- total += nents;
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "hid:{%d %d %d %d}, nent:%d, v:%c, numi:%d\n",
- hid.id, hid.rng_key[0], hid.rng_key[1],
- hid.rng_key[2], nents, (valid ? 't' : 'f'),
- num_importers);
- }
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "total nents: %lu\n", total);
-
- return count;
-}
-
-static ssize_t hyper_dmabuf_exported_show(struct device *drv,
- struct device_attribute *attr,
- char *buf)
-{
- struct list_entry_exported *info_entry;
- int bkt;
- ssize_t count = 0;
- size_t total = 0;
-
- hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) {
- hyper_dmabuf_id_t hid = info_entry->exported->hid;
- int nents = info_entry->exported->nents;
- bool valid = info_entry->exported->valid;
- int importer_exported = info_entry->exported->active;
-
- total += nents;
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "hid:{%d %d %d %d}, nent:%d, v:%c, ie:%d\n",
- hid.id, hid.rng_key[0], hid.rng_key[1],
- hid.rng_key[2], nents, (valid ? 't' : 'f'),
- importer_exported);
- }
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "total nents: %lu\n", total);
-
- return count;
-}
-
-static DEVICE_ATTR(imported, 0400, hyper_dmabuf_imported_show, NULL);
-static DEVICE_ATTR(exported, 0400, hyper_dmabuf_exported_show, NULL);
-
-int hyper_dmabuf_register_sysfs(struct device *dev)
-{
- int err;
-
- err = device_create_file(dev, &dev_attr_imported);
- if (err < 0)
- goto err1;
- err = device_create_file(dev, &dev_attr_exported);
- if (err < 0)
- goto err2;
-
- return 0;
-err2:
- device_remove_file(dev, &dev_attr_imported);
-err1:
- return -1;
-}
-
-int hyper_dmabuf_unregister_sysfs(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_imported);
- device_remove_file(dev, &dev_attr_exported);
- return 0;
-}
-
-#endif
-
-int hyper_dmabuf_table_init(void)
-{
- hash_init(hyper_dmabuf_hash_imported);
- hash_init(hyper_dmabuf_hash_exported);
- return 0;
-}
-
-int hyper_dmabuf_table_destroy(void)
-{
- /* TODO: cleanup hyper_dmabuf_hash_imported
- * and hyper_dmabuf_hash_exported
- */
- return 0;
-}
-
-int hyper_dmabuf_register_exported(struct exported_sgt_info *exported)
-{
- struct list_entry_exported *info_entry;
-
- info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
-
- if (!info_entry)
- return -ENOMEM;
-
- info_entry->exported = exported;
-
- hash_add(hyper_dmabuf_hash_exported, &info_entry->node,
- info_entry->exported->hid.id);
-
- return 0;
-}
-
-int hyper_dmabuf_register_imported(struct imported_sgt_info *imported)
-{
- struct list_entry_imported *info_entry;
-
- info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
-
- if (!info_entry)
- return -ENOMEM;
-
- info_entry->imported = imported;
-
- hash_add(hyper_dmabuf_hash_imported, &info_entry->node,
- info_entry->imported->hid.id);
-
- return 0;
-}
-
-struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid)
-{
- struct list_entry_exported *info_entry;
- int bkt;
-
- hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
- /* checking hid.id first */
- if (info_entry->exported->hid.id == hid.id) {
- /* then key is compared */
- if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid,
- hid))
- return info_entry->exported;
-
- /* if key is unmatched, given HID is invalid,
- * so returning NULL
- */
- break;
- }
-
- return NULL;
-}
-
-/* search for pre-exported sgt and return id of it if it exist */
-hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf,
- int domid)
-{
- struct list_entry_exported *info_entry;
- hyper_dmabuf_id_t hid = {-1, {0, 0, 0} };
- int bkt;
-
- hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
- if (info_entry->exported->dma_buf == dmabuf &&
- info_entry->exported->rdomid == domid)
- return info_entry->exported->hid;
-
- return hid;
-}
-
-struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid)
-{
- struct list_entry_imported *info_entry;
- int bkt;
-
- hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
- /* checking hid.id first */
- if (info_entry->imported->hid.id == hid.id) {
- /* then key is compared */
- if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid,
- hid))
- return info_entry->imported;
- /* if key is unmatched, given HID is invalid,
- * so returning NULL
- */
- break;
- }
-
- return NULL;
-}
-
-int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid)
-{
- struct list_entry_exported *info_entry;
- int bkt;
-
- hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
- /* checking hid.id first */
- if (info_entry->exported->hid.id == hid.id) {
- /* then key is compared */
- if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid,
- hid)) {
- hash_del(&info_entry->node);
- kfree(info_entry);
- return 0;
- }
-
- break;
- }
-
- return -ENOENT;
-}
-
-int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid)
-{
- struct list_entry_imported *info_entry;
- int bkt;
-
- hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
- /* checking hid.id first */
- if (info_entry->imported->hid.id == hid.id) {
- /* then key is compared */
- if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid,
- hid)) {
- hash_del(&info_entry->node);
- kfree(info_entry);
- return 0;
- }
-
- break;
- }
-
- return -ENOENT;
-}
-
-void hyper_dmabuf_foreach_exported(
- void (*func)(struct exported_sgt_info *, void *attr),
- void *attr)
-{
- struct list_entry_exported *info_entry;
- struct hlist_node *tmp;
- int bkt;
-
- hash_for_each_safe(hyper_dmabuf_hash_exported, bkt, tmp,
- info_entry, node) {
- func(info_entry->exported, attr);
- }
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h
deleted file mode 100644
index f7102f5..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_LIST_H__
-#define __HYPER_DMABUF_LIST_H__
-
-#include "hyper_dmabuf_struct.h"
-
-/* number of bits to be used for exported dmabufs hash table */
-#define MAX_ENTRY_EXPORTED 7
-/* number of bits to be used for imported dmabufs hash table */
-#define MAX_ENTRY_IMPORTED 7
-
-struct list_entry_exported {
- struct exported_sgt_info *exported;
- struct hlist_node node;
-};
-
-struct list_entry_imported {
- struct imported_sgt_info *imported;
- struct hlist_node node;
-};
-
-int hyper_dmabuf_table_init(void);
-
-int hyper_dmabuf_table_destroy(void);
-
-int hyper_dmabuf_register_exported(struct exported_sgt_info *info);
-
-/* search for pre-exported sgt and return id of it if it exist */
-hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf,
- int domid);
-
-int hyper_dmabuf_register_imported(struct imported_sgt_info *info);
-
-struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid);
-
-struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid);
-
-int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid);
-
-int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid);
-
-void hyper_dmabuf_foreach_exported(void (*func)(struct exported_sgt_info *,
- void *attr), void *attr);
-
-int hyper_dmabuf_register_sysfs(struct device *dev);
-int hyper_dmabuf_unregister_sysfs(struct device *dev);
-
-#endif /* __HYPER_DMABUF_LIST_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
deleted file mode 100644
index afc1fd6e..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_remote_sync.h"
-#include "hyper_dmabuf_event.h"
-#include "hyper_dmabuf_list.h"
-
-struct cmd_process {
- struct work_struct work;
- struct hyper_dmabuf_req *rq;
- int domid;
-};
-
-void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
- enum hyper_dmabuf_command cmd, int *op)
-{
- int i;
-
- req->stat = HYPER_DMABUF_REQ_NOT_RESPONDED;
- req->cmd = cmd;
-
- switch (cmd) {
- /* as exporter, commands to importer */
- case HYPER_DMABUF_EXPORT:
- /* exporting pages for dmabuf */
- /* command : HYPER_DMABUF_EXPORT,
- * op0~op3 : hyper_dmabuf_id
- * op4 : number of pages to be shared
- * op5 : offset of data in the first page
- * op6 : length of data in the last page
- * op7 : top-level reference number for shared pages
- * op8 : size of private data (from op9)
- * op9 ~ : Driver-specific private data
- * (e.g. graphic buffer's meta info)
- */
-
- memcpy(&req->op[0], &op[0], 9 * sizeof(int) + op[8]);
- break;
-
- case HYPER_DMABUF_NOTIFY_UNEXPORT:
- /* destroy sg_list for hyper_dmabuf_id on remote side */
- /* command : DMABUF_DESTROY,
- * op0~op3 : hyper_dmabuf_id_t hid
- */
-
- for (i = 0; i < 4; i++)
- req->op[i] = op[i];
- break;
-
- case HYPER_DMABUF_EXPORT_FD:
- case HYPER_DMABUF_EXPORT_FD_FAILED:
- /* dmabuf fd is being created on imported side or importing
- * failed
- *
- * command : HYPER_DMABUF_EXPORT_FD or
- * HYPER_DMABUF_EXPORT_FD_FAILED,
- * op0~op3 : hyper_dmabuf_id
- */
-
- for (i = 0; i < 4; i++)
- req->op[i] = op[i];
- break;
-
- case HYPER_DMABUF_OPS_TO_REMOTE:
- /* notifying dmabuf map/unmap to importer (probably not needed)
- * for dmabuf synchronization
- */
- break;
-
- case HYPER_DMABUF_OPS_TO_SOURCE:
- /* notifying dmabuf map/unmap to exporter, map will make
- * the driver to do shadow mapping or unmapping for
- * synchronization with original exporter (e.g. i915)
- *
- * command : DMABUF_OPS_TO_SOURCE.
- * op0~3 : hyper_dmabuf_id
- * op4 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
- */
- for (i = 0; i < 5; i++)
- req->op[i] = op[i];
- break;
-
- default:
- /* no command found */
- return;
- }
-}
-
-static void cmd_process_work(struct work_struct *work)
-{
- struct imported_sgt_info *imported;
- struct cmd_process *proc = container_of(work,
- struct cmd_process, work);
- struct hyper_dmabuf_req *req;
- int domid;
- int i;
-
- req = proc->rq;
- domid = proc->domid;
-
- switch (req->cmd) {
- case HYPER_DMABUF_EXPORT:
- /* exporting pages for dmabuf */
- /* command : HYPER_DMABUF_EXPORT,
- * op0~op3 : hyper_dmabuf_id
- * op4 : number of pages to be shared
- * op5 : offset of data in the first page
- * op6 : length of data in the last page
- * op7 : top-level reference number for shared pages
- * op8 : size of private data (from op9)
- * op9 ~ : Driver-specific private data
- * (e.g. graphic buffer's meta info)
- */
-
- /* if nents == 0, it means it is a message only for
- * priv synchronization. for existing imported_sgt_info
- * so not creating a new one
- */
- if (req->op[4] == 0) {
- hyper_dmabuf_id_t exist = {req->op[0],
- {req->op[1], req->op[2],
- req->op[3] } };
-
- imported = hyper_dmabuf_find_imported(exist);
-
- if (!imported) {
- dev_err(hy_drv_priv->dev,
- "Can't find imported sgt_info\n");
- break;
- }
-
- /* if size of new private data is different,
- * we reallocate it.
- */
- if (imported->sz_priv != req->op[8]) {
- kfree(imported->priv);
- imported->sz_priv = req->op[8];
- imported->priv = kcalloc(1, req->op[8],
- GFP_KERNEL);
- if (!imported->priv) {
- /* set it invalid */
- imported->valid = 0;
- break;
- }
- }
-
- /* updating priv data */
- memcpy(imported->priv, &req->op[9], req->op[8]);
-
-#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
- /* generating import event */
- hyper_dmabuf_import_event(imported->hid);
-#endif
-
- break;
- }
-
- imported = kcalloc(1, sizeof(*imported), GFP_KERNEL);
-
- if (!imported)
- break;
-
- imported->sz_priv = req->op[8];
- imported->priv = kcalloc(1, req->op[8], GFP_KERNEL);
-
- if (!imported->priv) {
- kfree(imported);
- break;
- }
-
- imported->hid.id = req->op[0];
-
- for (i = 0; i < 3; i++)
- imported->hid.rng_key[i] = req->op[i+1];
-
- imported->nents = req->op[4];
- imported->frst_ofst = req->op[5];
- imported->last_len = req->op[6];
- imported->ref_handle = req->op[7];
-
- dev_dbg(hy_drv_priv->dev, "DMABUF was exported\n");
- dev_dbg(hy_drv_priv->dev, "\thid{id:%d key:%d %d %d}\n",
- req->op[0], req->op[1], req->op[2],
- req->op[3]);
- dev_dbg(hy_drv_priv->dev, "\tnents %d\n", req->op[4]);
- dev_dbg(hy_drv_priv->dev, "\tfirst offset %d\n", req->op[5]);
- dev_dbg(hy_drv_priv->dev, "\tlast len %d\n", req->op[6]);
- dev_dbg(hy_drv_priv->dev, "\tgrefid %d\n", req->op[7]);
-
- memcpy(imported->priv, &req->op[9], req->op[8]);
-
- imported->valid = true;
- hyper_dmabuf_register_imported(imported);
-
-#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
- /* generating import event */
- hyper_dmabuf_import_event(imported->hid);
-#endif
-
- break;
-
- case HYPER_DMABUF_OPS_TO_REMOTE:
- /* notifying dmabuf map/unmap to importer
- * (probably not needed) for dmabuf synchronization
- */
- break;
-
- default:
- /* shouldn't get here */
- break;
- }
-
- kfree(req);
- kfree(proc);
-}
-
-int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
-{
- struct cmd_process *proc;
- struct hyper_dmabuf_req *temp_req;
- struct imported_sgt_info *imported;
- struct exported_sgt_info *exported;
- hyper_dmabuf_id_t hid;
- int ret;
-
- if (!req) {
- dev_err(hy_drv_priv->dev, "request is NULL\n");
- return -EINVAL;
- }
-
- hid.id = req->op[0];
- hid.rng_key[0] = req->op[1];
- hid.rng_key[1] = req->op[2];
- hid.rng_key[2] = req->op[3];
-
- if ((req->cmd < HYPER_DMABUF_EXPORT) ||
- (req->cmd > HYPER_DMABUF_OPS_TO_SOURCE)) {
- dev_err(hy_drv_priv->dev, "invalid command\n");
- return -EINVAL;
- }
-
- req->stat = HYPER_DMABUF_REQ_PROCESSED;
-
- /* HYPER_DMABUF_DESTROY requires immediate
- * follow up so can't be processed in workqueue
- */
- if (req->cmd == HYPER_DMABUF_NOTIFY_UNEXPORT) {
- /* destroy sg_list for hyper_dmabuf_id on remote side */
- /* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
- * op0~3 : hyper_dmabuf_id
- */
- dev_dbg(hy_drv_priv->dev,
- "processing HYPER_DMABUF_NOTIFY_UNEXPORT\n");
-
- imported = hyper_dmabuf_find_imported(hid);
-
- if (imported) {
- /* if anything is still using dma_buf */
- if (imported->importers) {
- /* Buffer is still in use, just mark that
- * it should not be allowed to export its fd
- * anymore.
- */
- imported->valid = false;
- } else {
- /* No one is using buffer, remove it from
- * imported list
- */
- hyper_dmabuf_remove_imported(hid);
- kfree(imported);
- }
- } else {
- req->stat = HYPER_DMABUF_REQ_ERROR;
- }
-
- return req->cmd;
- }
-
- /* dma buf remote synchronization */
- if (req->cmd == HYPER_DMABUF_OPS_TO_SOURCE) {
- /* notifying dmabuf map/unmap to exporter, map will
- * make the driver to do shadow mapping
- * or unmapping for synchronization with original
- * exporter (e.g. i915)
- *
- * command : DMABUF_OPS_TO_SOURCE.
- * op0~3 : hyper_dmabuf_id
- * op1 : enum hyper_dmabuf_ops {....}
- */
- dev_dbg(hy_drv_priv->dev,
- "%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__);
-
- ret = hyper_dmabuf_remote_sync(hid, req->op[4]);
-
- if (ret)
- req->stat = HYPER_DMABUF_REQ_ERROR;
- else
- req->stat = HYPER_DMABUF_REQ_PROCESSED;
-
- return req->cmd;
- }
-
- /* synchronous dma_buf_fd export */
- if (req->cmd == HYPER_DMABUF_EXPORT_FD) {
- /* find a corresponding SGT for the id */
- dev_dbg(hy_drv_priv->dev,
- "HYPER_DMABUF_EXPORT_FD for {id:%d key:%d %d %d}\n",
- hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
-
- exported = hyper_dmabuf_find_exported(hid);
-
- if (!exported) {
- dev_err(hy_drv_priv->dev,
- "buffer {id:%d key:%d %d %d} not found\n",
- hid.id, hid.rng_key[0], hid.rng_key[1],
- hid.rng_key[2]);
-
- req->stat = HYPER_DMABUF_REQ_ERROR;
- } else if (!exported->valid) {
- dev_dbg(hy_drv_priv->dev,
- "Buffer no longer valid {id:%d key:%d %d %d}\n",
- hid.id, hid.rng_key[0], hid.rng_key[1],
- hid.rng_key[2]);
-
- req->stat = HYPER_DMABUF_REQ_ERROR;
- } else {
- dev_dbg(hy_drv_priv->dev,
- "Buffer still valid {id:%d key:%d %d %d}\n",
- hid.id, hid.rng_key[0], hid.rng_key[1],
- hid.rng_key[2]);
-
- exported->active++;
- req->stat = HYPER_DMABUF_REQ_PROCESSED;
- }
- return req->cmd;
- }
-
- if (req->cmd == HYPER_DMABUF_EXPORT_FD_FAILED) {
- dev_dbg(hy_drv_priv->dev,
- "HYPER_DMABUF_EXPORT_FD_FAILED for {id:%d key:%d %d %d}\n",
- hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
-
- exported = hyper_dmabuf_find_exported(hid);
-
- if (!exported) {
- dev_err(hy_drv_priv->dev,
- "buffer {id:%d key:%d %d %d} not found\n",
- hid.id, hid.rng_key[0], hid.rng_key[1],
- hid.rng_key[2]);
-
- req->stat = HYPER_DMABUF_REQ_ERROR;
- } else {
- exported->active--;
- req->stat = HYPER_DMABUF_REQ_PROCESSED;
- }
- return req->cmd;
- }
-
- dev_dbg(hy_drv_priv->dev,
- "%s: putting request to workqueue\n", __func__);
- temp_req = kmalloc(sizeof(*temp_req), GFP_KERNEL);
-
- if (!temp_req)
- return -ENOMEM;
-
- memcpy(temp_req, req, sizeof(*temp_req));
-
- proc = kcalloc(1, sizeof(struct cmd_process), GFP_KERNEL);
-
- if (!proc) {
- kfree(temp_req);
- return -ENOMEM;
- }
-
- proc->rq = temp_req;
- proc->domid = domid;
-
- INIT_WORK(&(proc->work), cmd_process_work);
-
- queue_work(hy_drv_priv->work_queue, &(proc->work));
-
- return req->cmd;
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
deleted file mode 100644
index 9c8a76b..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_MSG_H__
-#define __HYPER_DMABUF_MSG_H__
-
-#define MAX_NUMBER_OF_OPERANDS 64
-
-struct hyper_dmabuf_req {
- unsigned int req_id;
- unsigned int stat;
- unsigned int cmd;
- unsigned int op[MAX_NUMBER_OF_OPERANDS];
-};
-
-struct hyper_dmabuf_resp {
- unsigned int resp_id;
- unsigned int stat;
- unsigned int cmd;
- unsigned int op[MAX_NUMBER_OF_OPERANDS];
-};
-
-enum hyper_dmabuf_command {
- HYPER_DMABUF_EXPORT = 0x10,
- HYPER_DMABUF_EXPORT_FD,
- HYPER_DMABUF_EXPORT_FD_FAILED,
- HYPER_DMABUF_NOTIFY_UNEXPORT,
- HYPER_DMABUF_OPS_TO_REMOTE,
- HYPER_DMABUF_OPS_TO_SOURCE,
-};
-
-enum hyper_dmabuf_ops {
- HYPER_DMABUF_OPS_ATTACH = 0x1000,
- HYPER_DMABUF_OPS_DETACH,
- HYPER_DMABUF_OPS_MAP,
- HYPER_DMABUF_OPS_UNMAP,
- HYPER_DMABUF_OPS_RELEASE,
- HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS,
- HYPER_DMABUF_OPS_END_CPU_ACCESS,
- HYPER_DMABUF_OPS_KMAP_ATOMIC,
- HYPER_DMABUF_OPS_KUNMAP_ATOMIC,
- HYPER_DMABUF_OPS_KMAP,
- HYPER_DMABUF_OPS_KUNMAP,
- HYPER_DMABUF_OPS_MMAP,
- HYPER_DMABUF_OPS_VMAP,
- HYPER_DMABUF_OPS_VUNMAP,
-};
-
-enum hyper_dmabuf_req_feedback {
- HYPER_DMABUF_REQ_PROCESSED = 0x100,
- HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP,
- HYPER_DMABUF_REQ_ERROR,
- HYPER_DMABUF_REQ_NOT_RESPONDED
-};
-
-/* create a request packet with given command and operands */
-void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
- enum hyper_dmabuf_command command,
- int *operands);
-
-/* parse incoming request packet (or response) and take
- * appropriate actions for those
- */
-int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req);
-
-#endif // __HYPER_DMABUF_MSG_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
deleted file mode 100644
index e85f619..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
+++ /dev/null
@@ -1,413 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/dma-buf.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_struct.h"
-#include "hyper_dmabuf_ops.h"
-#include "hyper_dmabuf_sgl_proc.h"
-#include "hyper_dmabuf_id.h"
-#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_list.h"
-
-#define WAIT_AFTER_SYNC_REQ 0
-#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
-
-static int dmabuf_refcount(struct dma_buf *dma_buf)
-{
- if ((dma_buf != NULL) && (dma_buf->file != NULL))
- return file_count(dma_buf->file);
-
- return -EINVAL;
-}
-
-static int sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
-{
- struct hyper_dmabuf_req *req;
- struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
- int op[5];
- int i;
- int ret;
-
- op[0] = hid.id;
-
- for (i = 0; i < 3; i++)
- op[i+1] = hid.rng_key[i];
-
- op[4] = dmabuf_ops;
-
- req = kcalloc(1, sizeof(*req), GFP_KERNEL);
-
- if (!req)
- return -ENOMEM;
-
- hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]);
-
- /* send request and wait for a response */
- ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(hid), req,
- WAIT_AFTER_SYNC_REQ);
-
- if (ret < 0) {
- dev_dbg(hy_drv_priv->dev,
- "dmabuf sync request failed:%d\n", req->op[4]);
- }
-
- kfree(req);
-
- return ret;
-}
-
-static int hyper_dmabuf_ops_attach(struct dma_buf *dmabuf,
- struct device *dev,
- struct dma_buf_attachment *attach)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!attach->dmabuf->priv)
- return -EINVAL;
-
- imported = (struct imported_sgt_info *)attach->dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_ATTACH);
-
- return ret;
-}
-
-static void hyper_dmabuf_ops_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attach)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!attach->dmabuf->priv)
- return;
-
- imported = (struct imported_sgt_info *)attach->dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_DETACH);
-}
-
-static struct sg_table *hyper_dmabuf_ops_map(
- struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
-{
- struct sg_table *st;
- struct imported_sgt_info *imported;
- struct pages_info *pg_info;
- int ret;
-
- if (!attachment->dmabuf->priv)
- return NULL;
-
- imported = (struct imported_sgt_info *)attachment->dmabuf->priv;
-
- /* extract pages from sgt */
- pg_info = hyper_dmabuf_ext_pgs(imported->sgt);
-
- if (!pg_info)
- return NULL;
-
- /* create a new sg_table with extracted pages */
- st = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst,
- pg_info->last_len, pg_info->nents);
- if (!st)
- goto err_free_sg;
-
- if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir))
- goto err_free_sg;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MAP);
-
- kfree(pg_info->pgs);
- kfree(pg_info);
-
- return st;
-
-err_free_sg:
- if (st) {
- sg_free_table(st);
- kfree(st);
- }
-
- kfree(pg_info->pgs);
- kfree(pg_info);
-
- return NULL;
-}
-
-static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
- struct sg_table *sg,
- enum dma_data_direction dir)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!attachment->dmabuf->priv)
- return;
-
- imported = (struct imported_sgt_info *)attachment->dmabuf->priv;
-
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
-
- sg_free_table(sg);
- kfree(sg);
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_UNMAP);
-}
-
-static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
-{
- struct imported_sgt_info *imported;
- struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
- int ret;
- int finish;
-
- if (!dma_buf->priv)
- return;
-
- imported = (struct imported_sgt_info *)dma_buf->priv;
-
- if (!dmabuf_refcount(imported->dma_buf))
- imported->dma_buf = NULL;
-
- imported->importers--;
-
- if (imported->importers == 0) {
- bknd_ops->unmap_shared_pages(&imported->refs_info,
- imported->nents);
-
- if (imported->sgt) {
- sg_free_table(imported->sgt);
- kfree(imported->sgt);
- imported->sgt = NULL;
- }
- }
-
- finish = imported && !imported->valid &&
- !imported->importers;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_RELEASE);
-
- /*
- * Check if buffer is still valid and if not remove it
- * from imported list. That has to be done after sending
- * sync request
- */
- if (finish) {
- hyper_dmabuf_remove_imported(imported->hid);
- kfree(imported);
- }
-}
-
-static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction dir)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return -EINVAL;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
-
- return ret;
-}
-
-static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction dir)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return -EINVAL;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_END_CPU_ACCESS);
-
- return 0;
-}
-
-static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf,
- unsigned long pgnum)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return NULL;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP_ATOMIC);
-
- /* TODO: NULL for now. Need to return the addr of mapped region */
- return NULL;
-}
-
-static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf,
- unsigned long pgnum, void *vaddr)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
-}
-
-static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return NULL;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP);
-
- /* for now NULL.. need to return the address of mapped region */
- return NULL;
-}
-
-static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum,
- void *vaddr)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP);
-}
-
-static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf,
- struct vm_area_struct *vma)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return -EINVAL;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MMAP);
-
- return ret;
-}
-
-static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return NULL;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_VMAP);
-
- return NULL;
-}
-
-static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)
-{
- struct imported_sgt_info *imported;
- int ret;
-
- if (!dmabuf->priv)
- return;
-
- imported = (struct imported_sgt_info *)dmabuf->priv;
-
- ret = sync_request(imported->hid, HYPER_DMABUF_OPS_VUNMAP);
-}
-
-static const struct dma_buf_ops hyper_dmabuf_ops = {
- .attach = hyper_dmabuf_ops_attach,
- .detach = hyper_dmabuf_ops_detach,
- .map_dma_buf = hyper_dmabuf_ops_map,
- .unmap_dma_buf = hyper_dmabuf_ops_unmap,
- .release = hyper_dmabuf_ops_release,
- .begin_cpu_access = (void *)hyper_dmabuf_ops_begin_cpu_access,
- .end_cpu_access = (void *)hyper_dmabuf_ops_end_cpu_access,
- .map_atomic = hyper_dmabuf_ops_kmap_atomic,
- .unmap_atomic = hyper_dmabuf_ops_kunmap_atomic,
- .map = hyper_dmabuf_ops_kmap,
- .unmap = hyper_dmabuf_ops_kunmap,
- .mmap = hyper_dmabuf_ops_mmap,
- .vmap = hyper_dmabuf_ops_vmap,
- .vunmap = hyper_dmabuf_ops_vunmap,
-};
-
-/* exporting dmabuf as fd */
-int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags)
-{
- int fd = -1;
-
- /* call hyper_dmabuf_export_dmabuf and create
- * and bind a handle for it then release
- */
- hyper_dmabuf_export_dma_buf(imported);
-
- if (imported->dma_buf)
- fd = dma_buf_fd(imported->dma_buf, flags);
-
- return fd;
-}
-
-void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported)
-{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &hyper_dmabuf_ops;
-
- /* multiple of PAGE_SIZE, not considering offset */
- exp_info.size = imported->sgt->nents * PAGE_SIZE;
- exp_info.flags = /* not sure about flag */ 0;
- exp_info.priv = imported;
-
- imported->dma_buf = dma_buf_export(&exp_info);
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
deleted file mode 100644
index c5505a4..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_OPS_H__
-#define __HYPER_DMABUF_OPS_H__
-
-int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags);
-
-void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported);
-
-#endif /* __HYPER_DMABUF_IMP_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
deleted file mode 100644
index 1f2f56b..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/dma-buf.h>
-#include <linux/uaccess.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_struct.h"
-#include "hyper_dmabuf_id.h"
-
-#define HYPER_DMABUF_SIZE(nents, first_offset, last_len) \
- ((nents)*PAGE_SIZE - (first_offset) - PAGE_SIZE + (last_len))
-
-int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
- int query, unsigned long *info)
-{
- switch (query) {
- case HYPER_DMABUF_QUERY_TYPE:
- *info = EXPORTED;
- break;
-
- /* exporting domain of this specific dmabuf*/
- case HYPER_DMABUF_QUERY_EXPORTER:
- *info = HYPER_DMABUF_DOM_ID(exported->hid);
- break;
-
- /* importing domain of this specific dmabuf */
- case HYPER_DMABUF_QUERY_IMPORTER:
- *info = exported->rdomid;
- break;
-
- /* size of dmabuf in byte */
- case HYPER_DMABUF_QUERY_SIZE:
- *info = exported->dma_buf->size;
- break;
-
- /* whether the buffer is used by importer */
- case HYPER_DMABUF_QUERY_BUSY:
- *info = (exported->active > 0);
- break;
-
- /* whether the buffer is unexported */
- case HYPER_DMABUF_QUERY_UNEXPORTED:
- *info = !exported->valid;
- break;
-
- /* whether the buffer is scheduled to be unexported */
- case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED:
- *info = !exported->unexport_sched;
- break;
-
- /* size of private info attached to buffer */
- case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
- *info = exported->sz_priv;
- break;
-
- /* copy private info attached to buffer */
- case HYPER_DMABUF_QUERY_PRIV_INFO:
- if (exported->sz_priv > 0) {
- int n;
-
- n = copy_to_user((void __user *) *info,
- exported->priv,
- exported->sz_priv);
- if (n != 0)
- return -EINVAL;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
- int query, unsigned long *info)
-{
- switch (query) {
- case HYPER_DMABUF_QUERY_TYPE:
- *info = IMPORTED;
- break;
-
- /* exporting domain of this specific dmabuf*/
- case HYPER_DMABUF_QUERY_EXPORTER:
- *info = HYPER_DMABUF_DOM_ID(imported->hid);
- break;
-
- /* importing domain of this specific dmabuf */
- case HYPER_DMABUF_QUERY_IMPORTER:
- *info = hy_drv_priv->domid;
- break;
-
- /* size of dmabuf in byte */
- case HYPER_DMABUF_QUERY_SIZE:
- if (imported->dma_buf) {
- /* if local dma_buf is created (if it's
- * ever mapped), retrieve it directly
- * from struct dma_buf *
- */
- *info = imported->dma_buf->size;
- } else {
- /* calcuate it from given nents, frst_ofst
- * and last_len
- */
- *info = HYPER_DMABUF_SIZE(imported->nents,
- imported->frst_ofst,
- imported->last_len);
- }
- break;
-
- /* whether the buffer is used or not */
- case HYPER_DMABUF_QUERY_BUSY:
- /* checks if it's used by importer */
- *info = (imported->importers > 0);
- break;
-
- /* whether the buffer is unexported */
- case HYPER_DMABUF_QUERY_UNEXPORTED:
- *info = !imported->valid;
- break;
-
- /* size of private info attached to buffer */
- case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
- *info = imported->sz_priv;
- break;
-
- /* copy private info attached to buffer */
- case HYPER_DMABUF_QUERY_PRIV_INFO:
- if (imported->sz_priv > 0) {
- int n;
-
- n = copy_to_user((void __user *)*info,
- imported->priv,
- imported->sz_priv);
- if (n != 0)
- return -EINVAL;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h
deleted file mode 100644
index 65ae738..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_query.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __HYPER_DMABUF_QUERY_H__
-#define __HYPER_DMABUF_QUERY_H__
-
-int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
- int query, unsigned long *info);
-
-int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
- int query, unsigned long *info);
-
-#endif // __HYPER_DMABUF_QUERY_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
deleted file mode 100644
index a82fd7b..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/dma-buf.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_struct.h"
-#include "hyper_dmabuf_list.h"
-#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_id.h"
-#include "hyper_dmabuf_sgl_proc.h"
-
-/* Whenever importer does dma operations from remote domain,
- * a notification is sent to the exporter so that exporter
- * issues equivalent dma operation on the original dma buf
- * for indirect synchronization via shadow operations.
- *
- * All ptrs and references (e.g struct sg_table*,
- * struct dma_buf_attachment) created via these operations on
- * exporter's side are kept in stack (implemented as circular
- * linked-lists) separately so that those can be re-referenced
- * later when unmapping operations are invoked to free those.
- *
- * The very first element on the bottom of each stack holds
- * is what is created when initial exporting is issued so it
- * should not be modified or released by this fuction.
- */
-int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
-{
- struct exported_sgt_info *exported;
- struct sgt_list *sgtl;
- struct attachment_list *attachl;
- struct kmap_vaddr_list *va_kmapl;
- struct vmap_vaddr_list *va_vmapl;
- int ret;
-
- /* find a coresponding SGT for the id */
- exported = hyper_dmabuf_find_exported(hid);
-
- if (!exported) {
- dev_err(hy_drv_priv->dev,
- "dmabuf remote sync::can't find exported list\n");
- return -ENOENT;
- }
-
- switch (ops) {
- case HYPER_DMABUF_OPS_ATTACH:
- attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
-
- if (!attachl)
- return -ENOMEM;
-
- attachl->attach = dma_buf_attach(exported->dma_buf,
- hy_drv_priv->dev);
-
- if (!attachl->attach) {
- kfree(attachl);
- dev_err(hy_drv_priv->dev,
- "remote sync::HYPER_DMABUF_OPS_ATTACH\n");
- return -ENOMEM;
- }
-
- list_add(&attachl->list, &exported->active_attached->list);
- break;
-
- case HYPER_DMABUF_OPS_DETACH:
- if (list_empty(&exported->active_attached->list)) {
- dev_err(hy_drv_priv->dev,
- "remote sync::HYPER_DMABUF_OPS_DETACH\n");
- dev_err(hy_drv_priv->dev,
- "no more dmabuf attachment left to be detached\n");
- return -EFAULT;
- }
-
- attachl = list_first_entry(&exported->active_attached->list,
- struct attachment_list, list);
-
- dma_buf_detach(exported->dma_buf, attachl->attach);
- list_del(&attachl->list);
- kfree(attachl);
- break;
-
- case HYPER_DMABUF_OPS_MAP:
- if (list_empty(&exported->active_attached->list)) {
- dev_err(hy_drv_priv->dev,
- "remote sync::HYPER_DMABUF_OPS_MAP\n");
- dev_err(hy_drv_priv->dev,
- "no more dmabuf attachment left to be mapped\n");
- return -EFAULT;
- }
-
- attachl = list_first_entry(&exported->active_attached->list,
- struct attachment_list, list);
-
- sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
-
- if (!sgtl)
- return -ENOMEM;
-
- sgtl->sgt = dma_buf_map_attachment(attachl->attach,
- DMA_BIDIRECTIONAL);
- if (!sgtl->sgt) {
- kfree(sgtl);
- dev_err(hy_drv_priv->dev,
- "remote sync::HYPER_DMABUF_OPS_MAP\n");
- return -ENOMEM;
- }
- list_add(&sgtl->list, &exported->active_sgts->list);
- break;
-
- case HYPER_DMABUF_OPS_UNMAP:
- if (list_empty(&exported->active_sgts->list) ||
- list_empty(&exported->active_attached->list)) {
- dev_err(hy_drv_priv->dev,
- "remote sync::HYPER_DMABUF_OPS_UNMAP\n");
- dev_err(hy_drv_priv->dev,
- "no SGT or attach left to be unmapped\n");
- return -EFAULT;
- }
-
- attachl = list_first_entry(&exported->active_attached->list,
- struct attachment_list, list);
- sgtl = list_first_entry(&exported->active_sgts->list,
- struct sgt_list, list);
-
- dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
- DMA_BIDIRECTIONAL);
- list_del(&sgtl->list);
- kfree(sgtl);
- break;
-
- case HYPER_DMABUF_OPS_RELEASE:
- dev_dbg(hy_drv_priv->dev,
- "id:%d key:%d %d %d} released, ref left: %d\n",
- exported->hid.id, exported->hid.rng_key[0],
- exported->hid.rng_key[1], exported->hid.rng_key[2],
- exported->active - 1);
-
- exported->active--;
-
- /* If there are still importers just break, if no then
- * continue with final cleanup
- */
- if (exported->active)
- break;
-
- /* Importer just released buffer fd, check if there is
- * any other importer still using it.
- * If not and buffer was unexported, clean up shared
- * data and remove that buffer.
- */
- dev_dbg(hy_drv_priv->dev,
- "Buffer {id:%d key:%d %d %d} final released\n",
- exported->hid.id, exported->hid.rng_key[0],
- exported->hid.rng_key[1], exported->hid.rng_key[2]);
-
- if (!exported->valid && !exported->active &&
- !exported->unexport_sched) {
- hyper_dmabuf_cleanup_sgt_info(exported, false);
- hyper_dmabuf_remove_exported(hid);
- kfree(exported);
- /* store hyper_dmabuf_id in the list for reuse */
- hyper_dmabuf_store_hid(hid);
- }
-
- break;
-
- case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
- ret = dma_buf_begin_cpu_access(exported->dma_buf,
- DMA_BIDIRECTIONAL);
- if (ret) {
- dev_err(hy_drv_priv->dev,
- "HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
- return ret;
- }
- break;
-
- case HYPER_DMABUF_OPS_END_CPU_ACCESS:
- ret = dma_buf_end_cpu_access(exported->dma_buf,
- DMA_BIDIRECTIONAL);
- if (ret) {
- dev_err(hy_drv_priv->dev,
- "HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
- return ret;
- }
- break;
-
- case HYPER_DMABUF_OPS_KMAP_ATOMIC:
- case HYPER_DMABUF_OPS_KMAP:
- va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL);
- if (!va_kmapl)
- return -ENOMEM;
-
- /* dummy kmapping of 1 page */
- if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC)
- va_kmapl->vaddr = dma_buf_kmap_atomic(
- exported->dma_buf, 1);
- else
- va_kmapl->vaddr = dma_buf_kmap(
- exported->dma_buf, 1);
-
- if (!va_kmapl->vaddr) {
- kfree(va_kmapl);
- dev_err(hy_drv_priv->dev,
- "HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
- return -ENOMEM;
- }
- list_add(&va_kmapl->list, &exported->va_kmapped->list);
- break;
-
- case HYPER_DMABUF_OPS_KUNMAP_ATOMIC:
- case HYPER_DMABUF_OPS_KUNMAP:
- if (list_empty(&exported->va_kmapped->list)) {
- dev_err(hy_drv_priv->dev,
- "HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
- dev_err(hy_drv_priv->dev,
- "no more dmabuf VA to be freed\n");
- return -EFAULT;
- }
-
- va_kmapl = list_first_entry(&exported->va_kmapped->list,
- struct kmap_vaddr_list, list);
- if (!va_kmapl->vaddr) {
- dev_err(hy_drv_priv->dev,
- "HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
- return PTR_ERR(va_kmapl->vaddr);
- }
-
- /* unmapping 1 page */
- if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC)
- dma_buf_kunmap_atomic(exported->dma_buf,
- 1, va_kmapl->vaddr);
- else
- dma_buf_kunmap(exported->dma_buf,
- 1, va_kmapl->vaddr);
-
- list_del(&va_kmapl->list);
- kfree(va_kmapl);
- break;
-
- case HYPER_DMABUF_OPS_MMAP:
- /* currently not supported: looking for a way to create
- * a dummy vma
- */
- dev_warn(hy_drv_priv->dev,
- "remote sync::sychronized mmap is not supported\n");
- break;
-
- case HYPER_DMABUF_OPS_VMAP:
- va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL);
-
- if (!va_vmapl)
- return -ENOMEM;
-
- /* dummy vmapping */
- va_vmapl->vaddr = dma_buf_vmap(exported->dma_buf);
-
- if (!va_vmapl->vaddr) {
- kfree(va_vmapl);
- dev_err(hy_drv_priv->dev,
- "remote sync::HYPER_DMABUF_OPS_VMAP\n");
- return -ENOMEM;
- }
- list_add(&va_vmapl->list, &exported->va_vmapped->list);
- break;
-
- case HYPER_DMABUF_OPS_VUNMAP:
- if (list_empty(&exported->va_vmapped->list)) {
- dev_err(hy_drv_priv->dev,
- "remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
- dev_err(hy_drv_priv->dev,
- "no more dmabuf VA to be freed\n");
- return -EFAULT;
- }
- va_vmapl = list_first_entry(&exported->va_vmapped->list,
- struct vmap_vaddr_list, list);
- if (!va_vmapl || va_vmapl->vaddr == NULL) {
- dev_err(hy_drv_priv->dev,
- "remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
- return -EFAULT;
- }
-
- dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
-
- list_del(&va_vmapl->list);
- kfree(va_vmapl);
- break;
-
- default:
- /* program should not get here */
- break;
- }
-
- return 0;
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h
deleted file mode 100644
index 36638928..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_REMOTE_SYNC_H__
-#define __HYPER_DMABUF_REMOTE_SYNC_H__
-
-int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops);
-
-#endif // __HYPER_DMABUF_REMOTE_SYNC_H__
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
deleted file mode 100644
index d15eb17..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/dma-buf.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_struct.h"
-#include "hyper_dmabuf_sgl_proc.h"
-
-#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
-
-/* return total number of pages referenced by a sgt
- * for pre-calculation of # of pages behind a given sgt
- */
-static int get_num_pgs(struct sg_table *sgt)
-{
- struct scatterlist *sgl;
- int length, i;
- /* at least one page */
- int num_pages = 1;
-
- sgl = sgt->sgl;
-
- length = sgl->length - PAGE_SIZE + sgl->offset;
-
- /* round-up */
- num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE);
-
- for (i = 1; i < sgt->nents; i++) {
- sgl = sg_next(sgl);
-
- /* round-up */
- num_pages += ((sgl->length + PAGE_SIZE - 1) /
- PAGE_SIZE); /* round-up */
- }
-
- return num_pages;
-}
-
-/* extract pages directly from struct sg_table */
-struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
-{
- struct pages_info *pg_info;
- int i, j, k;
- int length;
- struct scatterlist *sgl;
-
- pg_info = kmalloc(sizeof(*pg_info), GFP_KERNEL);
- if (!pg_info)
- return NULL;
-
- pg_info->pgs = kmalloc_array(get_num_pgs(sgt),
- sizeof(struct page *),
- GFP_KERNEL);
-
- if (!pg_info->pgs) {
- kfree(pg_info);
- return NULL;
- }
-
- sgl = sgt->sgl;
-
- pg_info->nents = 1;
- pg_info->frst_ofst = sgl->offset;
- pg_info->pgs[0] = sg_page(sgl);
- length = sgl->length - PAGE_SIZE + sgl->offset;
- i = 1;
-
- while (length > 0) {
- pg_info->pgs[i] = nth_page(sg_page(sgl), i);
- length -= PAGE_SIZE;
- pg_info->nents++;
- i++;
- }
-
- for (j = 1; j < sgt->nents; j++) {
- sgl = sg_next(sgl);
- pg_info->pgs[i++] = sg_page(sgl);
- length = sgl->length - PAGE_SIZE;
- pg_info->nents++;
- k = 1;
-
- while (length > 0) {
- pg_info->pgs[i++] = nth_page(sg_page(sgl), k++);
- length -= PAGE_SIZE;
- pg_info->nents++;
- }
- }
-
- /*
- * lenght at that point will be 0 or negative,
- * so to calculate last page size just add it to PAGE_SIZE
- */
- pg_info->last_len = PAGE_SIZE + length;
-
- return pg_info;
-}
-
-/* create sg_table with given pages and other parameters */
-struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs,
- int frst_ofst, int last_len,
- int nents)
-{
- struct sg_table *sgt;
- struct scatterlist *sgl;
- int i, ret;
-
- sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!sgt)
- return NULL;
-
- ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
- if (ret) {
- if (sgt) {
- sg_free_table(sgt);
- kfree(sgt);
- }
-
- return NULL;
- }
-
- sgl = sgt->sgl;
-
- sg_set_page(sgl, pgs[0], PAGE_SIZE-frst_ofst, frst_ofst);
-
- for (i = 1; i < nents-1; i++) {
- sgl = sg_next(sgl);
- sg_set_page(sgl, pgs[i], PAGE_SIZE, 0);
- }
-
- if (nents > 1) /* more than one page */ {
- sgl = sg_next(sgl);
- sg_set_page(sgl, pgs[i], last_len, 0);
- }
-
- return sgt;
-}
-
-int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
- int force)
-{
- struct sgt_list *sgtl;
- struct attachment_list *attachl;
- struct kmap_vaddr_list *va_kmapl;
- struct vmap_vaddr_list *va_vmapl;
- struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
-
- if (!exported) {
- dev_err(hy_drv_priv->dev, "invalid hyper_dmabuf_id\n");
- return -EINVAL;
- }
-
- /* if force != 1, sgt_info can be released only if
- * there's no activity on exported dma-buf on importer
- * side.
- */
- if (!force &&
- exported->active) {
- dev_warn(hy_drv_priv->dev,
- "dma-buf is used by importer\n");
-
- return -EPERM;
- }
-
- /* force == 1 is not recommended */
- while (!list_empty(&exported->va_kmapped->list)) {
- va_kmapl = list_first_entry(&exported->va_kmapped->list,
- struct kmap_vaddr_list, list);
-
- dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr);
- list_del(&va_kmapl->list);
- kfree(va_kmapl);
- }
-
- while (!list_empty(&exported->va_vmapped->list)) {
- va_vmapl = list_first_entry(&exported->va_vmapped->list,
- struct vmap_vaddr_list, list);
-
- dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
- list_del(&va_vmapl->list);
- kfree(va_vmapl);
- }
-
- while (!list_empty(&exported->active_sgts->list)) {
- attachl = list_first_entry(&exported->active_attached->list,
- struct attachment_list, list);
-
- sgtl = list_first_entry(&exported->active_sgts->list,
- struct sgt_list, list);
-
- dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
- DMA_BIDIRECTIONAL);
- list_del(&sgtl->list);
- kfree(sgtl);
- }
-
- while (!list_empty(&exported->active_sgts->list)) {
- attachl = list_first_entry(&exported->active_attached->list,
- struct attachment_list, list);
-
- dma_buf_detach(exported->dma_buf, attachl->attach);
- list_del(&attachl->list);
- kfree(attachl);
- }
-
- /* Start cleanup of buffer in reverse order to exporting */
- bknd_ops->unshare_pages(&exported->refs_info, exported->nents);
-
- /* unmap dma-buf */
- dma_buf_unmap_attachment(exported->active_attached->attach,
- exported->active_sgts->sgt,
- DMA_BIDIRECTIONAL);
-
- /* detatch dma-buf */
- dma_buf_detach(exported->dma_buf, exported->active_attached->attach);
-
- /* close connection to dma-buf completely */
- dma_buf_put(exported->dma_buf);
- exported->dma_buf = NULL;
-
- kfree(exported->active_sgts);
- kfree(exported->active_attached);
- kfree(exported->va_kmapped);
- kfree(exported->va_vmapped);
- kfree(exported->priv);
-
- return 0;
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
deleted file mode 100644
index 869d982..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_IMP_H__
-#define __HYPER_DMABUF_IMP_H__
-
-/* extract pages directly from struct sg_table */
-struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt);
-
-/* create sg_table with given pages and other parameters */
-struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs,
- int frst_ofst, int last_len,
- int nents);
-
-int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
- int force);
-
-void hyper_dmabuf_free_sgt(struct sg_table *sgt);
-
-#endif /* __HYPER_DMABUF_IMP_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
deleted file mode 100644
index a11f804..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_STRUCT_H__
-#define __HYPER_DMABUF_STRUCT_H__
-
-/* stack of mapped sgts */
-struct sgt_list {
- struct sg_table *sgt;
- struct list_head list;
-};
-
-/* stack of attachments */
-struct attachment_list {
- struct dma_buf_attachment *attach;
- struct list_head list;
-};
-
-/* stack of vaddr mapped via kmap */
-struct kmap_vaddr_list {
- void *vaddr;
- struct list_head list;
-};
-
-/* stack of vaddr mapped via vmap */
-struct vmap_vaddr_list {
- void *vaddr;
- struct list_head list;
-};
-
-/* Exporter builds pages_info before sharing pages */
-struct pages_info {
- int frst_ofst;
- int last_len;
- int nents;
- struct page **pgs;
-};
-
-
-/* Exporter stores references to sgt in a hash table
- * Exporter keeps these references for synchronization
- * and tracking purposes
- */
-struct exported_sgt_info {
- hyper_dmabuf_id_t hid;
-
- /* VM ID of importer */
- int rdomid;
-
- struct dma_buf *dma_buf;
- int nents;
-
- /* list for tracking activities on dma_buf */
- struct sgt_list *active_sgts;
- struct attachment_list *active_attached;
- struct kmap_vaddr_list *va_kmapped;
- struct vmap_vaddr_list *va_vmapped;
-
- /* set to 0 when unexported. Importer doesn't
- * do a new mapping of buffer if valid == false
- */
- bool valid;
-
- /* active == true if the buffer is actively used
- * (mapped) by importer
- */
- int active;
-
- /* hypervisor specific reference data for shared pages */
- void *refs_info;
-
- struct delayed_work unexport;
- bool unexport_sched;
-
- /* list for file pointers associated with all user space
- * application that have exported this same buffer to
- * another VM. This needs to be tracked to know whether
- * the buffer can be completely freed.
- */
- struct file *filp;
-
- /* size of private */
- size_t sz_priv;
-
- /* private data associated with the exported buffer */
- char *priv;
-};
-
-/* imported_sgt_info contains information about imported DMA_BUF
- * this info is kept in IMPORT list and asynchorously retrieved and
- * used to map DMA_BUF on importer VM's side upon export fd ioctl
- * request from user-space
- */
-
-struct imported_sgt_info {
- hyper_dmabuf_id_t hid; /* unique id for shared dmabuf imported */
-
- /* hypervisor-specific handle to pages */
- int ref_handle;
-
- /* offset and size info of DMA_BUF */
- int frst_ofst;
- int last_len;
- int nents;
-
- struct dma_buf *dma_buf;
- struct sg_table *sgt;
-
- void *refs_info;
- bool valid;
- int importers;
-
- /* size of private */
- size_t sz_priv;
-
- /* private data associated with the exported buffer */
- char *priv;
-};
-
-#endif /* __HYPER_DMABUF_STRUCT_H__ */
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
deleted file mode 100644
index 4a073ce..0000000
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ /dev/null
@@ -1,941 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <xen/grant_table.h>
-#include <xen/events.h>
-#include <xen/xenbus.h>
-#include <asm/xen/page.h>
-#include "hyper_dmabuf_xen_comm.h"
-#include "hyper_dmabuf_xen_comm_list.h"
-#include "../hyper_dmabuf_drv.h"
-
-static int export_req_id;
-
-struct hyper_dmabuf_req req_pending = {0};
-
-static void xen_get_domid_delayed(struct work_struct *unused);
-static void xen_init_comm_env_delayed(struct work_struct *unused);
-
-static DECLARE_DELAYED_WORK(get_vm_id_work, xen_get_domid_delayed);
-static DECLARE_DELAYED_WORK(xen_init_comm_env_work, xen_init_comm_env_delayed);
-
-/* Creates entry in xen store that will keep details of all
- * exporter rings created by this domain
- */
-static int xen_comm_setup_data_dir(void)
-{
- char buf[255];
-
- sprintf(buf, "/local/domain/%d/data/hyper_dmabuf",
- hy_drv_priv->domid);
-
- return xenbus_mkdir(XBT_NIL, buf, "");
-}
-
-/* Removes entry from xenstore with exporter ring details.
- * Other domains that has connected to any of exporter rings
- * created by this domain, will be notified about removal of
- * this entry and will treat that as signal to cleanup importer
- * rings created for this domain
- */
-static int xen_comm_destroy_data_dir(void)
-{
- char buf[255];
-
- sprintf(buf, "/local/domain/%d/data/hyper_dmabuf",
- hy_drv_priv->domid);
-
- return xenbus_rm(XBT_NIL, buf, "");
-}
-
-/* Adds xenstore entries with details of exporter ring created
- * for given remote domain. It requires special daemon running
- * in dom0 to make sure that given remote domain will have right
- * permissions to access that data.
- */
-static int xen_comm_expose_ring_details(int domid, int rdomid,
- int gref, int port)
-{
- char buf[255];
- int ret;
-
- sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
- domid, rdomid);
-
- ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref);
-
- if (ret) {
- dev_err(hy_drv_priv->dev,
- "Failed to write xenbus entry %s: %d\n",
- buf, ret);
-
- return ret;
- }
-
- ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port);
-
- if (ret) {
- dev_err(hy_drv_priv->dev,
- "Failed to write xenbus entry %s: %d\n",
- buf, ret);
-
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Queries details of ring exposed by remote domain.
- */
-static int xen_comm_get_ring_details(int domid, int rdomid,
- int *grefid, int *port)
-{
- char buf[255];
- int ret;
-
- sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
- rdomid, domid);
-
- ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid);
-
- if (ret <= 0) {
- dev_err(hy_drv_priv->dev,
- "Failed to read xenbus entry %s: %d\n",
- buf, ret);
-
- return ret;
- }
-
- ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port);
-
- if (ret <= 0) {
- dev_err(hy_drv_priv->dev,
- "Failed to read xenbus entry %s: %d\n",
- buf, ret);
-
- return ret;
- }
-
- return (ret <= 0 ? 1 : 0);
-}
-
-static void xen_get_domid_delayed(struct work_struct *unused)
-{
- struct xenbus_transaction xbt;
- int domid, ret;
-
- /* scheduling another if driver is still running
- * and xenstore has not been initialized
- */
- if (likely(xenstored_ready == 0)) {
- dev_dbg(hy_drv_priv->dev,
- "Xenstore is not ready yet. Will retry in 500ms\n");
- schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
- } else {
- xenbus_transaction_start(&xbt);
-
- ret = xenbus_scanf(xbt, "domid", "", "%d", &domid);
-
- if (ret <= 0)
- domid = -1;
-
- xenbus_transaction_end(xbt, 0);
-
- /* try again since -1 is an invalid id for domain
- * (but only if driver is still running)
- */
- if (unlikely(domid == -1)) {
- dev_dbg(hy_drv_priv->dev,
- "domid==-1 is invalid. Will retry it in 500ms\n");
- schedule_delayed_work(&get_vm_id_work,
- msecs_to_jiffies(500));
- } else {
- dev_info(hy_drv_priv->dev,
- "Successfully retrieved domid from Xenstore:%d\n",
- domid);
- hy_drv_priv->domid = domid;
- }
- }
-}
-
-int xen_be_get_domid(void)
-{
- struct xenbus_transaction xbt;
- int domid;
-
- if (unlikely(xenstored_ready == 0)) {
- xen_get_domid_delayed(NULL);
- return -1;
- }
-
- xenbus_transaction_start(&xbt);
-
- if (!xenbus_scanf(xbt, "domid", "", "%d", &domid))
- domid = -1;
-
- xenbus_transaction_end(xbt, 0);
-
- return domid;
-}
-
-static int xen_comm_next_req_id(void)
-{
- export_req_id++;
- return export_req_id;
-}
-
-/* For now cache latast rings as global variables TODO: keep them in list*/
-static irqreturn_t front_ring_isr(int irq, void *info);
-static irqreturn_t back_ring_isr(int irq, void *info);
-
-/* Callback function that will be called on any change of xenbus path
- * being watched. Used for detecting creation/destruction of remote
- * domain exporter ring.
- *
- * When remote domain's exporter ring will be detected, importer ring
- * on this domain will be created.
- *
- * When remote domain's exporter ring destruction will be detected it
- * will celanup this domain importer ring.
- *
- * Destruction can be caused by unloading module by remote domain or
- * it's crash/force shutdown.
- */
-static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
- const char *path, const char *token)
-{
- int rdom, ret;
- uint32_t grefid, port;
- struct xen_comm_rx_ring_info *ring_info;
-
- /* Check which domain has changed its exporter rings */
- ret = sscanf(watch->node, "/local/domain/%d/", &rdom);
- if (ret <= 0)
- return;
-
- /* Check if we have importer ring for given remote domain already
- * created
- */
- ring_info = xen_comm_find_rx_ring(rdom);
-
- /* Try to query remote domain exporter ring details - if
- * that will fail and we have importer ring that means remote
- * domains has cleanup its exporter ring, so our importer ring
- * is no longer useful.
- *
- * If querying details will succeed and we don't have importer ring,
- * it means that remote domain has setup it for us and we should
- * connect to it.
- */
-
- ret = xen_comm_get_ring_details(xen_be_get_domid(),
- rdom, &grefid, &port);
-
- if (ring_info && ret != 0) {
- dev_info(hy_drv_priv->dev,
- "Remote exporter closed, cleaninup importer\n");
- xen_be_cleanup_rx_rbuf(rdom);
- } else if (!ring_info && ret == 0) {
- dev_info(hy_drv_priv->dev,
- "Registering importer\n");
- xen_be_init_rx_rbuf(rdom);
- }
-}
-
-/* exporter needs to generated info for page sharing */
-int xen_be_init_tx_rbuf(int domid)
-{
- struct xen_comm_tx_ring_info *ring_info;
- struct xen_comm_sring *sring;
- struct evtchn_alloc_unbound alloc_unbound;
- struct evtchn_close close;
-
- void *shared_ring;
- int ret;
-
- /* check if there's any existing tx channel in the table */
- ring_info = xen_comm_find_tx_ring(domid);
-
- if (ring_info) {
- dev_info(hy_drv_priv->dev,
- "tx ring ch to domid = %d already exist\ngref = %d, port = %d\n",
- ring_info->rdomain, ring_info->gref_ring, ring_info->port);
- return 0;
- }
-
- ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
-
- if (!ring_info)
- return -ENOMEM;
-
- /* from exporter to importer */
- shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1);
- if (shared_ring == 0) {
- kfree(ring_info);
- return -ENOMEM;
- }
-
- sring = (struct xen_comm_sring *) shared_ring;
-
- SHARED_RING_INIT(sring);
-
- FRONT_RING_INIT(&(ring_info->ring_front), sring, PAGE_SIZE);
-
- ring_info->gref_ring = gnttab_grant_foreign_access(domid,
- virt_to_mfn(shared_ring),
- 0);
- if (ring_info->gref_ring < 0) {
- /* fail to get gref */
- kfree(ring_info);
- return -EFAULT;
- }
-
- alloc_unbound.dom = DOMID_SELF;
- alloc_unbound.remote_dom = domid;
- ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
- &alloc_unbound);
- if (ret) {
- dev_err(hy_drv_priv->dev,
- "Cannot allocate event channel\n");
- kfree(ring_info);
- return -EIO;
- }
-
- /* setting up interrupt */
- ret = bind_evtchn_to_irqhandler(alloc_unbound.port,
- front_ring_isr, 0,
- NULL, (void *) ring_info);
-
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "Failed to setup event channel\n");
- close.port = alloc_unbound.port;
- HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
- gnttab_end_foreign_access(ring_info->gref_ring, 0,
- virt_to_mfn(shared_ring));
- kfree(ring_info);
- return -EIO;
- }
-
- ring_info->rdomain = domid;
- ring_info->irq = ret;
- ring_info->port = alloc_unbound.port;
-
- mutex_init(&ring_info->lock);
-
- dev_dbg(hy_drv_priv->dev,
- "%s: allocated eventchannel gref %d port: %d irq: %d\n",
- __func__,
- ring_info->gref_ring,
- ring_info->port,
- ring_info->irq);
-
- ret = xen_comm_add_tx_ring(ring_info);
-
- ret = xen_comm_expose_ring_details(xen_be_get_domid(),
- domid,
- ring_info->gref_ring,
- ring_info->port);
-
- /* Register watch for remote domain exporter ring.
- * When remote domain will setup its exporter ring,
- * we will automatically connect our importer ring to it.
- */
- ring_info->watch.callback = remote_dom_exporter_watch_cb;
- ring_info->watch.node = kmalloc(255, GFP_KERNEL);
-
- if (!ring_info->watch.node) {
- kfree(ring_info);
- return -ENOMEM;
- }
-
- sprintf((char *)ring_info->watch.node,
- "/local/domain/%d/data/hyper_dmabuf/%d/port",
- domid, xen_be_get_domid());
-
- register_xenbus_watch(&ring_info->watch);
-
- return ret;
-}
-
-/* cleans up exporter ring created for given remote domain */
-void xen_be_cleanup_tx_rbuf(int domid)
-{
- struct xen_comm_tx_ring_info *ring_info;
- struct xen_comm_rx_ring_info *rx_ring_info;
-
- /* check if we at all have exporter ring for given rdomain */
- ring_info = xen_comm_find_tx_ring(domid);
-
- if (!ring_info)
- return;
-
- xen_comm_remove_tx_ring(domid);
-
- unregister_xenbus_watch(&ring_info->watch);
- kfree(ring_info->watch.node);
-
- /* No need to close communication channel, will be done by
- * this function
- */
- unbind_from_irqhandler(ring_info->irq, (void *) ring_info);
-
- /* No need to free sring page, will be freed by this function
- * when other side will end its access
- */
- gnttab_end_foreign_access(ring_info->gref_ring, 0,
- (unsigned long) ring_info->ring_front.sring);
-
- kfree(ring_info);
-
- rx_ring_info = xen_comm_find_rx_ring(domid);
- if (!rx_ring_info)
- return;
-
- BACK_RING_INIT(&(rx_ring_info->ring_back),
- rx_ring_info->ring_back.sring,
- PAGE_SIZE);
-}
-
-/* importer needs to know about shared page and port numbers for
- * ring buffer and event channel
- */
-int xen_be_init_rx_rbuf(int domid)
-{
- struct xen_comm_rx_ring_info *ring_info;
- struct xen_comm_sring *sring;
-
- struct page *shared_ring;
-
- struct gnttab_map_grant_ref *map_ops;
-
- int ret;
- int rx_gref, rx_port;
-
- /* check if there's existing rx ring channel */
- ring_info = xen_comm_find_rx_ring(domid);
-
- if (ring_info) {
- dev_info(hy_drv_priv->dev,
- "rx ring ch from domid = %d already exist\n",
- ring_info->sdomain);
-
- return 0;
- }
-
- ret = xen_comm_get_ring_details(xen_be_get_domid(), domid,
- &rx_gref, &rx_port);
-
- if (ret) {
- dev_err(hy_drv_priv->dev,
- "Domain %d has not created exporter ring for current domain\n",
- domid);
-
- return ret;
- }
-
- ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
-
- if (!ring_info)
- return -ENOMEM;
-
- ring_info->sdomain = domid;
- ring_info->evtchn = rx_port;
-
- map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL);
-
- if (!map_ops) {
- ret = -ENOMEM;
- goto fail_no_map_ops;
- }
-
- if (gnttab_alloc_pages(1, &shared_ring)) {
- ret = -ENOMEM;
- goto fail_others;
- }
-
- gnttab_set_map_op(&map_ops[0],
- (unsigned long)pfn_to_kaddr(
- page_to_pfn(shared_ring)),
- GNTMAP_host_map, rx_gref, domid);
-
- gnttab_set_unmap_op(&ring_info->unmap_op,
- (unsigned long)pfn_to_kaddr(
- page_to_pfn(shared_ring)),
- GNTMAP_host_map, -1);
-
- ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
- if (ret < 0) {
- dev_err(hy_drv_priv->dev, "Cannot map ring\n");
- ret = -EFAULT;
- goto fail_others;
- }
-
- if (map_ops[0].status) {
- dev_err(hy_drv_priv->dev, "Ring mapping failed\n");
- ret = -EFAULT;
- goto fail_others;
- } else {
- ring_info->unmap_op.handle = map_ops[0].handle;
- }
-
- kfree(map_ops);
-
- sring = (struct xen_comm_sring *)pfn_to_kaddr(page_to_pfn(shared_ring));
-
- BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE);
-
- ret = bind_interdomain_evtchn_to_irq(domid, rx_port);
-
- if (ret < 0) {
- ret = -EIO;
- goto fail_others;
- }
-
- ring_info->irq = ret;
-
- dev_dbg(hy_drv_priv->dev,
- "%s: bound to eventchannel port: %d irq: %d\n", __func__,
- rx_port,
- ring_info->irq);
-
- ret = xen_comm_add_rx_ring(ring_info);
-
- /* Setup communcation channel in opposite direction */
- if (!xen_comm_find_tx_ring(domid))
- ret = xen_be_init_tx_rbuf(domid);
-
- ret = request_irq(ring_info->irq,
- back_ring_isr, 0,
- NULL, (void *)ring_info);
-
- return ret;
-
-fail_others:
- kfree(map_ops);
-
-fail_no_map_ops:
- kfree(ring_info);
-
- return ret;
-}
-
-/* clenas up importer ring create for given source domain */
-void xen_be_cleanup_rx_rbuf(int domid)
-{
- struct xen_comm_rx_ring_info *ring_info;
- struct xen_comm_tx_ring_info *tx_ring_info;
- struct page *shared_ring;
-
- /* check if we have importer ring created for given sdomain */
- ring_info = xen_comm_find_rx_ring(domid);
-
- if (!ring_info)
- return;
-
- xen_comm_remove_rx_ring(domid);
-
- /* no need to close event channel, will be done by that function */
- unbind_from_irqhandler(ring_info->irq, (void *)ring_info);
-
- /* unmapping shared ring page */
- shared_ring = virt_to_page(ring_info->ring_back.sring);
- gnttab_unmap_refs(&ring_info->unmap_op, NULL, &shared_ring, 1);
- gnttab_free_pages(1, &shared_ring);
-
- kfree(ring_info);
-
- tx_ring_info = xen_comm_find_tx_ring(domid);
- if (!tx_ring_info)
- return;
-
- SHARED_RING_INIT(tx_ring_info->ring_front.sring);
- FRONT_RING_INIT(&(tx_ring_info->ring_front),
- tx_ring_info->ring_front.sring,
- PAGE_SIZE);
-}
-
-#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
-
-static void xen_rx_ch_add_delayed(struct work_struct *unused);
-
-static DECLARE_DELAYED_WORK(xen_rx_ch_auto_add_work, xen_rx_ch_add_delayed);
-
-#define DOMID_SCAN_START 1 /* domid = 1 */
-#define DOMID_SCAN_END 10 /* domid = 10 */
-
-static void xen_rx_ch_add_delayed(struct work_struct *unused)
-{
- int ret;
- char buf[128];
- int i, dummy;
-
- dev_dbg(hy_drv_priv->dev,
- "Scanning new tx channel comming from another domain\n");
-
- /* check other domains and schedule another work if driver
- * is still running and backend is valid
- */
- if (hy_drv_priv &&
- hy_drv_priv->initialized) {
- for (i = DOMID_SCAN_START; i < DOMID_SCAN_END + 1; i++) {
- if (i == hy_drv_priv->domid)
- continue;
-
- sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
- i, hy_drv_priv->domid);
-
- ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", &dummy);
-
- if (ret > 0) {
- if (xen_comm_find_rx_ring(i) != NULL)
- continue;
-
- ret = xen_be_init_rx_rbuf(i);
-
- if (!ret)
- dev_info(hy_drv_priv->dev,
- "Done rx ch init for VM %d\n",
- i);
- }
- }
-
- /* check every 10 seconds */
- schedule_delayed_work(&xen_rx_ch_auto_add_work,
- msecs_to_jiffies(10000));
- }
-}
-
-#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
-
-void xen_init_comm_env_delayed(struct work_struct *unused)
-{
- int ret;
-
- /* scheduling another work if driver is still running
- * and xenstore hasn't been initialized or dom_id hasn't
- * been correctly retrieved.
- */
- if (likely(xenstored_ready == 0 ||
- hy_drv_priv->domid == -1)) {
- dev_dbg(hy_drv_priv->dev,
- "Xenstore not ready Will re-try in 500ms\n");
- schedule_delayed_work(&xen_init_comm_env_work,
- msecs_to_jiffies(500));
- } else {
- ret = xen_comm_setup_data_dir();
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "Failed to create data dir in Xenstore\n");
- } else {
- dev_info(hy_drv_priv->dev,
- "Successfully finished comm env init\n");
- hy_drv_priv->initialized = true;
-
-#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
- xen_rx_ch_add_delayed(NULL);
-#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
- }
- }
-}
-
-int xen_be_init_comm_env(void)
-{
- int ret;
-
- xen_comm_ring_table_init();
-
- if (unlikely(xenstored_ready == 0 ||
- hy_drv_priv->domid == -1)) {
- xen_init_comm_env_delayed(NULL);
- return -1;
- }
-
- ret = xen_comm_setup_data_dir();
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "Failed to create data dir in Xenstore\n");
- } else {
- dev_info(hy_drv_priv->dev,
- "Successfully finished comm env initialization\n");
-
- hy_drv_priv->initialized = true;
- }
-
- return ret;
-}
-
-/* cleans up all tx/rx rings */
-static void xen_be_cleanup_all_rbufs(void)
-{
- xen_comm_foreach_tx_ring(xen_be_cleanup_tx_rbuf);
- xen_comm_foreach_rx_ring(xen_be_cleanup_rx_rbuf);
-}
-
-void xen_be_destroy_comm(void)
-{
- xen_be_cleanup_all_rbufs();
- xen_comm_destroy_data_dir();
-}
-
-int xen_be_send_req(int domid, struct hyper_dmabuf_req *req,
- int wait)
-{
- struct xen_comm_front_ring *ring;
- struct hyper_dmabuf_req *new_req;
- struct xen_comm_tx_ring_info *ring_info;
- int notify;
-
- struct timeval tv_start, tv_end;
- struct timeval tv_diff;
-
- int timeout = 1000;
-
- /* find a ring info for the channel */
- ring_info = xen_comm_find_tx_ring(domid);
- if (!ring_info) {
- dev_err(hy_drv_priv->dev,
- "Can't find ring info for the channel\n");
- return -ENOENT;
- }
-
-
- ring = &ring_info->ring_front;
-
- do_gettimeofday(&tv_start);
-
- while (RING_FULL(ring)) {
- dev_dbg(hy_drv_priv->dev, "RING_FULL\n");
-
- if (timeout == 0) {
- dev_err(hy_drv_priv->dev,
- "Timeout while waiting for an entry in the ring\n");
- return -EIO;
- }
- usleep_range(100, 120);
- timeout--;
- }
-
- timeout = 1000;
-
- mutex_lock(&ring_info->lock);
-
- new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
- if (!new_req) {
- mutex_unlock(&ring_info->lock);
- dev_err(hy_drv_priv->dev,
- "NULL REQUEST\n");
- return -EIO;
- }
-
- req->req_id = xen_comm_next_req_id();
-
- /* update req_pending with current request */
- memcpy(&req_pending, req, sizeof(req_pending));
-
- /* pass current request to the ring */
- memcpy(new_req, req, sizeof(*new_req));
-
- ring->req_prod_pvt++;
-
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
- if (notify)
- notify_remote_via_irq(ring_info->irq);
-
- if (wait) {
- while (timeout--) {
- if (req_pending.stat !=
- HYPER_DMABUF_REQ_NOT_RESPONDED)
- break;
- usleep_range(100, 120);
- }
-
- if (timeout < 0) {
- mutex_unlock(&ring_info->lock);
- dev_err(hy_drv_priv->dev,
- "request timed-out\n");
- return -EBUSY;
- }
-
- mutex_unlock(&ring_info->lock);
- do_gettimeofday(&tv_end);
-
- /* checking time duration for round-trip of a request
- * for debugging
- */
- if (tv_end.tv_usec >= tv_start.tv_usec) {
- tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec;
- tv_diff.tv_usec = tv_end.tv_usec-tv_start.tv_usec;
- } else {
- tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec-1;
- tv_diff.tv_usec = tv_end.tv_usec+1000000-
- tv_start.tv_usec;
- }
-
- if (tv_diff.tv_sec != 0 && tv_diff.tv_usec > 16000)
- dev_dbg(hy_drv_priv->dev,
- "send_req:time diff: %ld sec, %ld usec\n",
- tv_diff.tv_sec, tv_diff.tv_usec);
- }
-
- mutex_unlock(&ring_info->lock);
-
- return 0;
-}
-
-/* ISR for handling request */
-static irqreturn_t back_ring_isr(int irq, void *info)
-{
- RING_IDX rc, rp;
- struct hyper_dmabuf_req req;
- struct hyper_dmabuf_resp resp;
-
- int notify, more_to_do;
- int ret;
-
- struct xen_comm_rx_ring_info *ring_info;
- struct xen_comm_back_ring *ring;
-
- ring_info = (struct xen_comm_rx_ring_info *)info;
- ring = &ring_info->ring_back;
-
- dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
-
- do {
- rc = ring->req_cons;
- rp = ring->sring->req_prod;
- more_to_do = 0;
- while (rc != rp) {
- if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
- break;
-
- memcpy(&req, RING_GET_REQUEST(ring, rc), sizeof(req));
- ring->req_cons = ++rc;
-
- ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req);
-
- if (ret > 0) {
- /* preparing a response for the request and
- * send it to the requester
- */
- memcpy(&resp, &req, sizeof(resp));
- memcpy(RING_GET_RESPONSE(ring,
- ring->rsp_prod_pvt),
- &resp, sizeof(resp));
- ring->rsp_prod_pvt++;
-
- dev_dbg(hy_drv_priv->dev,
- "responding to exporter for req:%d\n",
- resp.resp_id);
-
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring,
- notify);
-
- if (notify)
- notify_remote_via_irq(ring_info->irq);
- }
-
- RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
- }
- } while (more_to_do);
-
- return IRQ_HANDLED;
-}
-
-/* ISR for handling responses */
-static irqreturn_t front_ring_isr(int irq, void *info)
-{
- /* front ring only care about response from back */
- struct hyper_dmabuf_resp *resp;
- RING_IDX i, rp;
- int more_to_do, ret;
-
- struct xen_comm_tx_ring_info *ring_info;
- struct xen_comm_front_ring *ring;
-
- ring_info = (struct xen_comm_tx_ring_info *)info;
- ring = &ring_info->ring_front;
-
- dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
-
- do {
- more_to_do = 0;
- rp = ring->sring->rsp_prod;
- for (i = ring->rsp_cons; i != rp; i++) {
- resp = RING_GET_RESPONSE(ring, i);
-
- /* update pending request's status with what is
- * in the response
- */
-
- dev_dbg(hy_drv_priv->dev,
- "getting response from importer\n");
-
- if (req_pending.req_id == resp->resp_id)
- req_pending.stat = resp->stat;
-
- if (resp->stat == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
- /* parsing response */
- ret = hyper_dmabuf_msg_parse(ring_info->rdomain,
- (struct hyper_dmabuf_req *)resp);
-
- if (ret < 0) {
- dev_err(hy_drv_priv->dev,
- "err while parsing resp\n");
- }
- } else if (resp->stat == HYPER_DMABUF_REQ_PROCESSED) {
- /* for debugging dma_buf remote synch */
- dev_dbg(hy_drv_priv->dev,
- "original request = 0x%x\n", resp->cmd);
- dev_dbg(hy_drv_priv->dev,
- "got HYPER_DMABUF_REQ_PROCESSED\n");
- } else if (resp->stat == HYPER_DMABUF_REQ_ERROR) {
- /* for debugging dma_buf remote synch */
- dev_dbg(hy_drv_priv->dev,
- "original request = 0x%x\n", resp->cmd);
- dev_dbg(hy_drv_priv->dev,
- "got HYPER_DMABUF_REQ_ERROR\n");
- }
- }
-
- ring->rsp_cons = i;
-
- if (i != ring->req_prod_pvt)
- RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do);
- else
- ring->sring->rsp_event = i+1;
-
- } while (more_to_do);
-
- return IRQ_HANDLED;
-}
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
deleted file mode 100644
index 70a2b70..0000000
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_XEN_COMM_H__
-#define __HYPER_DMABUF_XEN_COMM_H__
-
-#include "xen/interface/io/ring.h"
-#include "xen/xenbus.h"
-#include "../hyper_dmabuf_msg.h"
-
-extern int xenstored_ready;
-
-DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp);
-
-struct xen_comm_tx_ring_info {
- struct xen_comm_front_ring ring_front;
- int rdomain;
- int gref_ring;
- int irq;
- int port;
- struct mutex lock;
- struct xenbus_watch watch;
-};
-
-struct xen_comm_rx_ring_info {
- int sdomain;
- int irq;
- int evtchn;
- struct xen_comm_back_ring ring_back;
- struct gnttab_unmap_grant_ref unmap_op;
-};
-
-int xen_be_get_domid(void);
-
-int xen_be_init_comm_env(void);
-
-/* exporter needs to generated info for page sharing */
-int xen_be_init_tx_rbuf(int domid);
-
-/* importer needs to know about shared page and port numbers
- * for ring buffer and event channel
- */
-int xen_be_init_rx_rbuf(int domid);
-
-/* cleans up exporter ring created for given domain */
-void xen_be_cleanup_tx_rbuf(int domid);
-
-/* cleans up importer ring created for given domain */
-void xen_be_cleanup_rx_rbuf(int domid);
-
-void xen_be_destroy_comm(void);
-
-/* send request to the remote domain */
-int xen_be_send_req(int domid, struct hyper_dmabuf_req *req,
- int wait);
-
-#endif /* __HYPER_DMABUF_XEN_COMM_H__ */
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
deleted file mode 100644
index 15023db..0000000
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/cdev.h>
-#include <linux/hashtable.h>
-#include <xen/grant_table.h>
-#include "../hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_xen_comm.h"
-#include "hyper_dmabuf_xen_comm_list.h"
-
-DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING);
-DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING);
-
-void xen_comm_ring_table_init(void)
-{
- hash_init(xen_comm_rx_ring_hash);
- hash_init(xen_comm_tx_ring_hash);
-}
-
-int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info)
-{
- struct xen_comm_tx_ring_info_entry *info_entry;
-
- info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
-
- if (!info_entry)
- return -ENOMEM;
-
- info_entry->info = ring_info;
-
- hash_add(xen_comm_tx_ring_hash, &info_entry->node,
- info_entry->info->rdomain);
-
- return 0;
-}
-
-int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info)
-{
- struct xen_comm_rx_ring_info_entry *info_entry;
-
- info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
-
- if (!info_entry)
- return -ENOMEM;
-
- info_entry->info = ring_info;
-
- hash_add(xen_comm_rx_ring_hash, &info_entry->node,
- info_entry->info->sdomain);
-
- return 0;
-}
-
-struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid)
-{
- struct xen_comm_tx_ring_info_entry *info_entry;
- int bkt;
-
- hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node)
- if (info_entry->info->rdomain == domid)
- return info_entry->info;
-
- return NULL;
-}
-
-struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid)
-{
- struct xen_comm_rx_ring_info_entry *info_entry;
- int bkt;
-
- hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node)
- if (info_entry->info->sdomain == domid)
- return info_entry->info;
-
- return NULL;
-}
-
-int xen_comm_remove_tx_ring(int domid)
-{
- struct xen_comm_tx_ring_info_entry *info_entry;
- int bkt;
-
- hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node)
- if (info_entry->info->rdomain == domid) {
- hash_del(&info_entry->node);
- kfree(info_entry);
- return 0;
- }
-
- return -ENOENT;
-}
-
-int xen_comm_remove_rx_ring(int domid)
-{
- struct xen_comm_rx_ring_info_entry *info_entry;
- int bkt;
-
- hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node)
- if (info_entry->info->sdomain == domid) {
- hash_del(&info_entry->node);
- kfree(info_entry);
- return 0;
- }
-
- return -ENOENT;
-}
-
-void xen_comm_foreach_tx_ring(void (*func)(int domid))
-{
- struct xen_comm_tx_ring_info_entry *info_entry;
- struct hlist_node *tmp;
- int bkt;
-
- hash_for_each_safe(xen_comm_tx_ring_hash, bkt, tmp,
- info_entry, node) {
- func(info_entry->info->rdomain);
- }
-}
-
-void xen_comm_foreach_rx_ring(void (*func)(int domid))
-{
- struct xen_comm_rx_ring_info_entry *info_entry;
- struct hlist_node *tmp;
- int bkt;
-
- hash_for_each_safe(xen_comm_rx_ring_hash, bkt, tmp,
- info_entry, node) {
- func(info_entry->info->sdomain);
- }
-}
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
deleted file mode 100644
index 8502fe7..0000000
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_XEN_COMM_LIST_H__
-#define __HYPER_DMABUF_XEN_COMM_LIST_H__
-
-/* number of bits to be used for exported dmabufs hash table */
-#define MAX_ENTRY_TX_RING 7
-/* number of bits to be used for imported dmabufs hash table */
-#define MAX_ENTRY_RX_RING 7
-
-struct xen_comm_tx_ring_info_entry {
- struct xen_comm_tx_ring_info *info;
- struct hlist_node node;
-};
-
-struct xen_comm_rx_ring_info_entry {
- struct xen_comm_rx_ring_info *info;
- struct hlist_node node;
-};
-
-void xen_comm_ring_table_init(void);
-
-int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info);
-
-int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info);
-
-int xen_comm_remove_tx_ring(int domid);
-
-int xen_comm_remove_rx_ring(int domid);
-
-struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid);
-
-struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid);
-
-/* iterates over all exporter rings and calls provided
- * function for each of them
- */
-void xen_comm_foreach_tx_ring(void (*func)(int domid));
-
-/* iterates over all importer rings and calls provided
- * function for each of them
- */
-void xen_comm_foreach_rx_ring(void (*func)(int domid));
-
-#endif // __HYPER_DMABUF_XEN_COMM_LIST_H__
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
deleted file mode 100644
index 14ed3bc..0000000
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include "../hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_xen_comm.h"
-#include "hyper_dmabuf_xen_shm.h"
-
-struct hyper_dmabuf_bknd_ops xen_bknd_ops = {
- .init = NULL, /* not needed for xen */
- .cleanup = NULL, /* not needed for xen */
- .get_vm_id = xen_be_get_domid,
- .share_pages = xen_be_share_pages,
- .unshare_pages = xen_be_unshare_pages,
- .map_shared_pages = (void *)xen_be_map_shared_pages,
- .unmap_shared_pages = xen_be_unmap_shared_pages,
- .init_comm_env = xen_be_init_comm_env,
- .destroy_comm = xen_be_destroy_comm,
- .init_rx_ch = xen_be_init_rx_rbuf,
- .init_tx_ch = xen_be_init_tx_rbuf,
- .send_req = xen_be_send_req,
-};
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
deleted file mode 100644
index a4902b7..0000000
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_XEN_DRV_H__
-#define __HYPER_DMABUF_XEN_DRV_H__
-#include <xen/interface/grant_table.h>
-
-extern struct hyper_dmabuf_bknd_ops xen_bknd_ops;
-
-/* Main purpose of this structure is to keep
- * all references created or acquired for sharing
- * pages with another domain for freeing those later
- * when unsharing.
- */
-struct xen_shared_pages_info {
- /* top level refid */
- grant_ref_t lvl3_gref;
-
- /* page of top level addressing, it contains refids of 2nd lvl pages */
- grant_ref_t *lvl3_table;
-
- /* table of 2nd level pages, that contains refids to data pages */
- grant_ref_t *lvl2_table;
-
- /* unmap ops for mapped pages */
- struct gnttab_unmap_grant_ref *unmap_ops;
-
- /* data pages to be unmapped */
- struct page **data_pages;
-};
-
-#endif // __HYPER_DMABUF_XEN_COMM_H__
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
deleted file mode 100644
index c6a15f1..0000000
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Dongwon Kim <dongwon.kim@...el.com>
- * Mateusz Polrola <mateuszx.potrola@...el.com>
- *
- */
-
-#include <linux/slab.h>
-#include <xen/grant_table.h>
-#include <asm/xen/page.h>
-#include "hyper_dmabuf_xen_drv.h"
-#include "../hyper_dmabuf_drv.h"
-
-#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
-
-/*
- * Creates 2 level page directory structure for referencing shared pages.
- * Top level page is a single page that contains up to 1024 refids that
- * point to 2nd level pages.
- *
- * Each 2nd level page contains up to 1024 refids that point to shared
- * data pages.
- *
- * There will always be one top level page and number of 2nd level pages
- * depends on number of shared data pages.
- *
- * 3rd level page 2nd level pages Data pages
- * +-------------------------+ ┌>+--------------------+ ┌>+------------+
- * |2nd level page 0 refid |---┘ |Data page 0 refid |-┘ |Data page 0 |
- * |2nd level page 1 refid |---┐ |Data page 1 refid |-┐ +------------+
- * | ... | | | .... | |
- * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └>+------------+
- * +-------------------------+ | | +--------------------+ |Data page 1 |
- * | | +------------+
- * | └>+--------------------+
- * | |Data page 1024 refid|
- * | |Data page 1025 refid|
- * | | ... |
- * | |Data page 2047 refid|
- * | +--------------------+
- * |
- * | .....
- * └-->+-----------------------+
- * |Data page 1047552 refid|
- * |Data page 1047553 refid|
- * | ... |
- * |Data page 1048575 refid|
- * +-----------------------+
- *
- * Using such 2 level structure it is possible to reference up to 4GB of
- * shared data using single refid pointing to top level page.
- *
- * Returns refid of top level page.
- */
-int xen_be_share_pages(struct page **pages, int domid, int nents,
- void **refs_info)
-{
- grant_ref_t lvl3_gref;
- grant_ref_t *lvl2_table;
- grant_ref_t *lvl3_table;
-
- /*
- * Calculate number of pages needed for 2nd level addresing:
- */
- int n_lvl2_grefs = (nents/REFS_PER_PAGE +
- ((nents % REFS_PER_PAGE) ? 1 : 0));
-
- struct xen_shared_pages_info *sh_pages_info;
- int i;
-
- lvl3_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, 1);
- lvl2_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs);
-
- sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
-
- if (!sh_pages_info)
- return -ENOMEM;
-
- *refs_info = (void *)sh_pages_info;
-
- /* share data pages in readonly mode for security */
- for (i = 0; i < nents; i++) {
- lvl2_table[i] = gnttab_grant_foreign_access(domid,
- pfn_to_mfn(page_to_pfn(pages[i])),
- true /* read only */);
- if (lvl2_table[i] == -ENOSPC) {
- dev_err(hy_drv_priv->dev,
- "No more space left in grant table\n");
-
- /* Unshare all already shared pages for lvl2 */
- while (i--) {
- gnttab_end_foreign_access_ref(lvl2_table[i], 0);
- gnttab_free_grant_reference(lvl2_table[i]);
- }
- goto err_cleanup;
- }
- }
-
- /* Share 2nd level addressing pages in readonly mode*/
- for (i = 0; i < n_lvl2_grefs; i++) {
- lvl3_table[i] = gnttab_grant_foreign_access(domid,
- virt_to_mfn(
- (unsigned long)lvl2_table+i*PAGE_SIZE),
- true);
-
- if (lvl3_table[i] == -ENOSPC) {
- dev_err(hy_drv_priv->dev,
- "No more space left in grant table\n");
-
- /* Unshare all already shared pages for lvl3 */
- while (i--) {
- gnttab_end_foreign_access_ref(lvl3_table[i], 1);
- gnttab_free_grant_reference(lvl3_table[i]);
- }
-
- /* Unshare all pages for lvl2 */
- while (nents--) {
- gnttab_end_foreign_access_ref(
- lvl2_table[nents], 0);
- gnttab_free_grant_reference(lvl2_table[nents]);
- }
-
- goto err_cleanup;
- }
- }
-
- /* Share lvl3_table in readonly mode*/
- lvl3_gref = gnttab_grant_foreign_access(domid,
- virt_to_mfn((unsigned long)lvl3_table),
- true);
-
- if (lvl3_gref == -ENOSPC) {
- dev_err(hy_drv_priv->dev,
- "No more space left in grant table\n");
-
- /* Unshare all pages for lvl3 */
- while (i--) {
- gnttab_end_foreign_access_ref(lvl3_table[i], 1);
- gnttab_free_grant_reference(lvl3_table[i]);
- }
-
- /* Unshare all pages for lvl2 */
- while (nents--) {
- gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
- gnttab_free_grant_reference(lvl2_table[nents]);
- }
-
- goto err_cleanup;
- }
-
- /* Store lvl3_table page to be freed later */
- sh_pages_info->lvl3_table = lvl3_table;
-
- /* Store lvl2_table pages to be freed later */
- sh_pages_info->lvl2_table = lvl2_table;
-
-
- /* Store exported pages refid to be unshared later */
- sh_pages_info->lvl3_gref = lvl3_gref;
-
- dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
- return lvl3_gref;
-
-err_cleanup:
- free_pages((unsigned long)lvl2_table, n_lvl2_grefs);
- free_pages((unsigned long)lvl3_table, 1);
-
- return -ENOSPC;
-}
-
-int xen_be_unshare_pages(void **refs_info, int nents)
-{
- struct xen_shared_pages_info *sh_pages_info;
- int n_lvl2_grefs = (nents/REFS_PER_PAGE +
- ((nents % REFS_PER_PAGE) ? 1 : 0));
- int i;
-
- dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
- sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
-
- if (sh_pages_info->lvl3_table == NULL ||
- sh_pages_info->lvl2_table == NULL ||
- sh_pages_info->lvl3_gref == -1) {
- dev_warn(hy_drv_priv->dev,
- "gref table for hyper_dmabuf already cleaned up\n");
- return 0;
- }
-
- /* End foreign access for data pages, but do not free them */
- for (i = 0; i < nents; i++) {
- if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i]))
- dev_warn(hy_drv_priv->dev, "refid not shared !!\n");
-
- gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0);
- gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]);
- }
-
- /* End foreign access for 2nd level addressing pages */
- for (i = 0; i < n_lvl2_grefs; i++) {
- if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i]))
- dev_warn(hy_drv_priv->dev, "refid not shared !!\n");
-
- if (!gnttab_end_foreign_access_ref(
- sh_pages_info->lvl3_table[i], 1))
- dev_warn(hy_drv_priv->dev, "refid still in use!!!\n");
-
- gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]);
- }
-
- /* End foreign access for top level addressing page */
- if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref))
- dev_warn(hy_drv_priv->dev, "gref not shared !!\n");
-
- gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1);
- gnttab_free_grant_reference(sh_pages_info->lvl3_gref);
-
- /* freeing all pages used for 2 level addressing */
- free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs);
- free_pages((unsigned long)sh_pages_info->lvl3_table, 1);
-
- sh_pages_info->lvl3_gref = -1;
- sh_pages_info->lvl2_table = NULL;
- sh_pages_info->lvl3_table = NULL;
- kfree(sh_pages_info);
- sh_pages_info = NULL;
-
- dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
- return 0;
-}
-
-/* Maps provided top level ref id and then return array of pages
- * containing data refs.
- */
-struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid,
- int nents, void **refs_info)
-{
- struct page *lvl3_table_page;
- struct page **lvl2_table_pages;
- struct page **data_pages;
- struct xen_shared_pages_info *sh_pages_info;
-
- grant_ref_t *lvl3_table;
- grant_ref_t *lvl2_table;
-
- struct gnttab_map_grant_ref lvl3_map_ops;
- struct gnttab_unmap_grant_ref lvl3_unmap_ops;
-
- struct gnttab_map_grant_ref *lvl2_map_ops;
- struct gnttab_unmap_grant_ref *lvl2_unmap_ops;
-
- struct gnttab_map_grant_ref *data_map_ops;
- struct gnttab_unmap_grant_ref *data_unmap_ops;
-
- /* # of grefs in the last page of lvl2 table */
- int nents_last = (nents - 1) % REFS_PER_PAGE + 1;
- int n_lvl2_grefs = (nents / REFS_PER_PAGE) +
- ((nents_last > 0) ? 1 : 0) -
- (nents_last == REFS_PER_PAGE);
- int i, j, k;
-
- dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
-
- sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
- *refs_info = (void *) sh_pages_info;
-
- lvl2_table_pages = kcalloc(n_lvl2_grefs, sizeof(struct page *),
- GFP_KERNEL);
-
- data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
-
- lvl2_map_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_map_ops),
- GFP_KERNEL);
-
- lvl2_unmap_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_unmap_ops),
- GFP_KERNEL);
-
- data_map_ops = kcalloc(nents, sizeof(*data_map_ops), GFP_KERNEL);
- data_unmap_ops = kcalloc(nents, sizeof(*data_unmap_ops), GFP_KERNEL);
-
- /* Map top level addressing page */
- if (gnttab_alloc_pages(1, &lvl3_table_page)) {
- dev_err(hy_drv_priv->dev, "Cannot allocate pages\n");
- return NULL;
- }
-
- lvl3_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl3_table_page));
-
- gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table,
- GNTMAP_host_map | GNTMAP_readonly,
- (grant_ref_t)lvl3_gref, domid);
-
- gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table,
- GNTMAP_host_map | GNTMAP_readonly, -1);
-
- if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) {
- dev_err(hy_drv_priv->dev,
- "HYPERVISOR map grant ref failed");
- return NULL;
- }
-
- if (lvl3_map_ops.status) {
- dev_err(hy_drv_priv->dev,
- "HYPERVISOR map grant ref failed status = %d",
- lvl3_map_ops.status);
-
- goto error_cleanup_lvl3;
- } else {
- lvl3_unmap_ops.handle = lvl3_map_ops.handle;
- }
-
- /* Map all second level pages */
- if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) {
- dev_err(hy_drv_priv->dev, "Cannot allocate pages\n");
- goto error_cleanup_lvl3;
- }
-
- for (i = 0; i < n_lvl2_grefs; i++) {
- lvl2_table = (grant_ref_t *)pfn_to_kaddr(
- page_to_pfn(lvl2_table_pages[i]));
- gnttab_set_map_op(&lvl2_map_ops[i],
- (unsigned long)lvl2_table, GNTMAP_host_map |
- GNTMAP_readonly,
- lvl3_table[i], domid);
- gnttab_set_unmap_op(&lvl2_unmap_ops[i],
- (unsigned long)lvl2_table, GNTMAP_host_map |
- GNTMAP_readonly, -1);
- }
-
- /* Unmap top level page, as it won't be needed any longer */
- if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL,
- &lvl3_table_page, 1)) {
- dev_err(hy_drv_priv->dev,
- "xen: cannot unmap top level page\n");
- return NULL;
- }
-
- /* Mark that page was unmapped */
- lvl3_unmap_ops.handle = -1;
-
- if (gnttab_map_refs(lvl2_map_ops, NULL,
- lvl2_table_pages, n_lvl2_grefs)) {
- dev_err(hy_drv_priv->dev,
- "HYPERVISOR map grant ref failed");
- return NULL;
- }
-
- /* Checks if pages were mapped correctly */
- for (i = 0; i < n_lvl2_grefs; i++) {
- if (lvl2_map_ops[i].status) {
- dev_err(hy_drv_priv->dev,
- "HYPERVISOR map grant ref failed status = %d",
- lvl2_map_ops[i].status);
- goto error_cleanup_lvl2;
- } else {
- lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle;
- }
- }
-
- if (gnttab_alloc_pages(nents, data_pages)) {
- dev_err(hy_drv_priv->dev,
- "Cannot allocate pages\n");
- goto error_cleanup_lvl2;
- }
-
- k = 0;
-
- for (i = 0; i < n_lvl2_grefs - 1; i++) {
- lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i]));
- for (j = 0; j < REFS_PER_PAGE; j++) {
- gnttab_set_map_op(&data_map_ops[k],
- (unsigned long)pfn_to_kaddr(
- page_to_pfn(data_pages[k])),
- GNTMAP_host_map | GNTMAP_readonly,
- lvl2_table[j], domid);
-
- gnttab_set_unmap_op(&data_unmap_ops[k],
- (unsigned long)pfn_to_kaddr(
- page_to_pfn(data_pages[k])),
- GNTMAP_host_map | GNTMAP_readonly, -1);
- k++;
- }
- }
-
- /* for grefs in the last lvl2 table page */
- lvl2_table = pfn_to_kaddr(page_to_pfn(
- lvl2_table_pages[n_lvl2_grefs - 1]));
-
- for (j = 0; j < nents_last; j++) {
- gnttab_set_map_op(&data_map_ops[k],
- (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
- GNTMAP_host_map | GNTMAP_readonly,
- lvl2_table[j], domid);
-
- gnttab_set_unmap_op(&data_unmap_ops[k],
- (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])),
- GNTMAP_host_map | GNTMAP_readonly, -1);
- k++;
- }
-
- if (gnttab_map_refs(data_map_ops, NULL,
- data_pages, nents)) {
- dev_err(hy_drv_priv->dev,
- "HYPERVISOR map grant ref failed\n");
- return NULL;
- }
-
- /* unmapping lvl2 table pages */
- if (gnttab_unmap_refs(lvl2_unmap_ops,
- NULL, lvl2_table_pages,
- n_lvl2_grefs)) {
- dev_err(hy_drv_priv->dev,
- "Cannot unmap 2nd level refs\n");
- return NULL;
- }
-
- /* Mark that pages were unmapped */
- for (i = 0; i < n_lvl2_grefs; i++)
- lvl2_unmap_ops[i].handle = -1;
-
- for (i = 0; i < nents; i++) {
- if (data_map_ops[i].status) {
- dev_err(hy_drv_priv->dev,
- "HYPERVISOR map grant ref failed status = %d\n",
- data_map_ops[i].status);
- goto error_cleanup_data;
- } else {
- data_unmap_ops[i].handle = data_map_ops[i].handle;
- }
- }
-
- /* store these references for unmapping in the future */
- sh_pages_info->unmap_ops = data_unmap_ops;
- sh_pages_info->data_pages = data_pages;
-
- gnttab_free_pages(1, &lvl3_table_page);
- gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages);
- kfree(lvl2_table_pages);
- kfree(lvl2_map_ops);
- kfree(lvl2_unmap_ops);
- kfree(data_map_ops);
-
- dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
- return data_pages;
-
-error_cleanup_data:
- gnttab_unmap_refs(data_unmap_ops, NULL, data_pages,
- nents);
-
- gnttab_free_pages(nents, data_pages);
-
-error_cleanup_lvl2:
- if (lvl2_unmap_ops[0].handle != -1)
- gnttab_unmap_refs(lvl2_unmap_ops, NULL,
- lvl2_table_pages, n_lvl2_grefs);
- gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages);
-
-error_cleanup_lvl3:
- if (lvl3_unmap_ops.handle != -1)
- gnttab_unmap_refs(&lvl3_unmap_ops, NULL,
- &lvl3_table_page, 1);
- gnttab_free_pages(1, &lvl3_table_page);
-
- kfree(lvl2_table_pages);
- kfree(lvl2_map_ops);
- kfree(lvl2_unmap_ops);
- kfree(data_map_ops);
-
-
- return NULL;
-}
-
-int xen_be_unmap_shared_pages(void **refs_info, int nents)
-{
- struct xen_shared_pages_info *sh_pages_info;
-
- dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__);
-
- sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
-
- if (sh_pages_info->unmap_ops == NULL ||
- sh_pages_info->data_pages == NULL) {
- dev_warn(hy_drv_priv->dev,
- "pages already cleaned up or buffer not imported yet\n");
- return 0;
- }
-
- if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL,
- sh_pages_info->data_pages, nents)) {
- dev_err(hy_drv_priv->dev, "Cannot unmap data pages\n");
- return -EFAULT;
- }
-
- gnttab_free_pages(nents, sh_pages_info->data_pages);
-
- kfree(sh_pages_info->data_pages);
- kfree(sh_pages_info->unmap_ops);
- sh_pages_info->unmap_ops = NULL;
- sh_pages_info->data_pages = NULL;
- kfree(sh_pages_info);
- sh_pages_info = NULL;
-
- dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
- return 0;
-}
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h
deleted file mode 100644
index d5236b5..0000000
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_XEN_SHM_H__
-#define __HYPER_DMABUF_XEN_SHM_H__
-
-/* This collects all reference numbers for 2nd level shared pages and
- * create a table with those in 1st level shared pages then return reference
- * numbers for this top level table.
- */
-int xen_be_share_pages(struct page **pages, int domid, int nents,
- void **refs_info);
-
-int xen_be_unshare_pages(void **refs_info, int nents);
-
-/* Maps provided top level ref id and then return array of pages containing
- * data refs.
- */
-struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid,
- int nents,
- void **refs_info);
-
-int xen_be_unmap_shared_pages(void **refs_info, int nents);
-
-#endif /* __HYPER_DMABUF_XEN_SHM_H__ */
--
2.7.4
Powered by blists - more mailing lists