lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Thu, 15 Jun 2017 16:00:11 +0800
From:   Xiaoguang Chen <xiaoguang.chen@...el.com>
To:     alex.williamson@...hat.com, kraxel@...hat.com,
        chris@...is-wilson.co.uk, intel-gfx@...ts.freedesktop.org,
        linux-kernel@...r.kernel.org, zhenyuw@...ux.intel.com,
        zhiyuan.lv@...el.com, intel-gvt-dev@...ts.freedesktop.org,
        zhi.a.wang@...el.com, kevin.tian@...el.com
Cc:     Xiaoguang Chen <xiaoguang.chen@...el.com>
Subject: [PATCH v9 7/7] drm/i915/gvt: Adding user interface for dma-buf

User space should create the management fd for the dma-buf operation first.
Then user can query the plane information and create dma-buf if necessary
using the management fd.

Signed-off-by: Xiaoguang Chen <xiaoguang.chen@...el.com>
Tested-by: Kechen Lu <kechen.lu@...el.com>
---
 drivers/gpu/drm/i915/gvt/dmabuf.c    |  45 +++++++++++-
 drivers/gpu/drm/i915/gvt/dmabuf.h    |   5 ++
 drivers/gpu/drm/i915/gvt/gvt.c       |   3 +
 drivers/gpu/drm/i915/gvt/gvt.h       |   6 ++
 drivers/gpu/drm/i915/gvt/hypercall.h |   3 +
 drivers/gpu/drm/i915/gvt/kvmgt.c     | 136 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/mpt.h       |  30 ++++++++
 drivers/gpu/drm/i915/gvt/vgpu.c      |   2 +
 8 files changed, 229 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6ef4f60..a6a6f6d 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -81,6 +81,31 @@ static void intel_vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
 
 static void intel_vgpu_gem_release(struct drm_i915_gem_object *obj)
 {
+	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+	struct intel_vgpu_fb_info *fb_info;
+	struct intel_vgpu *vgpu;
+	struct list_head *pos;
+
+	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
+	if (WARN_ON(!fb_info || !fb_info->vgpu)) {
+		gvt_vgpu_err("gvt info is invalid\n");
+		goto out;
+	}
+
+	vgpu = fb_info->vgpu;
+	mutex_lock(&vgpu->dmabuf_list_lock);
+	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+						list);
+		if ((dmabuf_obj != NULL) && (dmabuf_obj->obj == obj)) {
+			list_del(pos);
+			kfree(dmabuf_obj);
+			break;
+		}
+	}
+	mutex_unlock(&vgpu->dmabuf_list_lock);
+	intel_gvt_hypervisor_put_vfio_device(vgpu);
+out:
 	kfree(obj->gvt_info);
 }
 
@@ -216,6 +241,7 @@ int intel_vgpu_create_dmabuf(struct intel_vgpu *vgpu, void *args)
 	struct vfio_dmabuf_mgr_create_dmabuf *gvt_dmabuf = args;
 	struct intel_vgpu_fb_info *fb_info;
 	int ret;
+	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 
 	ret = intel_vgpu_get_plane_info(dev, vgpu, &gvt_dmabuf->plane_info,
 					gvt_dmabuf->plane_id);
@@ -238,6 +264,18 @@ int intel_vgpu_create_dmabuf(struct intel_vgpu *vgpu, void *args)
 	fb_info->vgpu = vgpu;
 	obj->gvt_info = fb_info;
 
+	dmabuf_obj = kmalloc(sizeof(*dmabuf_obj), GFP_KERNEL);
+	if (!dmabuf_obj) {
+		gvt_vgpu_err("alloc dmabuf_obj failed\n");
+		ret = -ENOMEM;
+		goto out_free_info;
+	}
+	dmabuf_obj->obj = obj;
+	INIT_LIST_HEAD(&dmabuf_obj->list);
+	mutex_lock(&vgpu->dmabuf_list_lock);
+	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
+	mutex_unlock(&vgpu->dmabuf_list_lock);
+
 	dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
 
 	if (IS_ERR(dmabuf)) {
@@ -251,11 +289,21 @@ int intel_vgpu_create_dmabuf(struct intel_vgpu *vgpu, void *args)
 		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
 		goto out_free;
 	}
-
+	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
+		gvt_vgpu_err("get vfio device failed\n");
+		ret = -ENODEV;
+		goto out_free;
+	}
 	gvt_dmabuf->fd = ret;
 
 	return 0;
 out_free:
+	/* unlink before kfree: dmabuf_obj is already on the vgpu list */
+	mutex_lock(&vgpu->dmabuf_list_lock);
+	list_del(&dmabuf_obj->list);
+	mutex_unlock(&vgpu->dmabuf_list_lock);
+	kfree(dmabuf_obj);
+out_free_info:
 	kfree(fb_info);
 out:
 	i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.h b/drivers/gpu/drm/i915/gvt/dmabuf.h
index 8be9979..cafa781 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.h
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.h
@@ -31,6 +31,11 @@ struct intel_vgpu_fb_info {
 	uint32_t fb_size;
 };
 
+struct intel_vgpu_dmabuf_obj {
+	struct drm_i915_gem_object *obj;
+	struct list_head list;
+};
+
 int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
 int intel_vgpu_create_dmabuf(struct intel_vgpu *vgpu, void *args);
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 2032917..d589830 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -54,6 +54,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.vgpu_reset = intel_gvt_reset_vgpu,
 	.vgpu_activate = intel_gvt_activate_vgpu,
 	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
+	.vgpu_query_plane = intel_vgpu_query_plane,
+	.vgpu_create_dmabuf = intel_vgpu_create_dmabuf,
 };
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 763a8c5..df7e216 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -185,8 +185,12 @@ struct intel_vgpu {
 		struct kvm *kvm;
 		struct work_struct release_work;
 		atomic_t released;
+		struct vfio_device *vfio_device;
 	} vdev;
 #endif
+	atomic_t mgr_fd_opened;
+	struct list_head dmabuf_obj_list_head;
+	struct mutex dmabuf_list_lock;
 };
 
 struct intel_gvt_gm {
@@ -467,6 +471,8 @@ struct intel_gvt_ops {
 	void (*vgpu_reset)(struct intel_vgpu *);
 	void (*vgpu_activate)(struct intel_vgpu *);
 	void (*vgpu_deactivate)(struct intel_vgpu *);
+	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
+	int (*vgpu_create_dmabuf)(struct intel_vgpu *vgpu, void *);
 };
 
 
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 32c345c..4f2161c 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -56,6 +56,8 @@ struct intel_gvt_mpt {
 	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
 			     bool map);
 	int (*set_opregion)(void *vgpu);
+	int (*get_vfio_device)(void *vgpu);
+	void (*put_vfio_device)(void *vgpu);
 };
 
 extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 6b4652a..9f71325 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -41,6 +41,7 @@
 #include <linux/kvm_host.h>
 #include <linux/vfio.h>
 #include <linux/mdev.h>
+#include <linux/anon_inodes.h>
 
 #include "i915_drv.h"
 #include "gvt.h"
@@ -523,6 +524,98 @@ static int kvmgt_set_opregion(void *p_vgpu)
 	return ret;
 }
 
+static int kvmgt_get_vfio_device(void *vgpu)
+{
+	if (!vfio_device_get_from_dev(
+			mdev_dev(((struct intel_vgpu *)vgpu)->vdev.mdev)))
+		return -ENODEV;
+
+	return 0;
+}
+
+static void kvmgt_put_vfio_device(void *vgpu)
+{
+	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+		return;
+
+	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+}
+
+static int intel_vgpu_dmabuf_mgr_fd_mmap(struct file *file,
+		struct vm_area_struct *vma)
+{
+	return -EPERM;
+}
+
+static int intel_vgpu_dmabuf_mgr_fd_release(struct inode *inode,
+		struct file *filp)
+{
+	struct intel_vgpu *vgpu = filp->private_data;
+	struct intel_vgpu_dmabuf_obj *obj;
+	struct list_head *pos, *n;
+
+	/* drop the reference taken when each dmabuf was created;
+	 * gem_release unlinks and frees entries, so iterate with _safe */
+	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
+		obj = container_of(pos, struct intel_vgpu_dmabuf_obj, list);
+		i915_gem_object_put(obj->obj);
+	}
+	kvmgt_put_vfio_device(vgpu);
+	atomic_set(&vgpu->mgr_fd_opened, 0);
+
+	return 0;
+}
+
+static long intel_vgpu_dmabuf_mgr_fd_ioctl(struct file *filp,
+		unsigned int ioctl, unsigned long arg)
+{
+	struct intel_vgpu *vgpu = filp->private_data;
+	int minsz;
+	int ret = 0;
+
+	if (ioctl == VFIO_DMABUF_MGR_QUERY_PLANE) {
+		struct vfio_dmabuf_mgr_query_plane plane_info;
+
+		minsz = offsetofend(struct vfio_dmabuf_mgr_query_plane,
+					plane_id);
+		if (copy_from_user(&plane_info, (void __user *)arg, minsz))
+			return -EFAULT;
+		if (plane_info.argsz < minsz || plane_info.flags != 0)
+			return -EINVAL;
+		ret = intel_gvt_ops->vgpu_query_plane(vgpu, &plane_info);
+		if (ret != 0) {
+			gvt_vgpu_err("query plane failed:%d\n", ret);
+			return ret;
+		}
+		return copy_to_user((void __user *)arg, &plane_info, minsz) ?
+								-EFAULT : 0;
+	} else if (ioctl == VFIO_DMABUF_MGR_CREATE_DMABUF) {
+		struct vfio_dmabuf_mgr_create_dmabuf dmabuf;
+
+		minsz = offsetofend(struct vfio_dmabuf_mgr_create_dmabuf, fd);
+		if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
+			return -EFAULT;
+		if (dmabuf.argsz < minsz || dmabuf.flags != 0)
+			return -EINVAL;
+
+		ret = intel_gvt_ops->vgpu_create_dmabuf(vgpu, &dmabuf);
+		if (ret != 0)
+			return ret;
+
+		return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
+								-EFAULT : 0;
+	} else
+		gvt_vgpu_err("unsupported dmabuf mgr fd operation\n");
+
+	return -EINVAL;
+}
+
+static const struct file_operations intel_vgpu_dmabuf_mgr_fd_ops = {
+	.release        = intel_vgpu_dmabuf_mgr_fd_release,
+	.unlocked_ioctl = intel_vgpu_dmabuf_mgr_fd_ioctl,
+	.mmap           = intel_vgpu_dmabuf_mgr_fd_mmap,
+	.llseek         = noop_llseek,
+};
 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 {
 	struct intel_vgpu *vgpu = NULL;
@@ -530,6 +623,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	struct device *pdev;
 	void *gvt;
 	int ret;
+	struct vfio_device *device;
 
 	pdev = mdev_parent_dev(mdev);
 	gvt = kdev_to_i915(pdev)->gvt;
@@ -554,6 +648,14 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	vgpu->vdev.mdev = mdev;
 	mdev_set_drvdata(mdev, vgpu);
 
+	device = vfio_device_get_from_dev(mdev_dev(mdev));
+	if (device == NULL) {
+		gvt_vgpu_err("get vfio device failed\n");
+		ret = -ENODEV;
+		goto out;
+	}
+	vgpu->vdev.vfio_device = device;
+
 	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
 		     dev_name(mdev_dev(mdev)));
 	ret = 0;
@@ -1249,6 +1351,39 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 	} else if (cmd == VFIO_DEVICE_RESET) {
 		intel_gvt_ops->vgpu_reset(vgpu);
 		return 0;
+	} else if (cmd == VFIO_DEVICE_GET_FD) {
+		int fd;
+		u32 type;
+		int ret;
+
+		if (copy_from_user(&type, (void __user *)arg, sizeof(type)))
+			return -EFAULT;
+		if (type != VFIO_DEVICE_DMABUF_MGR_FD)
+			return -EINVAL;
+
+		/* claim the single mgr-fd slot atomically (no read/set race) */
+		if (atomic_cmpxchg(&vgpu->mgr_fd_opened, 0, 1) != 0) {
+			gvt_vgpu_err("mgr fd already opened\n");
+			return -EPERM;
+		}
+
+		ret = kvmgt_get_vfio_device(vgpu);
+		if (ret != 0) {
+			atomic_set(&vgpu->mgr_fd_opened, 0);
+			return ret;
+		}
+
+		fd = anon_inode_getfd("intel-vgpu-dmabuf-mgr-fd",
+			&intel_vgpu_dmabuf_mgr_fd_ops,
+			vgpu, O_RDWR | O_CLOEXEC);
+		if (fd < 0) {
+			kvmgt_put_vfio_device(vgpu);
+			atomic_set(&vgpu->mgr_fd_opened, 0);
+			gvt_vgpu_err("create dmabuf mgr fd failed\n");
+			return fd;
+		}
+
+		return fd;
 	}
 
 	return 0;
@@ -1470,6 +1602,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 	kvmgt_protect_table_init(info);
 	gvt_cache_init(vgpu);
+	mutex_init(&vgpu->dmabuf_list_lock);
 
 	info->track_node.track_write = kvmgt_page_track_write;
 	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
@@ -1612,6 +1745,8 @@ struct intel_gvt_mpt kvmgt_mpt = {
 	.write_gpa = kvmgt_write_gpa,
 	.gfn_to_mfn = kvmgt_gfn_to_pfn,
 	.set_opregion = kvmgt_set_opregion,
+	.get_vfio_device = kvmgt_get_vfio_device,
+	.put_vfio_device = kvmgt_put_vfio_device,
 };
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
 
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index ab71300..12dabfc 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -308,4 +308,34 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
 	return intel_gvt_host.mpt->set_opregion(vgpu);
 }
 
+/**
+ * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
+{
+	if (!intel_gvt_host.mpt->get_vfio_device)
+		return 0;
+
+	return intel_gvt_host.mpt->get_vfio_device(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
+ * @vgpu: a vGPU
+ *
+ * No-op when the hypervisor backend does not implement a put hook;
+ * this function has no return value.
+ */
+static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
+{
+	if (!intel_gvt_host.mpt->put_vfio_device)
+		return;
+
+	intel_gvt_host.mpt->put_vfio_device(vgpu);
+}
+
 #endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 8e1d504..8747613 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -346,6 +346,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
 	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
+	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+	atomic_set(&vgpu->mgr_fd_opened, 0);
 
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
 
-- 
2.7.4

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ