Message-ID: <20250401161106.790710-10-pbonzini@redhat.com>
Date: Tue,  1 Apr 2025 18:10:46 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: linux-kernel@...r.kernel.org,
	kvm@...r.kernel.org
Cc: roy.hopkins@...e.com,
	seanjc@...gle.com,
	thomas.lendacky@....com,
	ashish.kalra@....com,
	michael.roth@....com,
	jroedel@...e.de,
	nsaenz@...zon.com,
	anelkz@...zon.de,
	James.Bottomley@...senPartnership.com
Subject: [PATCH 09/29] KVM: implement plane file descriptors ioctl and creation

Add the file_operations for planes, the means to create new file
descriptors for them, and the KVM_CHECK_EXTENSION implementation for
the two new capabilities.
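
A rough userspace sketch (illustration only, not part of the patch):
the plane id is passed directly as the ioctl argument, mirroring
KVM_CREATE_VCPU, and the return value is the new plane file
descriptor.  The helper name and includes are assumptions for the
example.

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>	/* assumed to carry KVM_CREATE_PLANE, added below */

  /* Hypothetical helper: ids >= KVM_MAX_VCPU_PLANES fail with EINVAL,
   * an already-created plane id fails with EEXIST. */
  static int create_plane_fd(int vm_fd, unsigned int id)
  {
          return ioctl(vm_fd, KVM_CREATE_PLANE, id);
  }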

KVM_SIGNAL_MSI and KVM_SET_MEMORY_ATTRIBUTES are now available
through both vm and plane file descriptors, so forward them to the
same function that is used by the file_operations for planes.
KVM_CHECK_EXTENSION instead remains separate, because it advertises
only a very small subset of capabilities when applied to plane file
descriptors.
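
For illustration (again userspace side, with assumed helper name and
placeholder MSI values): a plane fd reports only the plane-level
capabilities handled by kvm_plane_ioctl_check_extension(), while
KVM_SIGNAL_MSI issued on either fd ends up in
kvm_send_userspace_msi().

  /* Sketch: plane_fd comes from KVM_CREATE_PLANE above. */
  static int signal_msi_on_plane(int vm_fd, int plane_fd)
  {
          struct kvm_msi msi = {
                  .address_lo = 0xfee00000,	/* placeholder: x86 MSI base, dest APIC 0 */
                  .data       = 0x0030,		/* placeholder: fixed delivery, vector 0x30 */
          };

          /* Plane fds advertise only a small capability subset. */
          if (ioctl(plane_fd, KVM_CHECK_EXTENSION, KVM_CAP_SIGNAL_MSI) != 1)
                  return -1;

          /* Same handler either way: the vm fd forwards to plane 0,
           * the plane fd targets the plane it refers to. */
          ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
          return ioctl(plane_fd, KVM_SIGNAL_MSI, &msi);
  }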

Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
 include/linux/kvm_host.h |  19 +++++
 include/uapi/linux/kvm.h |   2 +
 virt/kvm/kvm_main.c      | 154 +++++++++++++++++++++++++++++++++------
 3 files changed, 154 insertions(+), 21 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0a91b556767e..dbca418d64f5 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -342,6 +342,8 @@ struct kvm_vcpu {
 	unsigned long guest_debug;
 
 	struct mutex mutex;
+
+	/* Shared for all planes */
 	struct kvm_run *run;
 
 #ifndef __KVM_HAVE_ARCH_WQP
@@ -922,6 +924,23 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
 }
 
 
+#if KVM_MAX_VCPU_PLANES == 1
+static inline int kvm_arch_nr_vcpu_planes(struct kvm *kvm)
+{
+	return KVM_MAX_VCPU_PLANES;
+}
+
+static inline struct kvm_plane *vcpu_to_plane(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->planes[0];
+}
+#else
+static inline struct kvm_plane *vcpu_to_plane(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->planes[vcpu->plane_id];
+}
+#endif
+
 #define KVM_BUG(cond, kvm, fmt...)				\
 ({								\
 	bool __ret = !!(cond);					\
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b0cca93ebcb3..96d25c7fa18f 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1690,4 +1690,6 @@ struct kvm_pre_fault_memory {
 	__u64 padding[5];
 };
 
+#define KVM_CREATE_PLANE	_IO(KVMIO, 0xd6)
+
 #endif /* __LINUX_KVM_H */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cd4dfc399cad..b08fea91dc74 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4388,6 +4388,80 @@ static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int kvm_plane_ioctl_check_extension(struct kvm_plane *plane, long arg)
+{
+	switch (arg) {
+#ifdef CONFIG_HAVE_KVM_MSI
+	case KVM_CAP_SIGNAL_MSI:
+#endif
+	case KVM_CAP_CHECK_EXTENSION_VM:
+		return 1;
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+	case KVM_CAP_MEMORY_ATTRIBUTES:
+		return kvm_supported_mem_attributes(plane);
+#endif
+	default:
+		return 0;
+	}
+}
+
+static long __kvm_plane_ioctl(struct kvm_plane *plane, unsigned int ioctl,
+			      unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+
+	switch (ioctl) {
+#ifdef CONFIG_HAVE_KVM_MSI
+	case KVM_SIGNAL_MSI: {
+		struct kvm_msi msi;
+
+		if (copy_from_user(&msi, argp, sizeof(msi)))
+			return -EFAULT;
+		return kvm_send_userspace_msi(plane, &msi);
+	}
+#endif
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+	case KVM_SET_MEMORY_ATTRIBUTES: {
+		struct kvm_memory_attributes attrs;
+
+		if (copy_from_user(&attrs, argp, sizeof(attrs)))
+			return -EFAULT;
+		return kvm_vm_ioctl_set_mem_attributes(plane, &attrs);
+	}
+#endif
+	case KVM_CHECK_EXTENSION:
+		return kvm_plane_ioctl_check_extension(plane, arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static long kvm_plane_ioctl(struct file *filp, unsigned int ioctl,
+			     unsigned long arg)
+{
+	struct kvm_plane *plane = filp->private_data;
+
+	if (plane->kvm->mm != current->mm || plane->kvm->vm_dead)
+		return -EIO;
+
+	return __kvm_plane_ioctl(plane, ioctl, arg);
+}
+
+static int kvm_plane_release(struct inode *inode, struct file *filp)
+{
+	struct kvm_plane *plane = filp->private_data;
+
+	kvm_put_kvm(plane->kvm);
+	return 0;
+}
+
+static struct file_operations kvm_plane_fops = {
+	.unlocked_ioctl = kvm_plane_ioctl,
+	.release = kvm_plane_release,
+	KVM_COMPAT(kvm_plane_ioctl),
+};
+
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -4878,6 +4952,14 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 		if (kvm)
 			return kvm_arch_nr_memslot_as_ids(kvm);
 		return KVM_MAX_NR_ADDRESS_SPACES;
+#endif
+#if KVM_MAX_VCPU_PLANES > 1
+	case KVM_CAP_PLANES:
+		if (kvm)
+			return kvm_arch_nr_vcpu_planes(kvm);
+		return KVM_MAX_PLANES;
+	case KVM_CAP_PLANES_FPU:
+		return kvm_arch_planes_share_fpu(kvm);
 #endif
 	case KVM_CAP_NR_MEMSLOTS:
 		return KVM_USER_MEM_SLOTS;
@@ -5112,6 +5194,48 @@ static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
 	return fd;
 }
 
+static int kvm_vm_ioctl_create_plane(struct kvm *kvm, unsigned id)
+{
+	struct kvm_plane *plane;
+	struct file *file;
+	int r, fd;
+
+	if (id >= KVM_MAX_VCPU_PLANES)
+		return -EINVAL;
+
+	guard(mutex)(&kvm->lock);
+	if (kvm->planes[id])
+		return -EEXIST;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0)
+		return fd;
+
+	plane = kvm_create_vm_plane(kvm, id);
+	if (IS_ERR(plane)) {
+		r = PTR_ERR(plane);
+		goto put_fd;
+	}
+
+	kvm_get_kvm(kvm);
+	file = anon_inode_getfile("kvm-plane", &kvm_plane_fops, plane, O_RDWR);
+	if (IS_ERR(file)) {
+		r = PTR_ERR(file);
+		goto put_kvm;
+	}
+
+	kvm->planes[id] = plane;
+	fd_install(fd, file);
+	return fd;
+
+put_kvm:
+	kvm_put_kvm(kvm);
+	kfree(plane);
+put_fd:
+	put_unused_fd(fd);
+	return r;
+}
+
 #define SANITY_CHECK_MEM_REGION_FIELD(field)					\
 do {										\
 	BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) !=		\
@@ -5130,6 +5254,9 @@ static long kvm_vm_ioctl(struct file *filp,
 	if (kvm->mm != current->mm || kvm->vm_dead)
 		return -EIO;
 	switch (ioctl) {
+	case KVM_CREATE_PLANE:
+		r = kvm_vm_ioctl_create_plane(kvm, arg);
+		break;
 	case KVM_CREATE_VCPU:
 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
 		break;
@@ -5236,16 +5363,12 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #ifdef CONFIG_HAVE_KVM_MSI
-	case KVM_SIGNAL_MSI: {
-		struct kvm_msi msi;
-
-		r = -EFAULT;
-		if (copy_from_user(&msi, argp, sizeof(msi)))
-			goto out;
-		r = kvm_send_userspace_msi(kvm->planes[0], &msi);
-		break;
-	}
+	case KVM_SIGNAL_MSI:
 #endif
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+	case KVM_SET_MEMORY_ATTRIBUTES:
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+		return __kvm_plane_ioctl(kvm->planes[0], ioctl, arg);
 #ifdef __KVM_HAVE_IRQ_LINE
 	case KVM_IRQ_LINE_STATUS:
 	case KVM_IRQ_LINE: {
@@ -5301,18 +5424,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
-#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
-	case KVM_SET_MEMORY_ATTRIBUTES: {
-		struct kvm_memory_attributes attrs;
-
-		r = -EFAULT;
-		if (copy_from_user(&attrs, argp, sizeof(attrs)))
-			goto out;
-
-		r = kvm_vm_ioctl_set_mem_attributes(kvm->planes[0], &attrs);
-		break;
-	}
-#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
 	case KVM_CREATE_DEVICE: {
 		struct kvm_create_device cd;
 
@@ -6467,6 +6578,7 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
 	kvm_chardev_ops.owner = module;
 	kvm_vm_fops.owner = module;
 	kvm_vcpu_fops.owner = module;
+	kvm_plane_fops.owner = module;
 	kvm_device_fops.owner = module;
 
 	kvm_preempt_ops.sched_in = kvm_sched_in;
-- 
2.49.0

