Message-ID: <aW-pw7GlQdFv-lf5@skinsburskii.localdomain>
Date: Tue, 20 Jan 2026 08:13:55 -0800
From: Stanislav Kinsburskii <skinsburskii@...ux.microsoft.com>
To: Mukesh R <mrathor@...ux.microsoft.com>
Cc: linux-kernel@...r.kernel.org, linux-hyperv@...r.kernel.org,
	linux-arm-kernel@...ts.infradead.org, iommu@...ts.linux.dev,
	linux-pci@...r.kernel.org, linux-arch@...r.kernel.org,
	kys@...rosoft.com, haiyangz@...rosoft.com, wei.liu@...nel.org,
	decui@...rosoft.com, longli@...rosoft.com, catalin.marinas@....com,
	will@...nel.org, tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
	dave.hansen@...ux.intel.com, hpa@...or.com, joro@...tes.org,
	lpieralisi@...nel.org, kwilczynski@...nel.org, mani@...nel.org,
	robh@...nel.org, bhelgaas@...gle.com, arnd@...db.de,
	nunodasneves@...ux.microsoft.com, mhklinux@...look.com,
	romank@...ux.microsoft.com
Subject: Re: [PATCH v0 07/15] mshv: Add ioctl support for MSHV-VFIO bridge
 device

On Mon, Jan 19, 2026 at 10:42:22PM -0800, Mukesh R wrote:
> From: Mukesh Rathor <mrathor@...ux.microsoft.com>
> 
> Add ioctl support for creating MSHV devices for a partition. At
> present only the VFIO device type is supported, but more could be
> added. At a high level, the partition ioctl to create a device
> verifies that it is of type VFIO and does some setup for the bridge
> code in mshv_vfio.c. Adapted from the KVM device ioctls.
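
If I follow the flow correctly, userspace usage ends up looking roughly
like the below (untested sketch; "partition_fd" stands in for an
already initialized partition fd, the ioctl/struct names are the ones
from this patch):

	struct mshv_create_device cd = {
		.type  = MSHV_DEV_TYPE_VFIO,
		.flags = 0,
	};
	int vfio_bridge_fd;

	if (ioctl(partition_fd, MSHV_CREATE_DEVICE, &cd) == 0)
		vfio_bridge_fd = cd.fd;	/* anon fd backed by mshv_device_fops */
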
> 
> Credits: Original author: Wei Liu <wei.liu@...nel.org>
> NB: Slightly modified from the original version.
> 
> Signed-off-by: Mukesh Rathor <mrathor@...ux.microsoft.com>
> ---
>  drivers/hv/mshv_root_main.c | 126 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 126 insertions(+)
> 
> diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
> index 83c7bad269a0..27313419828d 100644
> --- a/drivers/hv/mshv_root_main.c
> +++ b/drivers/hv/mshv_root_main.c
> @@ -1551,6 +1551,129 @@ mshv_partition_ioctl_initialize(struct mshv_partition *partition)
>  	return ret;
>  }
>  
> +static long mshv_device_attr_ioctl(struct mshv_device *mshv_dev, int cmd,
> +				   ulong uarg)
> +{
> +	struct mshv_device_attr attr;
> +	const struct mshv_device_ops *devops = mshv_dev->device_ops;
> +
> +	if (copy_from_user(&attr, (void __user *)uarg, sizeof(attr)))
> +		return -EFAULT;
> +
> +	switch (cmd) {
> +	case MSHV_SET_DEVICE_ATTR:
> +		if (devops->device_set_attr)
> +			return devops->device_set_attr(mshv_dev, &attr);
> +		break;
> +	case MSHV_HAS_DEVICE_ATTR:
> +		if (devops->device_has_attr)
> +			return devops->device_has_attr(mshv_dev, &attr);
> +		break;
> +	}
> +
> +	return -EPERM;
> +}
> +
> +static long mshv_device_fop_ioctl(struct file *filp, unsigned int cmd,
> +				  ulong uarg)
> +{
> +	struct mshv_device *mshv_dev = filp->private_data;
> +
> +	switch (cmd) {
> +	case MSHV_SET_DEVICE_ATTR:
> +	case MSHV_HAS_DEVICE_ATTR:
> +		return mshv_device_attr_ioctl(mshv_dev, cmd, uarg);
> +	}
> +
> +	return -ENOTTY;
> +}
> +
> +static int mshv_device_fop_release(struct inode *inode, struct file *filp)
> +{
> +	struct mshv_device *mshv_dev = filp->private_data;
> +	struct mshv_partition *partition = mshv_dev->device_pt;
> +
> +	if (mshv_dev->device_ops->device_release) {
> +		mutex_lock(&partition->pt_mutex);
> +		hlist_del(&mshv_dev->device_ptnode);
> +		mshv_dev->device_ops->device_release(mshv_dev);
> +		mutex_unlock(&partition->pt_mutex);
> +	}
> +
> +	mshv_partition_put(partition);
> +	return 0;
> +}
> +
> +static const struct file_operations mshv_device_fops = {
> +	.owner = THIS_MODULE,
> +	.unlocked_ioctl = mshv_device_fop_ioctl,
> +	.release = mshv_device_fop_release,
> +};
> +
> +long mshv_partition_ioctl_create_device(struct mshv_partition *partition,
> +					void __user *uarg)
> +{
> +	long rc;
> +	struct mshv_create_device devargk;
> +	struct mshv_device *mshv_dev;
> +	const struct mshv_device_ops *vfio_ops;
> +
> +	if (copy_from_user(&devargk, uarg, sizeof(devargk))) {
> +		rc = -EFAULT;
> +		goto out;
> +	}
> +
> +	/* At present, only VFIO is supported */
> +	if (devargk.type != MSHV_DEV_TYPE_VFIO) {
> +		rc = -ENODEV;
> +		goto out;
> +	}
> +
> +	if (devargk.flags & MSHV_CREATE_DEVICE_TEST) {
> +		rc = 0;
> +		goto out;
> +	}
> +
> +	mshv_dev = kzalloc(sizeof(*mshv_dev), GFP_KERNEL_ACCOUNT);
> +	if (mshv_dev == NULL) {
> +		rc = -ENOMEM;
> +		goto out;
> +	}
> +
> +	vfio_ops = &mshv_vfio_device_ops;
> +	mshv_dev->device_ops = vfio_ops;
> +	mshv_dev->device_pt = partition;
> +
> +	rc = vfio_ops->device_create(mshv_dev, devargk.type);
> +	if (rc < 0) {
> +		kfree(mshv_dev);
> +		goto out;
> +	}
> +
> +	hlist_add_head(&mshv_dev->device_ptnode, &partition->pt_devices);
> +
> +	mshv_partition_get(partition);
> +	rc = anon_inode_getfd(vfio_ops->device_name, &mshv_device_fops,
> +			      mshv_dev, O_RDWR | O_CLOEXEC);
> +	if (rc < 0) {
> +		mshv_partition_put(partition);
> +		hlist_del(&mshv_dev->device_ptnode);
> +		vfio_ops->device_release(mshv_dev);
> +		goto out;
> +	}
> +
> +	devargk.fd = rc;
> +	rc = 0;
> +
> +	if (copy_to_user(uarg, &devargk, sizeof(devargk))) {

Shouldn't the partition be put here?
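
Actually, with anon_inode_getfd() the new fd is already installed by
the time copy_to_user() runs, so a put alone would not give a clean
unwind (the eventual fop_release would repeat the
hlist_del/device_release/put). One option is the usual
get_unused_fd_flags() + anon_inode_getfile() + fd_install()-last
pattern, roughly (untested sketch, reusing the names from this patch;
needs a local int fd and struct file *file):

	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		rc = fd;
		goto err_unwind;	/* hypothetical label: put partition, hlist_del, device_release */
	}

	file = anon_inode_getfile(vfio_ops->device_name, &mshv_device_fops,
				  mshv_dev, O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		rc = PTR_ERR(file);
		goto err_unwind;
	}

	devargk.fd = fd;
	if (copy_to_user(uarg, &devargk, sizeof(devargk))) {
		/* nothing is visible to userspace yet */
		put_unused_fd(fd);
		fput(file);	/* ->release drops the partition ref and tears the device down */
		return -EFAULT;
	}

	fd_install(fd, file);
	return 0;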

Thanks,
Stanislav

> +		rc = -EFAULT;
> +		goto out;
> +	}
> +out:
> +	return rc;
> +}
> +
>  static long
>  mshv_partition_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
>  {
> @@ -1587,6 +1710,9 @@ mshv_partition_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
>  	case MSHV_ROOT_HVCALL:
>  		ret = mshv_ioctl_passthru_hvcall(partition, true, uarg);
>  		break;
> +	case MSHV_CREATE_DEVICE:
> +		ret = mshv_partition_ioctl_create_device(partition, uarg);
> +		break;
>  	default:
>  		ret = -ENOTTY;
>  	}
> -- 
> 2.51.2.vfs.0.1
> 
