Message-ID: <4f3d9563a4246a97eae28486eee1730d134b222b.camel@redhat.com>
Date: Tue, 28 Nov 2023 09:44:00 +0200
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Nicolas Saenz Julienne <nsaenz@...zon.com>, kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, linux-hyperv@...r.kernel.org,
pbonzini@...hat.com, seanjc@...gle.com, vkuznets@...hat.com,
anelkz@...zon.com, graf@...zon.com, dwmw@...zon.co.uk,
jgowans@...zon.com, corbert@....net, kys@...rosoft.com,
haiyangz@...rosoft.com, decui@...rosoft.com, x86@...nel.org,
linux-doc@...r.kernel.org
Subject: Re: [RFC 26/33] KVM: x86: hyper-vsm: Allow setting per-VTL memory
attributes
On Wed, 2023-11-08 at 11:17 +0000, Nicolas Saenz Julienne wrote:
> Introduce KVM_SET_MEMORY_ATTRIBUTES ioctl support for VTL KVM devices.
> The attributes are stored in an xarray private to the VTL device.
>
> The following memory attributes are supported:
> - KVM_MEMORY_ATTRIBUTE_READ
> - KVM_MEMORY_ATTRIBUTE_WRITE
> - KVM_MEMORY_ATTRIBUTE_EXECUTE
> - KVM_MEMORY_ATTRIBUTE_NO_ACCESS
> Only some combinations are valid; see the code comment below.
>
> Signed-off-by: Nicolas Saenz Julienne <nsaenz@...zon.com>
> ---
> arch/x86/kvm/hyperv.c | 61 +++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 61 insertions(+)
>
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index 0d8402dba596..bcace0258af1 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -62,6 +62,10 @@
> */
> #define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64)
>
> +#define KVM_HV_VTL_ATTRS \
> + (KVM_MEMORY_ATTRIBUTE_READ | KVM_MEMORY_ATTRIBUTE_WRITE | \
> + KVM_MEMORY_ATTRIBUTE_EXECUTE | KVM_MEMORY_ATTRIBUTE_NO_ACCESS)
> +
> static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
> bool vcpu_kick);
>
> @@ -3025,6 +3029,7 @@ int kvm_vm_ioctl_get_hv_vsm_state(struct kvm *kvm, struct kvm_hv_vsm_state *stat
>
> struct kvm_hv_vtl_dev {
> int vtl;
> + struct xarray mem_attrs;
> };
>
> static int kvm_hv_vtl_get_attr(struct kvm_device *dev,
> @@ -3047,16 +3052,71 @@ static void kvm_hv_vtl_release(struct kvm_device *dev)
> {
> struct kvm_hv_vtl_dev *vtl_dev = dev->private;
>
> + xa_destroy(&vtl_dev->mem_attrs);
> kfree(vtl_dev);
> kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
> }
>
> +/*
> + * The TLFS lists the valid memory protection combinations (15.9.3):
> + * - No access
> + * - Read-only, no execute
> + * - Read-only, execute
> + * - Read/write, no execute
> + * - Read/write, execute
> + */
> +static bool kvm_hv_validate_vtl_mem_attributes(struct kvm_memory_attributes *attrs)
> +{
> + u64 attr = attrs->attributes;
> +
> + if (attr & ~KVM_HV_VTL_ATTRS)
> + return false;
> +
> + if (attr == KVM_MEMORY_ATTRIBUTE_NO_ACCESS)
> + return true;
> +
> + if (!(attr & KVM_MEMORY_ATTRIBUTE_READ))
> + return false;
> +
> + return true;
> +}
> +
> +static long kvm_hv_vtl_ioctl(struct kvm_device *dev, unsigned int ioctl,
> + unsigned long arg)
> +{
> + switch (ioctl) {
> + case KVM_SET_MEMORY_ATTRIBUTES: {
> + struct kvm_hv_vtl_dev *vtl_dev = dev->private;
> + struct kvm_memory_attributes attrs;
> + int r;
> +
> + if (copy_from_user(&attrs, (void __user *)arg, sizeof(attrs)))
> + return -EFAULT;
> +
> + r = -EINVAL;
> + if (!kvm_hv_validate_vtl_mem_attributes(&attrs))
> + return r;
> +
> + r = kvm_ioctl_set_mem_attributes(dev->kvm, &vtl_dev->mem_attrs,
> + KVM_HV_VTL_ATTRS, &attrs);
> + if (r)
> + return r;
> + break;
> + }
> + default:
> + return -ENOTTY;
> + }
> +
> + return 0;
> +}
> +
> static int kvm_hv_vtl_create(struct kvm_device *dev, u32 type);
>
> static struct kvm_device_ops kvm_hv_vtl_ops = {
> .name = "kvm-hv-vtl",
> .create = kvm_hv_vtl_create,
> .release = kvm_hv_vtl_release,
> + .ioctl = kvm_hv_vtl_ioctl,
> .get_attr = kvm_hv_vtl_get_attr,
> };
>
> @@ -3076,6 +3136,7 @@ static int kvm_hv_vtl_create(struct kvm_device *dev, u32 type)
> vtl++;
>
> vtl_dev->vtl = vtl;
> + xa_init(&vtl_dev->mem_attrs);
> dev->private = vtl_dev;
>
> return 0;
It makes sense, but hopefully we won't need it if we adopt the VM-per-VTL approach.
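
For reference, a minimal userspace sketch of how this ioctl would be
driven (assuming a VTL device fd obtained via KVM_CREATE_DEVICE and the
KVM_MEMORY_ATTRIBUTE_READ/WRITE/EXECUTE/NO_ACCESS flags introduced by
this series; names and layout could still change before merging):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Mark a GPA range read-only/no-execute for one VTL. */
	static int set_vtl_ro_nx(int vtl_dev_fd, __u64 gpa, __u64 size)
	{
		struct kvm_memory_attributes attrs;

		memset(&attrs, 0, sizeof(attrs));
		attrs.address = gpa;
		attrs.size = size;
		/* "Read-only, no execute": one of the TLFS-valid combinations. */
		attrs.attributes = KVM_MEMORY_ATTRIBUTE_READ;

		/* Issued against the per-VTL device fd, not the VM fd. */
		return ioctl(vtl_dev_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
	}

Per the validation helper above, any value that clears
KVM_MEMORY_ATTRIBUTE_READ without being exactly
KVM_MEMORY_ATTRIBUTE_NO_ACCESS gets rejected with -EINVAL.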
Best regards,
Maxim Levitsky