[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <b0758ad2-6ec0-8f79-ad04-e2bd24dd66a5@redhat.com>
Date: Thu, 6 Jul 2017 14:39:24 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Claudio Imbrenda <imbrenda@...ux.vnet.ibm.com>, kvm@...r.kernel.org
Cc: borntraeger@...ibm.com, linux-kernel@...r.kernel.org,
Radim Krčmář <rkrcmar@...hat.com>
Subject: Re: [PATCH v2 1/1] KVM: trigger uevents when starting or stopping a
VM
On 06/07/2017 12:48, Claudio Imbrenda wrote:
> This patch adds a few lines to the KVM common code to fire a
> KOBJ_CHANGE uevent whenever a KVM VM is created or destroyed. The event
> carries four environment variables:
>
> KVM_VM_CREATED indicates how many times a new VM has been created. It
> is useful for example to trigger specific actions when
> the first VM is started
> KVM_VM_ACTIVE indicates how many VMs are currently active. This can be
>                used for logging or monitoring purposes
> KVM_VM_PID has the pid of the KVM process that has been started or
> stopped. This can be used to perform process-specific
> tuning.
> KVM_VM_STATS_PATH contains the path in /sys to the directory with all
> the runtime statistics for this VM. This is useful
> for performance monitoring and profiling.
>
> Specific udev rules can then be set up in userspace to deal with the
> creation or destruction of VMs as needed.
>
> Signed-off-by: Claudio Imbrenda <imbrenda@...ux.vnet.ibm.com>
> ---
> virt/kvm/kvm_main.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 50 insertions(+)
>
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 6e3b12c..f4cf7d2 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -130,6 +130,12 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
>
> static bool largepages_enabled = true;
>
> +#define KVM_EVENT_CREATE_VM 0
> +#define KVM_EVENT_DESTROY_VM 1
> +static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
> +static unsigned long long kvm_createvm_count;
> +static unsigned long long kvm_active_vms;
> +
> bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
> {
> if (pfn_valid(pfn))
> @@ -740,6 +746,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
> int i;
> struct mm_struct *mm = kvm->mm;
>
> + kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
> kvm_destroy_vm_debugfs(kvm);
> kvm_arch_sync_events(kvm);
> spin_lock(&kvm_lock);
> @@ -3212,6 +3219,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
> fput(file);
> return -ENOMEM;
> }
> + kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
>
> fd_install(r, file);
> return r;
> @@ -3864,6 +3872,48 @@ static const struct file_operations *stat_fops[] = {
> [KVM_STAT_VM] = &vm_stat_fops,
> };
>
> +static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
> +{
> + char cbuf[40], abuf[40], pidbuf[40], pathvar[18] = "KVM_VM_STATS_PATH=";
> + char *ptr[5] = {cbuf, abuf, pidbuf, NULL, NULL};
> + char *tmp, *pathbuf;
> + unsigned long long created, active;
> +
> + if (!kvm_dev.this_device || !kvm || !kvm->debugfs_dentry)
> + return;
> +
> + spin_lock(&kvm_lock);
> + if (type == KVM_EVENT_CREATE_VM) {
> + kvm_createvm_count++;
> + kvm_active_vms++;
> + } else if (type == KVM_EVENT_DESTROY_VM) {
> + kvm_active_vms--;
> + }
> + created = kvm_createvm_count;
> + active = kvm_active_vms;
> + spin_unlock(&kvm_lock);
> +
> + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
> + if (pathbuf) {
> + tmp = dentry_path_raw(kvm->debugfs_dentry,
> + pathbuf + sizeof(pathvar),
> + PATH_MAX - sizeof(pathvar));
> + if (!IS_ERR(tmp)) {
> + memcpy(tmp - sizeof(pathvar), pathvar, sizeof(pathvar));
> + ptr[3] = tmp - sizeof(pathvar);
> + }
> + }
> + snprintf(cbuf, sizeof(cbuf), "KVM_VM_CREATED=%llu", created);
> + snprintf(abuf, sizeof(abuf), "KVM_VM_ACTIVE=%llu", active);
> + snprintf(pidbuf, sizeof(pidbuf), "KVM_VM_PID=%s",
> + kvm->debugfs_dentry->d_name.name);
> + tmp = strchr(pidbuf, '-');
> + if (tmp)
> + *tmp = '\0';
> + kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, ptr);
> + kfree(pathbuf);
> +}
> +
> static int kvm_init_debug(void)
> {
> int r = -EEXIST;
>
Looks good to me (I haven't tested it) for 4.13.
Paolo
Powered by blists - more mailing lists