Message-ID: <4A52272A.2060101@novell.com>
Date:	Mon, 06 Jul 2009 12:32:42 -0400
From:	Gregory Haskins <ghaskins@...ell.com>
To:	"Michael S. Tsirkin" <mst@...hat.com>
CC:	kvm@...r.kernel.org, linux-kernel@...r.kernel.org, avi@...hat.com,
	davidel@...ilserver.org
Subject: Re: [KVM PATCH v9 5/5] KVM: create irqfd-cleanup-wq on demand

Michael S. Tsirkin wrote:
> On Mon, Jul 06, 2009 at 12:03:28PM -0400, Gregory Haskins wrote:
>   
>> Michael S. Tsirkin wrote:
>>     
>>> On Thu, Jul 02, 2009 at 11:38:22AM -0400, Gregory Haskins wrote:
>>>   
>>>       
>>>> We currently create this wq on module_init, which may be wasteful if the
>>>> host never creates a guest that uses irqfd.  This patch changes the
>>>> algorithm so that the workqueue is only created when at least one guest
>>>> is using irqfd.  The queue is cleaned up when the last guest using irqfd
>>>> is shutdown.
>>>>
>>>> To keep things simple, we only check whether the guest has tried to create
>>>> an irqfd, not whether there are actually irqfds active.
>>>>
>>>> Signed-off-by: Gregory Haskins <ghaskins@...ell.com>
>>>> ---
>>>>
>>>>  include/linux/kvm_host.h |    1 
>>>>  virt/kvm/eventfd.c       |  100 ++++++++++++++++++++++++++++++++++------------
>>>>  2 files changed, 75 insertions(+), 26 deletions(-)
>>>>
>>>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>>>> index 8e04a34..cd1a0f3 100644
>>>> --- a/include/linux/kvm_host.h
>>>> +++ b/include/linux/kvm_host.h
>>>> @@ -149,6 +149,7 @@ struct kvm {
>>>>  	struct {
>>>>  		spinlock_t        lock;
>>>>  		struct list_head  items;
>>>> +		bool              init;
>>>>  	} irqfds;
>>>>  #endif
>>>>  	struct kvm_vm_stat stat;
>>>> diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
>>>> index 4092b8d..fcc3469 100644
>>>> --- a/virt/kvm/eventfd.c
>>>> +++ b/virt/kvm/eventfd.c
>>>> @@ -49,7 +49,16 @@ struct _irqfd {
>>>>  	struct work_struct        shutdown;
>>>>  };
>>>>  
>>>> -static struct workqueue_struct *irqfd_cleanup_wq;
>>>> +struct _irqfd_cleanup {
>>>> +	struct mutex             lock;
>>>> +	int                      refs;
>>>> +	struct workqueue_struct *wq;
>>>> +};
>>>> +
>>>> +static struct _irqfd_cleanup irqfd_cleanup = {
>>>> +	.lock = __MUTEX_INITIALIZER(irqfd_cleanup.lock),
>>>> +	.refs = 0,
>>>> +};
>>>>  
>>>>  static void
>>>>  irqfd_inject(struct work_struct *work)
>>>> @@ -110,7 +119,7 @@ irqfd_deactivate(struct _irqfd *irqfd)
>>>>  
>>>>  	list_del_init(&irqfd->list);
>>>>  
>>>> -	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
>>>> +	queue_work(irqfd_cleanup.wq, &irqfd->shutdown);
>>>>  }
>>>>  
>>>>  /*
>>>> @@ -161,6 +170,62 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
>>>>  	add_wait_queue(wqh, &irqfd->wait);
>>>>  }
>>>>  
>>>> +/*
>>>> + * create a host-wide workqueue for issuing deferred shutdown requests
>>>> + * aggregated from all vm* instances. We need our own isolated single-thread
>>>> + * queue to prevent deadlock against flushing the normal work-queue.
>>>> + */
>>>> +static int
>>>> +irqfd_cleanup_init(struct kvm *kvm)
>>>> +{
>>>> +	int ret = 0;
>>>> +
>>>> +	mutex_lock(&irqfd_cleanup.lock);
>>>> +
>>>> +	/*
>>>> +	 * Check the current init state from within the lock so that we
>>>> +	 * sync all users to the thread creation.
>>>> +	 */
>>>> +	if (kvm->irqfds.init)
>>>> +		goto out;
>>>> +
>>>> +	if (!irqfd_cleanup.refs) {
>>>> +		struct workqueue_struct *wq;
>>>> +
>>>> +		wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
>>>> +		if (!wq) {
>>>> +			ret = -ENOMEM;
>>>> +			goto out;
>>>> +		}
>>>> +
>>>> +		irqfd_cleanup.wq = wq;
>>>> +	}
>>>> +
>>>> +	irqfd_cleanup.refs++;
>>>> +	kvm->irqfds.init = true;
>>>> +
>>>> +out:
>>>> +	mutex_unlock(&irqfd_cleanup.lock);
>>>> +
>>>> +	return ret;
>>>> +}
>>>> +
>>>> +static void
>>>> +irqfd_cleanup_release(struct kvm *kvm)
>>>> +{
>>>> +	if (!kvm->irqfds.init)
>>>> +		return;
>>>>     
>>>>         
>>> init is checked outside the lock here.
>>> Why?
>>>   
>>>       
>> Guest is shutting down via vmfd->f_ops->release() and is already
>> guaranteed to be single-threaded, so proper locking is not really
>> important.  Probably should document that, though ;)
>>     
>
> Is this an important optimization? Simple locking is better.
>   

Its not "important" from a performance perspective or anything like
that.  Its just that locking here is inappropriate.  The init value is
per-vm and we only need to guard it going from false->true in the assign
path to cover the unlikely case that more than one threads tries to
assign an irqfd at the same time.  The shutdown path has no such
requirement since the VM has already implicitly ceased to exist and can
therefore not possibly assign another irqfd.  Therefore, locking here,
while harmless in every dimension, is simply gratuitous.  I would rather
just document it, personally: i.e. I think overlocking is just sloppy. 
Otherwise we should be documenting that the lock isn't really needed, etc.
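
To make that concrete, here is a minimal userspace sketch of the
pattern, written as a pthread analogue with made-up names
(shared_state, vm_ctx, shared_init/shared_release), not the kernel
code itself:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Host-wide state, like irqfd_cleanup: one refcounted resource. */
struct shared_state {
	pthread_mutex_t  lock;
	int              refs;
	void            *resource;	/* stands in for the workqueue */
};

static struct shared_state shared = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.refs = 0,
};

/* Per-VM state, like kvm->irqfds.init. */
struct vm_ctx {
	bool init;
};

static int shared_init(struct vm_ctx *vm)
{
	int ret = 0;

	pthread_mutex_lock(&shared.lock);

	/*
	 * Check under the lock: multiple threads may race to assign,
	 * so the false->true transition must be serialized.
	 */
	if (vm->init)
		goto out;

	if (!shared.refs) {
		shared.resource = malloc(1);	/* one-time creation */
		if (!shared.resource) {
			ret = -1;
			goto out;
		}
	}

	shared.refs++;
	vm->init = true;

out:
	pthread_mutex_unlock(&shared.lock);
	return ret;
}

static void shared_release(struct vm_ctx *vm)
{
	/*
	 * Unlocked read: the caller is the VM's teardown path, which
	 * runs exactly once per VM, so vm->init cannot change under us.
	 */
	if (!vm->init)
		return;

	pthread_mutex_lock(&shared.lock);

	if (!--shared.refs)
		free(shared.resource);	/* last user tears it down */

	pthread_mutex_unlock(&shared.lock);

	vm->init = false;
}

int main(void)
{
	struct vm_ctx vm = { .init = false };

	if (shared_init(&vm))
		return 1;

	shared_release(&vm);
	return 0;
}

Only shared.refs and the one-time creation need the mutex; the per-VM
flag is safe to touch unlocked on the release side for exactly the
reason above.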

For the record, I would rather have just used my originally proposed
slow-work thread instead of all this ;)

-Greg

>   
>>>> +
>>>> +	mutex_lock(&irqfd_cleanup.lock);
>>>> +
>>>> +	if (!(--irqfd_cleanup.refs))
>>>> +		destroy_workqueue(irqfd_cleanup.wq);
>>>> +
>>>> +	mutex_unlock(&irqfd_cleanup.lock);
>>>> +
>>>> +	kvm->irqfds.init = false;
>>>>     
>>>>         
>>> ... and cleaned outside the lock as well.
>>>
>>>   
>>>       
>> Ditto
>>
>>     
>>>> +}
>>>> +
>>>>  static int
>>>>  kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
>>>>  {
>>>> @@ -170,6 +235,10 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
>>>>  	int ret;
>>>>  	unsigned int events;
>>>>  
>>>> +	ret = irqfd_cleanup_init(kvm);
>>>> +	if (ret < 0)
>>>> +		return ret;
>>>> +
>>>>  	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
>>>>  	if (!irqfd)
>>>>  		return -ENOMEM;
>>>> @@ -268,7 +337,7 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
>>>>  	 * so that we guarantee there will not be any more interrupts on this
>>>>  	 * gsi once this deassign function returns.
>>>>  	 */
>>>> -	flush_workqueue(irqfd_cleanup_wq);
>>>> +	flush_workqueue(irqfd_cleanup.wq);
>>>>  
>>>>  	return 0;
>>>>  }
>>>> @@ -302,28 +371,7 @@ kvm_irqfd_release(struct kvm *kvm)
>>>>  	 * Block until we know all outstanding shutdown jobs have completed
>>>>  	 * since we do not take a kvm* reference.
>>>>  	 */
>>>> -	flush_workqueue(irqfd_cleanup_wq);
>>>> -
>>>> -}
>>>> -
>>>> -/*
>>>> - * create a host-wide workqueue for issuing deferred shutdown requests
>>>> - * aggregated from all vm* instances. We need our own isolated single-thread
>>>> - * queue to prevent deadlock against flushing the normal work-queue.
>>>> - */
>>>> -static int __init irqfd_module_init(void)
>>>> -{
>>>> -	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
>>>> -	if (!irqfd_cleanup_wq)
>>>> -		return -ENOMEM;
>>>> -
>>>> -	return 0;
>>>> -}
>>>> +	flush_workqueue(irqfd_cleanup.wq);
>>>> +	irqfd_cleanup_release(kvm);
>>>>  
>>>> -static void __exit irqfd_module_exit(void)
>>>> -{
>>>> -	destroy_workqueue(irqfd_cleanup_wq);
>>>>  }
>>>> -
>>>> -module_init(irqfd_module_init);
>>>> -module_exit(irqfd_module_exit);
>>>>     
>>>>         


