Message-ID: <20150616230700-mutt-send-email-mst@redhat.com>
Date:	Tue, 16 Jun 2015 23:07:24 +0200
From:	"Michael S. Tsirkin" <mst@...hat.com>
To:	Igor Mammedov <imammedo@...hat.com>
Cc:	linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
	pbonzini@...hat.com
Subject: Re: [PATCH 1/5] vhost: use binary search instead of linear in
 find_region()

On Tue, Jun 16, 2015 at 06:33:35PM +0200, Igor Mammedov wrote:
> For default region layouts performance stays the same as with
> linear search, i.e. translate_desc(), which inlines find_region(),
> takes around 210 ns on average.
> 
> But binary search scales better with a larger number of regions:
> 235 ns (binary search) vs. 300 ns (linear search) with 55 memory
> regions, and the numbers should stay about the same when the
> allowed number of slots is increased to 509, as has been done in
> kvm.
> 
> Signed-off-by: Igor Mammedov <imammedo@...hat.com>
> ---
>  drivers/vhost/vhost.c | 38 ++++++++++++++++++++++++++++----------
>  1 file changed, 28 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index 2ee2826..a22f8c3 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -25,6 +25,7 @@
>  #include <linux/kthread.h>
>  #include <linux/cgroup.h>
>  #include <linux/module.h>
> +#include <linux/sort.h>
>  
>  #include "vhost.h"
>  
> @@ -590,6 +591,16 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
>  }
>  EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
>  
> +static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
> +{
> +	const struct vhost_memory_region *r1 = p1, *r2 = p2;
> +	if (r1->guest_phys_addr < r2->guest_phys_addr)
> +		return 1;
> +	if (r1->guest_phys_addr > r2->guest_phys_addr)
> +		return -1;
> +	return 0;
> +}
> +
>  static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
>  {
>  	struct vhost_memory mem, *newmem, *oldmem;
> @@ -609,9 +620,11 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
>  	memcpy(newmem, &mem, size);
>  	if (copy_from_user(newmem->regions, m->regions,
>  			   mem.nregions * sizeof *m->regions)) {
> -		kfree(newmem);
> +		kvfree(newmem);
>  		return -EFAULT;
>  	}

What's this doing here? The kfree -> kvfree change looks unrelated
to the binary search conversion.

> +	sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
> +		vhost_memory_reg_sort_cmp, NULL);
>  
>  	if (!memory_access_ok(d, newmem, 0)) {
>  		kfree(newmem);
> @@ -913,17 +926,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
>  static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
>  						     __u64 addr, __u32 len)
>  {
> -	struct vhost_memory_region *reg;
> -	int i;
> +	const struct vhost_memory_region *reg;
> +	int start = 0, end = mem->nregions;
>  
> -	/* linear search is not brilliant, but we really have on the order of 6
> -	 * regions in practice */
> -	for (i = 0; i < mem->nregions; ++i) {
> -		reg = mem->regions + i;
> -		if (reg->guest_phys_addr <= addr &&
> -		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
> -			return reg;
> +	while (start < end) {
> +		int slot = start + (end - start) / 2;
> +		reg = mem->regions + slot;
> +		if (addr >= reg->guest_phys_addr)
> +			end = slot;
> +		else
> +			start = slot + 1;
>  	}
> +
> +	reg = mem->regions + start;
> +	if (addr >= reg->guest_phys_addr &&
> +		reg->guest_phys_addr + reg->memory_size > addr)
> +		return reg;
>  	return NULL;
>  }
>  
> -- 
> 1.8.3.1
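
For reference, the sorted order here is descending:
vhost_memory_reg_sort_cmp() returns 1 when r1's guest_phys_addr is the
smaller one, so after sort() the region with the highest start address
comes first, and the loop in find_region() is then a lower-bound binary
search for the first region that starts at or below addr. Below is a
minimal user-space sketch of the same sort/lookup pair; it is an
illustration only, not the kernel code: the struct is trimmed to the two
fields the lookup uses, qsort() stands in for the kernel's sort(), the
region layout is made up, and the extra "start < n" guard only exists so
the standalone version stays safe when addr lies below every region.

/* Standalone illustration of the descending sort + lower-bound binary
 * search used by the patch, with a trimmed-down region struct. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	uint64_t guest_phys_addr;
	uint64_t memory_size;
};

/* Same idea as vhost_memory_reg_sort_cmp(): larger start address first. */
static int reg_cmp_desc(const void *p1, const void *p2)
{
	const struct region *r1 = p1, *r2 = p2;

	if (r1->guest_phys_addr < r2->guest_phys_addr)
		return 1;
	if (r1->guest_phys_addr > r2->guest_phys_addr)
		return -1;
	return 0;
}

/* Lower-bound binary search over the descending array: find the first
 * region whose start is <= addr, then check that addr falls inside it. */
static const struct region *find_region(const struct region *regs, int n,
					uint64_t addr)
{
	int start = 0, end = n;

	while (start < end) {
		int slot = start + (end - start) / 2;

		if (addr >= regs[slot].guest_phys_addr)
			end = slot;
		else
			start = slot + 1;
	}
	if (start < n &&
	    addr >= regs[start].guest_phys_addr &&
	    regs[start].guest_phys_addr + regs[start].memory_size > addr)
		return &regs[start];
	return NULL;
}

int main(void)
{
	/* Made-up layout: three regions, deliberately unsorted. */
	struct region regs[] = {
		{ 0x100000, 0x10000 },
		{ 0x000000, 0x80000 },
		{ 0x200000, 0x40000 },
	};
	int n = sizeof(regs) / sizeof(regs[0]);
	const struct region *r;

	qsort(regs, n, sizeof(regs[0]), reg_cmp_desc);

	r = find_region(regs, n, 0x205000);	/* inside the 0x200000 region */
	printf("0x205000 -> %s\n", r ? "hit" : "miss");
	r = find_region(regs, n, 0x090000);	/* falls in a hole between regions */
	printf("0x090000 -> %s\n", r ? "hit" : "miss");
	return 0;
}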