lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Sun, 16 Jun 2013 14:46:42 +1000
From:	Benjamin Herrenschmidt <benh@...nel.crashing.org>
To:	Alexey Kardashevskiy <aik@...abs.ru>
Cc:	linuxppc-dev@...ts.ozlabs.org,
	David Gibson <david@...son.dropbear.id.au>,
	Alexander Graf <agraf@...e.de>,
	Paul Mackerras <paulus@...ba.org>, kvm@...r.kernel.org,
	linux-kernel@...r.kernel.org, kvm-ppc@...r.kernel.org
Subject: Re: [PATCH 4/4] KVM: PPC: Add hugepage support for IOMMU in-kernel
 handling

On Wed, 2013-06-05 at 16:11 +1000, Alexey Kardashevskiy wrote:

> @@ -185,7 +186,31 @@ static unsigned long kvmppc_realmode_gpa_to_hpa(struct kvm_vcpu *vcpu,
>  	unsigned long hva, hpa, pg_size = 0, offset;
>  	unsigned long gfn = gpa >> PAGE_SHIFT;
>  	bool writing = gpa & TCE_PCI_WRITE;
> +	struct kvmppc_iommu_hugepage *hp;
>  
> +	/*
> +	 * Try to find an already used hugepage.
> +	 * If it is not there, the kvmppc_lookup_pte() will return zero
> +	 * as it won't do get_page() on a huge page in real mode
> +	 * and therefore the request will be passed to the virtual mode.
> +	 */
> +	if (tt) {
> +		spin_lock(&tt->hugepages_lock);
> +		list_for_each_entry(hp, &tt->hugepages, list) {
> +			if ((gpa < hp->gpa) || (gpa >= hp->gpa + hp->size))
> +				continue;
> +
> +			/* Calculate host phys address keeping flags and offset in the page */
> +			offset = gpa & (hp->size - 1);
> +
> +			/* pte_pfn(pte) should return an address aligned to pg_size */
> +			hpa = (pte_pfn(hp->pte) << PAGE_SHIFT) + offset;
> +			spin_unlock(&tt->hugepages_lock);
> +
> +			return hpa;
> +		}
> +		spin_unlock(&tt->hugepages_lock);
> +	}

Wow... this is run in real mode, right?

spin_lock() and spin_unlock() are a big no-no in real mode. If lockdep
and/or spinlock debugging are enabled and something goes pear-shaped,
they are going to bring your whole system down in a blink in quite
horrible ways.

If you are going to do that, you need some kind of custom low-level
lock.

Also, I see that you are basically using a non-ordered list and doing a
linear search in it every time. That's going to COST!

You should really consider a more efficient data structure. You should
also be able to do something that doesn't require locks for readers.

>  	/* Find a KVM memslot */
>  	memslot = search_memslots(kvm_memslots(vcpu->kvm), gfn);
>  	if (!memslot)
> @@ -237,6 +262,10 @@ static long kvmppc_clear_tce_real_mode(struct kvm_vcpu *vcpu,
>  		if (oldtce & TCE_PCI_WRITE)
>  			SetPageDirty(page);
>  
> +		/* Do not put a huge page and continue without error */
> +		if (PageCompound(page))
> +			continue;
> +
>  		if (realmode_put_page(page)) {
>  			ret = H_TOO_HARD;
>  			break;
> @@ -282,7 +311,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>  			if (iommu_tce_put_param_check(tbl, ioba, tce))
>  				return H_PARAMETER;
>  
> -			hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tce, true);
> +			hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tt, tce, true);
>  			if (hpa == ERROR_ADDR) {
>  				vcpu->arch.tce_reason = H_TOO_HARD;
>  				return H_TOO_HARD;
> @@ -295,6 +324,11 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>  			if (unlikely(ret)) {
>  				struct page *pg = realmode_pfn_to_page(hpa);
>  				BUG_ON(!pg);
> +
> +				/* Do not put a huge page and return an error */
> +				if (!PageCompound(pg))
> +					return H_HARDWARE;
> +
>  				if (realmode_put_page(pg)) {
>  					vcpu->arch.tce_reason = H_HARDWARE;
>  					return H_TOO_HARD;
> @@ -351,7 +385,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
>  	vcpu->arch.tce_tmp_num = 0;
>  	vcpu->arch.tce_reason = 0;
>  
> -	tces = (unsigned long *) kvmppc_realmode_gpa_to_hpa(vcpu,
> +	tces = (unsigned long *) kvmppc_realmode_gpa_to_hpa(vcpu, NULL,
>  			tce_list, false);
>  	if ((unsigned long)tces == ERROR_ADDR)
>  		return H_TOO_HARD;
> @@ -374,7 +408,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
>  
>  		/* Translate TCEs and go get_page */
>  		for (i = 0; i < npages; ++i) {
> -			unsigned long hpa = kvmppc_realmode_gpa_to_hpa(vcpu,
> +			unsigned long hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tt,
>  					vcpu->arch.tce_tmp[i], true);
>  			if (hpa == ERROR_ADDR) {
>  				vcpu->arch.tce_tmp_num = i;

Cheers,
Ben.


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ