lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1457437538-65867-2-git-send-email-pbonzini@redhat.com>
Date:	Tue,  8 Mar 2016 12:45:37 +0100
From:	Paolo Bonzini <pbonzini@...hat.com>
To:	linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:	guangrong.xiao@...ux.intel.com, huaitong.han@...el.com
Subject: [RFC PATCH 1/2] KVM: MMU: precompute page fault error code

For the next patch, we will want to filter PFERR_FETCH_MASK away early,
and not pass it to permission_fault if neither NX nor SMEP is enabled.
Prepare for the change.

Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
 arch/x86/kvm/mmu.c         |  2 +-
 arch/x86/kvm/paging_tmpl.h | 26 +++++++++++++++-----------
 2 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2463de0b935c..e57f7be061e3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3883,7 +3883,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
 			u = bit & ACC_USER_MASK;
 
 			if (!ept) {
-				/* Not really needed: !nx will cause pte.nx to fault */
+				/* Not really needed: if !nx, ff will always be zero */
 				x |= !mmu->nx;
 				/* Allow supervisor writes if !cr0.wp */
 				w |= !is_write_protection(vcpu) && !uf;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6013f3685ef4..285858d3223b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -272,13 +272,24 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gpa_t pte_gpa;
 	int offset;
 	const int write_fault = access & PFERR_WRITE_MASK;
-	const int user_fault  = access & PFERR_USER_MASK;
-	const int fetch_fault = access & PFERR_FETCH_MASK;
-	u16 errcode = 0;
+	u16 errcode;
 	gpa_t real_gpa;
 	gfn_t gfn;
 
 	trace_kvm_mmu_pagetable_walk(addr, access);
+
+	/*
+	 * Do not modify PFERR_FETCH_MASK in access.  It is used later in the call to
+	 * mmu->translate_gpa and, when nested virtualization is in use, the X or NX
+	 * bit of nested page tables always applies---even if NX and SMEP are disabled
+	 * in the guest.
+	 *
+	 * TODO: cache the result of the NX and SMEP test in struct kvm_mmu?
+	 */
+	errcode = access;
+	if (!(mmu->nx || kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
+		errcode &= ~PFERR_FETCH_MASK;
+
 retry_walk:
 	walker->level = mmu->root_level;
 	pte           = mmu->get_cr3(vcpu);
@@ -389,9 +400,7 @@ retry_walk:
 
 	if (unlikely(!accessed_dirty)) {
 		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
-		if (unlikely(ret < 0))
-			goto error;
-		else if (ret)
+		if (ret > 0)
 			goto retry_walk;
 	}
 
@@ -402,11 +411,6 @@ retry_walk:
 	return 1;
 
 error:
-	errcode |= write_fault | user_fault;
-	if (fetch_fault && (mmu->nx ||
-			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
-		errcode |= PFERR_FETCH_MASK;
-
 	walker->fault.vector = PF_VECTOR;
 	walker->fault.error_code_valid = true;
 	walker->fault.error_code = errcode;
-- 
1.8.3.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ