Message-Id: <1457437538-65867-3-git-send-email-pbonzini@redhat.com>
Date: Tue, 8 Mar 2016 12:45:38 +0100
From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: guangrong.xiao@linux.intel.com, huaitong.han@intel.com
Subject: [RFC PATCH 2/2] KVM: MMU: return page fault error code from permission_fault

Intel's new PKU extension introduces a bit in the page fault error code
that must be computed dynamically after the PTE lookup (unlike the I/D,
U/S and W/R bits).  To ease this, these two patches make
permission_fault return the page fault error code.

Right now permission_fault only adds the P bit to the input parameter
"pfec", but PKU can change that.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch/x86/kvm/mmu.h | 15 ++++++++++-----
arch/x86/kvm/paging_tmpl.h | 5 ++---
2 files changed, 12 insertions(+), 8 deletions(-)
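
Not part of the patch, but a note on the trick in the new return
statement: negating a 0/1 fault bit gives an all-ones or all-zeroes
mask, so ANDing it with pfec yields either the full error code or zero,
without a branch.  A minimal userspace sketch of the same idea (the
names fault_bit and permission_fault_demo are made up here; only
PFERR_PRESENT_MASK mirrors the kernel's definition):

#include <stdint.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK (1U << 0)    /* P bit of the x86 error code */

/* Stand-in for (mmu->permissions[index] >> pte_access) & 1. */
static unsigned fault_bit(unsigned permissions, unsigned pte_access)
{
        return (permissions >> pte_access) & 1;
}

static uint8_t permission_fault_demo(unsigned permissions,
                                     unsigned pte_access, unsigned pfec)
{
        pfec |= PFERR_PRESENT_MASK;
        /* -1 & pfec == pfec (fault); -0 & pfec == 0 (no fault). */
        return -fault_bit(permissions, pte_access) & pfec;
}

int main(void)
{
        /* W/R=1 access that faults: prints W/R|P = 0x3. */
        printf("%#x\n", permission_fault_demo(0x2, 1, 0x2));
        /* Same access, permitted: prints 0. */
        printf("%#x\n", permission_fault_demo(0x0, 1, 0x2));
        return 0;
}

Built with any C compiler, this prints 0x3 for the faulting case and 0
for the permitted one.
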
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 58fe98a0a526..8b2b3dfca1ae 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -141,11 +141,15 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
}
/*
- * Will a fault with a given page-fault error code (pfec) cause a permission
- * fault with the given access (in ACC_* format)?
+ * Check if a given access (described through the I/D, W/R and U/S bits of a
+ * page fault error code pfec) causes a permission fault with the given PTE
+ * access rights (in ACC_* format).
+ *
+ * Return zero if the access does not fault; return the page fault error code
+ * if the access faults.
*/
-static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- unsigned pte_access, unsigned pfec)
+static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ unsigned pte_access, unsigned pfec)
{
int cpl = kvm_x86_ops->get_cpl(vcpu);
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
@@ -169,7 +173,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
WARN_ON(pfec & PFERR_RSVD_MASK);
- return (mmu->permissions[index] >> pte_access) & 1;
+ pfec |= PFERR_PRESENT_MASK;
+ return -((mmu->permissions[index] >> pte_access) & 1) & pfec;
}
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 285858d3223b..4a6866dab848 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -370,10 +370,9 @@ retry_walk:
walker->ptes[walker->level - 1] = pte;
} while (!is_last_gpte(mmu, walker->level, pte));
- if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
- errcode |= PFERR_PRESENT_MASK;
+ errcode = permission_fault(vcpu, mmu, pte_access, errcode);
+ if (unlikely(errcode))
goto error;
- }
gfn = gpte_to_gfn_lvl(pte, walker->level);
gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
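
As an aside on the "PKU can change that" remark above: once
permission_fault computes the final error code, a dynamically computed
bit can simply be OR'd into pfec before the mask is applied.  A
hypothetical, self-contained sketch of that shape (PFERR_PK_MASK as
bit 5 matches the architectural PK error-code bit, but pk_denied and
the function name are invented for illustration; this is not code from
this series):

#include <stdint.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_PK_MASK      (1U << 5)    /* architectural PK bit */

static uint8_t permission_fault_pku_demo(unsigned fault, unsigned pk_denied,
                                         unsigned pfec)
{
        pfec |= PFERR_PRESENT_MASK;
        if (pk_denied) {
                pfec |= PFERR_PK_MASK;
                fault = 1;
        }
        /* Same -fault & pfec trick as in the patch. */
        return -fault & pfec;
}

int main(void)
{
        /* Write allowed by the PTEs but denied by protection keys. */
        printf("%#x\n", permission_fault_pku_demo(0, 1, 0x2));
        return 0;
}

This prints 0x23: the P, W/R and (assumed) PK bits set.
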
--
1.8.3.1