Message-Id: <20210811101126.8973-5-chenyi.qiang@intel.com>
Date: Wed, 11 Aug 2021 18:11:23 +0800
From: Chenyi Qiang <chenyi.qiang@...el.com>
To: Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Xiaoyao Li <xiaoyao.li@...el.com>
Cc: Chenyi Qiang <chenyi.qiang@...el.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v5 4/7] KVM: MMU: Rename the pkru to pkr
PKRU represents the PKU register utilized in the protection key rights
check for user pages. Protection Keys for Supervisor Pages (PKS) extends
the protection key architecture to cover supervisor pages.
Rename the *pkru*-related variables and functions to *pkr*, which stands
for both PKRU and PKRS. The rename makes sense because PKS and PKU each
have:
- a single control register (PKRU and PKRS)
- the same number of keys (16 in total)
- the same format in control registers (Access and Write disable bits),
  as the sketch after this list illustrates
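Since the layout is identical, the same two-bit extraction works on
either register. A minimal standalone C sketch (the helper name
pkr_ad_wd_bits is illustrative, not part of this patch):

#include <stdint.h>

/*
 * PKRU and PKRS each hold 16 two-bit domains: bit 0 of a pair is
 * Access Disable (AD), bit 1 is Write Disable (WD), so pkey * 2 is
 * the index of the first bit for the domain.
 */
static inline uint32_t pkr_ad_wd_bits(uint32_t pkr, unsigned int pkey)
{
	return (pkr >> (pkey * 2)) & 3;
}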
PKS and PKU can also share the same bitmap, pkr_mask, to cache the
conditions under which protection key checks are needed, because the
requirements for a protection-key restriction to cause a fault are
almost identical for both; they merely apply to different pages
(supervisor vs. user pages). A simplified sketch of the check follows.
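For orientation only, a standalone sketch of how the cached mask gates
the check, condensed from the permission_fault() hunk below (constants
mirror the kernel headers; the function name is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define PT_USER_SHIFT  2
#define PT_USER_MASK   (1u << PT_USER_SHIFT)  /* U/S bit in the PTE */
#define PFERR_RSVD_BIT 3

/*
 * Two bits of pkr_mask, selected by the fault kind (pfec) and the
 * U/S bit of the PTE, decide whether AD/WD in the register fault.
 */
static bool pk_check_faults(uint32_t pkr_mask, uint32_t pkr,
                            unsigned int pte_pkey, uint32_t pfec,
                            uint32_t pte_access)
{
	uint32_t pkr_bits = (pkr >> (pte_pkey * 2)) & 3;
	uint32_t offset = (pfec & ~1) +
		((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

	return (pkr_bits & (pkr_mask >> offset)) != 0;
}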
Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
Signed-off-by: Chenyi Qiang <chenyi.qiang@...el.com>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/mmu.h | 12 ++++++------
arch/x86/kvm/mmu/mmu.c | 10 +++++-----
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c2bcb88781b3..3d55aca9167b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -444,7 +444,7 @@ struct kvm_mmu {
* with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
* Each domain has 2 bits which are ANDed with AD and WD from PKRU.
*/
- u32 pkru_mask;
+ u32 pkr_mask;
u64 *pae_root;
u64 *pml4_root;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 83e6c6965f1e..5e94f6a90e80 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -200,8 +200,8 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u32 errcode = PFERR_PRESENT_MASK;
WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
- if (unlikely(mmu->pkru_mask)) {
- u32 pkru_bits, offset;
+ if (unlikely(mmu->pkr_mask)) {
+ u32 pkr_bits, offset;
/*
* PKRU defines 32 bits, there are 16 domains and 2
@@ -209,15 +209,15 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
* index of the protection domain, so pte_pkey * 2 is
* the index of the first bit for the domain.
*/
- pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
+ pkr_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
offset = (pfec & ~1) +
((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
- pkru_bits &= mmu->pkru_mask >> offset;
- errcode |= -pkru_bits & PFERR_PK_MASK;
- fault |= (pkru_bits != 0);
+ pkr_bits &= mmu->pkr_mask >> offset;
+ errcode |= -pkr_bits & PFERR_PK_MASK;
+ fault |= (pkr_bits != 0);
}
return -(u32)fault & errcode;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 66f7f5bc3482..49fd2dc98cc6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4443,13 +4443,13 @@ static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
* away both AD and WD. For all reads or if the last condition holds, WD
* only will be masked away.
*/
-static void update_pkru_bitmask(struct kvm_mmu *mmu)
+static void update_pkr_bitmask(struct kvm_mmu *mmu)
{
unsigned bit;
bool wp;
if (!is_cr4_pke(mmu)) {
- mmu->pkru_mask = 0;
+ mmu->pkr_mask = 0;
return;
}
@@ -4483,7 +4483,7 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
/* PKRU.WD stops write access. */
pkey_bits |= (!!check_write) << 1;
- mmu->pkru_mask |= (pkey_bits & 3) << pfec;
+ mmu->pkr_mask |= (pkey_bits & 3) << pfec;
}
}
@@ -4495,7 +4495,7 @@ static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
reset_rsvds_bits_mask(vcpu, mmu);
update_permission_bitmask(mmu, false);
- update_pkru_bitmask(mmu);
+ update_pkr_bitmask(mmu);
}
static void paging64_init_context(struct kvm_mmu *context)
@@ -4763,7 +4763,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->direct_map = false;
update_permission_bitmask(context, true);
- update_pkru_bitmask(context);
+ update_pkr_bitmask(context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
--
2.17.1