[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251223054806.1611168-5-jon@nutanix.com>
Date: Mon, 22 Dec 2025 22:47:57 -0700
From: Jon Kohler <jon@...anix.com>
To: seanjc@...gle.com, pbonzini@...hat.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, dave.hansen@...ux.intel.com,
x86@...nel.org, hpa@...or.com, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: ken@...elabs.ch, Alexander.Grest@...rosoft.com, chao.gao@...el.com,
madvenka@...ux.microsoft.com, mic@...ikod.net, nsaenz@...zon.es,
tao1.su@...ux.intel.com, xiaoyao.li@...el.com, zhao1.liu@...el.com,
Jon Kohler <jon@...anix.com>
Subject: [PATCH 4/8] KVM: x86/mmu: update access permissions from ACC_ALL to ACC_RWX
Introduce ACC_RWX to capture the traditional RWX access bits, and modify the
various consumers of ACC_ALL to use ACC_RWX instead, in preparation for
Intel MBEC enablement, as suggested by Sean [1].
The only areas that really need ACC_ALL are kvm_mmu_page_get_access()
and trace_mark_mmio_spte().
No functional change intended.
[1] https://lore.kernel.org/all/aCI-z5vzzLwxOCfw@google.com/
Suggested-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Jon Kohler <jon@...anix.com>
---
arch/x86/kvm/mmu/mmu.c | 16 ++++++++--------
arch/x86/kvm/mmu/spte.h | 3 ++-
arch/x86/kvm/mmu/tdp_mmu.c | 4 ++--
3 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 667d66cf76d5..b1a7c7cc0c44 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3452,7 +3452,7 @@ static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (it.level == fault->goal_level)
break;
- sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
+ sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_RWX);
if (sp == ERR_PTR(-EEXIST))
continue;
@@ -3465,7 +3465,7 @@ static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (WARN_ON_ONCE(it.level != fault->goal_level))
return -EFAULT;
- ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
+ ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_RWX,
base_gfn, fault->pfn, fault);
if (ret == RET_PF_SPURIOUS)
return ret;
@@ -3698,7 +3698,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
* current CPU took the fault.
*
* Need not check the access of upper level table entries since
- * they are always ACC_ALL.
+ * they are always ACC_RWX.
*/
if (is_access_allowed(fault, spte)) {
ret = RET_PF_SPURIOUS;
@@ -4804,7 +4804,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (r)
return r;
- r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
+ r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_RWX);
if (r != RET_PF_CONTINUE)
return r;
@@ -4895,7 +4895,7 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
if (r)
return r;
- r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
+ r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_RWX);
if (r != RET_PF_CONTINUE)
return r;
@@ -5614,7 +5614,7 @@ static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
{
union kvm_cpu_role role = {0};
- role.base.access = ACC_ALL;
+ role.base.access = ACC_RWX;
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
role.ext.valid = 1;
@@ -5695,7 +5695,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
{
union kvm_mmu_page_role role = {0};
- role.access = ACC_ALL;
+ role.access = ACC_RWX;
role.cr0_wp = true;
role.efer_nx = true;
role.smm = cpu_role.base.smm;
@@ -5826,7 +5826,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
role.base.direct = false;
role.base.ad_disabled = !accessed_dirty;
role.base.guest_mode = true;
- role.base.access = ACC_ALL;
+ role.base.access = ACC_RWX;
role.ext.word = 0;
role.ext.execonly = execonly;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index b60666778f61..101a2f61ec96 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -45,7 +45,8 @@ static_assert(SPTE_TDP_AD_ENABLED == 0);
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
#define ACC_USER_MASK PT_USER_MASK
-#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+#define ACC_RWX (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
+#define ACC_ALL ACC_RWX
/* The mask for the R/X bits in EPT PTEs */
#define SPTE_EPT_READABLE_MASK 0x1ull
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index c5734ca5c17d..98221ed34c51 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1190,9 +1190,9 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
}
if (unlikely(!fault->slot))
- new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
+ new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_RWX);
else
- wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
+ wrprot = make_spte(vcpu, sp, fault->slot, ACC_RWX, iter->gfn,
fault->pfn, iter->old_spte, fault->prefetch,
false, fault->map_writable, &new_spte);
--
2.43.0
Powered by blists - more mailing lists