Message-ID: <20240609154945.55332-13-nsaenz@amazon.com>
Date: Sun, 9 Jun 2024 15:49:41 +0000
From: Nicolas Saenz Julienne <nsaenz@...zon.com>
To: <linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>
CC: <pbonzini@...hat.com>, <seanjc@...gle.com>, <vkuznets@...hat.com>,
<linux-doc@...r.kernel.org>, <linux-hyperv@...r.kernel.org>,
<linux-arch@...r.kernel.org>, <linux-trace-kernel@...r.kernel.org>,
<graf@...zon.de>, <dwmw2@...radead.org>, <paul@...zon.com>,
<nsaenz@...zon.com>, <mlevitsk@...hat.com>, <jgowans@...zon.com>,
<corbet@....net>, <decui@...rosoft.com>, <tglx@...utronix.de>,
<mingo@...hat.com>, <bp@...en8.de>, <dave.hansen@...ux.intel.com>,
<x86@...nel.org>, <amoorthy@...gle.com>
Subject: [PATCH 12/18] KVM: x86/mmu: Introduce infrastructure to handle non-executable mappings
The upcoming access-restriction KVM memory attributes open the door to
installing non-executable mappings. Introduce a new field in struct
kvm_page_fault, map_executable, to control whether the gfn range should
be mapped as executable, and make sure it is taken into account when
generating new SPTEs.
No functional change intended.
Signed-off-by: Nicolas Saenz Julienne <nsaenz@...zon.com>
---
arch/x86/kvm/mmu/mmu.c | 6 +++++-
arch/x86/kvm/mmu/mmu_internal.h | 2 ++
arch/x86/kvm/mmu/tdp_mmu.c | 8 ++++++--
3 files changed, 13 insertions(+), 3 deletions(-)
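
Note: for context, the hunks below all reduce to the same access
computation. A minimal standalone sketch of that computation follows;
the ACC_* values used here are illustrative stand-ins mirroring the
definitions in arch/x86/kvm/mmu.h, not the kernel's own headers, and
fault_access() is a hypothetical helper for demonstration only.

/*
 * Standalone illustration; ACC_* values are stand-ins, not the
 * authoritative kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define ACC_EXEC_MASK	(1u << 0)
#define ACC_WRITE_MASK	(1u << 1)
#define ACC_USER_MASK	(1u << 2)
#define ACC_ALL		(ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/*
 * Same shape as the logic added to direct_map() and
 * tdp_mmu_map_handle_target_level(): start from ACC_ALL and drop the
 * exec permission when the fault was flagged as non-executable.
 */
static unsigned int fault_access(bool map_executable)
{
	unsigned int access = ACC_ALL;

	if (!map_executable)
		access &= ~ACC_EXEC_MASK;

	return access;
}

int main(void)
{
	printf("map_executable=true  -> access=%#x\n", fault_access(true));  /* 0x7 */
	printf("map_executable=false -> access=%#x\n", fault_access(false)); /* 0x6 */
	return 0;
}
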
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 39b113afefdfc..b0c210b96419f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3197,6 +3197,7 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_
static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
struct kvm_shadow_walk_iterator it;
+ unsigned int access = ACC_ALL;
struct kvm_mmu_page *sp;
int ret;
gfn_t base_gfn = fault->gfn;
@@ -3229,7 +3230,10 @@ static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (WARN_ON_ONCE(it.level != fault->goal_level))
return -EFAULT;
- ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
+ if (!fault->map_executable)
+ access &= ~ACC_EXEC_MASK;
+
+ ret = mmu_set_spte(vcpu, fault->slot, it.sptep, access,
base_gfn, fault->pfn, fault);
if (ret == RET_PF_SPURIOUS)
return ret;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 4f5c4c8af9941..af0c3a154ed89 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -241,6 +241,7 @@ struct kvm_page_fault {
kvm_pfn_t pfn;
hva_t hva;
bool map_writable;
+ bool map_executable;
/*
* Indicates the guest is trying to write a gfn that contains one or
@@ -313,6 +314,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
.pfn = KVM_PFN_ERR_FAULT,
.hva = KVM_HVA_ERR_BAD,
+ .map_executable = true,
};
int r;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 36539c1b36cd6..344781981999a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1018,6 +1018,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
struct tdp_iter *iter)
{
struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
+ unsigned int access = ACC_ALL;
u64 new_spte;
int ret = RET_PF_FIXED;
bool wrprot = false;
@@ -1025,10 +1026,13 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
return RET_PF_RETRY;
+ if (!fault->map_executable)
+ access &= ~ACC_EXEC_MASK;
+
if (unlikely(!fault->slot))
- new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
+ new_spte = make_mmio_spte(vcpu, iter->gfn, access);
else
- wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
+ wrprot = make_spte(vcpu, sp, fault->slot, access, iter->gfn,
fault->pfn, iter->old_spte, fault->prefetch, true,
fault->map_writable, &new_spte);
--
2.40.1