[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250110110023.2963795-8-aneesh.kumar@kernel.org>
Date: Fri, 10 Jan 2025 16:30:23 +0530
From: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@...nel.org>
To: linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.linux.dev
Cc: Suzuki K Poulose <Suzuki.Poulose@....com>,
Steven Price <steven.price@....com>,
Will Deacon <will@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Joey Gouly <joey.gouly@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
"Aneesh Kumar K.V (Arm)" <aneesh.kumar@...nel.org>
Subject: [PATCH v2 7/7] KVM: arm64: Split some of the kvm_pgtable_prot bits into separate defines
Some of the kvm_pgtable_prot values are mutually exclusive, such as
KVM_PGTABLE_PROT_NORMAL_NC and KVM_PGTABLE_PROT_DEVICE. Split the
Normal memory non-cacheable and NoTagAccess attributes out into
separate #defines, so that the kvm_pgtable_prot bits only indicate
whether a mapping is device or normal memory.
No functional change intended.
Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@...nel.org>
---
arch/arm64/include/asm/kvm_pgtable.h | 10 +++---
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 2 +-
arch/arm64/kvm/hyp/pgtable.c | 47 +++++++++++++--------------
arch/arm64/kvm/mmu.c | 10 +++---
4 files changed, 36 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 0daf4ffedc99..9443a8ad9343 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -239,7 +239,6 @@ enum kvm_pgtable_stage2_flags {
* @KVM_PGTABLE_PROT_W: Write permission.
* @KVM_PGTABLE_PROT_R: Read permission.
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
- * @KVM_PGTABLE_PROT_NORMAL_NC: Normal noncacheable attributes.
* @KVM_PGTABLE_PROT_SW0: Software bit 0.
* @KVM_PGTABLE_PROT_SW1: Software bit 1.
* @KVM_PGTABLE_PROT_SW2: Software bit 2.
@@ -251,8 +250,6 @@ enum kvm_pgtable_prot {
KVM_PGTABLE_PROT_R = BIT(2),
KVM_PGTABLE_PROT_DEVICE = BIT(3),
- KVM_PGTABLE_PROT_NORMAL_NC = BIT(4),
- KVM_PGTABLE_PROT_NORMAL_NOTAGACCESS = BIT(5),
KVM_PGTABLE_PROT_SW0 = BIT(55),
KVM_PGTABLE_PROT_SW1 = BIT(56),
@@ -263,6 +260,11 @@ enum kvm_pgtable_prot {
#define KVM_PGTABLE_PROT_RW (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX (KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)
+/* different memory attribute requested */
+#define KVM_PGTABLE_ATTR_NORMAL_NC 0x1
+#define KVM_PGTABLE_ATTR_NORMAL_NOTAGACCESS 0x2
+
+
#define PKVM_HOST_MEM_PROT KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT KVM_PGTABLE_PROT_RW
@@ -606,7 +608,7 @@ kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
* Return: 0 on success, negative error code on failure.
*/
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
- u64 phys, enum kvm_pgtable_prot prot,
+ u64 phys, enum kvm_pgtable_prot prot, int mem_attr,
void *mc, enum kvm_pgtable_walk_flags flags);
/**
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index caba3e4bd09e..25c8b2fbce15 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -411,7 +411,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
enum kvm_pgtable_prot prot)
{
return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
- prot, &host_s2_pool, 0);
+ prot, 0, &host_s2_pool, 0);
}
/*
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 4eb6e9345c12..9dd93ae8bb97 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -673,35 +673,34 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
- kvm_pte_t *ptep)
+ int mem_attr, kvm_pte_t *ptep)
{
kvm_pte_t attr;
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
- unsigned long prot_mask = KVM_PGTABLE_PROT_DEVICE |
- KVM_PGTABLE_PROT_NORMAL_NC |
- KVM_PGTABLE_PROT_NORMAL_NOTAGACCESS;
+ bool device = prot & KVM_PGTABLE_PROT_DEVICE;
- switch (prot & prot_mask) {
- case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
- return -EINVAL;
- case KVM_PGTABLE_PROT_DEVICE:
+ if (device) {
if (prot & KVM_PGTABLE_PROT_X)
return -EINVAL;
attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
- break;
- case KVM_PGTABLE_PROT_NORMAL_NC:
- if (prot & KVM_PGTABLE_PROT_X)
+ if (!mem_attr)
return -EINVAL;
- attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
- break;
- case KVM_PGTABLE_PROT_NORMAL_NOTAGACCESS:
- if (system_supports_notagaccess())
- attr = KVM_S2_MEMATTR(pgt, NORMAL_NOTAGACCESS);
- else
- return -EINVAL;
- break;
- default:
- attr = KVM_S2_MEMATTR(pgt, NORMAL);
+ } else {
+ switch (mem_attr) {
+ case KVM_PGTABLE_ATTR_NORMAL_NC:
+ if (prot & KVM_PGTABLE_PROT_X)
+ return -EINVAL;
+ attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
+ break;
+ case KVM_PGTABLE_ATTR_NORMAL_NOTAGACCESS:
+ if (system_supports_notagaccess())
+ attr = KVM_S2_MEMATTR(pgt, NORMAL_NOTAGACCESS);
+ else
+ return -EINVAL;
+ break;
+ default:
+ attr = KVM_S2_MEMATTR(pgt, NORMAL);
+ }
}
if (!(prot & KVM_PGTABLE_PROT_X))
@@ -1060,7 +1059,7 @@ static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
}
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
- u64 phys, enum kvm_pgtable_prot prot,
+ u64 phys, enum kvm_pgtable_prot prot, int mem_attr,
void *mc, enum kvm_pgtable_walk_flags flags)
{
int ret;
@@ -1081,7 +1080,7 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
return -EINVAL;
- ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
+ ret = stage2_set_prot_attr(pgt, prot, mem_attr, &map_data.attr);
if (ret)
return ret;
@@ -1408,7 +1407,7 @@ kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
if (!IS_ALIGNED(phys, kvm_granule_size(level)))
return ERR_PTR(-EINVAL);
- ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
+ ret = stage2_set_prot_attr(pgt, prot, 0, &map_data.attr);
if (ret)
return ERR_PTR(ret);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 54e5bfe4f126..87afc8862459 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1130,8 +1130,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
break;
write_lock(&kvm->mmu_lock);
- ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
- &cache, 0);
+ ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE,
+ pa, prot, 0, &cache, 0);
write_unlock(&kvm->mmu_lock);
if (ret)
break;
@@ -1452,6 +1452,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;
struct page *page;
+ int normal_memattr = 0;
if (fault_is_perm)
fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
@@ -1666,7 +1667,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (mte_allowed)
sanitise_mte_tags(kvm, pfn, vma_pagesize);
else if (kvm_has_mte_perm(kvm))
- prot |= KVM_PGTABLE_PROT_NORMAL_NOTAGACCESS;
+ normal_memattr = KVM_PGTABLE_ATTR_NORMAL_NOTAGACCESS;
else {
ret = -EFAULT;
goto out_unlock;
@@ -1681,7 +1682,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (device) {
if (vfio_allow_any_uc)
- prot |= KVM_PGTABLE_PROT_NORMAL_NC;
+ normal_memattr = KVM_PGTABLE_ATTR_NORMAL_NC;
else
prot |= KVM_PGTABLE_PROT_DEVICE;
} else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC) &&
@@ -1704,6 +1705,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
} else {
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
__pfn_to_phys(pfn), prot,
+ normal_memattr,
memcache,
KVM_PGTABLE_WALK_HANDLE_FAULT |
KVM_PGTABLE_WALK_SHARED);
--
2.43.0
Powered by blists - more mailing lists