lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240911143421.85612-8-faresx@amazon.de>
Date: Wed, 11 Sep 2024 14:34:06 +0000
From: Fares Mehanna <faresx@...zon.de>
To: 
CC: <nh-open-source@...zon.com>, Fares Mehanna <faresx@...zon.de>, "Marc
 Zyngier" <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>, James Morse
	<james.morse@....com>, Suzuki K Poulose <suzuki.poulose@....com>, Zenghui Yu
	<yuzenghui@...wei.com>, Catalin Marinas <catalin.marinas@....com>, "Will
 Deacon" <will@...nel.org>, Andrew Morton <akpm@...ux-foundation.org>, "Kemeng
 Shi" <shikemeng@...weicloud.com>, Pierre-Clément Tosi
	<ptosi@...gle.com>, Ard Biesheuvel <ardb@...nel.org>, Mark Rutland
	<mark.rutland@....com>, Javier Martinez Canillas <javierm@...hat.com>, "Arnd
 Bergmann" <arnd@...db.de>, Fuad Tabba <tabba@...gle.com>, Mark Brown
	<broonie@...nel.org>, Joey Gouly <joey.gouly@....com>, Kristina Martsenko
	<kristina.martsenko@....com>, Randy Dunlap <rdunlap@...radead.org>, "Bjorn
 Helgaas" <bhelgaas@...gle.com>, Jean-Philippe Brucker
	<jean-philippe@...aro.org>, "Mike Rapoport (IBM)" <rppt@...nel.org>, "David
 Hildenbrand" <david@...hat.com>, Roman Kagan <rkagan@...zon.de>, "moderated
 list:KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)"
	<linux-arm-kernel@...ts.infradead.org>, "open list:KERNEL VIRTUAL MACHINE FOR
 ARM64 (KVM/arm64)" <kvmarm@...ts.linux.dev>, open list
	<linux-kernel@...r.kernel.org>, "open list:MEMORY MANAGEMENT"
	<linux-mm@...ck.org>
Subject: [RFC PATCH 7/7] arm64: KVM: Allocate vCPU fp-regs dynamically on VHE and KERNEL_SECRETMEM enabled systems

Similar to what was done in this commit:
  "arm64: KVM: Allocate vCPU gp-regs dynamically on VHE and KERNEL_SECRETMEM enabled systems"

We're moving fp-regs to dynamically allocated memory on systems that support
VHE and are compiled with KERNEL_SECRETMEM enabled. Otherwise, we fall back to
the "fp_regs_storage" struct embedded in the vCPU context.

Accessing fp-regs embedded in the vCPU context, without a dereference, is done as:
add     \regs, \ctxt, #offsetof(struct kvm_cpu_context, fp_regs_storage)

Accessing the dynamically allocated fp-regs, with a dereference, is done as:
ldr     \regs, [\ctxt, #offsetof(struct kvm_cpu_context, fp_regs)]

Signed-off-by: Fares Mehanna <faresx@...zon.de>
---
 arch/arm64/include/asm/kvm_host.h | 16 ++++++++++++++--
 arch/arm64/kernel/image-vars.h    |  1 +
 arch/arm64/kvm/arm.c              | 29 +++++++++++++++++++++++++++--
 arch/arm64/kvm/va_layout.c        | 23 +++++++++++++++++++----
 4 files changed, 61 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e8ed2c12479f..4132c57d7e69 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -550,7 +550,9 @@ struct kvm_cpu_context {
 	u64	spsr_irq;
 	u64	spsr_fiq;
 
-	struct user_fpsimd_state fp_regs;
+	struct user_fpsimd_state *fp_regs;
+	struct user_fpsimd_state fp_regs_storage;
+	struct secretmem_area *fp_regs_area;
 
 	u64 sys_regs[NR_SYS_REGS];
 
@@ -968,7 +970,17 @@ static __always_inline struct user_pt_regs *ctxt_gp_regs(const struct kvm_cpu_co
 	return regs;
 }
 #define vcpu_gp_regs(v)		(ctxt_gp_regs(&(v)->arch.ctxt))
-#define ctxt_fp_regs(ctxt)	(&(ctxt).fp_regs)
+
+static __always_inline struct user_fpsimd_state *ctxt_fp_regs(const struct kvm_cpu_context *ctxt)
+{
+	struct user_fpsimd_state *fp_regs = (void *) ctxt;
+	asm volatile(ALTERNATIVE_CB("add %0, %0, %1\n",
+				    ARM64_HAS_VIRT_HOST_EXTN,
+				    kvm_update_ctxt_fp_regs)
+		    : "+r" (fp_regs)
+		    : "I" (offsetof(struct kvm_cpu_context, fp_regs_storage)));
+	return fp_regs;
+}
 #define vcpu_fp_regs(v)		(ctxt_fp_regs(&(v)->arch.ctxt))
 
 /*
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index e3bb626e299c..904573598e0f 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -87,6 +87,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
 KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
 KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
 KVM_NVHE_ALIAS(kvm_update_ctxt_gp_regs);
+KVM_NVHE_ALIAS(kvm_update_ctxt_fp_regs);
 KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
 KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
 KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7542af3f766a..17b42e9099c3 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -477,6 +477,14 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 		if (!vcpu->arch.ctxt.regs_area)
 			return -ENOMEM;
 		vcpu->arch.ctxt.regs = vcpu->arch.ctxt.regs_area->ptr;
+
+		pages_needed = (sizeof(*vcpu_fp_regs(vcpu)) + PAGE_SIZE - 1) / PAGE_SIZE;
+		vcpu->arch.ctxt.fp_regs_area = secretmem_allocate_pages(fls(pages_needed - 1));
+		if (!vcpu->arch.ctxt.fp_regs_area) {
+			err = -ENOMEM;
+			goto free_vcpu_ctxt;
+		}
+		vcpu->arch.ctxt.fp_regs = vcpu->arch.ctxt.fp_regs_area->ptr;
 	}
 
 	/* Set up the timer */
@@ -504,8 +512,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	return kvm_share_hyp(vcpu, vcpu + 1);
 
 free_vcpu_ctxt:
-	if (kvm_use_dynamic_regs())
+	if (kvm_use_dynamic_regs()) {
 		secretmem_release_pages(vcpu->arch.ctxt.regs_area);
+		secretmem_release_pages(vcpu->arch.ctxt.fp_regs_area);
+	}
 	return err;
 }
 
@@ -524,8 +534,10 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_arm_vcpu_destroy(vcpu);
 
-	if (kvm_use_dynamic_regs())
+	if (kvm_use_dynamic_regs()) {
 		secretmem_release_pages(vcpu->arch.ctxt.regs_area);
+		secretmem_release_pages(vcpu->arch.ctxt.fp_regs_area);
+	}
 }
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -2729,12 +2741,25 @@ static int init_hyp_hve_mode(void)
 		per_cpu(kvm_host_data, cpu).host_ctxt.regs = kvm_host_data_regs;
 	}
 
+	/* Allocate fp-regs */
+	for_each_possible_cpu(cpu) {
+		void *kvm_host_data_regs;
+
+		kvm_host_data_regs = kzalloc(sizeof(struct user_fpsimd_state), GFP_KERNEL);
+		if (!kvm_host_data_regs) {
+			err = -ENOMEM;
+			goto free_regs;
+		}
+		per_cpu(kvm_host_data, cpu).host_ctxt.fp_regs = kvm_host_data_regs;
+	}
+
 	return 0;
 
 free_regs:
 	for_each_possible_cpu(cpu) {
 		kfree(per_cpu(kvm_hyp_ctxt, cpu).regs);
 		kfree(per_cpu(kvm_host_data, cpu).host_ctxt.regs);
+		kfree(per_cpu(kvm_host_data, cpu).host_ctxt.fp_regs);
 	}
 
 	return err;
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index fcef7e89d042..ba1030fa5b08 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -185,10 +185,12 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 	}
 }
 
-void __init kvm_update_ctxt_gp_regs(struct alt_instr *alt,
-				    __le32 *origptr, __le32 *updptr, int nr_inst)
+static __always_inline void __init kvm_update_ctxt_regs(struct alt_instr *alt,
+							__le32 *origptr,
+							__le32 *updptr,
+							int nr_inst, u32 imm)
 {
-	u32 rd, rn, imm, insn, oinsn;
+	u32 rd, rn, insn, oinsn;
 
 	BUG_ON(nr_inst != 1);
 
@@ -198,7 +200,6 @@ void __init kvm_update_ctxt_gp_regs(struct alt_instr *alt,
 	oinsn = le32_to_cpu(origptr[0]);
 	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
 	rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);
-	imm = offsetof(struct kvm_cpu_context, regs);
 
 	insn = aarch64_insn_gen_load_store_imm(rd, rn, imm,
 					       AARCH64_INSN_SIZE_64,
@@ -208,6 +209,20 @@ void __init kvm_update_ctxt_gp_regs(struct alt_instr *alt,
 	updptr[0] = cpu_to_le32(insn);
 }
 
+void __init kvm_update_ctxt_gp_regs(struct alt_instr *alt,
+				    __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	u32 offset = offsetof(struct kvm_cpu_context, regs);
+	kvm_update_ctxt_regs(alt, origptr, updptr, nr_inst, offset);
+}
+
+void __init kvm_update_ctxt_fp_regs(struct alt_instr *alt,
+				    __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	u32 offset = offsetof(struct kvm_cpu_context, fp_regs);
+	kvm_update_ctxt_regs(alt, origptr, updptr, nr_inst, offset);
+}
+
 void kvm_patch_vector_branch(struct alt_instr *alt,
 			     __le32 *origptr, __le32 *updptr, int nr_inst)
 {
-- 
2.40.1




Amazon Web Services Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrung: Christian Schlaeger, Jonathan Weiss
Eingetragen am Amtsgericht Charlottenburg unter HRB 257764 B
Sitz: Berlin
Ust-ID: DE 365 538 597


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ