Message-ID: <32866e5d00174697730d6231d2fb81f6b8d98c8a.1676659352.git.ackerleytng@google.com>
Date:   Fri, 17 Feb 2023 18:54:05 +0000
From:   Ackerley Tng <ackerleytng@...gle.com>
To:     pbonzini@...hat.com, shuah@...nel.org, seanjc@...gle.com,
        dmatlack@...gle.com, kvm@...r.kernel.org,
        linux-kselftest@...r.kernel.org, linux-kernel@...r.kernel.org
Cc:     erdemaktas@...gle.com, vannapurve@...gle.com, sagis@...gle.com,
        mail@...iej.szmigiero.name, Ackerley Tng <ackerleytng@...gle.com>
Subject: [PATCH 1/1] KVM: selftests: Adjust VM's initial stack address to
 align with SysV ABI spec

Align stack to match calling sequence requirements in section "The
Stack Frame" of the System V ABI AMD64 Architecture Processor
Supplement, which requires the value (%rsp + 8) to be a multiple of 16
when control is transferred to the function entry point.
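
For illustration only (the names below are hypothetical and not taken
from the patch), the arithmetic that requirement implies, given a
page-aligned top-of-stack address like the one the selftest framework
allocates, is roughly:

  #include <assert.h>
  #include <stdint.h>

  /*
   * Illustrative sketch only: given a page-aligned top-of-stack address,
   * pick an initial %rsp so that (%rsp + 8) is a multiple of 16, i.e.
   * the state a function sees right after a CALL has pushed an 8-byte
   * return address onto a 16-byte-aligned stack.
   */
  uint64_t initial_rsp(uint64_t stack_top)
  {
          /* Page alignment implies (at least) 16-byte alignment. */
          assert((stack_top % 16) == 0);

          /* Leave room for the "return address" slot the ABI assumes. */
          return stack_top - 8;
  }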

This is required because GCC already adheres to the SysV ABI spec and
generates call sequences that keep (%rsp + 8) a multiple of 16 when
control is transferred to the function entry point.

This fixes guest crashes seen when compiled guest code contains certain
SSE instructions, because these SSE instructions require their memory
operands (including those on the stack) to be 16-byte aligned.
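
As a concrete illustration (hypothetical code, not taken from any
selftest), GCC typically keeps a 128-bit local like the one below in a
16-byte-aligned stack slot and accesses it with movaps/movdqa, which
fault if the guest entered with a misaligned %rsp:

  #include <emmintrin.h>
  #include <stdio.h>

  /* Hypothetical guest-style function that forces SSE stack traffic. */
  static int uses_sse_on_the_stack(void)
  {
          volatile __m128i v = _mm_set1_epi32(42);  /* kept in a stack slot */
          __m128i tmp = v;                          /* aligned reload from the stack */

          return _mm_cvtsi128_si32(tmp);            /* low 32 bits: 42 */
  }

  int main(void)
  {
          printf("%d\n", uses_sse_on_the_stack());
          return 0;
  }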

Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
---

This patch is a follow-up from discussions at
https://lore.kernel.org/lkml/20230121001542.2472357-9-ackerleytng@google.com/

---
 .../selftests/kvm/include/linux/align.h        | 15 +++++++++++++++
 .../selftests/kvm/lib/x86_64/processor.c       | 18 +++++++++++++++++-
 2 files changed, 32 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/kvm/include/linux/align.h

diff --git a/tools/testing/selftests/kvm/include/linux/align.h b/tools/testing/selftests/kvm/include/linux/align.h
new file mode 100644
index 000000000000..2b4acec7b95a
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/linux/align.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ALIGN_H
+#define _LINUX_ALIGN_H
+
+#include <linux/const.h>
+
+/* @a is a power of 2 value */
+#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
+#define __ALIGN_MASK(x, mask)	__ALIGN_KERNEL_MASK((x), (mask))
+#define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define PTR_ALIGN_DOWN(p, a)	((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
+
+#endif	/* _LINUX_ALIGN_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index acfa1d01e7df..09b48ae96fdd 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2018, Google LLC.
  */

+#include "linux/align.h"
 #include "test_util.h"
 #include "kvm_util.h"
 #include "processor.h"
@@ -569,6 +570,21 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 				       DEFAULT_GUEST_STACK_VADDR_MIN,
 				       MEM_REGION_DATA);

+	stack_vaddr += DEFAULT_STACK_PGS * getpagesize();
+
+	/*
+	 * Align stack to match calling sequence requirements in section "The
+	 * Stack Frame" of the System V ABI AMD64 Architecture Processor
+	 * Supplement, which requires the value (%rsp + 8) to be a multiple of
+	 * 16 when control is transferred to the function entry point.
+	 *
+	 * If this code is ever used to launch a vCPU with a 32-bit entry point,
+	 * it may need to subtract 4 bytes instead of 8 bytes.
+	 */
+	TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
+		"stack_vaddr must be page aligned for stack adjustment of -8 to work");
+	stack_vaddr -= 8;
+
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
 	vcpu_setup(vm, vcpu);
@@ -576,7 +592,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	/* Setup guest general purpose registers */
 	vcpu_regs_get(vcpu, &regs);
 	regs.rflags = regs.rflags | 0x2;
-	regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
+	regs.rsp = stack_vaddr;
 	regs.rip = (unsigned long) guest_code;
 	vcpu_regs_set(vcpu, &regs);
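
For reference, a standalone sketch of how the helpers pulled in by the
new align.h behave, assuming the usual <linux/const.h> definitions of
__ALIGN_KERNEL/__ALIGN_KERNEL_MASK (shown only to illustrate the
TEST_ASSERT(IS_ALIGNED(...)) check above):

  #include <assert.h>
  #include <stdint.h>

  #define __ALIGN_KERNEL_MASK(x, mask)  (((x) + (mask)) & ~(mask))
  #define __ALIGN_KERNEL(x, a)          __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
  #define ALIGN(x, a)                   __ALIGN_KERNEL((x), (a))
  #define IS_ALIGNED(x, a)              (((x) & ((__typeof__(x))(a) - 1)) == 0)

  int main(void)
  {
          uint64_t page = 0x1000;

          assert(IS_ALIGNED(2 * page, page));        /* page-aligned stack top */
          assert(!IS_ALIGNED(2 * page - 8, page));   /* no longer aligned after -8 */
          assert(ALIGN(page + 1, page) == 2 * page); /* round up to the next page */

          return 0;
  }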

--
2.39.2.637.g21b0678d19-goog
