[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230213163351.30704-5-minipli@grsecurity.net>
Date: Mon, 13 Feb 2023 17:33:50 +0100
From: Mathias Krause <minipli@...ecurity.net>
To: kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Mathias Krause <minipli@...ecurity.net>
Subject: [PATCH 4/5] KVM: x86: Shrink struct kvm_vcpu_arch
Reshuffle the members of struct kvm_vcpu_arch to make use of otherwise
unused padding holes, allowing denser packing without disrupting their
grouping.
This allows us to shrink the object size by 48 bytes for 64-bit builds.
Signed-off-by: Mathias Krause <minipli@...ecurity.net>
---
Instead of attempting to create an optimal shuffle by sorting members by
their alignment constraints, I opted to keep the members grouped by
their meaning, to preserve the code's maintainability.
arch/x86/include/asm/kvm_host.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 040eee3e9583..5036456b05b0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -824,18 +824,18 @@ struct kvm_vcpu_arch {
int halt_request; /* real mode on Intel only */
+ u32 kvm_cpuid_base;
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
- u32 kvm_cpuid_base;
u64 reserved_gpa_bits;
int maxphyaddr;
/* emulate context */
- struct x86_emulate_ctxt *emulate_ctxt;
bool emulate_regs_need_sync_to_vcpu;
bool emulate_regs_need_sync_from_vcpu;
+ struct x86_emulate_ctxt *emulate_ctxt;
int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
gpa_t time;
@@ -916,17 +916,17 @@ struct kvm_vcpu_arch {
unsigned long last_retry_addr;
struct {
- bool halted;
gfn_t gfns[ASYNC_PF_PER_VCPU];
struct gfn_to_hva_cache data;
u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
- u16 vec;
u32 id;
+ u16 vec;
bool send_user_only;
u32 host_apf_flags;
bool delivery_as_pf_vmexit;
bool pageready_pending;
+ bool halted;
} apf;
/* OSVW MSRs (AMD only) */
@@ -942,6 +942,9 @@ struct kvm_vcpu_arch {
u64 msr_kvm_poll_control;
+ /* set at EPT violation at this point */
+ unsigned long exit_qualification;
+
/*
* Indicates the guest is trying to write a gfn that contains one or
* more of the PTEs used to translate the write itself, i.e. the access
@@ -959,9 +962,6 @@ struct kvm_vcpu_arch {
*/
bool write_fault_to_shadow_pgtable;
- /* set at EPT violation at this point */
- unsigned long exit_qualification;
-
/* pv related host specific info */
struct {
bool pv_unhalted;
@@ -979,9 +979,6 @@ struct kvm_vcpu_arch {
/* Host CPU on which VM-entry was most recently attempted */
int last_vmentry_cpu;
- /* AMD MSRC001_0015 Hardware Configuration */
- u64 msr_hwcr;
-
/* pv related cpuid info */
struct {
/*
@@ -1006,6 +1003,9 @@ struct kvm_vcpu_arch {
*/
bool pdptrs_from_userspace;
+ /* AMD MSRC001_0015 Hardware Configuration */
+ u64 msr_hwcr;
+
#if IS_ENABLED(CONFIG_HYPERV)
hpa_t hv_root_tdp;
#endif
--
2.39.1
Powered by blists - more mailing lists