Message-ID: <20251110180131.28264-2-chang.seok.bae@intel.com>
Date: Mon, 10 Nov 2025 18:01:12 +0000
From: "Chang S. Bae" <chang.seok.bae@...el.com>
To: kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Cc: pbonzini@...hat.com,
seanjc@...gle.com,
chao.gao@...el.com,
zhao1.liu@...el.com,
chang.seok.bae@...el.com
Subject: [PATCH RFC v1 01/20] KVM: x86: Rename register accessors to be GPR-specific
Rename the vCPU register accessors kvm_register_read() and
kvm_register_write() to kvm_gpr_read() and kvm_gpr_write() to make them
explicitly GPR-only.

The existing accessors operate on the cached vCPU register state, which
holds both the GPRs and RIP. RIP already has its own dedicated
interface, so in practice these helpers are used only for GPR access;
the new names make that explicit.

No functional change intended.
Signed-off-by: Chang S. Bae <chang.seok.bae@...el.com>
---
arch/x86/kvm/svm/svm.c | 8 ++++----
arch/x86/kvm/vmx/nested.c | 20 ++++++++++----------
arch/x86/kvm/vmx/vmx.c | 12 ++++++------
arch/x86/kvm/x86.c | 10 +++++-----
arch/x86/kvm/x86.h | 5 ++---
arch/x86/kvm/xen.c | 2 +-
6 files changed, 28 insertions(+), 29 deletions(-)
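
Note (not part of the commit message): a minimal userspace sketch of the
renamed helpers' semantics, using a hypothetical struct vcpu_model and a
long_mode flag as stand-ins for the real vCPU state and is_64_bit_mode().
Outside 64-bit mode, both accessors truncate to the low 32 bits, exactly
as kvm_register_read()/kvm_register_write() did before the rename. The
example assumes an LP64 target (64-bit unsigned long).

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct vcpu_model {
		unsigned long gprs[16];	/* cached GPR file, RAX..R15 */
		bool long_mode;		/* stand-in for is_64_bit_mode() */
	};

	static unsigned long gpr_read(const struct vcpu_model *v, int reg)
	{
		unsigned long val = v->gprs[reg];

		/* Truncate to 32 bits outside 64-bit mode */
		return v->long_mode ? val : (uint32_t)val;
	}

	static void gpr_write(struct vcpu_model *v, int reg, unsigned long val)
	{
		if (!v->long_mode)
			val = (uint32_t)val;
		v->gprs[reg] = val;
	}

	int main(void)
	{
		struct vcpu_model v = { .long_mode = false };

		gpr_write(&v, 0, 0x123456789abcdef0UL);	/* high bits dropped */
		printf("%#lx\n", gpr_read(&v, 0));	/* prints 0x9abcdef0 */
		return 0;
	}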
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 153c12dbf3eb..3aa2c37754ef 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2473,7 +2473,7 @@ static int cr_interception(struct kvm_vcpu *vcpu)
err = 0;
if (cr >= 16) { /* mov to cr */
cr -= 16;
- val = kvm_register_read(vcpu, reg);
+ val = kvm_gpr_read(vcpu, reg);
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
@@ -2519,7 +2519,7 @@ static int cr_interception(struct kvm_vcpu *vcpu)
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
- kvm_register_write(vcpu, reg, val);
+ kvm_gpr_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
}
return kvm_complete_insn_gp(vcpu, err);
@@ -2591,9 +2591,9 @@ static int dr_interception(struct kvm_vcpu *vcpu)
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
if (dr >= 16) { /* mov to DRn */
dr -= 16;
- err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
+ err = kvm_set_dr(vcpu, dr, kvm_gpr_read(vcpu, reg));
} else {
- kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
+ kvm_gpr_write(vcpu, reg, kvm_get_dr(vcpu, dr));
}
return kvm_complete_insn_gp(vcpu, err);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 76271962cb70..47a941989787 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5325,9 +5325,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
else if (addr_size == 0)
off = (gva_t)sign_extend64(off, 15);
if (base_is_valid)
- off += kvm_register_read(vcpu, base_reg);
+ off += kvm_gpr_read(vcpu, base_reg);
if (index_is_valid)
- off += kvm_register_read(vcpu, index_reg) << scaling;
+ off += kvm_gpr_read(vcpu, index_reg) << scaling;
vmx_get_segment(vcpu, &s, seg_reg);
/*
@@ -5719,7 +5719,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
return 1;
/* Decode instruction info and find the field to read */
- field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
+ field = kvm_gpr_read(vcpu, (((instr_info) >> 28) & 0xf));
if (!nested_vmx_is_evmptr12_valid(vmx)) {
/*
@@ -5768,7 +5768,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
* on the guest's mode (32 or 64 bit), not on the given field's length.
*/
if (instr_info & BIT(10)) {
- kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
+ kvm_gpr_write(vcpu, (((instr_info) >> 3) & 0xf), value);
} else {
len = is_64_bit_mode(vcpu) ? 8 : 4;
if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -5842,7 +5842,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return nested_vmx_failInvalid(vcpu);
if (instr_info & BIT(10))
- value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
+ value = kvm_gpr_read(vcpu, (((instr_info) >> 3) & 0xf));
else {
len = is_64_bit_mode(vcpu) ? 8 : 4;
if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -5853,7 +5853,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return kvm_handle_memory_failure(vcpu, r, &e);
}
- field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
+ field = kvm_gpr_read(vcpu, (((instr_info) >> 28) & 0xf));
offset = get_vmcs12_field_offset(field);
if (offset < 0)
@@ -6051,7 +6051,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
- type = kvm_register_read(vcpu, gpr_index);
+ type = kvm_gpr_read(vcpu, gpr_index);
types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
@@ -6132,7 +6132,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
- type = kvm_register_read(vcpu, gpr_index);
+ type = kvm_gpr_read(vcpu, gpr_index);
types = (vmx->nested.msrs.vpid_caps &
VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
@@ -6406,7 +6406,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
reg = (exit_qualification >> 8) & 15;
- val = kvm_register_read(vcpu, reg);
+ val = kvm_gpr_read(vcpu, reg);
switch (cr) {
case 0:
if (vmcs12->cr0_guest_host_mask &
@@ -6492,7 +6492,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
/* Decode instruction info and find the field to access */
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ field = kvm_gpr_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Out-of-range fields always cause a VM exit from L2 to L1 */
if (field >> 15)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f87c216d976d..c7d38f7692cf 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5462,7 +5462,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
- val = kvm_register_read(vcpu, reg);
+ val = kvm_gpr_read(vcpu, reg);
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
@@ -5504,12 +5504,12 @@ static int handle_cr(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(enable_unrestricted_guest);
val = kvm_read_cr3(vcpu);
- kvm_register_write(vcpu, reg, val);
+ kvm_gpr_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
return kvm_skip_emulated_instruction(vcpu);
case 8:
val = kvm_get_cr8(vcpu);
- kvm_register_write(vcpu, reg, val);
+ kvm_gpr_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
return kvm_skip_emulated_instruction(vcpu);
}
@@ -5579,10 +5579,10 @@ static int handle_dr(struct kvm_vcpu *vcpu)
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
- kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
+ kvm_gpr_write(vcpu, reg, kvm_get_dr(vcpu, dr));
err = 0;
} else {
- err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
+ err = kvm_set_dr(vcpu, dr, kvm_gpr_read(vcpu, reg));
}
out:
@@ -5941,7 +5941,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
- type = kvm_register_read(vcpu, gpr_index);
+ type = kvm_gpr_read(vcpu, gpr_index);
/* According to the Intel instruction reference, the memory operand
* is read even if it isn't needed (e.g., for type==all)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b4b5d2d09634..603057ea7421 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2084,8 +2084,8 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
static int complete_fast_rdmsr_imm(struct kvm_vcpu *vcpu)
{
if (!vcpu->run->msr.error)
- kvm_register_write(vcpu, vcpu->arch.cui_rdmsr_imm_reg,
- vcpu->run->msr.data);
+ kvm_gpr_write(vcpu, vcpu->arch.cui_rdmsr_imm_reg,
+ vcpu->run->msr.data);
return complete_fast_msr_access(vcpu);
}
@@ -2139,7 +2139,7 @@ static int __kvm_emulate_rdmsr(struct kvm_vcpu *vcpu, u32 msr, int reg,
kvm_rax_write(vcpu, data & -1u);
kvm_rdx_write(vcpu, (data >> 32) & -1u);
} else {
- kvm_register_write(vcpu, reg, data);
+ kvm_gpr_write(vcpu, reg, data);
}
} else {
/* MSR read failed? See if we should ask user space */
@@ -2197,7 +2197,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr);
int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
{
- return __kvm_emulate_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
+ return __kvm_emulate_wrmsr(vcpu, msr, kvm_gpr_read(vcpu, reg));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr_imm);
@@ -2301,7 +2301,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr);
fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
{
- return __handle_fastpath_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
+ return __handle_fastpath_wrmsr(vcpu, msr, kvm_gpr_read(vcpu, reg));
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr_imm);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f3dc77f006f9..4edadd64d3d5 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -400,15 +400,14 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return false;
}
-static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
+static inline unsigned long kvm_gpr_read(struct kvm_vcpu *vcpu, int reg)
{
unsigned long val = kvm_register_read_raw(vcpu, reg);
return is_64_bit_mode(vcpu) ? val : (u32)val;
}
-static inline void kvm_register_write(struct kvm_vcpu *vcpu,
- int reg, unsigned long val)
+static inline void kvm_gpr_write(struct kvm_vcpu *vcpu, int reg, unsigned long val)
{
if (!is_64_bit_mode(vcpu))
val = (u32)val;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index d6b2a665b499..c9700dc88bb1 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -1679,7 +1679,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
bool handled = false;
u8 cpl;
- input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
+ input = (u64)kvm_gpr_read(vcpu, VCPU_REGS_RAX);
/* Hyper-V hypercalls get bit 31 set in EAX */
if ((input & 0x80000000) &&
--
2.51.0