Message-Id: <20240226143630.33643-42-jiangshanlai@gmail.com>
Date: Mon, 26 Feb 2024 22:35:58 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Lai Jiangshan <jiangshan.ljs@...group.com>,
Hou Wenlong <houwenlong.hwl@...group.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Peter Zijlstra <peterz@...radead.org>,
Sean Christopherson <seanjc@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>,
Ingo Molnar <mingo@...hat.com>,
kvm@...r.kernel.org,
Paolo Bonzini <pbonzini@...hat.com>,
x86@...nel.org,
Kees Cook <keescook@...omium.org>,
Juergen Gross <jgross@...e.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [RFC PATCH 41/73] KVM: x86/PVM: Allow loading guest TLS into the host GDT
From: Lai Jiangshan <jiangshan.ljs@...group.com>
32-bit processes need TLS for libc, so introduce a hypercall to load the
guest TLS descriptors into the host GDT. The guest TLS descriptors are
validated with the same checks as tls_desc_okay() in
arch/x86/kernel/tls.c.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@...group.com>
---
arch/x86/kvm/pvm/pvm.c | 81 ++++++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/pvm/pvm.h | 1 +
2 files changed, 82 insertions(+)
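For context, a PVM guest kernel is expected to issue this hypercall from
its TLS load path (i.e. where native_load_tls() would otherwise write the
GDT directly). The sketch below is illustrative only and not part of this
patch: pvm_hypercall3() is an assumed guest-side helper, and the only
convention taken from the patch is that the three TLS descriptors are
passed by value in a0..a2.

#include <linux/build_bug.h>		/* BUILD_BUG_ON() */
#include <linux/string.h>		/* memcpy() */
#include <asm/desc_defs.h>		/* struct desc_struct */
#include <asm/segment.h>		/* GDT_ENTRY_TLS_ENTRIES */

/* Assumed guest-side hypercall helper, not defined by this patch. */
long pvm_hypercall3(unsigned int nr, unsigned long a0, unsigned long a1,
		    unsigned long a2);

/*
 * Illustrative guest-side sketch: pack the three struct desc_struct TLS
 * entries into the three hypercall arguments, matching what
 * handle_hc_load_tls() below copies back into pvm->tls_array[].
 */
static void example_pvm_load_tls(const struct desc_struct *tls)
{
	unsigned long d[3];

	BUILD_BUG_ON(GDT_ENTRY_TLS_ENTRIES != 3);
	BUILD_BUG_ON(sizeof(d) != 3 * sizeof(struct desc_struct));

	memcpy(d, tls, sizeof(d));
	pvm_hypercall3(PVM_HC_LOAD_TLS, d[0], d[1], d[2]);
}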
diff --git a/arch/x86/kvm/pvm/pvm.c b/arch/x86/kvm/pvm/pvm.c
index ee55e99fb204..e68052f33186 100644
--- a/arch/x86/kvm/pvm/pvm.c
+++ b/arch/x86/kvm/pvm/pvm.c
@@ -281,6 +281,26 @@ static void segments_save_guest_and_switch_to_host(struct vcpu_pvm *pvm)
wrmsrl(MSR_FS_BASE, current->thread.fsbase);
}
+/*
+ * Load guest TLS entries into the GDT.
+ */
+static inline void host_gdt_set_tls(struct vcpu_pvm *pvm)
+{
+ struct desc_struct *gdt = get_current_gdt_rw();
+ unsigned int i;
+
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = pvm->tls_array[i];
+}
+
+/*
+ * Load current task's TLS into the GDT.
+ */
+static inline void host_gdt_restore_tls(void)
+{
+ native_load_tls(&current->thread, smp_processor_id());
+}
+
static void pvm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
struct vcpu_pvm *pvm = to_pvm(vcpu);
@@ -304,6 +324,8 @@ static void pvm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
native_tss_invalidate_io_bitmap();
#endif
+ host_gdt_set_tls(pvm);
+
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/* PVM doesn't support LDT. */
if (unlikely(current->mm->context.ldt))
@@ -334,6 +356,8 @@ static void pvm_prepare_switch_to_host(struct vcpu_pvm *pvm)
kvm_load_ldt(GDT_ENTRY_LDT*8);
#endif
+ host_gdt_restore_tls();
+
segments_save_guest_and_switch_to_host(pvm);
pvm->loaded_cpu_state = 0;
}
@@ -1629,6 +1653,60 @@ static int handle_hc_wrmsr(struct kvm_vcpu *vcpu, u32 index, u64 value)
return 1;
}
+/*
+ * Check whether a guest TLS descriptor is allowed to be installed in the
+ * host GDT.  Same logic as tls_desc_okay() in arch/x86/kernel/tls.c.
+ */
+static bool tls_desc_okay(struct desc_struct *desc)
+{
+ /* Only allow present segments. */
+ if (!desc->p)
+ return false;
+
+ /* Only allow data segments. */
+ if (desc->type & (1 << 3))
+ return false;
+
+ /* Only allow 32-bit data segments. */
+ if (!desc->d)
+ return false;
+
+ return true;
+}
+
+/*
+ * Hypercall: PVM_HC_LOAD_TLS
+ * Load guest TLS descriptors into the host GDT.
+ */
+static int handle_hc_load_tls(struct kvm_vcpu *vcpu, unsigned long tls_desc_0,
+ unsigned long tls_desc_1, unsigned long tls_desc_2)
+{
+ struct vcpu_pvm *pvm = to_pvm(vcpu);
+ unsigned long *tls_array = (unsigned long *)&pvm->tls_array[0];
+ int i;
+
+ tls_array[0] = tls_desc_0;
+ tls_array[1] = tls_desc_1;
+ tls_array[2] = tls_desc_2;
+
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) {
+ if (!tls_desc_okay(&pvm->tls_array[i])) {
+ pvm->tls_array[i] = (struct desc_struct){0};
+ continue;
+ }
+ /* Standardize TLS descs, same as fill_ldt(). */
+ pvm->tls_array[i].type |= 1;
+ pvm->tls_array[i].s = 1;
+ pvm->tls_array[i].dpl = 0x3;
+ pvm->tls_array[i].l = 0;
+ }
+
+ preempt_disable();
+ if (pvm->loaded_cpu_state)
+ host_gdt_set_tls(pvm);
+ preempt_enable();
+
+ return 1;
+}
+
static int handle_kvm_hypercall(struct kvm_vcpu *vcpu)
{
int r;
@@ -1679,6 +1757,8 @@ static int handle_exit_syscall(struct kvm_vcpu *vcpu)
return handle_hc_rdmsr(vcpu, a0);
case PVM_HC_WRMSR:
return handle_hc_wrmsr(vcpu, a0, a1);
+ case PVM_HC_LOAD_TLS:
+ return handle_hc_load_tls(vcpu, a0, a1, a2);
default:
return handle_kvm_hypercall(vcpu);
}
@@ -2296,6 +2376,7 @@ static void pvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
pvm->hw_ss = __USER_DS;
pvm->int_shadow = 0;
pvm->nmi_mask = false;
+ memset(&pvm->tls_array[0], 0, sizeof(pvm->tls_array));
pvm->msr_vcpu_struct = 0;
pvm->msr_supervisor_rsp = 0;
diff --git a/arch/x86/kvm/pvm/pvm.h b/arch/x86/kvm/pvm/pvm.h
index 31060831e009..f28ab0b48f40 100644
--- a/arch/x86/kvm/pvm/pvm.h
+++ b/arch/x86/kvm/pvm/pvm.h
@@ -98,6 +98,7 @@ struct vcpu_pvm {
struct kvm_segment segments[NR_VCPU_SREG];
struct desc_ptr idt_ptr;
struct desc_ptr gdt_ptr;
+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
};
struct kvm_pvm {
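
For reference, the kind of descriptor this hypercall is meant to carry: a
typical glibc-style 32-bit TLS slot handed to set_thread_area(). The values
below are illustrative only; once converted by fill_ldt() they yield a
present, 32-bit data segment with DPL 3, which tls_desc_okay() above
accepts, whereas a code segment (type bit 3 set) or a non-32-bit segment
would be zeroed out instead. In a PVM guest, the guest kernel would
presumably forward the resulting descriptor to the host via
PVM_HC_LOAD_TLS rather than writing its own GDT.

#include <asm/ldt.h>		/* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative 32-bit guest userspace example, not part of this patch. */
static int example_set_tls(unsigned int base)
{
	struct user_desc tls = {
		.entry_number	 = -1,		/* let the kernel pick a slot */
		.base_addr	 = base,
		.limit		 = 0xfffff,
		.seg_32bit	 = 1,		/* D=1, passes the !desc->d check */
		.contents	 = 0,		/* data, so type bit 3 stays clear */
		.read_exec_only	 = 0,
		.limit_in_pages	 = 1,
		.seg_not_present = 0,		/* P=1, passes the !desc->p check */
		.useable	 = 1,
	};

	return syscall(SYS_set_thread_area, &tls);
}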
--
2.19.1.6.gb485710b