Message-ID: <725ca48f-8194-658e-0296-65d4368803b5@intel.com>
Date:   Fri, 3 Apr 2020 00:20:08 +0800
From:   Xiaoyao Li <xiaoyao.li@...el.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Thomas Gleixner <tglx@...utronix.de>
Cc:     LKML <linux-kernel@...r.kernel.org>, x86@...nel.org,
        "Kenneth R. Crudup" <kenny@...ix.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Jessica Yu <jeyu@...nel.org>,
        Fenghua Yu <fenghua.yu@...el.com>,
        Nadav Amit <namit@...are.com>,
        Thomas Hellstrom <thellstrom@...are.com>,
        Sean Christopherson <sean.j.christopherson@...el.com>,
        Tony Luck <tony.luck@...el.com>,
        Steven Rostedt <rostedt@...dmis.org>
Subject: Re: [patch v2 1/2] x86,module: Detect VMX modules and disable
 Split-Lock-Detect

On 4/2/2020 11:23 PM, Peter Zijlstra wrote:
> 
> I picked VMXOFF (which also appears in vmmon.ko) instead of VMXON
> because the latter takes an argument and is therefore more difficult
> to decode.
> 
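(For reference, the asymmetry in concrete bytes -- opcodes quoted from memory
of the SDM, so please double-check; the array names are only for illustration:

#include <linux/types.h>

static const u8 vmlaunch_pat[] = { 0x0f, 0x01, 0xc2 };             /* VMLAUNCH */
static const u8 vmxoff_pat[]   = { 0x0f, 0x01, 0xc4 };             /* VMXOFF */
/* VMXON (F3 0F C7 /6 m64) has no single pattern; two of its many forms: */
static const u8 vmxon_rax[]    = { 0xf3, 0x0f, 0xc7, 0x30 };       /* vmxon (%rax) */
static const u8 vmxon_disp8[]  = { 0xf3, 0x0f, 0xc7, 0x75, 0x10 }; /* vmxon 0x10(%rbp) */

VMLAUNCH/VMXOFF are always the same three operand-less bytes, so a memcmp() at
a decoded instruction boundary is enough, while VMXON's ModRM/SIB/displacement
bytes make its length depend on the addressing mode.)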
> ---
> Subject: x86,module: Detect VMX modules and disable Split-Lock-Detect
> From: Peter Zijlstra <peterz@...radead.org>
> Date: Thu, 02 Apr 2020 14:32:59 +0200
> 
> It turns out that with Split-Lock-Detect enabled (the default), any VMX
> hypervisor needs at least a little modification so that it does not
> blindly inject the #AC into a guest that is not ready for it.
> 
> Since there is no telling which module implements a hypervisor, scan the
> module text and look for the VMLAUNCH/VMXOFF instructions. If found, the
> module is assumed to be a hypervisor of some sort and SLD is disabled.
> 
> Hypervisors that have been modified and are known to work correctly
> can add:
> 
>    MODULE_INFO(sld_safe, "Y");
> 
> to explicitly tell the module loader they're good.
> 
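(FWIW, a minimal sketch of what an opted-in module would carry -- the module
and function names here are made up for illustration; only
MODULE_INFO(sld_safe, "Y") is from the patch:

#include <linux/module.h>
#include <linux/init.h>

MODULE_INFO(sld_safe, "Y");	/* "we handle #AC-for-split-lock ourselves" */
MODULE_DESCRIPTION("Example SLD-aware VMX module (illustration only)");
MODULE_LICENSE("GPL");

static int __init example_vmx_init(void)
{
	return 0;
}
module_init(example_vmx_init);

static void __exit example_vmx_exit(void)
{
}
module_exit(example_vmx_exit);

If I remember kmod's modinfo right, "modinfo -F sld_safe example_vmx.ko" would
then print "Y"; the loader side below only checks that the field is present.)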
> NOTE: it is unfortunate that struct load_info is not available to the
>        arch module code; this means CONFIG_CPU_SUP_INTEL gunk is needed
>        in generic code.
> 
> NOTE: while we can 'trivially' fix KVM, we're still stuck with stuff
>        like VMware and VirtualBox doing their own thing.
> 
> Reported-by: "Kenneth R. Crudup" <kenny@...ix.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
>   arch/x86/include/asm/cpu.h  |    2 ++
>   arch/x86/kernel/cpu/intel.c |   41 ++++++++++++++++++++++++++++++++++++++++-
>   arch/x86/kernel/module.c    |    6 ++++++
>   include/linux/module.h      |    4 ++++
>   kernel/module.c             |    5 +++++
>   5 files changed, 57 insertions(+), 1 deletion(-)
> 
> --- a/arch/x86/include/asm/cpu.h
> +++ b/arch/x86/include/asm/cpu.h
> @@ -44,6 +44,7 @@ unsigned int x86_stepping(unsigned int s
>   extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
>   extern void switch_to_sld(unsigned long tifn);
>   extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
> +extern void split_lock_validate_module_text(struct module *me, void *text, void *text_end);
>   #else
>   static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
>   static inline void switch_to_sld(unsigned long tifn) {}
> @@ -51,5 +52,6 @@ static inline bool handle_user_split_loc
>   {
>   	return false;
>   }
> +static inline void split_lock_validate_module_text(struct module *me, void *text, void *text_end) {}
>   #endif
>   #endif /* _ASM_X86_CPU_H */
> --- a/arch/x86/kernel/cpu/intel.c
> +++ b/arch/x86/kernel/cpu/intel.c
> @@ -9,6 +9,7 @@
>   #include <linux/thread_info.h>
>   #include <linux/init.h>
>   #include <linux/uaccess.h>
> +#include <linux/module.h>
>   
>   #include <asm/cpufeature.h>
>   #include <asm/pgtable.h>
> @@ -21,6 +22,7 @@
>   #include <asm/elf.h>
>   #include <asm/cpu_device_id.h>
>   #include <asm/cmdline.h>
> +#include <asm/insn.h>
>   
>   #ifdef CONFIG_X86_64
>   #include <linux/topology.h>
> @@ -1055,12 +1057,49 @@ static void sld_update_msr(bool on)
>   {
>   	u64 test_ctrl_val = msr_test_ctrl_cache;
>   
> -	if (on)
> +	if (on && (sld_state != sld_off))
>   		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
>   
>   	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
>   }
>   
> +static void sld_remote_kill(void *arg)
> +{
> +	sld_update_msr(false);
> +}
> +
> +void split_lock_validate_module_text(struct module *me, void *text, void *text_end)
> +{
> +	u8 vmxoff[] = { 0x0f, 0x01, 0xc4 };
> +	u8 vmlaunch[] = { 0x0f, 0x01, 0xc2 };
> +	struct insn insn;
> +
> +	if (sld_state == sld_off)
> +		return;
> +
> +	while (text < text_end) {
> +		kernel_insn_init(&insn, text, text_end - text);
> +		insn_get_length(&insn);
> +
> +		if (WARN_ON_ONCE(!insn_complete(&insn)))
> +			break;
> +
> +		if (insn.length == 3 &&
> +		    (!memcmp(text, vmlaunch, sizeof(vmlaunch)) ||
> +		     !memcmp(text, vmxoff, sizeof(vmxoff))))
> +				goto bad_module;
> +
> +		text += insn.length;
> +	}
> +
> +	return;
> +
> +bad_module:
> +	pr_warn("disabled due to VMX in module: %s\n", me->name);
> +	sld_state = sld_off;

Shouldn't we remove the __ro_after_init from sld_state?

And shouldn't we also clear the X86_FEATURE_SPLIT_LOCK_DETECT flag?
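
Something like this is what I have in mind for the second point -- only a
rough sketch on top of this patch, untested, and assuming clear_cpu_cap()
from cpuid-deps.c is usable here:

static void sld_remote_kill(void *arg)
{
	sld_update_msr(false);
	/*
	 * Sketch: besides switching the MSR bit off, also drop the
	 * per-CPU feature bit so cpu_has() checks and /proc/cpuinfo
	 * agree with the MSR state.  Runs via on_each_cpu(), so
	 * preemption is off and this_cpu_ptr() is safe.
	 */
	clear_cpu_cap(this_cpu_ptr(&cpu_info), X86_FEATURE_SPLIT_LOCK_DETECT);
}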

> +	on_each_cpu(sld_remote_kill, NULL, 1);
> +}
> +
>   static void split_lock_init(void)
>   {
>   	split_lock_verify_msr(sld_state != sld_off);
> --- a/arch/x86/kernel/module.c
> +++ b/arch/x86/kernel/module.c
> @@ -24,6 +24,7 @@
>   #include <asm/pgtable.h>
>   #include <asm/setup.h>
>   #include <asm/unwind.h>
> +#include <asm/cpu.h>
>   
>   #if 0
>   #define DEBUGP(fmt, ...)				\
> @@ -253,6 +254,11 @@ int module_finalize(const Elf_Ehdr *hdr,
>   					    tseg, tseg + text->sh_size);
>   	}
>   
> +	if (text && !me->sld_safe) {
> +		void *tseg = (void *)text->sh_addr;
> +		split_lock_validate_module_text(me, tseg, tseg + text->sh_size);
> +	}
> +
>   	if (para) {
>   		void *pseg = (void *)para->sh_addr;
>   		apply_paravirt(pseg, pseg + para->sh_size);
> --- a/include/linux/module.h
> +++ b/include/linux/module.h
> @@ -407,6 +407,10 @@ struct module {
>   	bool sig_ok;
>   #endif
>   
> +#ifdef CONFIG_CPU_SUP_INTEL
> +	bool sld_safe;
> +#endif
> +
>   	bool async_probe_requested;
>   
>   	/* symbols that will be GPL-only in the near future. */
> --- a/kernel/module.c
> +++ b/kernel/module.c
> @@ -3096,6 +3096,11 @@ static int check_modinfo(struct module *
>   			"is unknown, you have been warned.\n", mod->name);
>   	}
>   
> +#ifdef CONFIG_CPU_SUP_INTEL
> +	if (get_modinfo(info, "sld_safe"))
> +		mod->sld_safe = true;
> +#endif
> +
>   	err = check_modinfo_livepatch(mod, info);
>   	if (err)
>   		return err;
> 
