lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20190308210209.usq2rpedccz25va5@treble>
Date:   Fri, 8 Mar 2019 15:02:09 -0600
From:   Josh Poimboeuf <jpoimboe@...hat.com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     torvalds@...ux-foundation.org, tglx@...utronix.de, hpa@...or.com,
        julien.thierry@....com, will.deacon@....com, luto@...capital.net,
        mingo@...nel.org, catalin.marinas@....com, james.morse@....com,
        valentin.schneider@....com, brgerst@...il.com, luto@...nel.org,
        bp@...en8.de, dvlasenk@...hat.com, linux-kernel@...r.kernel.org,
        dvyukov@...gle.com, rostedt@...dmis.org
Subject: Re: [PATCH 18/20] objtool: Add UACCESS validation

On Thu, Mar 07, 2019 at 12:45:29PM +0100, Peter Zijlstra wrote:
> --- a/tools/objtool/arch/x86/decode.c
> +++ b/tools/objtool/arch/x86/decode.c
> @@ -369,7 +369,19 @@ int arch_decode_instruction(struct elf *
>  
>  	case 0x0f:
>  
> -		if (op2 >= 0x80 && op2 <= 0x8f) {
> +		if (op2 == 0x01) {
> +
> +			if (modrm == 0xca) {
> +
> +				*type = INSN_CLAC;
> +
> +			} else if (modrm == 0xcb) {
> +
> +				*type = INSN_STAC;
> +
> +			}

Style nit: no need for all those braces and blank lines.

> --- a/tools/objtool/check.c
> +++ b/tools/objtool/check.c
> @@ -442,6 +442,81 @@ static void add_ignores(struct objtool_f
>  	}
>  }
>  
> +static const char *uaccess_safe_builtin[] = {
> +	/* KASAN */

A short comment would be good here, briefly explaining the criteria for
adding a function to this list.

> +	"memset_orig", /* XXX why not memset_erms */
> +	"__memset",
> +	"kasan_poison_shadow",
> +	"kasan_unpoison_shadow",
> +	"__asan_poison_stack_memory",
> +	"__asan_unpoison_stack_memory",
> +	"kasan_report",
> +	"check_memory_region",
> +	/* KASAN out-of-line */
> +	"__asan_loadN_noabort",
> +	"__asan_load1_noabort",
> +	"__asan_load2_noabort",
> +	"__asan_load4_noabort",
> +	"__asan_load8_noabort",
> +	"__asan_load16_noabort",
> +	"__asan_storeN_noabort",
> +	"__asan_store1_noabort",
> +	"__asan_store2_noabort",
> +	"__asan_store4_noabort",
> +	"__asan_store8_noabort",
> +	"__asan_store16_noabort",
> +	/* KASAN in-line */
> +	"__asan_report_load_n_noabort",
> +	"__asan_report_load1_noabort",
> +	"__asan_report_load2_noabort",
> +	"__asan_report_load4_noabort",
> +	"__asan_report_load8_noabort",
> +	"__asan_report_load16_noabort",
> +	"__asan_report_store_n_noabort",
> +	"__asan_report_store1_noabort",
> +	"__asan_report_store2_noabort",
> +	"__asan_report_store4_noabort",
> +	"__asan_report_store8_noabort",
> +	"__asan_report_store16_noabort",
> +	/* KCOV */
> +	"write_comp_data",
> +	"__sanitizer_cov_trace_pc",
> +	"__sanitizer_cov_trace_const_cmp1",
> +	"__sanitizer_cov_trace_const_cmp2",
> +	"__sanitizer_cov_trace_const_cmp4",
> +	"__sanitizer_cov_trace_const_cmp8",
> +	"__sanitizer_cov_trace_cmp1",
> +	"__sanitizer_cov_trace_cmp2",
> +	"__sanitizer_cov_trace_cmp4",
> +	"__sanitizer_cov_trace_cmp8",
> +	/* UBSAN */
> +	"ubsan_type_mismatch_common",
> +	"__ubsan_handle_type_mismatch",
> +	"__ubsan_handle_type_mismatch_v1",
> +	/* misc */
> +	"csum_partial_copy_generic",
> +	"__memcpy_mcsafe",
> +	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
> +	NULL
> +};
> +
> +static void add_uaccess_safe(struct objtool_file *file)
> +{
> +	struct symbol *func;
> +	const char **name;
> +
> +	if (!uaccess)
> +		return;
> +
> +	for (name = uaccess_safe_builtin; *name; name++) {
> +		func = find_symbol_by_name(file->elf, *name);
> +		if (!func)
> +			continue;

This won't work if the function name changes due to IPA optimizations.
I assume these are all global functions so maybe it's fine?

> @@ -1914,6 +2008,16 @@ static int validate_branch(struct objtoo
>  		switch (insn->type) {
>  
>  		case INSN_RETURN:
> +			if (state.uaccess && !func_uaccess_safe(func)) {
> +				WARN_FUNC("return with UACCESS enabled", sec, insn->offset);
> +				return 1;
> +			}
> +
> +			if (!state.uaccess && func_uaccess_safe(func)) {
> +				WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", sec, insn->offset);
> +				return 1;
> +			}
> +
>  			if (func && has_modified_stack_frame(&state)) {
>  				WARN_FUNC("return with modified stack frame",
>  					  sec, insn->offset);
> @@ -1929,17 +2033,32 @@ static int validate_branch(struct objtoo
>  			return 0;
>  
>  		case INSN_CALL:
> -			if (is_fentry_call(insn))
> -				break;
> +		case INSN_CALL_DYNAMIC:
> +do_call:
> +			if (state.uaccess && !func_uaccess_safe(insn->call_dest)) {
> +				WARN_FUNC("call to %s() with UACCESS enabled",
> +					  sec, insn->offset, insn_dest_name(insn));
> +				return 1;
> +			}
>  
> -			ret = dead_end_function(file, insn->call_dest);
> -			if (ret == 1)
> +			if (insn->type == INSN_JUMP_UNCONDITIONAL ||
> +			    insn->type == INSN_JUMP_DYNAMIC)
>  				return 0;
> -			if (ret == -1)
> -				return 1;
>  
> -			/* fallthrough */
> -		case INSN_CALL_DYNAMIC:
> +			if (insn->type == INSN_JUMP_CONDITIONAL)
> +				break;
> +
> +			if (insn->type == INSN_CALL) {
> +				if (is_fentry_call(insn))
> +					break;
> +
> +				ret = dead_end_function(file, insn->call_dest);
> +				if (ret == 1)
> +					return 0;
> +				if (ret == -1)
> +					return 1;
> +			}
> +
>  			if (!no_fp && func && !has_valid_stack_frame(&state)) {
>  				WARN_FUNC("call without frame pointer save/setup",
>  					  sec, insn->offset);
> @@ -1956,6 +2075,8 @@ static int validate_branch(struct objtoo
>  							sec, insn->offset);
>  					return 1;
>  				}
> +				goto do_call;
> +

These gotos make my head spin.  Again, I would much prefer a small amount
of code duplication over this.

> +++ b/tools/objtool/special.c
> @@ -42,6 +42,7 @@
>  #define ALT_NEW_LEN_OFFSET	11
>  
>  #define X86_FEATURE_POPCNT (4*32+23)
> +#define X86_FEATURE_SMAP   (9*32+20)
>  
>  struct special_entry {
>  	const char *sec;
> @@ -107,8 +108,15 @@ static int get_alt_entry(struct elf *elf
>  		 * It has been requested that we don't validate the !POPCNT
>  		 * feature path which is a "very very small percentage of
>  		 * machines".
> +		 *
> +		 * Also, unconditionally enable SMAP; this avoids seeing paths
> +		 * that pass through the STAC alternative and through the CLAC
> +		 * NOPs.

Why is this a problem?

> +		 *
> +		 * XXX: We could do this for all binary NOP/single-INSN
> +		 * alternatives.

Same question here.

>  		 */
> -		if (feature == X86_FEATURE_POPCNT)
> +		if (feature == X86_FEATURE_POPCNT || feature == X86_FEATURE_SMAP)
>  			alt->skip_orig = true;
>  	}
>  
> 
> 

-- 
Josh

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ