Message-ID: <ef02ff0d-10b0-47db-93dc-3cb9f6db140e@rivosinc.com>
Date:   Mon, 9 Oct 2023 15:02:52 +0200
From:   Clément Léger <cleger@...osinc.com>
To:     Björn Töpel <bjorn@...nel.org>,
        Paul Walmsley <paul.walmsley@...ive.com>,
        Palmer Dabbelt <palmer@...belt.com>,
        Albert Ou <aou@...s.berkeley.edu>
Cc:     Atish Patra <atishp@...osinc.com>,
        Andrew Jones <ajones@...tanamicro.com>,
        Evan Green <evan@...osinc.com>,
        Björn Topel <bjorn@...osinc.com>,
        linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
        Ron Minnich <rminnich@...il.com>,
        Daniel Maslowski <cyrevolt@...glemail.com>,
        Conor Dooley <conor@...nel.org>
Subject: Re: [PATCH v2 2/8] riscv: add support for misaligned trap handling in
 S-mode



On 04/10/2023 19:00, Björn Töpel wrote:
> Clément Léger <cleger@...osinc.com> writes:
> 
>> Misalignment trap handling is only supported for M-mode and uses direct
>> accesses to user memory. In S-mode, when handling a usermode fault, this
>> requires using the get_user()/put_user() accessors. Implement
>> load_u8(), store_u8() and get_insn() using these accessors for
>> userspace and direct text access for the kernel.
>>
>> Signed-off-by: Clément Léger <cleger@...osinc.com>
>> ---
>>  arch/riscv/Kconfig                    |   8 ++
>>  arch/riscv/include/asm/entry-common.h |  14 +++
>>  arch/riscv/kernel/Makefile            |   2 +-
>>  arch/riscv/kernel/traps.c             |   9 --
>>  arch/riscv/kernel/traps_misaligned.c  | 119 +++++++++++++++++++++++---
>>  5 files changed, 129 insertions(+), 23 deletions(-)
>>
>> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
>> index d607ab0f7c6d..6e167358a897 100644
>> --- a/arch/riscv/Kconfig
>> +++ b/arch/riscv/Kconfig
>> @@ -636,6 +636,14 @@ config THREAD_SIZE_ORDER
>>  	  Specify the Pages of thread stack size (from 4KB to 64KB), which also
>>  	  affects irq stack size, which is equal to thread stack size.
>>  
>> +config RISCV_MISALIGNED
>> +	bool "Support misaligned load/store traps for kernel and userspace"
>> +	default y
>> +	help
>> +	  Say Y here if you want the kernel to embed support for misaligned
>> +	  load/store for both kernel and userspace. When disabled, misaligned
>> +	  accesses will generate a SIGBUS in userspace and panic the kernel.
>> +
>>  endmenu # "Platform type"
>>  
>>  menu "Kernel features"
>> diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
>> index 6e4dee49d84b..7ab5e34318c8 100644
>> --- a/arch/riscv/include/asm/entry-common.h
>> +++ b/arch/riscv/include/asm/entry-common.h
>> @@ -8,4 +8,18 @@
>>  void handle_page_fault(struct pt_regs *regs);
>>  void handle_break(struct pt_regs *regs);
>>  
>> +#ifdef CONFIG_RISCV_MISALIGNED
>> +int handle_misaligned_load(struct pt_regs *regs);
>> +int handle_misaligned_store(struct pt_regs *regs);
>> +#else
>> +static inline int handle_misaligned_load(struct pt_regs *regs)
>> +{
>> +	return -1;
>> +}
>> +static inline int handle_misaligned_store(struct pt_regs *regs)
>> +{
>> +	return -1;
>> +}
>> +#endif
>> +
>>  #endif /* _ASM_RISCV_ENTRY_COMMON_H */
>> diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
>> index 95cf25d48405..0d874fb24b51 100644
>> --- a/arch/riscv/kernel/Makefile
>> +++ b/arch/riscv/kernel/Makefile
>> @@ -59,7 +59,7 @@ obj-y	+= patch.o
>>  obj-y	+= probes/
>>  obj-$(CONFIG_MMU) += vdso.o vdso/
>>  
>> -obj-$(CONFIG_RISCV_M_MODE)	+= traps_misaligned.o
>> +obj-$(CONFIG_RISCV_MISALIGNED)	+= traps_misaligned.o
>>  obj-$(CONFIG_FPU)		+= fpu.o
>>  obj-$(CONFIG_RISCV_ISA_V)	+= vector.o
>>  obj-$(CONFIG_SMP)		+= smpboot.o
>> diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
>> index 19807c4d3805..d69779e4b967 100644
>> --- a/arch/riscv/kernel/traps.c
>> +++ b/arch/riscv/kernel/traps.c
>> @@ -179,14 +179,6 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
>>  
>>  DO_ERROR_INFO(do_trap_load_fault,
>>  	SIGSEGV, SEGV_ACCERR, "load access fault");
>> -#ifndef CONFIG_RISCV_M_MODE
>> -DO_ERROR_INFO(do_trap_load_misaligned,
>> -	SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
>> -DO_ERROR_INFO(do_trap_store_misaligned,
>> -	SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
>> -#else
>> -int handle_misaligned_load(struct pt_regs *regs);
>> -int handle_misaligned_store(struct pt_regs *regs);
>>  
>>  asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
>>  {
>> @@ -229,7 +221,6 @@ asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs
>>  		irqentry_nmi_exit(regs, state);
>>  	}
>>  }
>> -#endif
>>  DO_ERROR_INFO(do_trap_store_fault,
>>  	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
>>  DO_ERROR_INFO(do_trap_ecall_s,
>> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
>> index e7bfb33089c1..9daed7d756ae 100644
>> --- a/arch/riscv/kernel/traps_misaligned.c
>> +++ b/arch/riscv/kernel/traps_misaligned.c
>> @@ -12,6 +12,7 @@
>>  #include <asm/processor.h>
>>  #include <asm/ptrace.h>
>>  #include <asm/csr.h>
>> +#include <asm/entry-common.h>
>>  
>>  #define INSN_MATCH_LB			0x3
>>  #define INSN_MASK_LB			0x707f
>> @@ -151,21 +152,25 @@
>>  #define PRECISION_S 0
>>  #define PRECISION_D 1
>>  
>> -static inline u8 load_u8(const u8 *addr)
>> +#ifdef CONFIG_RISCV_M_MODE
>> +static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
>>  {
>>  	u8 val;
>>  
>>  	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
>> +	*r_val = val;
>>  
>> -	return val;
>> +	return 0;
>>  }
>>  
>> -static inline void store_u8(u8 *addr, u8 val)
>> +static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
>>  {
>>  	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
>> +
>> +	return 0;
>>  }
>>  
>> -static inline ulong get_insn(ulong mepc)
>> +static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
>>  {
>>  	register ulong __mepc asm ("a2") = mepc;
>>  	ulong val, rvc_mask = 3, tmp;
>> @@ -194,9 +199,87 @@ static inline ulong get_insn(ulong mepc)
>>  	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
>>  	  [xlen_minus_16] "i" (XLEN_MINUS_16));
>>  
>> -	return val;
>> +	*r_insn = val;
>> +
>> +	return 0;
>> +}
>> +#else
>> +static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
>> +{
>> +	if (user_mode(regs)) {
>> +		return __get_user(*r_val, addr);
>> +	} else {
>> +		*r_val = *addr;
>> +		return 0;
>> +	}
> 
> One nit (...well two) ;-)
> 
> If you're respinning, I'd get rid of the "inlines", and personally I
> think early exit is easier to read. Applies to the whole patch.

Noted, I'll fix that (checkpatch suggested it, so that makes sense).
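
For example, store_u8() in the S-mode path would then end up looking
roughly like this (just a sketch, assuming it mirrors load_u8() with
__put_user() for the user case):

static int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
	/* assumption: mirrors load_u8(), going through __put_user() for userspace */
	if (user_mode(regs))
		return __put_user(val, addr);

	/* kernel pointers can be written directly */
	*addr = val;
	return 0;
}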

Clément

> 
>   | {
>   | 	if (user_mode(regs))
>   | 		return __get_user(*r_val, addr);
>   | 
>   | 	*r_val = *addr;
>   | 	return 0;
>   | }
> 
> 
> Regardless if you change or not,
> 
> Reviewed-by: Björn Töpel <bjorn@...osinc.com>
