lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Thu, 28 Sep 2023 12:55:53 +0200
From: Björn Töpel <bjorn@...nel.org>
To: Pu Lehui <pulehui@...weicloud.com>, bpf@...r.kernel.org,
 linux-riscv@...ts.infradead.org, netdev@...r.kernel.org
Cc: Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann
 <daniel@...earbox.net>, Andrii Nakryiko <andrii@...nel.org>, Martin KaFai
 Lau <martin.lau@...ux.dev>, Song Liu <song@...nel.org>, Yonghong Song
 <yhs@...com>, John Fastabend <john.fastabend@...il.com>, KP Singh
 <kpsingh@...nel.org>, Stanislav Fomichev <sdf@...gle.com>, Hao Luo
 <haoluo@...gle.com>, Jiri Olsa <jolsa@...nel.org>, Palmer Dabbelt
 <palmer@...belt.com>, Conor Dooley <conor@...nel.org>, Luke Nelson
 <luke.r.nels@...il.com>, Pu Lehui <pulehui@...wei.com>, Pu Lehui
 <pulehui@...weicloud.com>
Subject: Re: [PATCH bpf-next v2 3/6] riscv, bpf: Simplify sext and zext
 logics in branch instructions

Pu Lehui <pulehui@...weicloud.com> writes:

> From: Pu Lehui <pulehui@...wei.com>
>
> There are many extension helpers in the current branch instructions, and
> the implementation is a bit complicated. We simplify this logic through
> two simple extension helpers with an alternate register.
>
> Signed-off-by: Pu Lehui <pulehui@...wei.com>
> ---
>  arch/riscv/net/bpf_jit_comp64.c | 82 +++++++++++++--------------------
>  1 file changed, 31 insertions(+), 51 deletions(-)
>
> diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
> index 4a649e195..0c6ffe11a 100644
> --- a/arch/riscv/net/bpf_jit_comp64.c
> +++ b/arch/riscv/net/bpf_jit_comp64.c
> @@ -141,6 +141,19 @@ static bool in_auipc_jalr_range(s64 val)
>  		val < ((1L << 31) - (1L << 11));
>  }
>  
> +/* Modify rd pointer to alternate reg to avoid corrupting original reg */
> +static void emit_sextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
> +{
> +	emit_sextw(ra, *rd, ctx);
> +	*rd = ra;
> +}
> +
> +static void emit_zextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
> +{
> +	emit_zextw(ra, *rd, ctx);
> +	*rd = ra;
> +}
> +
>  /* Emit fixed-length instructions for address */
>  static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
>  {
> @@ -395,38 +408,6 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
>  		*rs = bpf_to_rv_reg(insn->src_reg, ctx);
>  }
>  
> -static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
> -{
> -	emit_mv(RV_REG_T2, *rd, ctx);
> -	emit_zextw(RV_REG_T2, RV_REG_T2, ctx);
> -	emit_mv(RV_REG_T1, *rs, ctx);
> -	emit_zextw(RV_REG_T1, RV_REG_T1, ctx);
> -	*rd = RV_REG_T2;
> -	*rs = RV_REG_T1;
> -}
> -
> -static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
> -{
> -	emit_sextw(RV_REG_T2, *rd, ctx);
> -	emit_sextw(RV_REG_T1, *rs, ctx);
> -	*rd = RV_REG_T2;
> -	*rs = RV_REG_T1;
> -}
> -
> -static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
> -{
> -	emit_mv(RV_REG_T2, *rd, ctx);
> -	emit_zextw(RV_REG_T2, RV_REG_T2, ctx);
> -	emit_zextw(RV_REG_T1, RV_REG_T2, ctx);
> -	*rd = RV_REG_T2;
> -}
> -
> -static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
> -{
> -	emit_sextw(RV_REG_T2, *rd, ctx);
> -	*rd = RV_REG_T2;
> -}
> -
>  static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
>  			      struct rv_jit_context *ctx)
>  {
> @@ -1372,22 +1353,22 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
>  		rvoff = rv_offset(i, off, ctx);
>  		if (!is64) {
>  			s = ctx->ninsns;
> -			if (is_signed_bpf_cond(BPF_OP(code)))
> -				emit_sext_32_rd_rs(&rd, &rs, ctx);
> -			else
> -				emit_zext_32_rd_rs(&rd, &rs, ctx);
> +			if (is_signed_bpf_cond(BPF_OP(code))) {
> +				emit_sextw_alt(&rs, RV_REG_T1, ctx);
> +				emit_sextw_alt(&rd, RV_REG_T2, ctx);
> +			} else {
> +				emit_zextw_alt(&rs, RV_REG_T1, ctx);
> +				emit_zextw_alt(&rd, RV_REG_T2, ctx);
> +			}
>  			e = ctx->ninsns;
> -

Please avoid changes like this.


>  			/* Adjust for extra insns */
>  			rvoff -= ninsns_rvoff(e - s);
>  		}
> -

Ditto.

>  		if (BPF_OP(code) == BPF_JSET) {
>  			/* Adjust for and */
>  			rvoff -= 4;
>  			emit_and(RV_REG_T1, rd, rs, ctx);
> -			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
> -				    ctx);
> +			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
>  		} else {
>  			emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
>  		}
> @@ -1416,21 +1397,20 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
>  	case BPF_JMP32 | BPF_JSLE | BPF_K:
>  		rvoff = rv_offset(i, off, ctx);
>  		s = ctx->ninsns;
> -		if (imm) {
> +		if (imm)
>  			emit_imm(RV_REG_T1, imm, ctx);
> -			rs = RV_REG_T1;
> -		} else {
> -			/* If imm is 0, simply use zero register. */
> -			rs = RV_REG_ZERO;
> -		}
> +		rs = imm ? RV_REG_T1 : RV_REG_ZERO;
>  		if (!is64) {
> -			if (is_signed_bpf_cond(BPF_OP(code)))
> -				emit_sext_32_rd(&rd, ctx);
> -			else
> -				emit_zext_32_rd_t1(&rd, ctx);
> +			if (is_signed_bpf_cond(BPF_OP(code))) {
> +				emit_sextw_alt(&rd, RV_REG_T2, ctx);
> +				/* rs has been sign extended */
> +			} else {
> +				emit_zextw_alt(&rd, RV_REG_T2, ctx);
> +				if (imm)
> +					emit_zextw(rs, rs, ctx);
> +			}
>  		}
>  		e = ctx->ninsns;
> -

Ditto.

Other than the formatting changes, it looks good!


Björn

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ