lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening linux-cve-announce PHC | |
Open Source and information security mailing list archives
| ||
|
Message-Id: <E1ieH2l-0004io-VM@rmk-PC.armlinux.org.uk> Date: Mon, 09 Dec 2019 11:17:35 +0000 From: Russell King <rmk+kernel@...linux.org.uk> To: Daniel Borkmann <daniel@...earbox.net> Cc: Shubham Bansal <illusionist.neo@...il.com>, Alexei Starovoitov <ast@...nel.org>, Martin KaFai Lau <kafai@...com>, Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>, Andrii Nakryiko <andriin@...com>, netdev@...r.kernel.org, bpf@...r.kernel.org, linux-arm-kernel@...ts.infradead.org Subject: [PATCH] ARM: net: bpf: improve endian conversion Make the endian conversion function easier to read by moving it out of the big switch, and avoid doing anything if we're requested to convert from a 64-bit LE value (we're LE anyway here.) Signed-off-by: Russell King <rmk+kernel@...linux.org.uk> --- arch/arm/net/bpf_jit_32.c | 91 +++++++++++++++++++++------------------ 1 file changed, 50 insertions(+), 41 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index cc29869d12a3..646ab5785ca4 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1245,6 +1245,55 @@ static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) #endif } +static void emit_a32_endian(const s8 dst[], u8 code, s32 bits, + struct jit_ctx *ctx) +{ + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *tmp2 = bpf2a32[TMP_REG_2]; + const s8 *rd; + + /* Converting from LE and 64-bit value is a no-op. 
*/ + if (code == BPF_FROM_LE && bits == 64) + return; + + rd = arm_bpf_get_reg64(dst, tmp, ctx); + + if (code != BPF_FROM_LE) { + /* endian swap */ + switch (bits) { + case 16: + emit_rev16(rd[1], rd[1], ctx); + break; + case 32: + emit_rev32(rd[1], rd[1], ctx); + break; + case 64: + emit_rev32(ARM_LR, rd[1], ctx); + emit_rev32(rd[1], rd[0], ctx); + emit(ARM_MOV_R(rd[0], ARM_LR), ctx); + break; + } + } + + /* zero-extend size to 64-bit */ + switch (bits) { + case 16: +#if __LINUX_ARM_ARCH__ < 6 + emit_a32_mov_i(tmp2[1], 0xffff, ctx); + emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx); +#else /* ARMv6+ */ + emit(ARM_UXTH(rd[1], rd[1]), ctx); +#endif + /* FALLTHROUGH */ + case 32: + if (!ctx->prog->aux->verifier_zext) + emit(ARM_MOV_I(rd[0], 0), ctx); + break; + } + + arm_bpf_put_reg64(dst, rd, ctx); +} + // push the scratch stack register on top of the stack static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx) { @@ -1523,47 +1572,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* dst = htobe(dst) */ case BPF_ALU | BPF_END | BPF_FROM_LE: case BPF_ALU | BPF_END | BPF_FROM_BE: - rd = arm_bpf_get_reg64(dst, tmp, ctx); - if (BPF_SRC(code) == BPF_FROM_LE) - goto emit_bswap_uxt; - switch (imm) { - case 16: - emit_rev16(rd[1], rd[1], ctx); - goto emit_bswap_uxt; - case 32: - emit_rev32(rd[1], rd[1], ctx); - goto emit_bswap_uxt; - case 64: - emit_rev32(ARM_LR, rd[1], ctx); - emit_rev32(rd[1], rd[0], ctx); - emit(ARM_MOV_R(rd[0], ARM_LR), ctx); - break; - } - goto exit; -emit_bswap_uxt: - switch (imm) { - case 16: - /* zero-extend 16 bits into 64 bits */ -#if __LINUX_ARM_ARCH__ < 6 - emit_a32_mov_i(tmp2[1], 0xffff, ctx); - emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx); -#else /* ARMv6+ */ - emit(ARM_UXTH(rd[1], rd[1]), ctx); -#endif - if (!ctx->prog->aux->verifier_zext) - emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); - break; - case 32: - /* zero-extend 32 bits into 64 bits */ - if (!ctx->prog->aux->verifier_zext) - emit(ARM_EOR_R(rd[0],
rd[0], rd[0]), ctx); - break; - case 64: - /* nop */ - break; - } -exit: - arm_bpf_put_reg64(dst, rd, ctx); + emit_a32_endian(dst, BPF_SRC(code), imm, ctx); break; /* dst = imm64 */ case BPF_LD | BPF_IMM | BPF_DW: -- 2.20.1
Powered by blists - more mailing lists