Message-ID: <88cdae60-494e-6294-b2c1-10b9cbeb95ac@iogearbox.net>
Date:   Mon, 4 Feb 2019 21:06:40 +0100
From:   Daniel Borkmann <daniel@...earbox.net>
To:     bjorn.topel@...il.com, linux-riscv@...ts.infradead.org,
        ast@...nel.org, netdev@...r.kernel.org
Cc:     palmer@...ive.com, hch@...radead.org
Subject: Re: [PATCH bpf-next 1/3] bpf, riscv: add BPF JIT for RV64G

On 02/03/2019 12:51 PM, bjorn.topel@...il.com wrote:
> From: Björn Töpel <bjorn.topel@...il.com>
> 
> This commit adds a BPF JIT for RV64G.
> 
> The JIT is a two-pass JIT, and has a dynamic prolog/epilogue (similar
> to the MIPS64 BPF JIT) instead of static ones (e.g. x86_64).
> 
> At the moment the RISC-V Linux port does not support HAVE_KPROBES,
> which means that CONFIG_BPF_EVENTS is not supported. Thus, no tests
> involving BPF_PROG_TYPE_TRACEPOINT pass.
> 
> Further, the implementation does not support "far branching" (>4KiB).
> 
> The implementation passes all the test_bpf.ko tests:
>   test_bpf: Summary: 378 PASSED, 0 FAILED, [366/366 JIT'ed]
> 
> All the tail_call tests in the selftest/bpf/test_verifier program
> pass.
> 
> All tests were done on QEMU (QEMU emulator version 3.1.50
> (v3.1.0-688-g8ae951fbc106)).
> 
> Signed-off-by: Björn Töpel <bjorn.topel@...il.com>
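
For context, the "dynamic prologue/epilogue" mentioned above means that
only the registers recorded as seen during the sizing pass are spilled.
A rough sketch of the idea, not the code from this patch (rv_sd() and
RV_REG_SP are assumed to be defined alongside the other rv_* helpers):

static int save_seen_regs(int sp_off, struct rv_jit_context *ctx)
{
	/* Registers worth spilling when seen: ra (i.e. when a call was
	 * emitted, since RV_CTX_F_SEEN_CALL aliases RV_REG_RA) and the
	 * callee-saved s-registers.
	 */
	static const u8 regs[] = {
		RV_REG_RA, RV_REG_S1, RV_REG_S2, RV_REG_S3,
		RV_REG_S4, RV_REG_S5, RV_REG_S6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		if (!seen_reg(regs[i], ctx))
			continue;
		sp_off -= 8;	/* one 64-bit stack slot per seen register */
		emit(rv_sd(RV_REG_SP, sp_off, regs[i]), ctx);	/* sd reg, off(sp) */
	}
	return sp_off;
}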

Some minor comments:

Looks like all the BPF_JMP32 instructions are missing. It would probably
make sense to include these in the initial merge as well, unless there
is a good reason not to; presumably the test_verifier parts exercising
BPF_JMP32 haven't been tried out?
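
For illustration, one way to handle the JMP32 conditionals would be to
extend both 32-bit operands into the RISC-V temporaries and then reuse
the existing 64-bit branch emission. Rough sketch only; rv_addi(),
rv_addiw(), rv_slli() and rv_srli() are assumed to be instruction
constructors in the style of rv_r_insn(), and RV_REG_T1/T2 the usual
temporaries:

static void emit_jmp32_operands(u8 *rd, u8 *rs, bool is_signed,
				struct rv_jit_context *ctx)
{
	/* Work on copies so the BPF registers are not clobbered. */
	emit(rv_addi(RV_REG_T1, *rd, 0), ctx);		/* mv t1, rd */
	emit(rv_addi(RV_REG_T2, *rs, 0), ctx);		/* mv t2, rs */

	if (is_signed) {
		/* addiw reg, reg, 0 sign-extends the low 32 bits. */
		emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx);
		emit(rv_addiw(RV_REG_T2, RV_REG_T2, 0), ctx);
	} else {
		/* slli/srli by 32 zero-extend the low 32 bits. */
		emit(rv_slli(RV_REG_T1, RV_REG_T1, 32), ctx);
		emit(rv_srli(RV_REG_T1, RV_REG_T1, 32), ctx);
		emit(rv_slli(RV_REG_T2, RV_REG_T2, 32), ctx);
		emit(rv_srli(RV_REG_T2, RV_REG_T2, 32), ctx);
	}

	/* Fall through to the normal 64-bit compare-and-branch code
	 * using the extended copies.
	 */
	*rd = RV_REG_T1;
	*rs = RV_REG_T2;
}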

[...]
> +
> +enum {
> +	RV_CTX_F_SEEN_TAIL_CALL =	0,
> +	RV_CTX_F_SEEN_CALL =		RV_REG_RA,
> +	RV_CTX_F_SEEN_S1 =		RV_REG_S1,
> +	RV_CTX_F_SEEN_S2 =		RV_REG_S2,
> +	RV_CTX_F_SEEN_S3 =		RV_REG_S3,
> +	RV_CTX_F_SEEN_S4 =		RV_REG_S4,
> +	RV_CTX_F_SEEN_S5 =		RV_REG_S5,
> +	RV_CTX_F_SEEN_S6 =		RV_REG_S6,
> +};
> +
> +struct rv_jit_context {
> +	struct bpf_prog *prog;
> +	u32 *insns; /* RV insns */
> +	int ninsns;
> +	int epilogue_offset;
> +	int *offset; /* BPF to RV */
> +	unsigned long flags;
> +	int stack_size;
> +};
> +
> +struct rv_jit_data {
> +	struct bpf_binary_header *header;
> +	u8 *image;
> +	struct rv_jit_context ctx;
> +};
> +
> +static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
> +{
> +	u8 reg = regmap[bpf_reg];
> +
> +	switch (reg) {
> +	case RV_CTX_F_SEEN_S1:
> +	case RV_CTX_F_SEEN_S2:
> +	case RV_CTX_F_SEEN_S3:
> +	case RV_CTX_F_SEEN_S4:
> +	case RV_CTX_F_SEEN_S5:
> +	case RV_CTX_F_SEEN_S6:
> +		__set_bit(reg, &ctx->flags);
> +	}
> +	return reg;
> +}
> +
> +static bool seen_reg(int reg, struct rv_jit_context *ctx)
> +{
> +	switch (reg) {
> +	case RV_CTX_F_SEEN_CALL:
> +	case RV_CTX_F_SEEN_S1:
> +	case RV_CTX_F_SEEN_S2:
> +	case RV_CTX_F_SEEN_S3:
> +	case RV_CTX_F_SEEN_S4:
> +	case RV_CTX_F_SEEN_S5:
> +	case RV_CTX_F_SEEN_S6:
> +		return test_bit(reg, &ctx->flags);
> +	}
> +	return false;
> +}
> +
> +static void mark_call(struct rv_jit_context *ctx)
> +{
> +	__set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
> +}
> +
> +static bool seen_call(struct rv_jit_context *ctx)
> +{
> +	return seen_reg(RV_REG_RA, ctx);
> +}

Just a nit: it would probably be more obvious to remove this asymmetry
in seen_reg() and use __set_bit()/test_bit() on RV_CTX_F_SEEN_CALL
directly, similar to the tail-call helpers below.
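
I.e. something along these lines (sketch only), so that seen_reg() can
then drop its RV_CTX_F_SEEN_CALL case:

static bool seen_call(struct rv_jit_context *ctx)
{
	/* Query the flag directly, mirroring mark_call(), instead of
	 * going through seen_reg(RV_REG_RA).
	 */
	return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}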

> +static void mark_tail_call(struct rv_jit_context *ctx)
> +{
> +	__set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
> +}
> +
> +static bool seen_tail_call(struct rv_jit_context *ctx)
> +{
> +	return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
> +}
> +
> +static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
> +{
> +	mark_tail_call(ctx);
> +
> +	if (seen_call(ctx)) {
> +		__set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
> +		return RV_REG_S6;
> +	}
> +	return RV_REG_A6;
> +}
> +
> +static void emit(const u32 insn, struct rv_jit_context *ctx)
> +{
> +	if (ctx->insns)
> +		ctx->insns[ctx->ninsns] = insn;
> +
> +	ctx->ninsns++;
> +}
> +
> +static u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, u8 opcode)
> +{
[...]
> +	/* Allocate image, now that we know the size. */
> +	image_size = sizeof(u32) * ctx->ninsns;
> +	jit_data->header = bpf_jit_binary_alloc(image_size, &jit_data->image,
> +						sizeof(u32),
> +						bpf_fill_ill_insns);
> +	if (!jit_data->header) {
> +		prog = orig_prog;
> +		goto out_offset;
> +	}
> +
> +	/* Second, real pass, that actually emits the image. */
> +	ctx->insns = (u32 *)jit_data->image;
> +skip_init_ctx:
> +	ctx->ninsns = 0;
> +
> +	build_prologue(ctx);
> +	if (build_body(ctx, extra_pass)) {
> +		bpf_jit_binary_free(jit_data->header);
> +		prog = orig_prog;
> +		goto out_offset;
> +	}
> +	build_epilogue(ctx);
> +
> +	if (bpf_jit_enable > 1)
> +		bpf_jit_dump(prog->len, image_size, 2, ctx->insns);
> +
> +	prog->bpf_func = (void *)ctx->insns;
> +	prog->jited = 1;
> +	prog->jited_len = image_size;
> +
> +	bpf_flush_icache(jit_data->header, (u8 *)ctx->insns + ctx->ninsns);

Shouldn't this be '(u32 *)ctx->insns + ctx->ninsns' to cover the range?
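
That is, since ctx->insns is already a u32 pointer, something like the
following would cover the whole ninsns * sizeof(u32) image (sketch):

	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);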

> +
> +	if (!prog->is_func || extra_pass) {
> +out_offset:
> +		kfree(ctx->offset);
> +		kfree(jit_data);
> +		prog->aux->jit_data = NULL;
> +	}
> +out:
> +	if (tmp_blinded)
> +		bpf_jit_prog_release_other(prog, prog == orig_prog ?
> +					   tmp : orig_prog);
> +	return prog;
> +}
> 
