Message-Id: <20171221.112819.2190571738067203400.davem@davemloft.net>
Date: Thu, 21 Dec 2017 11:28:19 -0500 (EST)
From: David Miller <davem@...emloft.net>
To: alexei.starovoitov@...il.com
Cc: daniel@...earbox.net, ast@...nel.org, netdev@...r.kernel.org
Subject: Re: pull-request: bpf-next 2017-12-18
From: David Miller <davem@...emloft.net>
Date: Wed, 20 Dec 2017 16:16:44 -0500 (EST)
> I think I understand how this new stuff works, I'll take a stab at
> doing the sparc64 JIT bits.
This patch should do it, please queue up for bpf-next.
But this is really overkill on sparc64.
No matter where you relocate the call destination to, the size of the
program and the code output will be identical except for the call
instruction's PC-relative offset field.
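(Illustrative aside, not part of the patch: a sparc64 "call" is a single
32-bit word whose low 30 bits are the signed word displacement to the
target, so only that field depends on where the destination ends up.
The sketch below just shows that encoding; the function name is made up.)

	/* Sketch only: encode a sparc64 CALL to "target", emitted at "pc".
	 * Format 1: op bits 01 in [31:30], signed 30-bit word displacement
	 * ((target - pc) >> 2) in [29:0].  Everything else in the generated
	 * image is independent of the destination address.
	 */
	static u32 sparc64_call_insn(unsigned long pc, unsigned long target)
	{
		s32 disp = (s32)((long)(target - pc) >> 2);

		return 0x40000000u | ((u32)disp & 0x3fffffff);
	}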
So at some point, as a follow-up, I should change this code to simply
scan the insns for the function calls and fix up the offsets, rather
than doing a full set of code generation passes.
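(Again illustrative only, with made-up names: such a follow-up could look
roughly like the loop below, which walks the already emitted image, finds
CALL words by their op field, and rewrites just the 30-bit displacement.
The destination lookup is left to a caller-supplied callback; a real
version would also have to restrict this to the calls that actually
target other BPF functions.)

	/* Hypothetical fixup pass: patch only the PC-relative offsets of
	 * call instructions instead of re-running code generation.
	 */
	static void fixup_call_offsets(u32 *image, unsigned int ninsns,
				       unsigned long (*call_dest)(unsigned long pc))
	{
		unsigned int i;

		for (i = 0; i < ninsns; i++) {
			unsigned long pc = (unsigned long)&image[i];

			if ((image[i] >> 30) != 1)	/* op != 01: not a CALL */
				continue;

			image[i] = 0x40000000u |
				   ((u32)((long)(call_dest(pc) - pc) >> 2) & 0x3fffffff);
		}
	}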
Thanks.
====================
bpf: sparc64: Add JIT support for multi-function programs.
Modelled strongly upon the arm64 implementation.
Signed-off-by: David S. Miller <davem@...emloft.net>
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index a2f1b5e..4ee417f 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1507,11 +1507,19 @@ static void jit_fill_hole(void *area, unsigned int size)
 		*ptr++ = 0x91d02005; /* ta 5 */
 }
+struct sparc64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct sparc64_jit_data *jit_data;
 	struct bpf_binary_header *header;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct jit_ctx ctx;
 	u32 image_size;
 	u8 *image_ptr;
@@ -1531,13 +1539,30 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 			prog = tmp;
 		}
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
+
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 	ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
-		goto out;
+		goto out_off;
 	}
 	/* Fake pass to detect features used, and get an accurate assessment
@@ -1560,7 +1585,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 	ctx.image = (u32 *)image_ptr;
-
+skip_init_ctx:
 	for (pass = 1; pass < 3; pass++) {
 		ctx.idx = 0;
@@ -1591,14 +1616,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
-	bpf_jit_binary_lock_ro(header);
+	if (!prog->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
 	prog->jited_len = image_size;
+	if (!prog->is_func || extra_pass) {
 out_off:
-	kfree(ctx.offset);
+		kfree(ctx.offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?