Message-Id: <7d95d2483eb648960a9f33e38a01fcc678da545e.1484090585.git.luto@kernel.org>
Date: Tue, 10 Jan 2017 15:24:43 -0800
From: Andy Lutomirski <luto@...nel.org>
To: Daniel Borkmann <daniel@...earbox.net>,
Netdev <netdev@...r.kernel.org>,
LKML <linux-kernel@...r.kernel.org>,
Linux Crypto Mailing List <linux-crypto@...r.kernel.org>
Cc: "Jason A. Donenfeld" <Jason@...c4.com>,
Hannes Frederic Sowa <hannes@...essinduktion.org>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
Eric Dumazet <edumazet@...gle.com>,
Eric Biggers <ebiggers3@...il.com>,
Tom Herbert <tom@...bertland.com>,
"David S. Miller" <davem@...emloft.net>,
Andy Lutomirski <luto@...nel.org>,
Alexei Starovoitov <ast@...nel.org>
Subject: [PATCH v2 5/8] bpf: Avoid copying the entire BPF program when hashing it
The sha256 helpers can consume a message incrementally, so there's no need
to allocate a buffer to store the whole blob to be hashed.

This may cause a slight slowdown for very long messages because gcc can't
inline the sha256_update() calls. For reasonably-sized programs, however,
this should be a considerable speedup, as vmalloc() is quite slow.
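
[Illustration, not part of the patch: a minimal userspace sketch of the same
pattern. Instead of copying the whole instruction array into one temporary
buffer and hashing it in a single call, each element is masked and fed to an
incremental digest as it is visited. The toy_digest_*() helpers and the
simplified struct insn below are hypothetical stand-ins for the kernel's
sha256_*_direct() helpers and struct bpf_insn; the digest is a toy FNV-1a,
not SHA-256.]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct bpf_insn (layout simplified). */
struct insn {
	uint8_t  code;
	uint8_t  regs;
	int16_t  off;
	int32_t  imm;
};

/* Toy incremental digest (FNV-1a), standing in for the sha256 helpers. */
struct toy_digest { uint64_t h; };

static void toy_digest_init(struct toy_digest *d)
{
	d->h = 0xcbf29ce484222325ULL;
}

static void toy_digest_update(struct toy_digest *d, const uint8_t *p, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		d->h ^= p[i];
		d->h *= 0x100000001b3ULL;
	}
}

int main(void)
{
	struct insn prog[] = {
		{ .code = 0x18, .imm = 42 },	/* pretend LD_IMM64 */
		{ .code = 0x95 },		/* pretend EXIT */
	};
	struct toy_digest d;

	toy_digest_init(&d);

	/* Feed one instruction at a time; no big temporary buffer needed. */
	for (size_t i = 0; i < sizeof(prog) / sizeof(prog[0]); i++) {
		struct insn tmp = prog[i];

		tmp.imm = 0;	/* mask unstable fields, as the patch does for map fds */
		toy_digest_update(&d, (const uint8_t *)&tmp, sizeof(tmp));
	}

	printf("digest: %016llx\n", (unsigned long long)d.h);
	return 0;
}
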
Cc: Daniel Borkmann <daniel@...earbox.net>
Cc: Alexei Starovoitov <ast@...nel.org>
Signed-off-by: Andy Lutomirski <luto@...nel.org>
---
kernel/bpf/core.c | 33 +++++++++++++--------------------
1 file changed, 13 insertions(+), 20 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 668b92f6ab58..106162a1bc54 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -149,44 +149,37 @@ void __bpf_prog_free(struct bpf_prog *fp)
 int bpf_prog_calc_digest(struct bpf_prog *fp)
 {
 	struct sha256_state sha;
-	u32 i, psize;
-	struct bpf_insn *dst;
+	u32 i;
 	bool was_ld_map;
-	u8 *raw;
-
-	psize = bpf_prog_insn_size(fp);
-	raw = vmalloc(psize);
-	if (!raw)
-		return -ENOMEM;
 
 	sha256_init_direct(&sha);
 
 	/* We need to take out the map fd for the digest calculation
 	 * since they are unstable from user space side.
 	 */
-	dst = (void *)raw;
 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
-		dst[i] = fp->insnsi[i];
+		struct bpf_insn insn = fp->insnsi[i];
+
 		if (!was_ld_map &&
-		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
-		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
+		    insn.code == (BPF_LD | BPF_IMM | BPF_DW) &&
+		    insn.src_reg == BPF_PSEUDO_MAP_FD) {
 			was_ld_map = true;
-			dst[i].imm = 0;
+			insn.imm = 0;
 		} else if (was_ld_map &&
-			   dst[i].code == 0 &&
-			   dst[i].dst_reg == 0 &&
-			   dst[i].src_reg == 0 &&
-			   dst[i].off == 0) {
+			   insn.code == 0 &&
+			   insn.dst_reg == 0 &&
+			   insn.src_reg == 0 &&
+			   insn.off == 0) {
 			was_ld_map = false;
-			dst[i].imm = 0;
+			insn.imm = 0;
 		} else {
 			was_ld_map = false;
 		}
+
+		sha256_update_direct(&sha, (const u8 *)&insn, sizeof(insn));
 	}
 
-	sha256_update_direct(&sha, raw, psize);
 	sha256_final_direct(&sha, fp->digest);
-	vfree(raw);
 	return 0;
 }
 
--
2.9.3