Message-Id: <20180329004839.4506-4-jakub.kicinski@netronome.com>
Date:   Wed, 28 Mar 2018 17:48:27 -0700
From:   Jakub Kicinski <jakub.kicinski@...ronome.com>
To:     alexei.starovoitov@...il.com, daniel@...earbox.net
Cc:     netdev@...r.kernel.org, oss-drivers@...ronome.com,
        Jan Gossens <jan.gossens@...h-aachen.de>,
        Jiong Wang <jiong.wang@...ronome.com>
Subject: [PATCH bpf-next 03/14] nfp: bpf: detect packet reads that could be cached and enable the optimisation

From: Jiong Wang <jiong.wang@...ronome.com>

This patch is the front end of the packet data cache optimisation: it
detects and marks those packet reads that could be cached. The
optimisation "backend" is then activated automatically.
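
For context, a program of the kind this front end targets might look as
follows. This is a hypothetical XDP example, not part of the patch; it
assumes libbpf's bpf_helpers.h and bpf_endian.h. Both loads go through
the same base register (same verifier pointer id and fixed offset), and
their constant instruction offsets fall inside one 64-byte window, so
they are candidates for a single cache fill.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

struct pkt {
	struct ethhdr eth;
	struct iphdr ip;
} __attribute__((packed));

SEC("xdp")
int read_headers(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct pkt *p = data;

	/* Bounds check so the verifier types the loads as PTR_TO_PACKET. */
	if (data + sizeof(*p) > data_end)
		return XDP_DROP;

	/* Two narrow reads off the same base pointer, at constant
	 * offsets 12 and 23: both fit in one 64-byte cache range.
	 */
	if (p->eth.h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;
	return p->ip.protocol == IPPROTO_TCP ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";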

Signed-off-by: Jiong Wang <jiong.wang@...ronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
---
 drivers/net/ethernet/netronome/nfp/bpf/jit.c  | 115 ++++++++++++++++++++++++++
 drivers/net/ethernet/netronome/nfp/bpf/main.h |  30 +++++++
 2 files changed, 145 insertions(+)

diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 38ed2f72a4b0..0701590e8302 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -2964,6 +2964,120 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
 	}
 }
 
+static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
+{
+	struct nfp_insn_meta *meta, *range_node = NULL;
+	s16 range_start = 0, range_end = 0;
+	bool cache_avail = false;
+	struct bpf_insn *insn;
+	s32 range_ptr_off = 0;
+	u32 range_ptr_id = 0;
+
+	list_for_each_entry(meta, &nfp_prog->insns, l) {
+		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
+			cache_avail = false;
+
+		if (meta->skip)
+			continue;
+
+		insn = &meta->insn;
+
+		if (is_mbpf_store_pkt(meta) ||
+		    insn->code == (BPF_JMP | BPF_CALL) ||
+		    is_mbpf_classic_store_pkt(meta) ||
+		    is_mbpf_classic_load(meta)) {
+			cache_avail = false;
+			continue;
+		}
+
+		if (!is_mbpf_load(meta))
+			continue;
+
+		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
+			cache_avail = false;
+			continue;
+		}
+
+		if (!cache_avail) {
+			cache_avail = true;
+			if (range_node)
+				goto end_current_then_start_new;
+			goto start_new;
+		}
+
+		/* Check ID to make sure two reads share the same
+		 * variable offset against PTR_TO_PACKET, and check OFF
+		 * to make sure they also share the same constant
+		 * offset.
+		 *
+		 * OFFs don't strictly need to be the same: they are
+		 * constant offsets against PTR_TO_PACKET, so different
+		 * OFFs could be canonicalized into offsets against the
+		 * original packet pointer. That canonicalization is
+		 * not supported yet.
+		 */
+		if (meta->ptr.id == range_ptr_id &&
+		    meta->ptr.off == range_ptr_off) {
+			s16 new_start = range_start;
+			s16 end, off = insn->off;
+			s16 new_end = range_end;
+			bool changed = false;
+
+			if (off < range_start) {
+				new_start = off;
+				changed = true;
+			}
+
+			end = off + BPF_LDST_BYTES(insn);
+			if (end > range_end) {
+				new_end = end;
+				changed = true;
+			}
+
+			if (!changed)
+				continue;
+
+			if (new_end - new_start <= 64) {
+				/* Install new range. */
+				range_start = new_start;
+				range_end = new_end;
+				continue;
+			}
+		}
+
+end_current_then_start_new:
+		range_node->pkt_cache.range_start = range_start;
+		range_node->pkt_cache.range_end = range_end;
+start_new:
+		range_node = meta;
+		range_node->pkt_cache.do_init = true;
+		range_ptr_id = range_node->ptr.id;
+		range_ptr_off = range_node->ptr.off;
+		range_start = insn->off;
+		range_end = insn->off + BPF_LDST_BYTES(insn);
+	}
+
+	if (range_node) {
+		range_node->pkt_cache.range_start = range_start;
+		range_node->pkt_cache.range_end = range_end;
+	}
+
+	list_for_each_entry(meta, &nfp_prog->insns, l) {
+		if (meta->skip)
+			continue;
+
+		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
+			if (meta->pkt_cache.do_init) {
+				range_start = meta->pkt_cache.range_start;
+				range_end = meta->pkt_cache.range_end;
+			} else {
+				meta->pkt_cache.range_start = range_start;
+				meta->pkt_cache.range_end = range_end;
+			}
+		}
+	}
+}
+
 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
 {
 	nfp_bpf_opt_reg_init(nfp_prog);
@@ -2971,6 +3085,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
 	nfp_bpf_opt_ld_mask(nfp_prog);
 	nfp_bpf_opt_ld_shift(nfp_prog);
 	nfp_bpf_opt_ldst_gather(nfp_prog);
+	nfp_bpf_opt_pkt_cache(nfp_prog);
 
 	return 0;
 }
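
An aside on the merge rule in nfp_bpf_opt_pkt_cache() above: a new load
at [off, off + size) joins the current range only if the grown range
still fits in 64 bytes; otherwise the current range is ended and a new
one started. A standalone sketch of just that rule (illustration only,
not driver code):

#include <stdbool.h>
#include <stdint.h>

/* Grow [*start, *end) to also cover [off, off + size); refuse if the
 * result would exceed one 64-byte cache fill.
 */
static bool pkt_cache_try_merge(int16_t *start, int16_t *end,
				int16_t off, uint8_t size)
{
	int16_t new_start = off < *start ? off : *start;
	int16_t new_end = off + size > *end ? off + size : *end;

	if (new_end - new_start > 64)
		return false;
	*start = new_start;
	*end = new_end;
	return true;
}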
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 861211e27ea6..c220ac5a734a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -278,6 +278,36 @@ static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
 	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
 }
 
+static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
+{
+	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
+}
+
+static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
+{
+	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
+}
+
+static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
+{
+	u8 code = meta->insn.code;
+
+	return BPF_CLASS(code) == BPF_LD &&
+	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
+}
+
+static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
+{
+	u8 code = meta->insn.code;
+
+	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
+}
+
+static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
+{
+	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
+}
+
 /**
  * struct nfp_prog - nfp BPF program
  * @bpf: backpointer to the bpf app priv structure
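
As a closing illustration of what the new classic-mode helpers match
(assuming the instruction macros from include/linux/filter.h; not part
of the patch):

#include <linux/filter.h>

/* Matched by is_mbpf_classic_load(): legacy LD_ABS/LD_IND loads. */
struct bpf_insn classic_load = BPF_LD_ABS(BPF_W, 0);
/* Matched by is_mbpf_classic_store(): BPF_ST | BPF_MEM stores. */
struct bpf_insn classic_store = BPF_ST_MEM(BPF_W, BPF_REG_1, 0, 0);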
-- 
2.16.2
