Message-Id: <20180304194044.26751-2-fw@strlen.de>
Date:   Sun,  4 Mar 2018 20:40:42 +0100
From:   Florian Westphal <fw@...len.de>
To:     <netdev@...r.kernel.org>
Cc:     daniel@...earbox.net, ast@...nel.org, pablo@...filter.org,
        Florian Westphal <fw@...len.de>
Subject: [RFC,POC 1/3] bpfilter: add experimental IMR bpf translator

This adds a basic intermediate representation (IMR) to decouple
the ruleset representation (iptables, nftables) from the
ebpf translation.

The IMR currently assumes that translation will always be
into ebpf; its pseudo-registers map 1:1 to ebpf registers.

Objects implemented at the moment:
- relop (eq, ne only for now)
- immediate (32, 64 bit constants)
- payload, with relative addressing (mac header, network header, transport header)

This doesn't add a user yet; the files are not even compiled.

Signed-off-by: Florian Westphal <fw@...len.de>
---
 net/bpfilter/imr.c | 655 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 net/bpfilter/imr.h |  78 +++++++
 2 files changed, 733 insertions(+)
 create mode 100644 net/bpfilter/imr.c
 create mode 100644 net/bpfilter/imr.h
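Notes (not for the commit log): a minimal usage sketch of how a frontend
could drive the IMR, building "ip protocol udp -> drop" and handing it to
the translator.  Illustrative only -- no such caller exists in this series,
the function name is made up, error unwinding is abbreviated, and everything
outside imr.h is plain libc/uapi:

#include <netinet/in.h>		/* IPPROTO_UDP */
#include <netinet/ip.h>		/* struct iphdr */
#include <stddef.h>		/* offsetof */

#include "imr.h"

/* illustrative sketch, not part of this patch; error unwinding abbreviated */
static int example_drop_udp(void)
{
	struct imr_object *payload, *imm, *rel, *verdict;
	struct imr_state *s;
	int ret = -1;

	s = imr_state_alloc();
	if (!s)
		return -1;

	imr_state_rule_begin(s);

	/* left: one byte at iphdr->protocol, relative to the network header */
	payload = imr_object_alloc_payload(IMR_PAYLOAD_BASE_NH,
					   offsetof(struct iphdr, protocol), 1);
	/* right: the constant to compare against */
	imm = imr_object_alloc_imm32(IPPROTO_UDP);
	if (!payload || !imm)
		goto out;

	rel = imr_object_alloc_relational(IMR_RELOP_EQ, payload, imm);
	if (!rel || imr_state_add_obj(s, rel) < 0)
		goto out;

	verdict = imr_object_alloc_verdict(IMR_VERDICT_DROP);
	if (!verdict || imr_state_add_obj(s, verdict) < 0)
		goto out;

	if (imr_state_rule_end(s) < 0)
		goto out;

	imr_state_print(stdout, s);
	ret = imr_do_bpf(s);	/* test entry point added by this patch */
out:
	imr_state_free(s);
	return ret;
}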

diff --git a/net/bpfilter/imr.c b/net/bpfilter/imr.c
new file mode 100644
index 000000000000..09c557ea7c21
--- /dev/null
+++ b/net/bpfilter/imr.c
@@ -0,0 +1,655 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+
+#include <linux/if_ether.h>
+typedef __u16 __bitwise __sum16; /* hack */
+#include <linux/ip.h>
+#include <arpa/inet.h>
+
+#include "imr.h"
+#include "bpfilter_gen.h"
+
+#define EMIT(ctx, x)					\
+	do {						\
+		if ((ctx)->len_cur + 1 > (ctx)->len_max)\
+			return -ENOMEM;			\
+		(ctx)->img[(ctx)->len_cur++] = x;	\
+	} while (0)
+
+struct imr_object {
+	enum imr_obj_type type:8;
+	uint8_t len;
+
+	union {
+		struct {
+			union {
+				uint64_t value64;
+				uint32_t value32;
+			};
+		} immedate;
+		struct {
+			struct imr_object *left;
+			struct imr_object *right;
+			enum imr_relop op:8;
+		} relational;
+		struct {
+			uint16_t offset;
+			enum imr_payload_base base:8;
+		} payload;
+		struct {
+			enum imr_verdict verdict;
+		} verdict;
+	};
+};
+
+struct imr_state {
+	struct bpf_insn	*img;
+	uint32_t len_cur;
+	uint32_t len_max;
+
+	struct imr_object *registers[IMR_REG_COUNT];
+	uint8_t regcount;
+
+	uint32_t num_objects;
+	struct imr_object **objects;
+};
+
+static int imr_jit_object(struct bpfilter_gen_ctx *ctx,
+			  struct imr_state *, const struct imr_object *o);
+
+static void internal_error(const char *s)
+{
+	fprintf(stderr, "FIXME: internal error %s\n", s);
+	exit(1);
+}
+
+/* FIXME: consider len too (e.g. reserve 2 registers for len == 8) */
+static int imr_register_alloc(struct imr_state *s, uint32_t len)
+{
+	uint8_t reg = s->regcount;
+
+	if (s->regcount >= IMR_REG_COUNT)
+		return -1;
+
+	s->regcount++;
+
+	return reg;
+}
+
+static int imr_register_get(const struct imr_state *s, uint32_t len)
+{
+	if (len > sizeof(uint64_t))
+		internal_error(">64bit types not yet implemented");
+	if (s->regcount == 0)
+		internal_error("no registers in use");
+
+	return s->regcount - 1;
+}
+
+static int imr_to_bpf_reg(enum imr_reg_num n)
+{
+	/* currently maps 1:1 */
+	return (int)n;
+}
+
+static int bpf_reg_width(unsigned int len)
+{
+	switch (len) {
+	case sizeof(uint8_t): return BPF_B;
+	case sizeof(uint16_t): return BPF_H;
+	case sizeof(uint32_t): return BPF_W;
+	case sizeof(uint64_t): return BPF_DW;
+	default:
+		internal_error("reg size not supported");
+	}
+
+	return -EINVAL;
+}
+
+static void imr_register_release(struct imr_state *s)
+{
+	if (s->regcount == 0)
+		internal_error("regcount underflow");
+	s->regcount--;
+}
+
+void imr_register_store(struct imr_state *s, enum imr_reg_num reg, struct imr_object *o)
+{
+	s->registers[reg] = o;
+}
+
+struct imr_object *imr_register_load(const struct imr_state *s, enum imr_reg_num reg)
+{
+	return s->registers[reg];
+}
+
+struct imr_state *imr_state_alloc(void)
+{
+	struct imr_state *s = calloc(1, sizeof(*s));
+
+	return s;
+}
+
+void imr_state_free(struct imr_state *s)
+{
+	int i;
+
+	for (i = 0; i < s->num_objects; i++)
+		imr_object_free(s->objects[i]);
+
+	free(s->objects);
+	free(s);
+}
+
+struct imr_object *imr_object_alloc(enum imr_obj_type t)
+{
+	struct imr_object *o = calloc(1, sizeof(*o));
+
+	if (o)
+		o->type = t;
+
+	return o;
+}
+
+void imr_object_free(struct imr_object *o)
+{
+	if (!o)
+		return;
+
+	switch (o->type) {
+	case IMR_OBJ_TYPE_VERDICT:
+	case IMR_OBJ_TYPE_IMMEDIATE:
+	case IMR_OBJ_TYPE_PAYLOAD:
+		break;
+	case IMR_OBJ_TYPE_RELATIONAL:
+		imr_object_free(o->relational.left);
+		imr_object_free(o->relational.right);
+		break;
+	}
+
+	free(o);
+}
+
+struct imr_object *imr_object_alloc_imm32(uint32_t value)
+{
+	struct imr_object *o = imr_object_alloc(IMR_OBJ_TYPE_IMMEDIATE);
+
+	if (o) {
+		o->immedate.value32 = value;
+		o->len = sizeof(value);
+	}
+	return o;
+}
+
+struct imr_object *imr_object_alloc_imm64(uint64_t value)
+{
+	struct imr_object *o = imr_object_alloc(IMR_OBJ_TYPE_IMMEDIATE);
+
+	if (o) {
+		o->immedate.value64 = value;
+		o->len = sizeof(value);
+	}
+	return o;
+}
+
+struct imr_object *imr_object_alloc_verdict(enum imr_verdict v)
+{
+	struct imr_object *o = imr_object_alloc(IMR_OBJ_TYPE_VERDICT);
+
+	if (!o)
+		return NULL;
+
+	o->verdict.verdict = v;
+	o->len = sizeof(v);
+
+	return o;
+}
+
+static const char *op_to_str(enum imr_relop op)
+{
+	switch (op) {
+	case IMR_RELOP_NE: return "ne";
+	case IMR_RELOP_EQ: return "eq";
+	}
+
+	return "invalid";
+}
+
+static const char *verdict_to_str(enum imr_verdict v)
+{
+	switch (v) {
+	case IMR_VERDICT_NEXT: return "next";
+	case IMR_VERDICT_PASS: return "pass";
+	case IMR_VERDICT_DROP: return "drop";
+	}
+
+	return "invalid";
+}
+
+static int imr_object_print_imm(FILE *fp, const struct imr_object *o)
+{
+	int ret = fprintf(fp, "TYPE_IMMEDIATE (");
+	if (ret < 0)
+		return ret;
+
+	switch (o->len) {
+	case sizeof(uint64_t):
+		return fprintf(fp, "0x%016llx)\n", (unsigned long long)o->immedate.value64);
+	case sizeof(uint32_t):
+		return fprintf(fp, "0x%08x)\n", (unsigned int)o->immedate.value32);
+	default:
+		return fprintf(fp, "0x%llx (?)\n", (unsigned long long)o->immedate.value64);
+	}
+}
+
+static int imr_object_print(FILE *fp, int depth, const struct imr_object *o)
+{
+	int ret, total = 0;
+	int i;
+
+	for (i = 0; i < depth; i++) {
+		ret = fprintf(fp, "\t");
+		if (ret < 0)
+			return ret;
+	}
+
+	switch (o->type) {
+	case IMR_OBJ_TYPE_VERDICT:
+		return fprintf(fp, "TYPE_VERDICT: %s\n", verdict_to_str(o->verdict.verdict));
+	case IMR_OBJ_TYPE_RELATIONAL:
+		++depth;
+
+		ret = fprintf(fp, "IMR_OBJ_TYPE_RELATIONAL {\n");
+		if (ret < 0)
+			return ret;
+		total += ret;
+
+		ret = imr_object_print(fp, depth, o->relational.left);
+		if (ret < 0)
+			return ret;
+		total += ret;
+
+		for (i = 0; i < depth; i++)
+			fprintf(fp, "\t");
+
+		ret = fprintf(fp, "op: %s\n", op_to_str(o->relational.op));
+		if (ret < 0)
+			return ret;
+		total += ret;
+
+		ret = imr_object_print(fp, depth, o->relational.right);
+		if (ret < 0)
+			return ret;
+		total += ret;
+
+		--depth;
+		for (i = 0; i < depth; i++)
+			fprintf(fp, "\t");
+
+		ret = fprintf(fp, "}\n");
+		if (ret < 0)
+			return ret;
+
+		return total + ret;
+	case IMR_OBJ_TYPE_PAYLOAD:
+		return fprintf(fp, "TYPE_PAYLOAD: base %d,offset %d, length %d\n",
+				o->payload.base, o->payload.offset, o->len);
+	case IMR_OBJ_TYPE_IMMEDIATE:
+		return imr_object_print_imm(fp, o);
+	}
+
+	internal_error("missing print support");
+	return 0;
+}
+
+void imr_state_print(FILE *fp, struct imr_state *s)
+{
+	int i;
+
+	for (i = 0; i < s->num_objects; i++)
+		imr_object_print(fp, 0, s->objects[i]);
+}
+
+struct imr_object *imr_object_alloc_payload(enum imr_payload_base b, uint16_t off, uint16_t len)
+{
+	struct imr_object *o = imr_object_alloc(IMR_OBJ_TYPE_PAYLOAD);
+
+	if (!o)
+		return NULL;
+
+	o->payload.base = b;
+	o->payload.offset = off;
+	if (len == 0)
+		internal_error("payload length is 0");
+	if (len > 16)
+		internal_error("payload length exceeds 16 bytes");
+
+	o->len = len;
+
+	return o;
+}
+
+struct imr_object *imr_object_alloc_relational(enum imr_relop op, struct imr_object *l, struct imr_object *r)
+{
+	struct imr_object *o = imr_object_alloc(IMR_OBJ_TYPE_RELATIONAL);
+
+	if (!o)
+		return NULL;
+
+	o->relational.op = op;
+	o->relational.left = l;
+	o->relational.right = r;
+
+	if (l->len == 0 || r->len == 0)
+		internal_error("relational op with 0 op length");
+
+	o->len = l->len;
+	if (r->len > o->len)
+		o->len = r->len;
+
+	return o;
+}
+
+int imr_state_add_obj(struct imr_state *s, struct imr_object *o)
+{
+	struct imr_object **new;
+	uint32_t slot = s->num_objects;
+
+	if (s->num_objects >= INT_MAX / sizeof(*o))
+		return -1;
+
+	s->num_objects++;
+	new = realloc(s->objects, sizeof(o) * s->num_objects);
+	if (!new) {
+		imr_object_free(o);
+		return -1;
+	}
+
+	new[slot] = o;
+	if (new != s->objects)
+		s->objects = new;
+
+	return 0;
+}
+
+int imr_state_rule_end(struct imr_state *s)
+{
+	uint32_t slot = s->num_objects;
+	struct imr_object *last;
+
+	if (slot == 0)
+		internal_error("rule end, but no objects present");
+	last = s->objects[slot - 1];
+
+	if (last->type == IMR_OBJ_TYPE_VERDICT)
+		return 0;
+
+	return imr_state_add_obj(s, imr_object_alloc_verdict(IMR_VERDICT_NEXT));
+}
+
+static int imr_jit_obj_immediate(struct bpfilter_gen_ctx *ctx,
+				 const struct imr_state *s,
+				 const struct imr_object *o)
+{
+	int bpf_reg = imr_to_bpf_reg(imr_register_get(s, o->len));
+
+	fprintf(stderr, "store immediate in bpf reg %d\n", bpf_reg);
+	switch (o->len) {
+	case sizeof(uint32_t):
+		EMIT(ctx, BPF_MOV32_IMM(bpf_reg, o->immedate.value32));
+		return 0;
+	case sizeof(uint64_t):
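+		/* XXX: BPF_MOV64_IMM only carries a 32-bit immediate, so the
+		 * upper 32 bits of value64 are lost here; a full 64-bit load
+		 * would need BPF_LD_IMM64 (two instructions).
+		 */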
+		EMIT(ctx, BPF_MOV64_IMM(bpf_reg, o->immedate.value64));
+		return 0;
+	default:
+		break;
+	}
+
+	internal_error("unhandled immediate size");
+	return -EINVAL;
+}
+
+static int imr_jit_obj_verdict(struct bpfilter_gen_ctx *ctx,
+			       const struct imr_state *s,
+			       const struct imr_object *o)
+{
+	uint32_t verdict = o->verdict.verdict;
+	enum xdp_action match_xdp;
+
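+	/* XXX: IMR_VERDICT_NEXT is mapped to XDP_PASS as well for now */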
+	match_xdp = verdict == IMR_VERDICT_DROP ? XDP_DROP : XDP_PASS;
+	fprintf(stderr, "jit verdict: %s (imr: %d)\n", match_xdp == XDP_DROP ? "drop" : "pass", verdict);
+
+	EMIT(ctx, BPF_MOV32_IMM(BPF_REG_0, match_xdp));
+	EMIT(ctx, BPF_EXIT_INSN());
+
+	return 0;
+}
+
+static int imr_jit_obj_payload(struct bpfilter_gen_ctx *ctx,
+			       const struct imr_state *state,
+			       const struct imr_object *o)
+{
+	int base = o->payload.base;
+	int offset;
+	int bpf_width, bpf_reg;
+
+	offset = o->payload.offset;
+
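+	/* BPF_REG_1 points at the network header here (the per-rule prologue in
+	 * imr_jit_rule() sets it up, assuming BPF_REG_2 holds the packet start),
+	 * so link-layer and transport-header loads are expressed relative to it.
+	 */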
+	switch (base) {
+	case IMR_PAYLOAD_BASE_LL:
+		EMIT(ctx, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+					-(int)sizeof(struct ethhdr)));
+		break;
+	case IMR_PAYLOAD_BASE_NH:
+		break;
+	case IMR_PAYLOAD_BASE_TH:
+		/* XXX: ip options */
+		offset += sizeof(struct iphdr);
+		break;
+	}
+
+	bpf_width = bpf_reg_width(o->len);
+	bpf_reg = imr_to_bpf_reg(imr_register_get(state, o->len));
+
+	fprintf(stderr, "store payload in bpf reg %d\n", bpf_reg);
+	EMIT(ctx, BPF_LDX_MEM(bpf_width, bpf_reg, BPF_REG_1, offset));
+
+	switch (base) {
+	case IMR_PAYLOAD_BASE_LL:
+		/* undo the rebase so BPF_REG_1 points at the network header again */
+		EMIT(ctx, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+					(int)sizeof(struct ethhdr)));
+		break;
+	case IMR_PAYLOAD_BASE_NH:
+	case IMR_PAYLOAD_BASE_TH:
+		/* nothing to undo: for the transport header only the load offset
+		 * was adjusted above
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+static int imr_jit_obj_relational(struct bpfilter_gen_ctx *ctx,
+				  struct imr_state *state,
+				  const struct imr_object *o)
+{
+	const struct imr_object *right;
+	int regl, regr;
+	int ret, op, bpf_reg;
+
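+	/* The emitted jump is taken when the relation does NOT hold, so that a
+	 * failed match skips ahead to the next rule; hence the inverted opcodes.
+	 */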
+	switch (o->relational.op) {
+	case IMR_RELOP_EQ:
+		op = BPF_JNE;
+		break;
+	case IMR_RELOP_NE:
+		op = BPF_JEQ;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	regl = imr_register_alloc(state, o->len);
+	if (regl < 0)
+		return -ENOSPC;
+
+	ret = imr_jit_object(ctx, state, o->relational.left);
+	if (ret) {
+		imr_register_release(state);
+		return ret;
+	}
+
+	right = o->relational.right;
+	bpf_reg = imr_to_bpf_reg(regl);
+
+	/* avoid 2nd register if possible */
+	if (right->type == IMR_OBJ_TYPE_IMMEDIATE) {
+		switch (right->len) {
+		case sizeof(uint32_t):
+			EMIT(ctx, BPF_JMP_IMM(op, bpf_reg, right->immedate.value32, 0));
+			imr_register_release(state);
+			return 0;
+		}
+	}
+
+	regr = imr_register_alloc(state, right->len);
+	if (regr < 0) {
+		imr_register_release(state);
+		return -ENOSPC;
+	}
+
+	ret = imr_jit_object(ctx, state, right);
+	if (ret) {
+		imr_register_release(state);
+		imr_register_release(state);
+		return ret;
+	}
+
+	fprintf(stderr, "CMP: %d %d\n", bpf_reg, imr_to_bpf_reg(regr));
+	EMIT(ctx, BPF_JMP_REG(op, bpf_reg, imr_to_bpf_reg(regr), 0));
+	imr_register_release(state);
+	imr_register_release(state);
+	return 0;
+}
+
+static int imr_jit_object(struct bpfilter_gen_ctx *ctx,
+			  struct imr_state *s,
+			  const struct imr_object *o)
+{
+	switch (o->type) {
+	case IMR_OBJ_TYPE_VERDICT:
+		return imr_jit_obj_verdict(ctx, s, o);
+	case IMR_OBJ_TYPE_RELATIONAL:
+		return imr_jit_obj_relational(ctx, s, o);
+	case IMR_OBJ_TYPE_PAYLOAD:
+		return imr_jit_obj_payload(ctx, s, o);
+	case IMR_OBJ_TYPE_IMMEDIATE:
+		return imr_jit_obj_immediate(ctx, s, o);
+	}
+
+	return -EINVAL;
+}
+
+static int imr_jit_rule(struct bpfilter_gen_ctx *ctx,
+			struct imr_state *state,
+			int i)
+{
+	unsigned int start, end, count, pc, pc_end, len_cur;
+
+	end = state->num_objects;
+	if (i >= end)
+		return -EINVAL;
+
+	len_cur = ctx->len_cur;
+
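+	/* Per-rule prologue: point BPF_REG_1 at the network header and jump to
+	 * the next rule (offset fixed up below) if the packet is too short for
+	 * an ethernet + IPv4 header.  BPF_REG_2/BPF_REG_3 are assumed to hold
+	 * data/data_end as set up by bpfilter_gen_prologue().
+	 */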
+	EMIT(ctx, BPF_MOV64_REG(BPF_REG_1, BPF_REG_2));
+	EMIT(ctx, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+			   sizeof(struct ethhdr) + sizeof(struct iphdr)));
+	EMIT(ctx, BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0));
+	EMIT(ctx, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -(int)sizeof(struct iphdr)));
+
+	start = i;
+	count = 0;
+
+	for (i = start; i < end; i++) {
+		int ret = imr_jit_object(ctx, state, state->objects[i]);
+
+		if (ret < 0) {
+			fprintf(stderr, "failed to JIT object type %d\n",  state->objects[i]->type);
+			return ret;
+		}
+
+		count++;
+
+		if (state->objects[i]->type == IMR_OBJ_TYPE_VERDICT)
+			break;
+	}
+
+	if (i == end) {/* malformed -- no verdict */
+		fprintf(stderr, "rule had no verdict, start %d end %d\n", start, end);
+		internal_error("no verdict found in rule");
+	}
+
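+	/* All conditional jumps emitted for this rule were given an offset of 0;
+	 * retarget them to the first instruction of the next rule, so a failed
+	 * match (or the length check) falls through.  The BPF_EXIT instructions
+	 * emitted for verdicts are left alone.
+	 */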
+	pc = 0;
+	pc_end = ctx->len_cur - len_cur; /* start of next rule */
+
+	for (i = len_cur; pc < pc_end; pc++, i++) {
+		if (BPF_CLASS(ctx->img[i].code) == BPF_JMP) {
+			if (ctx->img[i].code == (BPF_EXIT | BPF_JMP))
+				continue;
+
+			fprintf(stderr, "fix jump to %d: should be %d, pc is %d\n", ctx->img[i].off, pc_end - pc, pc);
+			ctx->img[i].off = pc_end - pc - 1;
+		}
+	}
+
+	return count;
+}
+
+/* test function, would only return bpf prog */
+int imr_do_bpf(struct imr_state *s)
+{
+	struct bpfilter_gen_ctx ctx;
+	int ret, i = 0;
+
+	ret = bpfilter_gen_init(&ctx);
+	if (ret < 0)
+		return ret;
+
+	ret = bpfilter_gen_prologue(&ctx);
+	if (ret < 0)
+		return ret;
+
+	/* Hack: don't touch/use first 4 bpf registers */
+	s->regcount = 4;
+	do {
+		int insns = imr_jit_rule(&ctx, s, i);
+		if (insns < 0) {
+			ret = insns;
+			break;
+		}
+		if (insns == 0)
+			internal_error("rule jit yields 0 insns");
+
+		i += insns;
+	} while (i < s->num_objects);
+
+	ctx.ifindex = 1;
+	if (ret == 0) {
+		EMIT(&ctx, BPF_MOV32_IMM(BPF_REG_0, XDP_PASS));
+		EMIT(&ctx, BPF_EXIT_INSN());
+		bpfilter_gen_commit(&ctx);
+	} else {
+		fprintf(stderr, "Error when generating bpf code\n");
+	}
+
+	bpfilter_gen_destroy(&ctx);
+
+	return ret;
+}
diff --git a/net/bpfilter/imr.h b/net/bpfilter/imr.h
new file mode 100644
index 000000000000..3f602bf315df
--- /dev/null
+++ b/net/bpfilter/imr.h
@@ -0,0 +1,78 @@
+#ifndef IMR_HDR
+#define IMR_HDR
+#include <stdint.h>
+#include <stdio.h>
+
+enum imr_reg_num {
+	IMR_REG_0 = 0,
+	IMR_REG_1,
+	IMR_REG_2,
+	IMR_REG_3,
+	IMR_REG_4,
+	IMR_REG_5,
+	IMR_REG_6,
+	IMR_REG_7,
+	IMR_REG_8,
+	IMR_REG_9,
+	IMR_REG_10,
+	IMR_REG_COUNT,
+};
+
+struct imr_state;
+struct imr_object;
+
+enum imr_obj_type {
+	IMR_OBJ_TYPE_VERDICT,
+	IMR_OBJ_TYPE_IMMEDIATE,
+	IMR_OBJ_TYPE_RELATIONAL,
+	IMR_OBJ_TYPE_PAYLOAD,
+};
+
+enum imr_relop {
+	IMR_RELOP_EQ,
+	IMR_RELOP_NE,
+};
+
+enum imr_verdict {
+	IMR_VERDICT_NEXT,	/* move to next rule */
+	IMR_VERDICT_PASS,	/* end processing, accept packet */
+	IMR_VERDICT_DROP,	/* end processing, drop packet */
+};
+
+enum imr_payload_base {
+	IMR_PAYLOAD_BASE_INVALID,
+	IMR_PAYLOAD_BASE_LL,	/* link layer (mac) header */
+	IMR_PAYLOAD_BASE_NH,	/* network header */
+	IMR_PAYLOAD_BASE_TH,	/* transport header */
+};
+
+struct imr_state *imr_state_alloc(void);
+void imr_state_free(struct imr_state *s);
+void imr_state_print(FILE *fp, struct imr_state *s);
+
+static inline int imr_state_rule_begin(struct imr_state *s)
+{
+	/* nothing for now */
+	return 0;
+}
+
+int imr_state_rule_end(struct imr_state *s);
+
+void imr_register_store(struct imr_state *s, enum imr_reg_num r, struct imr_object *o);
+struct imr_object *imr_register_load(const struct imr_state *s, enum imr_reg_num r);
+
+struct imr_object *imr_object_alloc(enum imr_obj_type t);
+void imr_object_free(struct imr_object *o);
+
+struct imr_object *imr_object_alloc_imm32(uint32_t value);
+struct imr_object *imr_object_alloc_imm64(uint64_t value);
+struct imr_object *imr_object_alloc_verdict(enum imr_verdict v);
+
+struct imr_object *imr_object_alloc_payload(enum imr_payload_base b, uint16_t off, uint16_t len);
+struct imr_object *imr_object_alloc_relational(enum imr_relop op, struct imr_object *l, struct imr_object *r);
+
+int imr_state_add_obj(struct imr_state *s, struct imr_object *o);
+
+int imr_do_bpf(struct imr_state *s);
+
+#endif /* IMR_HDR */
-- 
2.16.1
