Message-Id: <20190830005336.23604-5-pablo@netfilter.org>
Date: Fri, 30 Aug 2019 02:53:36 +0200
From: Pablo Neira Ayuso <pablo@...filter.org>
To: netfilter-devel@...r.kernel.org
Cc: davem@...emloft.net, netdev@...r.kernel.org, vishal@...lsio.com,
jakub.kicinski@...ronome.com, saeedm@...lanox.com, jiri@...nulli.us
Subject: [PATCH net-next 4/4] netfilter: nft_payload: packet mangling offload support

This patch allows packet fields to be mangled through the hardware
offload infrastructure.

Signed-off-by: Pablo Neira Ayuso <pablo@...filter.org>
---
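[ Not part of the patch, just an illustrative aside: a driver's
  flow_offload callback could consume the mangle actions built by this
  expression roughly as in the sketch below.  The helper name
  example_parse_mangle() is hypothetical, and the sketch assumes the
  entries end up tagged as FLOW_ACTION_MANGLE by the offload
  infrastructure. ]

/* Illustrative sketch only: accept the mangle header types that
 * nft_payload_set_offload() can emit and reject anything else.
 */
#include <linux/errno.h>
#include <net/flow_offload.h>

static int example_parse_mangle(const struct flow_action *action)
{
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			/* act->mangle.offset is relative to the header
			 * selected by htype; program the rewrite here.
			 */
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

Carrying the header base in mangle.htype and keeping the offset relative
to that header lets a driver map each rewrite onto the matching hardware
header field without re-parsing the rule.
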
 net/netfilter/nft_payload.c | 72 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 22a80eb60222..39882f81ca8d 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -562,12 +562,84 @@ static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr
 	return -1;
 }
 
+static int nft_payload_offload_set_nh(struct nft_offload_ctx *ctx,
+				      struct nft_flow_rule *flow,
+				      const struct nft_payload_set *priv)
+{
+	int type = FLOW_ACT_MANGLE_UNSPEC;
+
+	switch (ctx->dep.l3num) {
+	case htons(ETH_P_IP):
+		type = FLOW_ACT_MANGLE_HDR_TYPE_IP4;
+		break;
+	case htons(ETH_P_IPV6):
+		type = FLOW_ACT_MANGLE_HDR_TYPE_IP6;
+		break;
+	}
+
+	return type;
+}
+
+static int nft_payload_offload_set_th(struct nft_offload_ctx *ctx,
+				      struct nft_flow_rule *flow,
+				      const struct nft_payload_set *priv)
+{
+	int type = FLOW_ACT_MANGLE_UNSPEC;
+
+	switch (ctx->dep.protonum) {
+	case IPPROTO_TCP:
+		type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
+		break;
+	case IPPROTO_UDP:
+		type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
+		break;
+	}
+
+	return type;
+}
+
+static int nft_payload_set_offload(struct nft_offload_ctx *ctx,
+				   struct nft_flow_rule *flow,
+				   const struct nft_expr *expr)
+{
+	const struct nft_payload_set *priv = nft_expr_priv(expr);
+	struct nft_offload_reg *sreg = &ctx->regs[priv->sreg];
+	int type = FLOW_ACT_MANGLE_UNSPEC;
+	struct flow_action_entry *entry;
+
+	switch (priv->base) {
+	case NFT_PAYLOAD_LL_HEADER:
+		type = FLOW_ACT_MANGLE_HDR_TYPE_ETH;
+		break;
+	case NFT_PAYLOAD_NETWORK_HEADER:
+		type = nft_payload_offload_set_nh(ctx, flow, priv);
+		break;
+	case NFT_PAYLOAD_TRANSPORT_HEADER:
+		type = nft_payload_offload_set_th(ctx, flow, priv);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	entry = &flow->rule->action.entries[ctx->num_actions++];
+	entry->mangle.htype = type;
+	entry->mangle.offset = priv->offset;
+	entry->mangle.len = priv->len;
+
+	memcpy(entry->mangle.val, sreg->data.data, priv->len);
+	memset(entry->mangle.mask, 0xff, priv->len);
+
+	return type != FLOW_ACT_MANGLE_UNSPEC ? 0 : -EOPNOTSUPP;
+}
+
 static const struct nft_expr_ops nft_payload_set_ops = {
 	.type		= &nft_payload_type,
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
 	.eval		= nft_payload_set_eval,
 	.init		= nft_payload_set_init,
 	.dump		= nft_payload_set_dump,
+	.offload	= nft_payload_set_offload,
 };
 
 static const struct nft_expr_ops *
--
2.11.0