Message-Id: <20191018040748.30593-10-toshiaki.makita1@gmail.com>
Date: Fri, 18 Oct 2019 13:07:42 +0900
From: Toshiaki Makita <toshiaki.makita1@...il.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <jakub.kicinski@...ronome.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>,
Pablo Neira Ayuso <pablo@...filter.org>,
Jozsef Kadlecsik <kadlec@...filter.org>,
Florian Westphal <fw@...len.de>,
Pravin B Shelar <pshelar@....org>
Cc: Toshiaki Makita <toshiaki.makita1@...il.com>,
netdev@...r.kernel.org, bpf@...r.kernel.org,
William Tu <u9012063@...il.com>,
Stanislav Fomichev <sdf@...ichev.me>
Subject: [RFC PATCH v2 bpf-next 09/15] xdp_flow: Implement flow replacement/deletion logic in xdp_flow kmod
As struct flow_rule keeps its flow_dissector and key/mask containers in
separate storage, we need to serialize them in some way to pass them to
the UMH.
Convert the flow_rule into the flow key form used by the xdp_flow bpf
prog and pass that.
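Roughly, the conversion flattens each dissector/match pair into one
fixed-layout key plus a mask with the same layout, so both can be
embedded directly in the mbox request. The sketch below only
illustrates that idea with hypothetical, simplified names; the real
definitions live in net/xdp_flow/xdp_flow.h and msgfmt.h and differ in
detail:

  /* Illustrative sketch only (needs <linux/types.h>, <linux/if_ether.h>).
   * A flat key that the kmod and the bpf prog can share, unlike struct
   * flow_rule whose dissector and key/mask containers are allocated
   * separately.  IPv6 addresses, TCP flags etc. are omitted here.
   */
  struct xdp_flow_key_sketch {
  	struct { u8 dst[ETH_ALEN], src[ETH_ALEN]; __be16 type; } eth;
  	struct { __be16 tpid, tci; } vlan;
  	struct { u8 proto, tos, ttl; } ip;
  	struct { __be32 src, dst; } ipv4;
  	struct { __be16 src, dst; } l4port;
  };

  /* A match is then two instances of the same layout: the key holds the
   * values, the mask marks which bits are significant.
   */
  struct xdp_flow_match_sketch {
  	struct xdp_flow_key_sketch key;
  	struct xdp_flow_key_sketch mask;
  };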
Signed-off-by: Toshiaki Makita <toshiaki.makita1@...il.com>
---
net/xdp_flow/xdp_flow_kern_mod.c | 331 ++++++++++++++++++++++++++++++++++++++-
1 file changed, 329 insertions(+), 2 deletions(-)
diff --git a/net/xdp_flow/xdp_flow_kern_mod.c b/net/xdp_flow/xdp_flow_kern_mod.c
index 2c80590..e70a86a 100644
--- a/net/xdp_flow/xdp_flow_kern_mod.c
+++ b/net/xdp_flow/xdp_flow_kern_mod.c
@@ -3,8 +3,10 @@
#include <linux/module.h>
#include <linux/umh.h>
#include <linux/sched/signal.h>
+#include <linux/etherdevice.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
#include <linux/filter.h>
#include "xdp_flow.h"
#include "msgfmt.h"
@@ -24,9 +26,261 @@ struct xdp_flow_prog {
static struct rhashtable progs;
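+/* Kernel-side shadow copy of an offloaded flow, keyed by the flow cookie */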
+struct xdp_flow_rule {
+ struct rhash_head ht_node;
+ unsigned long cookie;
+ struct xdp_flow_key key;
+ struct xdp_flow_key mask;
+};
+
+static const struct rhashtable_params rules_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct xdp_flow_rule, cookie),
+ .head_offset = offsetof(struct xdp_flow_rule, ht_node),
+ .automatic_shrinking = true,
+};
+
+static struct rhashtable rules;
+
extern char xdp_flow_umh_start;
extern char xdp_flow_umh_end;
+static int xdp_flow_parse_actions(struct xdp_flow_actions *actions,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry *act;
+ int i;
+
+ if (!flow_action_has_entries(flow_action))
+ return 0;
+
+ if (flow_action->num_entries > MAX_XDP_FLOW_ACTIONS)
+ return -ENOBUFS;
+
+ flow_action_for_each(i, act, flow_action) {
+ struct xdp_flow_action *action = &actions->actions[i];
+
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ action->id = XDP_FLOW_ACTION_ACCEPT;
+ break;
+ case FLOW_ACTION_DROP:
+ action->id = XDP_FLOW_ACTION_DROP;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MANGLE:
+ case FLOW_ACTION_CSUM:
+ /* TODO: implement these */
+ /* fall through */
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+ }
+ actions->num_actions = flow_action->num_entries;
+
+ return 0;
+}
+
+static int xdp_flow_parse_ports(struct xdp_flow_key *key,
+ struct xdp_flow_key *mask,
+ struct flow_cls_offload *f, u8 ip_proto)
+{
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_ports match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only UDP and TCP keys are supported");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ports(rule, &match);
+
+ key->l4port.src = match.key->src;
+ mask->l4port.src = match.mask->src;
+ key->l4port.dst = match.key->dst;
+ mask->l4port.dst = match.mask->dst;
+
+ return 0;
+}
+
+static int xdp_flow_parse_tcp(struct xdp_flow_key *key,
+ struct xdp_flow_key *mask,
+ struct flow_cls_offload *f, u8 ip_proto)
+{
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_tcp match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "TCP keys supported only for TCP");
+ return -EINVAL;
+ }
+
+ flow_rule_match_tcp(rule, &match);
+
+ key->tcp.flags = match.key->flags;
+ mask->tcp.flags = match.mask->flags;
+
+ return 0;
+}
+
+static int xdp_flow_parse_ip(struct xdp_flow_key *key,
+ struct xdp_flow_key *mask,
+ struct flow_cls_offload *f, __be16 n_proto)
+{
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_ip match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
+ return 0;
+
+ if (n_proto != htons(ETH_P_IP) && n_proto != htons(ETH_P_IPV6)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "IP keys supported only for IPv4/6");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ip(rule, &match);
+
+ key->ip.ttl = match.key->ttl;
+ mask->ip.ttl = match.mask->ttl;
+ key->ip.tos = match.key->tos;
+ mask->ip.tos = match.mask->tos;
+
+ return 0;
+}
+
+static int xdp_flow_parse(struct xdp_flow_key *key, struct xdp_flow_key *mask,
+ struct xdp_flow_actions *actions,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+ __be16 n_proto = 0, n_proto_mask = 0;
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+ int err;
+
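+ /* Reject matches on dissector keys the xdp_flow bpf prog cannot handle */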
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ n_proto = match.key->n_proto;
+ n_proto_mask = match.mask->n_proto;
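+ /* ETH_P_ALL means any protocol; leave eth.type wildcarded */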
+ if (n_proto == htons(ETH_P_ALL)) {
+ n_proto = 0;
+ n_proto_mask = 0;
+ }
+
+ key->eth.type = n_proto;
+ mask->eth.type = n_proto_mask;
+
+ if (match.mask->ip_proto) {
+ ip_proto = match.key->ip_proto;
+ key->ip.proto = ip_proto;
+ mask->ip.proto = match.mask->ip_proto;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ ether_addr_copy(key->eth.dst, match.key->dst);
+ ether_addr_copy(mask->eth.dst, match.mask->dst);
+ ether_addr_copy(key->eth.src, match.key->src);
+ ether_addr_copy(mask->eth.src, match.mask->src);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ key->vlan.tpid = match.key->vlan_tpid;
+ mask->vlan.tpid = match.mask->vlan_tpid;
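+ /* Repack the dissected VLAN id and priority into TCI form */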
+ key->vlan.tci = htons(match.key->vlan_id |
+ (match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT));
+ mask->vlan.tci = htons(match.mask->vlan_id |
+ (match.mask->vlan_priority <<
+ VLAN_PRIO_SHIFT));
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+
+ key->ipv4.src = match.key->src;
+ mask->ipv4.src = match.mask->src;
+ key->ipv4.dst = match.key->dst;
+ mask->ipv4.dst = match.mask->dst;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+
+ key->ipv6.src = match.key->src;
+ mask->ipv6.src = match.mask->src;
+ key->ipv6.dst = match.key->dst;
+ mask->ipv6.dst = match.mask->dst;
+ }
+
+ err = xdp_flow_parse_ports(key, mask, f, ip_proto);
+ if (err)
+ return err;
+ err = xdp_flow_parse_tcp(key, mask, f, ip_proto);
+ if (err)
+ return err;
+
+ err = xdp_flow_parse_ip(key, mask, f, n_proto);
+ if (err)
+ return err;
+
+ /* TODO: encapsulation related tasks */
+
+ return xdp_flow_parse_actions(actions, &rule->action,
+ f->common.extack);
+}
+
static void shutdown_umh(void)
{
struct task_struct *tsk;
@@ -77,12 +331,78 @@ static int transact_umh(struct mbox_request *req, u32 *id)
static int xdp_flow_replace(struct net_device *dev, struct flow_cls_offload *f)
{
- return -EOPNOTSUPP;
+ struct xdp_flow_rule *rule;
+ struct mbox_request *req;
+ int err;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ req->flow.priority = f->common.prio >> 16;
+ err = xdp_flow_parse(&req->flow.key, &req->flow.mask,
+ &req->flow.actions, f);
+ if (err)
+ goto err_rule;
+
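+ /* Cache key/mask for deletion; FLOW_CLS_DESTROY carries only the cookie */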
+ rule->cookie = f->cookie;
+ rule->key = req->flow.key;
+ rule->mask = req->flow.mask;
+ err = rhashtable_insert_fast(&rules, &rule->ht_node, rules_params);
+ if (err)
+ goto err_rule;
+
+ req->cmd = XDP_FLOW_CMD_REPLACE;
+ req->ifindex = dev->ifindex;
+ err = transact_umh(req, NULL);
+ if (err)
+ goto err_rht;
+out:
+ kfree(req);
+
+ return err;
+err_rht:
+ rhashtable_remove_fast(&rules, &rule->ht_node, rules_params);
+err_rule:
+ kfree(rule);
+ goto out;
}
static int xdp_flow_destroy(struct net_device *dev, struct flow_cls_offload *f)
{
- return -EOPNOTSUPP;
+ struct xdp_flow_rule *rule;
+ struct mbox_request *req;
+ int err;
+
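+ /* Look up the copy saved at replace time; nothing to do if never offloaded */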
+ rule = rhashtable_lookup_fast(&rules, &f->cookie, rules_params);
+ if (!rule)
+ return 0;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->flow.priority = f->common.prio >> 16;
+ req->flow.key = rule->key;
+ req->flow.mask = rule->mask;
+ req->cmd = XDP_FLOW_CMD_DELETE;
+ req->ifindex = dev->ifindex;
+ err = transact_umh(req, NULL);
+
+ kfree(req);
+
+ if (!err) {
+ rhashtable_remove_fast(&rules, &rule->ht_node, rules_params);
+ kfree(rule);
+ }
+
+ return err;
}
static int xdp_flow_setup_flower(struct net_device *dev,
@@ -308,6 +628,10 @@ static int __init load_umh(void)
if (err)
return err;
+ err = rhashtable_init(&rules, &rules_params);
+ if (err)
+ goto err_progs;
+
mutex_lock(&xdp_flow_ops.lock);
if (!xdp_flow_ops.stop) {
err = -EFAULT;
@@ -327,6 +651,8 @@ static int __init load_umh(void)
return 0;
err:
mutex_unlock(&xdp_flow_ops.lock);
+ rhashtable_destroy(&rules);
+err_progs:
rhashtable_destroy(&progs);
return err;
}
@@ -340,6 +666,7 @@ static void __exit fini_umh(void)
xdp_flow_ops.setup = NULL;
xdp_flow_ops.setup_cb = NULL;
mutex_unlock(&xdp_flow_ops.lock);
+ rhashtable_destroy(&rules);
rhashtable_destroy(&progs);
}
module_init(load_umh);
--
1.8.3.1