Date:   Wed, 16 Feb 2022 14:29:26 +0100
From:   Tobias Waldekranz <tobias@...dekranz.com>
To:     davem@...emloft.net, kuba@...nel.org
Cc:     Andrew Lunn <andrew@...n.ch>,
        Vivien Didelot <vivien.didelot@...il.com>,
        Florian Fainelli <f.fainelli@...il.com>,
        Vladimir Oltean <olteanv@...il.com>,
        Jiri Pirko <jiri@...nulli.us>,
        Ivan Vecera <ivecera@...hat.com>,
        Roopa Prabhu <roopa@...dia.com>,
        Nikolay Aleksandrov <nikolay@...dia.com>,
        Russell King <linux@...linux.org.uk>, netdev@...r.kernel.org,
        linux-kernel@...r.kernel.org, bridge@...ts.linux-foundation.org
Subject: [RFC net-next 1/9] net: bridge: vlan: Introduce multiple spanning trees (MST)

Before this commit, the bridge could manage the forwarding state of
a port either globally or per VLAN, i.e. as either a 1:N or an N:N
mapping of spanning tree states to VLANs. There are two issues with
this:

1. In order to support MSTP (802.1Q-2018 13.5), the controlling entity
   expects the bridge to be able to group multiple VLANs to operate on
   the same tree (MST). I.e. an M:N mapping, where M <= N.

2. Some hardware (e.g. mv88e6xxx) has a smaller pool of spanning tree
   groups than VLANs. I.e. the full set of 4k VLANs can be configured,
   but each VLAN must be mapped to one of only 64 spanning trees.

Issue (1) can be worked around in software, albeit less efficiently
and non-atomically, by iterating over all affected VLANs whenever the
state of a tree changes, making sure that they all stay in sync.
Unfortunately, issue (2) means that offloading is not possible in
this architecture.
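
As an illustration (VIDs, MST IDs and states made up): an MSTP
controller could group VLANs 10, 20 and 30 into one tree and VLAN 40
into another, leaving each port with one state per tree rather than
one state per VLAN:

    VID 10 --.
    VID 20 ---+--> MST 1 (e.g. forwarding on port 1)
    VID 30 --'
    VID 40 ------> MST 2 (e.g. blocking on port 1)

On mv88e6xxx, this would consume two of the 64 spanning tree groups,
regardless of how many VLANs map to the two trees.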

Therefore, add a level of indirection to the per-VLAN STP state: the
state now lives in a separate, reference-counted MST object that
multiple VLANs can share. By default, each new VLAN is assigned to a
separate MST, i.e. this commit introduces no functional changes.

Upcoming commits will then extend the VLAN DB configuration to allow
arbitrary M:N mappings.
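
To make the indirection concrete, here is a stand-alone model in
plain C (illustrative only; the names loosely mirror the patch, and
refcounting/RCU are left out). Once several VLANs point at one
shared MST object, a single state write is observed through every
member VLAN:

    #include <stdio.h>

    struct mst {
            unsigned char state;    /* BR_STATE_* in the real code */
    };

    struct vlan {
            unsigned short vid;
            struct mst *mst;        /* many VLANs may share one MST */
    };

    int main(void)
    {
            struct mst tree = { .state = 3 };       /* BR_STATE_FORWARDING */
            struct vlan v10 = { .vid = 10, .mst = &tree };
            struct vlan v20 = { .vid = 20, .mst = &tree };

            tree.state = 4;                         /* BR_STATE_BLOCKING */

            /* Both VLANs observe the new state through the shared MST: */
            printf("vid %u: state %u\n", v10.vid, v10.mst->state);
            printf("vid %u: state %u\n", v20.vid, v20.mst->state);
            return 0;
    }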

Signed-off-by: Tobias Waldekranz <tobias@...dekranz.com>
---
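A sketch of how a switchdev driver might consume the new
br_vlan_get_mstid() helper to pick a hardware spanning tree group
(STG); my_hw_set_stg() is a made-up stand-in for whatever programs
the hardware, not something this series adds:

    static int my_vlan_to_stg(const struct net_device *br_dev, u16 vid)
    {
            u16 mstid;
            int err;

            err = br_vlan_get_mstid(br_dev, vid, &mstid);
            if (err)
                    return err;

            /* e.g. mv88e6xxx: map mstid into one of the 64 STGs */
            return my_hw_set_stg(vid, mstid);
    }
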
 include/linux/if_bridge.h    |   6 ++
 net/bridge/br_private.h      |  42 +++++--
 net/bridge/br_vlan.c         | 202 +++++++++++++++++++++++++++++++++--
 net/bridge/br_vlan_options.c |   9 +-
 4 files changed, 237 insertions(+), 22 deletions(-)
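
A note on the lifetime rules used throughout: lockless readers
dereference v->mst under rcu_read_lock(), updaters rely on RTNL, and
the final br_vlan_mst_put() defers the kfree() through call_rcu().
Schematically (the usual RCU replace pattern, cf.
br_vlan_mst_migrate() and br_vlan_get_state_rcu() below):

    /* updater, RTNL held, replacing a VLAN's MST */
    old = rtnl_dereference(v->mst);
    rcu_assign_pointer(v->mst, new_mst);
    if (refcount_dec_and_test(&old->refcnt))
            call_rcu(&old->rcu, br_vlan_mst_rcu_free);

    /* reader, fast path, inside an rcu_read_lock() section */
    state = rcu_dereference(v->mst)->state;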

diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 509e18c7e740..a3b0e95c3047 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -118,6 +118,7 @@ int br_vlan_get_info(const struct net_device *dev, u16 vid,
 		     struct bridge_vlan_info *p_vinfo);
 int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
 			 struct bridge_vlan_info *p_vinfo);
+int br_vlan_get_mstid(const struct net_device *dev, u16 vid, u16 *mstid);
 #else
 static inline bool br_vlan_enabled(const struct net_device *dev)
 {
@@ -150,6 +151,11 @@ static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
 {
 	return -EINVAL;
 }
+static inline int br_vlan_get_mstid(const struct net_device *dev, u16 vid,
+				    u16 *mstid)
+{
+	return -EINVAL;
+}
 #endif
 
 #if IS_ENABLED(CONFIG_BRIDGE)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2661dda1a92b..7781e7a4449b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -153,6 +153,14 @@ struct br_tunnel_info {
 	struct metadata_dst __rcu	*tunnel_dst;
 };
 
+struct br_vlan_mst {
+	refcount_t refcnt;
+	u16 id;
+	u8 state;
+
+	struct rcu_head rcu;
+};
+
 /* private vlan flags */
 enum {
 	BR_VLFLAG_PER_PORT_STATS = BIT(0),
@@ -168,7 +176,8 @@ enum {
  * @vid: VLAN id
  * @flags: bridge vlan flags
  * @priv_flags: private (in-kernel) bridge vlan flags
- * @state: STP state (e.g. blocking, learning, forwarding)
+ * @mst: the port's STP state (e.g. blocking, learning, forwarding) in the MST
+ *       associated with this VLAN
  * @stats: per-cpu VLAN statistics
  * @br: if MASTER flag set, this points to a bridge struct
  * @port: if MASTER flag unset, this points to a port struct
@@ -192,7 +201,7 @@ struct net_bridge_vlan {
 	u16				vid;
 	u16				flags;
 	u16				priv_flags;
-	u8				state;
+	struct br_vlan_mst		__rcu *mst;
 	struct pcpu_sw_netstats __percpu *stats;
 	union {
 		struct net_bridge	*br;
@@ -215,6 +224,20 @@ struct net_bridge_vlan {
 	struct rcu_head			rcu;
 };
 
+static inline u8 br_vlan_get_state_rcu(const struct net_bridge_vlan *v)
+{
+	const struct br_vlan_mst *mst = rcu_dereference(v->mst);
+
+	return mst->state;
+}
+
+static inline u8 br_vlan_get_state_rtnl(const struct net_bridge_vlan *v)
+{
+	const struct br_vlan_mst *mst = rtnl_dereference(v->mst);
+
+	return mst->state;
+}
+
 /**
  * struct net_bridge_vlan_group
  *
@@ -1179,7 +1202,7 @@ br_multicast_port_ctx_state_disabled(const struct net_bridge_mcast_port *pmctx)
 	return pmctx->port->state == BR_STATE_DISABLED ||
 	       (br_multicast_port_ctx_is_vlan(pmctx) &&
 		(br_multicast_port_ctx_vlan_disabled(pmctx) ||
-		 pmctx->vlan->state == BR_STATE_DISABLED));
+		 br_vlan_get_state_rcu(pmctx->vlan) == BR_STATE_DISABLED));
 }
 
 static inline bool
@@ -1188,7 +1211,7 @@ br_multicast_port_ctx_state_stopped(const struct net_bridge_mcast_port *pmctx)
 	return br_multicast_port_ctx_state_disabled(pmctx) ||
 	       pmctx->port->state == BR_STATE_BLOCKING ||
 	       (br_multicast_port_ctx_is_vlan(pmctx) &&
-		pmctx->vlan->state == BR_STATE_BLOCKING);
+		br_vlan_get_state_rcu(pmctx->vlan) == BR_STATE_BLOCKING);
 }
 
 static inline bool
@@ -1729,15 +1752,12 @@ bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
 bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
 			      const struct net_bridge_vlan *v_opts);
+u16 br_vlan_mstid_get(const struct net_bridge_vlan *v);
 
-/* vlan state manipulation helpers using *_ONCE to annotate lock-free access */
-static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v)
-{
-	return READ_ONCE(v->state);
-}
-
 static inline void br_vlan_set_state(struct net_bridge_vlan *v, u8 state)
 {
-	WRITE_ONCE(v->state, state);
+	struct br_vlan_mst *mst = rtnl_dereference(v->mst);
+
+	mst->state = state;
 }
 
 static inline u8 br_vlan_get_pvid_state(const struct net_bridge_vlan_group *vg)
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 6315e43a7a3e..b0383ec6cc91 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -34,6 +34,189 @@ static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
 	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
 }
 
+static void br_vlan_mst_rcu_free(struct rcu_head *rcu)
+{
+	struct br_vlan_mst *mst = container_of(rcu, struct br_vlan_mst, rcu);
+
+	kfree(mst);
+}
+
+static void br_vlan_mst_put(struct net_bridge_vlan *v)
+{
+	struct br_vlan_mst *mst = rtnl_dereference(v->mst);
+
+	if (refcount_dec_and_test(&mst->refcnt))
+		call_rcu(&mst->rcu, br_vlan_mst_rcu_free);
+}
+
+static struct br_vlan_mst *br_vlan_mst_new(u16 id)
+{
+	struct br_vlan_mst *mst;
+
+	mst = kzalloc(sizeof(*mst), GFP_KERNEL);
+	if (!mst)
+		return NULL;
+
+	refcount_set(&mst->refcnt, 1);
+	mst->id = id;
+	mst->state = BR_STATE_FORWARDING;
+	return mst;
+}
+
+static int br_vlan_mstid_get_free(struct net_bridge *br)
+{
+	const struct net_bridge_vlan *v;
+	struct rhashtable_iter iter;
+	struct br_vlan_mst *mst;
+	unsigned long *busy;
+	int err = 0;
+	u16 id;
+
+	busy = bitmap_zalloc(VLAN_N_VID, GFP_KERNEL);
+	if (!busy)
+		return -ENOMEM;
+
+	/* MSTID 0 is reserved for the CIST */
+	set_bit(0, busy);
+
+	rhashtable_walk_enter(&br_vlan_group(br)->vlan_hash, &iter);
+	rhashtable_walk_start(&iter);
+
+	while ((v = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(v)) {
+			err = PTR_ERR(v);
+			break;
+		}
+
+		mst = rtnl_dereference(v->mst);
+		set_bit(mst->id, busy);
+	}
+
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
+
+	if (!err) {
+		id = find_first_zero_bit(busy, VLAN_N_VID);
+		if (id >= VLAN_N_VID)
+			err = -ENOSPC;
+	}
+
+	bitmap_free(busy);
+	return err ? : id;
+}
+
+u16 br_vlan_mstid_get(const struct net_bridge_vlan *v)
+{
+	const struct net_bridge_vlan *masterv;
+	const struct br_vlan_mst *mst;
+	const struct net_bridge *br;
+
+	if (br_vlan_is_master(v))
+		br = v->br;
+	else
+		br = v->port->br;
+
+	masterv = br_vlan_lookup(&br_vlan_group(br)->vlan_hash, v->vid);
+
+	mst = rtnl_dereference(masterv->mst);
+
+	return mst->id;
+}
+
+int br_vlan_get_mstid(const struct net_device *dev, u16 vid, u16 *mstid)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_vlan *v;
+
+	v = br_vlan_lookup(&br_vlan_group(br)->vlan_hash, vid);
+	if (!v)
+		return -ENOENT;
+
+	*mstid = br_vlan_mstid_get(v);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(br_vlan_get_mstid);
+
+static struct br_vlan_mst *br_vlan_group_mst_get(struct net_bridge_vlan_group *vg, u16 mstid)
+{
+	struct net_bridge_vlan *v;
+	struct br_vlan_mst *mst;
+
+	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		mst = rtnl_dereference(v->mst);
+		if (mst->id == mstid) {
+			refcount_inc(&mst->refcnt);
+			return mst;
+		}
+	}
+
+	return NULL;
+}
+
+static int br_vlan_mst_migrate(struct net_bridge_vlan *v, u16 mstid)
+{
+	struct net_bridge_vlan_group *vg;
+	struct br_vlan_mst *mst;
+
+	if (br_vlan_is_master(v))
+		vg = br_vlan_group(v->br);
+	else
+		vg = nbp_vlan_group(v->port);
+
+	mst = br_vlan_group_mst_get(vg, mstid);
+	if (!mst) {
+		mst = br_vlan_mst_new(mstid);
+		if (!mst)
+			return -ENOMEM;
+	}
+
+	if (rtnl_dereference(v->mst))
+		br_vlan_mst_put(v);
+
+	rcu_assign_pointer(v->mst, mst);
+	return 0;
+}
+
+static int br_vlan_mst_init_master(struct net_bridge_vlan *v)
+{
+	struct net_bridge *br = v->br;
+	struct br_vlan_mst *mst;
+	int mstid;
+
+	/* The bridge VLAN is always added first, either as context or
+	 * as a proper entry. Since the bridge default is a 1:1 map
+	 * from VID to MST, we always need to allocate a new ID in
+	 * this case.
+	 */
+	mstid = br_vlan_mstid_get_free(br);
+	if (mstid < 0)
+		return mstid;
+
+	mst = br_vlan_mst_new(mstid);
+	if (!mst)
+		return -ENOMEM;
+
+	rcu_assign_pointer(v->mst, mst);
+	return 0;
+}
+
+static int br_vlan_mst_init_port(struct net_bridge_vlan *v)
+{
+	u16 mstid;
+
+	mstid = br_vlan_mstid_get(v);
+
+	return br_vlan_mst_migrate(v, mstid);
+}
+
+static int br_vlan_mst_init(struct net_bridge_vlan *v)
+{
+	if (br_vlan_is_master(v))
+		return br_vlan_mst_init_master(v);
+	else
+		return br_vlan_mst_init_port(v);
+}
+
 static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
 			    const struct net_bridge_vlan *v)
 {
@@ -41,7 +222,7 @@ static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
 		return false;
 
 	smp_wmb();
-	br_vlan_set_pvid_state(vg, v->state);
+	br_vlan_set_pvid_state(vg, br_vlan_get_state_rtnl(v));
 	vg->pvid = v->vid;
 
 	return true;
@@ -301,13 +482,14 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
 		vg->num_vlans++;
 	}
 
-	/* set the state before publishing */
-	v->state = BR_STATE_FORWARDING;
+	err = br_vlan_mst_init(v);
+	if (err)
+		goto out_fdb_insert;
 
 	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 					    br_vlan_rht_params);
 	if (err)
-		goto out_fdb_insert;
+		goto out_mst_init;
 
 	__vlan_add_list(v);
 	__vlan_add_flags(v, flags);
@@ -318,6 +500,9 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
 out:
 	return err;
 
+out_mst_init:
+	br_vlan_mst_put(v);
+
 out_fdb_insert:
 	if (br_vlan_should_use(v)) {
 		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
@@ -385,6 +570,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
 		call_rcu(&v->rcu, nbp_vlan_rcu_free);
 	}
 
+	br_vlan_mst_put(v);
 	br_vlan_put_master(masterv);
 out:
 	return err;
@@ -578,7 +764,7 @@ static bool __allowed_ingress(const struct net_bridge *br,
 		goto drop;
 
 	if (*state == BR_STATE_FORWARDING) {
-		*state = br_vlan_get_state(v);
+		*state = br_vlan_get_state_rcu(v);
 		if (!br_vlan_state_allowed(*state, true))
 			goto drop;
 	}
@@ -631,7 +817,7 @@ bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 	br_vlan_get_tag(skb, &vid);
 	v = br_vlan_find(vg, vid);
 	if (v && br_vlan_should_use(v) &&
-	    br_vlan_state_allowed(br_vlan_get_state(v), false))
+	    br_vlan_state_allowed(br_vlan_get_state_rcu(v), false))
 		return true;
 
 	return false;
@@ -665,7 +851,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 	}
 
 	v = br_vlan_find(vg, *vid);
-	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
+	if (v && br_vlan_state_allowed(br_vlan_get_state_rcu(v), true))
 		return true;
 
 	return false;
diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
index a6382973b3e7..0b1099709d4b 100644
--- a/net/bridge/br_vlan_options.c
+++ b/net/bridge/br_vlan_options.c
@@ -43,14 +43,14 @@ bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
 	u8 range_mc_rtr = br_vlan_multicast_router(range_end);
 	u8 curr_mc_rtr = br_vlan_multicast_router(v_curr);
 
-	return v_curr->state == range_end->state &&
+	return br_vlan_get_state_rtnl(v_curr) == br_vlan_get_state_rtnl(range_end) &&
 	       __vlan_tun_can_enter_range(v_curr, range_end) &&
 	       curr_mc_rtr == range_mc_rtr;
 }
 
 bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
 {
-	if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) ||
+	if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state_rtnl(v)) ||
 	    !__vlan_tun_put(skb, v))
 		return false;
 
@@ -99,7 +99,7 @@ static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
 		return -EBUSY;
 	}
 
-	if (v->state == state)
+	if (br_vlan_get_state_rtnl(v) == state)
 		return 0;
 
 	if (v->vid == br_get_pvid(vg))
@@ -294,7 +294,8 @@ bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
 	       ((v_curr->priv_flags ^ r_end->priv_flags) &
 		BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
 		br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
-					       &r_end->br_mcast_ctx);
+					       &r_end->br_mcast_ctx) &&
+		br_vlan_mstid_get(v_curr) == br_vlan_mstid_get(r_end);
 }
 
 bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
-- 
2.25.1
