Message-Id: <20220409105857.803667-5-razor@blackwall.org>
Date: Sat, 9 Apr 2022 13:58:55 +0300
From: Nikolay Aleksandrov <razor@...ckwall.org>
To: netdev@...r.kernel.org
Cc: roopa@...dia.com, kuba@...nel.org, davem@...emloft.net,
bridge@...ts.linux-foundation.org,
Nikolay Aleksandrov <razor@...ckwall.org>
Subject: [PATCH net-next 4/6] net: bridge: fdb: add support for flush filtering based on ndm flags and state

Add support for fdb flush filtering based on ndm flags and state. The
new attributes allow users to specify a value and mask pair, which is
mapped to the bridge-specific per-fdb flags. NTF_USE is used to
represent the added_by_user flag because NTF_USE sets added_by_user on
fdb add and there is no 1:1 ndm flag mapping for it.

Signed-off-by: Nikolay Aleksandrov <razor@...ckwall.org>
---
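Note (illustration only, not part of the patch): the sketch below mirrors the
ndm-state-to-fdb-flag mapping added in this patch in plain C, to show which
entries a given value/mask pair would select. It assumes the flush path
matches entries by masked equality against the descriptor built here; the
FDB_* bits and the sample entry are made-up stand-ins for the bridge-private
BR_FDB_* flags, and only NUD_PERMANENT/NUD_NOARP come from linux/neighbour.h.

#include <stdio.h>
#include <stdbool.h>
#include <linux/neighbour.h>	/* NUD_PERMANENT, NUD_NOARP */

/* stand-ins for the bridge-private BR_FDB_* bits (illustrative only) */
enum { FDB_LOCAL = 1UL << 0, FDB_STATIC = 1UL << 1 };

/* same mapping as __ndm_state_to_fdb_flags() in the patch */
static unsigned long state_to_flags(unsigned short ndm_state)
{
	unsigned long flags = 0;

	if (ndm_state & NUD_PERMANENT)
		flags |= FDB_LOCAL;
	if (ndm_state & NUD_NOARP)
		flags |= FDB_STATIC;
	return flags;
}

int main(void)
{
	/* e.g. "flush static entries that are not local/permanent":
	 * FDB_FLUSH_NDM_STATE      = NUD_NOARP
	 * FDB_FLUSH_NDM_STATE_MASK = NUD_NOARP | NUD_PERMANENT
	 */
	unsigned long value = state_to_flags(NUD_NOARP);
	unsigned long mask = state_to_flags(NUD_NOARP | NUD_PERMANENT);
	unsigned long entry = FDB_STATIC;	/* a sample fdb entry's flags */

	/* assumed matching rule: flush when the masked flags equal the value */
	bool flushed = (entry & mask) == value;

	printf("entry flushed: %s\n", flushed ? "yes" : "no");
	return 0;
}
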
include/uapi/linux/if_bridge.h | 4 +++
net/bridge/br_fdb.c | 55 ++++++++++++++++++++++++++++++++++
2 files changed, 59 insertions(+)

diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 2f3799cf14b2..4638d7e39f2a 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -815,6 +815,10 @@ enum {
 /* embedded in BRIDGE_FLUSH_FDB */
 enum {
 	FDB_FLUSH_UNSPEC,
+	FDB_FLUSH_NDM_STATE,
+	FDB_FLUSH_NDM_STATE_MASK,
+	FDB_FLUSH_NDM_FLAGS,
+	FDB_FLUSH_NDM_FLAGS_MASK,
 	__FDB_FLUSH_MAX
 };
 #define FDB_FLUSH_MAX (__FDB_FLUSH_MAX - 1)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 62f694a739e1..340a2ace1d5e 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -594,8 +594,40 @@ void br_fdb_flush(struct net_bridge *br,
 	rcu_read_unlock();
 }
 
+static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
+{
+	unsigned long flags = 0;
+
+	if (ndm_state & NUD_PERMANENT)
+		__set_bit(BR_FDB_LOCAL, &flags);
+	if (ndm_state & NUD_NOARP)
+		__set_bit(BR_FDB_STATIC, &flags);
+
+	return flags;
+}
+
+static unsigned long __ndm_flags_to_fdb_flags(u16 ndm_flags)
+{
+	unsigned long flags = 0;
+
+	if (ndm_flags & NTF_USE)
+		__set_bit(BR_FDB_ADDED_BY_USER, &flags);
+	if (ndm_flags & NTF_EXT_LEARNED)
+		__set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags);
+	if (ndm_flags & NTF_OFFLOADED)
+		__set_bit(BR_FDB_OFFLOADED, &flags);
+	if (ndm_flags & NTF_STICKY)
+		__set_bit(BR_FDB_STICKY, &flags);
+
+	return flags;
+}
+
 static const struct nla_policy br_fdb_flush_policy[FDB_FLUSH_MAX + 1] = {
 	[FDB_FLUSH_UNSPEC] = { .type = NLA_REJECT },
+	[FDB_FLUSH_NDM_STATE] = { .type = NLA_U16 },
+	[FDB_FLUSH_NDM_FLAGS] = { .type = NLA_U16 },
+	[FDB_FLUSH_NDM_STATE_MASK] = { .type = NLA_U16 },
+	[FDB_FLUSH_NDM_FLAGS_MASK] = { .type = NLA_U16 },
 };
 
 int br_fdb_flush_nlattr(struct net_bridge *br, struct nlattr *fdb_flush_attr,
@@ -610,6 +642,29 @@ int br_fdb_flush_nlattr(struct net_bridge *br, struct nlattr *fdb_flush_attr,
 	if (err)
 		return err;
 
+	if (fdb_flush_tb[FDB_FLUSH_NDM_STATE]) {
+		u16 ndm_state = nla_get_u16(fdb_flush_tb[FDB_FLUSH_NDM_STATE]);
+
+		desc.flags |= __ndm_state_to_fdb_flags(ndm_state);
+	}
+	if (fdb_flush_tb[FDB_FLUSH_NDM_STATE_MASK]) {
+		u16 ndm_state_mask;
+
+		ndm_state_mask = nla_get_u16(fdb_flush_tb[FDB_FLUSH_NDM_STATE_MASK]);
+		desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
+	}
+	if (fdb_flush_tb[FDB_FLUSH_NDM_FLAGS]) {
+		u16 ndm_flags = nla_get_u16(fdb_flush_tb[FDB_FLUSH_NDM_FLAGS]);
+
+		desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
+	}
+	if (fdb_flush_tb[FDB_FLUSH_NDM_FLAGS_MASK]) {
+		u16 ndm_flags_mask;
+
+		ndm_flags_mask = nla_get_u16(fdb_flush_tb[FDB_FLUSH_NDM_FLAGS_MASK]);
+		desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
+	}
+
 	br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
 		 desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);
 
--
2.35.1