Message-Id: <20181002002851.5002-24-dsahern@kernel.org>
Date: Mon, 1 Oct 2018 17:28:49 -0700
From: David Ahern <dsahern@...nel.org>
To: netdev@...r.kernel.org, davem@...emloft.net
Cc: christian@...uner.io, jbenc@...hat.com, stephen@...workplumber.org,
David Ahern <dsahern@...il.com>
Subject: [PATCH RFC v2 net-next 23/25] net/mpls: Plumb support for filtering route dumps

From: David Ahern <dsahern@...il.com>

Implement kernel-side filtering of routes by egress device index and
protocol. MPLS uses only a single table and route type.

Signed-off-by: David Ahern <dsahern@...il.com>
---
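Not part of the patch, just a reviewer's sketch of the kind of filtered
dump request the new code responds to, assuming the filter is built from
the rtmsg header fields plus an RTA_OIF attribute as in this series.
Socket setup and reply parsing are omitted; mpls_filtered_dump is a
made-up name, and RTPROT_STATIC / the device name are arbitrary examples.
Depending on the kernel, the socket may also need NETLINK_GET_STRICT_CHK
enabled via setsockopt(SOL_NETLINK, ...) before header-based filtering is
honored.

#include <string.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Build and send an RTM_GETROUTE dump for AF_MPLS, filtered by
 * protocol and egress device, i.e. the fields this patch inspects.
 * fd is an open NETLINK_ROUTE socket.
 */
static int mpls_filtered_dump(int fd, const char *ifname)
{
        struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
        struct {
                struct nlmsghdr nlh;
                struct rtmsg rtm;
                char buf[64];
        } req;
        struct rtattr *rta;
        unsigned int ifindex = if_nametoindex(ifname);

        if (!ifindex)
                return -1;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
        req.nlh.nlmsg_type = RTM_GETROUTE;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

        req.rtm.rtm_family = AF_MPLS;
        req.rtm.rtm_table = RT_TABLE_MAIN;     /* anything else dumps nothing */
        req.rtm.rtm_type = RTN_UNICAST;        /* likewise for the route type */
        req.rtm.rtm_protocol = RTPROT_STATIC;  /* only static label routes */

        /* RTA_OIF: only routes with a nexthop using this device */
        rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
        rta->rta_type = RTA_OIF;
        rta->rta_len = RTA_LENGTH(sizeof(ifindex));
        memcpy(RTA_DATA(rta), &ifindex, sizeof(ifindex));
        req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

        return sendto(fd, &req, req.nlh.nlmsg_len, 0,
                      (struct sockaddr *)&kernel, sizeof(kernel)) < 0 ? -1 : 0;
}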
net/mpls/af_mpls.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index f94d1db63eb5..4dd8a2a026e7 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2031,6 +2031,28 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
return -EMSGSIZE;
}
+static bool mpls_rt_uses_dev(struct mpls_route *rt,
+ const struct net_device *dev)
+{
+ struct net_device *nh_dev;
+
+ if (rt->rt_nhn == 1) {
+ struct mpls_nh *nh = rt->rt_nh;
+
+ nh_dev = rtnl_dereference(nh->nh_dev);
+ if (dev == nh_dev)
+ return true;
+ } else {
+ for_nexthops(rt) {
+ nh_dev = rtnl_dereference(nh->nh_dev);
+ if (nh_dev == dev)
+ return true;
+ } endfor_nexthops(rt);
+ }
+
+ return false;
+}
+
static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
@@ -2039,6 +2061,7 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
struct fib_dump_filter filter = {};
size_t platform_labels;
unsigned int index;
+ int err;
ASSERT_RTNL();
@@ -2047,6 +2070,15 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
if (err)
return err;
+
+ /* for MPLS, there is only 1 table with fixed type, scope
+ * tos and flags. If any of these are set in the filter then
+ * return nothing
+ */
+ if ((filter.table_id && filter.table_id != RT_TABLE_MAIN) ||
+ (filter.rt_type && filter.rt_type != RTN_UNICAST) ||
+ filter.scope || filter.tos || filter.flags)
+ return 0;
}
index = cb->args[0];
@@ -2055,20 +2087,41 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
+
+ rcu_read_lock();
+
+ if (filter.ifindex) {
+ filter.dev = dev_get_by_index_rcu(net, filter.ifindex);
+ if (!filter.dev) {
+ err = -ENODEV;
+ goto out_err;
+ }
+ }
+
for (; index < platform_labels; index++) {
struct mpls_route *rt;
+
rt = rtnl_dereference(platform_label[index]);
if (!rt)
continue;
+ if (filter.protocol && rt->rt_protocol != filter.protocol)
+ continue;
+
+ if (filter.dev && !mpls_rt_uses_dev(rt, filter.dev))
+ continue;
+
if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
index, rt, NLM_F_MULTI) < 0)
break;
}
cb->args[0] = index;
+ err = skb->len;
- return skb->len;
+out_err:
+ rcu_read_unlock();
+ return err;
}
static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
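
Also not part of the patch: a companion sketch for draining the dump
started by the snippet above, illustrating the single-table/single-type
semantics of the new pre-check. With rtm_table set to anything other
than RT_TABLE_MAIN (or rtm_type other than RTN_UNICAST, or a non-zero
tos/scope/flags), the dump should now finish without any RTM_NEWROUTE
messages. count_dumped_routes is a made-up name; same includes as the
previous snippet.

/* Drain one dump on fd and count RTM_NEWROUTE replies. */
static int count_dumped_routes(int fd)
{
        char buf[16384];
        int routes = 0;

        for (;;) {
                struct nlmsghdr *nlh;
                int len = recv(fd, buf, sizeof(buf), 0);

                if (len <= 0)
                        return -1;

                for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
                     nlh = NLMSG_NEXT(nlh, len)) {
                        if (nlh->nlmsg_type == NLMSG_DONE)
                                return routes;
                        if (nlh->nlmsg_type == NLMSG_ERROR)
                                return -1;
                        if (nlh->nlmsg_type == RTM_NEWROUTE)
                                routes++;
                }
        }
}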
--
2.11.0