Message-ID: <20220220140405.1646839-13-roopa@nvidia.com>
Date: Sun, 20 Feb 2022 14:04:05 +0000
From: Roopa Prabhu <roopa@...dia.com>
To: <davem@...emloft.net>, <kuba@...nel.org>
CC: <netdev@...r.kernel.org>, <stephen@...workplumber.org>,
<nikolay@...ulusnetworks.com>, <idosch@...dia.com>,
<dsahern@...il.com>
Subject: [PATCH net-next 12/12] drivers: vxlan: vnifilter: add support for stats dumping
From: Nikolay Aleksandrov <nikolay@...dia.com>
Add support for dumping per-VNI statistics for VXLAN vnifilter entries. When userspace sets the new TUNNEL_MSG_FLAG_STATS flag in the tunnel_msg dump request, each returned entry carries a nested VXLAN_VNIFILTER_ENTRY_STATS attribute with 64-bit rx/tx byte, packet, drop and error counters. VNI ranges are not merged when stats are requested, so every VNI is reported as its own entry.
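A minimal userspace sketch of how the new flag could be used (illustrative only; it assumes RTM_GETTUNNEL from earlier patches in this series and a hypothetical request_vni_stats_dump() helper, not code from this patch):

/*
 * Hypothetical sketch: ask the kernel to include per-VNI stats in a
 * vnifilter dump by setting TUNNEL_MSG_FLAG_STATS in tunnel_msg->flags.
 */
#include <linux/if_link.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int request_vni_stats_dump(const char *dev)
{
	struct {
		struct nlmsghdr nlh;
		struct tunnel_msg tmsg;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tunnel_msg));
	req.nlh.nlmsg_type = RTM_GETTUNNEL;	/* assumed from earlier in the series */
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.tmsg.family = AF_BRIDGE;
	req.tmsg.flags = TUNNEL_MSG_FLAG_STATS;	/* request per-VNI stats */
	req.tmsg.ifindex = if_nametoindex(dev);

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}

	/*
	 * Each RTM_NEWTUNNEL entry in the multipart reply now nests a
	 * VXLAN_VNIFILTER_ENTRY_STATS attribute holding u64
	 * VNIFILTER_ENTRY_STATS_{RX,TX}_{BYTES,PKTS,DROPS,ERRORS} counters.
	 * Receiving and parsing the dump is left to the caller, which owns
	 * the returned socket.
	 */
	return fd;
}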
Signed-off-by: Nikolay Aleksandrov <nikolay@...dia.com>
---
drivers/net/vxlan/vxlan_vnifilter.c | 55 ++++++++++++++++++++++++++---
include/uapi/linux/if_link.h | 30 +++++++++++++++-
2 files changed, 79 insertions(+), 6 deletions(-)
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 935f3007f348..861f7195fe58 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -186,9 +186,48 @@ static size_t vxlan_vnifilter_entry_nlmsg_size(void)
+ nla_total_size(sizeof(struct in6_addr));/* VXLAN_VNIFILTER_ENTRY_GROUP{6} */
}
+static int __vnifilter_entry_fill_stats(struct sk_buff *skb,
+ const struct vxlan_vni_node *vbegin)
+{
+ struct vxlan_vni_stats vstats;
+ struct nlattr *vstats_attr;
+
+ vstats_attr = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY_STATS);
+ if (!vstats_attr)
+ goto out_stats_err;
+
+ vxlan_vnifilter_stats_get(vbegin, &vstats);
+ if (nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_BYTES,
+ vstats.rx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_PKTS,
+ vstats.rx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_DROPS,
+ vstats.rx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_ERRORS,
+ vstats.rx_errors, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_BYTES,
+ vstats.tx_bytes, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_PKTS,
+ vstats.tx_packets, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_DROPS,
+ vstats.tx_drops, VNIFILTER_ENTRY_STATS_PAD) ||
+ nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_ERRORS,
+ vstats.tx_errors, VNIFILTER_ENTRY_STATS_PAD))
+ goto out_stats_err;
+
+ nla_nest_end(skb, vstats_attr);
+
+ return 0;
+
+out_stats_err:
+ nla_nest_cancel(skb, vstats_attr);
+ return -EMSGSIZE;
+}
+
static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb,
struct vxlan_vni_node *vbegin,
- struct vxlan_vni_node *vend)
+ struct vxlan_vni_node *vend,
+ bool fill_stats)
{
struct nlattr *ventry;
u32 vs = be32_to_cpu(vbegin->vni);
@@ -221,6 +260,9 @@ static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb,
}
}
+ if (fill_stats && __vnifilter_entry_fill_stats(skb, vbegin))
+ goto out_err;
+
nla_nest_end(skb, ventry);
return true;
@@ -253,7 +295,7 @@ static void vxlan_vnifilter_notify(const struct vxlan_dev *vxlan,
tmsg->family = AF_BRIDGE;
tmsg->ifindex = vxlan->dev->ifindex;
- if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode))
+ if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode, false))
goto out_err;
nlmsg_end(skb, nlh);
@@ -277,6 +319,7 @@ static int vxlan_vnifilter_dump_dev(const struct net_device *dev,
int idx = 0, s_idx = cb->args[1];
struct vxlan_vni_group *vg;
struct nlmsghdr *nlh;
+ bool dump_stats;
int err = 0;
if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
@@ -288,6 +331,7 @@ static int vxlan_vnifilter_dump_dev(const struct net_device *dev,
return 0;
tmsg = nlmsg_data(cb->nlh);
+ dump_stats = !!(tmsg->flags & TUNNEL_MSG_FLAG_STATS);
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
RTM_NEWTUNNEL, sizeof(*new_tmsg), NLM_F_MULTI);
@@ -308,11 +352,12 @@ static int vxlan_vnifilter_dump_dev(const struct net_device *dev,
vend = v;
continue;
}
- if (vnirange(vend, v) == 1 &&
+ if (!dump_stats && vnirange(vend, v) == 1 &&
vxlan_addr_equal(&v->remote_ip, &vend->remote_ip)) {
goto update_end;
} else {
- if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend)) {
+ if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend,
+ dump_stats)) {
err = -EMSGSIZE;
break;
}
@@ -324,7 +369,7 @@ static int vxlan_vnifilter_dump_dev(const struct net_device *dev,
}
if (!err && vbegin) {
- if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend))
+ if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, dump_stats))
err = -EMSGSIZE;
}
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index eb046a82188d..1a362c2a8e4b 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -715,17 +715,37 @@ enum ipvlan_mode {
/* Tunnel RTM header */
struct tunnel_msg {
__u8 family;
- __u8 reserved1;
+ __u8 flags;
__u16 reserved2;
__u32 ifindex;
};
+/* include statistics in the dump */
+#define TUNNEL_MSG_FLAG_STATS 0x01
+
+/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */
+enum {
+ VNIFILTER_ENTRY_STATS_UNSPEC,
+ VNIFILTER_ENTRY_STATS_RX_BYTES,
+ VNIFILTER_ENTRY_STATS_RX_PKTS,
+ VNIFILTER_ENTRY_STATS_RX_DROPS,
+ VNIFILTER_ENTRY_STATS_RX_ERRORS,
+ VNIFILTER_ENTRY_STATS_TX_BYTES,
+ VNIFILTER_ENTRY_STATS_TX_PKTS,
+ VNIFILTER_ENTRY_STATS_TX_DROPS,
+ VNIFILTER_ENTRY_STATS_TX_ERRORS,
+ VNIFILTER_ENTRY_STATS_PAD,
+ __VNIFILTER_ENTRY_STATS_MAX
+};
+#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1)
+
enum {
VXLAN_VNIFILTER_ENTRY_UNSPEC,
VXLAN_VNIFILTER_ENTRY_START,
VXLAN_VNIFILTER_ENTRY_END,
VXLAN_VNIFILTER_ENTRY_GROUP,
VXLAN_VNIFILTER_ENTRY_GROUP6,
+ VXLAN_VNIFILTER_ENTRY_STATS,
__VXLAN_VNIFILTER_ENTRY_MAX
};
#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1)
@@ -737,6 +757,14 @@ enum {
};
#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1)
+/* Embedded inside LINK_XSTATS_TYPE_VXLAN */
+enum {
+ VXLAN_XSTATS_UNSPEC,
+ VXLAN_XSTATS_VNIFILTER,
+ __VXLAN_XSTATS_MAX
+};
+#define VXLAN_XSTATS_MAX (__VXLAN_XSTATS_MAX - 1)
+
/* VXLAN section */
enum {
IFLA_VXLAN_UNSPEC,
--
2.25.1