Message-Id: <20230928100418.521594-1-yajun.deng@linux.dev>
Date: Thu, 28 Sep 2023 18:04:18 +0800
From: Yajun Deng <yajun.deng@...ux.dev>
To: davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
pabeni@...hat.com
Cc: netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
Yajun Deng <yajun.deng@...ux.dev>,
Alexander Lobakin <aleksander.lobakin@...el.com>
Subject: [PATCH v6] net/core: Introduce netdev_core_stats_inc()

Although the kfree_skb_reason() helper can be used to find the reason
an skb was dropped, most callers do not also increment one of
rx_dropped, tx_dropped, rx_nohandler or rx_otherhost_dropped when they
drop it. For users, the real question is why the "dropped" counters
reported by ip(8) keep increasing.

Introduce netdev_core_stats_inc() so the callers that drop an skb can
be traced. Also, mark netdev_core_stats_alloc() __cold, as it is
unlikely to be called.
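
As an illustration (not part of the diff below), a hypothetical driver
error path would now look like this; the wrapper compiles down to a
single call into the exported symbol:

	/* example_rx_drop() and the chosen drop reason are made up for
	 * illustration; dev_core_stats_rx_dropped_inc() is the existing
	 * wrapper this patch rewires. */
	static void example_rx_drop(struct net_device *dev, struct sk_buff *skb)
	{
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	}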

Suggested-by: Alexander Lobakin <aleksander.lobakin@...el.com>
Signed-off-by: Yajun Deng <yajun.deng@...ux.dev>
---
v6: Merge netdev_core_stats and netdev_core_stats_inc() together.
v5: Access the per-cpu pointer before reaching the relevant offset.
v4: Introduce netdev_core_stats_inc() instead of exporting dev_core_stats_*_inc().
v3: Add __cold to netdev_core_stats_alloc().
v2: Use __cold instead of inline in dev_core_stats().
v1: https://lore.kernel.org/netdev/20230911082016.3694700-1-yajun.deng@linux.dev/
---
 include/linux/netdevice.h | 21 ++++-----------------
 net/core/dev.c            | 17 +++++++++++++++--
 2 files changed, 19 insertions(+), 19 deletions(-)
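
For illustration only, a minimal userspace analogue of the offset-based
increment used below: each inline wrapper passes
offsetof(struct net_device_core_stats, FIELD) to the exported function,
which resolves the base pointer once and bumps the counter that many
bytes in. Names here are illustrative, and the kernel variant uses
this_cpu_ptr() on a __percpu pointer rather than a plain one:

	#include <stddef.h>
	#include <stdio.h>

	struct core_stats {	/* stand-in for net_device_core_stats */
		unsigned long rx_dropped;
		unsigned long tx_dropped;
	};

	/* Analogue of netdev_core_stats_inc(): bump the unsigned long
	 * that lives 'offset' bytes into the stats block. */
	static void stats_inc(struct core_stats *p, size_t offset)
	{
		(*(unsigned long *)((char *)p + offset))++;
	}

	int main(void)
	{
		struct core_stats s = { 0 };

		stats_inc(&s, offsetof(struct core_stats, tx_dropped));
		printf("tx_dropped=%lu\n", s.tx_dropped);	/* prints 1 */
		return 0;
	}
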
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7e520c14eb8c..eb1fa04fbccc 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4002,32 +4002,19 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
 	return false;
 }
 
-struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
-
-static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
-{
-	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
-	struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
-
-	if (likely(p))
-		return p;
-
-	return netdev_core_stats_alloc(dev);
-}
+void netdev_core_stats_inc(struct net_device *dev, u32 offset);
 
 #define DEV_CORE_STATS_INC(FIELD)						\
 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)	\
 {										\
-	struct net_device_core_stats __percpu *p;				\
-										\
-	p = dev_core_stats(dev);						\
-	if (p)									\
-		this_cpu_inc(p->FIELD);						\
+	netdev_core_stats_inc(dev,						\
+			      offsetof(struct net_device_core_stats, FIELD));	\
 }
 DEV_CORE_STATS_INC(rx_dropped)
 DEV_CORE_STATS_INC(tx_dropped)
 DEV_CORE_STATS_INC(rx_nohandler)
 DEV_CORE_STATS_INC(rx_otherhost_dropped)
+#undef DEV_CORE_STATS_INC
 
 static __always_inline int ____dev_forward_skb(struct net_device *dev,
 					       struct sk_buff *skb,
diff --git a/net/core/dev.c b/net/core/dev.c
index 606a366cc209..88a32c392c1d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10497,7 +10497,8 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
 
-struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
+static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
+		struct net_device *dev)
 {
 	struct net_device_core_stats __percpu *p;
 
@@ -10510,7 +10511,19 @@ struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
 	/* This READ_ONCE() pairs with the cmpxchg() above */
 	return READ_ONCE(dev->core_stats);
 }
-EXPORT_SYMBOL(netdev_core_stats_alloc);
+
+void netdev_core_stats_inc(struct net_device *dev, u32 offset)
+{
+	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
+	struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
+
+	if (unlikely(!p))
+		p = netdev_core_stats_alloc(dev);
+
+	if (p)
+		(*(unsigned long *)((void *)this_cpu_ptr(p) + offset))++;
+}
+EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
 
 /**
  * dev_get_stats - get network device statistics
--
2.25.1
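
A sketch of how the exported symbol can then be used to find droppers:
the kprobe module below is hypothetical (it assumes
netdev_core_stats_inc is probeable on the running kernel) and dumps the
call chain on every core-stats increment. In practice, perf probe or a
BPF tracer attached to the same symbol would do the same job:

	#include <linux/module.h>
	#include <linux/kprobes.h>
	#include <linux/printk.h>

	/* Log the call chain each time a core stat is bumped. */
	static int stats_inc_pre(struct kprobe *kp, struct pt_regs *regs)
	{
		dump_stack();
		return 0;
	}

	static struct kprobe kp = {
		.symbol_name	= "netdev_core_stats_inc",
		.pre_handler	= stats_inc_pre,
	};

	static int __init trace_drops_init(void)
	{
		return register_kprobe(&kp);
	}

	static void __exit trace_drops_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(trace_drops_init);
	module_exit(trace_drops_exit);
	MODULE_LICENSE("GPL");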