Message-Id: <20210711050007.1200-1-xiangxia.m.yue@gmail.com>
Date: Sun, 11 Jul 2021 13:00:06 +0800
From: xiangxia.m.yue@...il.com
To: xiyou.wangcong@...il.com, jhs@...atatu.com
Cc: netdev@...r.kernel.org, Tonghao Zhang <xiangxia.m.yue@...il.com>
Subject: [net-next 1/2] qdisc: add tracepoint qdisc:qdisc_enqueue for enqueued SKBs
From: Tonghao Zhang <xiangxia.m.yue@...il.com>
This tracepoint can work together with qdisc:qdisc_dequeue to measure
the latency of packets in the qdisc queue. In some cases, for example
when TX queues are stopped or frozen, sch_direct_xmit() will invoke
dev_requeue_skb() to requeue SKBs to qdisc->gso_skb, which may delay
the SKBs in the qdisc queue. With this patch, that packet latency can
be measured.
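As a rough illustration only (not part of the kernel change itself), the
two events can be paired from userspace by matching the skbaddr field.
The Python sketch below assumes tracefs is mounted at
/sys/kernel/debug/tracing and that both events print an skbaddr= field;
the mount point and exact line format may differ on a given system:

  #!/usr/bin/env python3
  # Sketch: estimate per-skb qdisc residency by pairing qdisc:qdisc_enqueue
  # with qdisc:qdisc_dequeue on the same skbaddr. Requires root. skbaddr is
  # a hashed %p pointer, but it is stable enough to match the two events.
  import re

  TRACEFS = "/sys/kernel/debug/tracing"   # or /sys/kernel/tracing

  # Enable both tracepoints.
  for ev in ("qdisc/qdisc_enqueue", "qdisc/qdisc_dequeue"):
      with open(f"{TRACEFS}/events/{ev}/enable", "w") as f:
          f.write("1")

  line_re = re.compile(r"\s(\d+\.\d+): qdisc_(enqueue|dequeue):")
  skb_re = re.compile(r"skbaddr=(\S+)")

  enqueue_ts = {}                          # skbaddr -> enqueue timestamp (s)

  with open(f"{TRACEFS}/trace_pipe") as pipe:
      for line in pipe:
          m, s = line_re.search(line), skb_re.search(line)
          if not m or not s:
              continue
          ts, event, skb = float(m.group(1)), m.group(2), s.group(1)
          if event == "enqueue":
              enqueue_ts[skb] = ts
          elif skb in enqueue_ts:
              delta_us = (ts - enqueue_ts.pop(skb)) * 1e6
              print(f"skb {skb} spent {delta_us:.1f} us in qdisc")

A bpftrace or perf script could do the same; the point is only that the
enqueue and dequeue timestamps for the same skb give the time it spent
queued, including the requeue case described above.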
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@...il.com>
---
include/net/pkt_sched.h | 4 ++++
include/trace/events/qdisc.h | 32 ++++++++++++++++++++++++++++++++
net/core/dev.c | 4 ++--
net/sched/sch_generic.c | 11 +++++++++++
4 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6d7b12cba015..66411b4ff284 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -133,6 +133,10 @@ static inline void qdisc_run(struct Qdisc *q)
}
}
+int qdisc_enqueue_skb(struct netdev_queue *txq, struct Qdisc *q,
+ struct sk_buff *skb,
+ struct sk_buff **to_free);
+
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
*/
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index 330d32d84485..b0e76237bb74 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -11,6 +11,38 @@
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
+TRACE_EVENT(qdisc_enqueue,
+
+ TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
+ struct sk_buff *skb, int ret),
+
+ TP_ARGS(qdisc, txq, skb, ret),
+
+ TP_STRUCT__entry(
+ __field( struct Qdisc *, qdisc )
+ __field(const struct netdev_queue *, txq )
+ __field( void *, skbaddr )
+ __field( int, ifindex )
+ __field( u32, handle )
+ __field( u32, parent )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->qdisc = qdisc;
+ __entry->txq = txq;
+ __entry->skbaddr = skb;
+ __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
+ __entry->handle = qdisc->handle;
+ __entry->parent = qdisc->parent;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%p ret=%d",
+ __entry->ifindex, __entry->handle, __entry->parent,
+ __entry->skbaddr, __entry->ret)
+);
+
TRACE_EVENT(qdisc_dequeue,
TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
diff --git a/net/core/dev.c b/net/core/dev.c
index 50531a2d0b20..78efac6b2e60 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3852,7 +3852,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
qdisc_calculate_pkt_len(skb, q);
if (q->flags & TCQ_F_NOLOCK) {
- rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ rc = qdisc_enqueue_skb(txq, q, skb, &to_free);
if (likely(!netif_xmit_frozen_or_stopped(txq)))
qdisc_run(q);
@@ -3896,7 +3896,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
qdisc_run_end(q);
rc = NET_XMIT_SUCCESS;
} else {
- rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ rc = qdisc_enqueue_skb(txq, q, skb, &to_free);
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e9c0afc8becc..75605075178f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -415,6 +415,17 @@ void __qdisc_run(struct Qdisc *q)
}
}
+int qdisc_enqueue_skb(struct netdev_queue *txq, struct Qdisc *q,
+ struct sk_buff *skb,
+ struct sk_buff **to_free)
+{
+ int ret;
+
+ ret = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
+ trace_qdisc_enqueue(q, txq, skb, ret);
+ return ret;
+}
+
unsigned long dev_trans_start(struct net_device *dev)
{
unsigned long val, res;
--
2.27.0