[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20181112194405.4133-1-gbastien@versatic.net>
Date: Mon, 12 Nov 2018 14:44:05 -0500
From: Geneviève Bastien <gbastien@versatic.net>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org,
Geneviève Bastien <gbastien@versatic.net>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Steven Rostedt <rostedt@...dmis.org>,
Ingo Molnar <mingo@...hat.com>
Subject: [PATCH v2] net: Add trace events for all receive exit points
Trace events are already present for the receive entry points, to indicate
how the reception entered the stack.
This patch adds the corresponding exit trace events that will bound the
reception such that all events occurring between the entry and the exit
can be considered as part of the reception context. This greatly helps
with dependency and root-cause analyses.
Without this, it is impossible to determine whether a sched_wakeup
event following a netif_receive_skb event is the result of the packet
reception or a simple coincidence after further processing by the
thread.
Signed-off-by: Geneviève Bastien <gbastien@versatic.net>
CC: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
CC: Steven Rostedt <rostedt@...dmis.org>
CC: Ingo Molnar <mingo@...hat.com>
CC: David S. Miller <davem@...emloft.net>
---
Changes in v2:
- Add the return value to tracepoints where applicable
- Verify if tracepoint is enabled before walking list in
netif_receive_skb_list
---
include/trace/events/net.h | 78 ++++++++++++++++++++++++++++++++++++++
net/core/dev.c | 38 ++++++++++++++++---
2 files changed, 110 insertions(+), 6 deletions(-)
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 00aa72ce0e7c..cff1a7b9d0bb 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -117,6 +117,42 @@ DECLARE_EVENT_CLASS(net_dev_template,
__get_str(name), __entry->skbaddr, __entry->len)
)
+DECLARE_EVENT_CLASS(net_dev_template_return,
+
+ TP_PROTO(struct sk_buff *skb, int ret),
+
+ TP_ARGS(skb, ret),
+
+ TP_STRUCT__entry(
+ __field(void *, skbaddr)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("skbaddr=%p ret=%d", __entry->skbaddr, __entry->ret)
+)
+
+DECLARE_EVENT_CLASS(net_dev_template_simple,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(void *, skbaddr)
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ ),
+
+ TP_printk("skbaddr=%p", __entry->skbaddr)
+)
+
DEFINE_EVENT(net_dev_template, net_dev_queue,
TP_PROTO(struct sk_buff *skb),
@@ -244,6 +280,48 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
TP_ARGS(skb)
);
+DEFINE_EVENT(net_dev_template_return, napi_gro_frags_exit,
+
+ TP_PROTO(struct sk_buff *skb, int ret),
+
+ TP_ARGS(skb, ret)
+);
+
+DEFINE_EVENT(net_dev_template_return, napi_gro_receive_exit,
+
+ TP_PROTO(struct sk_buff *skb, int ret),
+
+ TP_ARGS(skb, ret)
+);
+
+DEFINE_EVENT(net_dev_template_return, netif_receive_skb_exit,
+
+ TP_PROTO(struct sk_buff *skb, int ret),
+
+ TP_ARGS(skb, ret)
+);
+
+DEFINE_EVENT(net_dev_template_simple, netif_receive_skb_list_exit,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template_return, netif_rx_exit,
+
+ TP_PROTO(struct sk_buff *skb, int ret),
+
+ TP_ARGS(skb, ret)
+);
+
+DEFINE_EVENT(net_dev_template_return, netif_rx_ni_exit,
+
+ TP_PROTO(struct sk_buff *skb, int ret),
+
+ TP_ARGS(skb, ret)
+);
+
#endif /* _TRACE_NET_H */
/* This part must be outside protection */
diff --git a/net/core/dev.c b/net/core/dev.c
index 0ffcbdd55fa9..c4dc5df34abe 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4520,9 +4520,14 @@ static int netif_rx_internal(struct sk_buff *skb)
int netif_rx(struct sk_buff *skb)
{
+ int ret;
+
trace_netif_rx_entry(skb);
- return netif_rx_internal(skb);
+ ret = netif_rx_internal(skb);
+ trace_netif_rx_exit(skb, ret);
+
+ return ret;
}
EXPORT_SYMBOL(netif_rx);
@@ -4537,6 +4542,7 @@ int netif_rx_ni(struct sk_buff *skb)
if (local_softirq_pending())
do_softirq();
preempt_enable();
+ trace_netif_rx_ni_exit(skb, err);
return err;
}
@@ -5222,9 +5228,14 @@ static void netif_receive_skb_list_internal(struct list_head *head)
*/
int netif_receive_skb(struct sk_buff *skb)
{
+ int ret;
+
trace_netif_receive_skb_entry(skb);
- return netif_receive_skb_internal(skb);
+ ret = netif_receive_skb_internal(skb);
+ trace_netif_receive_skb_exit(skb, ret);
+
+ return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
@@ -5244,9 +5255,15 @@ void netif_receive_skb_list(struct list_head *head)
if (list_empty(head))
return;
- list_for_each_entry(skb, head, list)
- trace_netif_receive_skb_list_entry(skb);
+ if (trace_netif_receive_skb_list_entry_enabled()) {
+ list_for_each_entry(skb, head, list)
+ trace_netif_receive_skb_list_entry(skb);
+ }
netif_receive_skb_list_internal(head);
+ if (trace_netif_receive_skb_list_exit_enabled()) {
+ list_for_each_entry(skb, head, list)
+ trace_netif_receive_skb_list_exit(skb);
+ }
}
EXPORT_SYMBOL(netif_receive_skb_list);
@@ -5634,12 +5651,17 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
+ gro_result_t ret;
+
skb_mark_napi_id(skb, napi);
trace_napi_gro_receive_entry(skb);
skb_gro_reset_offset(skb);
- return napi_skb_finish(dev_gro_receive(napi, skb), skb);
+ ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
+ trace_napi_gro_receive_exit(skb, ret);
+
+ return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
@@ -5753,6 +5775,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
+ gro_result_t ret;
struct sk_buff *skb = napi_frags_skb(napi);
if (!skb)
@@ -5760,7 +5783,10 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
trace_napi_gro_frags_entry(skb);
- return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+ ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+ trace_napi_gro_frags_exit(skb, ret);
+
+ return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
--
2.19.1
Powered by blists - more mailing lists