Message-Id: <4dfa606e376f151b283df1813faac360c9c78005.1554740883.git.pabeni@redhat.com>
Date: Mon, 8 Apr 2019 18:35:09 +0200
From: Paolo Abeni <pabeni@...hat.com>
To: netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>,
Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>,
Eric Dumazet <edumazet@...gle.com>,
Ivan Vecera <ivecera@...hat.com>
Subject: [PATCH net-next 3/5] net: sched: always do stats accounting according to TCQ_F_CPUSTATS
In many places the core sched implementation checks independently for the
NOLOCK flag, to acquire/release the root spin lock, and for
qdisc_is_percpu_stats(), to account per-CPU values.

This change updates the last few places checking TCQ_F_NOLOCK to instead do
per-CPU stats accounting according to the qdisc_is_percpu_stats() value.

The above allows us to clean up the dev_requeue_skb() implementation a bit
and makes the stats updates always consistent with a single flag.
Signed-off-by: Paolo Abeni <pabeni@...hat.com>
---
include/net/sch_generic.h | 25 ++++++++++++--------
net/sched/sch_generic.c | 50 +++++++++++++--------------------------
2 files changed, 32 insertions(+), 43 deletions(-)
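For readers skimming the diff: the rule this series converges on is that
TCQ_F_NOLOCK governs locking only, while every stats decision keys off
TCQ_F_CPUSTATS via qdisc_is_percpu_stats(). Below is a minimal userspace
C sketch of that rule, not part of the patch; all types, helpers and flag
values are simplified stand-ins for the kernel ones.

/*
 * Sketch only: simplified stand-ins for struct Qdisc and its helpers.
 * The flag values are illustrative, not the kernel definitions.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TCQ_F_CPUSTATS	0x20
#define TCQ_F_NOLOCK	0x100

struct qdisc {
	unsigned int flags;
	unsigned int qlen;	/* locked qdiscs: root-lock protected */
	atomic_int atomic_qlen;	/* per-CPU-stats qdiscs: atomic counter */
	bool empty;
};

static bool qdisc_is_percpu_stats(const struct qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

/* After the patch: keyed to CPUSTATS, not NOLOCK. */
static bool qdisc_is_empty(const struct qdisc *q)
{
	if (qdisc_is_percpu_stats(q))
		return q->empty;
	return !q->qlen;
}

static void qdisc_qlen_dec(struct qdisc *q)
{
	if (qdisc_is_percpu_stats(q))
		atomic_fetch_sub(&q->atomic_qlen, 1);
	else
		q->qlen--;
}

int main(void)
{
	struct qdisc locked = { .flags = 0, .qlen = 1 };
	struct qdisc nolock = { .flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS };

	atomic_init(&nolock.atomic_qlen, 1);
	qdisc_qlen_dec(&locked);
	qdisc_qlen_dec(&nolock);
	printf("locked empty: %d, nolock qlen: %d\n",
	       qdisc_is_empty(&locked),
	       atomic_load(&nolock.atomic_qlen));
	return 0;
}

With this split, a qdisc could in principle set one flag without the other
and the stats paths would still do the right thing, which is exactly the
single-flag consistency the commit message argues for.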
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 19206d669655..249817d97bf7 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -146,13 +146,6 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}
-static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
-{
- if (qdisc->flags & TCQ_F_NOLOCK)
- return qdisc->empty;
- return !qdisc->q.qlen;
-}
-
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
@@ -490,7 +483,7 @@ static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
{
u32 qlen = q->qstats.qlen;
- if (q->flags & TCQ_F_NOLOCK)
+ if (q->flags & TCQ_F_CPUSTATS)
qlen += atomic_read(&q->q.atomic_qlen);
else
qlen += q->q.qlen;
@@ -822,6 +815,13 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
return q->flags & TCQ_F_CPUSTATS;
}
+static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+{
+ if (qdisc_is_percpu_stats(qdisc))
+ return qdisc->empty;
+ return !qdisc->q.qlen;
+}
+
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
__u64 bytes, __u32 packets)
{
@@ -1113,8 +1113,13 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
if (skb) {
skb = __skb_dequeue(&sch->gso_skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_backlog_dec(sch, skb);
+ qdisc_qstats_atomic_qlen_dec(sch);
+ } else {
+ qdisc_qstats_backlog_dec(sch, skb);
+ sch->q.qlen--;
+ }
} else {
skb = sch->dequeue(sch);
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 81356ef38d1d..ddff2952be87 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -118,52 +118,36 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
spin_unlock(lock);
}
-static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
- while (skb) {
- struct sk_buff *next = skb->next;
-
- __skb_queue_tail(&q->gso_skb, skb);
- q->qstats.requeues++;
- qdisc_qstats_backlog_inc(q, skb);
- q->q.qlen++; /* it's still part of the queue */
+ spinlock_t *lock = NULL;
- skb = next;
+ if (q->flags & TCQ_F_NOLOCK) {
+ lock = qdisc_lock(q);
+ spin_lock(lock);
}
- __netif_schedule(q);
-
- return 0;
-}
-static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
-{
- spinlock_t *lock = qdisc_lock(q);
-
- spin_lock(lock);
while (skb) {
struct sk_buff *next = skb->next;
__skb_queue_tail(&q->gso_skb, skb);
- qdisc_qstats_cpu_requeues_inc(q);
- qdisc_qstats_cpu_backlog_inc(q, skb);
- qdisc_qstats_atomic_qlen_inc(q);
+ /* it's still part of the queue */
+ if (qdisc_is_percpu_stats(q)) {
+ qdisc_qstats_cpu_requeues_inc(q);
+ qdisc_qstats_cpu_backlog_inc(q, skb);
+ qdisc_qstats_atomic_qlen_inc(q);
+ } else {
+ q->qstats.requeues++;
+ qdisc_qstats_backlog_inc(q, skb);
+ q->q.qlen++;
+ }
skb = next;
}
- spin_unlock(lock);
-
+ if (lock)
+ spin_unlock(lock);
__netif_schedule(q);
-
- return 0;
-}
-
-static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-{
- if (q->flags & TCQ_F_NOLOCK)
- return dev_requeue_skb_locked(skb, q);
- else
- return __dev_requeue_skb(skb, q);
}
static void try_bulk_dequeue_skb(struct Qdisc *q,
--
2.20.1
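The resulting single requeue path can also be modelled outside the kernel.
A compressed sketch follows, using pthread and C11 atomics as stand-ins
for the qdisc root lock and the per-CPU/atomic counters; list handling and
scheduling are stubbed, and nothing below is kernel code.

/*
 * Sketch of the unified requeue shape after this patch: the root lock
 * is taken only for NOLOCK qdiscs, while the stats branch is driven by
 * the CPUSTATS flag. Stand-in types and illustrative flag values.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define TCQ_F_CPUSTATS	0x20
#define TCQ_F_NOLOCK	0x100

struct sk_buff { struct sk_buff *next; };

struct qdisc {
	unsigned int flags;
	pthread_mutex_t root_lock;
	struct sk_buff *gso_head, *gso_tail;
	unsigned int qlen, requeues;		/* locked-qdisc counters */
	atomic_uint cpu_requeues, atomic_qlen;	/* per-CPU-stats stand-ins */
};

static void dev_requeue_skb(struct sk_buff *skb, struct qdisc *q)
{
	pthread_mutex_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = &q->root_lock;
		pthread_mutex_lock(lock);
	}

	while (skb) {
		struct sk_buff *next = skb->next;

		/* tail-link onto the gso list; still part of the queue */
		skb->next = NULL;
		if (q->gso_tail)
			q->gso_tail->next = skb;
		else
			q->gso_head = skb;
		q->gso_tail = skb;

		if (q->flags & TCQ_F_CPUSTATS) {
			atomic_fetch_add(&q->cpu_requeues, 1);
			atomic_fetch_add(&q->atomic_qlen, 1);
		} else {
			q->requeues++;
			q->qlen++;
		}
		skb = next;
	}

	if (lock)
		pthread_mutex_unlock(lock);
	/* the kernel would __netif_schedule(q) here */
}

int main(void)
{
	struct sk_buff skbs[2] = { { .next = &skbs[1] }, { .next = NULL } };
	struct qdisc q = {
		.flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
		.root_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	dev_requeue_skb(&skbs[0], &q);
	printf("requeued: %u\n", atomic_load(&q.atomic_qlen));
	return 0;
}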