Message-ID: <20171113201316.6245.7507.stgit@john-Precision-Tower-5810>
Date: Mon, 13 Nov 2017 12:13:16 -0800
From: John Fastabend <john.fastabend@...il.com>
To: willemdebruijn.kernel@...il.com, daniel@...earbox.net,
eric.dumazet@...il.com
Cc: make0818@...il.com, netdev@...r.kernel.org, jiri@...nulli.us,
xiyou.wangcong@...il.com
Subject: [RFC PATCH 17/17] net: sched: lock once per bulk dequeue
Signed-off-by: John Fastabend <john.fastabend@...il.com>
---
0 files changed
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 683f6ec..8ab7933 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -206,33 +206,22 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 {
 	const struct netdev_queue *txq = q->dev_queue;
 	struct sk_buff *skb = NULL;
+	spinlock_t *lock = NULL;
 
-	*packets = 1;
-	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
-		spinlock_t *lock = NULL;
-
-		if (q->flags & TCQ_F_NOLOCK) {
-			lock = qdisc_lock(q);
-			spin_lock(lock);
-		}
-
-		skb = skb_peek(&q->gso_skb);
-
-		/* skb may be null if another cpu pulls gso_skb off in between
-		 * empty check and lock.
-		 */
-		if (!skb) {
-			if (lock)
-				spin_unlock(lock);
-			goto validate;
-		}
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
+	}
 
+	*packets = 1;
+	skb = skb_peek(&q->gso_skb);
+	if (unlikely(skb)) {
 		/* skb in gso_skb were already validated */
 		*validate = false;
 		/* check the reason of requeuing without tx lock first */
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
-			skb = __skb_dequeue(&q->gso_skb);
+			__skb_unlink(skb, &q->gso_skb);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
 				qdisc_qstats_cpu_qlen_dec(q);
@@ -247,17 +236,17 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 			spin_unlock(lock);
 		goto trace;
 	}
-validate:
-	*validate = true;
 
-	if ((q->flags & TCQ_F_ONETXQUEUE) &&
-	    netif_xmit_frozen_or_stopped(txq))
-		return skb;
+	*validate = true;
 
 	skb = qdisc_dequeue_skb_bad_txq(q);
 	if (unlikely(skb))
 		goto bulk;
-	skb = q->dequeue(q);
+
+	if (!(q->flags & TCQ_F_ONETXQUEUE) ||
+	    !netif_xmit_frozen_or_stopped(txq))
+		skb = q->dequeue(q);
+
 	if (skb) {
 bulk:
 		if (qdisc_may_bulk(q))
@@ -265,6 +254,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 		else
 			try_bulk_dequeue_skb_slow(q, skb, packets);
 	}
+	if (lock)
+		spin_unlock(lock);
 trace:
 	trace_qdisc_dequeue(q, txq, *packets, skb);
 	return skb;
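
The hunks above implement what the subject says: for TCQ_F_NOLOCK qdiscs, qdisc_lock() is now taken once at the top of dequeue_skb() and released once after the bulk dequeue, rather than being acquired and dropped around the gso_skb handling alone. As a rough userspace sketch of that "lock once per bulk dequeue" pattern, here is a toy queue guarded by a pthread mutex; the names (dequeue_bulk, __dequeue_one, qlock) are illustrative only, not kernel APIs:

/* Sketch: grab the queue lock once per bulk dequeue instead of once
 * per packet.  Build with: gcc -pthread bulk.c
 */
#include <pthread.h>
#include <stdio.h>

#define QLEN	64
#define BULK	8

static int queue[QLEN];
static int head, tail;			/* head == tail -> empty */
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold qlock; mirrors q->dequeue() being called with the
 * qdisc lock already held in the patched dequeue_skb().
 */
static int __dequeue_one(int *pkt)
{
	if (head == tail)
		return 0;
	*pkt = queue[head++ % QLEN];
	return 1;
}

/* One lock/unlock pair covers the whole burst. */
static int dequeue_bulk(int *pkts, int max)
{
	int n = 0;

	pthread_mutex_lock(&qlock);
	while (n < max && __dequeue_one(&pkts[n]))
		n++;
	pthread_mutex_unlock(&qlock);
	return n;
}

int main(void)
{
	int pkts[BULK], i, n;

	pthread_mutex_lock(&qlock);
	for (i = 0; i < 20; i++)
		queue[tail++ % QLEN] = i;
	pthread_mutex_unlock(&qlock);

	while ((n = dequeue_bulk(pkts, BULK)) > 0)
		printf("dequeued a burst of %d packets\n", n);
	return 0;
}

The shape is the same as in the patch: a single lock/unlock pair now covers the requeue check, q->dequeue() and the bulk loop, instead of paying for locking on every skb.
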
@@ -624,7 +615,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 		if (__skb_array_empty(q))
 			continue;
 
-		skb = skb_array_consume_bh(q);
+		skb = __skb_array_consume(q);
 	}
 	if (likely(skb)) {
 		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
@@ -661,7 +652,7 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
 		struct skb_array *q = band2list(priv, band);
 		struct sk_buff *skb;
 
-		while ((skb = skb_array_consume_bh(q)) != NULL)
+		while ((skb = __skb_array_consume(q)) != NULL)
 			__skb_array_destroy_skb(skb);
 	}