[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20210315115332.1647e92b@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com>
Date: Mon, 15 Mar 2021 11:53:32 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: Yunsheng Lin <linyunsheng@...wei.com>
Cc: <davem@...emloft.net>, <olteanv@...il.com>, <ast@...nel.org>,
<daniel@...earbox.net>, <andriin@...com>, <edumazet@...gle.com>,
<weiwan@...gle.com>, <cong.wang@...edance.com>,
<ap420073@...il.com>, <netdev@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <linuxarm@...neuler.org>,
<mkl@...gutronix.de>, <linux-can@...r.kernel.org>
Subject: Re: [RFC v2] net: sched: implement TCQ_F_CAN_BYPASS for lockless
qdisc
On Mon, 15 Mar 2021 11:10:18 +0800 Yunsheng Lin wrote:
> @@ -606,6 +623,11 @@ static const u8 prio2band[TC_PRIO_MAX + 1] = {
> */
> struct pfifo_fast_priv {
> struct skb_array q[PFIFO_FAST_BANDS];
> +
> + /* protect against data race between enqueue/dequeue and
> + * qdisc->empty setting
> + */
> + spinlock_t lock;
> };
>
> static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
> @@ -623,7 +645,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
> unsigned int pkt_len = qdisc_pkt_len(skb);
> int err;
>
> - err = skb_array_produce(q, skb);
> + spin_lock(&priv->lock);
> + err = __ptr_ring_produce(&q->ring, skb);
> + WRITE_ONCE(qdisc->empty, false);
> + spin_unlock(&priv->lock);
>
> if (unlikely(err)) {
> if (qdisc_is_percpu_stats(qdisc))
> @@ -642,6 +667,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
> struct sk_buff *skb = NULL;
> int band;
>
> + spin_lock(&priv->lock);
> for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
> struct skb_array *q = band2list(priv, band);
>
> @@ -655,6 +681,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
> } else {
> WRITE_ONCE(qdisc->empty, true);
> }
> + spin_unlock(&priv->lock);
>
> return skb;
> }
I thought pfifo was supposed to be "lockless" and this change
re-introduces a lock between producer and consumer, no?
Powered by blists - more mailing lists