Date:   Sat, 16 Oct 2021 10:49:08 +0200
From:   Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To:     netdev@...r.kernel.org, netfilter-devel@...r.kernel.org
Cc:     Jakub Kicinski <kuba@...nel.org>,
        "David S. Miller" <davem@...emloft.net>,
        Pablo Neira Ayuso <pablo@...filter.org>,
        Jozsef Kadlecsik <kadlec@...filter.org>,
        Florian Westphal <fw@...len.de>,
        Jamal Hadi Salim <jhs@...atatu.com>,
        Cong Wang <xiyou.wangcong@...il.com>,
        Jiri Pirko <jiri@...nulli.us>,
        "Ahmed S. Darwish" <a.darwish@...utronix.de>,
        Eric Dumazet <edumazet@...gle.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [PATCH net-next 7/9] net: sched: Use _bstats_update/set() instead of raw writes

From: "Ahmed S. Darwish" <a.darwish@...utronix.de>

The Qdisc::running sequence counter, used to protect Qdisc::bstats reads
from parallel writes, is in the process of being removed. Qdisc::bstats
reads and writes will instead be synchronized through an internal
u64_stats sync point.
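
For reference, a minimal sketch of the generic u64_stats_sync
reader/writer pattern (include/linux/u64_stats_sync.h) that such a sync
point provides. The structure below is purely illustrative and is not
the actual Qdisc stats layout:

	struct my_stats {
		u64			bytes;
		u64			packets;
		struct u64_stats_sync	syncp;
	};

	/* writer side: mark the update section */
	static void my_stats_add(struct my_stats *s, u64 bytes)
	{
		u64_stats_update_begin(&s->syncp);
		s->bytes += bytes;
		s->packets++;
		u64_stats_update_end(&s->syncp);
	}

	/* reader side: retry until a consistent snapshot is seen */
	static void my_stats_read(struct my_stats *s, u64 *bytes,
				  u64 *packets)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*bytes = s->bytes;
			*packets = s->packets;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}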

Modify all bstats writes to use _bstats_update(). This ensures that the
internal u64_stats sync point is always acquired and released as
appropriate. Where a loop accumulates stats from multiple sources, sum
them into local variables first and fold the totals in with a single
_bstats_update() call afterwards.
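
As an illustration (hypothetical call site, mirroring the conversions
below), a raw counter update pair such as

	cl->bstats.bytes += len;
	cl->bstats.packets += 1;

becomes

	_bstats_update(&cl->bstats, len, 1);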

Signed-off-by: Ahmed S. Darwish <a.darwish@...utronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
 net/core/gen_stats.c |  9 +++++----
 net/sched/sch_cbq.c  |  3 +--
 net/sched/sch_gred.c |  7 ++++---
 net/sched/sch_htb.c  | 25 +++++++++++++++----------
 net/sched/sch_qfq.c  |  3 +--
 5 files changed, 26 insertions(+), 21 deletions(-)

diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index f2e12fe7112b1..69576972a25f0 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -126,6 +126,7 @@ EXPORT_SYMBOL(gnet_stats_basic_packed_init);
 static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
 				     struct gnet_stats_basic_cpu __percpu *cpu)
 {
+	u64 t_bytes = 0, t_packets = 0;
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -139,9 +140,10 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_packed *bstats,
 			packets = bcpu->bstats.packets;
 		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
 
-		bstats->bytes += bytes;
-		bstats->packets += packets;
+		t_bytes += bytes;
+		t_packets += packets;
 	}
+	_bstats_update(bstats, t_bytes, t_packets);
 }
 
 void gnet_stats_add_basic(const seqcount_t *running,
@@ -164,8 +166,7 @@ void gnet_stats_add_basic(const seqcount_t *running,
 		packets = b->packets;
 	} while (running && read_seqcount_retry(running, seq));
 
-	bstats->bytes += bytes;
-	bstats->packets += packets;
+	_bstats_update(bstats, bytes, packets);
 }
 EXPORT_SYMBOL(gnet_stats_add_basic);
 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d01f6ec315f87..ef9e87175d35c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -565,8 +565,7 @@ cbq_update(struct cbq_sched_data *q)
 		long avgidle = cl->avgidle;
 		long idle;
 
-		cl->bstats.packets++;
-		cl->bstats.bytes += len;
+		_bstats_update(&cl->bstats, len, 1);
 
 		/*
 		 * (now - last) is total time between packet right edges.
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 2ddcbb2efdbbc..02b03d6d24ea4 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -353,6 +353,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct tc_gred_qopt_offload *hw_stats;
+	u64 bytes = 0, packets = 0;
 	unsigned int i;
 	int ret;
 
@@ -381,15 +382,15 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
 		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
 		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
 
-		_bstats_update(&sch->bstats,
-			       hw_stats->stats.bstats[i].bytes,
-			       hw_stats->stats.bstats[i].packets);
+		bytes += hw_stats->stats.bstats[i].bytes;
+		packets += hw_stats->stats.bstats[i].packets;
 		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
 		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
 		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
 		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
 		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
 	}
+	_bstats_update(&sch->bstats, bytes, packets);
 
 	kfree(hw_stats);
 	return ret;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 2e805b17efcf9..324ecfdf842a3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1308,6 +1308,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 static void htb_offload_aggregate_stats(struct htb_sched *q,
 					struct htb_class *cl)
 {
+	u64 bytes = 0, packets = 0;
 	struct htb_class *c;
 	unsigned int i;
 
@@ -1323,14 +1324,15 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
 			if (p != cl)
 				continue;
 
-			cl->bstats.bytes += c->bstats_bias.bytes;
-			cl->bstats.packets += c->bstats_bias.packets;
+			bytes += c->bstats_bias.bytes;
+			packets += c->bstats_bias.packets;
 			if (c->level == 0) {
-				cl->bstats.bytes += c->leaf.q->bstats.bytes;
-				cl->bstats.packets += c->leaf.q->bstats.packets;
+				bytes += c->leaf.q->bstats.bytes;
+				packets += c->leaf.q->bstats.packets;
 			}
 		}
 	}
+	_bstats_update(&cl->bstats, bytes, packets);
 }
 
 static int
@@ -1358,8 +1360,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 				cl->bstats = cl->leaf.q->bstats;
 			else
 				gnet_stats_basic_packed_init(&cl->bstats);
-			cl->bstats.bytes += cl->bstats_bias.bytes;
-			cl->bstats.packets += cl->bstats_bias.packets;
+			_bstats_update(&cl->bstats,
+				       cl->bstats_bias.bytes,
+				       cl->bstats_bias.packets);
 		} else {
 			htb_offload_aggregate_stats(q, cl);
 		}
@@ -1578,8 +1581,9 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
 		WARN_ON(old != q);
 
 	if (cl->parent) {
-		cl->parent->bstats_bias.bytes += q->bstats.bytes;
-		cl->parent->bstats_bias.packets += q->bstats.packets;
+		_bstats_update(&cl->parent->bstats_bias,
+			       q->bstats.bytes,
+			       q->bstats.packets);
 	}
 
 	offload_opt = (struct tc_htb_qopt_offload) {
@@ -1925,8 +1929,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 				htb_graft_helper(dev_queue, old_q);
 				goto err_kill_estimator;
 			}
-			parent->bstats_bias.bytes += old_q->bstats.bytes;
-			parent->bstats_bias.packets += old_q->bstats.packets;
+			_bstats_update(&parent->bstats_bias,
+				       old_q->bstats.bytes,
+				       old_q->bstats.packets);
 			qdisc_put(old_q);
 		}
 		new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index b6d989b69324d..bea68c91027a3 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1235,8 +1235,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	cl->bstats.bytes += len;
-	cl->bstats.packets += gso_segs;
+	_bstats_update(&cl->bstats, len, gso_segs);
 	sch->qstats.backlog += len;
 	++sch->q.qlen;
 
-- 
2.33.0
