Message-Id: <20200831180749.843-13-paulmck@kernel.org>
Date: Mon, 31 Aug 2020 11:07:43 -0700
From: paulmck@...nel.org
To: rcu@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, kernel-team@...com, mingo@...nel.org,
jiangshanlai@...il.com, dipankar@...ibm.com,
akpm@...ux-foundation.org, mathieu.desnoyers@...icios.com,
josh@...htriplett.org, tglx@...utronix.de, peterz@...radead.org,
rostedt@...dmis.org, dhowells@...hat.com, edumazet@...gle.com,
fweisbec@...il.com, oleg@...hat.com, joel@...lfernandes.org,
"Paul E. McKenney" <paulmck@...nel.org>
Subject: [PATCH tip/core/rcu 13/19] scftorture: Prevent compiler from reducing race probabilities
From: "Paul E. McKenney" <paulmck@...nel.org>
Detecting smp_call_function() memory misordering requires close timing,
so it is necessary to have the checks immediately before and after
the call to the smp_call_function*() function under test. This commit
therefore inserts barrier() calls to prevent the compiler from optimizing
memory-misordering detection down into the zone of extreme improbability.
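
(Illustration only, not part of the patch: the pattern the new barrier()
calls protect can be sketched in plain C.  In the sketch below, flag_in,
handler(), and call_under_test() are made-up stand-ins for scfc_in, the
smp_call_function*() handler, and the call under test, and barrier() is
written out as the usual GCC compiler barrier, which is essentially what
the kernel macro expands to for GCC builds.  The sketch is
single-threaded, so it only shows where the barrier sits, not the race
itself.)

#include <stdbool.h>
#include <stdio.h>

/* Compiler-only barrier, matching the effect of the kernel's barrier(). */
#define barrier() __asm__ __volatile__("" : : : "memory")

/* Hypothetical stand-in for the scfc_in flag checked by the handler. */
static bool flag_in;

static void handler(void *unused)
{
	/* The real test's handler checks that the flag is already visible. */
	if (!flag_in)
		printf("misordering-style check would fire here\n");
	(void)unused;
}

static void call_under_test(void (*fn)(void *), void *arg)
{
	/* Stand-in for smp_call_function_single() and friends. */
	fn(arg);
}

int main(void)
{
	/*
	 * Without the barrier(), the compiler may move or merge the
	 * flag store relative to surrounding code, separating it from
	 * the call below and shrinking the timing window that the
	 * misordering checks rely on.
	 */
	barrier();	/* Prevent race-reduction compiler optimizations. */
	flag_in = true;
	call_under_test(handler, NULL);
	return 0;
}
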
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
---
kernel/scftorture.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 880c2ce..8349681 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -322,6 +322,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			scfp->n_single++;
 		if (scfcp) {
 			scfcp->scfc_cpu = cpu;
+			barrier(); // Prevent race-reduction compiler optimizations.
 			scfcp->scfc_in = true;
 		}
 		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
@@ -339,8 +340,10 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			scfp->n_many_wait++;
 		else
 			scfp->n_many++;
-		if (scfcp)
+		if (scfcp) {
+			barrier(); // Prevent race-reduction compiler optimizations.
 			scfcp->scfc_in = true;
+		}
 		smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait);
 		break;
 	case SCF_PRIM_ALL:
@@ -348,8 +351,10 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			scfp->n_all_wait++;
 		else
 			scfp->n_all++;
-		if (scfcp)
+		if (scfcp) {
+			barrier(); // Prevent race-reduction compiler optimizations.
 			scfcp->scfc_in = true;
+		}
 		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
 		break;
 	}
@@ -358,6 +363,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
 		else
 			kfree(scfcp);
+		barrier(); // Prevent race-reduction compiler optimizations.
 	}
 	if (use_cpus_read_lock)
 		cpus_read_unlock();
--
2.9.5