Message-ID: <5739E7BC.70205@arm.com>
Date:	Mon, 16 May 2016 16:31:08 +0100
From:	Dietmar Eggemann <dietmar.eggemann@....com>
To:	Peter Zijlstra <peterz@...radead.org>, mingo@...nel.org,
	linux-kernel@...r.kernel.org
Cc:	clm@...com, matt@...eblueprint.co.uk, mgalbraith@...e.de,
	tglx@...utronix.de, fweisbec@...il.com
Subject: Re: [RFC][PATCH 4/7] sched: Replace sd_busy/nr_busy_cpus with
 sched_domain_shared

On 09/05/16 11:48, Peter Zijlstra wrote:

Couldn't you just always access sd->shared via
sd = rcu_dereference(per_cpu(sd_llc, cpu)) for
updating nr_busy_cpus?

The call_rcu() thing is on the sd anyway.

Something like the following (untested) on top of this patch:
@@ -5879,7 +5879,6 @@ static void destroy_sched_domains(struct sched_domain *sd)
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
-DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
@@ -5900,7 +5899,6 @@ static void update_top_cache_domain(int cpu)
        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
        per_cpu(sd_llc_size, cpu) = size;
        per_cpu(sd_llc_id, cpu) = id;
-       rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
 
        sd = lowest_flag_domain(cpu, SD_NUMA);
        rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d8dad2972b6..5aed6089dae8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8136,7 +8136,6 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 static inline bool nohz_kick_needed(struct rq *rq)
 {
        unsigned long now = jiffies;
-       struct sched_domain_shared *sds;
        struct sched_domain *sd;
        int nr_busy, cpu = rq->cpu;
        bool kick = false;
@@ -8165,13 +8164,13 @@ static inline bool nohz_kick_needed(struct rq *rq)
                return true;
 
        rcu_read_lock();
-       sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-       if (sds) {
+       sd = rcu_dereference(per_cpu(sd_llc, cpu));
+       if (sd) {
                /*
                 * XXX: write a coherent comment on why we do this.
                 * See also: http://lkml.kernel.org/r/20111202010832.602203411@...iddha-desk.sc.intel.com
                 */
-               nr_busy = atomic_read(&sds->nr_busy_cpus);
+               nr_busy = atomic_read(&sd->shared->nr_busy_cpus);
                if (nr_busy > 1) {
                        kick = true;
                        goto unlock;
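
FWIW, the write side could go through the same pointer. A rough,
untested sketch of what set_cpu_sd_state_busy() might look like under
that assumption (set_cpu_sd_state_idle() would mirror it with
atomic_dec()):

void set_cpu_sd_state_busy(void)
{
	struct sched_domain *sd;
	int cpu = smp_processor_id();

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_llc, cpu));

	if (!sd || sd->nohz_idle)
		goto unlock;
	sd->nohz_idle = 0;

	/* nr_busy_cpus now lives in the shared object of the llc domain */
	atomic_inc(&sd->shared->nr_busy_cpus);
unlock:
	rcu_read_unlock();
}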
