[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <alpine.DEB.2.20.1611151451330.3602@nanos>
Date: Tue, 15 Nov 2016 15:00:17 +0100 (CET)
From: Thomas Gleixner <tglx@...utronix.de>
To: Fenghua Yu <fenghua.yu@...el.com>
cc: "H. Peter Anvin" <h.peter.anvin@...el.com>,
Ingo Molnar <mingo@...e.hu>, Tony Luck <tony.luck@...el.com>,
Ravi V Shankar <ravi.v.shankar@...el.com>,
Sai Prakhya <sai.praneeth.prakhya@...el.com>,
Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
linux-kernel <linux-kernel@...r.kernel.org>, x86 <x86@...nel.org>
Subject: Re: [PATCH 3/3] x86/intel_rdt: Update closid in PQR_ASSOC registers
in synchronous mode when changing "cpus"
On Fri, 11 Nov 2016, Fenghua Yu wrote:
> +/*
> + * MSR_IA32_PQR_ASSOC is scoped per logical CPU, so all updates
> + * are always in thread context.
And this comment tells us what? Nothing useful. It lacks the most important
piece of information: why this is safe against a logical CPU switching the
MSR right at this moment during context switch. It's safe because the
pqr_switch_to() function is called with interrupts disabled.
> + */
> +static void rdt_update_pqr_assoc_closid(void *v)
> +{
> + struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
> +
> + state->closid = *(int *)v;
> + wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, state->closid);
> +}
> +
> +static void rdt_update_pqr_assoc(const struct cpumask *cpu_mask, int closid)
> +{
> + int cpu = get_cpu();
> +
> + /* Update PQR_ASSOC MSR on this cpu if it's in cpu_mask. */
> + if (cpumask_test_cpu(cpu, cpu_mask))
> + rdt_update_pqr_assoc_closid(&closid);
> + /* Update PQR_ASSOC MSR on the rest of cpus. */
> + smp_call_function_many(cpu_mask, rdt_update_pqr_assoc_closid,
> + &closid, 1);
> + put_cpu();
> +}
> +
> static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
> char *buf, size_t nbytes, loff_t off)
> {
> @@ -238,6 +263,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
> &rdtgroup_default.cpu_mask, tmpmask);
> for_each_cpu(cpu, tmpmask)
> per_cpu(cpu_closid, cpu) = 0;
> +
> + /* Update PQR_ASSOC registers on the dropped cpus */
> + rdt_update_pqr_assoc(tmpmask, rdtgroup_default.closid);
Grr. You now store the new closid both in the for_each_cpu() loop and in the
SMP function call.
> }
>
> /*
> @@ -253,6 +281,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
> }
> for_each_cpu(cpu, tmpmask)
> per_cpu(cpu_closid, cpu) = rdtgrp->closid;
> +
> + /* Update PQR_ASSOC registers on the added cpus */
> + rdt_update_pqr_assoc(tmpmask, rdtgrp->closid);
Ditto.
> }
>
> /* Done pushing/pulling - update this group with new mask */
> @@ -783,18 +814,6 @@ static int reset_all_cbms(struct rdt_resource *r)
> }
>
> /*
> - * MSR_IA32_PQR_ASSOC is scoped per logical CPU, so all updates
> - * are always in thread context.
> - */
> -static void rdt_reset_pqr_assoc_closid(void *v)
> -{
> - struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
> -
> - state->closid = 0;
> - wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
> -}
> -
> -/*
> * Forcibly remove all of subdirectories under root.
> */
> static void rmdir_all_sub(void)
> @@ -809,13 +828,9 @@ static void rmdir_all_sub(void)
> t->closid = 0;
> read_unlock(&tasklist_lock);
>
> - get_cpu();
> - /* Reset PQR_ASSOC MSR on this cpu. */
> - rdt_reset_pqr_assoc_closid(NULL);
> - /* Reset PQR_ASSOC MSR on the rest of cpus. */
> - smp_call_function_many(cpu_online_mask, rdt_reset_pqr_assoc_closid,
> - NULL, 1);
> - put_cpu();
> + /* Reset PQR_ASSOC MSR on all online cpus */
> + rdt_update_pqr_assoc(cpu_online_mask, 0);
And here you have the extra closid loop later on. Really useful.
Thanks,
tglx
Powered by blists - more mailing lists