Message-ID: <CAHp75VfmgOWnTJziYfO-ZQw0+ABQcFWbOcGGMqz9NzvKK_AKtQ@mail.gmail.com>
Date: Sat, 27 May 2017 21:13:51 +0300
From: Andy Shevchenko <andy.shevchenko@...il.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>
Cc: devel@...uxdriverproject.org, "x86@...nel.org" <x86@...nel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
Steven Rostedt <rostedt@...dmis.org>,
Jork Loeser <Jork.Loeser@...rosoft.com>,
Simon Xiao <sixiao@...rosoft.com>,
Andy Lutomirski <luto@...nel.org>
Subject: Re: [PATCH v4 09/10] x86/hyper-v: support extended CPU ranges for TLB
flush hypercalls
On Wed, May 24, 2017 at 3:04 PM, Vitaly Kuznetsov <vkuznets@...hat.com> wrote:
> Hyper-V hosts may support more than 64 vCPUs, we need to use
> HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX/LIST_EX hypercalls in this
> case.
> +{
> + /*
> + * We can't be sure that translated vcpu numbers will always be
> + * in ascending order, so iterate over all possible banks and
> + * check all vcpus in it instead.
vcpu -> vCPU
vcpus -> vCPUs
> + */
> + for (cur_bank = 0; cur_bank < ms_hyperv.max_vp_index/64; cur_bank++) {
> + has_cpus = false;
> + for_each_cpu(cpu, cpus) {
int vcpu_bank = vcpu / 64;
int vcpu_offset = vcpu % 64;
> + vcpu = hv_cpu_number_to_vp_number(cpu);
> + if (vcpu/64 != cur_bank)
if (vcpu_bank != cur_bank)
> + continue;
> + if (!has_cpus) {
> + flush->hv_vp_set.valid_bank_mask |=
> + 1 << vcpu / 64;
__set_bit(vcpu_bank, &mask);
> + flush->hv_vp_set.bank_contents[nr_bank] =
> + 1 << vcpu % 64;
Ditto. (vcpu_offset)
> + has_cpus = true;
> + } else {
> + flush->hv_vp_set.bank_contents[nr_bank] |=
> + 1 << vcpu % 64;
Ditto.
> + }
> + }
> + if (has_cpus)
> + nr_bank++;
> + }
> +
> + return nr_bank;
> +}
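
Putting the above together, the loop body I have in mind would read
something like this (untested sketch; I'm assuming valid_bank_mask
can be addressed as an unsigned long bitmap for __set_bit(), and I'd
use BIT_ULL() so we never shift a 32-bit 1 by up to 63 bits):

        for_each_cpu(cpu, cpus) {
                int vcpu = hv_cpu_number_to_vp_number(cpu);
                int vcpu_bank = vcpu / 64;
                int vcpu_offset = vcpu % 64;

                if (vcpu_bank != cur_bank)
                        continue;

                if (!has_cpus) {
                        /* First vCPU seen in this bank: mark it valid */
                        __set_bit(vcpu_bank, (unsigned long *)
                                  &flush->hv_vp_set.valid_bank_mask);
                        flush->hv_vp_set.bank_contents[nr_bank] =
                                BIT_ULL(vcpu_offset);
                        has_cpus = true;
                } else {
                        flush->hv_vp_set.bank_contents[nr_bank] |=
                                BIT_ULL(vcpu_offset);
                }
        }
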
> +static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
> + struct mm_struct *mm,
> + unsigned long start,
> + unsigned long end)
> +{
> + struct hv_flush_pcpu_ex *flush;
> + unsigned long cur, flags;
> + u64 status = -1ULL;
U64_MAX
> + int nr_bank = 0, max_gvas, gva_n;
> + /*
> + * We can flush not more than max_gvas with one hypercall. Flush the
> + * whole address space if we were asked to do more.
> + */
#define XXX (PAGE_SIZE * PAGE_SIZE)
> + max_gvas = (PAGE_SIZE - sizeof(*flush) - nr_bank*8) / 8;
> +
> + if (end == TLB_FLUSH_ALL ||
> + (end && ((end - start)/(PAGE_SIZE*PAGE_SIZE)) > max_gvas)) {
> + if (end == TLB_FLUSH_ALL)
> + flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
> +
> + status = hv_do_rep_hypercall(
> + HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
> + 0, nr_bank + 2, flush, NULL);
if (end == TLB_FLUSH_ALL) {
flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
0, nr_bank + 2, flush, NULL);
} else if (end && ((end - start) / XXX > max_gvas)) {
status = hv_do_rep_hypercall(
HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
0, nr_bank + 2, flush, NULL);
} else {
...
Yes, a bit more code, but IMO much more understandable.
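
While at it, XXX deserves a real name and the magic 8s in the
max_gvas calculation can be spelled out with sizeof. A possible
variant (HV_FLUSH_UNIT is just a name I made up, untested):

        /*
         * One gva_list entry can describe a run of up to 4096 pages:
         * the low 12 bits carry the "additional pages" count. With
         * 4k pages that is PAGE_SIZE * PAGE_SIZE bytes per entry.
         */
        #define HV_FLUSH_UNIT (PAGE_SIZE * PAGE_SIZE)

        max_gvas = (PAGE_SIZE - sizeof(*flush) -
                    nr_bank * sizeof(flush->hv_vp_set.bank_contents[0])) /
                   sizeof(flush->gva_list[0]);
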
> + } else {
> + cur = start;
> + gva_n = nr_bank;
> + do {
> + flush->gva_list[gva_n] = cur & PAGE_MASK;
> + /*
> + * Lower 12 bits encode the number of additional
> + * pages to flush (in addition to the 'cur' page).
> + */
> + if (end >= cur + PAGE_SIZE * PAGE_SIZE)
if (end >= cur + XXX)
> + flush->gva_list[gva_n] |= ~PAGE_MASK;
> + else if (end > cur)
> + flush->gva_list[gva_n] |=
> + (end - cur - 1) >> PAGE_SHIFT;
> +
> + cur += PAGE_SIZE * PAGE_SIZE;
+= XXX;
> + ++gva_n;
> +
> + } while (cur < end);
> +}
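
For completeness, here is how the do/while above reads with the
constant applied, so the stride and the "whole unit" check line up
visually (same hypothetical HV_FLUSH_UNIT name as before):

        cur = start;
        gva_n = nr_bank;
        do {
                flush->gva_list[gva_n] = cur & PAGE_MASK;
                /* Low 12 bits: number of additional pages to flush */
                if (end >= cur + HV_FLUSH_UNIT)
                        flush->gva_list[gva_n] |= ~PAGE_MASK;
                else if (end > cur)
                        flush->gva_list[gva_n] |=
                                (end - cur - 1) >> PAGE_SHIFT;

                cur += HV_FLUSH_UNIT;
                ++gva_n;
        } while (cur < end);
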
--
With Best Regards,
Andy Shevchenko