Message-ID: <BN6PR03MB24813542C7D3D20607B1AE0CA07E0@BN6PR03MB2481.namprd03.prod.outlook.com>
Date: Thu, 19 Jan 2017 17:02:07 +0000
From: KY Srinivasan <kys@...rosoft.com>
To: Greg KH <gregkh@...uxfoundation.org>
CC: "olaf@...fle.de" <olaf@...fle.de>,
"jasowang@...hat.com" <jasowang@...hat.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"apw@...onical.com" <apw@...onical.com>,
"devel@...uxdriverproject.org" <devel@...uxdriverproject.org>,
"leann.ogasawara@...onical.com" <leann.ogasawara@...onical.com>
Subject: RE: [PATCH V2 05/18] Drivers: hv: vmbus: Consolidate all Hyper-V
specific clocksource code
> -----Original Message-----
> From: Greg KH [mailto:gregkh@...uxfoundation.org]
> Sent: Thursday, January 19, 2017 10:52 AM
> To: KY Srinivasan <kys@...rosoft.com>
> Cc: olaf@...fle.de; jasowang@...hat.com; linux-kernel@...r.kernel.org;
> apw@...onical.com; devel@...uxdriverproject.org;
> leann.ogasawara@...onical.com
> Subject: Re: [PATCH V2 05/18] Drivers: hv: vmbus: Consolidate all Hyper-V
> specific clocksource code
>
> On Thu, Jan 19, 2017 at 04:49:35PM +0000, KY Srinivasan wrote:
> >
> >
> > > -----Original Message-----
> > > From: Greg KH [mailto:gregkh@...uxfoundation.org]
> > > Sent: Thursday, January 19, 2017 4:59 AM
> > > To: KY Srinivasan <kys@...rosoft.com>
> > > Cc: linux-kernel@...r.kernel.org; devel@...uxdriverproject.org;
> > > olaf@...fle.de; apw@...onical.com; vkuznets@...hat.com;
> > > jasowang@...hat.com; leann.ogasawara@...onical.com
> > > Subject: Re: [PATCH V2 05/18] Drivers: hv: vmbus: Consolidate all Hyper-V
> > > specific clocksource code
> > >
> > > On Wed, Jan 18, 2017 at 04:45:04PM -0700, kys@...hange.microsoft.com
> > > wrote:
> > > > From: K. Y. Srinivasan <kys@...rosoft.com>
> > > >
> > > > As part of the effort to separate out architecture specific code,
> > > > consolidate all Hyper-V specific clocksource code into the
> > > > architecture specific code.
> > > >
> > > > Signed-off-by: K. Y. Srinivasan <kys@...rosoft.com>
> > > > ---
> > > > arch/x86/hyperv/hv_init.c | 104 +++++++++++++++++++++++++++++++++++++++
> > > > arch/x86/include/asm/mshyperv.h | 12 +++++
> > > > arch/x86/kernel/cpu/mshyperv.c | 23 ---------
> > > > drivers/hv/hv.c | 95 -----------------------------------
> > > > drivers/hv/hyperv_vmbus.h | 8 ---
> > > > 5 files changed, 116 insertions(+), 126 deletions(-)
> > > >
> > > > diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
> > > > index b5c8e04..691dfaf 100644
> > > > --- a/arch/x86/hyperv/hv_init.c
> > > > +++ b/arch/x86/hyperv/hv_init.c
> > > > @@ -24,6 +24,79 @@
> > > > #include <linux/version.h>
> > > > #include <linux/vmalloc.h>
> > > > #include <linux/mm.h>
> > > > +#include <linux/clockchips.h>
> > > > +
> > > > +
> > > > +#ifdef CONFIG_X86_64
> > > > +
> > > > +static struct ms_hyperv_tsc_page *tsc_pg;
> > > > +
> > > > +static u64 read_hv_clock_tsc(struct clocksource *arg)
> > > > +{
> > > > + u64 current_tick;
> > > > +
> > > > + if (tsc_pg->tsc_sequence != 0) {
> > > > + /*
> > > > + * Use the tsc page to compute the value.
> > > > + */
> > > > +
> > > > + while (1) {
> > > > + u64 tmp;
> > > > + u32 sequence = tsc_pg->tsc_sequence;
> > > > + u64 cur_tsc;
> > > > + u64 scale = tsc_pg->tsc_scale;
> > > > + s64 offset = tsc_pg->tsc_offset;
> > > > +
> > > > + rdtscll(cur_tsc);
> > > > + /* current_tick = ((cur_tsc * scale) >> 64) + offset */
> > > > + asm("mulq %3"
> > > > + : "=d" (current_tick), "=a" (tmp)
> > > > + : "a" (cur_tsc), "r" (scale));
> > > > +
> > > > + current_tick += offset;
> > > > + if (tsc_pg->tsc_sequence == sequence)
> > > > + return current_tick;
> > > > +
> > > > + if (tsc_pg->tsc_sequence != 0)
> > > > + continue;
> > > > + /*
> > > > + * Fallback using MSR method.
> > > > + */
> > > > + break;
> > > > + }
> > > > + }
> > > > + rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
> > > > + return current_tick;
> > > > +}
> > > > +
> > > > +static struct clocksource hyperv_cs_tsc = {
> > > > + .name = "hyperv_clocksource_tsc_page",
> > > > + .rating = 400,
> > > > + .read = read_hv_clock_tsc,
> > > > + .mask = CLOCKSOURCE_MASK(64),
> > > > + .flags = CLOCK_SOURCE_IS_CONTINUOUS,
> > > > +};
> > > > +#endif
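
For readers of the archive: read_hv_clock_tsc() above uses the classic
sequence-counter protocol -- sample tsc_pg->tsc_sequence, compute the
time, and retry if the sequence changed mid-read -- and the inline mulq
yields the upper 64 bits of the 64x64-bit product, i.e.
(cur_tsc * scale) >> 64. A minimal portable-C sketch of that computation,
assuming a compiler with 128-bit integer support and kernel u64/s64
types; the helper name is illustrative, not part of the patch:

	/* High 64 bits of cur_tsc * scale, plus offset -- what the mulq asm computes. */
	static inline u64 hv_scale_tsc(u64 cur_tsc, u64 scale, s64 offset)
	{
		return (u64)(((unsigned __int128)cur_tsc * scale) >> 64) + offset;
	}
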
> > > > +
> > > > +static u64 read_hv_clock_msr(struct clocksource *arg)
> > > > +{
> > > > + u64 current_tick;
> > > > + /*
> > > > + * Read the partition counter to get the current tick count. This count
> > > > + * is set to 0 when the partition is created and is incremented in
> > > > + * 100 nanosecond units.
> > > > + */
> > > > + rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
> > > > + return current_tick;
> > > > +}
> > > > +
> > > > +static struct clocksource hyperv_cs_msr = {
> > > > + .name = "hyperv_clocksource_msr",
> > > > + .rating = 400,
> > > > + .read = read_hv_clock_msr,
> > > > + .mask = CLOCKSOURCE_MASK(64),
> > > > + .flags = CLOCK_SOURCE_IS_CONTINUOUS,
> > > > +};
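
Since the reference counter advances in 100 nanosecond units, both
clocksources end up registered at NSEC_PER_SEC/100 = 10 MHz, and
converting a tick count to nanoseconds is a plain multiply. A tiny
illustrative helper, not part of the patch:

	/* The Hyper-V reference counter ticks every 100 ns (10 MHz). */
	static inline u64 hv_ticks_to_ns(u64 ticks)
	{
		return ticks * 100;
	}
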
> > > >
> > > > static void *hypercall_pg;
> > > > /*
> > > > @@ -31,6 +104,7 @@
> > > > * hypervisor has been detected.
> > > > *
> > > > * 1. Setup the hypercall page.
> > > > + * 2. Register Hyper-V specific clocksource.
> > > > */
> > > > void hyperv_init(void)
> > > > {
> > > > @@ -58,6 +132,36 @@ void hyperv_init(void)
> > > > hypercall_msr.enable = 1;
> > > > hypercall_msr.guest_physical_address = vmalloc_to_pfn(hypercall_pg);
> > > > wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
> > > > +
> > > > + /*
> > > > + * Register Hyper-V specific clocksource.
> > > > + */
> > > > +#ifdef CONFIG_X86_64
> > > > + if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
> > > > + union hv_x64_msr_hypercall_contents tsc_msr;
> > > > +
> > > > + tsc_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
> > > > + if (!tsc_pg)
> > > > + clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
> > > > + return;
> > >
> > > There's a reason you always test-build your patches and pay attention to
> > > the warnings!
> > >
> > > How did this pass your tests?
> > >
> > > I've stopped applying patches here.
> >
> > Greg,
> >
> > Thank you for spotting the issue; I will fix the problem and resend.
>
> Again, I have to ask, how did this pass testing?
The patches were developed on one machine and tested on a different
machine. The bug was fixed on the test machine, but I forgot to pick
up that fix before posting the patches.
Regards,
K. Y
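
For reference, the bug Greg is pointing at is visible in the hunk above:
the if (!tsc_pg) branch lacks braces, so the return statement executes
unconditionally and hyperv_init() bails out even when the TSC page was
successfully allocated -- exactly the pattern a test build flags with
GCC's -Wmisleading-indentation warning. A minimal sketch of the
presumably intended control flow:

	tsc_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
	if (!tsc_pg) {
		/* No TSC page: fall back to the MSR-based clocksource and stop. */
		clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
		return;
	}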