Message-ID: <CALCETrVjVkuDaN3aD3Ekx7EHvVLHMcURhtb_tReF=senOG=Bsg@mail.gmail.com>
Date: Sun, 2 Feb 2014 08:39:44 -0800
From: Andy Lutomirski <luto@...capital.net>
To: Stefani Seibold <stefani@...bold.net>
Cc: Greg KH <gregkh@...uxfoundation.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
X86 ML <x86@...nel.org>, Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, Andi Kleen <ak@...ux.intel.com>,
Andrea Arcangeli <aarcange@...hat.com>,
John Stultz <john.stultz@...aro.org>,
Pavel Emelyanov <xemul@...allels.com>,
Cyrill Gorcunov <gorcunov@...nvz.org>,
andriy.shevchenko@...ux.intel.com, Martin.Runge@...de-schwarz.com,
Andreas.Brief@...de-schwarz.com
Subject: Re: [PATCH 3/8] revamp vclock_gettime.c
On Sun, Feb 2, 2014 at 3:27 AM, <stefani@...bold.net> wrote:
> From: Stefani Seibold <stefani@...bold.net>
>
> This intermediate patch revamps vclock_gettime.c by moving some functions
> around. It exists only for splitting purposes, to make the whole 32-bit vdso
> timer patch easier to review.
>
> Signed-off-by: Stefani Seibold <stefani@...bold.net>
Acked-by: Andy Lutomirski <luto@...capital.net>
> ---
> arch/x86/vdso/vclock_gettime.c | 85 +++++++++++++++++++++---------------------
> 1 file changed, 42 insertions(+), 43 deletions(-)
>
> diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
> index eb5d7a5..bbc8065 100644
> --- a/arch/x86/vdso/vclock_gettime.c
> +++ b/arch/x86/vdso/vclock_gettime.c
> @@ -26,41 +26,26 @@
>
> #define gtod (&VVAR(vsyscall_gtod_data))
>
> -notrace static cycle_t vread_tsc(void)
> +static notrace cycle_t vread_hpet(void)
> {
> - cycle_t ret;
> - u64 last;
> -
> - /*
> - * Empirically, a fence (of type that depends on the CPU)
> - * before rdtsc is enough to ensure that rdtsc is ordered
> - * with respect to loads. The various CPU manuals are unclear
> - * as to whether rdtsc can be reordered with later loads,
> - * but no one has ever seen it happen.
> - */
> - rdtsc_barrier();
> - ret = (cycle_t)vget_cycles();
> -
> - last = VVAR(vsyscall_gtod_data).clock.cycle_last;
> -
> - if (likely(ret >= last))
> - return ret;
> + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
> +}
>
> - /*
> - * GCC likes to generate cmov here, but this branch is extremely
> - * predictable (it's just a function of time and the likely is
> - * very likely) and there's a data dependence, so force GCC
> - * to generate a branch instead. I don't barrier() because
> - * we don't actually need a barrier, and if this function
> - * ever gets inlined it will generate worse code.
> - */
> - asm volatile ("");
> - return last;
> +notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
> +{
> + long ret;
> + asm("syscall" : "=a" (ret) :
> + "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
> + return ret;
> }
>
> -static notrace cycle_t vread_hpet(void)
> +notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
> {
> - return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
> + long ret;
> +
> + asm("syscall" : "=a" (ret) :
> + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
> + return ret;
> }
>
> #ifdef CONFIG_PARAVIRT_CLOCK
> @@ -133,23 +118,37 @@ static notrace cycle_t vread_pvclock(int *mode)
> }
> #endif
>
> -notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
> +notrace static cycle_t vread_tsc(void)
> {
> - long ret;
> - asm("syscall" : "=a" (ret) :
> - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
> - return ret;
> -}
> + cycle_t ret;
> + u64 last;
>
> -notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
> -{
> - long ret;
> + /*
> + * Empirically, a fence (of type that depends on the CPU)
> + * before rdtsc is enough to ensure that rdtsc is ordered
> + * with respect to loads. The various CPU manuals are unclear
> + * as to whether rdtsc can be reordered with later loads,
> + * but no one has ever seen it happen.
> + */
> + rdtsc_barrier();
> + ret = (cycle_t)vget_cycles();
>
> - asm("syscall" : "=a" (ret) :
> - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
> - return ret;
> -}
> + last = VVAR(vsyscall_gtod_data).clock.cycle_last;
>
> + if (likely(ret >= last))
> + return ret;
> +
> + /*
> + * GCC likes to generate cmov here, but this branch is extremely
> + * predictable (it's just a function of time and the likely is
> + * very likely) and there's a data dependence, so force GCC
> + * to generate a branch instead. I don't barrier() because
> + * we don't actually need a barrier, and if this function
> + * ever gets inlined it will generate worse code.
> + */
> + asm volatile ("");
> + return last;
> +}
>
> notrace static inline u64 vgetsns(int *mode)
> {
> --
> 1.8.5.3
>
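As an aside for reviewers who haven't stared at this file before: the two
fallback functions being moved do nothing beyond issuing the corresponding
syscall directly when the vDSO fast path can't be used. Below is a
user-space sketch of the same pattern, assuming x86-64 Linux; the wrapper
and the test harness around the asm are illustrative, not kernel code (and
note that user code should also clobber rcx and r11, which the syscall
instruction overwrites):

/*
 * Sketch: call clock_gettime(2) via the syscall instruction, the same
 * way vdso_fallback_gettime() does in the patch above.
 */
#include <stdio.h>
#include <time.h>
#include <sys/syscall.h>

static long raw_clock_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm volatile ("syscall"
		      : "=a" (ret)
		      : "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
		      : "rcx", "r11", "memory");
	return ret;
}

int main(void)
{
	struct timespec ts;

	if (raw_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}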
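Similarly, the vread_tsc() block that moves down is the usual
fence / read / clamp-against-cycle_last pattern. A rough user-space
analogue is sketched below, using compiler intrinsics in place of the
kernel's rdtsc_barrier()/vget_cycles() helpers and a plain variable in
place of gtod->clock.cycle_last; again an illustration, not the kernel's
code:

#include <stdint.h>
#include <x86intrin.h>

static uint64_t last_cycles;	/* stand-in for clock.cycle_last */

static uint64_t read_tsc_clamped(void)
{
	uint64_t ret, last;

	_mm_lfence();			/* order rdtsc against earlier loads */
	ret = __rdtsc();

	last = last_cycles;
	if (ret >= last)
		return ret;

	/* empty asm nudges gcc toward a branch rather than cmov */
	asm volatile ("");
	return last;
}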
--
Andy Lutomirski
AMA Capital Management, LLC