Message-ID: <20180607013427.7qvwzmi6d2c62y23@shbuild888>
Date:   Thu, 7 Jun 2018 09:34:27 +0800
From:   Feng Tang <feng.tang@...el.com>
To:     Pavel Tatashin <pasha.tatashin@...cle.com>
Cc:     Peter Zijlstra <peterz@...radead.org>,
        Petr Mladek <pmladek@...e.com>, Ingo Molnar <mingo@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        "H . Peter Anvin" <hpa@...or.com>,
        Alan Cox <gnomes@...rguk.ukuu.org.uk>,
        linux-kernel@...r.kernel.org, alek.du@...el.com,
        arjan@...ux.intel.com, len.brown@...el.com
Subject: Re: [RFC 2/2] x86, tsc: Enable clock for early printk timestamp

Hi Pavel,

Thanks for the review.

On Wed, Jun 06, 2018 at 11:25:22AM -0400, Pavel Tatashin wrote:
> Hi Feng,
> 
> Using a global variable for this is not going to work, because you are adding a conditional branch and a load to a very hot path for the life of the system, not only for the duration of the boot.

Exactly. As I explained, I wanted to use the existing "__use_tsc" and
"__sched_clock_stable" static keys without creating new global
variables, but the problem is that jump_label_init() can't be called
that early. So I used "tsc_inited" as a temporary measure, just to
show that tsc_init() can be called early and that the printk
timestamps can work much earlier.
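To make the trade-off concrete, here is a minimal standalone C sketch
(illustration only; "use_tsc_flag", "read_counter" and
"sched_clock_flag" are made-up names for this example, not kernel
code). It shows the cost Pavel is pointing at: a plain global flag is
re-tested on every call for the life of the system, while a static
key such as "__use_tsc" gets its branch site patched away once
enabled, which is exactly what can't happen before jump_label_init()
runs.

/*
 * Standalone illustration only, not kernel code.  "use_tsc_flag"
 * stands in for the tsc_inited global: it costs a load plus a
 * conditional branch on every sched_clock_flag() call, forever.
 * The kernel's static_branch_likely(&__use_tsc) avoids that by
 * patching the branch site once, but the patching machinery is
 * only available after jump_label_init().
 */
#include <stdint.h>
#include <stdio.h>

static int use_tsc_flag;               /* plain global flag */

static uint64_t read_counter(void)     /* stand-in for rdtsc() */
{
	static uint64_t fake_tsc;
	return ++fake_tsc;
}

static uint64_t sched_clock_flag(void)
{
	if (use_tsc_flag)               /* load + branch, every call */
		return read_counter();
	return 0;                       /* pre-init fallback */
}

int main(void)
{
	printf("before init: %llu\n",
	       (unsigned long long)sched_clock_flag());
	use_tsc_flag = 1;               /* what an early tsc_init() flips */
	printf("after init:  %llu\n",
	       (unsigned long long)sched_clock_flag());
	return 0;
}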

Thanks,
Feng

> 
> Pavel
> 
> >  
> > +int tsc_inited;
> >  /*
> >   * TSC can be unstable due to cpufreq or due to unsynced TSCs
> >   */
> > @@ -192,7 +193,7 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_
> >   */
> >  u64 native_sched_clock(void)
> >  {
> > -	if (static_branch_likely(&__use_tsc)) {
> > +	if (static_branch_likely(&__use_tsc) || tsc_inited) {
> >  		u64 tsc_now = rdtsc();
> >  
> >  		/* return the value in ns */
> > @@ -1387,30 +1391,16 @@ static int __init init_tsc_clocksource(void)
> >   */
> >  device_initcall(init_tsc_clocksource);
> >  
> > -void __init tsc_early_delay_calibrate(void)
> > -{
> > -	unsigned long lpj;
> > -
> > -	if (!boot_cpu_has(X86_FEATURE_TSC))
> > -		return;
> > -
> > -	cpu_khz = x86_platform.calibrate_cpu();
> > -	tsc_khz = x86_platform.calibrate_tsc();
> > -
> > -	tsc_khz = tsc_khz ? : cpu_khz;
> > -	if (!tsc_khz)
> > -		return;
> > -
> > -	lpj = tsc_khz * 1000;
> > -	do_div(lpj, HZ);
> > -	loops_per_jiffy = lpj;
> > -}
> > -
> >  void __init tsc_init(void)
> >  {
> >  	u64 lpj, cyc;
> >  	int cpu;
> >  
> > +	if (tsc_inited)
> > +		return;
> > +
> > +	tsc_inited = 1;
> > +
> >  	if (!boot_cpu_has(X86_FEATURE_TSC)) {
> >  		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
> >  		return;
> > @@ -1474,11 +1464,15 @@ void __init tsc_init(void)
> >  	lpj = ((u64)tsc_khz * 1000);
> >  	do_div(lpj, HZ);
> >  	lpj_fine = lpj;
> > +	loops_per_jiffy = lpj;
> >  
> >  	use_tsc_delay();
> >  
> >  	check_system_tsc_reliable();
> >  
> > +	extern void early_set_sched_clock_stable(u64 sched_clock_offset);
> > +	early_set_sched_clock_stable(div64_u64(rdtsc() * 1000, tsc_khz));
> > +
> >  	if (unsynchronized_tsc()) {
> >  		mark_tsc_unstable("TSCs unsynchronized");
> >  		return;
> > diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
> > index 10c83e7..6c5c22d 100644
> > --- a/kernel/sched/clock.c
> > +++ b/kernel/sched/clock.c
> > @@ -119,6 +119,13 @@ static void __scd_stamp(struct sched_clock_data *scd)
> >  	scd->tick_raw = sched_clock();
> >  }
> >  
> > +
> > +void early_set_sched_clock_stable(u64 sched_clock_offset)
> > +{
> > +	__sched_clock_offset = sched_clock_offset;
> > +	static_branch_enable(&__sched_clock_stable);
> > +}
> > +
> >  static void __set_sched_clock_stable(void)
> >  {
> >  	struct sched_clock_data *scd;
> > @@ -342,12 +349,14 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
> >   *
> >   * See cpu_clock().
> >   */
> > +
> > +extern int tsc_inited;
> >  u64 sched_clock_cpu(int cpu)
> >  {
> >  	struct sched_clock_data *scd;
> >  	u64 clock;
> >  
> > -	if (sched_clock_stable())
> > +	if (sched_clock_stable() || tsc_inited)
> >  		return sched_clock() + __sched_clock_offset;
> >  
> >  	if (unlikely(!sched_clock_running))
> > 
> >>>
> >>> If you have a dodgy part (sorry SKX), you'll just have to live with
> >>> sched_clock starting late(r).
> >>>
> >>> Do not cobble things on the side, try and get the normal things running
> >>> earlier.
