--- linux-2.6.19/include/linux/time.h	2006-12-08 11:40:46.000000000 +0100
+++ linux-2.6.19-ed/include/linux/time.h	2006-12-08 16:58:57.000000000 +0100
@@ -88,9 +88,21 @@ static inline struct timespec timespec_s
 #define timespec_valid(ts) \
 	(((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
 
-extern struct timespec xtime;
-extern struct timespec wall_to_monotonic;
-extern seqlock_t xtime_lock;
+/*
+ * define a structure to keep all fields close to each other.
+ */
+struct ktimed_struct {
+	struct timespec _xtime;
+	struct timespec wall_to_monotonic;
+	seqlock_t lock;
+	unsigned long avenrun[3];
+	int calc_load_count;
+};
+extern struct ktimed_struct ktimed;
+#define xtime			ktimed._xtime
+#define wall_to_monotonic	ktimed.wall_to_monotonic
+#define xtime_lock		ktimed.lock
+#define avenrun			ktimed.avenrun
 
 void timekeeping_init(void);

--- linux-2.6.19/kernel/timer.c	2006-12-08 11:50:11.000000000 +0100
+++ linux-2.6.19-ed/kernel/timer.c	2006-12-08 18:13:24.000000000 +0100
@@ -570,11 +570,13 @@ found:
  * however, we will ALWAYS keep the tv_nsec part positive so we can use
  * the usual normalization.
  */
-struct timespec xtime __attribute__ ((aligned (16)));
-struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
-
-EXPORT_SYMBOL(xtime);
-
+#ifndef ARCH_HAVE_KTIMED
+struct ktimed_struct ktimed __cacheline_aligned = {
+	.lock = __SEQLOCK_UNLOCKED(ktimed.lock),
+	.calc_load_count = LOAD_FREQ,
+};
+EXPORT_SYMBOL(ktimed);
+#endif
 
 /* XXX - all of this timekeeping code should be later moved to time.c */
 #include <linux/clocksource.h>
@@ -995,9 +997,6 @@ static unsigned long count_active_tasks(
  *
  * Requires xtime_lock to access.
  */
-unsigned long avenrun[3];
-
-EXPORT_SYMBOL(avenrun);
 
 /*
  * calc_load - given tick count, update the avenrun load estimates.
  */
@@ -1006,27 +1005,21 @@ EXPORT_SYMBOL(avenrun);
 static inline void calc_load(unsigned long ticks)
 {
 	unsigned long active_tasks; /* fixed-point */
-	static int count = LOAD_FREQ;
 
-	active_tasks = count_active_tasks();
-	for (count -= ticks; count < 0; count += LOAD_FREQ) {
-		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
-		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
-		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
+	ktimed.calc_load_count -= ticks;
+
+	if (unlikely(ktimed.calc_load_count < 0)) {
+		active_tasks = count_active_tasks();
+		do {
+			ktimed.calc_load_count += LOAD_FREQ;
+			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
+			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
+			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
+		} while (ktimed.calc_load_count < 0);
 	}
 }
 
 /*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime and avenrun.
- */
-#ifndef ARCH_HAVE_XTIME_LOCK
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
-EXPORT_SYMBOL(xtime_lock);
-#endif
-
-/*
  * This function runs timers and the timer-tq in bottom half context.
  */
 static void run_timer_softirq(struct softirq_action *h)
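The point of the grouping is the read side: a gettimeofday()-style reader
spins on xtime_lock and samples xtime and wall_to_monotonic, so with the
old layout one retry loop could touch three separately aligned objects.
Here is a minimal sketch of that reader pattern against the 2.6.19 seqlock
API; ktimed_get_ts() is a made-up name for illustration, not something
this patch adds:

#include <linux/time.h>
#include <linux/seqlock.h>

static void ktimed_get_ts(struct timespec *ts, struct timespec *mono)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);	/* ktimed.lock */
		*ts = xtime;				/* ktimed._xtime */
		*mono = wall_to_monotonic;		/* same struct */
	} while (read_seqretry(&xtime_lock, seq));
}

With ktimed packed into a single __cacheline_aligned structure, the whole
retry loop reads one cache line instead of up to three.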
--- linux-2.6.19/include/linux/sched.h	2006-12-08 12:10:45.000000000 +0100
+++ linux-2.6.19-ed/include/linux/sched.h	2006-12-08 12:10:59.000000000 +0100
@@ -104,7 +104,6 @@ struct futex_pi_state;
  * the EXP_n values would be 1981, 2034 and 2043 if still using only
  * 11 bit fractions.
  */
-extern unsigned long avenrun[];		/* Load averages */
 
 #define FSHIFT		11		/* nr of bits of precision */
 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */

--- linux-2.6.19/arch/x86_64/kernel/time.c	2006-12-08 16:22:15.000000000 +0100
+++ linux-2.6.19-ed/arch/x86_64/kernel/time.c	2006-12-08 18:13:24.000000000 +0100
@@ -77,7 +77,10 @@ unsigned long long monotonic_base;
 
 struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */
 volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
-struct timespec __xtime __section_xtime;
+struct ktimed_struct __ktimed __section_ktimed = {
+	.lock = __SEQLOCK_UNLOCKED(ktimed.lock),
+	.calc_load_count = LOAD_FREQ,
+};
 struct timezone __sys_tz __section_sys_tz;
 
 /*
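Note that avenrun[] keeps its FSHIFT/FIXED_1 fixed-point format, so
consumers are unchanged; only the storage moves into ktimed. For
reference, a rough sketch of how such a value decodes into the usual
two-decimal loadavg output, mirroring the LOAD_INT/LOAD_FRAC helpers in
fs/proc/proc_misc.c; ktimed_loadavg() is a made-up name:

#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/kernel.h>

#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

static int ktimed_loadavg(char *buf, size_t len)
{
	unsigned long seq, a[3];
	int i;

	/* "Requires xtime_lock to access" -- and the lock now sits in
	 * the same cache line as the array it protects. */
	do {
		seq = read_seqbegin(&xtime_lock);
		for (i = 0; i < 3; i++)
			a[i] = avenrun[i];	/* ktimed.avenrun[i] */
	} while (read_seqretry(&xtime_lock, seq));

	return snprintf(buf, len, "%lu.%02lu %lu.%02lu %lu.%02lu\n",
			LOAD_INT(a[0]), LOAD_FRAC(a[0]),
			LOAD_INT(a[1]), LOAD_FRAC(a[1]),
			LOAD_INT(a[2]), LOAD_FRAC(a[2]));
}

For example, with FIXED_1 == 2048 a raw value of 3072 prints as "1.50".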