From 24e4a8c3e8868874835b0f1ad6dd417341e99822 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 16 Jul 2014 21:03:53 +0000 Subject: ktime: Kill non-scalar ktime_t implementation for 2038 The non-scalar ktime_t implementation is basically a timespec which has to be changed to support dates past 2038 on 32bit systems. This patch removes the non-scalar ktime_t implementation, forcing the scalar s64 nanosecond version on all architectures. This may have additional performance overhead on some 32bit systems when converting between ktime_t and timespec structures; however, the majority of 32bit systems (arm and i386) were already using scalar ktime_t, so no performance regressions will be seen on those platforms. On affected platforms, I'm open to finding optimizations, including avoiding converting to timespecs where possible. [ tglx: We can now clean up the ktime_t.tv64 mess, but that's a different issue and we can throw a coccinelle script at it ] Signed-off-by: John Stultz Signed-off-by: Thomas Gleixner Signed-off-by: John Stultz --- arch/x86/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a8f749ef0fdc..7fa17b5ce668 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -111,7 +111,6 @@ config X86 select ARCH_CLOCKSOURCE_DATA select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) select GENERIC_TIME_VSYSCALL - select KTIME_SCALAR if X86_32 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select HAVE_CONTEXT_TRACKING if X86_64 -- cgit v1.2.3 From bb0b58127c5add364cb597d58b1cf66eb279eae8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 16 Jul 2014 21:04:52 +0000 Subject: x86: kvm: Use ktime_get_boot_ns() Use the new nanoseconds based interface and get rid of the timespec conversion dance. Signed-off-by: Thomas Gleixner Cc: Gleb Natapov Cc: kvm@vger.kernel.org Acked-by: Paolo Bonzini Signed-off-by: John Stultz --- arch/x86/kvm/x86.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f6449334ec45..65c430512132 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1109,11 +1109,7 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, static inline u64 get_kernel_ns(void) { - struct timespec ts; - - ktime_get_ts(&ts); - monotonic_to_bootbased(&ts); - return timespec_to_ns(&ts); + return ktime_get_boot_ns(); } #ifdef CONFIG_X86_64 -- cgit v1.2.3 From cbcf2dd3b3d4d990610259e8d878fc8dc1f17d80 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 16 Jul 2014 21:04:54 +0000 Subject: x86: kvm: Make kvm_get_time_and_clockread() nanoseconds based Convert the relevant base data right away to nanoseconds instead of doing the conversion on every readout. Reduces text size by 160 bytes.
Signed-off-by: Thomas Gleixner Cc: Gleb Natapov Cc: kvm@vger.kernel.org Acked-by: Paolo Bonzini Signed-off-by: John Stultz --- arch/x86/kvm/x86.c | 44 ++++++++++++++------------------------------ 1 file changed, 14 insertions(+), 30 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 65c430512132..63832f5110b6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -984,9 +984,8 @@ struct pvclock_gtod_data { u32 shift; } clock; - /* open coded 'struct timespec' */ - u64 monotonic_time_snsec; - time_t monotonic_time_sec; + u64 boot_ns; + u64 nsec_base; }; static struct pvclock_gtod_data pvclock_gtod_data; @@ -994,6 +993,9 @@ static struct pvclock_gtod_data pvclock_gtod_data; static void update_pvclock_gtod(struct timekeeper *tk) { struct pvclock_gtod_data *vdata = &pvclock_gtod_data; + u64 boot_ns; + + boot_ns = ktime_to_ns(ktime_add(tk->base_mono, tk->offs_boot)); write_seqcount_begin(&vdata->seq); @@ -1004,17 +1006,8 @@ static void update_pvclock_gtod(struct timekeeper *tk) vdata->clock.mult = tk->mult; vdata->clock.shift = tk->shift; - vdata->monotonic_time_sec = tk->xtime_sec - + tk->wall_to_monotonic.tv_sec; - vdata->monotonic_time_snsec = tk->xtime_nsec - + (tk->wall_to_monotonic.tv_nsec - << tk->shift); - while (vdata->monotonic_time_snsec >= - (((u64)NSEC_PER_SEC) << tk->shift)) { - vdata->monotonic_time_snsec -= - ((u64)NSEC_PER_SEC) << tk->shift; - vdata->monotonic_time_sec++; - } + vdata->boot_ns = boot_ns; + vdata->nsec_base = tk->xtime_nsec; write_seqcount_end(&vdata->seq); } @@ -1371,23 +1364,22 @@ static inline u64 vgettsc(cycle_t *cycle_now) return v * gtod->clock.mult; } -static int do_monotonic(struct timespec *ts, cycle_t *cycle_now) +static int do_monotonic_boot(s64 *t, cycle_t *cycle_now) { + struct pvclock_gtod_data *gtod = &pvclock_gtod_data; unsigned long seq; - u64 ns; int mode; - struct pvclock_gtod_data *gtod = &pvclock_gtod_data; + u64 ns; - ts->tv_nsec = 0; do { seq = read_seqcount_begin(>od->seq); mode = gtod->clock.vclock_mode; - ts->tv_sec = gtod->monotonic_time_sec; - ns = gtod->monotonic_time_snsec; + ns = gtod->nsec_base; ns += vgettsc(cycle_now); ns >>= gtod->clock.shift; + ns += gtod->boot_ns; } while (unlikely(read_seqcount_retry(>od->seq, seq))); - timespec_add_ns(ts, ns); + *t = ns; return mode; } @@ -1395,19 +1387,11 @@ static int do_monotonic(struct timespec *ts, cycle_t *cycle_now) /* returns true if host is using tsc clocksource */ static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now) { - struct timespec ts; - /* checked again under seqlock below */ if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) return false; - if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC) - return false; - - monotonic_to_bootbased(&ts); - *kernel_ns = timespec_to_ns(&ts); - - return true; + return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC; } #endif -- cgit v1.2.3 From 09ec54429c6d10f87d1f084de53ae2c1c3a81108 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 16 Jul 2014 21:05:12 +0000 Subject: clocksource: Move cycle_last validation to core code The only user of the cycle_last validation is the x86 TSC. In order to provide NMI safe accessor functions for clock monotonic and monotonic_raw we need to do that in the core. We can't do the TSC specific if (now < cycle_last) now = cycle_last; for the other wrapping around clocksources, but TSC has CLOCKSOURCE_MASK(64) which actually does not mask out anything so if now is less than cycle_last the subtraction will give a negative result. 
So we can check for that in clocksource_delta() and return 0 for that case. Implement and enable it for x86 Signed-off-by: Thomas Gleixner Signed-off-by: John Stultz --- arch/x86/Kconfig | 1 + arch/x86/kernel/tsc.c | 21 +++++++++------------ kernel/time/Kconfig | 5 +++++ kernel/time/timekeeping_internal.h | 9 +++++++++ 4 files changed, 24 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7fa17b5ce668..d08e061c187a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -109,6 +109,7 @@ config X86 select CLOCKSOURCE_WATCHDOG select GENERIC_CLOCKEVENTS select ARCH_CLOCKSOURCE_DATA + select CLOCKSOURCE_VALIDATE_LAST_CYCLE select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) select GENERIC_TIME_VSYSCALL select GENERIC_STRNCPY_FROM_USER diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 57e5ce126d5a..456c0e660c43 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -951,7 +951,7 @@ core_initcall(cpufreq_tsc); static struct clocksource clocksource_tsc; /* - * We compare the TSC to the cycle_last value in the clocksource + * We used to compare the TSC to the cycle_last value in the clocksource * structure to avoid a nasty time-warp. This can be observed in a * very small window right after one CPU updated cycle_last under * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which @@ -961,26 +961,23 @@ static struct clocksource clocksource_tsc; * due to the unsigned delta calculation of the time keeping core * code, which is necessary to support wrapping clocksources like pm * timer. + * + * This sanity check is now done in the core timekeeping code. + * checking the result of read_tsc() - cycle_last for being negative. + * That works because CLOCKSOURCE_MASK(64) does not mask out any bit. */ static cycle_t read_tsc(struct clocksource *cs) { - cycle_t ret = (cycle_t)get_cycles(); - - return ret >= clocksource_tsc.cycle_last ? - ret : clocksource_tsc.cycle_last; -} - -static void resume_tsc(struct clocksource *cs) -{ - if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) - clocksource_tsc.cycle_last = 0; + return (cycle_t)get_cycles(); } +/* + * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc() + */ static struct clocksource clocksource_tsc = { .name = "tsc", .rating = 300, .read = read_tsc, - .resume = resume_tsc, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY, diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index feccfd888732..d626dc98e8df 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG config ARCH_CLOCKSOURCE_DATA bool +# Clocksources require validation of the clocksource against the last +# cycle update - x86/TSC misfeature +config CLOCKSOURCE_VALIDATE_LAST_CYCLE + bool + # Timekeeping vsyscall support config GENERIC_TIME_VSYSCALL bool diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h index 05dfa6b25dc4..4ea005a7f9da 100644 --- a/kernel/time/timekeeping_internal.h +++ b/kernel/time/timekeeping_internal.h @@ -12,9 +12,18 @@ extern void tk_debug_account_sleep_time(struct timespec64 *t); #define tk_debug_account_sleep_time(x) #endif +#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE +static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) +{ + cycle_t ret = (now - last) & mask; + + return (s64) ret > 0 ? 
ret : 0; +} +#else static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) { return (now - last) & mask; } +#endif #endif /* _TIMEKEEPING_INTERNAL_H */ -- cgit v1.2.3 From 4a0e637738f06673725792d74eed67f8779b62c7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 16 Jul 2014 21:05:13 +0000 Subject: clocksource: Get rid of cycle_last cycle_last was added to the clocksource to support the TSC validation. We moved that to the core code, so we can get rid of the extra copy. Signed-off-by: Thomas Gleixner Signed-off-by: John Stultz --- arch/arm64/kernel/vdso.c | 2 +- arch/ia64/kernel/time.c | 4 ++-- arch/powerpc/kernel/time.c | 4 ++-- arch/s390/kernel/time.c | 2 +- arch/tile/kernel/time.c | 2 +- arch/x86/kernel/vsyscall_gtod.c | 2 +- arch/x86/kvm/x86.c | 2 +- include/linux/clocksource.h | 2 -- include/linux/timekeeper_internal.h | 7 ++++--- kernel/time/timekeeping.c | 23 +++++++++++------------ 10 files changed, 24 insertions(+), 26 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 50384fec56c4..574672f001f7 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -224,7 +224,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; if (!use_syscall) { - vdso_data->cs_cycle_last = tk->clock->cycle_last; + vdso_data->cs_cycle_last = tk->cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; vdso_data->xtime_clock_nsec = tk->xtime_nsec; vdso_data->cs_mult = tk->mult; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 71c52bc7c28d..11dc42da7daf 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -441,7 +441,7 @@ void update_vsyscall_tz(void) } void update_vsyscall_old(struct timespec *wall, struct timespec *wtm, - struct clocksource *c, u32 mult) + struct clocksource *c, u32 mult, cycles_t cycle_last) { write_seqcount_begin(&fsyscall_gtod_data.seq); @@ -450,7 +450,7 @@ void update_vsyscall_old(struct timespec *wall, struct timespec *wtm, fsyscall_gtod_data.clk_mult = mult; fsyscall_gtod_data.clk_shift = c->shift; fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio; - fsyscall_gtod_data.clk_cycle_last = c->cycle_last; + fsyscall_gtod_data.clk_cycle_last = cycle_last; /* copy kernel time structures */ fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 9fff9cdcc519..368ab374d33c 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -741,7 +741,7 @@ static cycle_t timebase_read(struct clocksource *cs) } void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, - struct clocksource *clock, u32 mult) + struct clocksource *clock, u32 mult, cycle_t cycle_last) { u64 new_tb_to_xs, new_stamp_xsec; u32 frac_sec; @@ -774,7 +774,7 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, * We expect the caller to have done the first increment of * vdso_data->tb_update_count already. */ - vdso_data->tb_orig_stamp = clock->cycle_last; + vdso_data->tb_orig_stamp = cycle_last; vdso_data->stamp_xsec = new_stamp_xsec; vdso_data->tb_to_xs = new_tb_to_xs; vdso_data->wtom_clock_sec = wtm->tv_sec; diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 0931b110c826..97950f392613 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -220,7 +220,7 @@ void update_vsyscall(struct timekeeper *tk) /* Make userspace gettimeofday spin until we're done. 
*/ ++vdso_data->tb_update_count; smp_wmb(); - vdso_data->xtime_tod_stamp = tk->clock->cycle_last; + vdso_data->xtime_tod_stamp = tk->cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; vdso_data->xtime_clock_nsec = tk->xtime_nsec; vdso_data->wtom_clock_sec = diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index ae70155c2f16..d22d5bfc1e4e 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -269,7 +269,7 @@ void update_vsyscall(struct timekeeper *tk) /* Userspace gettimeofday will spin while this value is odd. */ ++vdso_data->tb_update_count; smp_wmb(); - vdso_data->xtime_tod_stamp = clock->cycle_last; + vdso_data->xtime_tod_stamp = tk->cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; vdso_data->xtime_clock_nsec = tk->xtime_nsec; vdso_data->wtom_clock_sec = wtm->tv_sec; diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c index 9531fbb123ba..c3cb3c144591 100644 --- a/arch/x86/kernel/vsyscall_gtod.c +++ b/arch/x86/kernel/vsyscall_gtod.c @@ -32,7 +32,7 @@ void update_vsyscall(struct timekeeper *tk) /* copy vsyscall data */ vdata->vclock_mode = tk->clock->archdata.vclock_mode; - vdata->cycle_last = tk->clock->cycle_last; + vdata->cycle_last = tk->cycle_last; vdata->mask = tk->clock->mask; vdata->mult = tk->mult; vdata->shift = tk->shift; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 63832f5110b6..7b25125f3f42 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1001,7 +1001,7 @@ static void update_pvclock_gtod(struct timekeeper *tk) /* copy pvclock gtod data */ vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode; - vdata->clock.cycle_last = tk->clock->cycle_last; + vdata->clock.cycle_last = tk->cycle_last; vdata->clock.mask = tk->clock->mask; vdata->clock.mult = tk->mult; vdata->clock.shift = tk->shift; diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index a16b497d5159..653f0e2b6ca9 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * @archdata: arch-specific data * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary - * @cycle_last: most recent cycle counter value seen by ::read() * @owner: module reference, must be set by clocksource in modules */ struct clocksource { @@ -171,7 +170,6 @@ struct clocksource { * clocksource itself is cacheline aligned. */ cycle_t (*read)(struct clocksource *cs); - cycle_t cycle_last; cycle_t mask; u32 mult; u32 shift; diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 2e20275a7083..cb88096222c0 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -29,6 +29,8 @@ struct timekeeper { /* Current clocksource used for timekeeping. */ struct clocksource *clock; + /* Last cycle value */ + cycle_t cycle_last; /* NTP adjusted clock multiplier */ u32 mult; /* The shift value of the current clocksource. */ @@ -62,8 +64,6 @@ struct timekeeper { /* Number of clock cycles in one NTP interval. */ cycle_t cycle_interval; - /* Last cycle value (also stored in clock->cycle_last) */ - cycle_t cycle_last; /* Number of clock shifted nano seconds in one NTP interval. 
*/ u64 xtime_interval; /* shifted nano seconds left over when rounding cycle_interval */ @@ -91,7 +91,8 @@ extern void update_vsyscall_tz(void); #elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD) extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, - struct clocksource *c, u32 mult); + struct clocksource *c, u32 mult, + cycles_t cycle_last); extern void update_vsyscall_tz(void); #else diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 531805013786..4e748c404749 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -121,7 +121,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) old_clock = tk->clock; tk->clock = clock; - tk->cycle_last = clock->cycle_last = clock->read(clock); + tk->cycle_last = clock->read(clock); /* Do the ns -> cycle conversion first, using original mult */ tmp = NTP_INTERVAL_LENGTH; @@ -182,7 +182,7 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk) cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ - delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask); + delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask); nsec = delta * tk->mult + tk->xtime_nsec; nsec >>= tk->shift; @@ -202,7 +202,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ - delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask); + delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask); /* convert delta to nanoseconds. */ nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift); @@ -218,7 +218,8 @@ static inline void update_vsyscall(struct timekeeper *tk) struct timespec xt; xt = tk_xtime(tk); - update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult); + update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult, + tk->cycle_last); } static inline void old_vsyscall_fixup(struct timekeeper *tk) @@ -342,8 +343,8 @@ static void timekeeping_forward_now(struct timekeeper *tk) clock = tk->clock; cycle_now = clock->read(clock); - delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask); - tk->cycle_last = clock->cycle_last = cycle_now; + delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask); + tk->cycle_last = cycle_now; tk->xtime_nsec += delta * tk->mult; @@ -1020,13 +1021,13 @@ static void timekeeping_resume(void) */ cycle_now = clock->read(clock); if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && - cycle_now > clock->cycle_last) { + cycle_now > tk->cycle_last) { u64 num, max = ULLONG_MAX; u32 mult = clock->mult; u32 shift = clock->shift; s64 nsec = 0; - cycle_delta = clocksource_delta(cycle_now, clock->cycle_last, + cycle_delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask); /* @@ -1053,7 +1054,7 @@ static void timekeeping_resume(void) __timekeeping_inject_sleeptime(tk, &ts_delta); /* Re-base the last cycle value */ - tk->cycle_last = clock->cycle_last = cycle_now; + tk->cycle_last = cycle_now; tk->ntp_error = 0; timekeeping_suspended = 0; timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); @@ -1433,7 +1434,7 @@ void update_wall_time(void) #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET offset = real_tk->cycle_interval; #else - offset = clocksource_delta(clock->read(clock), clock->cycle_last, + offset = clocksource_delta(clock->read(clock), tk->cycle_last, clock->mask); #endif @@ -1477,8 +1478,6 @@ void update_wall_time(void) clock_set |= 
accumulate_nsecs_to_secs(tk); write_seqcount_begin(&tk_core.seq); - /* Update clock->cycle_last with the new value */ - clock->cycle_last = tk->cycle_last; /* * Update the real timekeeper. * -- cgit v1.2.3 From d28ede83791defee9a81e558540699dc46dbbe13 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 16 Jul 2014 21:05:16 +0000 Subject: timekeeping: Create struct tk_read_base and use it in struct timekeeper The members of the new struct are the required ones for the new NMI safe accessor to clock monotonic. In order to reuse the existing timekeeping code and to make the update of the fast NMI safe timekeepers a simple memcpy, use the struct for the timekeeper as well and convert all users. Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Mathieu Desnoyers Signed-off-by: John Stultz --- arch/arm64/kernel/vdso.c | 10 +-- arch/s390/kernel/time.c | 16 ++--- arch/tile/kernel/time.c | 10 +-- arch/x86/kernel/vsyscall_gtod.c | 23 ++++--- arch/x86/kvm/x86.c | 14 ++-- include/linux/timekeeper_internal.h | 103 +++++++++++++++------------- kernel/time/timekeeping.c | 132 ++++++++++++++++++------------------ 7 files changed, 158 insertions(+), 150 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 574672f001f7..8296f7f5f0ba 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -211,7 +211,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm) void update_vsyscall(struct timekeeper *tk) { struct timespec xtime_coarse; - u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter"); + u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter"); ++vdso_data->tb_seq_count; smp_wmb(); @@ -224,11 +224,11 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; if (!use_syscall) { - vdso_data->cs_cycle_last = tk->cycle_last; + vdso_data->cs_cycle_last = tk->tkr.cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; - vdso_data->xtime_clock_nsec = tk->xtime_nsec; - vdso_data->cs_mult = tk->mult; - vdso_data->cs_shift = tk->shift; + vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec; + vdso_data->cs_mult = tk->tkr.mult; + vdso_data->cs_shift = tk->tkr.shift; } smp_wmb(); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 97950f392613..4cef607f3711 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -214,26 +214,26 @@ void update_vsyscall(struct timekeeper *tk) { u64 nsecps; - if (tk->clock != &clocksource_tod) + if (tk->tkr.clock != &clocksource_tod) return; /* Make userspace gettimeofday spin until we're done.
*/ ++vdso_data->tb_update_count; smp_wmb(); - vdso_data->xtime_tod_stamp = tk->cycle_last; + vdso_data->xtime_tod_stamp = tk->tkr.cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; - vdso_data->xtime_clock_nsec = tk->xtime_nsec; + vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec; vdso_data->wtom_clock_sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; - vdso_data->wtom_clock_nsec = tk->xtime_nsec + - + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift); - nsecps = (u64) NSEC_PER_SEC << tk->shift; + vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec + + + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift); + nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift; while (vdso_data->wtom_clock_nsec >= nsecps) { vdso_data->wtom_clock_nsec -= nsecps; vdso_data->wtom_clock_sec++; } - vdso_data->tk_mult = tk->mult; - vdso_data->tk_shift = tk->shift; + vdso_data->tk_mult = tk->tkr.mult; + vdso_data->tk_shift = tk->tkr.shift; smp_wmb(); ++vdso_data->tb_update_count; } diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index d22d5bfc1e4e..d8fbc289e680 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -261,7 +261,7 @@ void update_vsyscall_tz(void) void update_vsyscall(struct timekeeper *tk) { struct timespec *wtm = &tk->wall_to_monotonic; - struct clocksource *clock = tk->clock; + struct clocksource *clock = tk->tkr.clock; if (clock != &cycle_counter_cs) return; @@ -269,13 +269,13 @@ void update_vsyscall(struct timekeeper *tk) /* Userspace gettimeofday will spin while this value is odd. */ ++vdso_data->tb_update_count; smp_wmb(); - vdso_data->xtime_tod_stamp = tk->cycle_last; + vdso_data->xtime_tod_stamp = tk->tkr.cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; - vdso_data->xtime_clock_nsec = tk->xtime_nsec; + vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec; vdso_data->wtom_clock_sec = wtm->tv_sec; vdso_data->wtom_clock_nsec = wtm->tv_nsec; - vdso_data->mult = tk->mult; - vdso_data->shift = tk->shift; + vdso_data->mult = tk->tkr.mult; + vdso_data->shift = tk->tkr.shift; smp_wmb(); ++vdso_data->tb_update_count; } diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c index c3cb3c144591..c7d791f32b98 100644 --- a/arch/x86/kernel/vsyscall_gtod.c +++ b/arch/x86/kernel/vsyscall_gtod.c @@ -31,29 +31,30 @@ void update_vsyscall(struct timekeeper *tk) gtod_write_begin(vdata); /* copy vsyscall data */ - vdata->vclock_mode = tk->clock->archdata.vclock_mode; - vdata->cycle_last = tk->cycle_last; - vdata->mask = tk->clock->mask; - vdata->mult = tk->mult; - vdata->shift = tk->shift; + vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode; + vdata->cycle_last = tk->tkr.cycle_last; + vdata->mask = tk->tkr.mask; + vdata->mult = tk->tkr.mult; + vdata->shift = tk->tkr.shift; vdata->wall_time_sec = tk->xtime_sec; - vdata->wall_time_snsec = tk->xtime_nsec; + vdata->wall_time_snsec = tk->tkr.xtime_nsec; vdata->monotonic_time_sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; - vdata->monotonic_time_snsec = tk->xtime_nsec + vdata->monotonic_time_snsec = tk->tkr.xtime_nsec + ((u64)tk->wall_to_monotonic.tv_nsec - << tk->shift); + << tk->tkr.shift); while (vdata->monotonic_time_snsec >= - (((u64)NSEC_PER_SEC) << tk->shift)) { + (((u64)NSEC_PER_SEC) << tk->tkr.shift)) { vdata->monotonic_time_snsec -= - ((u64)NSEC_PER_SEC) << tk->shift; + ((u64)NSEC_PER_SEC) << tk->tkr.shift; vdata->monotonic_time_sec++; } vdata->wall_time_coarse_sec = tk->xtime_sec; - vdata->wall_time_coarse_nsec = (long)(tk->xtime_nsec >> tk->shift); + vdata->wall_time_coarse_nsec = 
(long)(tk->tkr.xtime_nsec >> + tk->tkr.shift); vdata->monotonic_time_coarse_sec = vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7b25125f3f42..b7e57946d1c1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -995,19 +995,19 @@ static void update_pvclock_gtod(struct timekeeper *tk) struct pvclock_gtod_data *vdata = &pvclock_gtod_data; u64 boot_ns; - boot_ns = ktime_to_ns(ktime_add(tk->base_mono, tk->offs_boot)); + boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot)); write_seqcount_begin(&vdata->seq); /* copy pvclock gtod data */ - vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode; - vdata->clock.cycle_last = tk->cycle_last; - vdata->clock.mask = tk->clock->mask; - vdata->clock.mult = tk->mult; - vdata->clock.shift = tk->shift; + vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode; + vdata->clock.cycle_last = tk->tkr.cycle_last; + vdata->clock.mask = tk->tkr.mask; + vdata->clock.mult = tk->tkr.mult; + vdata->clock.shift = tk->tkr.shift; vdata->boot_ns = boot_ns; - vdata->nsec_base = tk->xtime_nsec; + vdata->nsec_base = tk->tkr.xtime_nsec; write_seqcount_end(&vdata->seq); } diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 75bb8add78f5..97381997625b 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -10,80 +10,87 @@ #include #include -/* - * Structure holding internal timekeeping values. - * - * Note: wall_to_monotonic is what we need to add to xtime (or xtime - * corrected for sub jiffie times) to get to monotonic time. - * Monotonic is pegged at zero at system boot time, so - * wall_to_monotonic will be negative, however, we will ALWAYS keep - * the tv_nsec part positive so we can use the usual normalization. +/** + * struct tk_read_base - base structure for timekeeping readout + * @clock: Current clocksource used for timekeeping. + * @read: Read function of @clock + * @mask: Bitmask for two's complement subtraction of non 64bit clocks + * @cycle_last: @clock cycle value at last update + * @mult: NTP adjusted multiplier for scaled math conversion + * @shift: Shift value for scaled math conversion + * @xtime_nsec: Shifted (fractional) nano seconds offset for readout + * @base_mono: ktime_t (nanoseconds) base time for readout * - * wall_to_monotonic is moved after resume from suspend for the - * monotonic time not to jump. To calculate the real boot time offset - * we need to do offs_real - offs_boot. + * This struct has size 56 byte on 64 bit. Together with a seqcount it + * occupies a single 64byte cache line. * - * - wall_to_monotonic is no longer the boot time, getboottime must be - * used instead. + * The struct is separate from struct timekeeper as it is also used + * for a fast NMI safe accessor to clock monotonic. */ -struct timekeeper { - /* Current clocksource used for timekeeping. */ +struct tk_read_base { struct clocksource *clock; - /* Read function of @clock */ cycle_t (*read)(struct clocksource *cs); - /* Bitmask for two's complement subtraction of non 64bit counters */ cycle_t mask; - /* Last cycle value */ cycle_t cycle_last; - /* NTP adjusted clock multiplier */ u32 mult; - /* The shift value of the current clocksource. */ u32 shift; - /* Clock shifted nano seconds */ u64 xtime_nsec; - - /* Monotonic base time */ ktime_t base_mono; +}; - /* Current CLOCK_REALTIME time in seconds */ +/** + * struct timekeeper - Structure holding internal timekeeping values. 
+ * @tkr: The readout base structure + * @xtime_sec: Current CLOCK_REALTIME time in seconds + * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset + * @offs_real: Offset clock monotonic -> clock realtime + * @offs_boot: Offset clock monotonic -> clock boottime + * @offs_tai: Offset clock monotonic -> clock tai + * @tai_offset: The current UTC to TAI offset in seconds + * @base_raw: Monotonic raw base time in ktime_t format + * @raw_time: Monotonic raw base time in timespec64 format + * @cycle_interval: Number of clock cycles in one NTP interval + * @xtime_interval: Number of clock shifted nano seconds in one NTP + * interval. + * @xtime_remainder: Shifted nano seconds left over when rounding + * @cycle_interval + * @raw_interval: Raw nano seconds accumulated per NTP interval. + * @ntp_error: Difference between accumulated time and NTP time in ntp + * shifted nano seconds. + * @ntp_error_shift: Shift conversion between clock shifted nano seconds and + * ntp shifted nano seconds. + * + * Note: For timespec(64) based interfaces wall_to_monotonic is what + * we need to add to xtime (or xtime corrected for sub jiffie times) + * to get to monotonic time. Monotonic is pegged at zero at system + * boot time, so wall_to_monotonic will be negative, however, we will + * ALWAYS keep the tv_nsec part positive so we can use the usual + * normalization. + * + * wall_to_monotonic is moved after resume from suspend for the + * monotonic time not to jump. We need to add total_sleep_time to + * wall_to_monotonic to get the real boot based time offset. + * + * wall_to_monotonic is no longer the boot time, getboottime must be + * used instead. + */ +struct timekeeper { + struct tk_read_base tkr; u64 xtime_sec; - /* CLOCK_REALTIME to CLOCK_MONOTONIC offset */ struct timespec64 wall_to_monotonic; - - /* Offset clock monotonic -> clock realtime */ ktime_t offs_real; - /* Offset clock monotonic -> clock boottime */ ktime_t offs_boot; - /* Offset clock monotonic -> clock tai */ ktime_t offs_tai; - - /* The current UTC to TAI offset in seconds */ s32 tai_offset; - - /* Monotonic raw base time */ ktime_t base_raw; - - /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ struct timespec64 raw_time; - /* Number of clock cycles in one NTP interval. */ + /* The following members are for timekeeping internal use */ cycle_t cycle_interval; - /* Number of clock shifted nano seconds in one NTP interval. */ u64 xtime_interval; - /* shifted nano seconds left over when rounding cycle_interval */ s64 xtime_remainder; - /* Raw nano seconds accumulated per NTP interval. */ u32 raw_interval; - - /* - * Difference between accumulated time and NTP time in ntp - * shifted nano seconds. - */ s64 ntp_error; - /* - * Shift conversion between clock shifted nano seconds and - * ntp shifted nano seconds. 
- */ u32 ntp_error_shift; }; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 14b7367e6b94..ccb69980ef7e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -52,8 +52,8 @@ bool __read_mostly persistent_clock_exist = false; static inline void tk_normalize_xtime(struct timekeeper *tk) { - while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) { - tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift; + while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) { + tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift; tk->xtime_sec++; } } @@ -63,20 +63,20 @@ static inline struct timespec64 tk_xtime(struct timekeeper *tk) struct timespec64 ts; ts.tv_sec = tk->xtime_sec; - ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift); + ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift); return ts; } static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts) { tk->xtime_sec = ts->tv_sec; - tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift; + tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift; } static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts) { tk->xtime_sec += ts->tv_sec; - tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; + tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift; tk_normalize_xtime(tk); } @@ -119,11 +119,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) u64 tmp, ntpinterval; struct clocksource *old_clock; - old_clock = tk->clock; - tk->clock = clock; - tk->read = clock->read; - tk->mask = clock->mask; - tk->cycle_last = tk->read(clock); + old_clock = tk->tkr.clock; + tk->tkr.clock = clock; + tk->tkr.read = clock->read; + tk->tkr.mask = clock->mask; + tk->tkr.cycle_last = tk->tkr.read(clock); /* Do the ns -> cycle conversion first, using original mult */ tmp = NTP_INTERVAL_LENGTH; @@ -147,11 +147,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) if (old_clock) { int shift_change = clock->shift - old_clock->shift; if (shift_change < 0) - tk->xtime_nsec >>= -shift_change; + tk->tkr.xtime_nsec >>= -shift_change; else - tk->xtime_nsec <<= shift_change; + tk->tkr.xtime_nsec <<= shift_change; } - tk->shift = clock->shift; + tk->tkr.shift = clock->shift; tk->ntp_error = 0; tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; @@ -161,7 +161,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) * active clocksource. These value will be adjusted via NTP * to counteract clock drifting. */ - tk->mult = clock->mult; + tk->tkr.mult = clock->mult; } /* Timekeeper helper functions. 
*/ @@ -179,13 +179,13 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk) s64 nsec; /* read clocksource: */ - cycle_now = tk->read(tk->clock); + cycle_now = tk->tkr.read(tk->tkr.clock); /* calculate the delta since the last update_wall_time: */ - delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask); + delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask); - nsec = delta * tk->mult + tk->xtime_nsec; - nsec >>= tk->shift; + nsec = delta * tk->tkr.mult + tk->tkr.xtime_nsec; + nsec >>= tk->tkr.shift; /* If arch requires, add in get_arch_timeoffset() */ return nsec + arch_gettimeoffset(); @@ -193,15 +193,15 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk) static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) { - struct clocksource *clock = tk->clock; + struct clocksource *clock = tk->tkr.clock; cycle_t cycle_now, delta; s64 nsec; /* read clocksource: */ - cycle_now = tk->read(clock); + cycle_now = tk->tkr.read(clock); /* calculate the delta since the last update_wall_time: */ - delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask); + delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask); /* convert delta to nanoseconds. */ nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift); @@ -217,8 +217,8 @@ static inline void update_vsyscall(struct timekeeper *tk) struct timespec xt; xt = tk_xtime(tk); - update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult, - tk->cycle_last); + update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult, + tk->tkr.cycle_last); } static inline void old_vsyscall_fixup(struct timekeeper *tk) @@ -235,11 +235,11 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk) * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD * users are removed, this can be killed. 
*/ - remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1); - tk->xtime_nsec -= remainder; - tk->xtime_nsec += 1ULL << tk->shift; + remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1); + tk->tkr.xtime_nsec -= remainder; + tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift; tk->ntp_error += remainder << tk->ntp_error_shift; - tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift; + tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift; } #else #define old_vsyscall_fixup(tk) @@ -304,7 +304,7 @@ static inline void tk_update_ktime_data(struct timekeeper *tk) nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec); nsec *= NSEC_PER_SEC; nsec += tk->wall_to_monotonic.tv_nsec; - tk->base_mono = ns_to_ktime(nsec); + tk->tkr.base_mono = ns_to_ktime(nsec); /* Update the monotonic raw base */ tk->base_raw = timespec64_to_ktime(tk->raw_time); @@ -336,18 +336,18 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) */ static void timekeeping_forward_now(struct timekeeper *tk) { - struct clocksource *clock = tk->clock; + struct clocksource *clock = tk->tkr.clock; cycle_t cycle_now, delta; s64 nsec; - cycle_now = tk->read(clock); - delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask); - tk->cycle_last = cycle_now; + cycle_now = tk->tkr.read(clock); + delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask); + tk->tkr.cycle_last = cycle_now; - tk->xtime_nsec += delta * tk->mult; + tk->tkr.xtime_nsec += delta * tk->tkr.mult; /* If arch requires, add in get_arch_timeoffset() */ - tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift; + tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift; tk_normalize_xtime(tk); @@ -412,7 +412,7 @@ ktime_t ktime_get(void) do { seq = read_seqcount_begin(&tk_core.seq); - base = tk->base_mono; + base = tk->tkr.base_mono; nsecs = timekeeping_get_ns(tk); } while (read_seqcount_retry(&tk_core.seq, seq)); @@ -438,7 +438,7 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs) do { seq = read_seqcount_begin(&tk_core.seq); - base = ktime_add(tk->base_mono, *offset); + base = ktime_add(tk->tkr.base_mono, *offset); nsecs = timekeeping_get_ns(tk); } while (read_seqcount_retry(&tk_core.seq, seq)); @@ -731,7 +731,7 @@ static int change_clocksource(void *data) */ if (try_module_get(new->owner)) { if (!new->enable || new->enable(new) == 0) { - old = tk->clock; + old = tk->tkr.clock; tk_setup_internals(tk, new); if (old->disable) old->disable(old); @@ -759,11 +759,11 @@ int timekeeping_notify(struct clocksource *clock) { struct timekeeper *tk = &tk_core.timekeeper; - if (tk->clock == clock) + if (tk->tkr.clock == clock) return 0; stop_machine(change_clocksource, clock, NULL); tick_clock_notify(); - return tk->clock == clock ? 0 : -1; + return tk->tkr.clock == clock ? 
0 : -1; } /** @@ -803,7 +803,7 @@ int timekeeping_valid_for_hres(void) do { seq = read_seqcount_begin(&tk_core.seq); - ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; + ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; } while (read_seqcount_retry(&tk_core.seq, seq)); @@ -822,7 +822,7 @@ u64 timekeeping_max_deferment(void) do { seq = read_seqcount_begin(&tk_core.seq); - ret = tk->clock->max_idle_ns; + ret = tk->tkr.clock->max_idle_ns; } while (read_seqcount_retry(&tk_core.seq, seq)); @@ -989,7 +989,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta) static void timekeeping_resume(void) { struct timekeeper *tk = &tk_core.timekeeper; - struct clocksource *clock = tk->clock; + struct clocksource *clock = tk->tkr.clock; unsigned long flags; struct timespec64 ts_new, ts_delta; struct timespec tmp; @@ -1017,16 +1017,16 @@ static void timekeeping_resume(void) * The less preferred source will only be tried if there is no better * usable source. The rtc part is handled separately in rtc core code. */ - cycle_now = tk->read(clock); + cycle_now = tk->tkr.read(clock); if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && - cycle_now > tk->cycle_last) { + cycle_now > tk->tkr.cycle_last) { u64 num, max = ULLONG_MAX; u32 mult = clock->mult; u32 shift = clock->shift; s64 nsec = 0; - cycle_delta = clocksource_delta(cycle_now, tk->cycle_last, - tk->mask); + cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, + tk->tkr.mask); /* * "cycle_delta * mutl" may cause 64 bits overflow, if the @@ -1052,7 +1052,7 @@ static void timekeeping_resume(void) __timekeeping_inject_sleeptime(tk, &ts_delta); /* Re-base the last cycle value */ - tk->cycle_last = cycle_now; + tk->tkr.cycle_last = cycle_now; tk->ntp_error = 0; timekeeping_suspended = 0; timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); @@ -1239,12 +1239,12 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) } } - if (unlikely(tk->clock->maxadj && - (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) { + if (unlikely(tk->tkr.clock->maxadj && + (tk->tkr.mult + adj > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) { printk_deferred_once(KERN_WARNING "Adjusting %s more than 11%% (%ld vs %ld)\n", - tk->clock->name, (long)tk->mult + adj, - (long)tk->clock->mult + tk->clock->maxadj); + tk->tkr.clock->name, (long)tk->tkr.mult + adj, + (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj); } /* * So the following can be confusing. @@ -1295,9 +1295,9 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) * * XXX - TODO: Doc ntp_error calculation. */ - tk->mult += adj; + tk->tkr.mult += adj; tk->xtime_interval += interval; - tk->xtime_nsec -= offset; + tk->tkr.xtime_nsec -= offset; tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; out_adjust: @@ -1315,9 +1315,9 @@ out_adjust: * We'll correct this error next time through this function, when * xtime_nsec is not as small. 
*/ - if (unlikely((s64)tk->xtime_nsec < 0)) { - s64 neg = -(s64)tk->xtime_nsec; - tk->xtime_nsec = 0; + if (unlikely((s64)tk->tkr.xtime_nsec < 0)) { + s64 neg = -(s64)tk->tkr.xtime_nsec; + tk->tkr.xtime_nsec = 0; tk->ntp_error += neg << tk->ntp_error_shift; } @@ -1333,13 +1333,13 @@ out_adjust: */ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk) { - u64 nsecps = (u64)NSEC_PER_SEC << tk->shift; + u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift; unsigned int clock_set = 0; - while (tk->xtime_nsec >= nsecps) { + while (tk->tkr.xtime_nsec >= nsecps) { int leap; - tk->xtime_nsec -= nsecps; + tk->tkr.xtime_nsec -= nsecps; tk->xtime_sec++; /* Figure out if its a leap sec and apply if needed */ @@ -1384,9 +1384,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, /* Accumulate one shifted interval */ offset -= interval; - tk->cycle_last += interval; + tk->tkr.cycle_last += interval; - tk->xtime_nsec += tk->xtime_interval << shift; + tk->tkr.xtime_nsec += tk->xtime_interval << shift; *clock_set |= accumulate_nsecs_to_secs(tk); /* Accumulate raw time */ @@ -1429,8 +1429,8 @@ void update_wall_time(void) #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET offset = real_tk->cycle_interval; #else - offset = clocksource_delta(tk->read(tk->clock), tk->cycle_last, - tk->mask); + offset = clocksource_delta(tk->tkr.read(tk->tkr.clock), + tk->tkr.cycle_last, tk->tkr.mask); #endif /* Check if there's really nothing to do */ @@ -1591,8 +1591,8 @@ ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot, do { seq = read_seqcount_begin(&tk_core.seq); - base = tk->base_mono; - nsecs = tk->xtime_nsec >> tk->shift; + base = tk->tkr.base_mono; + nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift; *offs_real = tk->offs_real; *offs_boot = tk->offs_boot; @@ -1623,7 +1623,7 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot, do { seq = read_seqcount_begin(&tk_core.seq); - base = tk->base_mono; + base = tk->tkr.base_mono; nsecs = timekeeping_get_ns(tk); *offs_real = tk->offs_real; -- cgit v1.2.3
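
To make the core change concrete: the (s64) cast in the CLOCKSOURCE_VALIDATE_LAST_CYCLE version of clocksource_delta() works only because the TSC uses CLOCKSOURCE_MASK(64), so nothing is masked out and a racy readout that lands behind cycle_last shows up as a negative signed delta. The following is a minimal standalone sketch of that arithmetic in plain userspace C; it is illustrative only (uint64_t stands in for cycle_t, the counter values are invented), not kernel code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Unvalidated delta: the mask alone copes with wrapping clocksources. */
static uint64_t delta_plain(uint64_t now, uint64_t last, uint64_t mask)
{
	return (now - last) & mask;
}

/*
 * Validated delta, mirroring the CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
 * variant of clocksource_delta(): with a full 64-bit mask a readout that
 * is behind 'last' yields a value with the sign bit set, which is clamped
 * to 0 instead of being returned as a huge forward jump.
 */
static uint64_t delta_validated(uint64_t now, uint64_t last, uint64_t mask)
{
	uint64_t ret = (now - last) & mask;

	return (int64_t)ret > 0 ? ret : 0;
}

int main(void)
{
	uint64_t mask64 = ~0ULL;		/* CLOCKSOURCE_MASK(64) */
	uint64_t mask32 = (1ULL << 32) - 1;	/* CLOCKSOURCE_MASK(32) */

	/* TSC-style race: this CPU's readout is 5 cycles behind cycle_last. */
	printf("racy read, plain:     %" PRIu64 "\n",
	       delta_plain(1000, 1005, mask64));	/* huge bogus delta */
	printf("racy read, validated: %" PRIu64 "\n",
	       delta_validated(1000, 1005, mask64));	/* 0 */

	/* A genuinely wrapping 32-bit clocksource still works via the mask. */
	printf("wrapped counter:      %" PRIu64 "\n",
	       delta_plain(3, 0xfffffffdULL, mask32));	/* 6 */

	return 0;
}

Doing the clamp once in the core is also what allows the read_tsc() simplification and the removal of resume_tsc() earlier in the series: the TSC driver no longer needs its private cycle_last compare or the resume-time reset, while clocksources that genuinely wrap keep working through the mask.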