author     Heiko Carstens <heiko.carstens@de.ibm.com>    2009-06-25 09:19:10 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2009-06-25 09:19:21 +0200
commit     3504ebc7aa8d437fda898234cd393e93d93cc126
tree       46b35f2b3b5f0b3840328e7b380262f822ed070d /arch
parent     246228111082cd852d628658800f2c193b643e71
[PATCH] introduce get_clock_monotonic
Introduce the get_clock_monotonic() function, which can be used to get a (fast)
timestamp. Resolution is the same as for get_clock(); the only difference is
that the timestamps are monotonic and do not jump backward or forward.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
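As an illustration only (not part of the patch), here is a minimal sketch of how
a caller might use the new helper. It assumes the usual kernel
preempt_disable()/preempt_enable() primitives; measure_delta() is a hypothetical
function made up for this example.

    /*
     * Illustrative sketch, not part of this patch: take two timestamps with
     * preemption disabled, as get_clock_monotonic() requires, and return the
     * elapsed time in TOD clock units.
     */
    #include <linux/preempt.h>
    #include <asm/timex.h>

    static unsigned long long measure_delta(void)
    {
    	unsigned long long start, end;

    	preempt_disable();
    	start = get_clock_monotonic();
    	/* ... code to be measured ... */
    	end = get_clock_monotonic();
    	preempt_enable();

    	return end - start;
    }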
Diffstat (limited to 'arch')
-rw-r--r--   arch/s390/include/asm/timex.h   16
-rw-r--r--   arch/s390/kernel/early.c         2
-rw-r--r--   arch/s390/kernel/head.S          2
-rw-r--r--   arch/s390/kernel/time.c          9
4 files changed, 22 insertions(+), 7 deletions(-)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index cc21e3e20fd7..e4e7552d8151 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -88,6 +88,20 @@ int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
-extern u64 sched_clock_base_cc;
+extern u64 sched_clock_base;
+
+/**
+ * get_clock_monotonic - returns current time in clock rate units
+ *
+ * The caller must ensure that preemption is disabled.
+ * The clock and sched_clock_base get changed via stop_machine.
+ * Therefore preemption must be disabled when calling this
+ * function, otherwise the returned value is not guaranteed to
+ * be monotonic.
+ */
+static inline unsigned long long get_clock_monotonic(void)
+{
+ return get_clock_xt() - sched_clock_base;
+}
#endif
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f9b144049dc9..4088aecdd894 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -52,7 +52,7 @@ static void __init reset_tod_clock(void)
if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
disabled_wait(0);
- sched_clock_base_cc = TOD_UNIX_EPOCH;
+ sched_clock_base = TOD_UNIX_EPOCH;
}
#ifdef CONFIG_SHARED_KERNEL
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ec6882348520..d5e6ee3b2c4f 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -535,7 +535,7 @@ startup:basr %r13,0 # get base
b 0(%r13)
.align 4
4: .long startup_continue
-5: .long sched_clock_base_cc
+5: .long sched_clock_base
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c81..6f0d86fb2aaa 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -59,7 +59,8 @@
#define TICK_SIZE tick
-u64 sched_clock_base_cc = -1; /* Force to data section. */
+u64 sched_clock_base = -1; /* Force to data section. */
+EXPORT_SYMBOL(sched_clock_base);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace sched_clock(void)
{
- return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
+ return (get_clock_monotonic() * 125) >> 9;
}
/*
@@ -277,7 +278,7 @@ void __init time_init(void)
tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
clocksource_tod.cycle_last = now;
clocksource_tod.raw_time = xtime;
- tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
+ tod_to_timeval(sched_clock_base - TOD_UNIX_EPOCH, &ts);
set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -315,7 +316,7 @@ static unsigned long long adjust_time(unsigned long long old,
delta = -delta;
adjust.offset = -ticks * (1000000 / HZ);
}
- sched_clock_base_cc += delta;
+ sched_clock_base += delta;
if (adjust.offset != 0) {
pr_notice("The ETR interface has adjusted the clock "
"by %li microseconds\n", adjust.offset);