- /*
- * Copyright 2006 Andi Kleen, SUSE Labs.
- * Subject to the GNU Public License, v.2
- *
- * Fast user context implementation of clock_gettime, gettimeofday, and time.
- *
- * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
- * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
- *
- * The code should have no internal unresolved relocations.
- * Check with readelf after changing.
- */
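- /*
-  * A quick way to verify is (illustrative invocation; the .dbg image
-  * name assumes a typical x86 build tree):
-  *
-  *   readelf -r arch/x86/entry/vdso/vdso64.so.dbg
-  *
-  * which should report no unresolved relocation entries.
-  */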
- #include <uapi/linux/time.h>
- #include <asm/vgtod.h>
- #include <asm/hpet.h>
- #include <asm/vvar.h>
- #include <asm/unistd.h>
- #include <asm/msr.h>
- #include <linux/math64.h>
- #include <linux/time.h>
- #define gtod (&VVAR(vsyscall_gtod_data))
- extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
- extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
- extern time_t __vdso_time(time_t *t);
- #ifdef CONFIG_HPET_TIMER
- extern u8 hpet_page
- __attribute__((visibility("hidden")));
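- /*
-  * The HPET main counter is a memory-mapped register at offset
-  * HPET_COUNTER (0xf0 in the HPET spec) into the page mapped above,
-  * so a single 32-bit load below is all a read takes.
-  */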
- static notrace cycle_t vread_hpet(void)
- {
- return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
- }
- #endif
- #ifdef CONFIG_PARAVIRT_CLOCK
- extern u8 pvclock_page
- __attribute__((visibility("hidden")));
- #endif
- #ifndef BUILD_VDSO32
- #include <linux/kernel.h>
- #include <asm/vsyscall.h>
- #include <asm/fixmap.h>
- #include <asm/pvclock.h>
- notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
- {
- long ret;
- asm ("syscall" : "=a" (ret), "=m" (*ts) :
- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
- "memory", "rcx", "r11");
- return ret;
- }
- notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
- {
- long ret;
- asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
- "memory", "rcx", "r11");
- return ret;
- }
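- /*
-  * Both fallbacks follow the x86-64 syscall ABI directly: the syscall
-  * number in rax, arguments in rdi/rsi, with rcx and r11 clobbered by
-  * the syscall instruction.  Outside the vDSO the equivalent would be
-  * the libc wrapper (illustrative sketch, not part of this file):
-  *
-  *   #include <sys/syscall.h>
-  *   #include <unistd.h>
-  *
-  *   long ret = syscall(__NR_clock_gettime, clock, ts);
-  *
-  * but the vDSO cannot link against libc, hence the inline asm.
-  */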
- #ifdef CONFIG_PARAVIRT_CLOCK
- static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
- {
- return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
- }
- static notrace cycle_t vread_pvclock(int *mode)
- {
- const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
- cycle_t ret;
- u64 tsc, pvti_tsc;
- u64 last, delta, pvti_system_time;
- u32 version, pvti_tsc_to_system_mul, pvti_tsc_shift;
- /*
- * Note: The kernel and hypervisor must guarantee that cpu ID
- * number maps 1:1 to per-CPU pvclock time info.
- *
- * Because the hypervisor is entirely unaware of guest userspace
- * preemption, it cannot guarantee that per-CPU pvclock time
- * info is updated if the underlying CPU changes, or that its
- * version is increased whenever the underlying CPU changes.
- *
- * On KVM, we are guaranteed that pvti updates for any vCPU are
- * atomic as seen by *all* vCPUs. This is an even stronger
- * guarantee than we get with a normal seqlock.
- *
- * On Xen, we don't appear to have that guarantee, but Xen still
- * supplies a valid seqlock using the version field.
- * We only do pvclock vdso timing at all if
- * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
- * mean that all vCPUs have matching pvti and that the TSC is
- * synced, so we can just look at vCPU 0's pvti.
- */
- if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
- *mode = VCLOCK_NONE;
- return 0;
- }
- do {
- version = pvti->version;
- /* This is also a read barrier, so we'll read version first. */
- tsc = rdtsc_ordered();
- pvti_tsc_to_system_mul = pvti->tsc_to_system_mul;
- pvti_tsc_shift = pvti->tsc_shift;
- pvti_system_time = pvti->system_time;
- pvti_tsc = pvti->tsc_timestamp;
- /* Make sure that the version double-check is last. */
- smp_rmb();
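- /*
-  * An odd version means an update was in progress; a changed
-  * version means we raced with one.  Retry in either case.
-  */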
- } while (unlikely((version & 1) || version != pvti->version));
- delta = tsc - pvti_tsc;
- ret = pvti_system_time +
- pvclock_scale_delta(delta, pvti_tsc_to_system_mul,
- pvti_tsc_shift);
- /* refer to tsc.c read_tsc() comment for rationale */
- last = gtod->cycle_last;
- if (likely(ret >= last))
- return ret;
- return last;
- }
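- /*
-  * For reference, pvclock_scale_delta() performs the pvclock ABI
-  * conversion, conceptually (sketch for tsc_shift >= 0; a negative
-  * shift becomes a right shift instead):
-  *
-  *   ns = (((tsc - tsc_timestamp) << tsc_shift) *
-  *         tsc_to_system_mul) >> 32;
-  */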
- #endif
- #else
- notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
- {
- long ret;
- asm (
- "mov %%ebx, %%edx \n"
- "mov %[clock], %%ebx \n"
- "call __kernel_vsyscall \n"
- "mov %%edx, %%ebx \n"
- : "=a" (ret), "=m" (*ts)
- : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
- : "memory", "edx");
- return ret;
- }
- notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
- {
- long ret;
- asm (
- "mov %%ebx, %%edx \n"
- "mov %[tv], %%ebx \n"
- "call __kernel_vsyscall \n"
- "mov %%edx, %%ebx \n"
- : "=a" (ret), "=m" (*tv), "=m" (*tz)
- : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
- : "memory", "edx");
- return ret;
- }
- #ifdef CONFIG_PARAVIRT_CLOCK
- static notrace cycle_t vread_pvclock(int *mode)
- {
- *mode = VCLOCK_NONE;
- return 0;
- }
- #endif
- #endif
- notrace static cycle_t vread_tsc(void)
- {
- cycle_t ret = (cycle_t)rdtsc_ordered();
- u64 last = gtod->cycle_last;
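- /* Clamp to cycle_last so time never appears to go backwards. */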
- if (likely(ret >= last))
- return ret;
- /*
- * GCC likes to generate cmov here, but this branch is extremely
- * predictable (it's just a function of time and the likely is
- * very likely) and there's a data dependence, so force GCC
- * to generate a branch instead. I don't barrier() because
- * we don't actually need a barrier, and if this function
- * ever gets inlined it will generate worse code.
- */
- asm volatile ("");
- return last;
- }
- notrace static inline u64 vgetsns(int *mode)
- {
- u64 v;
- cycles_t cycles;
- if (gtod->vclock_mode == VCLOCK_TSC)
- cycles = vread_tsc();
- #ifdef CONFIG_HPET_TIMER
- else if (gtod->vclock_mode == VCLOCK_HPET)
- cycles = vread_hpet();
- #endif
- #ifdef CONFIG_PARAVIRT_CLOCK
- else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
- cycles = vread_pvclock(mode);
- #endif
- else
- return 0;
- v = (cycles - gtod->cycle_last) & gtod->mask;
- return v * gtod->mult;
- }
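- /*
-  * Callers turn the value returned above into nanoseconds with the
-  * usual clocksource mult/shift scheme, conceptually:
-  *
-  *   ns = base_snsec + (cycles - cycle_last) * mult;
-  *   ns >>= shift;
-  *
-  * The base *_snsec fields are stored pre-shifted so the addition can
-  * happen before the final shift.
-  */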
- /* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
- notrace static __always_inline int do_realtime(struct timespec *ts)
- {
- unsigned long seq;
- u64 ns;
- int mode;
- do {
- seq = gtod_read_begin(gtod);
- mode = gtod->vclock_mode;
- ts->tv_sec = gtod->wall_time_sec;
- ns = gtod->wall_time_snsec;
- ns += vgetsns(&mode);
- ns >>= gtod->shift;
- } while (unlikely(gtod_read_retry(gtod, seq)));
- ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
- return mode;
- }
- notrace static __always_inline int do_monotonic(struct timespec *ts)
- {
- unsigned long seq;
- u64 ns;
- int mode;
- do {
- seq = gtod_read_begin(gtod);
- mode = gtod->vclock_mode;
- ts->tv_sec = gtod->monotonic_time_sec;
- ns = gtod->monotonic_time_snsec;
- ns += vgetsns(&mode);
- ns >>= gtod->shift;
- } while (unlikely(gtod_read_retry(gtod, seq)));
- ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
- return mode;
- }
- notrace static void do_realtime_coarse(struct timespec *ts)
- {
- unsigned long seq;
- do {
- seq = gtod_read_begin(gtod);
- ts->tv_sec = gtod->wall_time_coarse_sec;
- ts->tv_nsec = gtod->wall_time_coarse_nsec;
- } while (unlikely(gtod_read_retry(gtod, seq)));
- }
- notrace static void do_monotonic_coarse(struct timespec *ts)
- {
- unsigned long seq;
- do {
- seq = gtod_read_begin(gtod);
- ts->tv_sec = gtod->monotonic_time_coarse_sec;
- ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
- } while (unlikely(gtod_read_retry(gtod, seq)));
- }
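- /*
-  * The coarse variants only copy values the timekeeper updates at tick
-  * time, so they need no hardware counter read and never fall back to
-  * a syscall.
-  */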
- notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
- {
- switch (clock) {
- case CLOCK_REALTIME:
- if (do_realtime(ts) == VCLOCK_NONE)
- goto fallback;
- break;
- case CLOCK_MONOTONIC:
- if (do_monotonic(ts) == VCLOCK_NONE)
- goto fallback;
- break;
- case CLOCK_REALTIME_COARSE:
- do_realtime_coarse(ts);
- break;
- case CLOCK_MONOTONIC_COARSE:
- do_monotonic_coarse(ts);
- break;
- default:
- goto fallback;
- }
- return 0;
- fallback:
- return vdso_fallback_gettime(clock, ts);
- }
- int clock_gettime(clockid_t, struct timespec *)
- __attribute__((weak, alias("__vdso_clock_gettime")));
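- /*
-  * The weak alias above is what lets libc resolve a plain
-  * clock_gettime() call into the vDSO, e.g. (illustrative):
-  *
-  *   struct timespec ts;
-  *   clock_gettime(CLOCK_MONOTONIC, &ts);
-  *
-  * Only the fallback paths above enter the kernel.
-  */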
- notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
- {
- if (likely(tv != NULL)) {
- if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
- return vdso_fallback_gtod(tv, tz);
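- /* do_realtime() filled tv_usec with nanoseconds; scale to microseconds. */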
- tv->tv_usec /= 1000;
- }
- if (unlikely(tz != NULL)) {
- tz->tz_minuteswest = gtod->tz_minuteswest;
- tz->tz_dsttime = gtod->tz_dsttime;
- }
- return 0;
- }
- int gettimeofday(struct timeval *, struct timezone *)
- __attribute__((weak, alias("__vdso_gettimeofday")));
- /*
- * This will break when the xtime seconds get inaccurate, but that is
- * unlikely.
- */
- notrace time_t __vdso_time(time_t *t)
- {
- /* This is atomic on x86 so we don't need any locks. */
- time_t result = ACCESS_ONCE(gtod->wall_time_sec);
- if (t)
- *t = result;
- return result;
- }
- int time(time_t *t)
- __attribute__((weak, alias("__vdso_time")));
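- /*
-  * As with the other entry points, a plain call such as (illustrative)
-  *
-  *   time_t now = time(NULL);
-  *
-  * can be satisfied entirely in userspace via the alias above.
-  */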