/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>

#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;
int		smp_tsc;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);

int	smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");
TUNABLE_INT("kern.timecounter.smp_tsc_adjust", &smp_tsc_adjust);

static int	tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");
TUNABLE_INT("kern.timecounter.tsc_shift", &tsc_shift);

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);
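
/*
 * Illustrative usage note (not from the original source): the RDTUN sysctls
 * above are seeded from loader tunables, e.g. in /boot/loader.conf:
 *
 *	kern.timecounter.smp_tsc="1"
 *	kern.timecounter.smp_tsc_adjust="1"
 *	machdep.disable_tsc_calibration="1"
 */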

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
	tsc_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"TSC",			/* name */
	800,			/* quality (adjusted in code) */
};

#define	VMW_HVMAGIC		0x564d5868
#define	VMW_HVPORT		0x5658
#define	VMW_HVCMD_GETVERSION	10
#define	VMW_HVCMD_GETHZ		45

static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

	__asm __volatile("inl %w3, %0"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT)
	    : "memory");
}

static int
tsc_freq_vmware(void)
{
	char hv_sig[13];
	u_int regs[4], hv_high;
	char *p;
	int i;

	/*
	 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
	 * http://lkml.org/lkml/2008/10/1/246
	 *
	 * KB1009458: Mechanisms to determine if software is running in
	 * a VMware virtual machine
	 * http://kb.vmware.com/kb/1009458
	 */
	hv_high = 0;
	if ((cpu_feature2 & CPUID2_HV) != 0) {
		do_cpuid(0x40000000, regs);
		hv_high = regs[0];
		for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
			memcpy(p, &regs[i], sizeof(regs[i]));
		*p = '\0';
		/*
		 * HV vendor	ID string
		 * ------------+--------------
		 * Microsoft	"Microsoft Hv"
		 * VMware	"VMwareVMware"
		 */
		printf("Hypervisor: Origin = \"%s\"\n", hv_sig);
		if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
			return (0);
	} else {
		p = getenv("smbios.system.serial");
		if (p == NULL)
			return (0);
		if (strncmp(p, "VMware-", 7) != 0 &&
		    strncmp(p, "VMW", 3) != 0) {
			freeenv(p);
			return (0);
		}
		freeenv(p);
	}
	vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
	if (regs[1] != VMW_HVMAGIC)
		return (0);
	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
	} else {
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	}
	tsc_is_invariant = 1;
	return (1);
}

static void
tsc_freq_intel(void)
{

	/*
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	if (cpu_exthigh >= 0x80000004) {
		for (i = 0x80000002; i < 0x80000005; i++) {
			memcpy(p, regs, sizeof(regs));
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
#define	C2D(c)	((c) - '0')
			if (p[1] == '.') {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[2]) * 100;
				freq += C2D(p[3]) * 10;
			} else {
				freq = C2D(p[0]) * 1000;
				freq += C2D(p[1]) * 100;
				freq += C2D(p[2]) * 10;
			}
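
/*
 * Worked example (illustrative): for a brand string ending in "2.40GHz",
 * p[0] is '2', p[2] is '4' and p[3] is '0', so the first branch yields
 * 2400; a flat "2400MHz"-style string takes the second branch with its
 * digits at p[0]..p[2].  The elided remainder of the function applies the
 * MHz/GHz multiplier taken from the character before "Hz" to turn this
 * into tsc_freq in Hz.
 */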

static void
probe_tsc_freq(void)
{
	u_int regs[4];
	uint64_t tsc1, tsc2;

	if (cpu_high >= 6) {
		do_cpuid(6, regs);
		if ((regs[2] & CPUID_PERF_STAT) != 0) {
			/*
			 * XXX Some emulators expose host CPUID without actual
			 * support for these MSRs.  We must test whether they
			 * really work.
			 */
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
				tsc_perf_stat = 1;
		}
	}

	if (tsc_freq_vmware())
		return;

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
		}
		break;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
		}
		break;
	}

	if (tsc_skip_calibration) {
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			tsc_freq_intel();
		return;
	}

	printf("Calibrating TSC clock ... ");
	tsc1 = rdtsc();
	DELAY(1000000);
	tsc2 = rdtsc();
	tsc_freq = tsc2 - tsc1;
	printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
}

void
init_TSC(void)
{

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	probe_tsc_freq();

	/*
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define	TSC_READ(x)							\
static void								\
tsc_read_##x(void *arg)							\
{									\
	uint32_t *tsc = arg;						\
	u_int cpu = PCPU_GET(cpuid);					\
									\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");	\
	tsc[cpu * 3 + x] = rdtsc32();					\
}
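
/*
 * Note (illustrative, not part of this excerpt): the full source expands
 * the macro for x = 0, 1 and 2, producing the tsc_read_0/1/2 callbacks
 * passed to smp_rendezvous() in test_tsc() below; each CPU therefore owns
 * three consecutive slots (cpu * 3 + x) in the shared sample array:
 *
 *	TSC_READ(0)
 *	TSC_READ(1)
 *	TSC_READ(2)
 *	#undef TSC_READ
 */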

static void
comp_smp_tsc(void *arg)
{
	u_int cpu = PCPU_GET(cpuid);

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
		CPU_FOREACH(j) {
			if (j == cpu)
				continue;
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {

static void
adj_smp_tsc(void *arg)
{
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
	}
	d = min / 2 + max / 2;
	__asm __volatile (
	    "movl $0x10, %%ecx\n\t"
	    "rdmsr\n\t"
	    "addl %%edi, %%eax\n\t"
	    "adcl %%esi, %%edx\n\t"
	    "wrmsr\n"
	    : /* No output */
	    : "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
	    : "ax", "cx", "dx", "cc"
	);
}

static int
test_tsc(void)
{
	uint32_t *data, *tsc;
	u_int i, size, adj;

	if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
		return (-100);
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	adj = 0;
retry:
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
	    smp_no_rendevous_barrier, data);
	if (!smp_tsc && adj < smp_tsc_adjust) {
		adj++;
		smp_rendezvous(smp_no_rendevous_barrier, adj_smp_tsc,
		    smp_no_rendevous_barrier, data);
		goto retry;
	}
	free(data, M_TEMP);
	printf("SMP: %sed TSC synchronization test%s\n",
	    smp_tsc ? "pass" : "fail",
	    adj > 0 ? " after adjustment" : "");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
			/*
			 * Starting with Family 15h processors, the TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
				break;
			return (1000);
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
			return (1000);
		}
	}
	return (800);
}

/*
 * The function is not called; it is provided only to avoid a linking
 * failure on uniprocessor kernels.
 */

void
init_TSC_tc(void)
{
	uint64_t max_freq;
	int shift;

	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
		return;

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	 */
	max_freq = UINT_MAX;

	/*
	 * We cannot use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's pursuit, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
		printf("TSC timecounter disabled: APM enabled.\n");
	}

	/*
	 * We cannot use the TSC if it stops incrementing in deep sleep.
	 * Currently only Intel CPUs are known for this problem unless
	 * the invariant TSC bit is set.
	 */
	if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_quality = -1000;
		tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
		printf("TSC timecounter disabled: C3 enabled.\n");
	}

	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
	if (mp_ncpus > 1)
		tsc_timecounter.tc_quality = test_tsc();
	else if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
		;
	if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
		} else {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		}
	} else {
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
	}
	if (shift > 0) {
		tsc_timecounter.tc_name = "TSC-low";
		printf("TSC timecounter discards lower %d bit(s)\n",
		    shift);
	}
	tsc_timecounter.tc_frequency = tsc_freq >> shift;
	tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
	tc_init(&tsc_timecounter);
}

SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);
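
/*
 * Illustrative usage note: once tc_init() has registered the timecounter,
 * it can be examined and selected from userland, e.g.
 *
 *	sysctl kern.timecounter.choice
 *	sysctl kern.timecounter.hardware=TSC-low
 */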

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
	struct cf_level *levels;

	/* Only use values from the first CPU, assuming all are equal. */
	if (unit != 0)
		return;

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");
		return;
	}

	/* Get settings from the device and find the max frequency. */
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

	if (*status != 0 || timecounter != &tsc_timecounter)
		return;

	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");
	*status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	uint64_t freq;

	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)
		return;

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}
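
/*
 * Worked example (illustrative): with a "TSC-low" shift of 1 stored in
 * tc_priv, a cpufreq level reporting 2400 MHz sets tsc_freq to 2400000000
 * and tc_frequency to 1200000000, matching the one discarded low-order bit.
 */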

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
	uint64_t freq;
	int error;

	freq = atomic_load_acq_64(&tsc_freq);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");
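
/*
 * Illustrative usage note: "sysctl machdep.tsc_freq" reports the current
 * TSC frequency in Hz, and setting the OID goes through the handler above,
 * updating both tsc_freq and the timecounter frequency.
 */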

static u_int
tsc_get_timecount(struct timecounter *tc __unused)

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
	return (tsc_get_timecount_low(tc));

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
	return (tsc_get_timecount_low(tc));
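
/*
 * Illustrative sketch (assumed; the function bodies are elided from this
 * excerpt): each accessor above is a thin wrapper around RDTSC.
 * tsc_get_timecount() returns rdtsc32(); tsc_get_timecount_low() uses the
 * "rdtsc; shrd" sequence shown above, where SHRD shifts %eax right by
 * tc_priv bits while pulling the vacated bits from %edx, i.e. it returns
 * (tsc >> shift) as a 32-bit value.  The *_lfence/*_mfence variants issue
 * the corresponding fence first so RDTSC cannot be executed early, e.g.
 *
 *	static u_int
 *	tsc_get_timecount_mfence(struct timecounter *tc __unused)
 *	{
 *
 *		mfence();
 *		return (rdtsc32());
 *	}
 *
 * and the *_low_* variants call tsc_get_timecount_low(tc) after the fence.
 */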

uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

	vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
	return (timecounter == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

	vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (timecounter == &tsc_timecounter);
}
#endif
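
/*
 * Illustrative note: th_x86_shift lets the userland vDSO reproduce
 * tsc_get_timecount_low() without entering the kernel; a libc-side reader
 * can compute rdtsc() >> th_x86_shift and feed that into the shared
 * timehands, which is why the shift is exported here.
 */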