/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>

#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t	tsc_freq;
int		tsc_is_invariant;
int		tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

#ifdef SMP
static int	smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
#endif
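
/*
 * Usage example (values illustrative only): since these knobs are
 * boot-time tunables, they are normally set from the loader, e.g. in
 * /boot/loader.conf:
 *
 *	kern.timecounter.smp_tsc=1
 *	machdep.disable_tsc_calibration=1
 */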

static int	tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int	tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
        tsc_get_timecount,      /* get_timecount */
        0,                      /* no poll_pps */
        ~0u,                    /* counter_mask */
        0,                      /* frequency */
        "TSC",                  /* name */
        800,                    /* quality (adjusted in code) */
};

#define	VMW_HVMAGIC		0x564d5868
#define	VMW_HVPORT		0x5658
#define	VMW_HVCMD_GETVERSION	10
#define	VMW_HVCMD_GETHZ		45
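
/*
 * VMware's "backdoor" hypervisor call: an IN on port 0x5658 (VMW_HVPORT)
 * with the magic number 0x564d5868 ("VMXh", VMW_HVMAGIC) in %eax and a
 * command number in %ecx is intercepted by the hypervisor, which returns
 * its results in the four general-purpose registers.
 */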

static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

        __asm __volatile("inl %w3, %0"
        : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
        : "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT));
}

static int
tsc_freq_vmware(void)
{
        char hv_sig[13];
        u_int regs[4];
        char *p;
        u_int hv_high;
        int i;

        /*
         * [RFC] CPUID usage for interaction between Hypervisors and Linux.
         * http://lkml.org/lkml/2008/10/1/246
         *
         * KB1009458: Mechanisms to determine if software is running in
         * a VMware virtual machine
         * http://kb.vmware.com/kb/1009458
         */
        hv_high = 0;
        if ((cpu_feature2 & CPUID2_HV) != 0) {
                do_cpuid(0x40000000, regs);
                hv_high = regs[0];
                for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
                        memcpy(p, &regs[i], sizeof(regs[i]));
                *p = '\0';

                /*
                 * HV vendor	ID string
                 * ------------+--------------
                 * KVM		"KVMKVMKVM"
                 * Microsoft	"Microsoft Hv"
                 * VMware	"VMwareVMware"
                 * Xen		"XenVMMXenVMM"
                 */
                printf("Hypervisor: Origin = \"%s\"\n", hv_sig);

                if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
                        return (0);
        } else {
                p = getenv("smbios.system.serial");
                if (p != NULL) {
                        if (strncmp(p, "VMware-", 7) != 0 &&
                            strncmp(p, "VMW", 3) != 0) {
                                freeenv(p);
                                return (0);
                        }
                        freeenv(p);
                } else
                        return (0);
        }

        vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
        if (regs[1] != VMW_HVMAGIC)
                return (0);

        if (hv_high >= 0x40000010) {
                do_cpuid(0x40000010, regs);
                tsc_freq = regs[0] * 1000;
        } else {
                vmware_hvcall(VMW_HVCMD_GETHZ, regs);
                if (regs[1] != UINT_MAX)
                        tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
        }
        tsc_is_invariant = 1;
        return (1);
}

static void
tsc_freq_intel(void)
{
        char brand[48];
        u_int regs[4];
        uint64_t freq;
        char *p;
        u_int i;

        /*
         * Intel Processor Identification and the CPUID Instruction
         * Application Note 485.
         * http://www.intel.com/assets/pdf/appnote/241618.pdf
         */
        if (cpu_exthigh >= 0x80000004) {
                p = brand;
                for (i = 0x80000002; i < 0x80000005; i++) {
                        do_cpuid(i, regs);
                        memcpy(p, regs, sizeof(regs));
                        p += sizeof(regs);
                }
                p = NULL;
                for (i = 0; i < sizeof(brand) - 1; i++)
                        if (brand[i] == 'H' && brand[i + 1] == 'z')
                                p = brand + i;
                if (p != NULL) {
                        p -= 5;
                        switch (p[4]) {
                        case 'M':
                                i = 1;
                                break;
                        case 'G':
                                i = 1000;
                                break;
                        case 'T':
                                i = 1000000;
                                break;
                        default:
                                return;
                        }
#define	C2D(c)	((c) - '0')
                        if (p[1] == '.') {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[2]) * 100;
                                freq += C2D(p[3]) * 10;
                                freq *= i * 1000;
                        } else {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[1]) * 100;
                                freq += C2D(p[2]) * 10;
                                freq += C2D(p[3]);
                                freq *= i * 1000000;
                        }
#undef C2D
                        tsc_freq = freq;
                }
        }
}
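
/*
 * Worked example for the brand-string parser above: a CPU whose brand
 * string ends in "2.40GHz" leaves p pointing at "2.40G", so p[1] == '.'
 * and p[4] == 'G' (i = 1000).  Then freq = 2*1000 + 4*100 + 0*10 = 2400,
 * and freq *= 1000 * 1000 yields tsc_freq = 2400000000 Hz.
 */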

static void
probe_tsc_freq(void)
{
        u_int regs[4];
        uint64_t tsc1, tsc2;

        if (cpu_high >= 6) {
                do_cpuid(6, regs);
                if ((regs[2] & CPUID_PERF_STAT) != 0) {
                        /*
                         * XXX Some emulators expose host CPUID without actual
                         * support for these MSRs.  We must test whether they
                         * really work.
                         */
                        wrmsr(MSR_MPERF, 0);
                        wrmsr(MSR_APERF, 0);
                        DELAY(10);
                        if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
                                tsc_perf_stat = 1;
                }
        }

        if (tsc_freq_vmware())
                return;

        switch (cpu_vendor_id) {
        case CPU_VENDOR_AMD:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) >= 0x10))
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_mfence;
                }
                break;
        case CPU_VENDOR_INTEL:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
                    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
                    CPUID_TO_MODEL(cpu_id) >= 0x3))))
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_lfence;
                }
                break;
        case CPU_VENDOR_CENTAUR:
                if (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xf &&
                    (rdmsr(0x1203) & 0x100000000ULL) == 0)
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_lfence;
                }
                break;
        }

        if (tsc_skip_calibration) {
                if (cpu_vendor_id == CPU_VENDOR_INTEL)
                        tsc_freq_intel();
                return;
        }

        if (bootverbose)
                printf("Calibrating TSC clock ... ");
        tsc1 = rdtsc();
        DELAY(1000000);
        tsc2 = rdtsc();
        tsc_freq = tsc2 - tsc1;
        if (bootverbose)
                printf("TSC clock: %ju Hz\n", (uintmax_t)tsc_freq);
}
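
/*
 * Note on the calibration above: DELAY() takes microseconds, so
 * DELAY(1000000) busy-waits for one second, and the difference of the
 * two RDTSC samples is the TSC rate in Hz directly (roughly 2400000000
 * ticks would be observed on an assumed 2.4 GHz part, for example).
 */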

void
init_TSC(void)
{

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        probe_tsc_freq();

        /*
         * Inform CPU accounting about our boot-time clock rate.  This will
         * be updated if someone loads a cpufreq driver after boot that
         * discovers a new max frequency.
         */
        if (tsc_freq != 0)
                set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

        if (tsc_is_invariant)
                return;

        /* Register to find out about changes in CPU frequency. */
        tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
            tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
            tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
            tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}
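
/*
 * The cpufreq handlers above are only registered when the TSC is not
 * P-state invariant: an invariant TSC ticks at a constant rate across
 * frequency transitions, so there is nothing to track.
 */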

#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define	TSC_READ(x)						\
static void							\
tsc_read_##x(void *arg)						\
{								\
        uint32_t *tsc = arg;					\
        u_int cpu = PCPU_GET(cpuid);				\
								\
        __asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx"); \
        tsc[cpu * 3 + x] = rdtsc32();				\
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define	N	1000
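
/*
 * The macro above expands to three functions, tsc_read_0/1/2, one per
 * rendezvous pass.  Each CPU owns three consecutive slots in the sample
 * array (tsc[cpu * 3 + pass]), so one rendezvous round leaves every
 * CPU's pass-0, pass-1 and pass-2 TSC readings side by side for the
 * comparison below.
 */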

static void
comp_smp_tsc(void *arg)
{
        uint32_t *tsc;
        int32_t d1, d2;
        u_int cpu = PCPU_GET(cpuid);
        u_int i, j, size;

        size = (mp_maxid + 1) * 3;
        for (i = 0, tsc = arg; i < N; i++, tsc += size)
                CPU_FOREACH(j) {
                        if (j == cpu)
                                continue;
                        d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
                        d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
                        if (d1 <= 0 || d2 <= 0) {
                                smp_tsc = 0;
                                return;
                        }
                }
}
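
/*
 * In the check above, d1 and d2 require that this CPU's pass-1 (resp.
 * pass-2) reading be strictly greater than every other CPU's pass-0
 * (resp. pass-1) reading.  Since the passes are separated by rendezvous
 * barriers, a non-positive difference means one TSC lags another by more
 * than the rendezvous latency, i.e. the TSCs are not usable as a common
 * timebase.
 */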

static int
test_smp_tsc(void)
{
        uint32_t *data, *tsc;
        u_int i, size;

        if (!smp_tsc && !tsc_is_invariant)
                return (-100);
        size = (mp_maxid + 1) * 3;
        data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
        for (i = 0, tsc = data; i < N; i++, tsc += size)
                smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
        smp_tsc = 1;	/* XXX */
        smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
            smp_no_rendevous_barrier, data);
        free(data, M_TEMP);
        if (bootverbose)
                printf("SMP: %sed TSC synchronization test\n",
                    smp_tsc ? "pass" : "fail");
        if (smp_tsc && tsc_is_invariant) {
                switch (cpu_vendor_id) {
                case CPU_VENDOR_AMD:
                        /*
                         * Starting with Family 15h processors, TSC clock
                         * source is in the north bridge.  Check whether
                         * we have a single-socket/multi-core platform.
                         * XXX Need more work for complex cases.
                         */
                        if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
                            (amd_feature2 & AMDID2_CMP) == 0 ||
                            smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
                                break;
                        return (1000);
                case CPU_VENDOR_INTEL:
                        /*
                         * XXX Assume Intel platforms have synchronized TSCs.
                         */
                        return (1000);
                }
        }
        return (800);
}

#endif /* SMP */
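
/*
 * A note on the quality values returned above: the timecounter code
 * auto-selects the highest-quality counter, and a negative quality
 * excludes a counter from automatic selection while still allowing an
 * administrator to choose it explicitly.
 */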

void
init_TSC_tc(void)
{
        uint64_t max_freq;
        int shift;

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        /*
         * Limit timecounter frequency to fit in an int and prevent it from
         * overflowing too fast.
         */
        max_freq = UINT_MAX;

        /*
         * We cannot use the TSC if we support APM.  Precise timekeeping
         * on an APM'ed machine is at best a fool's pursuit, since
         * any and all of the time spent in various SMM code can't
         * be reliably accounted for.  Reading the RTC is your only
         * source of reliable time info.  The i8254 loses too, of course,
         * but we need to have some kind of time...
         * We don't know at this point whether APM is going to be used
         * or not, nor when it might be activated.  Play it safe.
         */
        if (power_pm_get_type() == POWER_PM_TYPE_APM) {
                tsc_timecounter.tc_quality = -1000;
                if (bootverbose)
                        printf("TSC timecounter disabled: APM enabled.\n");
                goto init;
        }

        /*
         * We cannot use the TSC if it stops incrementing in deep sleep.
         * Currently only Intel CPUs are known for this problem unless
         * the invariant TSC bit is set.
         */
        if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
            (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
                tsc_timecounter.tc_quality = -1000;
                tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
                if (bootverbose)
                        printf("TSC timecounter disabled: C3 enabled.\n");
                goto init;
        }

#ifdef SMP
        /*
         * We cannot use the TSC in SMP mode unless the TSCs on all CPUs are
         * synchronized.  If the user is sure that the system has synchronized
         * TSCs, set kern.timecounter.smp_tsc tunable to a non-zero value.
         * We also limit the frequency even lower to avoid "temporal anomalies"
         * as much as possible.  The TSC seems unreliable in virtualized SMP
         * environments, so it is set to a negative quality in those cases.
         */
        if (smp_cpus > 1) {
                if (vm_guest != 0) {
                        tsc_timecounter.tc_quality = -100;
                } else {
                        tsc_timecounter.tc_quality = test_smp_tsc();
                        max_freq >>= 8;
                }
        } else
#endif
        if (tsc_is_invariant)
                tsc_timecounter.tc_quality = 1000;

init:
        for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
                ;
        if (shift > 0) {
                if (cpu_feature & CPUID_SSE2) {
                        if (cpu_vendor_id == CPU_VENDOR_AMD) {
                                tsc_timecounter.tc_get_timecount =
                                    tsc_get_timecount_low_mfence;
                        } else {
                                tsc_timecounter.tc_get_timecount =
                                    tsc_get_timecount_low_lfence;
                        }
                } else
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_low;
                tsc_timecounter.tc_name = "TSC-low";
                if (bootverbose)
                        printf("TSC timecounter discards lower %d bit(s)\n",
                            shift);
        }
        if (tsc_freq != 0) {
                tsc_timecounter.tc_frequency = tsc_freq >> shift;
                tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
                tc_init(&tsc_timecounter);
        }
}
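
/*
 * Example of the shift logic above: on an SMP box that passed the test,
 * max_freq is UINT_MAX >> 8 (about 16.7 MHz), so an assumed 2.4 GHz TSC
 * gets shift = 8 and is advertised as a ~9.4 MHz "TSC-low" timecounter;
 * tc_priv carries the shift so the read functions can apply it.
 */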

SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
        device_t cf_dev;
        struct cf_level *levels;
        int count, error;
        uint64_t max_freq;

        /* Only use values from the first CPU, assuming all are equal. */
        if (unit != 0)
                return;

        /* Find the appropriate cpufreq device instance. */
        cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
        if (cf_dev == NULL) {
                printf("tsc_levels_changed() called but no cpufreq device?\n");
                return;
        }

        /* Get settings from the device and find the max frequency. */
        count = 64;
        levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL)
                return;
        error = CPUFREQ_LEVELS(cf_dev, levels, &count);
        if (error == 0 && count != 0) {
                max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
                set_cputicker(rdtsc, max_freq, 1);
        } else
                printf("tsc_levels_changed: no max freq found\n");
        free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

        if (*status != 0 || timecounter != &tsc_timecounter)
                return;

        printf("timecounter TSC must not be in use when "
            "changing frequencies; change denied\n");
        *status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
        uint64_t freq;

        /* If there was an error during the transition, don't do anything. */
        if (tsc_disabled || status != 0)
                return;

        /* Total setting for this level gives the new frequency in MHz. */
        freq = (uint64_t)level->total_set.freq * 1000000;
        atomic_store_rel_64(&tsc_freq, freq);
        tsc_timecounter.tc_frequency =
            freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
        uint64_t freq;
        int error;

        freq = atomic_load_acq_64(&tsc_freq);
        if (freq == 0)
                return (EOPNOTSUPP);
        error = sysctl_handle_64(oidp, &freq, 0, req);
        if (error == 0 && req->newptr != NULL) {
                atomic_store_rel_64(&tsc_freq, freq);
                atomic_store_rel_64(&tsc_timecounter.tc_frequency,
                    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
        }
        return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");
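
/*
 * Userland example for the handler above (output value illustrative):
 *
 *	# sysctl machdep.tsc_freq
 *	machdep.tsc_freq: 2400000000
 *
 * Writing the OID updates both tsc_freq and the advertised timecounter
 * frequency, applying the same tc_priv shift.
 */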

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

        return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
        uint32_t rv;

        __asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
            : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
        return (rv);
}
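
/*
 * The SHRD above shifts the 64-bit %edx:%eax result of RDTSC right by
 * the bit count stored in tc_priv (loaded into %cl), so the function
 * returns the low 32 bits of (tsc >> shift) without a 64-bit temporary.
 */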

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

        lfence();
        return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

        lfence();
        return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

        mfence();
        return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

        mfence();
        return (tsc_get_timecount_low(tc));
}

uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

        vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
        return (timecounter == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

        vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
        return (timecounter == &tsc_timecounter);
}
#endif