/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_clock.h"

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>

#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/vmware.h>
#include <dev/acpica/acpi_hpet.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "cpufreq_if.h"
static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");

SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");

int smp_tsc_adjust = 0;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_adjust, CTLFLAG_RDTUN,
    &smp_tsc_adjust, 0, "Try to adjust TSC on APs to match BSP");

static int tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");

static int tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");

static int tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN |
    CTLFLAG_NOFETCH, &tsc_skip_calibration, 0,
    "Disable TSC frequency calibration");
static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);
static uint32_t x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
#ifdef COMPAT_FREEBSD32
static uint32_t x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc);
#endif
static struct timecounter tsc_timecounter = {
	.tc_get_timecount = tsc_get_timecount,
	.tc_counter_mask = ~0u,
	.tc_quality = 800,		/* adjusted in code */
	.tc_fill_vdso_timehands = x86_tsc_vdso_timehands,
#ifdef COMPAT_FREEBSD32
	.tc_fill_vdso_timehands32 = x86_tsc_vdso_timehands32,
tsc_freq_vmware(void)
	if (hv_high >= 0x40000010) {
		do_cpuid(0x40000010, regs);
		tsc_freq = regs[0] * 1000;
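		/*
		 * Leaf 0x40000010 reports the TSC rate in kHz in EAX
		 * (converted to Hz above); the GETHZ hypercall below
		 * instead returns a 64-bit value already in Hz, with ~0
		 * in the upper half meaning the call is unsupported.
		 */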
		vmware_hvcall(VMW_HVCMD_GETHZ, regs);
		if (regs[1] != UINT_MAX)
			tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
	tsc_is_invariant = 1;
 * Calculate the TSC frequency from CPUID leaf 0x15, 'Time Stamp Counter
 * and Nominal Core Crystal Clock'.  When available, this is an
 * improvement over parsing the CPU model name in tsc_freq_intel().
	do_cpuid(0x15, regs);
	if (regs[0] == 0 || regs[1] == 0 || regs[2] == 0)
	tsc_freq = (uint64_t)regs[2] * regs[1] / regs[0];
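	/*
	 * Leaf 0x15 returns the TSC/core-crystal ratio as EBX/EAX and the
	 * nominal crystal frequency in Hz in ECX, so the TSC frequency is
	 * ECX * EBX / EAX, as computed above.
	 */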
	 * Intel Processor Identification and the CPUID Instruction
	 * Application Note 485.
	 * http://www.intel.com/assets/pdf/appnote/241618.pdf
	if (cpu_exthigh >= 0x80000004) {
		for (i = 0x80000002; i < 0x80000005; i++) {
			memcpy(p, regs, sizeof(regs));
		for (i = 0; i < sizeof(brand) - 1; i++)
			if (brand[i] == 'H' && brand[i + 1] == 'z')
#define	C2D(c)	((c) - '0')
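	/*
	 * The digits immediately preceding the "Hz" suffix of the brand
	 * string encode the frequency.  The "d.dd" form (e.g. "2.40GHz"
	 * gives 2*1000 + 4*100 + 0*10 = 2400) and the "ddd" form (e.g.
	 * "800MHz") are handled by the two alternatives below; the
	 * surrounding if/else and the M/G unit scaling are not shown here.
	 */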
		freq = C2D(p[0]) * 1000;
		freq += C2D(p[2]) * 100;
		freq += C2D(p[3]) * 10;
		freq = C2D(p[0]) * 1000;
		freq += C2D(p[1]) * 100;
		freq += C2D(p[2]) * 10;
		if ((regs[2] & CPUID_PERF_STAT) != 0) {
			/*
			 * XXX Some emulators expose host CPUID without actual
			 * support for these MSRs.  We must test whether they
			 * really work.
			 */
			if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
	if (vm_guest == VM_GUEST_VMWARE) {

	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) >= 0x10))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_mfence;
	case CPU_VENDOR_INTEL:
		if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
		    (vm_guest == VM_GUEST_NO &&
		    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
		    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
		    CPUID_TO_MODEL(cpu_id) >= 0x3))))
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
	case CPU_VENDOR_CENTAUR:
		if (vm_guest == VM_GUEST_NO &&
		    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
		    CPUID_TO_MODEL(cpu_id) >= 0xf &&
		    (rdmsr(0x1203) & 0x100000000ULL) == 0)
			tsc_is_invariant = 1;
		if (cpu_feature & CPUID_SSE2) {
			tsc_timecounter.tc_get_timecount =
			    tsc_get_timecount_lfence;
	if (!TUNABLE_INT_FETCH("machdep.disable_tsc_calibration",
	    &tsc_skip_calibration)) {
		/*
		 * The user did not say whether to skip calibration; if
		 * they had, we would not second-guess them.
		 *
		 * Otherwise, if the ACPI FADT reports that the platform
		 * is legacy-free and CPUID provides the TSC frequency,
		 * use it.  Calibration could fail anyway since the
		 * ISA timer may be absent or power-gated.
		 */
		if (acpi_get_fadt_bootflags(&bootflags) &&
		    (bootflags & ACPI_FADT_LEGACY_DEVICES) == 0 &&
		    tsc_freq_cpuid()) {
			printf("Skipping TSC calibration since no legacy "
			    "devices reported by FADT and CPUID works\n");
			tsc_skip_calibration = 1;
	if (tsc_skip_calibration) {
		if (tsc_freq_cpuid())
		else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		printf("Calibrating TSC clock ... ");
		tsc_freq = tsc2 - tsc1;
	printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)

	/* The TSC is known to be broken on certain CPUs. */
	switch (cpu_vendor_id) {
		switch (cpu_id & 0xFF0) {
	case CPU_VENDOR_CENTAUR:
		switch (cpu_id & 0xff0) {
			 * http://www.centtech.com/c6_data_sheet.pdf
			 * I-12 RDTSC may return incoherent values in EDX:EAX
			 * I-13 RDTSC hangs when certain event counters are used
		switch (cpu_id & 0xff0) {
			if ((cpu_id & CPUID_STEPPING) == 0)
	 * Inform CPU accounting about our boot-time clock rate.  This will
	 * be updated if someone loads a cpufreq driver after boot that
	 * discovers a new max frequency.
	 */
	set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

	if (tsc_is_invariant)
		return;

	/* Register to find out about changes in CPU frequency. */
	tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
	    tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
	    tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
	tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
	    tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
 * RDTSC is not a serializing instruction and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  This could be fixed by using RDTSCP, except that instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines, which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * the vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need the CPUID results, which
 * would have to be written into memory with do_cpuid().
 */
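
/*
 * test_tsc() has each CPU record three TSC samples via smp_rendezvous():
 * tsc_read_<x>(), generated by TSC_READ() below, stores rdtsc() into
 * tsc[cpuid * 3 + x].
 */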
#define	TSC_READ(x)						\
tsc_read_##x(void *arg)						\
	uint64_t *tsc = arg;					\
	u_int cpu = PCPU_GET(cpuid);				\
	__asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx"); \
	tsc[cpu * 3 + x] = rdtsc();				\
comp_smp_tsc(void *arg)
	u_int cpu = PCPU_GET(cpuid);

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size)
			d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
			d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
			if (d1 <= 0 || d2 <= 0) {
adj_smp_tsc(void *arg)
	u_int cpu = PCPU_GET(cpuid);
	u_int first, i, size;

	size = (mp_maxid + 1) * 3;
	for (i = 0, tsc = arg; i < N; i++, tsc += size) {
		d = tsc[first * 3] - tsc[cpu * 3 + 1];
		d = tsc[first * 3 + 1] - tsc[cpu * 3 + 2];
		d = tsc[first * 3 + 1] - tsc[cpu * 3];
		d = tsc[first * 3 + 2] - tsc[cpu * 3 + 1];
	d = min / 2 + max / 2;
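	/*
	 * The four deltas above bound the BSP-vs-AP TSC offset from both
	 * sides (the min/max updates are elided here); d is the midpoint.
	 * The assembly below adds d to MSR 0x10 (IA32_TIME_STAMP_COUNTER):
	 * EDI:ESI carry the 64-bit adjustment, which is added to the
	 * current value and written back with WRMSR.
	 */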
464 "movl $0x10, %%ecx\n\t"
466 "addl %%edi, %%eax\n\t"
467 "adcl %%esi, %%edx\n\t"
470 : "D" ((uint32_t)d), "S" ((uint32_t)(d >> 32))
471 : "ax", "cx", "dx", "cc"
test_tsc(int adj_max_count)
	uint64_t *data, *tsc;

	if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
	size = (mp_maxid + 1) * 3;
	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
	for (i = 0, tsc = data; i < N; i++, tsc += size)
		smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
	smp_tsc = 1;	/* XXX */
	smp_rendezvous(smp_no_rendezvous_barrier, comp_smp_tsc,
	    smp_no_rendezvous_barrier, data);
	if (!smp_tsc && adj < adj_max_count) {
		smp_rendezvous(smp_no_rendezvous_barrier, adj_smp_tsc,
		    smp_no_rendezvous_barrier, data);
	printf("SMP: %sed TSC synchronization test%s\n",
	    smp_tsc ? "pass" : "fail",
	    adj > 0 ? " after adjustment" : "");
	if (smp_tsc && tsc_is_invariant) {
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
			/*
			 * Starting with Family 15h processors, the TSC clock
			 * source is in the north bridge.  Check whether
			 * we have a single-socket/multi-core platform.
			 * XXX Need more work for complex cases.
			 */
			if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
			    (amd_feature2 & AMDID2_CMP) == 0 ||
			    smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
		case CPU_VENDOR_INTEL:
			/*
			 * XXX Assume Intel platforms have synchronized TSCs.
			 */
	if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)

	/*
	 * Limit timecounter frequency to fit in an int and prevent it from
	 * overflowing too fast.
	/*
	 * We cannot use the TSC if we support APM.  Precise timekeeping
	 * on an APM'ed machine is at best a fool's errand, since
	 * any and all of the time spent in various SMM code can't
	 * be reliably accounted for.  Reading the RTC is your only
	 * source of reliable time info.  The i8254 loses too, of course,
	 * but we need to have some kind of time...
	 * We don't know at this point whether APM is going to be used
	 * or not, nor when it might be activated.  Play it safe.
	 */
	if (power_pm_get_type() == POWER_PM_TYPE_APM) {
		tsc_timecounter.tc_quality = -1000;
			printf("TSC timecounter disabled: APM enabled.\n");
	/*
	 * Intel CPUs without a C-state invariant TSC can stop the TSC
	 * in either C2 or C3.  Disable use of C2 and C3 while using
	 * the TSC as the timecounter.  The timecounter can be changed
	 * to enable C2 and C3.
	 *
	 * Note that the TSC is used as the cputicker for computing
	 * thread runtime regardless of the timecounter setting, so
	 * using an alternate timecounter and enabling C2 or C3 can
	 * result in incorrect runtimes for kernel idle threads (but not
	 * for any non-idle threads).
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
		tsc_timecounter.tc_flags |= TC_FLAGS_C2STOP;
			printf("TSC timecounter disables C2 and C3.\n");
	/*
	 * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
	 * are synchronized.  If the user is sure that the system has
	 * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to a
	 * non-zero value.  The TSC seems unreliable in virtualized SMP
	 * environments, so it is set to a negative quality in those cases.
	 */
		tsc_timecounter.tc_quality = test_tsc(smp_tsc_adjust);
	if (tsc_is_invariant)
		tsc_timecounter.tc_quality = 1000;
	max_freq >>= tsc_shift;

	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
	if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
		if (cpu_vendor_id == CPU_VENDOR_AMD) {
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_mfence :
			    tsc_get_timecount_mfence;
			tsc_timecounter.tc_get_timecount = shift > 0 ?
			    tsc_get_timecount_low_lfence :
			    tsc_get_timecount_lfence;
		tsc_timecounter.tc_get_timecount = shift > 0 ?
		    tsc_get_timecount_low : tsc_get_timecount;
		tsc_timecounter.tc_name = "TSC-low";
		printf("TSC timecounter discards lower %d bit(s)\n",
	tsc_timecounter.tc_frequency = tsc_freq >> shift;
	tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
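	/*
	 * The shift is stashed in tc_priv so that the TSC-low timecount
	 * methods and the vDSO fill routines below right-shift the raw
	 * TSC value by the same amount.
	 */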
	tc_init(&tsc_timecounter);

SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

	/* If TSC was not good on boot, it is unlikely to become good now. */
	if (tsc_timecounter.tc_quality < 0)
	/* Nothing to do on UP. */
	/*
	 * If TSC was good, a single synchronization should be enough,
	 * but honour smp_tsc_adjust if it's set.
	 */
	quality = test_tsc(MAX(smp_tsc_adjust, 1));
	if (quality != tsc_timecounter.tc_quality) {
		printf("TSC timecounter quality changed: %d -> %d\n",
		    tsc_timecounter.tc_quality, quality);
		tsc_timecounter.tc_quality = quality;
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
tsc_levels_changed(void *arg, int unit)
	struct cf_level *levels;

	/* Only use values from the first CPU, assuming all are equal. */

	/* Find the appropriate cpufreq device instance. */
	cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
	if (cf_dev == NULL) {
		printf("tsc_levels_changed() called but no cpufreq device?\n");

	/* Get settings from the device and find the max frequency. */
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	error = CPUFREQ_LEVELS(cf_dev, levels, &count);
	if (error == 0 && count != 0) {
		max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
		set_cputicker(rdtsc, max_freq, 1);
	} else
		printf("tsc_levels_changed: no max freq found\n");
	free(levels, M_TEMP);
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
	if (*status != 0 || timecounter != &tsc_timecounter)
	printf("timecounter TSC must not be in use when "
	    "changing frequencies; change denied\n");

/* Update TSC freq with the value indicated by the caller. */
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
	/* If there was an error during the transition, don't do anything. */
	if (tsc_disabled || status != 0)

	/* Total setting for this level gives the new frequency in MHz. */
	freq = (uint64_t)level->total_set.freq * 1000000;
	atomic_store_rel_64(&tsc_freq, freq);
	tsc_timecounter.tc_frequency =
	    freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
	freq = atomic_load_acq_64(&tsc_freq);
	error = sysctl_handle_64(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		atomic_store_rel_64(&tsc_freq, freq);
		atomic_store_rel_64(&tsc_timecounter.tc_frequency,
		    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");
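/*
 * From userland, "sysctl machdep.tsc_freq" reads the current value and
 * "sysctl machdep.tsc_freq=<hz>" writes it, which also rescales the
 * timecounter frequency by the TSC-low shift as shown above.
 */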
tsc_get_timecount(struct timecounter *tc __unused)

tsc_get_timecount_low(struct timecounter *tc)
	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
	    : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
tsc_get_timecount_lfence(struct timecounter *tc __unused)

tsc_get_timecount_low_lfence(struct timecounter *tc)
	return (tsc_get_timecount_low(tc));

tsc_get_timecount_mfence(struct timecounter *tc __unused)

tsc_get_timecount_low_mfence(struct timecounter *tc)
	return (tsc_get_timecount_low(tc));
x86_tsc_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
	vdso_th->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));

#ifdef COMPAT_FREEBSD32
x86_tsc_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
	vdso_th32->th_algo = VDSO_TH_ALGO_X86_TSC;
	vdso_th32->th_x86_shift = (int)(intptr_t)tc->tc_priv;
	vdso_th32->th_x86_hpet_idx = 0xffffffff;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));