/*-
 * Copyright (c) 2009 Hudson River Trading LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Support for x86 machine check architecture.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
/* Modes for mca_scan() */
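enum scan_mode {
	POLLED,
	MCE,
	CMCI,
};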
/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
struct cmc_state {
	int	max_threshold;
	int	last_intr;
};

struct mca_internal {
	struct mca_record rec;
	int	logged;
	STAILQ_ENTRY(mca_internal) link;
};
static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static volatile int mca_count;	/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

static int intel6h_HSD131;
SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
    "Administrative toggle for logging of spurious corrected errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");

static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_refill_task, mca_scan_task;
static struct mtx mca_lock;

static struct cmc_state **cmc_state;	/* Indexed by cpuid, bank. */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */
static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}
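/*
 * Look up a single machine check record by index and copy it out to
 * userland; this backs the hw.mca.records sysctl node created below.
 */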
static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (ENOENT);

	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (ENOENT);
	}
	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0])
			break;
		i++;
	}
	record = rec->rec;
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}
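/*
 * Decode the transaction type (bits 3:2) of a compound MCA error code
 * as an instruction ("I"), data ("D"), or generic ("G") access.
 */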
static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}
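/* Decode the memory hierarchy level (bits 1:0) of a compound MCA error code. */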
static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("?");
}
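/* Decode the request type (bits 7:4) of a cache or bus MCA error code. */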
static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}
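/*
 * Decode the memory transaction type (bits 6:4) of a memory controller
 * MCA error code.
 */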
static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}
static int __nonnull(1)
mca_mute(const struct mca_record *rec)
{

	/*
	 * Skip spurious corrected parity errors generated by Intel Haswell-
	 * and Broadwell-based CPUs (see the HSD131, HSM142, HSW131, and BDM48
	 * errata, respectively), unless reporting is enabled.
	 * Note that these errors also have been observed with the D0-stepping
	 * of Haswell, while at least initially the CPU specification updates
	 * suggested only the C0-stepping to be affected.  Similarly, the
	 * Celeron 2955U with a CPU ID of 0x45 apparently is also affected by
	 * the same problem, although HSM142 refers only to 0x3c and 0x46.
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3c ||	/* HSD131, HSM142, HSW131 */
	    CPUID_TO_MODEL(cpu_id) == 0x3d ||	/* BDM48 */
	    CPUID_TO_MODEL(cpu_id) == 0x45 ||
	    CPUID_TO_MODEL(cpu_id) == 0x46) &&	/* HSM142 */
	    rec->mr_bank == 0 &&
	    (rec->mr_status & 0xa0000000ffffffff) == 0x80000000000f0005 &&
	    !intel6h_HSD131)
		return (1);

	return (0);
}
/* Dump details about a single machine check. */
static void __nonnull(1)
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	if (mca_mute(rec))
		return;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (rec->mr_mcg_cap & MCG_CAP_CMCI_P)
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;

	/* Simple error codes. */
	switch (mca_error) {
	case 0x0000:
		printf("unclassified error");
		break;
	case 0x0001:
		printf("ucode ROM parity error");
		break;
	case 0x0002:
		printf("external error");
		break;
	case 0x0003:
		printf("FRC error");
		break;
	case 0x0004:
		printf("internal parity error");
		break;
	case 0x0005:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}
		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}
		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}
		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}
		/* Bus and/or Interconnect error. */
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("Source");
				break;
			case 1:
				printf("Responder");
				break;
			case 2:
				printf("Observer");
				break;
			default:
				printf("???");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("Memory");
				break;
			case 2:
				printf("I/O");
				break;
			case 3:
				printf("Other");
				break;
			default:
				printf("???");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}
407 printf("unknown error %x", mca_error);
411 if (rec->mr_status & MC_STATUS_ADDRV)
412 printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
413 if (rec->mr_status & MC_STATUS_MISCV)
414 printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
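/*
 * Read the pending event (if any) from a single machine check bank
 * into *rec.  Returns 1 if a valid event was found.
 */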
static int __nonnull(2)
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(MSR_MC_STATUS(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(MSR_MC_ADDR(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(MSR_MC_MISC(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(MSR_MC_STATUS(bank), 0);
		do_cpuid(0, p);		/* Serialize the MSR write. */
	}
	return (1);
}
static void
mca_fill_freelist(void)
{
	struct mca_internal *rec;
	int desired;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU.
	 */
	desired = imax(mp_ncpus, mca_banks);
	mtx_lock_spin(&mca_lock);
	while (mca_freecount < desired) {
		mtx_unlock_spin(&mca_lock);
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
		STAILQ_INSERT_TAIL(&mca_freelist, rec, link);
		mca_freecount++;
	}
	mtx_unlock_spin(&mca_lock);
}
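/* Task handler: replenish the record free list from a sleepable context. */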
static void
mca_refill(void *context, int pending)
{

	mca_fill_freelist();
}
static void __nonnull(2)
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	rec->logged = 0;
	STAILQ_INSERT_TAIL(&mca_records, rec, link);
	mca_count++;
	mtx_unlock_spin(&mca_lock);
	if (mode == CMCI)
		taskqueue_enqueue_fast(mca_tq, &mca_refill_task);
}
/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrives, the threshold is
 * increased until the interrupts are throttled to once every
 * cmc_throttle seconds or the periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
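 *
 * For example, with the default cmc_throttle of 60 seconds, a steady
 * burst of correctable errors doubles the threshold (1, 2, 4, ...) up
 * to the bank's maximum so that at most one interrupt fires per
 * minute; once the errors subside, the periodic scan lowers the
 * threshold again.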
 */
static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	u_int delta;
	int count, limit;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	delta = (u_int)(ticks - cc->last_intr);

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		limit = ctl & MC_CTL2_THRESHOLD;
		if (delta < cmc_throttle && count >= limit &&
		    limit < cc->max_threshold) {
			limit = min(limit << 1, cc->max_threshold);
			ctl &= ~MC_CTL2_THRESHOLD;
			ctl |= limit;
			wrmsr(MSR_MC_CTL2(bank), ctl);
		}
		cc->last_intr = ticks;
		return;
	}
	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return;

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return;

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > cc->max_threshold)
			limit = cc->max_threshold;
	} else
		limit = 1;
	if ((ctl & MC_CTL2_THRESHOLD) != limit) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= limit;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}
/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  In the MC exception case this function
 * returns true if the system is restartable.  Otherwise, it returns a
 * count of the number of valid MC records found.
 */
static int
mca_scan(enum scan_mode mode)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling a MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
		 */
		if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
			continue;

		valid = mca_check_status(i, &rec);
		if (valid) {
			count++;
			if (rec.mr_status & ucmask) {
				recoverable = 0;
				mtx_lock_spin(&mca_lock);
				mca_log(&rec);
				mtx_unlock_spin(&mca_lock);
			}
			mca_record_entry(mode, &rec);
		}

		/*
		 * If this is a bank this CPU monitors via CMCI,
		 * update the threshold.
		 */
		if (PCPU_GET(cmci_mask) & 1 << i)
			cmci_update(mode, i, valid, &rec);
	}
	return (mode == MCE ? recoverable : count);
}
/*
 * Scan the machine check banks on all CPUs by binding to each CPU in
 * turn.  If any of the CPUs contained new machine check records, log
 * them to the console.
 */
static void
mca_scan_cpus(void *context, int pending)
{
	struct mca_internal *mca;
	struct thread *td;
	int count, cpu;

	mca_fill_freelist();
	td = curthread;
	count = 0;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		thread_unlock(td);
		count += mca_scan(POLLED);
		thread_lock(td);
		sched_unbind(td);
	}
	thread_unlock(td);
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}
static void
mca_periodic_scan(void *arg)
{

	taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
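/* Sysctl handler: writing a non-zero value forces an immediate scan. */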
static int
sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
	return (0);
}
static void
mca_createtq(void *dummy)
{

	if (mca_banks <= 0)
		return;

	mca_tq = taskqueue_create_fast("mca", M_WAITOK,
	    taskqueue_thread_enqueue, &mca_tq);
	taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");
}
SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL);
static void
mca_startup(void *dummy)
{

	if (mca_banks <= 0)
		return;

	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);
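/* Allocate per-CPU CMCI state and export the throttle tunable. */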
static void
cmci_setup(void)
{
	int i;

	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA,
	    M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
		    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}
static void
mca_setup(uint64_t mcg_cap)
{

	/*
	 * On AMD Family 10h processors, unless logging of level one TLB
	 * parity (L1TP) errors is disabled, enable the recommended workaround
	 * for Erratum 383.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
		workaround_erratum383 = 1;

	mca_banks = mcg_cap & MCG_CAP_COUNT;
	mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
	STAILQ_INIT(&mca_records);
	TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL);
	callout_init(&mca_timer, 1);
	STAILQ_INIT(&mca_freelist);
	TASK_INIT(&mca_refill_task, 0, mca_refill, NULL);
	mca_fill_freelist();
	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
	    "Record count");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
	    0, sysctl_positive_int, "I",
	    "Periodic interval in seconds to scan for machine checks");
	SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_mca_scan, "I", "Force an immediate scan for machine checks");
	if (mcg_cap & MCG_CAP_CMCI_P)
		cmci_setup();
}
/*
 * See if we should monitor CMCI for this bank.  If CMCI_EN is already
 * set in MC_CTL2, then another CPU is responsible for this bank, so
 * ignore it.  If CMCI_EN returns zero after being set, then this bank
 * does not support CMCI_EN.  If this CPU sets CMCI_EN, then it should
 * now monitor this bank.
 */
static void
cmci_monitor(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (ctl & MC_CTL2_CMCI_EN)
		/* Already monitored by another CPU. */
		return;

	/* Set the threshold to one event for now. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (!(ctl & MC_CTL2_CMCI_EN))
		/* This bank does not support CMCI. */
		return;
	cc = &cmc_state[PCPU_GET(cpuid)][i];

	/* Determine maximum threshold. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 0x7fff;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	cc->max_threshold = ctl & MC_CTL2_THRESHOLD;

	/* Start off with a threshold of 1. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 1;
	wrmsr(MSR_MC_CTL2(i), ctl);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}
/*
 * For resume, reset the threshold for any banks we monitor back to
 * one and throw away the timestamp of the last interrupt.
 */
static void
cmci_resume(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];
	cc->last_intr = -ticks;
	ctl = rdmsr(MSR_MC_CTL2(i));
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
}
/*
 * Initializes per-CPU machine check registers and enables corrected
 * machine check interrupts.
 */
static void
_mca_init(int boot)
{
	uint64_t mcg_cap;
	uint64_t ctl, mask;
	int i, skip;

	/* MCE is required. */
	if (!mca_enabled || !(cpu_feature & CPUID_MCE))
		return;

	if (cpu_feature & CPUID_MCA) {
		if (boot)
			PCPU_SET(cmci_mask, 0);

		mcg_cap = rdmsr(MSR_MCG_CAP);
		if (mcg_cap & MCG_CAP_CTL_P)
			/* Enable MCA features. */
			wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
		if (PCPU_GET(cpuid) == 0 && boot)
			mca_setup(mcg_cap);

		/*
		 * Disable logging of level one TLB parity (L1TP) errors by
		 * the data cache as an alternative workaround for AMD Family
		 * 10h Erratum 383.  Unlike the recommended workaround, there
		 * is no performance penalty to this workaround.  However,
		 * L1TP errors will go unreported.
		 */
		if (cpu_vendor_id == CPU_VENDOR_AMD &&
		    CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) {
			mask = rdmsr(MSR_MC0_CTL_MASK);
			if ((mask & (1UL << 5)) == 0)
				wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
		}
		for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
			/* By default enable logging of all errors. */
			ctl = 0xffffffffffffffffUL;
			skip = 0;

			if (cpu_vendor_id == CPU_VENDOR_INTEL) {
				/*
				 * For P6 models before Nehalem MC0_CTL is
				 * always enabled and reserved.
				 */
				if (i == 0 && CPUID_TO_FAMILY(cpu_id) == 0x6
				    && CPUID_TO_MODEL(cpu_id) < 0x1a)
					skip = 1;
			} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
				/* BKDG for Family 10h: unset GartTblWkEn. */
				if (i == 4 && CPUID_TO_FAMILY(cpu_id) >= 0xf)
					ctl &= ~(1UL << 10);
			}

			if (!skip)
				wrmsr(MSR_MC_CTL(i), ctl);

			if (mcg_cap & MCG_CAP_CMCI_P) {
				if (boot)
					cmci_monitor(i);
				else
					cmci_resume(i);
			}

			/* Clear all errors. */
			wrmsr(MSR_MC_STATUS(i), 0);
		}

		if (PCPU_GET(cmci_mask) != 0 && boot)
			lapic_enable_cmc();
	}

	load_cr4(rcr4() | CR4_MCE);
}
/* Must be executed on each CPU during boot. */
void
mca_init(void)
{

	_mca_init(1);
}
/* Must be executed on each CPU during resume. */
void
mca_resume(void)
{

	_mca_init(0);
}
/*
 * The machine check registers for the BSP cannot be initialized until
 * the local APIC is initialized.  This happens at SI_SUB_CPU,
 * SI_ORDER_SECOND.
 */
static void
mca_init_bsp(void *arg __unused)
{

	mca_init();
}
SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL);
/* Called when a machine check exception fires. */
void
mca_intr(void)
{
	uint64_t mcg_status;
	int old_count, recoverable;

	if (!(cpu_feature & CPUID_MCA)) {
		/*
		 * Just print the values of the old Pentium registers
		 * and panic.
		 */
		printf("MC Type: 0x%jx  Address: 0x%jx\n",
		    (uintmax_t)rdmsr(MSR_P5_MC_TYPE),
		    (uintmax_t)rdmsr(MSR_P5_MC_ADDR));
		panic("Machine check");
	}

	/* Scan the banks and check for any non-recoverable errors. */
	old_count = mca_count;
	recoverable = mca_scan(MCE);
	mcg_status = rdmsr(MSR_MCG_STATUS);
	if (!(mcg_status & MCG_STATUS_RIPV))
		recoverable = 0;

	if (!recoverable) {
		/*
		 * Wait for at least one error to be logged before
		 * panic'ing.  Some errors will assert a machine check
		 * on all CPUs, but only certain CPUs will find a valid
		 * machine check record.
		 */
		while (mca_count == old_count)
			cpu_spinwait();

		panic("Unrecoverable machine check exception");
	}

	/* Clear MCIP. */
	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}
/* Called for a CMCI (correctable machine check interrupt). */
void
cmci_intr(void)
{
	struct mca_internal *mca;
	int count;

	/*
	 * Serialize MCA bank scanning to prevent collisions from
	 * sibling threads.
	 */
	count = mca_scan(CMCI);

	/* If we found anything, log the records to the console. */
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}