/*-
 * Copyright (c) 2009 Hudson River Trading LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for x86 machine check architecture.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/* Modes for mca_scan() */
enum scan_mode {
	POLLED,
	MCE,
	CMCI,
};

/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
struct cmc_state {
	int	max_threshold;
	time_t	last_intr;
};

struct amd_et_state {
	int	cur_threshold;
	time_t	last_intr;
};

struct mca_internal {
	struct mca_record rec;
	int	logged;
	STAILQ_ENTRY(mca_internal) link;
};

static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static volatile int mca_count;	/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

static int intel6h_HSD131;
SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
    "Administrative toggle for logging of spurious corrected errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RDTUN,
    &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");

static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_refill_task, mca_scan_task;
static struct mtx mca_lock;

static struct cmc_state **cmc_state;		/* Indexed by cpuid, bank. */
static struct amd_et_state *amd_et_state;	/* Indexed by cpuid. */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */

/* The AMD DRAM Error Threshold Group lives in MC bank 4. */
#define	MC_AMDNB_BANK	4

static int amd_elvt = -1;

static bool
amd_thresholding_supported(void)
{

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return (false);

	/*
	 * The RASCap register is wholly reserved in families 0x10-0x15
	 * (through model 1F).
	 *
	 * It begins to be documented in family 0x15 model 30 and family 0x16,
	 * but neither of these families documents the ScalableMca bit, which
	 * supposedly defines the presence of this feature on family 0x17.
	 */
	if (CPUID_TO_FAMILY(cpu_id) >= 0x10 && CPUID_TO_FAMILY(cpu_id) <= 0x16)
		return (true);
	if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
		return ((amd_rascap & AMDRAS_SCALABLE_MCA) != 0);
	return (false);
}

static inline bool
cmci_supported(uint64_t mcg_cap)
{

	/*
	 * The MCG_CAP_CMCI_P bit is reserved in AMD documentation.  Until
	 * it is defined, do not use it to check for CMCI support.
	 */
	if (cpu_vendor_id != CPU_VENDOR_INTEL)
		return (false);
	return ((mcg_cap & MCG_CAP_CMCI_P) != 0);
}

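/*
 * Sysctl handler shared by the tunables below; it accepts only
 * positive integer values.
 */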
static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

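/*
 * Copy one stored record out to userland for the hw.mca.records
 * sysctl tree.  The index is checked once unlocked for a cheap
 * failure and again under mca_lock before the list is walked.
 */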
static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (EINVAL);

	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (EINVAL);
	}
	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0]) {
			record = rec->rec;
			break;
		}
		i++;
	}
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}

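/*
 * The helpers below decode the subfields of a compound MCA error
 * code into the short strings printed by mca_log().
 */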
static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}

static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("L?");
}

static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}

static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}

static int
mca_mute(const struct mca_record *rec)
{

	/*
	 * Skip spurious corrected parity errors generated by Intel Haswell-
	 * and Broadwell-based CPUs (see the HSD131, HSM142, HSW131 and BDM48
	 * errata, respectively), unless reporting is enabled.
	 * Note that these errors also have been observed with the D0-stepping
	 * of Haswell, while at least initially the CPU specification updates
	 * suggested only the C0-stepping to be affected.  Similarly, the
	 * Celeron 2955U with a CPU ID of 0x45 is apparently also affected by
	 * the same problem, with HSM142 only referring to 0x3c and 0x46.
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3c ||	/* HSD131, HSM142, HSW131 */
	    CPUID_TO_MODEL(cpu_id) == 0x3d ||	/* BDM48 */
	    CPUID_TO_MODEL(cpu_id) == 0x45 ||
	    CPUID_TO_MODEL(cpu_id) == 0x46) &&	/* HSM142 */
	    rec->mr_bank == 0 &&
	    (rec->mr_status & 0xa0000000ffffffff) == 0x80000000000f0005 &&
	    !intel6h_HSD131)
		return (1);

	return (0);
}

/* Dump details about a single machine check. */
static void
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	if (mca_mute(rec))
		return;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (cmci_supported(rec->mr_mcg_cap))
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
	switch (mca_error) {
		/* Simple error codes. */
	case 0x0000:
		printf("no error");
		break;
	case 0x0001:
		printf("unclassified error");
		break;
	case 0x0002:
		printf("ucode ROM parity error");
		break;
	case 0x0003:
		printf("external error");
		break;
	case 0x0004:
		printf("FRC error");
		break;
	case 0x0005:
		printf("internal parity error");
		break;
	case 0x0400:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}

		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}

		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}

		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}

		/* Bus and/or Interconnect error. */
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("Source");
				break;
			case 1:
				printf("Responder");
				break;
			case 2:
				printf("Observer");
				break;
			default:
				printf("???");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("Memory");
				break;
			case 2:
				printf("I/O");
				break;
			case 3:
				printf("Other");
				break;
			default:
				printf("???");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}

		printf("unknown error %x", mca_error);
		break;
	}
	printf("\n");
	if (rec->mr_status & MC_STATUS_ADDRV)
		printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
	if (rec->mr_status & MC_STATUS_MISCV)
		printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}

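/*
 * Read the status of a single machine check bank.  Returns zero if
 * the bank holds no valid event; otherwise the event is copied into
 * *rec and, unless it is uncorrectable, acknowledged in the bank's
 * status MSR.
 */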
static int
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(MSR_MC_STATUS(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(MSR_MC_ADDR(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(MSR_MC_MISC(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(MSR_MC_STATUS(bank), 0);
		do_cpuid(0, p);
	}
	return (1);
}

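/*
 * Grow the freelist of record structures.  mca_lock is a spin mutex,
 * so records consumed in interrupt context must be allocated ahead of
 * time here, in a context that may sleep in malloc().
 */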
static void
mca_fill_freelist(void)
{
	struct mca_internal *rec;
	int desired;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU.
	 */
	desired = imax(mp_ncpus, mca_banks);
	mtx_lock_spin(&mca_lock);
	while (mca_freecount < desired) {
		mtx_unlock_spin(&mca_lock);
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
		STAILQ_INSERT_TAIL(&mca_freelist, rec, link);
		mca_freecount++;
	}
	mtx_unlock_spin(&mca_lock);
}

static void
mca_refill(void *context, int pending)
{

	mca_fill_freelist();
}

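/*
 * Store one machine check event.  Polled scans may sleep in malloc();
 * the exception and CMCI handlers must instead take a preallocated
 * record from the freelist.
 */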
static void
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	rec->logged = 0;
	STAILQ_INSERT_TAIL(&mca_records, rec, link);
	mca_count++;
	mtx_unlock_spin(&mca_lock);
	if (mode == CMCI && !cold)
		taskqueue_enqueue(mca_tq, &mca_refill_task);
}

/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrives, the threshold is
 * increased until the interrupts are throttled to once every
 * cmc_throttle seconds or the periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
 */
static int
update_threshold(enum scan_mode mode, int valid, int last_intr, int count,
    int cur_threshold, int max_threshold)
{
	u_int delta;
	int limit;

	delta = (u_int)(time_uptime - last_intr);
	limit = cur_threshold;

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		if (delta < cmc_throttle && count >= limit &&
		    limit < max_threshold) {
			limit = min(limit << 1, max_threshold);
		}
		return (limit);
	}

	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return (limit);

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return (limit);

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > max_threshold)
			limit = max_threshold;
	} else {
		limit = 1;
	}
	return (limit);
}

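/*
 * Apply update_threshold() to an Intel CMCI bank by rewriting the
 * threshold field of its MC_CTL2 MSR.
 */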
static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	int cur_threshold, new_threshold;
	int count;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	cur_threshold = ctl & MC_CTL2_THRESHOLD;

	new_threshold = update_threshold(mode, valid, cc->last_intr, count,
	    cur_threshold, cc->max_threshold);

	if (mode == CMCI && valid)
		cc->last_intr = time_uptime;
	if (new_threshold != cur_threshold) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= new_threshold;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}

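/*
 * The AMD counterpart of cmci_update().  The Bank 4 counter is armed
 * with MC_MISC_AMD_CNT_MAX minus the threshold and counts up, so the
 * raw count is rebased before the common threshold logic is applied.
 */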
static void
amd_thresholding_update(enum scan_mode mode, int bank, int valid)
{
	struct amd_et_state *cc;
	uint64_t misc;
	int new_threshold;
	int count;

	KASSERT(bank == MC_AMDNB_BANK,
	    ("%s: unexpected bank %d", __func__, bank));
	cc = &amd_et_state[PCPU_GET(cpuid)];
	misc = rdmsr(MSR_MC_MISC(bank));
	count = (misc & MC_MISC_AMD_CNT_MASK) >> MC_MISC_AMD_CNT_SHIFT;
	count = count - (MC_MISC_AMD_CNT_MAX - cc->cur_threshold);

	new_threshold = update_threshold(mode, valid, cc->last_intr, count,
	    cc->cur_threshold, MC_MISC_AMD_CNT_MAX);

	cc->cur_threshold = new_threshold;
	misc &= ~MC_MISC_AMD_CNT_MASK;
	misc |= (uint64_t)(MC_MISC_AMD_CNT_MAX - cc->cur_threshold)
	    << MC_MISC_AMD_CNT_SHIFT;
	misc &= ~MC_MISC_AMD_OVERFLOW;
	wrmsr(MSR_MC_MISC(bank), misc);
	if (mode == CMCI && valid)
		cc->last_intr = time_uptime;
}

/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  The function returns the number of valid
 * MC records found; if 'recoverablep' is not NULL, it is set to
 * whether the system is restartable, which is only meaningful when
 * called from the MC exception handler.
 */
static int
mca_scan(enum scan_mode mode, int *recoverablep)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling an MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
		 */
		if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
			continue;

		valid = mca_check_status(i, &rec);
		if (valid) {
			count++;
			if (rec.mr_status & ucmask) {
				recoverable = 0;
				mtx_lock_spin(&mca_lock);
				mca_log(&rec);
				mtx_unlock_spin(&mca_lock);
			}
			mca_record_entry(mode, &rec);
		}

		/*
		 * If this is a bank this CPU monitors via CMCI,
		 * update the threshold.
		 */
		if (PCPU_GET(cmci_mask) & 1 << i) {
			if (cmc_state != NULL)
				cmci_update(mode, i, valid, &rec);
			else
				amd_thresholding_update(mode, i, valid);
		}
	}
	if (mode == POLLED)
		mca_fill_freelist();
	if (recoverablep != NULL)
		*recoverablep = recoverable;
	return (count);
}

/*
 * Scan the machine check banks on all CPUs by binding to each CPU in
 * turn.  If any of the CPUs contained new machine check records, log
 * them to the console.
 */
static void
mca_scan_cpus(void *context, int pending)
{
	struct mca_internal *mca;
	struct thread *td;
	int count, cpu;

	mca_fill_freelist();
	td = curthread;
	count = 0;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		thread_unlock(td);
		count += mca_scan(POLLED, NULL);
		thread_lock(td);
		sched_unbind(td);
	}
	thread_unlock(td);
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}

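/* Callout handler: queue a scan of all CPUs and reschedule. */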
static void
mca_periodic_scan(void *arg)
{

	taskqueue_enqueue(mca_tq, &mca_scan_task);
	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}

static int
sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		taskqueue_enqueue(mca_tq, &mca_scan_task);
	return (0);
}

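/* Create the taskqueue shared by the periodic and forced scans. */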
static void
mca_createtq(void *dummy)
{

	if (mca_banks <= 0)
		return;

	mca_tq = taskqueue_create_fast("mca", M_WAITOK,
	    taskqueue_thread_enqueue, &mca_tq);
	taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");

	/* CMCIs during boot may have claimed items from the freelist. */
	mca_fill_freelist();
}
SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL);

static void
mca_startup(void *dummy)
{

	if (mca_banks <= 0)
		return;

	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
#ifdef EARLY_AP_STARTUP
SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
#else
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);
#endif

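/* Allocate the per-CPU, per-bank CMCI state and its throttle knob. */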
static void
cmci_setup(void)
{
	int i;

	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA,
	    M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
		    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}

static void
amd_thresholding_setup(void)
{

	amd_et_state = malloc((mp_maxid + 1) * sizeof(struct amd_et_state),
	    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}

static void
mca_setup(uint64_t mcg_cap)
{

	/*
	 * On AMD Family 10h processors, unless logging of level one TLB
	 * parity (L1TP) errors is disabled, enable the recommended workaround
	 * for Erratum 383.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
		workaround_erratum383 = 1;

	mca_banks = mcg_cap & MCG_CAP_COUNT;
	mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
	STAILQ_INIT(&mca_records);
	TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL);
	callout_init(&mca_timer, 1);
	STAILQ_INIT(&mca_freelist);
	TASK_INIT(&mca_refill_task, 0, mca_refill, NULL);
	mca_fill_freelist();
	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
	    "Record count");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
	    0, sysctl_positive_int, "I",
	    "Periodic interval in seconds to scan for machine checks");
	SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_mca_scan, "I", "Force an immediate scan for machine checks");

	if (cmci_supported(mcg_cap))
		cmci_setup();
	else if (amd_thresholding_supported())
		amd_thresholding_setup();
}

/*
 * See if we should monitor CMCI for this bank.  If CMCI_EN is already
 * set in MC_CTL2, then another CPU is responsible for this bank, so
 * ignore it.  If CMCI_EN returns zero after being set, then this bank
 * does not support CMCI_EN.  If this CPU sets CMCI_EN, then it should
 * now monitor this bank.
 */
static void
cmci_monitor(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	ctl = rdmsr(MSR_MC_CTL2(i));
	if (ctl & MC_CTL2_CMCI_EN)
		/* Already monitored by another CPU. */
		return;

	/* Set the threshold to one event for now. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (!(ctl & MC_CTL2_CMCI_EN))
		/* This bank does not support CMCI. */
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];

	/* Determine maximum threshold. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 0x7fff;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	cc->max_threshold = ctl & MC_CTL2_THRESHOLD;

	/* Start off with a threshold of 1. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 1;
	wrmsr(MSR_MC_CTL2(i), ctl);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}

/*
 * For resume, reset the threshold for any banks we monitor back to
 * one and throw away the timestamp of the last interrupt.
 */
static void
cmci_resume(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];
	cc->last_intr = 0;
	ctl = rdmsr(MSR_MC_CTL2(i));
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
}

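/*
 * Program the Bank 4 MISC register: route counter overflows to the
 * extended LVT entry reserved in amd_elvt and arm the counter so that
 * it overflows after cc->cur_threshold further errors.
 */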
static void
amd_thresholding_start(struct amd_et_state *cc)
{
	uint64_t misc;

	KASSERT(amd_elvt >= 0, ("ELVT offset is not set"));
	misc = rdmsr(MSR_MC_MISC(MC_AMDNB_BANK));
	misc &= ~MC_MISC_AMD_INT_MASK;
	misc |= MC_MISC_AMD_INT_LVT;
	misc &= ~MC_MISC_AMD_LVT_MASK;
	misc |= (uint64_t)amd_elvt << MC_MISC_AMD_LVT_SHIFT;
	misc &= ~MC_MISC_AMD_CNT_MASK;
	misc |= (uint64_t)(MC_MISC_AMD_CNT_MAX - cc->cur_threshold)
	    << MC_MISC_AMD_CNT_SHIFT;
	misc &= ~MC_MISC_AMD_OVERFLOW;
	misc |= MC_MISC_AMD_CNTEN;

	wrmsr(MSR_MC_MISC(MC_AMDNB_BANK), misc);
}

static void
amd_thresholding_init(void)
{
	struct amd_et_state *cc;
	uint64_t misc;

	/* The counter must be valid and present. */
	misc = rdmsr(MSR_MC_MISC(MC_AMDNB_BANK));
	if ((misc & (MC_MISC_AMD_VAL | MC_MISC_AMD_CNTP)) !=
	    (MC_MISC_AMD_VAL | MC_MISC_AMD_CNTP)) {
		printf("%s: 0x%jx: !valid | !present\n", __func__,
		    (uintmax_t)misc);
		return;
	}

	/* The register should not be locked. */
	if ((misc & MC_MISC_AMD_LOCK) != 0) {
		printf("%s: 0x%jx: locked\n", __func__, (uintmax_t)misc);
		return;
	}

	/*
	 * If the counter is enabled then either the firmware or another CPU
	 * has already claimed it.
	 */
	if ((misc & MC_MISC_AMD_CNTEN) != 0) {
		printf("%s: 0x%jx: count already enabled\n", __func__,
		    (uintmax_t)misc);
		return;
	}

	/*
	 * Configure an Extended Interrupt LVT register for reporting
	 * counter overflows if that feature is supported and the first
	 * extended register is available.
	 */
	amd_elvt = lapic_enable_mca_elvt();
	if (amd_elvt < 0) {
		printf("%s: lapic enable mca elvt failed: %d\n", __func__,
		    amd_elvt);
		return;
	}

	/* Re-use Intel CMC support infrastructure. */
	if (bootverbose)
		printf("%s: Starting AMD thresholding\n", __func__);

	cc = &amd_et_state[PCPU_GET(cpuid)];
	cc->cur_threshold = 1;
	amd_thresholding_start(cc);

	/* Mark the NB bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << MC_AMDNB_BANK);
}

static void
amd_thresholding_resume(void)
{
	struct amd_et_state *cc;

	/* Nothing to do if this CPU doesn't monitor the NB bank. */
	if ((PCPU_GET(cmci_mask) & 1 << MC_AMDNB_BANK) == 0)
		return;

	cc = &amd_et_state[PCPU_GET(cpuid)];
	cc->last_intr = 0;
	cc->cur_threshold = 1;
	amd_thresholding_start(cc);
}

/*
 * Initializes per-CPU machine check registers and enables corrected
 * machine check interrupts.
 */
static void
_mca_init(int boot)
{
	uint64_t mcg_cap;
	uint64_t ctl, mask;
	int i, skip;

	/* MCE is required. */
	if (!mca_enabled || !(cpu_feature & CPUID_MCE))
		return;

	if (cpu_feature & CPUID_MCA) {
		if (boot)
			PCPU_SET(cmci_mask, 0);

		mcg_cap = rdmsr(MSR_MCG_CAP);
		if (mcg_cap & MCG_CAP_CTL_P)
			/* Enable MCA features. */
			wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
		if (PCPU_GET(cpuid) == 0 && boot)
			mca_setup(mcg_cap);

		/*
		 * Disable logging of level one TLB parity (L1TP) errors by
		 * the data cache as an alternative workaround for AMD Family
		 * 10h Erratum 383.  Unlike the recommended workaround, there
		 * is no performance penalty to this workaround.  However,
		 * L1TP errors will go unreported.
		 */
		if (cpu_vendor_id == CPU_VENDOR_AMD &&
		    CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) {
			mask = rdmsr(MSR_MC0_CTL_MASK);
			if ((mask & (1UL << 5)) == 0)
				wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
		}
		for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
			/* By default enable logging of all errors. */
			ctl = 0xffffffffffffffffUL;
			skip = 0;

			if (cpu_vendor_id == CPU_VENDOR_INTEL) {
				/*
				 * For P6 models before Nehalem MC0_CTL is
				 * always enabled and reserved.
				 */
				if (i == 0 && CPUID_TO_FAMILY(cpu_id) == 0x6
				    && CPUID_TO_MODEL(cpu_id) < 0x1a)
					skip = 1;
			} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
				/* BKDG for Family 10h: unset GartTblWkEn. */
				if (i == 4 && CPUID_TO_FAMILY(cpu_id) >= 0xf)
					ctl &= ~(1UL << 10);
			}

			if (!skip)
				wrmsr(MSR_MC_CTL(i), ctl);

			if (cmci_supported(mcg_cap)) {
				if (boot)
					cmci_monitor(i);
				else
					cmci_resume(i);
			}

			/* Clear all errors. */
			wrmsr(MSR_MC_STATUS(i), 0);
		}

		/*
		 * AMD processors from families 10h-16h support Machine
		 * Check Error Thresholding: counters of MC errors that
		 * can be configured to raise an interrupt when a counter
		 * overflows.  The counters are all associated with Bank 4
		 * and each of them covers a group of errors reported via
		 * that bank.  At the moment only the DRAM Error Threshold
		 * Group is supported.
		 */
		if (amd_thresholding_supported() &&
		    (mcg_cap & MCG_CAP_COUNT) >= 4) {
			if (boot)
				amd_thresholding_init();
			else
				amd_thresholding_resume();
		} else if (PCPU_GET(cmci_mask) != 0 && boot) {
			lapic_enable_cmc();
		}
	}

	load_cr4(rcr4() | CR4_MCE);
}

/* Must be executed on each CPU during boot. */
void
mca_init(void)
{

	_mca_init(1);
}

/* Must be executed on each CPU during resume. */
void
mca_resume(void)
{

	_mca_init(0);
}

/*
 * The machine check registers for the BSP cannot be initialized until
 * the local APIC is initialized.  This happens at SI_SUB_CPU,
 * SI_ORDER_SECOND.
 */
static void
mca_init_bsp(void *arg __unused)
{

	mca_init();
}
SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL);

/* Called when a machine check exception fires. */
void
mca_intr(void)
{
	uint64_t mcg_status;
	int recoverable, count;

	if (!(cpu_feature & CPUID_MCA)) {
		/*
		 * Just print the values of the old Pentium registers
		 * and panic.
		 */
		printf("MC Type: 0x%jx Address: 0x%jx\n",
		    (uintmax_t)rdmsr(MSR_P5_MC_TYPE),
		    (uintmax_t)rdmsr(MSR_P5_MC_ADDR));
		panic("Machine check");
	}

	/* Scan the banks and check for any non-recoverable errors. */
	count = mca_scan(MCE, &recoverable);
	mcg_status = rdmsr(MSR_MCG_STATUS);
	if (!(mcg_status & MCG_STATUS_RIPV))
		recoverable = 0;

	if (!recoverable) {
		/*
		 * Only panic if the error was detected local to this CPU.
		 * Some errors will assert a machine check on all CPUs, but
		 * only certain CPUs will find a valid bank to log.
		 */
		while (count == 0)
			cpu_spinwait();

		panic("Unrecoverable machine check exception");
	}

	/* Clear MCIP. */
	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}

/* Called for a CMCI (correctable machine check interrupt). */
void
cmc_intr(void)
{
	struct mca_internal *mca;
	int count;

	/*
	 * Serialize MCA bank scanning to prevent collisions from
	 * sibling threads.
	 */
	count = mca_scan(CMCI, NULL);

	/* If we found anything, log the records to the console. */
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}