/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Hudson River Trading LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for x86 machine check architecture.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
/* Modes for mca_scan() */
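/*
 * A sketch of the scan modes, reconstructed from their uses below:
 * mca_scan() and its callers only ever pass POLLED, MCE, or CMCI.
 */
enum scan_mode {
	POLLED,
	MCE,
	CMCI
};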
/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
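/*
 * A sketch of the per-bank threshold state, with the fields inferred
 * from their uses in cmci_update() and amd_thresholding_update()
 * below (max_threshold, cur_threshold, and a last-interrupt timestamp
 * compared against time_uptime).
 */
struct cmc_state {
	int	max_threshold;
	time_t	last_intr;
};

struct amd_et_state {
	int	cur_threshold;
	time_t	last_intr;
};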
struct mca_internal {
	struct mca_record rec;
	STAILQ_ENTRY(mca_internal) link;
};
struct mca_enumerator_ops {
	unsigned int (*ctl)(int);
	unsigned int (*status)(int);
	unsigned int (*addr)(int);
	unsigned int (*misc)(int);
};
static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static volatile int mca_count;	/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */
static int mca_maxcount = -1;	/* Limit on records stored. (-1 = unlimited) */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

static int intel6h_HSD131;
SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
    "Administrative toggle for logging of spurious corrected errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RDTUN,
    &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");
static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static STAILQ_HEAD(, mca_internal) mca_pending;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_resize_task, mca_scan_task;
static struct mtx mca_lock;
static unsigned int
mca_ia32_ctl_reg(int bank)
{
	return (MSR_MC_CTL(bank));
}

static unsigned int
mca_ia32_status_reg(int bank)
{
	return (MSR_MC_STATUS(bank));
}

static unsigned int
mca_ia32_addr_reg(int bank)
{
	return (MSR_MC_ADDR(bank));
}

static unsigned int
mca_ia32_misc_reg(int bank)
{
	return (MSR_MC_MISC(bank));
}
static unsigned int
mca_smca_ctl_reg(int bank)
{
	return (MSR_SMCA_MC_CTL(bank));
}

static unsigned int
mca_smca_status_reg(int bank)
{
	return (MSR_SMCA_MC_STATUS(bank));
}

static unsigned int
mca_smca_addr_reg(int bank)
{
	return (MSR_SMCA_MC_ADDR(bank));
}

static unsigned int
mca_smca_misc_reg(int bank)
{
	return (MSR_SMCA_MC_MISC(bank));
}
static struct mca_enumerator_ops mca_msr_ops = {
	.ctl = mca_ia32_ctl_reg,
	.status = mca_ia32_status_reg,
	.addr = mca_ia32_addr_reg,
	.misc = mca_ia32_misc_reg
};
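/*
 * The ops table defaults to the legacy IA32 MSR layout; _mca_init()
 * below re-points each member at the mca_smca_*_reg variants when the
 * CPU advertises AMD's Scalable MCA (AMDRAS_SCALABLE_MCA).
 */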
static struct cmc_state **cmc_state;		/* Indexed by cpuid, bank. */
static struct amd_et_state **amd_et_state;	/* Indexed by cpuid, bank. */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */

static int amd_elvt = -1;
static bool
amd_thresholding_supported(void)
{

	if (cpu_vendor_id != CPU_VENDOR_AMD &&
	    cpu_vendor_id != CPU_VENDOR_HYGON)
		return (false);

	/*
	 * The RASCap register is wholly reserved in families 0x10-0x15
	 * (through model 1F).
	 *
	 * It begins to be documented in family 0x15 model 30 and family 0x16,
	 * but neither of these families documents the ScalableMca bit, which
	 * supposedly defines the presence of this feature on family 0x17.
	 */
	if (CPUID_TO_FAMILY(cpu_id) >= 0x10 && CPUID_TO_FAMILY(cpu_id) <= 0x16)
		return (true);
	if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
		return ((amd_rascap & AMDRAS_SCALABLE_MCA) != 0);
	return (false);
}
static bool
cmci_supported(uint64_t mcg_cap)
{

	/*
	 * The MCG_CAP_CMCI_P bit is reserved in AMD documentation.  Until
	 * it is defined, do not use it to check for CMCI support.
	 */
	if (cpu_vendor_id != CPU_VENDOR_INTEL)
		return (false);
	return ((mcg_cap & MCG_CAP_CMCI_P) != 0);
}
static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}
static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (EINVAL);

	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (EINVAL);
	}

	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0]) {
			record = rec->rec;
			break;
		}
		i++;
	}
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}
static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}

static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("L?");
}

static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}

static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}
static int
mca_mute(const struct mca_record *rec)
{

	/*
	 * Skip spurious corrected parity errors generated by Intel Haswell-
	 * and Broadwell-based CPUs (see HSD131, HSM142, HSW131 and BDM48
	 * erratum respectively), unless reporting is enabled.
	 * Note that these errors also have been observed with the D0-stepping
	 * of Haswell, while at least initially the CPU specification updates
	 * suggested only the C0-stepping to be affected.  Similarly, the
	 * Celeron 2955U with a CPU ID of 0x45 apparently is also affected by
	 * the same problem, with HSM142 only referring to 0x3c and 0x46.
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3c ||	/* HSD131, HSM142, HSW131 */
	    CPUID_TO_MODEL(cpu_id) == 0x3d ||	/* BDM48 */
	    CPUID_TO_MODEL(cpu_id) == 0x45 ||
	    CPUID_TO_MODEL(cpu_id) == 0x46) &&	/* HSM142 */
	    rec->mr_bank == 0 &&
	    (rec->mr_status & 0xa0000000ffffffff) == 0x80000000000f0005 &&
	    !intel6h_HSD131)
		return (1);

	return (0);
}
/* Dump details about a single machine check. */
static void
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	if (mca_mute(rec))
		return;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (cmci_supported(rec->mr_mcg_cap))
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
	switch (mca_error) {
		/* Simple error codes. */
	case 0x0000:
		printf("unclassified error");
		break;
	case 0x0001:
		printf("ucode ROM parity error");
		break;
	case 0x0002:
		printf("external error");
		break;
	case 0x0003:
		printf("FRC error");
		break;
	case 0x0004:
		printf("internal parity error");
		break;
	case 0x0400:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}

		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}

		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}

		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}

		/* Bus and/or Interconnect error. */
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("LOCAL");
				break;
			case 1:
				printf("RESPONSE");
				break;
			case 2:
				printf("OBSERVED");
				break;
			case 3:
				printf("GENERIC");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("MEMORY");
				break;
			case 1:
				printf("RESERVED");
				break;
			case 2:
				printf("IO");
				break;
			case 3:
				printf("OTHER");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}

		printf("unknown error %x", mca_error);
		break;
	}
	printf("\n");
	if (rec->mr_status & MC_STATUS_ADDRV)
		printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
	if (rec->mr_status & MC_STATUS_MISCV)
		printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}
static int
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(mca_msr_ops.status(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(mca_msr_ops.addr(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(mca_msr_ops.misc(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(mca_msr_ops.status(bank), 0);
		do_cpuid(0, p);
	}
	return (1);
}
static void
mca_resize_freelist(void)
{
	struct mca_internal *next, *rec;
	STAILQ_HEAD(, mca_internal) tmplist;
	int count, i, desired_max, desired_min;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU, but no more than twice that amount.
	 */
	desired_min = imax(mp_ncpus, mca_banks);
	desired_max = imax(mp_ncpus, mca_banks) * 2;
	STAILQ_INIT(&tmplist);
	mtx_lock_spin(&mca_lock);
	while (mca_freecount > desired_max) {
		rec = STAILQ_FIRST(&mca_freelist);
		KASSERT(rec != NULL, ("mca_freecount is %d, but list is empty",
		    mca_freecount));
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
		STAILQ_INSERT_TAIL(&tmplist, rec, link);
	}
	while (mca_freecount < desired_min) {
		count = desired_min - mca_freecount;
		mtx_unlock_spin(&mca_lock);
		for (i = 0; i < count; i++) {
			rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
			STAILQ_INSERT_TAIL(&tmplist, rec, link);
		}
		mtx_lock_spin(&mca_lock);
		STAILQ_CONCAT(&mca_freelist, &tmplist);
		mca_freecount += count;
	}
	mtx_unlock_spin(&mca_lock);
	STAILQ_FOREACH_SAFE(rec, &tmplist, link, next)
		free(rec, M_MCA);
}
static void
mca_resize(void *context, int pending)
{

	mca_resize_freelist();
}
static void
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	STAILQ_INSERT_TAIL(&mca_pending, rec, link);
	mtx_unlock_spin(&mca_lock);
}
/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrives, the threshold is
 * increased until the interrupts are throttled to at most once every
 * cmc_throttle seconds or once per periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
 */
static int
update_threshold(enum scan_mode mode, int valid, int last_intr, int count,
    int cur_threshold, int max_threshold)
{
	u_int delta;
	int limit;

	delta = (u_int)(time_uptime - last_intr);
	limit = cur_threshold;

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		if (delta < cmc_throttle && count >= limit &&
		    limit < max_threshold) {
			limit = min(limit << 1, max_threshold);
		}
		return (limit);
	}

	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return (limit);

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return (limit);

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > max_threshold)
			limit = max_threshold;
	} else {
		limit = 1;
	}
	return (limit);
}
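/*
 * Worked example (illustrative numbers): with the default cmc_throttle
 * of 60 seconds, a bank whose CMCI fires again after 10 seconds with
 * count >= limit doubles its threshold (1 -> 2 -> 4 ...) up to
 * max_threshold, so interrupts settle to at most one per cmc_throttle
 * interval.  A later polled scan that sees, say, 2 events 300 seconds
 * after the last interrupt recomputes limit = 2 * 60 / 300 = 0, which
 * the code clamps back up to 1.
 */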
static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	int cur_threshold, new_threshold;
	int count;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
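	/* The corrected-error count is encoded in bits 52:38 of MCi_STATUS. */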
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	cur_threshold = ctl & MC_CTL2_THRESHOLD;

	new_threshold = update_threshold(mode, valid, cc->last_intr, count,
	    cur_threshold, cc->max_threshold);

	if (mode == CMCI && valid)
		cc->last_intr = time_uptime;
	if (new_threshold != cur_threshold) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= new_threshold;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}
static void
amd_thresholding_update(enum scan_mode mode, int bank, int valid)
{
	struct amd_et_state *cc;
	uint64_t misc;
	int new_threshold;
	int count;

	cc = &amd_et_state[PCPU_GET(cpuid)][bank];
	misc = rdmsr(mca_msr_ops.misc(bank));
	count = (misc & MC_MISC_AMD_CNT_MASK) >> MC_MISC_AMD_CNT_SHIFT;
	count = count - (MC_MISC_AMD_CNT_MAX - cc->cur_threshold);

	new_threshold = update_threshold(mode, valid, cc->last_intr, count,
	    cc->cur_threshold, MC_MISC_AMD_CNT_MAX);

	cc->cur_threshold = new_threshold;
	misc &= ~MC_MISC_AMD_CNT_MASK;
	misc |= (uint64_t)(MC_MISC_AMD_CNT_MAX - cc->cur_threshold)
	    << MC_MISC_AMD_CNT_SHIFT;
	misc &= ~MC_MISC_AMD_OVERFLOW;
	wrmsr(mca_msr_ops.misc(bank), misc);
	if (mode == CMCI && valid)
		cc->last_intr = time_uptime;
}
/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  In the MC exception case, '*recoverablep'
 * is set to indicate whether the system is restartable; the return
 * value is a count of the number of valid MC records found.
 */
static int
mca_scan(enum scan_mode mode, int *recoverablep)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling an MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
		 */
		if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
			continue;

		valid = mca_check_status(i, &rec);
		if (valid) {
			count++;
			if (rec.mr_status & ucmask) {
				recoverable = 0;
				mtx_lock_spin(&mca_lock);
				mca_log(&rec);
				mtx_unlock_spin(&mca_lock);
			}
			mca_record_entry(mode, &rec);
		}

		/*
		 * If this is a bank this CPU monitors via CMCI,
		 * update the threshold.
		 */
		if (PCPU_GET(cmci_mask) & 1 << i) {
			if (cmc_state != NULL)
				cmci_update(mode, i, valid, &rec);
			else
				amd_thresholding_update(mode, i, valid);
		}
	}
	if (recoverablep != NULL)
		*recoverablep = recoverable;
	return (count);
}
/*
 * Store a new record on the mca_records list while enforcing
 * mca_maxcount.
 */
static void
mca_store_record(struct mca_internal *mca)
{

	/*
	 * If we are storing no records (mca_maxcount == 0),
	 * we just free this record.
	 *
	 * If we are storing records (mca_maxcount != 0) and
	 * we have free space on the list, store the record
	 * and increment mca_count.
	 *
	 * If we are storing records and we do not have free
	 * space on the list, store the new record at the
	 * tail and free the oldest one from the head.
	 */
	if (mca_maxcount != 0)
		STAILQ_INSERT_TAIL(&mca_records, mca, link);
	if (mca_maxcount < 0 || mca_count < mca_maxcount)
		mca_count++;
	else {
		if (mca_maxcount != 0) {
			mca = STAILQ_FIRST(&mca_records);
			STAILQ_REMOVE_HEAD(&mca_records, link);
		}
		STAILQ_INSERT_TAIL(&mca_freelist, mca, link);
		mca_freecount++;
	}
}
/*
 * Do the work to process machine check records which have just been
 * gathered.  Print any pending logs to the console.  Queue them for storage.
 * Trigger a resizing of the free list.
 */
static void
mca_process_records(enum scan_mode mode)
{
	struct mca_internal *mca;

	mtx_lock_spin(&mca_lock);
	while ((mca = STAILQ_FIRST(&mca_pending)) != NULL) {
		STAILQ_REMOVE_HEAD(&mca_pending, link);
		mca_log(&mca->rec);
		mca_store_record(mca);
	}
	mtx_unlock_spin(&mca_lock);
	if (mode == POLLED)
		mca_resize_freelist();
	else if (!cold)
		taskqueue_enqueue(mca_tq, &mca_resize_task);
}
/*
 * Scan the machine check banks on all CPUs by binding to each CPU in
 * turn.  If any of the CPUs contained new machine check records, log
 * them to the console.
 */
static void
mca_scan_cpus(void *context, int pending)
{
	struct thread *td;
	int count, cpu;

	mca_resize_freelist();
	count = 0;
	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		thread_unlock(td);
		count += mca_scan(POLLED, NULL);
		thread_lock(td);
		sched_unbind(td);
	}
	thread_unlock(td);
	if (count != 0)
		mca_process_records(POLLED);
}
static void
mca_periodic_scan(void *arg)
{

	taskqueue_enqueue(mca_tq, &mca_scan_task);
	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
static int
sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		taskqueue_enqueue(mca_tq, &mca_scan_task);
	return (0);
}
static int
sysctl_mca_maxcount(SYSCTL_HANDLER_ARGS)
{
	struct mca_internal *mca;
	int error, i;
	bool doresize;

	i = mca_maxcount;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	mtx_lock_spin(&mca_lock);
	mca_maxcount = i;
	doresize = false;
	if (mca_maxcount >= 0)
		while (mca_count > mca_maxcount) {
			mca = STAILQ_FIRST(&mca_records);
			STAILQ_REMOVE_HEAD(&mca_records, link);
			mca_count--;
			STAILQ_INSERT_TAIL(&mca_freelist, mca, link);
			mca_freecount++;
			doresize = true;
		}
	mtx_unlock_spin(&mca_lock);
	if (doresize && !cold)
		taskqueue_enqueue(mca_tq, &mca_resize_task);
	return (error);
}
static void
mca_createtq(void *dummy)
{

	if (mca_banks <= 0)
		return;

	mca_tq = taskqueue_create_fast("mca", M_WAITOK,
	    taskqueue_thread_enqueue, &mca_tq);
	taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");

	/* CMCIs during boot may have claimed items from the freelist. */
	mca_resize_freelist();
}
SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL);
static void
mca_startup(void *dummy)
{

	if (mca_banks <= 0)
		return;

	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
#ifdef EARLY_AP_STARTUP
SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
#else
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);
#endif
static void
cmci_setup(void)
{
	int i;

	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA,
	    M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
		    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}
static void
amd_thresholding_setup(void)
{
	int i;

	amd_et_state = malloc((mp_maxid + 1) * sizeof(struct amd_et_state *),
	    M_MCA, M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		amd_et_state[i] = malloc(sizeof(struct amd_et_state) *
		    mca_banks, M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}
static void
mca_setup(uint64_t mcg_cap)
{

	/*
	 * On AMD Family 10h processors, unless logging of level one TLB
	 * parity (L1TP) errors is disabled, enable the recommended workaround
	 * for Erratum 383.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
		workaround_erratum383 = 1;

	mca_banks = mcg_cap & MCG_CAP_COUNT;
	mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
	STAILQ_INIT(&mca_records);
	STAILQ_INIT(&mca_pending);
	TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL);
	callout_init(&mca_timer, 1);
	STAILQ_INIT(&mca_freelist);
	TASK_INIT(&mca_resize_task, 0, mca_resize, NULL);
	mca_resize_freelist();
	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
	    "Record count");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "maxcount", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
	    &mca_maxcount, 0, sysctl_mca_maxcount, "I",
	    "Maximum record count (-1 is unlimited)");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
	    0, sysctl_positive_int, "I",
	    "Periodic interval in seconds to scan for machine checks");
	SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "records", CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mca_records,
	    "Machine check records");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_mca_scan, "I", "Force an immediate scan for machine checks");
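	/*
	 * Illustrative usage of the knobs registered above:
	 *   sysctl hw.mca.force_scan=1   # trigger an immediate scan
	 *   sysctl hw.mca.maxcount=128   # cap the stored record count
	 */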
	if (cmci_supported(mcg_cap))
		cmci_setup();
	else if (amd_thresholding_supported())
		amd_thresholding_setup();
}
/*
 * See if we should monitor CMCI for this bank.  If CMCI_EN is already
 * set in MC_CTL2, then another CPU is responsible for this bank, so
 * ignore it.  If CMCI_EN reads back as zero after being set, then this
 * bank does not support CMCI.  If this CPU sets CMCI_EN, then it
 * should now monitor this bank.
 */
static void
cmci_monitor(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	ctl = rdmsr(MSR_MC_CTL2(i));
	if (ctl & MC_CTL2_CMCI_EN)
		/* Already monitored by another CPU. */
		return;

	/* Set the threshold to one event for now. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (!(ctl & MC_CTL2_CMCI_EN))
		/* This bank does not support CMCI. */
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];

	/* Determine maximum threshold. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 0x7fff;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	cc->max_threshold = ctl & MC_CTL2_THRESHOLD;

	/* Start off with a threshold of 1. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 1;
	wrmsr(MSR_MC_CTL2(i), ctl);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}
/*
 * For resume, reset the threshold for any banks we monitor back to
 * one and throw away the timestamp of the last interrupt.
 */
static void
cmci_resume(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];
	cc->last_intr = 0;

	ctl = rdmsr(MSR_MC_CTL2(i));
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
}
/*
 * Apply an AMD ET configuration to the corresponding MSR.
 */
static void
amd_thresholding_start(struct amd_et_state *cc, int bank)
{
	uint64_t misc;

	KASSERT(amd_elvt >= 0, ("ELVT offset is not set"));

	misc = rdmsr(mca_msr_ops.misc(bank));

	misc &= ~MC_MISC_AMD_INT_MASK;
	misc |= MC_MISC_AMD_INT_LVT;

	misc &= ~MC_MISC_AMD_LVT_MASK;
	misc |= (uint64_t)amd_elvt << MC_MISC_AMD_LVT_SHIFT;

	misc &= ~MC_MISC_AMD_CNT_MASK;
	misc |= (uint64_t)(MC_MISC_AMD_CNT_MAX - cc->cur_threshold)
	    << MC_MISC_AMD_CNT_SHIFT;

	misc &= ~MC_MISC_AMD_OVERFLOW;
	misc |= MC_MISC_AMD_CNTEN;

	wrmsr(mca_msr_ops.misc(bank), misc);
}
static void
amd_thresholding_monitor(int i)
{
	struct amd_et_state *cc;
	uint64_t misc;

	/*
	 * Kludge: On 10h, banks after 4 are not thresholding but also may
	 * have bogus Valid bits.  Skip them.  This is definitely fixed in
	 * 15h, but I have not investigated whether it is fixed in earlier
	 * models.
	 */
	if (CPUID_TO_FAMILY(cpu_id) < 0x15 && i >= 5)
		return;

	/* The counter must be valid and present. */
	misc = rdmsr(mca_msr_ops.misc(i));
	if ((misc & (MC_MISC_AMD_VAL | MC_MISC_AMD_CNTP)) !=
	    (MC_MISC_AMD_VAL | MC_MISC_AMD_CNTP))
		return;

	/* The register should not be locked. */
	if ((misc & MC_MISC_AMD_LOCK) != 0) {
		if (bootverbose)
			printf("%s: 0x%jx: Bank %d: locked\n", __func__,
			    (uintmax_t)misc, i);
		return;
	}

	/*
	 * If the counter is enabled then either the firmware or another CPU
	 * has already claimed it.
	 */
	if ((misc & MC_MISC_AMD_CNTEN) != 0) {
		if (bootverbose)
			printf("%s: 0x%jx: Bank %d: already enabled\n",
			    __func__, (uintmax_t)misc, i);
		return;
	}

	/*
	 * Configure an Extended Interrupt LVT register for reporting
	 * counter overflows if that feature is supported and the first
	 * extended register is available.
	 */
	amd_elvt = lapic_enable_mca_elvt();
	if (amd_elvt < 0) {
		printf("%s: Bank %d: lapic enable mca elvt failed: %d\n",
		    __func__, i, amd_elvt);
		return;
	}

	/* Re-use Intel CMC support infrastructure. */
	if (bootverbose)
		printf("%s: Starting AMD thresholding on bank %d\n", __func__,
		    i);

	cc = &amd_et_state[PCPU_GET(cpuid)][i];
	cc->cur_threshold = 1;
	amd_thresholding_start(cc, i);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}
static void
amd_thresholding_resume(int i)
{
	struct amd_et_state *cc;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &amd_et_state[PCPU_GET(cpuid)][i];
	cc->last_intr = 0;
	cc->cur_threshold = 1;
	amd_thresholding_start(cc, i);
}
/*
 * Initializes per-CPU machine check registers and enables corrected
 * machine check interrupts.
 */
static void
_mca_init(int boot)
{
	uint64_t mcg_cap, ctl, mask;
	int i, skip, family;

	family = CPUID_TO_FAMILY(cpu_id);

	/* MCE is required. */
	if (!mca_enabled || !(cpu_feature & CPUID_MCE))
		return;

	if (cpu_feature & CPUID_MCA) {
		PCPU_SET(cmci_mask, 0);

		mcg_cap = rdmsr(MSR_MCG_CAP);
		if (mcg_cap & MCG_CAP_CTL_P)
			/* Enable MCA features. */
			wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
		if (IS_BSP() && boot)
			mca_setup(mcg_cap);

		/*
		 * Disable logging of level one TLB parity (L1TP) errors by
		 * the data cache as an alternative workaround for AMD Family
		 * 10h Erratum 383.  Unlike the recommended workaround, there
		 * is no performance penalty to this workaround.  However,
		 * L1TP errors will go unreported.
		 */
		if (cpu_vendor_id == CPU_VENDOR_AMD && family == 0x10 &&
		    !amd10h_L1TP) {
			mask = rdmsr(MSR_MC0_CTL_MASK);
			if ((mask & (1UL << 5)) == 0)
				wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
		}
		if (amd_rascap & AMDRAS_SCALABLE_MCA) {
			mca_msr_ops.ctl = mca_smca_ctl_reg;
			mca_msr_ops.status = mca_smca_status_reg;
			mca_msr_ops.addr = mca_smca_addr_reg;
			mca_msr_ops.misc = mca_smca_misc_reg;
		}

		/*
		 * cmci_monitor() must not be executed simultaneously
		 * by multiple CPUs.
		 */
		if (boot)
			mtx_lock_spin(&mca_lock);

		for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
			/* By default enable logging of all errors. */
			ctl = 0xffffffffffffffffUL;
			skip = 0;

			if (cpu_vendor_id == CPU_VENDOR_INTEL) {
				/*
				 * For P6 models before Nehalem MC0_CTL is
				 * always enabled and reserved.
				 */
				if (i == 0 && family == 0x6 &&
				    CPUID_TO_MODEL(cpu_id) < 0x1a)
					skip = 1;
			} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
				/* BKDG for Family 10h: unset GartTblWkEn. */
				if (i == MC_AMDNB_BANK && family >= 0xf &&
				    family < 0x17)
					ctl &= ~(1UL << 10);
			}

			if (!skip)
				wrmsr(mca_msr_ops.ctl(i), ctl);

			if (cmci_supported(mcg_cap)) {
				if (boot)
					cmci_monitor(i);
				else
					cmci_resume(i);
			} else if (amd_thresholding_supported()) {
				if (boot)
					amd_thresholding_monitor(i);
				else
					amd_thresholding_resume(i);
			}

			/* Clear all errors. */
			wrmsr(mca_msr_ops.status(i), 0);
		}
		if (boot)
			mtx_unlock_spin(&mca_lock);

		if (!amd_thresholding_supported() &&
		    PCPU_GET(cmci_mask) != 0 && boot)
			lapic_enable_cmc();
	}

	load_cr4(rcr4() | CR4_MCE);
}
/* Must be executed on each CPU during boot. */
void
mca_init(void)
{

	_mca_init(1);
}
/* Must be executed on each CPU during resume. */
void
mca_resume(void)
{

	_mca_init(0);
}
/*
 * The machine check registers for the BSP cannot be initialized until
 * the local APIC is initialized.  This happens at SI_SUB_CPU,
 * SI_ORDER_SECOND.
 */
static void
mca_init_bsp(void *arg __unused)
{

	mca_init();
}
SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL);
/* Called when a machine check exception fires. */
void
mca_intr(void)
{
	uint64_t mcg_status;
	int recoverable, count;

	if (!(cpu_feature & CPUID_MCA)) {
		/*
		 * Just print the values of the old Pentium registers
		 * and panic.
		 */
		printf("MC Type: 0x%jx  Address: 0x%jx\n",
		    (uintmax_t)rdmsr(MSR_P5_MC_TYPE),
		    (uintmax_t)rdmsr(MSR_P5_MC_ADDR));
		panic("Machine check");
	}

	/* Scan the banks and check for any non-recoverable errors. */
	count = mca_scan(MCE, &recoverable);
	mcg_status = rdmsr(MSR_MCG_STATUS);
	if (!(mcg_status & MCG_STATUS_RIPV))
		recoverable = 0;

	if (!recoverable) {
		/*
		 * Only panic if the error was detected local to this CPU.
		 * Some errors will assert a machine check on all CPUs, but
		 * only certain CPUs will find a valid bank to log.
		 */
		while (count == 0)
			cpu_spinwait();

		panic("Unrecoverable machine check exception");
	}

	/* Clear MCIP. */
	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}
/* Called for a CMCI (correctable machine check interrupt). */
void
cmc_intr(void)
{

	/*
	 * Serialize MCA bank scanning to prevent collisions from
	 * sibling threads.
	 *
	 * If we found anything, log it to the console.
	 */
	if (mca_scan(CMCI, NULL) != 0)
		mca_process_records(CMCI);
}