 * Copyright (c) KATO Takenori, 1997, 1998.
 * All rights reserved.  Unpublished rights reserved under the copyright
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define	CPU_ENABLE_SSE
void	initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void	enable_K5_wt_alloc(void);
void	enable_K6_wt_alloc(void);
void	enable_K6_2_wt_alloc(void);
static void	init_5x86(void);
static void	init_bluelightning(void);
static void	init_486dlc(void);
static void	init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void	init_i486_on_386(void);
static void	init_6x86(void);
static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);
static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
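/*
 * Userland can check this at run time with, e.g., "sysctl hw.instruction_sse";
 * it reads 1 once SSE support has been enabled below.
 */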
/* Must *NOT* be BSS or locore will bzero these after setting them */
int	cpu = 0;		/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature = 0;	/* Feature flags */
u_int	cpu_feature2 = 0;	/* Feature flags */
u_int	amd_feature = 0;	/* AMD feature flags */
u_int	amd_feature2 = 0;	/* AMD feature flags */
u_int	via_feature_rng = 0;	/* VIA RNG features */
u_int	via_feature_xcrypt = 0;	/* VIA ACE features */
u_int	cpu_high = 0;		/* Highest arg to CPUID */
u_int	cpu_id = 0;		/* Stepping ID */
u_int	cpu_procinfo = 0;	/* HyperThreading Info / Brand Index / CLFLUSH */
u_int	cpu_procinfo2 = 0;	/* Multicore info */
char	cpu_vendor[20] = "";	/* CPU Origin code */
SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");
u_int	cpu_fxsr;		/* SSE enabled */
u_int	cpu_mxcsr_mask;		/* valid bits in mxcsr */
init_bluelightning(void)
#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
	eflags = read_eflags();
	load_cr0(rcr0() | CR0_CD | CR0_NW);
#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	write_eflags(eflags);
 * Cyrix 486SLC/DLC/SR/DR series
	eflags = read_eflags();
	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;	/* Direct mapped mode. */
	write_cyrix_reg(CCR0, ccr0);
	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);
	write_cyrix_reg(0, 0);	/* dummy write */
	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
 * Cyrix 486S/DX series
	eflags = read_eflags();
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_SUSP_HLT;
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
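	/*
	 * On these Cyrix parts CD = 0 with NW = 1 appears to be a legal
	 * combination which, together with the WB interface pin enabled
	 * above, selects write-back caching; NW is then locked via
	 * CCR2_LOCK_NW below so a later CR0 write cannot clear it.
	 */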
	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
	u_char	ccr2, ccr3, ccr4, pcr0;
	eflags = read_eflags();
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	(void)read_cyrix_reg(CCR3);	/* dummy */
	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_SUSP_HLT;
	ccr2 &= ~CCR2_SUSP_HLT;
	write_cyrix_reg(CCR2, ccr2);
	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);
	ccr4 = read_cyrix_reg(CCR4);
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
	ccr4 &= ~CCR4_FASTFPE;
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writer's Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
	ccr4 |= CPU_IORT & CCR4_IOMASK;
	write_cyrix_reg(CCR4, ccr4);
	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use the
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped I/O accesses.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	write_cyrix_reg(PCR0, pcr0);
	write_cyrix_reg(CCR3, ccr3);
	(void)read_cyrix_reg(0x80);	/* dummy */
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
	write_eflags(eflags);
#ifdef CPU_I486_ON_386
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
init_i486_on_386(void)
#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
	eflags = read_eflags();
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */
	write_eflags(eflags);
 * XXX - What should I do here? Please let me know.
	eflags = read_eflags();
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
	/* Initialize CCR2. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);
	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
	write_cyrix_reg(CCR4, ccr4 | 7);
	/* Initialize CCR5. */
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
	write_cyrix_reg(CCR3, ccr3);
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	 * Earlier revisions of the 6x86 CPU could crash the system if the
	 * L1 cache is in write-back mode.
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
	write_eflags(eflags);
#endif /* I486_CPU */
 * Cyrix 6x86MX (code-named M2)
 * XXX - What should I do here? Please let me know.
	eflags = read_eflags();
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
	/* Initialize CCR2. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);
	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
	write_cyrix_reg(CCR4, ccr4 | 7);
	/* Initialize CCR5. */
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
	write_cyrix_reg(CCR3, ccr3);
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
	write_eflags(eflags);
	 * Local APIC should be disabled if it is not going to be used.
	apicbase = rdmsr(MSR_APICBASE);
	apicbase &= ~APICBASE_ENABLED;
	wrmsr(MSR_APICBASE, apicbase);
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
#ifdef CPU_PPRO2CELERON
	u_int64_t	bbl_cr_ctl3;
	eflags = read_eflags();
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);
	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;
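		/*
		 * The test above checks bit 0 of BBL_CR_CTL3 (L2 already
		 * configured); the latency value below is written one bit
		 * higher, which is why both writes use a "<< 1" shift.
		 */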
		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
		bbl_cr_ctl3 |= 5 << 1;
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
 * Initialize special VIA C3/C7 features
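 *
 * CPUID leaf 0xc0000000 returns the highest Centaur extended leaf in EAX;
 * when leaf 0xc0000001 is available, its EDX reports the PadLock
 * feature-present and feature-enabled bits that are tested below.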
	do_cpuid(0xc0000000, regs);
	if (val >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
	/* Enable RNG if present and disabled */
	if (val & VIA_CPUID_HAS_RNG) {
		if (!(val & VIA_CPUID_DO_RNG)) {
			msreg = rdmsr(0x110B);
			wrmsr(0x110B, msreg);
		via_feature_rng = VIA_HAS_RNG;
	/* Enable AES engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE) {
		if (!(val & VIA_CPUID_DO_ACE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_AES;
	/* Enable ACE2 engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE2) {
		if (!(val & VIA_CPUID_DO_ACE2)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	/* Enable SHA engine if present and disabled */
	if (val & VIA_CPUID_HAS_PHE) {
		if (!(val & VIA_CPUID_DO_PHE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_SHA;
	/* Enable MM engine if present and disabled */
	if (val & VIA_CPUID_HAS_PMM) {
		if (!(val & VIA_CPUID_DO_PMM)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_MM;
#endif /* I686_CPU */
 * Initialize CR4 (Control register 4) to enable SSE instructions.
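 * CR4_FXSR (OSFXSR) lets the kernel use FXSAVE/FXRSTOR and permits SSE
 * instructions, while CR4_XMM (OSXMMEXCPT) enables delivery of unmasked
 * SIMD floating-point exceptions.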
#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
		init_bluelightning();
#ifdef CPU_I486_ON_386
#endif /* I486_CPU */
	if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
		switch (cpu_id & 0xff0) {
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
		 * Sometimes the BIOS doesn't enable SSE instructions.
		 * According to AMD document 20734, the mobile
		 * Duron, the (mobile) Athlon 4 and the Athlon MP
		 * support SSE. These correspond to cpu_id 0x66X
		if ((cpu_feature & CPUID_XMM) == 0 &&
		    ((cpu_id & ~0xf) == 0x660 ||
		     (cpu_id & ~0xf) == 0x670 ||
		     (cpu_id & ~0xf) == 0x680)) {
			wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
			cpu_feature = regs[3];
	} else if (strcmp(cpu_vendor, "CentaurHauls") == 0) {
		switch (cpu_id & 0xff0) {
			if ((cpu_id & 0xf) < 3)
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
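		/*
		 * Setting EFER.NXE makes the PG_NX (bit 63) page-table bit
		 * take effect, so pages can be marked no-execute when PAE
		 * paging is in use.
		 */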
#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	 * The OS must flush the L1 cache itself, because no PC-98 machine
	 * officially supports non-Intel CPUs.  Issue a wbinvd instruction
	 * before a DMA transfer when need_pre_dma_flush = 1, and an invd
	 * instruction after a DMA transfer when need_post_dma_flush = 1.
	 * If your CPU upgrade product supports hardware cache control, you
	 * can add the CPU_UPGRADE_HW_CACHE option to your kernel configuration
	 * file; it eliminates the unneeded cache flush instruction(s).
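	 *
	 * Illustrative driver-side pattern (a sketch, not code from this
	 * file; wbinvd()/invd() are the cpufunc.h inlines):
	 *	if (need_pre_dma_flush)
	 *		wbinvd();
	 *	(run the bus-master DMA transfer)
	 *	if (need_post_dma_flush)
	 *		invd();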
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
			need_post_dma_flush = 1;
			need_pre_dma_flush = 1;
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
	} else if (strcmp(cpu_vendor, "IBM") == 0) {
		need_post_dma_flush = 1;
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
 * Enable the write-allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
enable_K5_wt_alloc(void)
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		savecrit = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* Clear the write-allocate enable bit while configuring. */
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		 * There is no way to know whether the 15-16M hole exists or not.
		 * Therefore, we disable write allocate for this range.
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;
		wrmsr(0x83, msr | 0x10);	/* enable write allocate */
		intr_restore(savecrit);
enable_K6_wt_alloc(void)
	eflags = read_eflags();
#ifdef CPU_DISABLE_CACHE
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	wrmsr(0x0000000e, (u_int64_t)0x0008);
	/* Don't assume that the memory size is aligned to 4M. */
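	/*
	 * Maxmem is in 4 KB pages, so Maxmem >> 8 is the size in megabytes;
	 * adding 3 and shifting right by 2 rounds that up to 4 MB units,
	 * the granularity of the write-allocate limit programmed below.
	 */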
	size = ((Maxmem >> 8) + 3) >> 2;
	/* Limit is 508M bytes. */
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		if (!(inb(0x43b) & 4))
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	wrmsr(0x0c0000082, whcr);
	write_eflags(eflags);
enable_K6_2_wt_alloc(void)
	eflags = read_eflags();
#ifdef CPU_DISABLE_CACHE
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	wrmsr(0x0000000e, (u_int64_t)0x0008);
	/* Don't assume that the memory size is aligned to 4M. */
	size = ((Maxmem >> 8) + 3) >> 2;
	/* Limit is 4092M bytes. */
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
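	/*
	 * On the later K6 cores handled here, the write-allocate limit lives
	 * in WHCR bits 22-31 (still in 4 MB units) rather than bits 1-7 as in
	 * the function above, hence the 4092M ceiling instead of 508M.
	 */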
#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	whcr &= ~(1LL << 16);
	wrmsr(0x0c0000082, whcr);
	write_eflags(eflags);
#endif /* I586_CPU && CPU_WT_ALLOC */
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		eflags = read_eflags();
		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);	/* Restore CCR3. */
		write_eflags(eflags);
		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);
		printf("CCR1=%x, CCR2=%x, CCR3=%x",
		    (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
				printf("PCR0=%x\n", pcr0);
				printf("CCR5=%x\n", ccr5);
	printf("CR0=%x\n", cr0);