 * Copyright (c) KATO Takenori, 1997, 1998.
 * All rights reserved.  Unpublished rights reserved under the copyright
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define	CPU_ENABLE_SSE

void	initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void	enable_K5_wt_alloc(void);
void	enable_K6_wt_alloc(void);
void	enable_K6_2_wt_alloc(void);

static void	init_5x86(void);
static void	init_bluelightning(void);
static void	init_486dlc(void);
static void	init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void	init_i486_on_386(void);
static void	init_6x86(void);

static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
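/*
 * The flag is exported read-only; userland can read it as the
 * hw.instruction_sse sysctl.
 */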
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: forcibly disable CLFLUSH
static int	hw_clflush_disable = -1;
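/*
 * hw.clflush_disable is a loader tunable: it can be set at boot time,
 * e.g. with hw.clflush_disable="1" in /boot/loader.conf (see the
 * TUNABLE_INT_FETCH call below).
 */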
/* Must *NOT* be BSS or locore will bzero these after setting them */
int	cpu = 0;		/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature = 0;	/* Feature flags */
u_int	cpu_feature2 = 0;	/* Feature flags */
u_int	amd_feature = 0;	/* AMD feature flags */
u_int	amd_feature2 = 0;	/* AMD feature flags */
u_int	amd_pminfo = 0;		/* AMD advanced power management info */
u_int	via_feature_rng = 0;	/* VIA RNG features */
u_int	via_feature_xcrypt = 0;	/* VIA ACE features */
u_int	cpu_high = 0;		/* Highest arg to CPUID */
u_int	cpu_id = 0;		/* Stepping ID */
u_int	cpu_procinfo = 0;	/* HyperThreading Info / Brand Index / CLFLUSH */
u_int	cpu_procinfo2 = 0;	/* Multicore info */
char	cpu_vendor[20] = "";	/* CPU vendor string */
u_int	cpu_vendor_id = 0;	/* CPU vendor ID */
u_int	cpu_clflush_line_size = 32;
SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");
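/*
 * Like hw.instruction_sse above, these flags are visible from userland as
 * the hw.via_feature_rng and hw.via_feature_xcrypt sysctls.
 */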
#ifdef CPU_ENABLE_SSE
u_int	cpu_fxsr;		/* SSE enabled */
u_int	cpu_mxcsr_mask;		/* valid bits in mxcsr */

init_bluelightning(void)

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */

	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	intr_restore(saveintr);
 * Cyrix 486SLC/DLC/SR/DR series

	saveintr = intr_disable();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);

#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;

#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */

	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#endif /* !CYRIX_CACHE_WORKS */
	intr_restore(saveintr);
 * Cyrix 486S/DX series

	saveintr = intr_disable();

	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_SUSP_HLT;

	/* Enable the WB cache interface pin and lock the NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */

	write_cyrix_reg(CCR2, ccr2);
	intr_restore(saveintr);
	u_char	ccr2, ccr3, ccr4, pcr0;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_SUSP_HLT;
	ccr2 &= ~CCR2_SUSP_HLT;

	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);
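	/*
	 * Writing MAPEN0 into CCR3 exposes the extended configuration
	 * registers (CCR4 and PCR0 below); the saved ccr3 value is written
	 * back further down to hide them again.
	 */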
	ccr4 = read_cyrix_reg(CCR4);
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
	ccr4 &= ~CCR4_FASTFPE;
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for the errata fix.
	 ********************************************************************/
	ccr4 |= CPU_IORT & CCR4_IOMASK;
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use the
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped I/O
	 * accesses.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	write_cyrix_reg(PCR0, pcr0);

	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
#ifdef CPU_I486_ON_386
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
init_i486_on_386(void)

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;

	saveintr = intr_disable();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	intr_restore(saveintr);
 * XXX - What should I do here?  Please let me know.

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);

	/* Initialize CCR2. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
	write_cyrix_reg(CCR4, ccr4 | 7);

	/* Initialize CCR5. */
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);

	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	 * Earlier revisions of the 6x86 CPU could crash the system if the
	 * L1 cache is in write-back mode.
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
#endif /* I486_CPU */
 * Cyrix 6x86MX (code-named M2)
 * XXX - What should I do here?  Please let me know.

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);

	/* Initialize CCR2. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
	write_cyrix_reg(CCR4, ccr4 | 7);

	/* Initialize CCR5. */
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);

	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
	 * Local APIC should be disabled if it is not going to be used.
	apicbase = rdmsr(MSR_APICBASE);
	apicbase &= ~APICBASE_ENABLED;
	wrmsr(MSR_APICBASE, apicbase);

 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the

#ifdef CPU_PPRO2CELERON
	u_int64_t	bbl_cr_ctl3;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
		bbl_cr_ctl3 |= 5 << 1;
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
 * Initialize special VIA C3/C7 features

	do_cpuid(0xc0000000, regs);

	if (val >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);

	/* Enable RNG if present and disabled */
	if (val & VIA_CPUID_HAS_RNG) {
		if (!(val & VIA_CPUID_DO_RNG)) {
			msreg = rdmsr(0x110B);
			wrmsr(0x110B, msreg);
		via_feature_rng = VIA_HAS_RNG;

	/* Enable AES engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE) {
		if (!(val & VIA_CPUID_DO_ACE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_AES;

	/* Enable ACE2 engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE2) {
		if (!(val & VIA_CPUID_DO_ACE2)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_AESCTR;

	/* Enable SHA engine if present and disabled */
	if (val & VIA_CPUID_HAS_PHE) {
		if (!(val & VIA_CPUID_DO_PHE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_SHA;

	/* Enable MM engine if present and disabled */
	if (val & VIA_CPUID_HAS_PMM) {
		if (!(val & VIA_CPUID_DO_PMM)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		via_feature_xcrypt |= VIA_HAS_MM;
#endif /* I686_CPU */

 * Initialize CR4 (Control register 4) to enable SSE instructions.

#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;

		init_bluelightning();
#ifdef CPU_I486_ON_386
#endif /* I486_CPU */

	if (cpu_vendor_id == CPU_VENDOR_INTEL) {
		switch (cpu_id & 0xff0) {
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
		 * Sometimes the BIOS doesn't enable SSE instructions.
		 * According to AMD document 20734, the mobile
		 * Duron, the (mobile) Athlon 4 and the Athlon MP
		 * support SSE.  These correspond to cpu_id 0x66X, 0x67X and 0x68X.
		if ((cpu_feature & CPUID_XMM) == 0 &&
		    ((cpu_id & ~0xf) == 0x660 ||
		     (cpu_id & ~0xf) == 0x670 ||
		     (cpu_id & ~0xf) == 0x680)) {
			wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
			cpu_feature = regs[3];
	} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
		switch (cpu_id & 0xff0) {
			if ((cpu_id & 0xf) < 3)

	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
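		/*
		 * Setting EFER.NXE makes the NX (no-execute) bit usable in
		 * PAE page table entries, so the VM system can mark pages
		 * non-executable.
		 */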
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 *	(Value * 8 = cache line size in bytes)
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
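	/*
	 * Example: a CPUID.1 %ebx value of 0x00000800 yields
	 * ((0x800 >> 8) & 0xff) * 8 = 64-byte cache lines.
	 */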
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing the APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;
	 * Allow the CLFLUSH feature to be disabled manually via the
	 * hw.clflush_disable tunable.
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;
#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	 * The OS must flush the L1 cache itself because no PC-98 machine
	 * supports non-Intel CPUs.  Use the wbinvd instruction before a DMA
	 * transfer when need_pre_dma_flush = 1, and the invd instruction
	 * after a DMA transfer when need_post_dma_flush = 1.  If your CPU
	 * upgrade product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option to your kernel configuration file.
	 * This option eliminates unneeded cache flush instructions.
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		need_post_dma_flush = 1;
		need_pre_dma_flush = 1;
		need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
		need_post_dma_flush = 1;
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
enable_K5_wt_alloc(void)

	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		saveintr = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* Turn write allocate off while configuring. */
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;

		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		 * There is no way to know whether the 15-16M hole exists or not.
		 * Therefore, we disable write allocate for this range.
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;

		wrmsr(0x83, msr | 0x10);	/* enable write allocate */
		intr_restore(saveintr);
enable_K6_wt_alloc(void)

	saveintr = intr_disable();

#ifdef CPU_DISABLE_CACHE
	 * Certain K6-2 boxes become unstable when write allocation is enabled.
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	wrmsr(0x0000000e, (u_int64_t)0x0008);

	/* Don't assume that the memory size is a multiple of 4M. */
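	/*
	 * Maxmem is a count of 4 KB pages, so Maxmem >> 8 is the size in MB;
	 * the "+ 3 ... >> 2" below rounds that up to whole 4 MB units.
	 */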
	size = ((Maxmem >> 8) + 3) >> 2;

	/* Limit is 508M bytes. */
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		if (!(inb(0x43b) & 4))
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	wrmsr(0x0c0000082, whcr);

	intr_restore(saveintr);
enable_K6_2_wt_alloc(void)

	saveintr = intr_disable();

#ifdef CPU_DISABLE_CACHE
	 * Certain K6-2 boxes become unstable when write allocation is enabled.
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	wrmsr(0x0000000e, (u_int64_t)0x0008);

	/* Don't assume that the memory size is a multiple of 4M. */
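	/*
	 * As in enable_K6_wt_alloc() above: round the 4 KB page count up
	 * to whole 4 MB units.
	 */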
	size = ((Maxmem >> 8) + 3) >> 2;

	/* Limit is 4092M bytes. */
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	whcr &= ~(1LL << 16);

	wrmsr(0x0c0000082, whcr);

	intr_restore(saveintr);
#endif /* I586_CPU && CPU_WT_ALLOC */
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		saveintr = intr_disable();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);		/* Restore CCR3. */
		intr_restore(saveintr);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
		    (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			printf("PCR0=%x\n", pcr0);
			printf("CCR5=%x\n", ccr5);
	printf("CR0=%x\n", cr0);