/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
static void	enable_K5_wt_alloc(void);
static void	enable_K6_wt_alloc(void);
static void	enable_K6_2_wt_alloc(void);
#endif

#ifdef I686_CPU
static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);
#endif

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: forcibly disable CLFLUSH
 */
static int	hw_clflush_disable = -1;

u_int	cyrix_did;		/* Device ID of Cyrix CPU */

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	intr_restore(saveintr);
}
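
/*
 * The init_* routines in this file share a common shape: disable and
 * flush the cache, reprogram vendor-specific registers, then re-enable
 * caching.  A minimal sketch of that pattern, kept out of the build and
 * using only primitives already used above:
 */
#if 0	/* illustrative sketch, not compiled */
static void
cache_reconfig_sketch(void)
{
	register_t saveintr;

	saveintr = intr_disable();		/* no interrupts mid-sequence */
	load_cr0(rcr0() | CR0_CD | CR0_NW);	/* disable caching */
	invd();					/* discard stale lines */
	/* ... program vendor-specific MSRs or configuration registers ... */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* re-enable caching */
	invd();
	intr_restore(saveintr);
}
#endif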

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	register_t saveintr;
	u_char	ccr0;

	saveintr = intr_disable();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	intr_restore(saveintr);
}

/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	register_t saveintr;
	u_char	ccr2;

	saveintr = intr_disable();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	intr_restore(saveintr);
}

/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	register_t saveintr;
	u_char	ccr2, ccr3, ccr4, pcr0;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that the I/O recovery
	 * time should be 0 as an errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use the
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O accesses.
	 * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}
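
/*
 * Note the CCR3 save/restore dance above: writing CCR3_MAPEN0 unhides
 * the extended configuration registers (CCR4, CCR5, PCR0), and
 * restoring the saved CCR3 hides them again.  A minimal sketch of that
 * access pattern, kept out of the build:
 */
#if 0	/* illustrative sketch, not compiled */
static u_char
read_mapped_cyrix_reg(u_char reg)
{
	u_char ccr3, val;

	ccr3 = read_cyrix_reg(CCR3);		/* save current mapping */
	write_cyrix_reg(CCR3, CCR3_MAPEN0);	/* expose hidden registers */
	val = read_cyrix_reg(reg);
	write_cyrix_reg(CCR3, ccr3);		/* hide them again */
	return (val);
}
#endif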

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	saveintr = intr_disable();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	intr_restore(saveintr);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if
	 * the L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}
#endif /* I486_CPU */

#ifdef I586_CPU
/*
 * Rise mP6
 */
static void
init_rise(void)
{

	/*
	 * The CMPXCHG8B instruction is always available but hidden.
	 */
	cpu_feature |= CPUID_CX8;
}

/*
 * IDT WinChip C6/2/2A/2B/3
 *
 * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
 */
static void
init_winchip(void)
{
	u_int regs[4];
	uint64_t fcr;

	fcr = rdmsr(0x0107);

	/*
	 * Set ECX8 (bit 1), DSMC (bit 7), DTLOCK/EDCTLB (bit 8), EMMX
	 * (bit 9), and ERETSTK (bit 16); clear DPDC (bit 11).
	 */
	fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
	fcr &= ~(1ULL << 11);

	/*
	 * Additionally, set EBRPRED (bit 12), E2MMX (bit 19) and EAMD3D
	 * (bit 20) for WinChip 2 and 3.
	 */
	if (CPUID_TO_MODEL(cpu_id) >= 8)
		fcr |= (1 << 12) | (1 << 19) | (1 << 20);

	wrmsr(0x0107, fcr);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
#endif

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}

static int ppro_apic_used = -1;

static void
init_ppro(void)
{
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled if it is not going to be used.
	 */
	if (ppro_apic_used != 1) {
		apicbase = rdmsr(MSR_APICBASE);
		apicbase &= ~APICBASE_ENABLED;
		wrmsr(MSR_APICBASE, apicbase);
		ppro_apic_used = 0;
	}
}

/*
 * If the local APIC is going to be used after being disabled above,
 * re-enable it and don't disable it in the future.
 */
void
ppro_reenable_apic(void)
{
	u_int64_t	apicbase;

	if (ppro_apic_used == 0) {
		apicbase = rdmsr(MSR_APICBASE);
		apicbase |= APICBASE_ENABLED;
		wrmsr(MSR_APICBASE, apicbase);
		ppro_apic_used = 1;
	}
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	register_t	saveintr;
	u_int64_t	bbl_cr_ctl3;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}

/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;
	uint64_t fcr;

	/*
	 * Explicitly enable CX8 and PGE on C3.
	 *
	 * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
	 */
	if (CPUID_TO_MODEL(cpu_id) <= 9)
		fcr = (1 << 1) | (1 << 7);
	else
		fcr = 0;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
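	/*
	 * Leaf 0xc0000000 returns the highest supported Centaur leaf
	 * in regs[0] (%eax); the PadLock feature flags live in
	 * regs[3] (%edx) of leaf 0xc0000001.
	 */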
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		val = 0;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
	if (via_feature_xcrypt != 0)
		fcr |= 1 << 28;

	wrmsr(0x1107, rdmsr(0x1107) | fcr);
}

#endif /* I686_CPU */

#if defined(I586_CPU) || defined(I686_CPU)
static void
init_transmeta(void)
{
	u_int regs[4];	/* do_cpuid() fills all four registers */

	/* Expose all hidden features. */
	wrmsr(0x80860004, rdmsr(0x80860004) | ~0UL);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
#endif

extern int elf32_nxstack;

void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I586_CPU
	case CPU_586:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
#ifdef CPU_WT_ALLOC
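			/*
			 * The model (bits 7-4 of cpu_id) and stepping
			 * (bits 3-0) select the K5, K6 or K6-2
			 * write-allocate routine below.
			 */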
			if (((cpu_id & 0x0f0) > 0) &&
			    ((cpu_id & 0x0f0) < 0x60) &&
			    ((cpu_id & 0x00f) > 3))
				enable_K5_wt_alloc();
			else if (((cpu_id & 0x0f0) > 0x80) ||
			    (((cpu_id & 0x0f0) == 0x80) &&
				(cpu_id & 0x00f) > 0x07))
				enable_K6_2_wt_alloc();
			else if ((cpu_id & 0x0f0) > 0x50)
				enable_K6_wt_alloc();
#endif
			if ((cpu_id & 0xf0) == 0xa0)
				/*
				 * Make sure the TSC runs through
				 * suspension, otherwise we can't use
				 * it as a timecounter.
				 */
				wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
			break;
		case CPU_VENDOR_CENTAUR:
			init_winchip();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		case CPU_VENDOR_RISE:
			init_rise();
			break;
		}
		break;
#endif
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_INTEL:
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
			break;
#ifdef CPU_ATHLON_SSE_HACK
		case CPU_VENDOR_AMD:
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE.  These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
			break;
#endif
		case CPU_VENDOR_CENTAUR:
			init_via();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
		break;
#endif
	default:
		break;
	}
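	/*
	 * CR4_FXSR (OSFXSR) announces OS support for fxsave/fxrstor
	 * context switching and is required for SSE instructions to
	 * execute without faulting; CR4_XMM (OSXMMEXCPT) enables
	 * delivery of unmasked SIMD floating-point exceptions.
	 */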
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#if defined(PAE) || defined(PAE_TABLES)
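	/*
	 * With PAE page tables, bit 63 of a PTE is the no-execute (NX)
	 * bit; it only takes effect once EFER.NXE has been set.
	 */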
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
		elf32_nxstack = 1;
	}
#endif
}

void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 *	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
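	/*
	 * Worked example: a CPU reporting 8 in those bits has a
	 * CLFLUSH line size of 8 * 8 = 64 bytes, the common case.
	 */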
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}
	/*
	 * The kernel's use of CLFLUSH{,OPT} can be disabled manually
	 * by setting the hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS should flush the L1 cache by itself because no PC-98
	 * supports non-Intel CPUs.  Use the wbinvd instruction before a
	 * DMA transfer when need_pre_dma_flush = 1 and the invd
	 * instruction after a DMA transfer when need_post_dma_flush = 1.
	 * If your CPU upgrade product supports hardware cache control,
	 * you can add the CPU_UPGRADE_HW_CACHE option to your kernel
	 * configuration file.  This option eliminates the unneeded cache
	 * flush instruction(s).
	 */
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
static void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;
	register_t	saveintr;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		saveintr = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* disable write allocate */

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
		if (Maxmem > 0)
			msr = Maxmem / 16;
		else
			msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		}
#else
		/*
		 * There is no way to know whether the 15-16M hole exists
		 * or not, so we disable write allocate for this range.
		 */
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;
#endif
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10);	/* enable write allocate */
		intr_restore(saveintr);
	}
}

static void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	register_t	saveintr;

	saveintr = intr_disable();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's
	 * operation.  The I/O Trap Restart function (bit 9 of TR12) is
	 * always enabled on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned to 4M. */
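	/*
	 * Maxmem counts 4 KB pages, so Maxmem >> 8 is the memory size
	 * in megabytes and the "(+ 3) >> 2" rounds up to 4 MB units;
	 * e.g. Maxmem = 16384 (64 MB) gives size = 16.
	 */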
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
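	/*
	 * In WHCR, bits 7-1 hold the limit in 4 MB units and bit 0
	 * controls write allocate for the 15-16M range.
	 */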

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for
		 * the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |= 0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not,
	 * so we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0xc0000082, whcr);

	intr_restore(saveintr);
}

static void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	register_t	saveintr;

	saveintr = intr_disable();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's
	 * operation.  The I/O Trap Restart function (bit 9 of TR12) is
	 * always enabled on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned to 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
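	/*
	 * Here the limit lives in bits 31-22 of WHCR, and bit 16
	 * controls write allocate for the 15-16M range.
	 */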

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for
		 * the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
		else
#endif
			whcr |= 1LL << 16;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not,
	 * so we disable write allocate for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0xc0000082, whcr);

	intr_restore(saveintr);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
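
/*
 * From the DDB prompt, "show cyrixreg" prints the Cyrix configuration
 * registers and CR0.
 */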
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	register_t saveintr;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		saveintr = intr_disable();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);	/* Restore CCR3. */
		}
		intr_restore(saveintr);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
#endif /* DDB */