/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
static void     enable_K5_wt_alloc(void);
static void     enable_K6_wt_alloc(void);
static void     enable_K6_2_wt_alloc(void);
#endif

#ifdef I686_CPU
static void     init_6x86MX(void);
static void     init_ppro(void);
static void     init_mendocino(void);
#endif

static int      hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int      hw_clflush_disable = -1;

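/*
 * Presumably DIR1 in the high byte and DIR0 in the low byte, as read
 * during CPU identification.
 */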
u_int   cyrix_did;              /* Device ID of Cyrix CPU */

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
        register_t saveintr;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
        wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
#else
        wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
#endif
        /* Enables 13MB and 0-640KB cache. */
        wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
        wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
#else
        wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
#endif

        /* Enable caching in CR0. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        invd();
        intr_restore(saveintr);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
        register_t saveintr;
        u_char  ccr0;

        saveintr = intr_disable();
        invd();

        ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
        ccr0 |= CCR0_NC1 | CCR0_BARB;
        write_cyrix_reg(CCR0, ccr0);
        invd();
#else
        ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
        ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
        ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
        ccr0 |= CCR0_CO;                        /* Direct mapped mode. */
#endif
        write_cyrix_reg(CCR0, ccr0);

        /* Clear non-cacheable region. */
        write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

        write_cyrix_reg(0, 0);  /* dummy write */

        /* Enable caching in CR0. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        invd();
#endif /* !CYRIX_CACHE_WORKS */
        intr_restore(saveintr);
}

/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
        register_t saveintr;
        u_char  ccr2;

        saveintr = intr_disable();
        invd();

        ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
        ccr2 |= CCR2_SUSP_HLT;
#endif

        write_cyrix_reg(CCR2, ccr2);
        intr_restore(saveintr);
}

/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
        register_t saveintr;
        u_char  ccr2, ccr3, ccr4, pcr0;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        (void)read_cyrix_reg(CCR3);             /* dummy */

        /* Initialize CCR2. */
        ccr2 = read_cyrix_reg(CCR2);
        ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
        ccr2 |= CCR2_SUSP_HLT;
#else
        ccr2 &= ~CCR2_SUSP_HLT;
#endif
        ccr2 |= CCR2_WT1;
        write_cyrix_reg(CCR2, ccr2);

        /* Initialize CCR4. */
        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);
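        /*
         * Setting MAPEN in CCR3 exposes the extended configuration
         * registers (CCR4 and up, and PCR0); CCR3 is restored below to
         * hide them again.
         */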

        ccr4 = read_cyrix_reg(CCR4);
        ccr4 |= CCR4_DTE;
        ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
        ccr4 |= CCR4_FASTFPE;
#else
        ccr4 &= ~CCR4_FASTFPE;
#endif
        ccr4 &= ~CCR4_IOMASK;
        /********************************************************************
         * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
         * should be 0 as an errata workaround.
         ********************************************************************/
#ifdef CPU_IORT
        ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
        write_cyrix_reg(CCR4, ccr4);

        /* Initialize PCR0. */
        /****************************************************************
         * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
         * BTB_EN might make your system unstable.
         ****************************************************************/
        pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
        pcr0 |= PCR0_RSTK;
#else
        pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
        pcr0 |= PCR0_BTB;
#else
        pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
        pcr0 |= PCR0_LOOP;
#else
        pcr0 &= ~PCR0_LOOP;
#endif

        /****************************************************************
         * WARNING: if you use a memory-mapped I/O device, don't use the
         * CPU_DISABLE_5X86_LSSER option, which may reorder memory-mapped
         * I/O accesses.
         * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
         ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
        pcr0 &= ~PCR0_LSSER;
#else
        pcr0 |= PCR0_LSSER;
#endif
        write_cyrix_reg(PCR0, pcr0);

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        (void)read_cyrix_reg(0x80);             /* dummy */

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
        load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
        register_t saveintr;

        saveintr = intr_disable();

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */

        intr_restore(saveintr);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
        register_t saveintr;
        u_char  ccr3, ccr4;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        /* Initialize CCR0. */
        write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

        /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

        /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);

        /* Initialize CCR4. */
        ccr4 = read_cyrix_reg(CCR4);
        ccr4 |= CCR4_DTE;
        ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
        write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
        write_cyrix_reg(CCR4, ccr4 | 7);
#endif

        /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
        write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
        /*
         * Earlier revisions of the 6x86 CPU could crash the system if
         * the L1 cache is in write-back mode.
         */
        if ((cyrix_did & 0xff00) > 0x1600)
                load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        else {
                /* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
                load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
#else
                load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
#endif
        }

        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}
#endif /* I486_CPU */

#ifdef I586_CPU
/*
 * Rise mP6
 */
static void
init_rise(void)
{

        /*
         * The CMPXCHG8B instruction is always available but hidden.
         */
        cpu_feature |= CPUID_CX8;
}

/*
 * IDT WinChip C6/2/2A/2B/3
 *
 * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
 */
static void
init_winchip(void)
{
        u_int regs[4];
        uint64_t fcr;

        fcr = rdmsr(0x0107);

        /*
         * Set ECX8, DSMC, DTLOCK/EDCTLB, EMMX, and ERETSTK and clear DPDC.
         */
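        /*
         * Presumed FCR bit assignments, following the BIOS writer's
         * guide linked above: 1 = ECX8, 7 = DSMC, 8 = DTLOCK/EDCTLB,
         * 9 = EMMX, 16 = ERETSTK; bit 11 = DPDC.  For the WinChip 2/3
         * bits set further below: 12 = EBRPRED, 19 = E2MMX,
         * 20 = EAMD3D.
         */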
        fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
        fcr &= ~(1ULL << 11);

        /*
         * Additionally, set EBRPRED, E2MMX and EAMD3D for WinChip 2 and 3.
         */
        if (CPUID_TO_MODEL(cpu_id) >= 8)
                fcr |= (1 << 12) | (1 << 19) | (1 << 20);

        wrmsr(0x0107, fcr);
        do_cpuid(1, regs);
        cpu_feature = regs[3];
}
#endif

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
        register_t saveintr;
        u_char  ccr3, ccr4;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        /* Initialize CCR0. */
        write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

        /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

        /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);

        /* Initialize CCR4. */
        ccr4 = read_cyrix_reg(CCR4);
        ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
        write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
        write_cyrix_reg(CCR4, ccr4 | 7);
#endif

        /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
        write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */

        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}

static int ppro_apic_used = -1;

static void
init_ppro(void)
{
        u_int64_t       apicbase;

        /*
         * Local APIC should be disabled if it is not going to be used.
         */
        if (ppro_apic_used != 1) {
                apicbase = rdmsr(MSR_APICBASE);
                apicbase &= ~APICBASE_ENABLED;
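                /*
                 * APICBASE_ENABLED is the global-enable bit (bit 11)
                 * of the IA32_APIC_BASE MSR.
                 */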
                wrmsr(MSR_APICBASE, apicbase);
                ppro_apic_used = 0;
        }
}

/*
 * If the local APIC is going to be used after being disabled above,
 * re-enable it and don't disable it in the future.
 */
void
ppro_reenable_apic(void)
{
        u_int64_t       apicbase;

        if (ppro_apic_used == 0) {
                apicbase = rdmsr(MSR_APICBASE);
                apicbase |= APICBASE_ENABLED;
                wrmsr(MSR_APICBASE, apicbase);
                ppro_apic_used = 1;
        }
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
        register_t      saveintr;
        u_int64_t       bbl_cr_ctl3;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

        /* If the L2 cache is configured, do nothing. */
        if (!(bbl_cr_ctl3 & 1)) {
                bbl_cr_ctl3 = 0x134052bLL;

                /* Set L2 Cache Latency (Default: 5). */
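                /*
                 * The latency value is shifted into bits 1..4 of
                 * BBL_CR_CTL3, hence the compile-time limit of 15.
                 */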
#ifdef  CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
                bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
                bbl_cr_ctl3 |= 5 << 1;
#endif
                wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
        }

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
        intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}

/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
        u_int regs[4], val;
        uint64_t fcr;

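        /*
         * fcr accumulates bits to be ORed into MSR 0x1107, the VIA
         * Feature Control Register.
         */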
        /*
         * Explicitly enable CX8 and PGE on C3.
         *
         * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
         */
        if (CPUID_TO_MODEL(cpu_id) <= 9)
                fcr = (1 << 1) | (1 << 7);
        else
                fcr = 0;

        /*
         * Check extended CPUID for PadLock features.
         *
         * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
         */
        do_cpuid(0xc0000000, regs);
        if (regs[0] >= 0xc0000001) {
                do_cpuid(0xc0000001, regs);
                val = regs[3];
        } else
                val = 0;

        /* Enable RNG if present. */
        if ((val & VIA_CPUID_HAS_RNG) != 0) {
                via_feature_rng = VIA_HAS_RNG;
                wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
        }

        /* Enable PadLock if present. */
        if ((val & VIA_CPUID_HAS_ACE) != 0)
                via_feature_xcrypt |= VIA_HAS_AES;
        if ((val & VIA_CPUID_HAS_ACE2) != 0)
                via_feature_xcrypt |= VIA_HAS_AESCTR;
        if ((val & VIA_CPUID_HAS_PHE) != 0)
                via_feature_xcrypt |= VIA_HAS_SHA;
        if ((val & VIA_CPUID_HAS_PMM) != 0)
                via_feature_xcrypt |= VIA_HAS_MM;
        if (via_feature_xcrypt != 0)
                fcr |= 1 << 28;

        wrmsr(0x1107, rdmsr(0x1107) | fcr);
}

#endif /* I686_CPU */

#if defined(I586_CPU) || defined(I686_CPU)
static void
init_transmeta(void)
{
        u_int regs[4];

        /* Expose all hidden features. */
        wrmsr(0x80860004, rdmsr(0x80860004) | ~0UL);
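        /*
         * ~0UL is only 32 bits wide on i386, so this sets the low 32
         * bits of the MSR; presumably that covers every feature bit
         * the code-morphing software can unhide.
         */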
        do_cpuid(1, regs);
        cpu_feature = regs[3];
}
#endif

extern int elf32_nxstack;

void
initializecpu(void)
{

        switch (cpu) {
#ifdef I486_CPU
        case CPU_BLUE:
                init_bluelightning();
                break;
        case CPU_486DLC:
                init_486dlc();
                break;
        case CPU_CY486DX:
                init_cy486dx();
                break;
        case CPU_M1SC:
                init_5x86();
                break;
#ifdef CPU_I486_ON_386
        case CPU_486:
                init_i486_on_386();
                break;
#endif
        case CPU_M1:
                init_6x86();
                break;
#endif /* I486_CPU */
#ifdef I586_CPU
        case CPU_586:
                switch (cpu_vendor_id) {
                case CPU_VENDOR_AMD:
#ifdef CPU_WT_ALLOC
                        if (((cpu_id & 0x0f0) > 0) &&
                            ((cpu_id & 0x0f0) < 0x60) &&
                            ((cpu_id & 0x00f) > 3))
                                enable_K5_wt_alloc();
                        else if (((cpu_id & 0x0f0) > 0x80) ||
                            (((cpu_id & 0x0f0) == 0x80) &&
                                (cpu_id & 0x00f) > 0x07))
                                enable_K6_2_wt_alloc();
                        else if ((cpu_id & 0x0f0) > 0x50)
                                enable_K6_wt_alloc();
#endif
                        if ((cpu_id & 0xf0) == 0xa0)
                                /*
                                 * Make sure the TSC runs through
                                 * suspension, otherwise we can't use
                                 * it as a timecounter.
                                 */
                                wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
                        break;
                case CPU_VENDOR_CENTAUR:
                        init_winchip();
                        break;
                case CPU_VENDOR_TRANSMETA:
                        init_transmeta();
                        break;
                case CPU_VENDOR_RISE:
                        init_rise();
                        break;
                }
                break;
#endif
#ifdef I686_CPU
        case CPU_M2:
                init_6x86MX();
                break;
        case CPU_686:
                switch (cpu_vendor_id) {
                case CPU_VENDOR_INTEL:
                        switch (cpu_id & 0xff0) {
                        case 0x610:
                                init_ppro();
                                break;
                        case 0x660:
                                init_mendocino();
                                break;
                        }
                        break;
#ifdef CPU_ATHLON_SSE_HACK
                case CPU_VENDOR_AMD:
                        /*
                         * Sometimes the BIOS doesn't enable SSE instructions.
                         * According to AMD document 20734, the mobile
                         * Duron, the (mobile) Athlon 4 and the Athlon MP
                         * support SSE.  These correspond to cpu_id 0x66X,
                         * 0x67X, or 0x68X.
                         */
                        if ((cpu_feature & CPUID_XMM) == 0 &&
                            ((cpu_id & ~0xf) == 0x660 ||
                             (cpu_id & ~0xf) == 0x670 ||
                             (cpu_id & ~0xf) == 0x680)) {
                                u_int regs[4];
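                                /*
                                 * Bit 15 of MSR_HWCR is presumably the
                                 * SSEDIS bit; clearing it unhides SSE.
                                 */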
                                wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
                                do_cpuid(1, regs);
                                cpu_feature = regs[3];
                        }
                        break;
#endif
                case CPU_VENDOR_CENTAUR:
                        init_via();
                        break;
                case CPU_VENDOR_TRANSMETA:
                        init_transmeta();
                        break;
                }
                break;
#endif
        default:
                break;
        }
#if defined(CPU_ENABLE_SSE)
        if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
                load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
                cpu_fxsr = hw_instruction_sse = 1;
        }
#endif
#if defined(PAE) || defined(PAE_TABLES)
        if ((amd_feature & AMDID_NX) != 0) {
                uint64_t msr;

                msr = rdmsr(MSR_EFER) | EFER_NXE;
                wrmsr(MSR_EFER, msr);
                pg_nx = PG_NX;
                elf32_nxstack = 1;
        }
#endif
}

void
initializecpucache(void)
{

        /*
         * CPUID with %eax = 1 returns the CLFLUSH line size in bits
         * 15-8 of %ebx (value * 8 = cache line size in bytes).
         */
        if ((cpu_feature & CPUID_CLFSH) != 0)
                cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
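        /* cpu_procinfo holds %ebx from CPUID leaf 1. */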
        /*
         * XXXKIB: (temporary) hack to work around traps generated
         * when CLFLUSHing APIC register window under virtualization
         * environments.  These environments tend to disable the
         * CPUID_SS feature even though the native CPU supports it.
         */
        TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
        if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
                cpu_feature &= ~CPUID_CLFSH;
                cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
        }
        /*
         * The kernel's use of CLFLUSH{,OPT} can be disabled manually
         * by setting the hw.clflush_disable tunable.
         */
        if (hw_clflush_disable == 1) {
                cpu_feature &= ~CPUID_CLFSH;
                cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
        }
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
static void
enable_K5_wt_alloc(void)
{
        u_int64_t       msr;
        register_t      saveintr;

        /*
         * Write allocate is supported only on models 1, 2, and 3, with
         * a stepping of 4 or greater.
         */
        if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
                saveintr = intr_disable();
                msr = rdmsr(0x83);              /* HWCR */
                wrmsr(0x83, msr & ~0x10);       /* Disable write allocate. */

                /*
                 * We have to tell the chip where the top of memory is,
                 * since video cards could have frame buffers there,
                 * memory-mapped I/O could be there, etc.
                 */
                if (Maxmem > 0)
                  msr = Maxmem / 16;
                else
                  msr = 0;
                msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;

                /*
                 * There is no way to know whether the 15-16M hole exists
                 * or not.  Therefore, we disable write allocate for this
                 * range.
                 */
                wrmsr(0x86, 0x0ff00f0);
                msr |= AMD_WT_ALLOC_PRE;
                wrmsr(0x85, msr);

                msr = rdmsr(0x83);
                wrmsr(0x83, msr | 0x10);        /* Enable write allocate. */
                intr_restore(saveintr);
        }
}

static void
enable_K6_wt_alloc(void)
{
        quad_t  size;
        u_int64_t       whcr;
        register_t      saveintr;

        saveintr = intr_disable();
        wbinvd();

#ifdef CPU_DISABLE_CACHE
        /*
         * Certain K6-2 boxes become unstable when write allocation is
         * enabled.
         */
        /*
         * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
         * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
         * All other bits in TR12 have no effect on the processor's operation.
         * The I/O Trap Restart function (bit 9 of TR12) is always enabled
         * on the AMD-K6.
         */
        wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
        /* Don't assume that the memory size is aligned to 4M. */
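        /*
         * Maxmem is in 4 KB pages, so Maxmem >> 8 is megabytes and the
         * "+ 3, >> 2" rounds up to 4 MB units.
         */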
        if (Maxmem > 0)
          size = ((Maxmem >> 8) + 3) >> 2;
        else
          size = 0;

        /* Limit is 508M bytes. */
        if (size > 0x7f)
                size = 0x7f;
        whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
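        /*
         * MSR 0xc0000082 is presumably WHCR; on these steppings the
         * write allocate limit occupies bits 1..7, in 4 MB units
         * (0x7f * 4 MB = 508 MB, matching the limit above).
         */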

#if defined(NO_MEMORY_HOLE)
        if (whcr & (0x7fLL << 1))
                whcr |=  0x0001LL;
#else
        /*
         * There is no way to know whether the 15-16M hole exists or not.
         * Therefore, we disable write allocate for this range.
         */
        whcr &= ~0x0001LL;
#endif
        wrmsr(0x0c0000082, whcr);

        intr_restore(saveintr);
}

static void
enable_K6_2_wt_alloc(void)
{
        quad_t  size;
        u_int64_t       whcr;
        register_t      saveintr;

        saveintr = intr_disable();
        wbinvd();

#ifdef CPU_DISABLE_CACHE
        /*
         * Certain K6-2 boxes become unstable when write allocation is
         * enabled.
         */
        /*
         * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
         * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
         * All other bits in TR12 have no effect on the processor's operation.
         * The I/O Trap Restart function (bit 9 of TR12) is always enabled
         * on the AMD-K6.
         */
        wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
        /* Don't assume that the memory size is aligned to 4M. */
        if (Maxmem > 0)
          size = ((Maxmem >> 8) + 3) >> 2;
        else
          size = 0;

        /* Limit is 4092M bytes. */
        if (size > 0x3ff)
                size = 0x3ff;
        whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
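        /*
         * On these later cores the write allocate limit presumably
         * occupies WHCR bits 22..31, still in 4 MB units
         * (0x3ff * 4 MB = 4092 MB, matching the limit above).
         */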

#if defined(NO_MEMORY_HOLE)
        if (whcr & (0x3ffLL << 22))
                whcr |=  1LL << 16;
#else
        /*
         * There is no way to know whether the 15-16M hole exists or not.
         * Therefore, we disable write allocate for this range.
         */
        whcr &= ~(1LL << 16);
#endif
        wrmsr(0x0c0000082, whcr);

        intr_restore(saveintr);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

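/*
 * Dump the Cyrix configuration registers; invoked as "show cyrixreg"
 * from the ddb prompt.
 */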
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
        register_t saveintr;
        u_int   cr0;
        u_char  ccr1, ccr2, ccr3;
        u_char  ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

        cr0 = rcr0();
        if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
                saveintr = intr_disable();

                if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
                        ccr0 = read_cyrix_reg(CCR0);
                }
                ccr1 = read_cyrix_reg(CCR1);
                ccr2 = read_cyrix_reg(CCR2);
                ccr3 = read_cyrix_reg(CCR3);
                if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
                        write_cyrix_reg(CCR3, CCR3_MAPEN0);
                        ccr4 = read_cyrix_reg(CCR4);
                        if ((cpu == CPU_M1) || (cpu == CPU_M2))
                                ccr5 = read_cyrix_reg(CCR5);
                        else
                                pcr0 = read_cyrix_reg(PCR0);
                        write_cyrix_reg(CCR3, ccr3);            /* Restore CCR3. */
                }
                intr_restore(saveintr);

                if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
                        printf("CCR0=%x, ", (u_int)ccr0);

                printf("CCR1=%x, CCR2=%x, CCR3=%x",
                        (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
                if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
                        printf(", CCR4=%x, ", (u_int)ccr4);
                        if (cpu == CPU_M1SC)
                                printf("PCR0=%x\n", pcr0);
                        else
                                printf("CCR5=%x\n", ccr5);
                }
        }
        printf("CR0=%x\n", cr0);
}
#endif /* DDB */