/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <machine/cpu.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
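
/*
 * The CMOS shutdown status byte (index BIOS_RESET, reached through ports
 * CMOS_REG/CMOS_DATA) tells the BIOS what to do after a CPU reset.
 * Writing BIOS_WARM requests a warm start: the BIOS skips POST and jumps
 * through the real-mode vector stored at 0x40:0x67, which is the location
 * WARMBOOT_OFF/WARMBOOT_SEG address (via KERNBASE) so the kernel can aim
 * that vector at the AP trampoline.
 */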

extern struct pcpu __pcpu[];

/* Temporary variables for init_secondary() */
char *doublefault_stack;
char *nmi_stack;
void *dpcpu;

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr2;
struct invpcid_descr smp_tlb_invpcid;
volatile int smp_tlb_wait;
uint64_t pcid_cr3;
pmap_t smp_tlb_pmap;
extern int invpcid_works;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static int	start_ap(int apic_id);

static u_int	bootMP_size;
static u_int	boot_address;

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
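
/*
 * For example (hypothetical values): with basemem = 639 KB,
 * basemem * 1024 = 0x9fc00, which trunc_page() rounds down to 0x9f000.
 * Assuming the trampoline is larger than the 3KB left above that,
 * boot_address drops one more page to 0x9e000, and the three page table
 * pages then occupy 0x9b000-0x9dfff.
 */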

/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	if (pmap_pcid_enabled) {
		setidt(IPI_INVLTLB, IDTVEC(invltlb_pcid), SDT_SYSIGT,
		    SEL_KPL, 0);
		setidt(IPI_INVLPG, IDTVEC(invlpg_pcid), SDT_SYSIGT,
		    SEL_KPL, 0);
	} else {
		setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
		setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	}
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, IDTVEC(cpususpend), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	init_ops.start_all_aps();

	set_interrupt_apic_ids();
}

/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t msr, cr0;
	int cpu, gsel_tss, x;
	struct region_descriptor ap_gdt;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
	    IOPAGES * PAGE_SIZE;
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist2 = (long) np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
	lgdt(&ap_gdt);			/* does magic intra-segment return */
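
	/*
	 * lgdt() both loads the new GDT pointer and refreshes the segment
	 * registers against it; the "magic" above refers to the far-return
	 * sequence it uses, turning its own return into an intersegment
	 * return so that %cs is reloaded from the new table.
	 */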

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_commontssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GPROC0_SEL];
	pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GUSERLDT_SEL];

	/* Save the per-cpu pointer for use by the NMI handler. */
	np->np_pcpu = (register_t) pc;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */
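
	/*
	 * From here on, %gs-relative accesses (PCPU_GET/PCPU_SET) resolve
	 * to this CPU's pcpu area.  By convention MSR_KGSBASE holds the
	 * value that swapgs exchanges into GSBASE on kernel entry; during
	 * bootstrap both MSRs carry the kernel value, hence the XXX above.
	 */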

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
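
	/*
	 * MSR_STAR packs the selector bases used by syscall/sysret:
	 * bits 47:32 give the kernel %cs/%ss pair for syscall, bits 63:48
	 * the base from which sysret derives the user selectors.
	 * MSR_LSTAR and MSR_CSTAR hold the 64-bit and compat-mode entry
	 * points, and MSR_SF_MASK lists the %rflags bits cleared on entry,
	 * notably PSL_I so the handler starts with interrupts disabled.
	 */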

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (!aps_ready)
		ia32_pause();

	init_secondary_tail();
}

/*******************************************************************
 * local functions and data
 */

/*
 * start each AP in our list
 */
static int
native_start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
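
	/*
	 * Because every level 4 and level 3 slot points at the same
	 * next-level page, the low 1GB of physical memory appears at every
	 * 1GB-aligned virtual address.  The trampoline can therefore run
	 * at its real-mode physical address and still see the same code
	 * once paging is enabled, before switching to the kernel's own
	 * page tables.
	 */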

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
		    KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(kernel_arena,
		    PAGE_SIZE, M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
		    M_WAITOK | M_ZERO);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);
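
	/*
	 * ipi_startup() performs the MP-spec "universal start-up
	 * algorithm": an INIT IPI to reset the AP followed by two STARTUP
	 * IPIs carrying the vector above.  The AP begins executing in real
	 * mode at vector << 12, i.e. at boot_address, which is why the
	 * trampoline must be page-aligned and below 1MB.
	 */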

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPU's
 */
static void
smp_tlb_shootdown(u_int vector, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
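	/*
	 * The initiator must keep interrupts enabled while it spins for
	 * acknowledgements below; otherwise two CPUs waiting on each
	 * other's IPIs (e.g. a concurrent rendezvous) could deadlock.
	 */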
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_invpcid.addr = addr1;
	if (pmap == NULL) {
		smp_tlb_invpcid.pcid = 0;
	} else {
		smp_tlb_invpcid.pcid = pmap->pm_pcid;
		pcid_cr3 = pmap->pm_cr3;
	}
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
    vm_offset_t addr1, vm_offset_t addr2)
{
	int cpu, ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (CPU_ISFULLSET(&mask)) {
		if (othercpus < 1)
			return;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			return;
	}
	if (!(read_rflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_invpcid.addr = addr1;
	if (pmap == NULL) {
		smp_tlb_invpcid.pcid = 0;
	} else {
		smp_tlb_invpcid.pcid = pmap->pm_pcid;
		pcid_cr3 = pmap->pm_cr3;
	}
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (CPU_ISFULLSET(&mask)) {
		ncpu = othercpus;
		ipi_all_but_self(vector);
	} else {
		ncpu = 0;
		while ((cpu = CPU_FFS(&mask)) != 0) {
			cpu--;
			CPU_CLR(cpu, &mask);
			CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
			    cpu, vector);
			ipi_send_cpu(cpu, vector);
			ncpu++;
		}
	}
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_invlpg(pmap_t pmap, vm_offset_t addr)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, pmap, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_page++;
#endif
	}
}

void
smp_invlpg_range(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, pmap, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_range++;
		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_global++;
#endif
	}
}

void
smp_masked_invlpg(cpuset_t mask, pmap_t pmap, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, pmap, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_page++;
#endif
	}
}

void
smp_masked_invlpg_range(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, pmap, addr1,
		    addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_range++;
		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, NULL, 0, 0);
}

void
smp_invltlb(pmap_t pmap)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, pmap, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_global++;
#endif
	}
}

/*
 * Handlers for TLB related IPIs
 */
void
invltlb_handler(void)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invltlb();
	atomic_add_int(&smp_tlb_wait, 1);
}
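
/*
 * The atomic_add_int() above is the acknowledgement half of the
 * shootdown protocol: the initiator zeroes smp_tlb_wait, sends the IPI,
 * and spins until the counter reaches the number of targeted CPUs.
 */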

void
invltlb_pcid_handler(void)
{
	uint64_t cr3;
	u_int cpuid;
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_invpcid.pcid != (uint64_t)-1 &&
	    smp_tlb_invpcid.pcid != 0) {
		if (invpcid_works) {
			invpcid(&smp_tlb_invpcid, INVPCID_CTX);
		} else {
			/* Otherwise reload %cr3 twice. */
			cr3 = rcr3();
			if (cr3 != pcid_cr3) {
				load_cr3(pcid_cr3);
				cr3 |= CR3_PCID_SAVE;
			}
			load_cr3(cr3);
		}
	} else
		invltlb_globpcid();
	if (smp_tlb_pmap != NULL) {
		cpuid = PCPU_GET(cpuid);
		if (!CPU_ISSET(cpuid, &smp_tlb_pmap->pm_active))
			CPU_CLR_ATOMIC(cpuid, &smp_tlb_pmap->pm_save);
	}
	atomic_add_int(&smp_tlb_wait, 1);
}
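
/*
 * A note on the %cr3 dance above: on PCID-capable CPUs, bit 63 of a
 * value loaded into %cr3 (CR3_PCID_SAVE) asks the CPU not to flush the
 * TLB entries tagged with the new PCID.  Loading pcid_cr3 without the
 * bit flushes the target context; restoring the original %cr3 with the
 * bit set switches back without discarding this CPU's own translations.
 */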

void
invlpg_handler(void)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_invpcid.addr);
	atomic_add_int(&smp_tlb_wait, 1);
}

void
invlpg_pcid_handler(void)
{
	uint64_t cr3;
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
		invltlb_globpcid();
	} else if (smp_tlb_invpcid.pcid == 0) {
		invlpg(smp_tlb_invpcid.addr);
	} else if (invpcid_works) {
		invpcid(&smp_tlb_invpcid, INVPCID_ADDR);
	} else {
		/*
		 * PCID supported, but INVPCID is not.
		 * Temporarily switch to the target address
		 * space and do INVLPG.
		 */
		cr3 = rcr3();
		if (cr3 != pcid_cr3)
			load_cr3(pcid_cr3 | CR3_PCID_SAVE);
		invlpg(smp_tlb_invpcid.addr);
		load_cr3(cr3 | CR3_PCID_SAVE);
	}

	atomic_add_int(&smp_tlb_wait, 1);
}

static void
invlpg_range(vm_offset_t start, vm_offset_t end)
{

	do {
		invlpg(start);
		start += PAGE_SIZE;
	} while (start < end);
}

void
invlrng_handler(void)
{
	struct invpcid_descr d;
	vm_offset_t addr;
	uint64_t cr3;
	u_int cpuid;
#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_invpcid.addr;
	if (pmap_pcid_enabled) {
		if (smp_tlb_invpcid.pcid == 0) {
			/*
			 * kernel pmap - use invlpg to invalidate
			 * global mapping.
			 */
			invlpg_range(addr, smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
			invltlb_globpcid();
			if (smp_tlb_pmap != NULL) {
				cpuid = PCPU_GET(cpuid);
				if (!CPU_ISSET(cpuid, &smp_tlb_pmap->pm_active))
					CPU_CLR_ATOMIC(cpuid,
					    &smp_tlb_pmap->pm_save);
			}
		} else if (invpcid_works) {
			d = smp_tlb_invpcid;
			do {
				invpcid(&d, INVPCID_ADDR);
				d.addr += PAGE_SIZE;
			} while (d.addr <= smp_tlb_addr2);
		} else {
			cr3 = rcr3();
			if (cr3 != pcid_cr3)
				load_cr3(pcid_cr3 | CR3_PCID_SAVE);
			invlpg_range(addr, smp_tlb_addr2);
			load_cr3(cr3 | CR3_PCID_SAVE);
		}
	} else {
		invlpg_range(addr, smp_tlb_addr2);
	}

	atomic_add_int(&smp_tlb_wait, 1);
}