2 * Copyright (c) 1996, by Steve Passe
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
31 #include "opt_kstack_pages.h"
33 #include "opt_sched.h"
38 #error How did you get here?
42 #error The apic device is required for SMP, add "device apic" to your config file.
44 #if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
45 #error SMP not supported with CPU_DISABLE_CMPXCHG
49 #include <sys/param.h>
50 #include <sys/systm.h>
52 #include <sys/cons.h> /* cngetc() */
53 #include <sys/cpuset.h>
57 #include <sys/kernel.h>
60 #include <sys/malloc.h>
61 #include <sys/memrange.h>
62 #include <sys/mutex.h>
65 #include <sys/sched.h>
67 #include <sys/sysctl.h>
70 #include <vm/vm_param.h>
72 #include <vm/vm_kern.h>
73 #include <vm/vm_extern.h>
75 #include <x86/apicreg.h>
76 #include <machine/clock.h>
77 #include <machine/cputypes.h>
79 #include <machine/md_var.h>
80 #include <machine/pcb.h>
81 #include <machine/psl.h>
82 #include <machine/smp.h>
83 #include <machine/specialreg.h>
85 #define WARMBOOT_TARGET 0
86 #define WARMBOOT_OFF (KERNBASE + 0x0467)
87 #define WARMBOOT_SEG (KERNBASE + 0x0469)
89 #define CMOS_REG (0x70)
90 #define CMOS_DATA (0x71)
91 #define BIOS_RESET (0x0f)
92 #define BIOS_WARM (0x0a)
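/*
 * CMOS_REG/CMOS_DATA are the RTC index and data ports.  BIOS_RESET selects the
 * CMOS shutdown status byte and BIOS_WARM is the "warm start" code; together
 * with the warm-boot vector at 0x467/0x469 patched in start_all_aps(), this is
 * presumably intended to send a processor that restarts through the BIOS into
 * the AP trampoline instead of a full POST.
 */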
95 * This code MUST be enabled both here and in mpboot.s.
96 * It follows the very early stages of AP boot by placing values in CMOS RAM.
97 * It is NORMALLY never needed, hence the primitive method for enabling it.
102 #if defined(CHECK_POINTS) && !defined(PC98)
103 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA))
104 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
106 #define CHECK_INIT(D) \
107 CHECK_WRITE(0x34, (D)); \
108 CHECK_WRITE(0x35, (D)); \
109 CHECK_WRITE(0x36, (D)); \
110 CHECK_WRITE(0x37, (D)); \
111 CHECK_WRITE(0x38, (D)); \
112 CHECK_WRITE(0x39, (D));
114 #define CHECK_PRINT(S) \
115 printf("%s: %d, %d, %d, %d, %d, %d\n", \
124 #else /* CHECK_POINTS */
126 #define CHECK_INIT(D)
127 #define CHECK_PRINT(S)
128 #define CHECK_WRITE(A, D)
130 #endif /* CHECK_POINTS */
132 /* lock region used by kernel profiling */
135 int mp_naps; /* # of application processors */
136 int boot_cpu_id = -1; /* designated BSP */
138 extern struct pcpu __pcpu[];
140 /* AP uses this during bootstrap. Do not staticize. */
144 /* Free these after use */
145 void *bootstacks[MAXCPU];
148 struct pcb stoppcbs[MAXCPU];
149 struct pcb **susppcbs = NULL;
151 /* Variables needed for SMP tlb shootdown. */
152 vm_offset_t smp_tlb_addr1;
153 vm_offset_t smp_tlb_addr2;
154 volatile int smp_tlb_wait;
157 /* Interrupt counts. */
158 static u_long *ipi_preempt_counts[MAXCPU];
159 static u_long *ipi_ast_counts[MAXCPU];
160 u_long *ipi_invltlb_counts[MAXCPU];
161 u_long *ipi_invlrng_counts[MAXCPU];
162 u_long *ipi_invlpg_counts[MAXCPU];
163 u_long *ipi_invlcache_counts[MAXCPU];
164 u_long *ipi_rendezvous_counts[MAXCPU];
165 u_long *ipi_lazypmap_counts[MAXCPU];
166 static u_long *ipi_hardclock_counts[MAXCPU];
170 * Local data and functions.
173 static volatile cpuset_t ipi_nmi_pending;
175 /* used to hold the APs until we are ready to release them */
176 static struct mtx ap_boot_mtx;
178 /* Set to 1 once we're ready to let the APs out of the pen. */
179 static volatile int aps_ready = 0;
182 * Store data from cpu_add() until later in the boot when we actually set up the APs.
189 int cpu_hyperthread:1;
190 } static cpu_info[MAX_APIC_ID + 1];
191 int cpu_apic_ids[MAXCPU];
192 int apic_cpuids[MAX_APIC_ID + 1];
194 /* Holds pending bitmap based IPIs per CPU */
195 static volatile u_int cpu_ipi_pending[MAXCPU];
197 static u_int boot_address;
198 static int cpu_logical; /* logical cpus per core */
199 static int cpu_cores; /* cores per package */
201 static void assign_cpu_ids(void);
202 static void install_ap_tramp(void);
203 static void set_interrupt_apic_ids(void);
204 static int start_all_aps(void);
205 static int start_ap(int apic_id);
206 static void release_aps(void *dummy);
208 static u_int hyperthreading_cpus; /* logical cpus sharing L1 cache */
209 static int hyperthreading_allowed = 1;
212 mem_range_AP_init(void)
214 if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
215 mem_range_softc.mr_op->initAP(&mem_range_softc);
224 /* AMD processors do not support HTT. */
227 if ((amd_feature2 & AMDID2_CMP) == 0) {
232 core_id_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
233 AMDID_COREID_SIZE_SHIFT;
234 if (core_id_bits == 0) {
235 cpu_cores = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
239 /* Fam 10h and newer should get here. */
240 for (id = 0; id <= MAX_APIC_ID; id++) {
241 /* Check logical CPU availability. */
242 if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
244 /* Check if logical CPU has the same package ID. */
245 if ((id >> core_id_bits) != (boot_cpu_id >> core_id_bits))
252 * Round up to the next power of two, if necessary, and then take log2.
254 * Returns -1 if the argument is zero.
260 return (fls(x << (1 - powerof2(x))) - 1);
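/*
 * Illustrative example (added): mask_width(6) computes fls(6 << 1) - 1 =
 * fls(12) - 1 = 3, i.e. ceil(log2(6)); for an exact power of two such as 8,
 * powerof2() is 1 and the result is fls(8) - 1 = 3 = log2(8).
 */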
273 /* Both zero and one here mean one logical processor per package. */
274 max_logical = (cpu_feature & CPUID_HTT) != 0 ?
275 (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
276 if (max_logical <= 1)
280 * Because of the uniformity assumption we examine only
281 * those logical processors that belong to the same
282 * package as the BSP. Further, we count the number of
283 * logical processors that belong to the same core
284 * as the BSP, thus deducing the number of threads per core.
286 if (cpu_high >= 0x4) {
287 cpuid_count(0x04, 0, p);
288 max_cores = ((p[0] >> 26) & 0x3f) + 1;
291 core_id_bits = mask_width(max_logical/max_cores);
292 if (core_id_bits < 0)
294 pkg_id_bits = core_id_bits + mask_width(max_cores);
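/*
 * Illustrative example (added): if CPUID.1 reports max_logical = 16 and
 * CPUID.4 reports max_cores = 8, then core_id_bits = mask_width(16 / 8) = 1
 * and pkg_id_bits = 1 + mask_width(8) = 4, so APIC ID bit 0 selects the SMT
 * thread, bits 1-3 select the core and bits 4 and up select the package.
 */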
296 for (id = 0; id <= MAX_APIC_ID; id++) {
297 /* Check logical CPU availability. */
298 if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
300 /* Check if logical CPU has the same package ID. */
301 if ((id >> pkg_id_bits) != (boot_cpu_id >> pkg_id_bits))
304 /* Check if logical CPU has the same package and core IDs. */
305 if ((id >> core_id_bits) == (boot_cpu_id >> core_id_bits))
309 KASSERT(cpu_cores >= 1 && cpu_logical >= 1,
310 ("topo_probe_0x4 couldn't find BSP"));
312 cpu_cores /= cpu_logical;
313 hyperthreading_cpus = cpu_logical;
327 /* We only support three levels for now. */
328 for (i = 0; i < 3; i++) {
329 cpuid_count(0x0b, i, p);
331 /* Fall back if CPU leaf 11 doesn't really exist. */
332 if (i == 0 && p[1] == 0) {
338 logical = p[1] &= 0xffff;
339 type = (p[2] >> 8) & 0xff;
340 if (type == 0 || logical == 0)
343 * Because of the uniformity assumption we examine only
344 * those logical processors that belong to the same package as the BSP.
347 for (cnt = 0, x = 0; x <= MAX_APIC_ID; x++) {
348 if (!cpu_info[x].cpu_present ||
349 cpu_info[x].cpu_disabled)
351 if (x >> bits == boot_cpu_id >> bits)
354 if (type == CPUID_TYPE_SMT)
356 else if (type == CPUID_TYPE_CORE)
359 if (cpu_logical == 0)
361 cpu_cores /= cpu_logical;
365 * Both the topology discovery code and the code that consumes topology
366 * information assume top-down uniformity of the topology.
367 * That is, all physical packages must be identical and each
368 * core in a package must have the same number of threads.
369 * Topology information is queried only on the BSP, on which this
370 * code runs and for which it can query CPUID information.
371 * The topology is then extrapolated to all packages using the
372 * uniformity assumption.
377 static int cpu_topo_probed = 0;
382 CPU_ZERO(&logical_cpus_mask);
384 cpu_cores = cpu_logical = 1;
385 else if (cpu_vendor_id == CPU_VENDOR_AMD)
387 else if (cpu_vendor_id == CPU_VENDOR_INTEL) {
389 * See the Intel(R) 64 Architecture Processor
390 * Topology Enumeration article for details.
392 * Note that the 0x1 <= cpu_high < 4 case should be
393 * compatible with the topo_probe_0x4() logic when
394 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1),
395 * or it should trigger the fallback otherwise.
399 else if (cpu_high >= 0x1)
404 * Fallback: assume each logical CPU is in a separate
405 * physical package. That is, no multi-core, no SMT.
407 if (cpu_cores == 0 || cpu_logical == 0)
408 cpu_cores = cpu_logical = 1;
418 * Determine whether any threading flags are set.
422 if (cpu_logical > 1 && hyperthreading_cpus)
423 cg_flags = CG_FLAG_HTT;
424 else if (cpu_logical > 1)
425 cg_flags = CG_FLAG_SMT;
428 if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
429 printf("WARNING: Non-uniform processors.\n");
430 printf("WARNING: Using suboptimal topology.\n");
431 return (smp_topo_none());
434 * No multi-core or hyper-threaded.
436 if (cpu_logical * cpu_cores == 1)
437 return (smp_topo_none());
439 * Only HTT, no multi-core.
441 if (cpu_logical > 1 && cpu_cores == 1)
442 return (smp_topo_1level(CG_SHARE_L1, cpu_logical, cg_flags));
444 * Only multi-core, no HTT.
446 if (cpu_cores > 1 && cpu_logical == 1)
447 return (smp_topo_1level(CG_SHARE_L2, cpu_cores, cg_flags));
449 * Both HTT and multi-core.
451 return (smp_topo_2level(CG_SHARE_L2, cpu_cores,
452 CG_SHARE_L1, cpu_logical, cg_flags));
457 * Calculate usable address in base memory for AP trampoline code.
460 mp_bootaddress(u_int basemem)
463 boot_address = trunc_page(basemem); /* round down to 4k boundary */
464 if ((basemem - boot_address) < bootMP_size)
465 boot_address -= PAGE_SIZE; /* not enough, lower by 4k */
471 cpu_add(u_int apic_id, char boot_cpu)
474 if (apic_id > MAX_APIC_ID) {
475 panic("SMP: APIC ID %d too high", apic_id);
478 KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
480 cpu_info[apic_id].cpu_present = 1;
482 KASSERT(boot_cpu_id == -1,
483 ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
485 boot_cpu_id = apic_id;
486 cpu_info[apic_id].cpu_bsp = 1;
488 if (mp_ncpus < MAXCPU) {
490 mp_maxid = mp_ncpus - 1;
493 printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
498 cpu_mp_setmaxid(void)
502 * mp_maxid should already be set by calls to cpu_add().
503 * Just sanity check its value here.
506 KASSERT(mp_maxid == 0,
507 ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
508 else if (mp_ncpus == 1)
511 KASSERT(mp_maxid >= mp_ncpus - 1,
512 ("%s: counters out of sync: max %d, count %d", __func__,
513 mp_maxid, mp_ncpus));
521 * Always record the BSP in the CPU map so that the mbuf init code works correctly.
524 CPU_SETOF(0, &all_cpus);
527 * No CPUs were found, so this must be a UP system. Set up
528 * the variables to represent a system with a single CPU.
535 /* At least one CPU was found. */
538 * One CPU was found, so this must be a UP system with an I/O APIC.
545 /* At least two CPUs were found. */
550 * Initialize the IPI handlers and start up the APs.
557 /* Initialize the logical ID to APIC ID table. */
558 for (i = 0; i < MAXCPU; i++) {
559 cpu_apic_ids[i] = -1;
560 cpu_ipi_pending[i] = 0;
563 /* Install an inter-CPU IPI for TLB invalidation */
564 setidt(IPI_INVLTLB, IDTVEC(invltlb),
565 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
566 setidt(IPI_INVLPG, IDTVEC(invlpg),
567 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
568 setidt(IPI_INVLRNG, IDTVEC(invlrng),
569 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
571 /* Install an inter-CPU IPI for cache invalidation. */
572 setidt(IPI_INVLCACHE, IDTVEC(invlcache),
573 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
575 /* Install an inter-CPU IPI for lazy pmap release */
576 setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
577 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
579 /* Install an inter-CPU IPI for all-CPU rendezvous */
580 setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
581 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
583 /* Install generic inter-CPU IPI handler */
584 setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
585 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
587 /* Install an inter-CPU IPI for CPU stop/restart */
588 setidt(IPI_STOP, IDTVEC(cpustop),
589 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
591 /* Install an inter-CPU IPI for CPU suspend/resume */
592 setidt(IPI_SUSPEND, IDTVEC(cpususpend),
593 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
595 /* Set boot_cpu_id if needed. */
596 if (boot_cpu_id == -1) {
597 boot_cpu_id = PCPU_GET(apic_id);
598 cpu_info[boot_cpu_id].cpu_bsp = 1;
600 KASSERT(boot_cpu_id == PCPU_GET(apic_id),
601 ("BSP's APIC ID doesn't match boot_cpu_id"));
603 /* Probe logical/physical core configuration. */
608 /* Start each Application Processor */
611 set_interrupt_apic_ids();
616 * Print various information about the SMP system hardware and setup.
619 cpu_mp_announce(void)
621 const char *hyperthread;
624 printf("FreeBSD/SMP: %d package(s) x %d core(s)",
625 mp_ncpus / (cpu_cores * cpu_logical), cpu_cores);
626 if (hyperthreading_cpus > 1)
627 printf(" x %d HTT threads", cpu_logical);
628 else if (cpu_logical > 1)
629 printf(" x %d SMT threads", cpu_logical);
632 /* List active CPUs first. */
633 printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
634 for (i = 1; i < mp_ncpus; i++) {
635 if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
639 printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
643 /* List disabled CPUs last. */
644 for (i = 0; i <= MAX_APIC_ID; i++) {
645 if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
647 if (cpu_info[i].cpu_hyperthread)
651 printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
657 * AP CPUs call this function to initialize themselves.
668 /* bootAP is set in start_ap() to our ID. */
671 /* Get per-cpu data */
674 /* prime data page for it to use */
675 pcpu_init(pc, myid, sizeof(struct pcpu));
676 dpcpu_init(dpcpu, myid);
677 pc->pc_apic_id = cpu_apic_ids[myid];
678 pc->pc_prvspace = pc;
679 pc->pc_curthread = 0;
681 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
682 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
684 for (x = 0; x < NGDT; x++) {
685 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
688 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
689 r_gdt.rd_base = (int) &gdt[myid * NGDT];
690 lgdt(&r_gdt); /* does magic intra-segment return */
695 PCPU_SET(currentldt, _default_ldt);
697 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
698 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
699 PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
700 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
701 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
702 PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
703 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
706 PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd);
709 * Set to a known state:
710 * Set by mpboot.s: CR0_PG, CR0_PE
711 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
714 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
716 CHECK_WRITE(0x38, 5);
718 /* Disable local APIC just to be sure. */
721 /* Signal our startup to the BSP. */
723 CHECK_WRITE(0x39, 6);
725 /* Spin until the BSP releases the APs. */
729 /* BSP may have changed PTD while we were waiting */
731 for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
734 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
738 /* Initialize the PAT MSR if present. */
741 /* set up CPU registers and state */
744 /* set up FPU state on the AP */
747 /* set up SSE registers */
751 /* Enable the PTE no-execute bit. */
752 if ((amd_feature & AMDID_NX) != 0) {
755 msr = rdmsr(MSR_EFER) | EFER_NXE;
756 wrmsr(MSR_EFER, msr);
760 /* A quick sanity check. */
761 cpuid = PCPU_GET(cpuid);
762 if (PCPU_GET(apic_id) != lapic_id()) {
763 printf("SMP: cpuid = %d\n", cpuid);
764 printf("SMP: actual apic_id = %d\n", lapic_id());
765 printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
766 panic("cpuid mismatch! boom!!");
769 /* Initialize curthread. */
770 KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
771 PCPU_SET(curthread, PCPU_GET(idlethread));
775 mtx_lock_spin(&ap_boot_mtx);
777 /* Initialize the local APIC for IRQs. */
780 /* Set memory range attributes for this CPU to match the BSP */
785 CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
786 printf("SMP: AP CPU #%d Launched!\n", cpuid);
788 /* Determine if we are a logical CPU. */
789 /* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
790 if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
791 CPU_SET(cpuid, &logical_cpus_mask);
796 if (smp_cpus == mp_ncpus) {
797 /* enable IPIs, TLB shootdown, freezes, etc. */
798 atomic_store_rel_int(&smp_started, 1);
799 smp_active = 1; /* historic */
802 mtx_unlock_spin(&ap_boot_mtx);
804 /* Wait until all the APs are up. */
805 while (smp_started == 0)
808 /* Start per-CPU event timers. */
811 /* Enter the scheduler. */
814 panic("scheduler returned us to %s", __func__);
818 /*******************************************************************
819 * local functions and data
823 * We tell the I/O APIC code about all the CPUs that we want to receive
824 * interrupts. If we don't want certain CPUs to receive IRQs, we
825 * can simply not tell the I/O APIC code about them in this function.
826 * We also do not tell it about the BSP, since it tells itself about
827 * the BSP internally to work with UP kernels and on UP machines.
830 set_interrupt_apic_ids(void)
834 for (i = 0; i < MAXCPU; i++) {
835 apic_id = cpu_apic_ids[i];
838 if (cpu_info[apic_id].cpu_bsp)
840 if (cpu_info[apic_id].cpu_disabled)
843 /* Don't let hyperthreads service interrupts. */
844 if (hyperthreading_cpus > 1 &&
845 apic_id % hyperthreading_cpus != 0)
853 * Assign logical CPU IDs to local APICs.
860 TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
861 &hyperthreading_allowed);
863 /* Check for explicitly disabled CPUs. */
864 for (i = 0; i <= MAX_APIC_ID; i++) {
865 if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
868 if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
869 cpu_info[i].cpu_hyperthread = 1;
872 * Don't use an HT CPU if it has been disabled by a tunable.
875 if (hyperthreading_allowed == 0) {
876 cpu_info[i].cpu_disabled = 1;
881 /* Don't use this CPU if it has been disabled by a tunable. */
882 if (resource_disabled("lapic", i)) {
883 cpu_info[i].cpu_disabled = 1;
888 if (hyperthreading_allowed == 0 && hyperthreading_cpus > 1) {
889 hyperthreading_cpus = 0;
894 * Assign CPU IDs to local APIC IDs and disable any CPUs
895 * beyond MAXCPU. CPU 0 is always assigned to the BSP.
897 * To minimize confusion for userland, we attempt to number
898 * CPUs such that all threads and cores in a package are
899 * grouped together. For now we assume that the BSP is always
900 * the first thread in a package and just start adding APs
901 * starting with the BSP's APIC ID.
904 cpu_apic_ids[0] = boot_cpu_id;
905 apic_cpuids[boot_cpu_id] = 0;
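/*
 * The loop below walks the APIC ID space circularly, starting just above the
 * BSP's APIC ID and wrapping at MAX_APIC_ID, so that APs sharing the BSP's
 * package (which typically have adjacent APIC IDs) receive the lowest CPU IDs.
 */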
906 for (i = boot_cpu_id + 1; i != boot_cpu_id;
907 i == MAX_APIC_ID ? i = 0 : i++) {
908 if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
909 cpu_info[i].cpu_disabled)
912 if (mp_ncpus < MAXCPU) {
913 cpu_apic_ids[mp_ncpus] = i;
914 apic_cpuids[i] = mp_ncpus;
917 cpu_info[i].cpu_disabled = 1;
919 KASSERT(mp_maxid >= mp_ncpus - 1,
920 ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
925 * Start each AP in our list.
927 /* Lowest 1MB is already mapped: don't touch */
928 #define TMPMAP_START 1
935 u_int32_t mpbioswarmvec;
938 mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
940 /* install the AP 1st level boot code */
943 /* save the current value of the warm-start vector */
944 mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
946 outb(CMOS_REG, BIOS_RESET);
947 mpbiosreason = inb(CMOS_DATA);
950 /* set up temporary P==V mapping for AP boot */
951 /* XXX this is a hack, we should boot the AP on its own stack/PTD */
952 for (i = TMPMAP_START; i < NKPT; i++)
953 PTD[i] = PTD[KPTDI + i];
957 for (cpu = 1; cpu < mp_ncpus; cpu++) {
958 apic_id = cpu_apic_ids[cpu];
960 /* allocate and set up a boot stack data page */
962 (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
963 dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
964 /* set up a vector to our boot code */
965 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
966 *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
968 outb(CMOS_REG, BIOS_RESET);
969 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */
972 bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 4;
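/*
 * bootSTK points just below the top of this AP's bootstrap stack; the
 * trampoline presumably loads it as the AP's initial stack pointer before the
 * AP enters init_secondary().
 */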
975 /* attempt to start the Application Processor */
976 CHECK_INIT(99); /* setup checkpoints */
977 if (!start_ap(apic_id)) {
978 printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
979 CHECK_PRINT("trace"); /* show checkpoints */
980 /* better panic as the AP may be running loose */
981 printf("panic y/n? [y] ");
985 CHECK_PRINT("trace"); /* show checkpoints */
987 CPU_SET(cpu, &all_cpus); /* record AP in CPU map */
990 /* restore the warmstart vector */
991 *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
994 outb(CMOS_REG, BIOS_RESET);
995 outb(CMOS_DATA, mpbiosreason);
998 /* Undo V==P hack from above */
999 for (i = TMPMAP_START; i < NKPT; i++)
1001 pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
1003 /* number of APs actually started */
1008 * load the 1st level AP boot code into base memory.
1011 /* targets for relocation */
1012 extern void bigJump(void);
1013 extern void bootCodeSeg(void);
1014 extern void bootDataSeg(void);
1015 extern void MPentry(void);
1016 extern u_int MP_GDT;
1017 extern u_int mp_gdtbase;
1020 install_ap_tramp(void)
1023 int size = *(int *) ((u_long) & bootMP_size);
1024 vm_offset_t va = boot_address + KERNBASE;
1025 u_char *src = (u_char *) ((u_long) bootMP);
1026 u_char *dst = (u_char *) va;
1027 u_int boot_base = (u_int) bootMP;
1032 KASSERT(size <= PAGE_SIZE,
1033 ("'size' does not fit into PAGE_SIZE, as expected."));
1034 pmap_kenter(va, boot_address);
1035 pmap_invalidate_page (kernel_pmap, va);
1036 for (x = 0; x < size; ++x)
1040 * Modify addresses in the code we just moved to basemem. Unfortunately we
1041 * need fairly detailed info about mpboot.s for this to work. Changes
1042 * to mpboot.s might require changes here.
1045 /* boot code is located in KERNEL space */
1046 dst = (u_char *) va;
1048 /* modify the lgdt arg */
1049 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
1050 *dst32 = boot_address + ((u_int) & MP_GDT - boot_base);
1052 /* modify the ljmp target for MPentry() */
1053 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
1054 *dst32 = ((u_int) MPentry - KERNBASE);
1056 /* modify the target for boot code segment */
1057 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
1058 dst8 = (u_int8_t *) (dst16 + 1);
1059 *dst16 = (u_int) boot_address & 0xffff;
1060 *dst8 = ((u_int) boot_address >> 16) & 0xff;
1062 /* modify the target for boot data segment */
1063 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
1064 dst8 = (u_int8_t *) (dst16 + 1);
1065 *dst16 = (u_int) boot_address & 0xffff;
1066 *dst8 = ((u_int) boot_address >> 16) & 0xff;
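/*
 * The word/byte pairs patched above write boot_address as a 24-bit base, low
 * word first and then the third byte; these appear to be the base fields of
 * the trampoline's temporary code and data segment descriptors in mpboot.s's
 * MP_GDT.
 */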
1070 * This function starts the AP (application processor) identified
1071 * by its APIC ID, 'apic_id'. It does quite a "song and dance"
1072 * to accomplish this. This is necessary because of the nuances
1073 * of the different hardware we might encounter. It isn't pretty,
1074 * but it seems to work.
1077 start_ap(int apic_id)
1082 /* calculate the vector */
1083 vector = (boot_address >> 12) & 0xff;
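/*
 * The STARTUP IPI vector is the physical page number of the trampoline: the
 * AP begins executing in real mode at physical address (vector << 12), which
 * is why boot_address must be page aligned and located in base memory.
 */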
1085 /* used as a watchpoint to signal AP startup */
1088 ipi_startup(apic_id, vector);
1090 /* Wait up to 5 seconds for it to start. */
1091 for (ms = 0; ms < 5000; ms++) {
1093 return (1); /* return SUCCESS */
1096 return (0); /* return FAILURE */
1099 #ifdef COUNT_XINVLTLB_HITS
1100 u_int xhits_gbl[MAXCPU];
1101 u_int xhits_pg[MAXCPU];
1102 u_int xhits_rng[MAXCPU];
1103 static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
1104 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
1105 sizeof(xhits_gbl), "IU", "");
1106 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
1107 sizeof(xhits_pg), "IU", "");
1108 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
1109 sizeof(xhits_rng), "IU", "");
1114 u_int ipi_range_size;
1115 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
1116 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
1117 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
1118 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
1121 u_int ipi_masked_global;
1122 u_int ipi_masked_page;
1123 u_int ipi_masked_range;
1124 u_int ipi_masked_range_size;
1125 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
1126 &ipi_masked_global, 0, "");
1127 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
1128 &ipi_masked_page, 0, "");
1129 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
1130 &ipi_masked_range, 0, "");
1131 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
1132 &ipi_masked_range_size, 0, "");
1133 #endif /* COUNT_XINVLTLB_HITS */
1136 * Send the INIT and STARTUP IPI sequence used to start an AP.
1139 ipi_startup(int apic_id, int vector)
1143 * First we do an INIT IPI: this INIT IPI might be run, resetting
1144 * and running the target CPU, OR this INIT IPI might be latched (P5
1145 * bug), leaving the CPU waiting for a STARTUP IPI, OR this INIT IPI might be ignored.
1148 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1149 APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
1151 DELAY(10000); /* wait ~10 ms */
1154 * Next we do a STARTUP IPI: the previous INIT IPI might still be
1155 * latched (P5 bug), in which case this 1st STARTUP IPI terminates
1156 * immediately and the previously started INIT IPI continues, OR
1157 * the previous INIT IPI has already run and this STARTUP IPI will
1158 * run, OR the previous INIT IPI was ignored and this STARTUP IPI will run.
1161 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1162 APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1165 DELAY(200); /* wait ~200 us */
1168 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
1169 * the previous STARTUP IPI was cancelled by a latched INIT IPI, OR
1170 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
1171 * recognized after a hardware RESET or INIT IPI.
1173 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1174 APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1177 DELAY(200); /* wait ~200 us */
1181 * Send an IPI to the specified CPU, handling the bitmap logic.
1184 ipi_send_cpu(int cpu, u_int ipi)
1186 u_int bitmap, old_pending, new_pending;
1188 KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
1190 if (IPI_IS_BITMAPED(ipi)) {
1192 ipi = IPI_BITMAP_VECTOR;
1194 old_pending = cpu_ipi_pending[cpu];
1195 new_pending = old_pending | bitmap;
1196 } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
1197 old_pending, new_pending));
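/*
 * Bitmapped IPIs share the single IPI_BITMAP_VECTOR: the pending bit is
 * published in cpu_ipi_pending[] before the vector is sent, and
 * ipi_bitmap_handler() later reads and clears the whole word atomically.
 */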
1201 lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
1205 * Flush the TLB on all other CPUs.
1208 smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
1212 ncpu = mp_ncpus - 1; /* does not shoot down self */
1214 return; /* no other CPUs */
1215 if (!(read_eflags() & PSL_I))
1216 panic("%s: interrupts disabled", __func__);
1217 mtx_lock_spin(&smp_ipi_mtx);
1218 smp_tlb_addr1 = addr1;
1219 smp_tlb_addr2 = addr2;
1220 atomic_store_rel_int(&smp_tlb_wait, 0);
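/*
 * Each target CPU's invalidation handler is expected to bump smp_tlb_wait
 * once it has flushed, so the spin below completes when all ncpu other CPUs
 * have acknowledged the shootdown.
 */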
1221 ipi_all_but_self(vector);
1222 while (smp_tlb_wait < ncpu)
1224 mtx_unlock_spin(&smp_ipi_mtx);
1228 smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
1230 int cpu, ncpu, othercpus;
1232 othercpus = mp_ncpus - 1;
1233 if (CPU_ISFULLSET(&mask)) {
1237 CPU_CLR(PCPU_GET(cpuid), &mask);
1238 if (CPU_EMPTY(&mask))
1241 if (!(read_eflags() & PSL_I))
1242 panic("%s: interrupts disabled", __func__);
1243 mtx_lock_spin(&smp_ipi_mtx);
1244 smp_tlb_addr1 = addr1;
1245 smp_tlb_addr2 = addr2;
1246 atomic_store_rel_int(&smp_tlb_wait, 0);
1247 if (CPU_ISFULLSET(&mask)) {
1249 ipi_all_but_self(vector);
1252 while ((cpu = CPU_FFS(&mask)) != 0) {
1254 CPU_CLR(cpu, &mask);
1255 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
1257 ipi_send_cpu(cpu, vector);
1261 while (smp_tlb_wait < ncpu)
1263 mtx_unlock_spin(&smp_ipi_mtx);
1267 smp_cache_flush(void)
1271 smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
1279 smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
1280 #ifdef COUNT_XINVLTLB_HITS
1287 smp_invlpg(vm_offset_t addr)
1291 smp_tlb_shootdown(IPI_INVLPG, addr, 0);
1292 #ifdef COUNT_XINVLTLB_HITS
1299 smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
1303 smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
1304 #ifdef COUNT_XINVLTLB_HITS
1306 ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
1312 smp_masked_invltlb(cpuset_t mask)
1316 smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
1317 #ifdef COUNT_XINVLTLB_HITS
1318 ipi_masked_global++;
1324 smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
1328 smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
1329 #ifdef COUNT_XINVLTLB_HITS
1336 smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
1340 smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
1341 #ifdef COUNT_XINVLTLB_HITS
1343 ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
1349 ipi_bitmap_handler(struct trapframe frame)
1351 struct trapframe *oldframe;
1353 int cpu = PCPU_GET(cpuid);
1358 td->td_intr_nesting_level++;
1359 oldframe = td->td_intr_frame;
1360 td->td_intr_frame = &frame;
1361 ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
1362 if (ipi_bitmap & (1 << IPI_PREEMPT)) {
1364 (*ipi_preempt_counts[cpu])++;
1368 if (ipi_bitmap & (1 << IPI_AST)) {
1370 (*ipi_ast_counts[cpu])++;
1372 /* Nothing to do for AST */
1374 if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
1376 (*ipi_hardclock_counts[cpu])++;
1380 td->td_intr_frame = oldframe;
1381 td->td_intr_nesting_level--;
1386 * Send an IPI to a set of CPUs.
1389 ipi_selected(cpuset_t cpus, u_int ipi)
1394 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1395 * of help in order to understand what the source is.
1396 * Set the mask of receiving CPUs for this purpose.
1398 if (ipi == IPI_STOP_HARD)
1399 CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);
1401 while ((cpu = CPU_FFS(&cpus)) != 0) {
1403 CPU_CLR(cpu, &cpus);
1404 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1405 ipi_send_cpu(cpu, ipi);
1410 * Send an IPI to a specific CPU.
1413 ipi_cpu(int cpu, u_int ipi)
1417 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1418 * of help in order to understand what the source is.
1419 * Set the mask of receiving CPUs for this purpose.
1421 if (ipi == IPI_STOP_HARD)
1422 CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);
1424 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1425 ipi_send_cpu(cpu, ipi);
1429 * Send an IPI to all CPUs EXCEPT myself.
1432 ipi_all_but_self(u_int ipi)
1434 cpuset_t other_cpus;
1436 other_cpus = all_cpus;
1437 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
1438 if (IPI_IS_BITMAPED(ipi)) {
1439 ipi_selected(other_cpus, ipi);
1444 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1445 * of help in order to understand what the source is.
1446 * Set the mask of receiving CPUs for this purpose.
1448 if (ipi == IPI_STOP_HARD)
1449 CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);
1451 CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
1452 lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
1461 * Since there is no simple way to know an NMI's
1462 * source, if the bit for the current CPU is set in
1463 * the global pending bitword, an IPI_STOP_HARD has been issued
1464 * and should be handled.
1466 cpuid = PCPU_GET(cpuid);
1467 if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
1470 CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
1476 * Handle an IPI_STOP by saving our current context and spinning until we are resumed.
1480 cpustop_handler(void)
1484 cpu = PCPU_GET(cpuid);
1486 savectx(&stoppcbs[cpu]);
1488 /* Indicate that we are stopped */
1489 CPU_SET_ATOMIC(cpu, &stopped_cpus);
1491 /* Wait for restart */
1492 while (!CPU_ISSET(cpu, &started_cpus))
1495 CPU_CLR_ATOMIC(cpu, &started_cpus);
1496 CPU_CLR_ATOMIC(cpu, &stopped_cpus);
1498 if (cpu == 0 && cpustop_restartfunc != NULL) {
1499 cpustop_restartfunc();
1500 cpustop_restartfunc = NULL;
1505 * Handle an IPI_SUSPEND by saving our current context and spinning until we are resumed.
1509 cpususpend_handler(void)
1513 cpu = PCPU_GET(cpuid);
1515 if (savectx(susppcbs[cpu])) {
1517 CPU_SET_ATOMIC(cpu, &suspended_cpus);
1520 PCPU_SET(switchtime, 0);
1521 PCPU_SET(switchticks, ticks);
1523 /* Indicate that we are resumed */
1524 CPU_CLR_ATOMIC(cpu, &suspended_cpus);
1527 /* Wait for resume */
1528 while (!CPU_ISSET(cpu, &started_cpus))
1531 /* Resume MCA and local APIC */
1535 CPU_CLR_ATOMIC(cpu, &started_cpus);
1538 * This is called once the rest of the system is up and running and we're
1539 * ready to let the APs out of the pen.
1542 release_aps(void *dummy __unused)
1547 atomic_store_rel_int(&aps_ready, 1);
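/*
 * init_secondary() spins until aps_ready is set before entering the
 * scheduler, so this single store releases every parked AP; the last AP to
 * come up sets smp_started, which ends the wait below.
 */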
1548 while (smp_started == 0)
1551 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
1555 * Set up interrupt counters for IPI handlers.
1558 mp_ipi_intrcnt(void *dummy)
1564 snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
1565 intrcnt_add(buf, &ipi_invltlb_counts[i]);
1566 snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
1567 intrcnt_add(buf, &ipi_invlrng_counts[i]);
1568 snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
1569 intrcnt_add(buf, &ipi_invlpg_counts[i]);
1570 snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
1571 intrcnt_add(buf, &ipi_invlcache_counts[i]);
1572 snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
1573 intrcnt_add(buf, &ipi_preempt_counts[i]);
1574 snprintf(buf, sizeof(buf), "cpu%d:ast", i);
1575 intrcnt_add(buf, &ipi_ast_counts[i]);
1576 snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
1577 intrcnt_add(buf, &ipi_rendezvous_counts[i]);
1578 snprintf(buf, sizeof(buf), "cpu%d:lazypmap", i);
1579 intrcnt_add(buf, &ipi_lazypmap_counts[i]);
1580 snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
1581 intrcnt_add(buf, &ipi_hardclock_counts[i]);
1584 SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);