2 * Copyright (c) 1996, by Steve Passe
3 * Copyright (c) 2003, by Peter Wemm
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. The name of the developer may NOT be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
35 #include "opt_kstack_pages.h"
37 #include "opt_sched.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
43 #include <sys/cons.h> /* cngetc() */
44 #include <sys/cpuset.h>
49 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/memrange.h>
54 #include <sys/mutex.h>
57 #include <sys/sched.h>
59 #include <sys/sysctl.h>
62 #include <vm/vm_param.h>
64 #include <vm/vm_kern.h>
65 #include <vm/vm_extern.h>
67 #include <x86/apicreg.h>
68 #include <machine/clock.h>
69 #include <machine/cputypes.h>
71 #include <machine/md_var.h>
72 #include <machine/pcb.h>
73 #include <machine/psl.h>
74 #include <machine/smp.h>
75 #include <machine/specialreg.h>
76 #include <machine/cpu.h>
78 #define WARMBOOT_TARGET 0
79 #define WARMBOOT_OFF (KERNBASE + 0x0467)
80 #define WARMBOOT_SEG (KERNBASE + 0x0469)
82 #define CMOS_REG (0x70)
83 #define CMOS_DATA (0x71)
84 #define BIOS_RESET (0x0f)
85 #define BIOS_WARM (0x0a)
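/*
 * Background note (standard PC BIOS behaviour, assumed rather than stated
 * in this file): writing BIOS_WARM into the CMOS shutdown-status register
 * selected by BIOS_RESET makes the BIOS skip POST after an INIT and jump
 * through the warm-boot vector at 0x40:0x67; WARMBOOT_OFF and WARMBOOT_SEG
 * are the offset and segment words of that vector as mapped at KERNBASE,
 * which lets the BSP point the APs at their startup trampoline.
 */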
87 /* lock region used by kernel profiling */
90 int mp_naps; /* # of Application Processors (APs) */
91 int boot_cpu_id = -1; /* designated BSP */
93 extern struct pcpu __pcpu[];
95 /* AP uses this during bootstrap. Do not staticize. */
99 /* Free these after use */
100 void *bootstacks[MAXCPU];
103 struct pcb stoppcbs[MAXCPU];
104 struct susppcb **susppcbs;
107 /* Interrupt counts. */
108 static u_long *ipi_preempt_counts[MAXCPU];
109 static u_long *ipi_ast_counts[MAXCPU];
110 u_long *ipi_invltlb_counts[MAXCPU];
111 u_long *ipi_invlrng_counts[MAXCPU];
112 u_long *ipi_invlpg_counts[MAXCPU];
113 u_long *ipi_invlcache_counts[MAXCPU];
114 u_long *ipi_rendezvous_counts[MAXCPU];
115 static u_long *ipi_hardclock_counts[MAXCPU];
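/* These counters are registered with intrcnt_add() by mp_ipi_intrcnt() below. */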
118 /* Default cpu_ops implementation. */
119 struct cpu_ops cpu_ops;
122 * Local data and functions.
125 static volatile cpuset_t ipi_stop_nmi_pending;
127 /* Used to hold the APs until we are ready to release them. */
128 struct mtx ap_boot_mtx;
130 /* Set to 1 once we're ready to let the APs out of the pen. */
131 volatile int aps_ready = 0;
134 * Store data from cpu_add() until later in the boot when we actually set up
137 struct cpu_info cpu_info[MAX_APIC_ID + 1];
138 int apic_cpuids[MAX_APIC_ID + 1];
139 int cpu_apic_ids[MAXCPU];
141 /* Holds pending bitmap-based IPIs per CPU. */
142 volatile u_int cpu_ipi_pending[MAXCPU];
144 static void release_aps(void *dummy);
145 static void cpustop_handler_post(u_int cpu);
147 static int hyperthreading_allowed = 1;
148 SYSCTL_INT(_machdep, OID_AUTO, hyperthreading_allowed, CTLFLAG_RDTUN,
149 &hyperthreading_allowed, 0, "Use Intel HTT logical CPUs");
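/*
 * CTLFLAG_RDTUN makes this a read-only sysctl that can be set as a boot-time
 * tunable; for example (illustrative usage, not part of this file), putting
 * machdep.hyperthreading_allowed="0" in loader.conf leaves the HTT logical
 * CPUs disabled when CPU IDs are assigned below.
 */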
151 static struct topo_node topo_root;
153 static int pkg_id_shift;
154 static int core_id_shift;
155 static int disabled_cpus;
160 } static caches[MAX_CACHE_LEVELS];
163 mem_range_AP_init(void)
166 if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
167 mem_range_softc.mr_op->initAP(&mem_range_softc);
171 * Round up to the next power of two, if necessary, and then take log2.
173 * Returns -1 if argument is zero.
179 return (fls(x << (1 - powerof2(x))) - 1);
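/*
 * Illustrative values (added for clarity, easy to verify by hand):
 * mask_width(1) == 0, mask_width(2) == 1, mask_width(3) == 2 and
 * mask_width(8) == 3, i.e. the number of APIC ID bits needed to number
 * "x" sibling components.
 */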
183 * Add a cache level to the cache topology description.
186 add_deterministic_cache(int type, int level, int share_count)
192 printf("unexpected cache type %d\n", type);
195 if (type == 2) /* ignore instruction cache */
197 if (level == 0 || level > MAX_CACHE_LEVELS) {
198 printf("unexpected cache level %d\n", type);
202 if (caches[level - 1].present) {
203 printf("WARNING: multiple entries for L%u data cache\n", level);
204 printf("%u => %u\n", caches[level - 1].id_shift,
205 mask_width(share_count));
207 caches[level - 1].id_shift = mask_width(share_count);
208 caches[level - 1].present = 1;
210 if (caches[level - 1].id_shift > pkg_id_shift) {
211 printf("WARNING: L%u data cache covers more "
212 "APIC IDs than a package (%u > %u)\n", level,
213 caches[level - 1].id_shift, pkg_id_shift);
214 caches[level - 1].id_shift = pkg_id_shift;
216 if (caches[level - 1].id_shift < core_id_shift) {
217 printf("WARNING: L%u data cache covers fewer "
218 "APIC IDs than a core (%u < %u)\n", level,
219 caches[level - 1].id_shift, core_id_shift);
220 caches[level - 1].id_shift = core_id_shift;
227 * Determine topology of processing units and caches for AMD CPUs.
229 * - AMD CPUID Specification (Publication # 25481)
230 * - BKDG for AMD NPT Family 0Fh Processors (Publication # 32559)
231 * - BKDG For AMD Family 10h Processors (Publication # 31116)
232 * - BKDG For AMD Family 15h Models 00h-0Fh Processors (Publication # 42301)
233 * - BKDG For AMD Family 16h Models 00h-0Fh Processors (Publication # 48751)
241 int nodes_per_socket;
246 /* No multi-core capability. */
247 if ((amd_feature2 & AMDID2_CMP) == 0)
250 /* For families 10h and newer. */
251 pkg_id_shift = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
252 AMDID_COREID_SIZE_SHIFT;
254 /* For 0Fh family. */
255 if (pkg_id_shift == 0)
257 mask_width((cpu_procinfo2 & AMDID_CMP_CORES) + 1);
260 * Families prior to 16h define the following value as
261 * cores per compute unit and we don't really care about the AMD
262 * compute units at the moment. Perhaps we should treat them as
263 * cores and cores within the compute units as hardware threads,
264 * but that's up for debate.
265 * Later families define the value as threads per compute unit,
266 * so we are following AMD's nomenclature here.
268 if ((amd_feature2 & AMDID2_TOPOLOGY) != 0 &&
269 CPUID_TO_FAMILY(cpu_id) >= 0x16) {
270 cpuid_count(0x8000001e, 0, p);
271 share_count = ((p[1] >> 8) & 0xff) + 1;
272 core_id_shift = mask_width(share_count);
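/*
 * Worked example for a hypothetical part (illustration only): with two
 * threads per compute unit, leaf 0x8000001e reports EBX[15:8] == 1, so
 * share_count == 2 and core_id_shift == 1, meaning the low APIC ID bit
 * selects the thread within a core.
 */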
275 if ((amd_feature2 & AMDID2_TOPOLOGY) != 0) {
277 cpuid_count(0x8000001d, i, p);
279 level = (p[0] >> 5) & 0x7;
280 share_count = 1 + ((p[0] >> 14) & 0xfff);
282 if (!add_deterministic_cache(type, level, share_count))
286 if (cpu_exthigh >= 0x80000005) {
287 cpuid_count(0x80000005, 0, p);
288 if (((p[2] >> 24) & 0xff) != 0) {
289 caches[0].id_shift = 0;
290 caches[0].present = 1;
293 if (cpu_exthigh >= 0x80000006) {
294 cpuid_count(0x80000006, 0, p);
295 if (((p[2] >> 16) & 0xffff) != 0) {
296 caches[1].id_shift = 0;
297 caches[1].present = 1;
299 if (((p[3] >> 18) & 0x3fff) != 0) {
300 nodes_per_socket = 1;
301 if ((amd_feature2 & AMDID2_NODE_ID) != 0) {
303 * Handle multi-node processors that
304 * have multiple chips, each with its
305 * own L3 cache, on the same die.
307 v = rdmsr(0xc001100c);
308 nodes_per_socket = 1 + ((v >> 3) & 0x7);
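/*
 * 0xc001100c is assumed to be the AMD NodeId MSR; the code treats
 * bits 5:3 as the number of nodes per processor minus one.
 */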
311 pkg_id_shift - mask_width(nodes_per_socket);
312 caches[2].present = 1;
319 * Determine topology of processing units for Intel CPUs
320 * using CPUID Leaf 1 and Leaf 4, if supported.
322 * - Intel 64 Architecture Processor Topology Enumeration
323 * - Intel 64 and IA-32 Architectures Software Developer’s Manual,
324 * Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
325 * FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
328 topo_probe_intel_0x4(void)
334 /* Both zero and one here mean one logical processor per package. */
335 max_logical = (cpu_feature & CPUID_HTT) != 0 ?
336 (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
337 if (max_logical <= 1)
340 if (cpu_high >= 0x4) {
341 cpuid_count(0x04, 0, p);
342 max_cores = ((p[0] >> 26) & 0x3f) + 1;
346 core_id_shift = mask_width(max_logical/max_cores);
347 KASSERT(core_id_shift >= 0,
348 ("intel topo: max_cores > max_logical\n"));
349 pkg_id_shift = core_id_shift + mask_width(max_cores);
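/*
 * Illustrative layout for a hypothetical CPU (not taken from this file):
 * with max_logical == 8 and max_cores == 4, core_id_shift == 1 and
 * pkg_id_shift == 3, so APIC ID bit 0 selects the SMT thread, bits 2:1
 * select the core and the remaining bits select the package.
 */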
353 * Determine topology of processing units for Intel CPUs
354 * using CPUID Leaf 11, if supported.
356 * - Intel 64 Architecture Processor Topology Enumeration
357 * - Intel 64 and IA-32 Architectures Software Developer’s Manual,
358 * Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
359 * FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
362 topo_probe_intel_0xb(void)
369 /* Fall back if CPUID leaf 11 doesn't really exist. */
370 cpuid_count(0x0b, 0, p);
372 topo_probe_intel_0x4();
376 /* We only support three levels for now. */
378 cpuid_count(0x0b, i, p);
381 type = (p[2] >> 8) & 0xff;
386 /* TODO: check for duplicate (re-)assignment */
387 if (type == CPUID_TYPE_SMT)
388 core_id_shift = bits;
389 else if (type == CPUID_TYPE_CORE)
392 printf("unknown CPU level type %d\n", type);
395 if (pkg_id_shift < core_id_shift) {
396 printf("WARNING: core covers more APIC IDs than a package\n");
397 core_id_shift = pkg_id_shift;
402 * Determine topology of caches for Intel CPUs.
404 * - Intel 64 Architecture Processor Topology Enumeration
405 * - Intel 64 and IA-32 Architectures Software Developer’s Manual
406 * Volume 2A: Instruction Set Reference, A-M,
410 topo_probe_intel_caches(void)
418 if (cpu_high < 0x4) {
420 * Available cache levels and sizes can be determined
421 * via CPUID leaf 2, but that requires a huge table of hardcoded
422 * values, so for now just assume L1 and L2 caches potentially
423 * shared only by HTT processing units, if HTT is present.
425 caches[0].id_shift = pkg_id_shift;
426 caches[0].present = 1;
427 caches[1].id_shift = pkg_id_shift;
428 caches[1].present = 1;
433 cpuid_count(0x4, i, p);
435 level = (p[0] >> 5) & 0x7;
436 share_count = 1 + ((p[0] >> 14) & 0xfff);
438 if (!add_deterministic_cache(type, level, share_count))
444 * Determine topology of processing units and caches for Intel CPUs.
446 * - Intel 64 Architecture Processor Topology Enumeration
449 topo_probe_intel(void)
453 * Note that the 0x1 <= cpu_high < 4 case should be
454 * compatible with topo_probe_intel_0x4() logic when
455 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1)
456 * or it should trigger the fallback otherwise.
459 topo_probe_intel_0xb();
460 else if (cpu_high >= 0x1)
461 topo_probe_intel_0x4();
463 topo_probe_intel_caches();
467 * Topology information is queried only on the BSP, on which this
468 * code runs and for which it can query CPUID information.
469 * Then topology is extrapolated on all packages using an
470 * assumption that APIC ID to hardware component ID mapping is
472 * That doesn't necessarily imply that the topology is uniform.
477 static int cpu_topo_probed = 0;
478 struct x86_topo_layer {
482 } topo_layers[MAX_CACHE_LEVELS + 3];
483 struct topo_node *parent;
484 struct topo_node *node;
493 CPU_ZERO(&logical_cpus_mask);
497 else if (cpu_vendor_id == CPU_VENDOR_AMD)
499 else if (cpu_vendor_id == CPU_VENDOR_INTEL)
502 KASSERT(pkg_id_shift >= core_id_shift,
503 ("bug in APIC topology discovery"));
506 bzero(topo_layers, sizeof(topo_layers));
508 topo_layers[nlayers].type = TOPO_TYPE_PKG;
509 topo_layers[nlayers].id_shift = pkg_id_shift;
511 printf("Package ID shift: %u\n", topo_layers[nlayers].id_shift);
515 * Consider all caches to be within a package/chip
516 * and "in front" of all sub-components like
517 * cores and hardware threads.
519 for (i = MAX_CACHE_LEVELS - 1; i >= 0; --i) {
520 if (caches[i].present) {
521 KASSERT(caches[i].id_shift <= pkg_id_shift,
522 ("bug in APIC topology discovery"));
523 KASSERT(caches[i].id_shift >= core_id_shift,
524 ("bug in APIC topology discovery"));
526 topo_layers[nlayers].type = TOPO_TYPE_CACHE;
527 topo_layers[nlayers].subtype = i + 1;
528 topo_layers[nlayers].id_shift = caches[i].id_shift;
530 printf("L%u cache ID shift: %u\n",
531 topo_layers[nlayers].subtype,
532 topo_layers[nlayers].id_shift);
537 if (pkg_id_shift > core_id_shift) {
538 topo_layers[nlayers].type = TOPO_TYPE_CORE;
539 topo_layers[nlayers].id_shift = core_id_shift;
541 printf("Core ID shift: %u\n",
542 topo_layers[nlayers].id_shift);
546 topo_layers[nlayers].type = TOPO_TYPE_PU;
547 topo_layers[nlayers].id_shift = 0;
550 topo_init_root(&topo_root);
551 for (i = 0; i <= MAX_APIC_ID; ++i) {
552 if (!cpu_info[i].cpu_present)
556 for (layer = 0; layer < nlayers; ++layer) {
557 node_id = i >> topo_layers[layer].id_shift;
558 parent = topo_add_node_by_hwid(parent, node_id,
559 topo_layers[layer].type,
560 topo_layers[layer].subtype);
565 for (layer = 0; layer < nlayers; ++layer) {
566 node_id = boot_cpu_id >> topo_layers[layer].id_shift;
567 node = topo_find_node_by_hwid(parent, node_id,
568 topo_layers[layer].type,
569 topo_layers[layer].subtype);
570 topo_promote_child(node);
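/*
 * Promoting the BSP's node at each layer keeps the boot CPU first in
 * topology order, so the CPU ID assignment below hands it CPU 0.
 */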
578 * Assign logical CPU IDs to local APICs.
583 struct topo_node *node;
586 smt_mask = (1u << core_id_shift) - 1;
589 * Assign CPU IDs to local APIC IDs and disable any CPUs
590 * beyond MAXCPU. CPU 0 is always assigned to the BSP.
593 TOPO_FOREACH(node, &topo_root) {
594 if (node->type != TOPO_TYPE_PU)
597 if ((node->hwid & smt_mask) != (boot_cpu_id & smt_mask))
598 cpu_info[node->hwid].cpu_hyperthread = 1;
600 if (resource_disabled("lapic", node->hwid)) {
601 if (node->hwid != boot_cpu_id)
602 cpu_info[node->hwid].cpu_disabled = 1;
604 printf("Cannot disable BSP, APIC ID = %d\n",
608 if (!hyperthreading_allowed &&
609 cpu_info[node->hwid].cpu_hyperthread)
610 cpu_info[node->hwid].cpu_disabled = 1;
612 if (mp_ncpus >= MAXCPU)
613 cpu_info[node->hwid].cpu_disabled = 1;
615 if (cpu_info[node->hwid].cpu_disabled) {
620 cpu_apic_ids[mp_ncpus] = node->hwid;
621 apic_cpuids[node->hwid] = mp_ncpus;
622 topo_set_pu_id(node, mp_ncpus);
626 KASSERT(mp_maxid >= mp_ncpus - 1,
627 ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
632 * Print various information about the SMP system hardware and setup.
635 cpu_mp_announce(void)
637 struct topo_node *node;
638 const char *hyperthread;
643 printf("FreeBSD/SMP: ");
644 if (topo_analyze(&topo_root, 1, &pkg_count,
645 &cores_per_pkg, &thrs_per_core)) {
646 printf("%d package(s)", pkg_count);
647 if (cores_per_pkg > 0)
648 printf(" x %d core(s)", cores_per_pkg);
649 if (thrs_per_core > 1)
650 printf(" x %d hardware threads", thrs_per_core);
652 printf("Non-uniform topology");
657 printf("FreeBSD/SMP Online: ");
658 if (topo_analyze(&topo_root, 0, &pkg_count,
659 &cores_per_pkg, &thrs_per_core)) {
660 printf("%d package(s)", pkg_count);
661 if (cores_per_pkg > 0)
662 printf(" x %d core(s)", cores_per_pkg);
663 if (thrs_per_core > 1)
664 printf(" x %d hardware threads", thrs_per_core);
666 printf("Non-uniform topology");
674 TOPO_FOREACH(node, &topo_root) {
675 switch (node->type) {
677 printf("Package HW ID = %u\n", node->hwid);
680 printf("\tCore HW ID = %u\n", node->hwid);
683 if (cpu_info[node->hwid].cpu_hyperthread)
688 if (node->subtype == 0)
689 printf("\t\tCPU (AP%s): APIC ID: %u"
690 "(disabled)\n", hyperthread, node->hwid);
691 else if (node->id == 0)
692 printf("\t\tCPU0 (BSP): APIC ID: %u\n",
695 printf("\t\tCPU%u (AP%s): APIC ID: %u\n",
696 node->id, hyperthread, node->hwid);
706 * Add a scheduling group, a group of logical processors sharing
707 * a particular cache (and thus having an affinity), to the scheduling
709 * This function recursively works on lower-level caches.
712 x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
714 struct topo_node *node;
719 KASSERT(root->type == TOPO_TYPE_SYSTEM || root->type == TOPO_TYPE_CACHE,
720 ("x86topo_add_sched_group: bad type: %u", root->type));
721 CPU_COPY(&root->cpuset, &cg_root->cg_mask);
722 cg_root->cg_count = root->cpu_count;
723 if (root->type == TOPO_TYPE_SYSTEM)
724 cg_root->cg_level = CG_SHARE_NONE;
726 cg_root->cg_level = root->subtype;
729 * Check how many core nodes we have under the given root node.
730 * If we have multiple logical processors, but not multiple
731 * cores, then those processors must be hardware threads.
735 while (node != NULL) {
736 if (node->type != TOPO_TYPE_CORE) {
737 node = topo_next_node(root, node);
742 node = topo_next_nonchild_node(root, node);
745 if (cg_root->cg_level != CG_SHARE_NONE &&
746 root->cpu_count > 1 && ncores < 2)
747 cg_root->cg_flags = CG_FLAG_SMT;
750 * Find out how many cache nodes we have under the given root node.
751 * We ignore cache nodes that cover all the same processors as the
752 * root node. Also, we do not descend below found cache nodes.
753 * That is, we count top-level "non-redundant" caches under the root
758 while (node != NULL) {
759 if (node->type != TOPO_TYPE_CACHE ||
760 (root->type != TOPO_TYPE_SYSTEM &&
761 CPU_CMP(&node->cpuset, &root->cpuset) == 0)) {
762 node = topo_next_node(root, node);
766 node = topo_next_nonchild_node(root, node);
769 cg_root->cg_child = smp_topo_alloc(nchildren);
770 cg_root->cg_children = nchildren;
773 * Now find again the same cache nodes as above and recursively
774 * build scheduling topologies for them.
778 while (node != NULL) {
779 if (node->type != TOPO_TYPE_CACHE ||
780 (root->type != TOPO_TYPE_SYSTEM &&
781 CPU_CMP(&node->cpuset, &root->cpuset) == 0)) {
782 node = topo_next_node(root, node);
785 cg_root->cg_child[i].cg_parent = cg_root;
786 x86topo_add_sched_group(node, &cg_root->cg_child[i]);
788 node = topo_next_nonchild_node(root, node);
793 * Build the MI scheduling topology from the discovered hardware topology.
798 struct cpu_group *cg_root;
801 return (smp_topo_none());
803 cg_root = smp_topo_alloc(1);
804 x86topo_add_sched_group(&topo_root, cg_root);
810 * Add a logical CPU to the topology.
813 cpu_add(u_int apic_id, char boot_cpu)
816 if (apic_id > MAX_APIC_ID) {
817 panic("SMP: APIC ID %d too high", apic_id);
820 KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
822 cpu_info[apic_id].cpu_present = 1;
824 KASSERT(boot_cpu_id == -1,
825 ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
827 boot_cpu_id = apic_id;
828 cpu_info[apic_id].cpu_bsp = 1;
830 if (mp_ncpus < MAXCPU) {
832 mp_maxid = mp_ncpus - 1;
835 printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
840 cpu_mp_setmaxid(void)
844 * mp_ncpus and mp_maxid should be already set by calls to cpu_add().
845 * If there were no calls to cpu_add() assume this is a UP system.
856 * Always record BSP in CPU map so that the mbuf init code works
859 CPU_SETOF(0, &all_cpus);
860 return (mp_ncpus > 1);
864 * AP CPUs call this to initialize themselves.
867 init_secondary_tail(void)
872 * On real hardware, switch to x2apic mode if possible. Do it
873 * after aps_ready has been signalled, to avoid changing the
874 * mode while the BSP might still want to send us an IPI
875 * (the second startup IPI is ignored on modern hardware, etc.).
879 /* Initialize the PAT MSR. */
882 /* set up CPU registers and state */
888 /* set up FPU state on the AP */
895 if (cpu_ops.cpu_init)
898 /* A quick check from sanity claus */
899 cpuid = PCPU_GET(cpuid);
900 if (PCPU_GET(apic_id) != lapic_id()) {
901 printf("SMP: cpuid = %d\n", cpuid);
902 printf("SMP: actual apic_id = %d\n", lapic_id());
903 printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
904 panic("cpuid mismatch! boom!!");
907 /* Initialize curthread. */
908 KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
909 PCPU_SET(curthread, PCPU_GET(idlethread));
913 mtx_lock_spin(&ap_boot_mtx);
915 /* Initialize the local APIC for IRQs. */
918 /* Set memory range attributes for this CPU to match the BSP */
923 CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
924 printf("SMP: AP CPU #%d Launched!\n", cpuid);
926 /* Determine if we are a logical CPU. */
927 if (cpu_info[PCPU_GET(apic_id)].cpu_hyperthread)
928 CPU_SET(cpuid, &logical_cpus_mask);
933 if (smp_cpus == mp_ncpus) {
934 /* Enable IPIs, TLB shootdown, freezes, etc. */
935 atomic_store_rel_int(&smp_started, 1);
940 * Enable the global pages TLB extension.
941 * This also implicitly flushes the TLB.
943 load_cr4(rcr4() | CR4_PGE);
944 if (pmap_pcid_enabled)
945 load_cr4(rcr4() | CR4_PCIDE);
951 mtx_unlock_spin(&ap_boot_mtx);
953 /* Wait until all the APs are up. */
954 while (atomic_load_acq_int(&smp_started) == 0)
957 #ifndef EARLY_AP_STARTUP
958 /* Start per-CPU event timers. */
964 panic("scheduler returned us to %s", __func__);
968 /*******************************************************************
969 * local functions and data
973 * We tell the I/O APIC code about all the CPUs that should receive
974 * interrupts. If we don't want certain CPUs to receive IRQs, we
975 * simply do not tell the I/O APIC code about them in this function.
976 * We also do not tell it about the BSP since it tells itself about
977 * the BSP internally to work with UP kernels and on UP machines.
980 set_interrupt_apic_ids(void)
984 for (i = 0; i < MAXCPU; i++) {
985 apic_id = cpu_apic_ids[i];
988 if (cpu_info[apic_id].cpu_bsp)
990 if (cpu_info[apic_id].cpu_disabled)
993 /* Don't let hyperthreads service interrupts. */
994 if (cpu_info[apic_id].cpu_hyperthread)
1002 #ifdef COUNT_XINVLTLB_HITS
1003 u_int xhits_gbl[MAXCPU];
1004 u_int xhits_pg[MAXCPU];
1005 u_int xhits_rng[MAXCPU];
1006 static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
1007 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
1008 sizeof(xhits_gbl), "IU", "");
1009 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
1010 sizeof(xhits_pg), "IU", "");
1011 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
1012 sizeof(xhits_rng), "IU", "");
1017 u_int ipi_range_size;
1018 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
1019 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
1020 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
1021 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
1023 #endif /* COUNT_XINVLTLB_HITS */
1026 * INIT and STARTUP IPI sequence used to start an AP.
1029 ipi_startup(int apic_id, int vector)
1033 * This attempts to follow the algorithm described in the
1034 * Intel Multiprocessor Specification v1.4 in section B.4.
1035 * For each IPI, we allow the local APIC ~20us to deliver the
1036 * IPI. If that times out, we panic.
1040 * First we send an INIT IPI: this INIT IPI might run, resetting
1041 * and restarting the target CPU. OR this INIT IPI might be latched (P5
1042 * bug), leaving the CPU waiting for a STARTUP IPI. OR this INIT IPI might be
1045 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
1046 APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
1047 lapic_ipi_wait(100);
1049 /* Explicitly deassert the INIT IPI. */
1050 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
1051 APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
1054 DELAY(10000); /* wait ~10 ms */
1057 * Next we send a STARTUP IPI: the previous INIT IPI might still be
1058 * latched (P5 bug), in which case this first STARTUP IPI terminates
1059 * immediately and the previously started INIT IPI continues. OR
1060 * the previous INIT IPI has already run, and this STARTUP IPI will
1061 * run. OR the previous INIT IPI was ignored, and this STARTUP IPI
1064 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1065 APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1067 if (!lapic_ipi_wait(100))
1068 panic("Failed to deliver first STARTUP IPI to APIC %d",
1070 DELAY(200); /* wait ~200 us */
1073 * Finally we send a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
1074 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
1075 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
1076 * recognized after a hardware RESET or INIT IPI.
1078 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1079 APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1081 if (!lapic_ipi_wait(100))
1082 panic("Failed to deliver second STARTUP IPI to APIC %d",
1085 DELAY(200); /* wait ~200 us */
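/*
 * Background note (per the Intel MP specification, not restated elsewhere
 * in this file): the STARTUP IPI vector encodes the physical page at which
 * the AP starts executing in real mode, i.e. address vector << 12.
 */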
1089 * Send an IPI to the specified CPU, handling the bitmap logic.
1092 ipi_send_cpu(int cpu, u_int ipi)
1094 u_int bitmap, old_pending, new_pending;
1096 KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
1098 if (IPI_IS_BITMAPED(ipi)) {
1100 ipi = IPI_BITMAP_VECTOR;
1102 old_pending = cpu_ipi_pending[cpu];
1103 new_pending = old_pending | bitmap;
1104 } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
1105 old_pending, new_pending));
1109 lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
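/*
 * For bitmapped IPIs (preempt, AST, hardclock) the request is recorded in
 * cpu_ipi_pending[cpu] above and delivered through IPI_BITMAP_VECTOR;
 * ipi_bitmap_handler() below atomically reads and clears the whole pending
 * word and dispatches each recorded request.
 */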
1113 ipi_bitmap_handler(struct trapframe frame)
1115 struct trapframe *oldframe;
1117 int cpu = PCPU_GET(cpuid);
1122 td->td_intr_nesting_level++;
1123 oldframe = td->td_intr_frame;
1124 td->td_intr_frame = &frame;
1125 ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
1126 if (ipi_bitmap & (1 << IPI_PREEMPT)) {
1128 (*ipi_preempt_counts[cpu])++;
1132 if (ipi_bitmap & (1 << IPI_AST)) {
1134 (*ipi_ast_counts[cpu])++;
1136 /* Nothing to do for AST */
1138 if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
1140 (*ipi_hardclock_counts[cpu])++;
1144 td->td_intr_frame = oldframe;
1145 td->td_intr_nesting_level--;
1150 * Send an IPI to a set of CPUs.
1153 ipi_selected(cpuset_t cpus, u_int ipi)
1158 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1159 * of help in order to understand what the source is.
1160 * Set the mask of receiving CPUs for this purpose.
1162 if (ipi == IPI_STOP_HARD)
1163 CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &cpus);
1165 while ((cpu = CPU_FFS(&cpus)) != 0) {
1167 CPU_CLR(cpu, &cpus);
1168 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1169 ipi_send_cpu(cpu, ipi);
1174 * Send an IPI to a specific CPU.
1177 ipi_cpu(int cpu, u_int ipi)
1181 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1182 * of help in order to understand what the source is.
1183 * Set the mask of receiving CPUs for this purpose.
1185 if (ipi == IPI_STOP_HARD)
1186 CPU_SET_ATOMIC(cpu, &ipi_stop_nmi_pending);
1188 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1189 ipi_send_cpu(cpu, ipi);
1193 * Send an IPI to all CPUs EXCEPT myself.
1196 ipi_all_but_self(u_int ipi)
1198 cpuset_t other_cpus;
1200 other_cpus = all_cpus;
1201 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
1202 if (IPI_IS_BITMAPED(ipi)) {
1203 ipi_selected(other_cpus, ipi);
1208 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1209 * of help in order to understand what the source is.
1210 * Set the mask of receiving CPUs for this purpose.
1212 if (ipi == IPI_STOP_HARD)
1213 CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &other_cpus);
1215 CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
1216 lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
1220 ipi_nmi_handler(void)
1225 * As long as there is no simple way to know an NMI's
1226 * source, if the bit for the current CPU is set in
1227 * the global pending bitword, an IPI_STOP_HARD has been issued
1228 * and should be handled.
1230 cpuid = PCPU_GET(cpuid);
1231 if (!CPU_ISSET(cpuid, &ipi_stop_nmi_pending))
1234 CPU_CLR_ATOMIC(cpuid, &ipi_stop_nmi_pending);
1243 nmi_call_kdb_smp(u_int type, struct trapframe *frame)
1248 cpu = PCPU_GET(cpuid);
1249 if (atomic_cmpset_acq_int(&nmi_kdb_lock, 0, 1)) {
1250 nmi_call_kdb(cpu, type, frame);
1253 savectx(&stoppcbs[cpu]);
1254 CPU_SET_ATOMIC(cpu, &stopped_cpus);
1255 while (!atomic_cmpset_acq_int(&nmi_kdb_lock, 0, 1))
1259 atomic_store_rel_int(&nmi_kdb_lock, 0);
1261 cpustop_handler_post(cpu);
1266 * Handle an IPI_STOP by saving our current context and spinning until we
1270 cpustop_handler(void)
1274 cpu = PCPU_GET(cpuid);
1276 savectx(&stoppcbs[cpu]);
1278 /* Indicate that we are stopped */
1279 CPU_SET_ATOMIC(cpu, &stopped_cpus);
1281 /* Wait for restart */
1282 while (!CPU_ISSET(cpu, &started_cpus))
1285 cpustop_handler_post(cpu);
1289 cpustop_handler_post(u_int cpu)
1292 CPU_CLR_ATOMIC(cpu, &started_cpus);
1293 CPU_CLR_ATOMIC(cpu, &stopped_cpus);
1296 * We don't broadcast TLB invalidations to other CPUs when they are
1297 * stopped. Hence, we clear the TLB before resuming.
1301 #if defined(__amd64__) && defined(DDB)
1302 amd64_db_resume_dbreg();
1305 if (cpu == 0 && cpustop_restartfunc != NULL) {
1306 cpustop_restartfunc();
1307 cpustop_restartfunc = NULL;
1312 * Handle an IPI_SUSPEND by saving our current context and spinning until we
1316 cpususpend_handler(void)
1320 mtx_assert(&smp_ipi_mtx, MA_NOTOWNED);
1322 cpu = PCPU_GET(cpuid);
1323 if (savectx(&susppcbs[cpu]->sp_pcb)) {
1325 fpususpend(susppcbs[cpu]->sp_fpususpend);
1327 npxsuspend(susppcbs[cpu]->sp_fpususpend);
1330 CPU_SET_ATOMIC(cpu, &suspended_cpus);
1333 fpuresume(susppcbs[cpu]->sp_fpususpend);
1335 npxresume(susppcbs[cpu]->sp_fpususpend);
1339 PCPU_SET(switchtime, 0);
1340 PCPU_SET(switchticks, ticks);
1342 /* Indicate that we are resumed */
1343 CPU_CLR_ATOMIC(cpu, &suspended_cpus);
1346 /* Wait for resume */
1347 while (!CPU_ISSET(cpu, &started_cpus))
1350 if (cpu_ops.cpu_resume)
1351 cpu_ops.cpu_resume();
1357 /* Resume MCA and local APIC */
1362 /* Indicate that we are resumed */
1363 CPU_CLR_ATOMIC(cpu, &suspended_cpus);
1364 CPU_CLR_ATOMIC(cpu, &started_cpus);
1369 invlcache_handler(void)
1371 uint32_t generation;
1374 (*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
1375 #endif /* COUNT_IPIS */
1378 * Reading the generation here allows greater parallelism
1379 * since wbinvd is a serializing instruction. Without the
1380 * temporary, we'd wait for wbinvd to complete, then the read
1381 * would execute, then the dependent write, which must then
1382 * complete before return from interrupt.
1384 generation = smp_tlb_generation;
1386 PCPU_SET(smp_tlb_done, generation);
1390 * This is called once the rest of the system is up and running and we're
1391 * ready to let the APs out of the pen.
1394 release_aps(void *dummy __unused)
1399 atomic_store_rel_int(&aps_ready, 1);
1400 while (smp_started == 0)
1403 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
1407 * Setup interrupt counters for IPI handlers.
1410 mp_ipi_intrcnt(void *dummy)
1416 snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
1417 intrcnt_add(buf, &ipi_invltlb_counts[i]);
1418 snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
1419 intrcnt_add(buf, &ipi_invlrng_counts[i]);
1420 snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
1421 intrcnt_add(buf, &ipi_invlpg_counts[i]);
1422 snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
1423 intrcnt_add(buf, &ipi_invlcache_counts[i]);
1424 snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
1425 intrcnt_add(buf, &ipi_preempt_counts[i]);
1426 snprintf(buf, sizeof(buf), "cpu%d:ast", i);
1427 intrcnt_add(buf, &ipi_ast_counts[i]);
1428 snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
1429 intrcnt_add(buf, &ipi_rendezvous_counts[i]);
1430 snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
1431 intrcnt_add(buf, &ipi_hardclock_counts[i]);
1434 SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
1438 * Flush the TLB on other CPUs.
1441 /* Variables needed for SMP tlb shootdown. */
1442 static vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
1443 pmap_t smp_tlb_pmap;
1444 volatile uint32_t smp_tlb_generation;
1447 #define read_eflags() read_rflags()
1451 smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
1452 vm_offset_t addr1, vm_offset_t addr2)
1454 cpuset_t other_cpus;
1455 volatile uint32_t *p_cpudone;
1456 uint32_t generation;
1459 /* It is not necessary to signal other CPUs while in the debugger. */
1460 if (kdb_active || panicstr != NULL)
1464 * Check for other CPUs. Return if none.
1466 if (CPU_ISFULLSET(&mask)) {
1470 CPU_CLR(PCPU_GET(cpuid), &mask);
1471 if (CPU_EMPTY(&mask))
1475 if (!(read_eflags() & PSL_I))
1476 panic("%s: interrupts disabled", __func__);
1477 mtx_lock_spin(&smp_ipi_mtx);
1478 smp_tlb_addr1 = addr1;
1479 smp_tlb_addr2 = addr2;
1480 smp_tlb_pmap = pmap;
1481 generation = ++smp_tlb_generation;
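/*
 * The generation counter implements the completion handshake: each target
 * CPU stores this generation into its per-CPU smp_tlb_done field once it
 * has serviced the request, and the wait loop below spins until every
 * targeted CPU has caught up.
 */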
1482 if (CPU_ISFULLSET(&mask)) {
1483 ipi_all_but_self(vector);
1484 other_cpus = all_cpus;
1485 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
1488 while ((cpu = CPU_FFS(&mask)) != 0) {
1490 CPU_CLR(cpu, &mask);
1491 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
1493 ipi_send_cpu(cpu, vector);
1496 while ((cpu = CPU_FFS(&other_cpus)) != 0) {
1498 CPU_CLR(cpu, &other_cpus);
1499 p_cpudone = &cpuid_to_pcpu[cpu]->pc_smp_tlb_done;
1500 while (*p_cpudone != generation)
1503 mtx_unlock_spin(&smp_ipi_mtx);
1507 smp_masked_invltlb(cpuset_t mask, pmap_t pmap)
1511 smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0);
1512 #ifdef COUNT_XINVLTLB_HITS
1519 smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
1523 smp_targeted_tlb_shootdown(mask, IPI_INVLPG, NULL, addr, 0);
1524 #ifdef COUNT_XINVLTLB_HITS
1531 smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
1535 smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, NULL,
1537 #ifdef COUNT_XINVLTLB_HITS
1539 ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
1545 smp_cache_flush(void)
1549 smp_targeted_tlb_shootdown(all_cpus, IPI_INVLCACHE, NULL,
1555 * Handlers for TLB related IPIs
1558 invltlb_handler(void)
1560 uint32_t generation;
1562 #ifdef COUNT_XINVLTLB_HITS
1563 xhits_gbl[PCPU_GET(cpuid)]++;
1564 #endif /* COUNT_XINVLTLB_HITS */
1566 (*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
1567 #endif /* COUNT_IPIS */
1570 * Reading the generation here allows greater parallelism
1571 * since invalidating the TLB is a serializing operation.
1573 generation = smp_tlb_generation;
1574 if (smp_tlb_pmap == kernel_pmap)
1578 PCPU_SET(smp_tlb_done, generation);
1582 invlpg_handler(void)
1584 uint32_t generation;
1586 #ifdef COUNT_XINVLTLB_HITS
1587 xhits_pg[PCPU_GET(cpuid)]++;
1588 #endif /* COUNT_XINVLTLB_HITS */
1590 (*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
1591 #endif /* COUNT_IPIS */
1593 generation = smp_tlb_generation; /* Overlap with serialization */
1594 invlpg(smp_tlb_addr1);
1595 PCPU_SET(smp_tlb_done, generation);
1599 invlrng_handler(void)
1601 vm_offset_t addr, addr2;
1602 uint32_t generation;
1604 #ifdef COUNT_XINVLTLB_HITS
1605 xhits_rng[PCPU_GET(cpuid)]++;
1606 #endif /* COUNT_XINVLTLB_HITS */
1608 (*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
1609 #endif /* COUNT_IPIS */
1611 addr = smp_tlb_addr1;
1612 addr2 = smp_tlb_addr2;
1613 generation = smp_tlb_generation; /* Overlap with serialization */
1617 } while (addr < addr2);
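/*
 * The loop above walks [smp_tlb_addr1, smp_tlb_addr2) one page at a time,
 * issuing an invlpg for each page in the requested range.
 */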
1619 PCPU_SET(smp_tlb_done, generation);