2 * Copyright (c) 1996, by Steve Passe
3 * Copyright (c) 2003, by Peter Wemm
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. The name of the developer may NOT be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
34 #include "opt_kstack_pages.h"
36 #include "opt_sched.h"
39 #include <sys/param.h>
40 #include <sys/systm.h>
42 #include <sys/cons.h> /* cngetc() */
43 #include <sys/cpuset.h>
47 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/memrange.h>
52 #include <sys/mutex.h>
55 #include <sys/sched.h>
57 #include <sys/sysctl.h>
60 #include <vm/vm_param.h>
62 #include <vm/vm_kern.h>
63 #include <vm/vm_extern.h>
65 #include <x86/apicreg.h>
66 #include <machine/clock.h>
67 #include <machine/cputypes.h>
69 #include <machine/md_var.h>
70 #include <machine/pcb.h>
71 #include <machine/psl.h>
72 #include <machine/smp.h>
73 #include <machine/specialreg.h>
74 #include <machine/cpu.h>
76 #define WARMBOOT_TARGET 0
77 #define WARMBOOT_OFF (KERNBASE + 0x0467)
78 #define WARMBOOT_SEG (KERNBASE + 0x0469)
80 #define CMOS_REG (0x70)
81 #define CMOS_DATA (0x71)
82 #define BIOS_RESET (0x0f)
83 #define BIOS_WARM (0x0a)
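/*
 * These implement the classic PC warm-reset convention: writing 0x0a to the
 * CMOS shutdown status byte (register 0x0f) makes the BIOS jump through the
 * warm-boot vector at 0x40:0x67 instead of running POST, so a processor
 * coming out of an INIT reset can be steered to a kernel-chosen trampoline.
 */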
85 /* lock region used by kernel profiling */
88 int mp_naps; /* # of Application Processors (APs) */
89 int boot_cpu_id = -1; /* designated BSP */
91 extern struct pcpu __pcpu[];
93 /* AP uses this during bootstrap. Do not staticize. */
97 /* Free these after use */
98 void *bootstacks[MAXCPU];
101 struct pcb stoppcbs[MAXCPU];
102 struct susppcb **susppcbs;
105 /* Interrupt counts. */
106 static u_long *ipi_preempt_counts[MAXCPU];
107 static u_long *ipi_ast_counts[MAXCPU];
108 u_long *ipi_invltlb_counts[MAXCPU];
109 u_long *ipi_invlrng_counts[MAXCPU];
110 u_long *ipi_invlpg_counts[MAXCPU];
111 u_long *ipi_invlcache_counts[MAXCPU];
112 u_long *ipi_rendezvous_counts[MAXCPU];
113 static u_long *ipi_hardclock_counts[MAXCPU];
116 /* Default cpu_ops implementation. */
117 struct cpu_ops cpu_ops;
120 * Local data and functions.
123 static volatile cpuset_t ipi_stop_nmi_pending;
125 /* used to hold the APs until we are ready to release them */
126 struct mtx ap_boot_mtx;
128 /* Set to 1 once we're ready to let the APs out of the pen. */
129 volatile int aps_ready = 0;
132 * Store data from cpu_add() until later in the boot when we actually set up
135 struct cpu_info cpu_info[MAX_APIC_ID + 1];
136 int apic_cpuids[MAX_APIC_ID + 1];
137 int cpu_apic_ids[MAXCPU];
139 /* Holds pending bitmap based IPIs per CPU */
140 volatile u_int cpu_ipi_pending[MAXCPU];
142 static void release_aps(void *dummy);
144 static int hyperthreading_allowed = 1;
145 SYSCTL_INT(_machdep, OID_AUTO, hyperthreading_allowed, CTLFLAG_RDTUN,
146 &hyperthreading_allowed, 0, "Use Intel HTT logical CPUs");
148 static struct topo_node topo_root;
150 static int pkg_id_shift;
151 static int core_id_shift;
152 static int disabled_cpus;
157 } static caches[MAX_CACHE_LEVELS];
160 mem_range_AP_init(void)
163 if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
164 mem_range_softc.mr_op->initAP(&mem_range_softc);
168 * Round up to the next power of two, if necessary, and then take log2 of the result.
170 * Returns -1 if argument is zero.
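 * For example, mask_width(6) rounds 6 up to 8 and returns 3, while
 * mask_width(4) returns 2.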
176 return (fls(x << (1 - powerof2(x))) - 1);
180 * Add a cache level to the cache topology description.
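 * The type and level arguments use the CPUID deterministic cache parameters
 * encoding (leaf 4 / 0x8000001d): type 1 = data, 2 = instruction,
 * 3 = unified; level is 1-based.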
183 add_deterministic_cache(int type, int level, int share_count)
189 printf("unexpected cache type %d\n", type);
192 if (type == 2) /* ignore instruction cache */
194 if (level == 0 || level > MAX_CACHE_LEVELS) {
195 printf("unexpected cache level %d\n", level);
199 if (caches[level - 1].present) {
200 printf("WARNING: multiple entries for L%u data cache\n", level);
201 printf("%u => %u\n", caches[level - 1].id_shift,
202 mask_width(share_count));
204 caches[level - 1].id_shift = mask_width(share_count);
205 caches[level - 1].present = 1;
207 if (caches[level - 1].id_shift > pkg_id_shift) {
208 printf("WARNING: L%u data cache covers more "
209 "APIC IDs than a package\n", level);
210 printf("%u > %u\n", caches[level - 1].id_shift, pkg_id_shift);
211 caches[level - 1].id_shift = pkg_id_shift;
213 if (caches[level - 1].id_shift < core_id_shift) {
214 printf("WARNING: L%u data cache covers less "
215 "APIC IDs than a core\n", level);
216 printf("%u < %u\n", caches[level - 1].id_shift, core_id_shift);
217 caches[level - 1].id_shift = core_id_shift;
224 * Determine topology of processing units and caches for AMD CPUs.
226 * - AMD CPUID Specification (Publication # 25481)
227 * - BKDG For AMD Family 10h Processors (Publication # 31116), section 2.15
228 * - BKDG for AMD NPT Family 0Fh Processors (Publication # 32559)
229 * XXX At the moment the code does not recognize grouping of AMD CMT threads,
230 * if supported, into cores, so each thread is treated as being in its own
231 * core. In other words, each logical CPU is considered to be a core.
242 /* No multi-core capability. */
243 if ((amd_feature2 & AMDID2_CMP) == 0)
246 /* For families 10h and newer. */
247 pkg_id_shift = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
248 AMDID_COREID_SIZE_SHIFT;
250 /* For 0Fh family. */
251 if (pkg_id_shift == 0)
253 mask_width((cpu_procinfo2 & AMDID_CMP_CORES) + 1);
255 if ((amd_feature2 & AMDID2_TOPOLOGY) != 0) {
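/*
 * Deterministic cache parameters (leaf 0x8000001d): EAX[4:0] is the cache
 * type, EAX[7:5] the cache level, and EAX[25:14] the number of threads
 * sharing the cache minus one, matching the field extraction below.
 */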
257 cpuid_count(0x8000001d, i, p);
259 level = (p[0] >> 5) & 0x7;
260 share_count = 1 + ((p[0] >> 14) & 0xfff);
262 if (!add_deterministic_cache(type, level, share_count))
266 if (cpu_exthigh >= 0x80000005) {
267 cpuid_count(0x80000005, 0, p);
268 if (((p[2] >> 24) & 0xff) != 0) {
269 caches[0].id_shift = 0;
270 caches[0].present = 1;
273 if (cpu_exthigh >= 0x80000006) {
274 cpuid_count(0x80000006, 0, p);
275 if (((p[2] >> 16) & 0xffff) != 0) {
276 caches[1].id_shift = 0;
277 caches[1].present = 1;
279 if (((p[3] >> 18) & 0x3fff) != 0) {
282 * TODO: Account for dual-node processors
283 * where each node within a package has its own
286 caches[2].id_shift = pkg_id_shift;
287 caches[2].present = 1;
294 * Determine topology of processing units for Intel CPUs
295 * using CPUID Leaf 1 and Leaf 4, if supported.
297 * - Intel 64 Architecture Processor Topology Enumeration
298 * - Intel 64 and IA-32 Architectures Software Developer’s Manual,
299 * Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
300 * FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
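 *
 * For example (illustrative values, not a specific CPU), a package reporting
 * 16 logical processors (CPUID.1:EBX[23:16]) and 8 cores (leaf 4) yields
 * core_id_shift = mask_width(16 / 8) = 1 and
 * pkg_id_shift = 1 + mask_width(8) = 4.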
303 topo_probe_intel_0x4(void)
309 /* Both zero and one here mean one logical processor per package. */
310 max_logical = (cpu_feature & CPUID_HTT) != 0 ?
311 (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
312 if (max_logical <= 1)
315 if (cpu_high >= 0x4) {
316 cpuid_count(0x04, 0, p);
317 max_cores = ((p[0] >> 26) & 0x3f) + 1;
321 core_id_shift = mask_width(max_logical/max_cores);
322 KASSERT(core_id_shift >= 0,
323 ("intel topo: max_cores > max_logical"));
324 pkg_id_shift = core_id_shift + mask_width(max_cores);
328 * Determine topology of processing units for Intel CPUs
329 * using CPUID Leaf 11, if supported.
331 * - Intel 64 Architecture Processor Topology Enumeration
332 * - Intel 64 and IA-32 Architectures Software Developer’s Manual,
333 * Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
334 * FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
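 *
 * Each sub-leaf of CPUID leaf 0Bh reports the level type in ECX[15:8]
 * (SMT or core) and, in EAX[4:0], the number of low APIC ID bits to shift
 * out to reach the next topology level.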
337 topo_probe_intel_0xb(void)
344 /* Fall back if CPU leaf 11 doesn't really exist. */
345 cpuid_count(0x0b, 0, p);
347 topo_probe_intel_0x4();
351 /* We only support three levels for now. */
353 cpuid_count(0x0b, i, p);
356 type = (p[2] >> 8) & 0xff;
361 /* TODO: check for duplicate (re-)assignment */
362 if (type == CPUID_TYPE_SMT)
363 core_id_shift = bits;
364 else if (type == CPUID_TYPE_CORE)
367 printf("unknown CPU level type %d\n", type);
370 if (pkg_id_shift < core_id_shift) {
371 printf("WARNING: core covers more APIC IDs than a package\n");
372 core_id_shift = pkg_id_shift;
377 * Determine topology of caches for Intel CPUs.
379 * - Intel 64 Architecture Processor Topology Enumeration
380 * - Intel 64 and IA-32 Architectures Software Developer’s Manual
381 * Volume 2A: Instruction Set Reference, A-M,
385 topo_probe_intel_caches(void)
393 if (cpu_high < 0x4) {
395 * Available cache levels and sizes can be determined
396 * via CPUID leaf 2, but that requires a huge table of hardcoded
397 * values, so for now just assume that L1 and L2 caches are
398 * potentially shared only by HTT processing units, if HTT is present.
400 caches[0].id_shift = pkg_id_shift;
401 caches[0].present = 1;
402 caches[1].id_shift = pkg_id_shift;
403 caches[1].present = 1;
408 cpuid_count(0x4, i, p);
410 level = (p[0] >> 5) & 0x7;
411 share_count = 1 + ((p[0] >> 14) & 0xfff);
413 if (!add_deterministic_cache(type, level, share_count))
419 * Determine topology of processing units and caches for Intel CPUs.
421 * - Intel 64 Architecture Processor Topology Enumeration
424 topo_probe_intel(void)
428 * Note that the 0x1 <= cpu_high < 4 case should be
429 * compatible with the topo_probe_intel_0x4() logic when
430 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1),
431 * or it should trigger the fallback otherwise.
434 topo_probe_intel_0xb();
435 else if (cpu_high >= 0x1)
436 topo_probe_intel_0x4();
438 topo_probe_intel_caches();
442 * Topology information is queried only on the BSP, on which this
443 * code runs and for which it can query CPUID information.
444 * The topology is then extrapolated to all packages under the
445 * assumption that the APIC ID to hardware component ID mapping is
447 * That does not necessarily imply that the topology is uniform.
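 *
 * For example (illustrative values), with pkg_id_shift = 4 and
 * core_id_shift = 1, APIC ID 0x2d is filed under package node
 * 0x2d >> 4 = 2, core node 0x2d >> 1 = 0x16, and PU node 0x2d.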
452 static int cpu_topo_probed = 0;
453 struct x86_topo_layer {
457 } topo_layers[MAX_CACHE_LEVELS + 3];
458 struct topo_node *parent;
459 struct topo_node *node;
468 CPU_ZERO(&logical_cpus_mask);
472 else if (cpu_vendor_id == CPU_VENDOR_AMD)
474 else if (cpu_vendor_id == CPU_VENDOR_INTEL)
477 KASSERT(pkg_id_shift >= core_id_shift,
478 ("bug in APIC topology discovery"));
481 bzero(topo_layers, sizeof(topo_layers));
483 topo_layers[nlayers].type = TOPO_TYPE_PKG;
484 topo_layers[nlayers].id_shift = pkg_id_shift;
486 printf("Package ID shift: %u\n", topo_layers[nlayers].id_shift);
490 * Consider all caches to be within a package/chip
491 * and "in front" of all sub-components like
492 * cores and hardware threads.
494 for (i = MAX_CACHE_LEVELS - 1; i >= 0; --i) {
495 if (caches[i].present) {
496 KASSERT(caches[i].id_shift <= pkg_id_shift,
497 ("bug in APIC topology discovery"));
498 KASSERT(caches[i].id_shift >= core_id_shift,
499 ("bug in APIC topology discovery"));
501 topo_layers[nlayers].type = TOPO_TYPE_CACHE;
502 topo_layers[nlayers].subtype = i + 1;
503 topo_layers[nlayers].id_shift = caches[i].id_shift;
505 printf("L%u cache ID shift: %u\n",
506 topo_layers[nlayers].subtype,
507 topo_layers[nlayers].id_shift);
512 if (pkg_id_shift > core_id_shift) {
513 topo_layers[nlayers].type = TOPO_TYPE_CORE;
514 topo_layers[nlayers].id_shift = core_id_shift;
516 printf("Core ID shift: %u\n",
517 topo_layers[nlayers].id_shift);
521 topo_layers[nlayers].type = TOPO_TYPE_PU;
522 topo_layers[nlayers].id_shift = 0;
525 topo_init_root(&topo_root);
526 for (i = 0; i <= MAX_APIC_ID; ++i) {
527 if (!cpu_info[i].cpu_present)
531 for (layer = 0; layer < nlayers; ++layer) {
532 node_id = i >> topo_layers[layer].id_shift;
533 parent = topo_add_node_by_hwid(parent, node_id,
534 topo_layers[layer].type,
535 topo_layers[layer].subtype);
540 for (layer = 0; layer < nlayers; ++layer) {
541 node_id = boot_cpu_id >> topo_layers[layer].id_shift;
542 node = topo_find_node_by_hwid(parent, node_id,
543 topo_layers[layer].type,
544 topo_layers[layer].subtype);
545 topo_promote_child(node);
553 * Assign logical CPU IDs to local APICs.
558 struct topo_node *node;
561 smt_mask = (1u << core_id_shift) - 1;
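/*
 * The low core_id_shift bits of an APIC ID identify the SMT thread within
 * a core; a PU whose thread bits differ from the BSP's is marked as a
 * hyperthread below.
 */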
564 * Assign CPU IDs to local APIC IDs and disable any CPUs
565 * beyond MAXCPU. CPU 0 is always assigned to the BSP.
568 TOPO_FOREACH(node, &topo_root) {
569 if (node->type != TOPO_TYPE_PU)
572 if ((node->hwid & smt_mask) != (boot_cpu_id & smt_mask))
573 cpu_info[node->hwid].cpu_hyperthread = 1;
575 if (resource_disabled("lapic", node->hwid)) {
576 if (node->hwid != boot_cpu_id)
577 cpu_info[node->hwid].cpu_disabled = 1;
579 printf("Cannot disable BSP, APIC ID = %d\n",
583 if (!hyperthreading_allowed &&
584 cpu_info[node->hwid].cpu_hyperthread)
585 cpu_info[node->hwid].cpu_disabled = 1;
587 if (mp_ncpus >= MAXCPU)
588 cpu_info[node->hwid].cpu_disabled = 1;
590 if (cpu_info[node->hwid].cpu_disabled) {
595 cpu_apic_ids[mp_ncpus] = node->hwid;
596 apic_cpuids[node->hwid] = mp_ncpus;
597 topo_set_pu_id(node, mp_ncpus);
601 KASSERT(mp_maxid >= mp_ncpus - 1,
602 ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
607 * Print various information about the SMP system hardware and setup.
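 *
 * The summary lines below look like, e.g.,
 * "FreeBSD/SMP: 2 package(s) x 4 core(s) x 2 hardware threads"
 * (illustrative counts).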
610 cpu_mp_announce(void)
612 struct topo_node *node;
613 const char *hyperthread;
618 printf("FreeBSD/SMP: ");
619 if (topo_analyze(&topo_root, 1, &pkg_count,
620 &cores_per_pkg, &thrs_per_core)) {
621 printf("%d package(s)", pkg_count);
622 if (cores_per_pkg > 0)
623 printf(" x %d core(s)", cores_per_pkg);
624 if (thrs_per_core > 1)
625 printf(" x %d hardware threads", thrs_per_core);
627 printf("Non-uniform topology");
632 printf("FreeBSD/SMP Online: ");
633 if (topo_analyze(&topo_root, 0, &pkg_count,
634 &cores_per_pkg, &thrs_per_core)) {
635 printf("%d package(s)", pkg_count);
636 if (cores_per_pkg > 0)
637 printf(" x %d core(s)", cores_per_pkg);
638 if (thrs_per_core > 1)
639 printf(" x %d hardware threads", thrs_per_core);
641 printf("Non-uniform topology");
649 TOPO_FOREACH(node, &topo_root) {
650 switch (node->type) {
652 printf("Package HW ID = %u\n", node->hwid);
655 printf("\tCore HW ID = %u\n", node->hwid);
658 if (cpu_info[node->hwid].cpu_hyperthread)
663 if (node->subtype == 0)
664 printf("\t\tCPU (AP%s): APIC ID: %u"
665 " (disabled)\n", hyperthread, node->hwid);
666 else if (node->id == 0)
667 printf("\t\tCPU0 (BSP): APIC ID: %u\n",
670 printf("\t\tCPU%u (AP%s): APIC ID: %u\n",
671 node->id, hyperthread, node->hwid);
681 * Add a scheduling group, a group of logical processors sharing
682 * a particular cache (and thus having an affinity), to the scheduling topology.
684 * This function recursively works on lower level caches.
687 x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
689 struct topo_node *node;
694 KASSERT(root->type == TOPO_TYPE_SYSTEM || root->type == TOPO_TYPE_CACHE,
695 ("x86topo_add_sched_group: bad type: %u", root->type));
696 CPU_COPY(&root->cpuset, &cg_root->cg_mask);
697 cg_root->cg_count = root->cpu_count;
698 if (root->type == TOPO_TYPE_SYSTEM)
699 cg_root->cg_level = CG_SHARE_NONE;
701 cg_root->cg_level = root->subtype;
704 * Check how many core nodes we have under the given root node.
705 * If we have multiple logical processors, but not multiple
706 * cores, then those processors must be hardware threads.
710 while (node != NULL) {
711 if (node->type != TOPO_TYPE_CORE) {
712 node = topo_next_node(root, node);
717 node = topo_next_nonchild_node(root, node);
720 if (cg_root->cg_level != CG_SHARE_NONE &&
721 root->cpu_count > 1 && ncores < 2)
722 cg_root->cg_flags = CG_FLAG_SMT;
725 * Find out how many cache nodes we have under the given root node.
726 * We ignore cache nodes that cover all the same processors as the
727 * root node. Also, we do not descend below found cache nodes.
728 * That is, we count top-level "non-redundant" caches under the root
733 while (node != NULL) {
734 if (node->type != TOPO_TYPE_CACHE ||
735 (root->type != TOPO_TYPE_SYSTEM &&
736 CPU_CMP(&node->cpuset, &root->cpuset) == 0)) {
737 node = topo_next_node(root, node);
741 node = topo_next_nonchild_node(root, node);
744 cg_root->cg_child = smp_topo_alloc(nchildren);
745 cg_root->cg_children = nchildren;
748 * Now find again the same cache nodes as above and recursively
749 * build scheduling topologies for them.
753 while (node != NULL) {
754 if (node->type != TOPO_TYPE_CACHE ||
755 (root->type != TOPO_TYPE_SYSTEM &&
756 CPU_CMP(&node->cpuset, &root->cpuset) == 0)) {
757 node = topo_next_node(root, node);
760 cg_root->cg_child[i].cg_parent = cg_root;
761 x86topo_add_sched_group(node, &cg_root->cg_child[i]);
763 node = topo_next_nonchild_node(root, node);
768 * Build the MI scheduling topology from the discovered hardware topology.
773 struct cpu_group *cg_root;
776 return (smp_topo_none());
778 cg_root = smp_topo_alloc(1);
779 x86topo_add_sched_group(&topo_root, cg_root);
785 * Add a logical CPU to the topology.
788 cpu_add(u_int apic_id, char boot_cpu)
791 if (apic_id > MAX_APIC_ID) {
792 panic("SMP: APIC ID %d too high", apic_id);
795 KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
797 cpu_info[apic_id].cpu_present = 1;
799 KASSERT(boot_cpu_id == -1,
800 ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
802 boot_cpu_id = apic_id;
803 cpu_info[apic_id].cpu_bsp = 1;
805 if (mp_ncpus < MAXCPU) {
807 mp_maxid = mp_ncpus - 1;
810 printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
815 cpu_mp_setmaxid(void)
819 * mp_ncpus and mp_maxid should already be set by calls to cpu_add().
820 * If there were no calls to cpu_add(), assume this is a UP system.
831 * Always record BSP in CPU map so that the mbuf init code works
834 CPU_SETOF(0, &all_cpus);
835 return (mp_ncpus > 1);
839 * AP CPUs call this to initialize themselves.
842 init_secondary_tail(void)
847 * On real hardware, switch to x2apic mode if possible. Do it
848 * after aps_ready has been signalled, to avoid changing the
849 * mode while the BSP might still want to send us an IPI
850 * (the second startup IPI is ignored on modern hardware, etc.).
854 /* Initialize the PAT MSR. */
857 /* set up CPU registers and state */
863 /* set up FPU state on the AP */
870 if (cpu_ops.cpu_init)
873 /* A quick check from sanity claus */
874 cpuid = PCPU_GET(cpuid);
875 if (PCPU_GET(apic_id) != lapic_id()) {
876 printf("SMP: cpuid = %d\n", cpuid);
877 printf("SMP: actual apic_id = %d\n", lapic_id());
878 printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
879 panic("cpuid mismatch! boom!!");
882 /* Initialize curthread. */
883 KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
884 PCPU_SET(curthread, PCPU_GET(idlethread));
888 mtx_lock_spin(&ap_boot_mtx);
890 /* Initialize the local APIC for IRQs */
893 /* Set memory range attributes for this CPU to match the BSP */
898 CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
899 printf("SMP: AP CPU #%d Launched!\n", cpuid);
901 /* Determine if we are a logical CPU. */
902 if (cpu_info[PCPU_GET(apic_id)].cpu_hyperthread)
903 CPU_SET(cpuid, &logical_cpus_mask);
908 if (smp_cpus == mp_ncpus) {
909 /* enable IPIs, TLB shootdown, freezes, etc. */
910 atomic_store_rel_int(&smp_started, 1);
915 * Enable the global pages TLB extension.
916 * This also implicitly flushes the TLB.
918 load_cr4(rcr4() | CR4_PGE);
919 if (pmap_pcid_enabled)
920 load_cr4(rcr4() | CR4_PCIDE);
926 mtx_unlock_spin(&ap_boot_mtx);
928 /* Wait until all the APs are up. */
929 while (atomic_load_acq_int(&smp_started) == 0)
932 #ifndef EARLY_AP_STARTUP
933 /* Start per-CPU event timers. */
939 panic("scheduler returned us to %s", __func__);
943 /*******************************************************************
944 * local functions and data
948 * We tell the I/O APIC code about all the CPUs we want to receive
949 * interrupts. If we don't want certain CPUs to receive IRQs we
950 * can simply not tell the I/O APIC code about them in this function.
951 * We also do not tell it about the BSP since it tells itself about
952 * the BSP internally to work with UP kernels and on UP machines.
955 set_interrupt_apic_ids(void)
959 for (i = 0; i < MAXCPU; i++) {
960 apic_id = cpu_apic_ids[i];
963 if (cpu_info[apic_id].cpu_bsp)
965 if (cpu_info[apic_id].cpu_disabled)
968 /* Don't let hyperthreads service interrupts. */
969 if (cpu_info[apic_id].cpu_hyperthread)
977 #ifdef COUNT_XINVLTLB_HITS
978 u_int xhits_gbl[MAXCPU];
979 u_int xhits_pg[MAXCPU];
980 u_int xhits_rng[MAXCPU];
981 static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
982 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
983 sizeof(xhits_gbl), "IU", "");
984 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
985 sizeof(xhits_pg), "IU", "");
986 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
987 sizeof(xhits_rng), "IU", "");
992 u_int ipi_range_size;
993 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
994 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
995 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
996 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
998 #endif /* COUNT_XINVLTLB_HITS */
1001 * INIT and STARTUP IPI sequence used to start an AP.
1004 ipi_startup(int apic_id, int vector)
1008 * This attempts to follow the algorithm described in the
1009 * Intel Multiprocessor Specification v1.4 in section B.4.
1010 * For each IPI, we allow the local APIC ~20us to deliver the
1011 * IPI. If that times out, we panic.
1015 * first we do an INIT IPI: this INIT IPI might be run, resetting
1016 * and running the target CPU. OR this INIT IPI might be latched (P5
1017 * bug), with the CPU waiting for a STARTUP IPI. OR this INIT IPI might be
1020 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
1021 APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
1022 lapic_ipi_wait(100);
1024 /* Explicitly deassert the INIT IPI. */
1025 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
1026 APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
1029 DELAY(10000); /* wait ~10 ms */
1032 * next we do a STARTUP IPI: the previous INIT IPI might still be
1033 * latched (P5 bug); this 1st STARTUP would then terminate
1034 * immediately, and the previously started INIT IPI would continue. OR
1035 * the previous INIT IPI has already run, and this STARTUP IPI will
1036 * run. OR the previous INIT IPI was ignored, and this STARTUP IPI
1039 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1040 APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1042 if (!lapic_ipi_wait(100))
1043 panic("Failed to deliver first STARTUP IPI to APIC %d",
1045 DELAY(200); /* wait ~200 us */
1048 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
1049 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
1050 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
1051 * recognized after hardware RESET or INIT IPI.
1053 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1054 APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1056 if (!lapic_ipi_wait(100))
1057 panic("Failed to deliver second STARTUP IPI to APIC %d",
1060 DELAY(200); /* wait ~200 us */
1064 * Send an IPI to the specified CPU, handling the bitmap logic.
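 * Bitmap-based IPIs (IPI_PREEMPT, IPI_AST, IPI_HARDCLOCK) are coalesced
 * into the per-CPU cpu_ipi_pending word and delivered through the single
 * IPI_BITMAP_VECTOR; ipi_bitmap_handler() later reads and clears the word
 * atomically and dispatches each pending bit.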
1067 ipi_send_cpu(int cpu, u_int ipi)
1069 u_int bitmap, old_pending, new_pending;
1071 KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));
1073 if (IPI_IS_BITMAPED(ipi)) {
1075 ipi = IPI_BITMAP_VECTOR;
1077 old_pending = cpu_ipi_pending[cpu];
1078 new_pending = old_pending | bitmap;
1079 } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
1080 old_pending, new_pending));
1084 lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
1088 ipi_bitmap_handler(struct trapframe frame)
1090 struct trapframe *oldframe;
1092 int cpu = PCPU_GET(cpuid);
1097 td->td_intr_nesting_level++;
1098 oldframe = td->td_intr_frame;
1099 td->td_intr_frame = &frame;
1100 ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
1101 if (ipi_bitmap & (1 << IPI_PREEMPT)) {
1103 (*ipi_preempt_counts[cpu])++;
1107 if (ipi_bitmap & (1 << IPI_AST)) {
1109 (*ipi_ast_counts[cpu])++;
1111 /* Nothing to do for AST */
1113 if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
1115 (*ipi_hardclock_counts[cpu])++;
1119 td->td_intr_frame = oldframe;
1120 td->td_intr_nesting_level--;
1125 * Send an IPI to a set of CPUs.
1128 ipi_selected(cpuset_t cpus, u_int ipi)
1133 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1134 * of help to understand what the source is.
1135 * Set the mask of receiving CPUs for this purpose.
1137 if (ipi == IPI_STOP_HARD)
1138 CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &cpus);
1140 while ((cpu = CPU_FFS(&cpus)) != 0) {
1142 CPU_CLR(cpu, &cpus);
1143 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1144 ipi_send_cpu(cpu, ipi);
1149 * Send an IPI to a specific CPU.
1152 ipi_cpu(int cpu, u_int ipi)
1156 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1157 * of help to understand what the source is.
1158 * Set the mask of receiving CPUs for this purpose.
1160 if (ipi == IPI_STOP_HARD)
1161 CPU_SET_ATOMIC(cpu, &ipi_stop_nmi_pending);
1163 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1164 ipi_send_cpu(cpu, ipi);
1168 * Send an IPI to all CPUs EXCEPT myself.
1171 ipi_all_but_self(u_int ipi)
1173 cpuset_t other_cpus;
1175 other_cpus = all_cpus;
1176 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
1177 if (IPI_IS_BITMAPED(ipi)) {
1178 ipi_selected(other_cpus, ipi);
1183 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1184 * of help to understand what the source is.
1185 * Set the mask of receiving CPUs for this purpose.
1187 if (ipi == IPI_STOP_HARD)
1188 CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &other_cpus);
1190 CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
1191 lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
1195 ipi_nmi_handler(void)
1200 * Since there is no simple way to know an NMI's source, assume
1201 * that an IPI_STOP_HARD has been issued and should be handled if
1202 * this CPU's bit is set in the global pending bitmask
1203 * (ipi_stop_nmi_pending).
1205 cpuid = PCPU_GET(cpuid);
1206 if (!CPU_ISSET(cpuid, &ipi_stop_nmi_pending))
1209 CPU_CLR_ATOMIC(cpuid, &ipi_stop_nmi_pending);
1215 * Handle an IPI_STOP by saving our current context and spinning until we
1219 cpustop_handler(void)
1223 cpu = PCPU_GET(cpuid);
1225 savectx(&stoppcbs[cpu]);
1227 /* Indicate that we are stopped */
1228 CPU_SET_ATOMIC(cpu, &stopped_cpus);
1230 /* Wait for restart */
1231 while (!CPU_ISSET(cpu, &started_cpus))
1234 CPU_CLR_ATOMIC(cpu, &started_cpus);
1235 CPU_CLR_ATOMIC(cpu, &stopped_cpus);
1237 #if defined(__amd64__) && defined(DDB)
1238 amd64_db_resume_dbreg();
1241 if (cpu == 0 && cpustop_restartfunc != NULL) {
1242 cpustop_restartfunc();
1243 cpustop_restartfunc = NULL;
1248 * Handle an IPI_SUSPEND by saving our current context and spinning until we
1252 cpususpend_handler(void)
1256 mtx_assert(&smp_ipi_mtx, MA_NOTOWNED);
1258 cpu = PCPU_GET(cpuid);
1259 if (savectx(&susppcbs[cpu]->sp_pcb)) {
1261 fpususpend(susppcbs[cpu]->sp_fpususpend);
1263 npxsuspend(susppcbs[cpu]->sp_fpususpend);
1266 CPU_SET_ATOMIC(cpu, &suspended_cpus);
1269 fpuresume(susppcbs[cpu]->sp_fpususpend);
1271 npxresume(susppcbs[cpu]->sp_fpususpend);
1275 PCPU_SET(switchtime, 0);
1276 PCPU_SET(switchticks, ticks);
1278 /* Indicate that we are resumed */
1279 CPU_CLR_ATOMIC(cpu, &suspended_cpus);
1282 /* Wait for resume */
1283 while (!CPU_ISSET(cpu, &started_cpus))
1286 if (cpu_ops.cpu_resume)
1287 cpu_ops.cpu_resume();
1293 /* Resume MCA and local APIC */
1298 /* Indicate that we are resumed */
1299 CPU_CLR_ATOMIC(cpu, &suspended_cpus);
1300 CPU_CLR_ATOMIC(cpu, &started_cpus);
1305 invlcache_handler(void)
1308 (*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
1309 #endif /* COUNT_IPIS */
1312 atomic_add_int(&smp_tlb_wait, 1);
1316 * This is called once the rest of the system is up and running and we're
1317 * ready to let the APs out of the pen.
1320 release_aps(void *dummy __unused)
1325 atomic_store_rel_int(&aps_ready, 1);
1326 while (smp_started == 0)
1329 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
1333 * Set up interrupt counters for IPI handlers.
1336 mp_ipi_intrcnt(void *dummy)
1342 snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
1343 intrcnt_add(buf, &ipi_invltlb_counts[i]);
1344 snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
1345 intrcnt_add(buf, &ipi_invlrng_counts[i]);
1346 snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
1347 intrcnt_add(buf, &ipi_invlpg_counts[i]);
1348 snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
1349 intrcnt_add(buf, &ipi_invlcache_counts[i]);
1350 snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
1351 intrcnt_add(buf, &ipi_preempt_counts[i]);
1352 snprintf(buf, sizeof(buf), "cpu%d:ast", i);
1353 intrcnt_add(buf, &ipi_ast_counts[i]);
1354 snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
1355 intrcnt_add(buf, &ipi_rendezvous_counts[i]);
1356 snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
1357 intrcnt_add(buf, &ipi_hardclock_counts[i]);
1360 SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
1364 * Flush the TLB on other CPUs.
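 *
 * The initiating CPU publishes smp_tlb_addr1/addr2 and smp_tlb_pmap under
 * smp_ipi_mtx, sends the chosen invalidation IPI, and spins until
 * smp_tlb_wait has been incremented by every targeted CPU; each handler
 * below performs its local invalidation and then bumps smp_tlb_wait.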
1367 /* Variables needed for SMP TLB shootdown. */
1368 static vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
1369 pmap_t smp_tlb_pmap;
1370 volatile int smp_tlb_wait;
1373 #define read_eflags() read_rflags()
1377 smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
1378 vm_offset_t addr1, vm_offset_t addr2)
1380 int cpu, ncpu, othercpus;
1382 othercpus = mp_ncpus - 1; /* does not shoot down self */
1385 * Check for other CPUs. Return if none.
1387 if (CPU_ISFULLSET(&mask)) {
1391 CPU_CLR(PCPU_GET(cpuid), &mask);
1392 if (CPU_EMPTY(&mask))
1396 if (!(read_eflags() & PSL_I))
1397 panic("%s: interrupts disabled", __func__);
1398 mtx_lock_spin(&smp_ipi_mtx);
1399 smp_tlb_addr1 = addr1;
1400 smp_tlb_addr2 = addr2;
1401 smp_tlb_pmap = pmap;
1403 if (CPU_ISFULLSET(&mask)) {
1405 ipi_all_but_self(vector);
1408 while ((cpu = CPU_FFS(&mask)) != 0) {
1410 CPU_CLR(cpu, &mask);
1411 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__,
1413 ipi_send_cpu(cpu, vector);
1417 while (smp_tlb_wait < ncpu)
1419 mtx_unlock_spin(&smp_ipi_mtx);
1423 smp_masked_invltlb(cpuset_t mask, pmap_t pmap)
1427 smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0);
1428 #ifdef COUNT_XINVLTLB_HITS
1435 smp_masked_invlpg(cpuset_t mask, vm_offset_t addr)
1439 smp_targeted_tlb_shootdown(mask, IPI_INVLPG, NULL, addr, 0);
1440 #ifdef COUNT_XINVLTLB_HITS
1447 smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2)
1451 smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, NULL,
1453 #ifdef COUNT_XINVLTLB_HITS
1455 ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
1461 smp_cache_flush(void)
1465 smp_targeted_tlb_shootdown(all_cpus, IPI_INVLCACHE, NULL,
1471 * Handlers for TLB related IPIs
1474 invltlb_handler(void)
1476 #ifdef COUNT_XINVLTLB_HITS
1477 xhits_gbl[PCPU_GET(cpuid)]++;
1478 #endif /* COUNT_XINVLTLB_HITS */
1480 (*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
1481 #endif /* COUNT_IPIS */
1483 if (smp_tlb_pmap == kernel_pmap)
1487 atomic_add_int(&smp_tlb_wait, 1);
1491 invlpg_handler(void)
1493 #ifdef COUNT_XINVLTLB_HITS
1494 xhits_pg[PCPU_GET(cpuid)]++;
1495 #endif /* COUNT_XINVLTLB_HITS */
1497 (*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
1498 #endif /* COUNT_IPIS */
1500 invlpg(smp_tlb_addr1);
1501 atomic_add_int(&smp_tlb_wait, 1);
1505 invlrng_handler(void)
1509 #ifdef COUNT_XINVLTLB_HITS
1510 xhits_rng[PCPU_GET(cpuid)]++;
1511 #endif /* COUNT_XINVLTLB_HITS */
1513 (*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
1514 #endif /* COUNT_IPIS */
1516 addr = smp_tlb_addr1;
1520 } while (addr < smp_tlb_addr2);
1522 atomic_add_int(&smp_tlb_wait, 1);