/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/cpu.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

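/*
 * Illustrative sketch (the warm-boot setup lives elsewhere in this file):
 * the BIOS is asked to skip its POST on the next CPU reset by writing the
 * "warm boot" shutdown status code,
 *
 *	outb(CMOS_REG, BIOS_RESET);
 *	outb(CMOS_DATA, BIOS_WARM);
 *
 * and a real-mode far pointer to the AP startup trampoline is stored in the
 * BIOS reset vector at 0x467 (WARMBOOT_OFF/WARMBOOT_SEG above).
 */
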
/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application processors */
int	boot_cpu_id = -1;	/* designated BSP */

extern struct pcpu __pcpu[];

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

struct pcb stoppcbs[MAXCPU];
struct susppcb **susppcbs;

#ifdef COUNT_IPIS
/* Interrupt counts. */
static u_long *ipi_preempt_counts[MAXCPU];
static u_long *ipi_ast_counts[MAXCPU];
u_long *ipi_invltlb_counts[MAXCPU];
u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
static u_long *ipi_hardclock_counts[MAXCPU];
#endif

/* Default cpu_ops implementation. */
struct cpu_ops cpu_ops;

/*
 * Local data and functions.
 */

static volatile cpuset_t ipi_nmi_pending;

/* used to hold the APs until we are ready to release them */
struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually setup
 * the APs.
 */
struct cpu_info cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];
int apic_cpuids[MAX_APIC_ID + 1];

/* Holds pending bitmap based IPIs per CPU */
volatile u_int cpu_ipi_pending[MAXCPU];

int cpu_logical;		/* logical cpus per core */
int cpu_cores;			/* cores per package */

static void	release_aps(void *dummy);

static u_int	hyperthreading_cpus;	/* logical cpus sharing L1 cache */
static int	hyperthreading_allowed = 1;

static void
mem_range_AP_init(void)
{

	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

static void
topo_probe_amd(void)
{
	int core_id_bits;
	int id;

	/* AMD processors do not support HTT. */
	cpu_logical = 1;

	if ((amd_feature2 & AMDID2_CMP) == 0) {
		cpu_cores = 1;
		return;
	}

	core_id_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
	    AMDID_COREID_SIZE_SHIFT;
	if (core_id_bits == 0) {
		cpu_cores = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
		return;
	}

	/* Fam 10h and newer should get here. */
	for (id = 0; id <= MAX_APIC_ID; id++) {
		/* Check logical CPU availability. */
		if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
			continue;
		/* Check if logical CPU has the same package ID. */
		if ((id >> core_id_bits) != (boot_cpu_id >> core_id_bits))
			continue;
		cpu_cores++;
	}
}

/*
 * Round up to the next power of two, if necessary, and then
 * take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
mask_width(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}

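/*
 * Worked example: mask_width(1) == 0, mask_width(2) == 1, mask_width(4) == 2;
 * a non-power-of-two argument is first rounded up, e.g. for x == 6,
 * powerof2(6) == 0, so fls(6 << 1) - 1 == fls(12) - 1 == 3, wide enough for
 * IDs 0..5.  mask_width(0) returns -1 since fls(0) == 0.
 */
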
static void
topo_probe_0x4(void)
{
	u_int p[4];
	int pkg_id_bits;
	int core_id_bits;
	int max_cores;
	int max_logical;
	int id;

	/* Both zero and one here mean one logical processor per package. */
	max_logical = (cpu_feature & CPUID_HTT) != 0 ?
	    (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
	if (max_logical <= 1)
		return;

	/*
	 * Because of the uniformity assumption we examine only
	 * those logical processors that belong to the same
	 * package as the BSP.  Further, we count the number of
	 * logical processors that belong to the same core
	 * as the BSP, thus deducing the number of threads per core.
	 */
	if (cpu_high >= 0x4) {
		cpuid_count(0x04, 0, p);
		max_cores = ((p[0] >> 26) & 0x3f) + 1;
	} else
		max_cores = 1;
	core_id_bits = mask_width(max_logical/max_cores);
	if (core_id_bits < 0)
		return;
	pkg_id_bits = core_id_bits + mask_width(max_cores);

	for (id = 0; id <= MAX_APIC_ID; id++) {
		/* Check logical CPU availability. */
		if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled)
			continue;
		/* Check if logical CPU has the same package ID. */
		if ((id >> pkg_id_bits) != (boot_cpu_id >> pkg_id_bits))
			continue;
		cpu_cores++;
		/* Check if logical CPU has the same package and core IDs. */
		if ((id >> core_id_bits) == (boot_cpu_id >> core_id_bits))
			cpu_logical++;
	}

	KASSERT(cpu_cores >= 1 && cpu_logical >= 1,
	    ("topo_probe_0x4 couldn't find BSP"));

	cpu_cores /= cpu_logical;
	hyperthreading_cpus = cpu_logical;
}

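/*
 * Worked example of the ID arithmetic above, with hypothetical numbers:
 * 2 threads/core and 2 cores/package give max_logical == 4 and
 * max_cores == 2, so core_id_bits == mask_width(4 / 2) == 1 and
 * pkg_id_bits == 1 + mask_width(2) == 2.  APIC ID 5 (binary 101) then
 * shares a package with the BSP iff (5 >> 2) matches the BSP's package,
 * and a core iff (5 >> 1) matches as well.
 */
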
static void
topo_probe_0xb(void)
{
	u_int p[4];
	int bits;
	int cnt;
	int i;
	int logical;
	int type;
	int x;

	/* We only support three levels for now. */
	for (i = 0; i < 3; i++) {
		cpuid_count(0x0b, i, p);

		/* Fall back if CPU leaf 11 doesn't really exist. */
		if (i == 0 && p[1] == 0) {
			topo_probe_0x4();
			return;
		}

		bits = p[0] & 0x1f;
		logical = p[1] &= 0xffff;
		type = (p[2] >> 8) & 0xff;
		if (type == 0 || logical == 0)
			break;
		/*
		 * Because of the uniformity assumption we examine only
		 * those logical processors that belong to the same
		 * package as the BSP.
		 */
		for (cnt = 0, x = 0; x <= MAX_APIC_ID; x++) {
			if (!cpu_info[x].cpu_present ||
			    cpu_info[x].cpu_disabled)
				continue;
			if (x >> bits == boot_cpu_id >> bits)
				cnt++;
		}
		if (type == CPUID_TYPE_SMT)
			cpu_logical = cnt;
		else if (type == CPUID_TYPE_CORE)
			cpu_cores = cnt;
	}
	if (cpu_logical == 0)
		cpu_logical = 1;
	cpu_cores /= cpu_logical;
}

/*
 * Both topology discovery code and code that consumes topology
 * information assume top-down uniformity of the topology.
 * That is, all physical packages must be identical and each
 * core in a package must have the same number of threads.
 * Topology information is queried only on the BSP, on which this
 * code runs and for which it can query CPUID information.
 * Then topology is extrapolated to all packages using the
 * uniformity assumption.
 */
static void
topo_probe(void)
{
	static int cpu_topo_probed = 0;

	if (cpu_topo_probed)
		return;

	CPU_ZERO(&logical_cpus_mask);
	if (mp_ncpus <= 1)
		cpu_cores = cpu_logical = 1;
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		topo_probe_amd();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL) {
		/*
		 * See Intel(R) 64 Architecture Processor
		 * Topology Enumeration article for details.
		 *
		 * Note that the 0x1 <= cpu_high < 4 case should be
		 * compatible with topo_probe_0x4() logic when
		 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1)
		 * or it should trigger the fallback otherwise.
		 */
		if (cpu_high >= 0xb)
			topo_probe_0xb();
		else if (cpu_high >= 0x1)
			topo_probe_0x4();
	}

	/*
	 * Fallback: assume each logical CPU is in a separate
	 * physical package.  That is, no multi-core, no SMT.
	 */
	if (cpu_cores == 0 || cpu_logical == 0)
		cpu_cores = cpu_logical = 1;
	cpu_topo_probed = 1;
}

struct cpu_group *
cpu_topo(void)
{
	int cg_flags;

	/*
	 * Determine whether any threading flags are
	 * necessary.
	 */
	topo_probe();
	if (cpu_logical > 1 && hyperthreading_cpus)
		cg_flags = CG_FLAG_HTT;
	else if (cpu_logical > 1)
		cg_flags = CG_FLAG_SMT;
	else
		cg_flags = 0;
	if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
		printf("WARNING: Non-uniform processors.\n");
		printf("WARNING: Using suboptimal topology.\n");
		return (smp_topo_none());
	}
	/*
	 * No multi-core or hyper-threaded.
	 */
	if (cpu_logical * cpu_cores == 1)
		return (smp_topo_none());
	/*
	 * Only HTT, no multi-core.
	 */
	if (cpu_logical > 1 && cpu_cores == 1)
		return (smp_topo_1level(CG_SHARE_L1, cpu_logical, cg_flags));
	/*
	 * Only multi-core, no HTT.
	 */
	if (cpu_cores > 1 && cpu_logical == 1)
		return (smp_topo_1level(CG_SHARE_L2, cpu_cores, cg_flags));
	/*
	 * Both HTT and multi-core.
	 */
	return (smp_topo_2level(CG_SHARE_L2, cpu_cores,
	    CG_SHARE_L1, cpu_logical, cg_flags));
}

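/*
 * Illustrative result, with hypothetical numbers: a box with 2 cores per
 * package and 2 HTT threads per core takes the last return above,
 * smp_topo_2level(CG_SHARE_L2, 2, CG_SHARE_L1, 2, CG_FLAG_HTT), yielding
 * per-package groups sharing L2, each holding per-core groups of two
 * hardware threads sharing L1.
 */
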
void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU) {
		mp_ncpus++;
		mp_maxid = mp_ncpus - 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	CPU_SETOF(0, &all_cpus);
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Setup
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	const char *hyperthread;
	int i;

	printf("FreeBSD/SMP: %d package(s) x %d core(s)",
	    mp_ncpus / (cpu_cores * cpu_logical), cpu_cores);
	if (hyperthreading_cpus > 1)
		printf(" x %d HTT threads", cpu_logical);
	else if (cpu_logical > 1)
		printf(" x %d SMT threads", cpu_logical);
	printf("\n");

	/* List active CPUs first. */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1; i < mp_ncpus; i++) {
		if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread,
		    cpu_apic_ids[i]);
	}

	/* List disabled CPUs last. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled)
			continue;
		if (cpu_info[i].cpu_hyperthread)
			hyperthread = "/HT";
		else
			hyperthread = "";
		printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread,
		    i);
	}
}

void
init_secondary_tail(void)
{
	u_int cpuid;

	/*
	 * On real hardware, switch to x2apic mode if possible.  Do it
	 * after aps_ready was signalled, to avoid manipulating the
	 * mode while the BSP might still want to send some IPI to us
	 * (second startup IPI is ignored on modern hardware etc).
	 */
	lapic_xapic_mode();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */

	/* set up FPU state on the AP */

	if (cpu_ops.cpu_init)
		cpu_ops.cpu_init();

	/* A quick check from sanity claus */
	cpuid = PCPU_GET(cpuid);
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", cpuid);
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
	printf("SMP: AP CPU #%d Launched!\n", cpuid);

	/* Determine if we are a logical CPU. */
	/* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */
	if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0)
		CPU_SET(cpuid, &logical_cpus_mask);

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	/*
	 * Enable global pages TLB extension.
	 * This also implicitly flushes the TLB.
	 */
	load_cr4(rcr4() | CR4_PGE);
	if (pmap_pcid_enabled)
		load_cr4(rcr4() | CR4_PCIDE);

	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until all the APs are up. */
	while (smp_started == 0)
		ia32_pause();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (cpu_logical > 1 &&
		    apic_id % cpu_logical != 0)
			continue;

		intr_add_cpu(i);
	}
}

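/*
 * E.g. with cpu_logical == 2, every odd APIC ID is the second hardware
 * thread of some core, so the loop above offers only APIC IDs 0, 2, 4, ...
 * (the first thread of each core) to the I/O APICs.
 */
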
/*
 * Assign logical CPU IDs to local APICs.
 */
void
assign_cpu_ids(void)
{
	u_int i;

	TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
	    &hyperthreading_allowed);

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) {
			cpu_info[i].cpu_hyperthread = 1;

			/*
			 * Don't use HT CPU if it has been disabled by a
			 * tunable.
			 */
			if (hyperthreading_allowed == 0) {
				cpu_info[i].cpu_disabled = 1;
				continue;
			}
		}

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	if (hyperthreading_allowed == 0 && hyperthreading_cpus > 1) {
		hyperthreading_cpus = 0;
		cpu_logical = 1;
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
	 *
	 * To minimize confusion for userland, we attempt to number
	 * CPUs such that all threads and cores in a package are
	 * grouped together.  For now we assume that the BSP is always
	 * the first thread in a package and just start adding APs
	 * starting with the BSP's APIC ID.
	 */
	mp_ncpus = 1;
	cpu_apic_ids[0] = boot_cpu_id;
	apic_cpuids[boot_cpu_id] = 0;
	for (i = boot_cpu_id + 1; i != boot_cpu_id;
	     i == MAX_APIC_ID ? i = 0 : i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			apic_cpuids[i] = mp_ncpus;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
    0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */

/*
 * Init and startup IPI.
 */
static void
ipi_startup(int apic_id, int vector)
{

	/*
	 * This attempts to follow the algorithm described in the
	 * Intel Multiprocessor Specification v1.4 in section B.4.
	 * For each IPI, we allow the local APIC ~20us to deliver the
	 * IPI.  If that times out, we panic.
	 */

	/*
	 * first we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched (P5
	 * bug), CPU waiting for STARTUP IPI.  OR this INIT IPI might be
	 * ignored.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
	lapic_ipi_wait(-1);

	/* Explicitly deassert the INIT IPI. */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
	    apic_id);

	DELAY(10000);		/* wait ~10mS */

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched, (P5 bug) this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue. OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run. OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	if (!lapic_ipi_wait(100))
		panic("Failed to deliver first STARTUP IPI to APIC %d",
		    apic_id);
	DELAY(200);		/* wait ~200uS */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	if (!lapic_ipi_wait(100))
		panic("Failed to deliver second STARTUP IPI to APIC %d",
		    apic_id);

	DELAY(200);		/* wait ~200uS */
}

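/*
 * Typical use (sketch): the AP bring-up path passes the page number of the
 * real-mode trampoline as `vector', e.g.
 *
 *	ipi_startup(apic_id, boot_address >> 12);
 *
 * since a STARTUP IPI makes the target begin executing at vector << 12;
 * boot_address here stands for the trampoline's physical address.
 */
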
/*
 * Send an IPI to a specified CPU, handling the bitmap logic.
 */
static void
ipi_send_cpu(int cpu, u_int ipi)
{
	u_int bitmap, old_pending, new_pending;

	KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu));

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
		do {
			old_pending = cpu_ipi_pending[cpu];
			new_pending = old_pending | bitmap;
		} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
		    old_pending, new_pending));
		if (old_pending)
			return;
	}
	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
}

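/*
 * Example of the bitmap path above: IPI_PREEMPT, IPI_AST and IPI_HARDCLOCK
 * are small bit indices rather than real vectors, so ipi_send_cpu(cpu,
 * IPI_PREEMPT) ORs (1 << IPI_PREEMPT) into cpu_ipi_pending[cpu] and raises
 * the shared IPI_BITMAP_VECTOR interrupt only if the word was previously
 * empty; ipi_bitmap_handler() below drains all pending bits in one pass.
 */
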
void
ipi_bitmap_handler(struct trapframe frame)
{
	struct trapframe *oldframe;
	struct thread *td;
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = &frame;
	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);
	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
#ifdef COUNT_IPIS
		(*ipi_preempt_counts[cpu])++;
#endif
		sched_preempt(td);
	}
	if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
		(*ipi_ast_counts[cpu])++;
#endif
		/* Nothing to do for AST */
	}
	if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
#ifdef COUNT_IPIS
		(*ipi_hardclock_counts[cpu])++;
#endif
		hardclockintr();
	}
	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
	int cpu;

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help to identify the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);

	while ((cpu = CPU_FFS(&cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &cpus);
		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
		ipi_send_cpu(cpu, ipi);
	}
}

/*
 * Send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help to identify the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_SET_ATOMIC(cpu, &ipi_nmi_pending);

	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
	ipi_send_cpu(cpu, ipi);
}

/*
 * Send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	if (IPI_IS_BITMAPED(ipi)) {
		ipi_selected(other_cpus, ipi);
		return;
	}

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help to identify the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

int
ipi_nmi_handler(void)
{
	u_int cpuid;

	/*
	 * Since there is no simple way to identify an NMI's source, assume
	 * that if the bit for the current CPU is set in the global pending
	 * bitmask, an IPI_STOP_HARD has been issued and should be handled.
	 */
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &ipi_nmi_pending))
		return (1);

	CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending);
	cpustop_handler();
	return (0);
}

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	u_int cpu;

	cpu = PCPU_GET(cpuid);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		ia32_pause();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);

#if defined(__amd64__) && defined(DDB)
	amd64_db_resume_dbreg();
#endif

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * Handle an IPI_SUSPEND by saving our current context and spinning until we
 * are resumed.
 */
void
cpususpend_handler(void)
{
	u_int cpu;

	mtx_assert(&smp_ipi_mtx, MA_NOTOWNED);

	cpu = PCPU_GET(cpuid);
	if (savectx(&susppcbs[cpu]->sp_pcb)) {
#ifdef __amd64__
		fpususpend(susppcbs[cpu]->sp_fpususpend);
#else
		npxsuspend(susppcbs[cpu]->sp_fpususpend);
#endif
		CPU_SET_ATOMIC(cpu, &suspended_cpus);
	} else {
#ifdef __amd64__
		fpuresume(susppcbs[cpu]->sp_fpususpend);
#else
		npxresume(susppcbs[cpu]->sp_fpususpend);
#endif
		PCPU_SET(switchtime, 0);
		PCPU_SET(switchticks, ticks);

		/* Indicate that we are resumed */
		CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	}

	/* Wait for resume */
	while (!CPU_ISSET(cpu, &started_cpus))
		ia32_pause();

	if (cpu_ops.cpu_resume)
		cpu_ops.cpu_resume();

	/* Resume MCA and local APIC */
	lapic_xapic_mode();
	mca_resume();
	lapic_setup(0);

	/* Indicate that we are resumed */
	CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	CPU_CLR_ATOMIC(cpu, &started_cpus);
}

void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	wbinvd();
	atomic_add_int(&smp_tlb_wait, 1);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;

	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

#ifdef COUNT_IPIS
/*
 * Setup interrupt counters for IPI handlers.
 */
static void
mp_ipi_intrcnt(void *dummy)
{
	char buf[64];
	int i;

	CPU_FOREACH(i) {
		snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
		intrcnt_add(buf, &ipi_invltlb_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
		intrcnt_add(buf, &ipi_invlrng_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
		intrcnt_add(buf, &ipi_invlpg_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
		intrcnt_add(buf, &ipi_invlcache_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
		intrcnt_add(buf, &ipi_preempt_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:ast", i);
		intrcnt_add(buf, &ipi_ast_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
		intrcnt_add(buf, &ipi_hardclock_counts[i]);
	}
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
#endif