2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * This module holds the global variables and machine-independent functions
30 * used for the kernel SMP support.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
46 #include <sys/sched.h>
48 #include <sys/sysctl.h>
50 #include <machine/cpu.h>
51 #include <machine/smp.h>
53 #include "opt_sched.h"
56 MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");
58 volatile cpuset_t stopped_cpus;
59 volatile cpuset_t started_cpus;
60 volatile cpuset_t suspended_cpus;
61 cpuset_t hlt_cpus_mask;
62 cpuset_t logical_cpus_mask;
64 void (*cpustop_restartfunc)(void);
67 static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);
69 /* This is used in modules that need to work in both SMP and UP. */
73 /* export this for libkvm consumers. */
74 int mp_maxcpus = MAXCPU;
76 volatile int smp_started;
79 static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
82 SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
85 SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
86 0, "Max number of CPUs that the system was compiled for.");
88 SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD|CTLTYPE_INT|CTLFLAG_MPSAFE,
89 NULL, 0, sysctl_kern_smp_active, "I",
90 "Indicates system is running in SMP mode");
92 int smp_disabled = 0; /* has smp been disabled? */
93 SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
94 &smp_disabled, 0, "SMP has been disabled from the loader");
96 int smp_cpus = 1; /* how many cpu's running */
97 SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
98 "Number of CPUs online");
100 int smp_threads_per_core = 1; /* how many SMT threads are running per core */
101 SYSCTL_INT(_kern_smp, OID_AUTO, threads_per_core, CTLFLAG_RD|CTLFLAG_CAPRD,
102 &smp_threads_per_core, 0, "Number of SMT threads online per core");
104 int mp_ncores = -1; /* how many physical cores running */
105 SYSCTL_INT(_kern_smp, OID_AUTO, cores, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_ncores, 0,
106 "Number of CPUs online");
108 int smp_topology = 0; /* Which topology we're using. */
109 SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
110 "Topology override setting; 0 is default provided by hardware.");
113 /* Enable forwarding of a signal to a process running on a different CPU */
114 static int forward_signal_enabled = 1;
115 SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
116 &forward_signal_enabled, 0,
117 "Forwarding of a signal to a process on a different CPU");
119 /* Variables needed for SMP rendezvous. */
120 static volatile int smp_rv_ncpus;
121 static void (*volatile smp_rv_setup_func)(void *arg);
122 static void (*volatile smp_rv_action_func)(void *arg);
123 static void (*volatile smp_rv_teardown_func)(void *arg);
124 static void *volatile smp_rv_func_arg;
125 static volatile int smp_rv_waiters[4];
128 * Shared mutex to restrict busywaits between smp_rendezvous() and
129 * smp(_targeted)_tlb_shootdown(). A deadlock occurs if both of these
130 * functions trigger at once and cause multiple CPUs to busywait with
131 * interrupts disabled.
133 struct mtx smp_ipi_mtx;
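/*
 * Illustrative sketch (hypothetical caller, not required to look exactly
 * like this): code that posts IPIs and then busy-waits with interrupts
 * disabled should serialize on smp_ipi_mtx, as smp_rendezvous_cpus() below
 * does:
 *
 *	mtx_lock_spin(&smp_ipi_mtx);
 *	... post IPIs and spin for acknowledgement ...
 *	mtx_unlock_spin(&smp_ipi_mtx);
 */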
136 * Let the MD SMP code initialize mp_maxid very early if it can.
139 mp_setmaxid(void *dummy)
144 KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
145 KASSERT(mp_ncpus > 1 || mp_maxid == 0,
146 ("%s: one CPU but mp_maxid is not zero", __func__));
147 KASSERT(mp_maxid >= mp_ncpus - 1,
148 ("%s: counters out of sync: max %d, count %d", __func__,
149 mp_maxid, mp_ncpus));
151 SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
154 * Call the MD SMP initialization code.
157 mp_start(void *dummy)
160 mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
162 /* Probe for MP hardware. */
163 if (smp_disabled != 0 || cpu_mp_probe() == 0) {
166 CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
171 printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
174 /* Provide a default for most architectures that don't have SMT/HTT. */
176 mp_ncores = mp_ncpus;
180 SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
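/*
 * Poke the CPU on which 'td' is currently running so that the thread
 * re-enters ast() and notices its pending signal (see the signotify()
 * note in the body).
 */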
183 forward_signal(struct thread *td)
188 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
189 * this thread, so all we need to do is poke it if it is currently
190 * executing so that it executes ast().
192 THREAD_LOCK_ASSERT(td, MA_OWNED);
193 KASSERT(TD_IS_RUNNING(td),
194 ("forward_signal: thread is not TDS_RUNNING"));
196 CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
198 if (!smp_started || cold || KERNEL_PANICKED())
200 if (!forward_signal_enabled)
203 /* No need to IPI ourself. */
210 ipi_cpu(id, IPI_AST);
214 * When called, the executing CPU will send an IPI to all other CPUs
215 * requesting that they halt execution.
217 * Usually (but not necessarily) called with 'other_cpus' as its arg.
219 * - Signals all CPUs in map to stop.
220 * - Waits for each to stop.
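 *
 * Illustrative (hypothetical) caller that stops every CPU but the current
 * one:
 *
 *	cpuset_t other_cpus;
 *
 *	other_cpus = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 *	stop_cpus(other_cpus);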
228 #if defined(__amd64__) || defined(__i386__)
234 generic_stop_cpus(cpuset_t map, u_int type)
237 char cpusetbuf[CPUSETBUFSIZ];
239 static volatile u_int stopping_cpu = NOCPU;
241 volatile cpuset_t *cpus;
244 type == IPI_STOP || type == IPI_STOP_HARD
246 || type == IPI_SUSPEND
248 , ("%s: invalid stop type", __func__));
253 CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
254 cpusetobj_strprint(cpusetbuf, &map), type);
258 * When suspending, ensure there are no IPIs in progress.
259 * IPIs that have been issued, but not yet delivered (e.g.
260 * not pending on a vCPU when running under virtualization)
261 * will be lost, violating FreeBSD's assumption of reliable
264 if (type == IPI_SUSPEND)
265 mtx_lock_spin(&smp_ipi_mtx);
269 if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
271 if (stopping_cpu != PCPU_GET(cpuid))
272 while (atomic_cmpset_int(&stopping_cpu, NOCPU,
273 PCPU_GET(cpuid)) == 0)
274 while (stopping_cpu != NOCPU)
275 cpu_spinwait(); /* spin */
277 /* send the stop IPI to all CPUs in map */
278 ipi_selected(map, type);
284 if (type == IPI_SUSPEND)
285 cpus = &suspended_cpus;
288 cpus = &stopped_cpus;
291 while (!CPU_SUBSET(cpus, &map)) {
295 if (i == 100000000) {
296 printf("timeout stopping cpus\n");
302 if (type == IPI_SUSPEND)
303 mtx_unlock_spin(&smp_ipi_mtx);
306 stopping_cpu = NOCPU;
311 stop_cpus(cpuset_t map)
314 return (generic_stop_cpus(map, IPI_STOP));
318 stop_cpus_hard(cpuset_t map)
321 return (generic_stop_cpus(map, IPI_STOP_HARD));
326 suspend_cpus(cpuset_t map)
329 return (generic_stop_cpus(map, IPI_SUSPEND));
334 * Called by a CPU to restart stopped CPUs.
336 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
338 * - Signals all CPUs in map to restart.
339 * - Waits for each to restart.
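 *
 * restart_cpus() pairs with stop_cpus()/stop_cpus_hard(); resume_cpus()
 * pairs with suspend_cpus().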
347 generic_restart_cpus(cpuset_t map, u_int type)
350 char cpusetbuf[CPUSETBUFSIZ];
352 volatile cpuset_t *cpus;
355 KASSERT(type == IPI_STOP || type == IPI_STOP_HARD
356 || type == IPI_SUSPEND, ("%s: invalid stop type", __func__));
361 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
363 if (type == IPI_SUSPEND)
364 cpus = &resuming_cpus;
366 cpus = &stopped_cpus;
368 /* signal other cpus to restart */
369 if (type == IPI_SUSPEND)
370 CPU_COPY_STORE_REL(&map, &toresume_cpus);
372 CPU_COPY_STORE_REL(&map, &started_cpus);
375 * Wake up any CPUs stopped with MWAIT. From MI code we can't tell if
376 * MONITOR/MWAIT is enabled, but the potentially redundant writes are
377 * relatively inexpensive.
379 if (type == IPI_STOP) {
380 struct monitorbuf *mb;
384 if (!CPU_ISSET(id, &map))
387 mb = &pcpu_find(id)->pc_monitorbuf;
388 atomic_store_int(&mb->stop_state,
389 MONITOR_STOPSTATE_RUNNING);
393 if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
394 /* wait for each to clear its bit */
395 while (CPU_OVERLAP(cpus, &map))
399 KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
400 ("%s: invalid stop type", __func__));
405 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
407 cpus = &stopped_cpus;
409 /* signal other cpus to restart */
410 CPU_COPY_STORE_REL(&map, &started_cpus);
412 /* wait for each to clear its bit */
413 while (CPU_OVERLAP(cpus, &map))
420 restart_cpus(cpuset_t map)
423 return (generic_restart_cpus(map, IPI_STOP));
428 resume_cpus(cpuset_t map)
431 return (generic_restart_cpus(map, IPI_SUSPEND));
437 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
438 * (if specified), rendezvous, execute the action function (if specified),
439 * rendezvous again, execute the teardown function (if specified), and then
442 * Note that the supplied external functions _must_ be reentrant and aware
443 * that they are running in parallel and in an unknown lock context.
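 *
 * A minimal illustrative caller (hypothetical, not part of this file) that
 * runs an action on every CPU and skips the setup and teardown barriers:
 *
 *	static void
 *	example_action(void *arg __unused)
 *	{
 *		... runs on each CPU with interrupts disabled ...
 *	}
 *
 *	smp_rendezvous(smp_no_rendezvous_barrier, example_action,
 *	    smp_no_rendezvous_barrier, NULL);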
446 smp_rendezvous_action(void)
449 void *local_func_arg;
450 void (*local_setup_func)(void*);
451 void (*local_action_func)(void*);
452 void (*local_teardown_func)(void*);
457 /* Ensure we have up-to-date values. */
458 atomic_add_acq_int(&smp_rv_waiters[0], 1);
459 while (smp_rv_waiters[0] < smp_rv_ncpus)
462 /* Fetch rendezvous parameters after acquire barrier. */
463 local_func_arg = smp_rv_func_arg;
464 local_setup_func = smp_rv_setup_func;
465 local_action_func = smp_rv_action_func;
466 local_teardown_func = smp_rv_teardown_func;
469 * Use a nested critical section to prevent any preemptions
470 * from occurring during a rendezvous action routine.
471 * Specifically, if a rendezvous handler is invoked via an IPI
472 * and the interrupted thread was in the critical_exit()
473 * function after setting td_critnest to 0 but before
474 * performing a deferred preemption, this routine can be
475 * invoked with td_critnest set to 0 and td_owepreempt true.
476 * In that case, a critical_exit() during the rendezvous
477 * action would trigger a preemption which is not permitted in
478 * a rendezvous action. To fix this, wrap all of the
479 * rendezvous action handlers in a critical section. We
480 * cannot use a regular critical section however as having
481 * critical_exit() preempt from this routine would also be
482 * problematic (the preemption must not occur before the IPI
483 * has been acknowledged via an EOI). Instead, we
484 * intentionally ignore td_owepreempt when leaving the
485 * critical section. This should be harmless because we do
486 * not permit rendezvous action routines to schedule threads,
487 * and thus td_owepreempt should never transition from 0 to 1
488 * during this routine.
493 owepreempt = td->td_owepreempt;
497 * If requested, run a setup function before the main action
498 * function. Ensure all CPUs have completed the setup
499 * function before moving on to the action function.
501 if (local_setup_func != smp_no_rendezvous_barrier) {
502 if (smp_rv_setup_func != NULL)
503 smp_rv_setup_func(smp_rv_func_arg);
504 atomic_add_int(&smp_rv_waiters[1], 1);
505 while (smp_rv_waiters[1] < smp_rv_ncpus)
509 if (local_action_func != NULL)
510 local_action_func(local_func_arg);
512 if (local_teardown_func != smp_no_rendezvous_barrier) {
514 * Signal that the main action has been completed. If a
515 * full exit rendezvous is requested, then all CPUs will
516 * wait here until all CPUs have finished the main action.
518 atomic_add_int(&smp_rv_waiters[2], 1);
519 while (smp_rv_waiters[2] < smp_rv_ncpus)
522 if (local_teardown_func != NULL)
523 local_teardown_func(local_func_arg);
527 * Signal that the rendezvous is fully completed by this CPU.
528 * This means that no member of the smp_rv_* pseudo-structure will be
529 * accessed by this target CPU after this point; in particular,
530 * the memory pointed to by smp_rv_func_arg.
532 * The release semantic ensures that all accesses performed by
533 * the current CPU are visible when smp_rendezvous_cpus()
534 * returns, by synchronizing with the
535 * atomic_load_acq_int(&smp_rv_waiters[3]).
537 atomic_add_rel_int(&smp_rv_waiters[3], 1);
540 KASSERT(owepreempt == td->td_owepreempt,
541 ("rendezvous action changed td_owepreempt"));
545 smp_rendezvous_cpus(cpuset_t map,
546 void (* setup_func)(void *),
547 void (* action_func)(void *),
548 void (* teardown_func)(void *),
551 int curcpumap, i, ncpus = 0;
553 /* See comments in the !SMP case. */
556 if (setup_func != NULL)
558 if (action_func != NULL)
560 if (teardown_func != NULL)
567 * Make sure we come here with interrupts enabled. Otherwise we
568 * livelock if smp_ipi_mtx is owned by a thread which sent us an IPI.
570 MPASS(curthread->td_md.md_spinlock_count == 0);
573 if (CPU_ISSET(i, &map))
577 panic("ncpus is 0 with non-zero map");
579 mtx_lock_spin(&smp_ipi_mtx);
581 /* Pass rendezvous parameters via global variables. */
582 smp_rv_ncpus = ncpus;
583 smp_rv_setup_func = setup_func;
584 smp_rv_action_func = action_func;
585 smp_rv_teardown_func = teardown_func;
586 smp_rv_func_arg = arg;
587 smp_rv_waiters[1] = 0;
588 smp_rv_waiters[2] = 0;
589 smp_rv_waiters[3] = 0;
590 atomic_store_rel_int(&smp_rv_waiters[0], 0);
593 * Signal other processors, which will enter the IPI with
596 curcpumap = CPU_ISSET(curcpu, &map);
597 CPU_CLR(curcpu, &map);
598 ipi_selected(map, IPI_RENDEZVOUS);
600 /* Check if the current CPU is in the map */
602 smp_rendezvous_action();
605 * Ensure that the master CPU waits for all the other
606 * CPUs to finish the rendezvous, so that smp_rv_*
607 * pseudo-structure and the arg are guaranteed to not
610 * Load acquire synchronizes with the release add in
611 * smp_rendezvous_action(), which ensures that our caller sees
612 * all memory actions done by the called functions on other
615 while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
618 mtx_unlock_spin(&smp_ipi_mtx);
622 smp_rendezvous(void (* setup_func)(void *),
623 void (* action_func)(void *),
624 void (* teardown_func)(void *),
627 smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
630 static struct cpu_group group[MAXCPU * MAX_CACHE_LEVELS + 1];
635 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
636 struct cpu_group *top;
639 * Check for a fake topology request for debugging purposes.
641 switch (smp_topology) {
643 /* Dual core with no sharing. */
644 top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
647 /* No topology, all cpus are equal. */
648 top = smp_topo_none();
651 /* Dual core with shared L2. */
652 top = smp_topo_1level(CG_SHARE_L2, 2, 0);
655 /* quad core, shared l3 among each package, private l2. */
656 top = smp_topo_1level(CG_SHARE_L3, 4, 0);
659 /* quad core, 2 dualcore parts on each package share l2. */
660 top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
663 /* Single-core 2xHTT */
664 top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
667 /* quad core with a shared l3, 8 threads sharing L2. */
668 top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
672 /* Default, ask the system what it wants. */
677 * Verify the returned topology.
679 if (top->cg_count != mp_ncpus)
680 panic("Built bad topology at %p. CPU count %d != %d",
681 top, top->cg_count, mp_ncpus);
682 if (CPU_CMP(&top->cg_mask, &all_cpus))
683 panic("Built bad topology at %p. CPU mask (%s) != (%s)",
684 top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
685 cpusetobj_strprint(cpusetbuf2, &all_cpus));
688 * Collapse nonsense levels that may be created out of convenience by
689 * the MD layers. They cause extra work in the search functions.
691 while (top->cg_children == 1) {
692 top = &top->cg_child[0];
693 top->cg_parent = NULL;
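/*
 * Hand out 'count' cpu_group structures from the statically allocated
 * group[] array above.
 */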
699 smp_topo_alloc(u_int count)
706 return (&group[curr]);
712 struct cpu_group *top;
715 top->cg_parent = NULL;
716 top->cg_child = NULL;
717 top->cg_mask = all_cpus;
718 top->cg_count = mp_ncpus;
719 top->cg_children = 0;
720 top->cg_level = CG_SHARE_NONE;
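/*
 * Add a leaf group of 'count' CPUs starting at CPU 'start' under 'parent',
 * propagating the new mask and count up through all ancestors.
 */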
727 smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
728 int count, int flags, int start)
730 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
735 for (i = 0; i < count; i++, start++)
736 CPU_SET(start, &mask);
737 child->cg_parent = parent;
738 child->cg_child = NULL;
739 child->cg_children = 0;
740 child->cg_level = share;
741 child->cg_count = count;
742 child->cg_flags = flags;
743 child->cg_mask = mask;
744 parent->cg_children++;
745 for (; parent != NULL; parent = parent->cg_parent) {
746 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
747 panic("Duplicate children in %p. mask (%s) child (%s)",
749 cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
750 cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
751 CPU_OR(&parent->cg_mask, &child->cg_mask);
752 parent->cg_count += child->cg_count;
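/*
 * Build a topology with a single level below the root: one child group for
 * each 'count' CPUs, sharing at level 'share'.
 */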
759 smp_topo_1level(int share, int count, int flags)
761 struct cpu_group *child;
762 struct cpu_group *top;
769 packages = mp_ncpus / count;
770 top->cg_child = child = &group[1];
771 top->cg_level = CG_SHARE_NONE;
772 for (i = 0; i < packages; i++, child++)
773 cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
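/*
 * Build a two-level topology: groups of 'l2count' * 'l1count' CPUs sharing
 * at level 'l2share', each containing leaf groups of 'l1count' CPUs sharing
 * at level 'l1share'.
 */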
778 smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
781 struct cpu_group *top;
782 struct cpu_group *l1g;
783 struct cpu_group *l2g;
792 top->cg_level = CG_SHARE_NONE;
793 top->cg_children = mp_ncpus / (l2count * l1count);
794 l1g = l2g + top->cg_children;
795 for (i = 0; i < top->cg_children; i++, l2g++) {
796 l2g->cg_parent = top;
798 l2g->cg_level = l2share;
799 for (j = 0; j < l2count; j++, l1g++)
800 cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
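/*
 * Walk the topology tree from 'top' and return the smallest group that
 * contains 'cpu'.
 */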
807 smp_topo_find(struct cpu_group *top, int cpu)
809 struct cpu_group *cg;
814 CPU_SETOF(cpu, &mask);
817 if (!CPU_OVERLAP(&cg->cg_mask, &mask))
819 if (cg->cg_children == 0)
821 children = cg->cg_children;
822 for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
823 if (CPU_OVERLAP(&cg->cg_mask, &mask))
831 smp_rendezvous_cpus(cpuset_t map,
832 void (*setup_func)(void *),
833 void (*action_func)(void *),
834 void (*teardown_func)(void *),
838 * In the !SMP case we just need to ensure the same initial conditions
842 if (setup_func != NULL)
844 if (action_func != NULL)
846 if (teardown_func != NULL)
852 smp_rendezvous(void (*setup_func)(void *),
853 void (*action_func)(void *),
854 void (*teardown_func)(void *),
858 smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
863 * Provide dummy SMP support for UP kernels. Modules that need to use SMP
864 * APIs will still work using this dummy support.
867 mp_setvariables_for_up(void *dummy)
871 mp_maxid = PCPU_GET(cpuid);
872 CPU_SETOF(mp_maxid, &all_cpus);
873 KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
875 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
876 mp_setvariables_for_up, NULL);
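/*
 * Sentinel setup/teardown function: passing it to the rendezvous routines
 * skips the corresponding barrier, so it should never actually be executed
 * once SMP is up (hence the assertion below).
 */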
880 smp_no_rendezvous_barrier(void *dummy)
883 KASSERT((!smp_started), ("smp_no_rendezvous called and smp is started"));
888 * Wait for specified idle threads to switch once. This ensures that even
889 * preempted threads have cycled through the switch function once,
890 * exiting their codepaths. This allows us to change global pointers
891 * with no other synchronization.
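 *
 * Hypothetical usage when replacing a globally visible pointer:
 *
 *	old = global_ptr;
 *	global_ptr = new;
 *	quiesce_all_cpus("quiesce", 0);
 *	free(old, M_TEMP);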
894 quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
902 for (cpu = 0; cpu <= mp_maxid; cpu++) {
903 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
905 pcpu = pcpu_find(cpu);
906 gen[cpu] = pcpu->pc_idlethread->td_generation;
908 for (cpu = 0; cpu <= mp_maxid; cpu++) {
909 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
911 pcpu = pcpu_find(cpu);
912 thread_lock(curthread);
913 sched_bind(curthread, cpu);
914 thread_unlock(curthread);
915 while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
916 error = tsleep(quiesce_cpus, prio, wmesg, 1);
917 if (error != EWOULDBLOCK)
923 thread_lock(curthread);
924 sched_unbind(curthread);
925 thread_unlock(curthread);
931 quiesce_all_cpus(const char *wmesg, int prio)
934 return quiesce_cpus(all_cpus, wmesg, prio);
938 * Observe all CPUs not executing in critical section.
939 * We are not in one so the check for us is safe. If the found
940 * thread changes to something else we know the section was
944 quiesce_all_critical(void)
946 struct thread *td, *newtd;
950 MPASS(curthread->td_critnest == 0);
953 pcpu = cpuid_to_pcpu[cpu];
954 td = pcpu->pc_curthread;
956 if (td->td_critnest == 0)
959 newtd = (struct thread *)
960 atomic_load_acq_ptr((void *)pcpu->pc_curthread);
968 cpus_fence_seq_cst_issue(void *arg __unused)
971 atomic_thread_fence_seq_cst();
975 * Send an IPI forcing a sequentially consistent fence.
977 * Allows replacement of an explicit fence with a compiler barrier.
978 * Trades speed up during normal execution for a significant slowdown when
979 * the barrier is needed.
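 *
 * Illustrative pairing (hypothetical): the frequently executed side issues
 * only a compiler barrier, e.g. __compiler_membar(), while the rare side
 * calls cpus_fence_seq_cst() to force the full fence on every CPU.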
982 cpus_fence_seq_cst(void)
987 smp_no_rendezvous_barrier,
988 cpus_fence_seq_cst_issue,
989 smp_no_rendezvous_barrier,
993 cpus_fence_seq_cst_issue(NULL);
997 /* Extra care is taken with this sysctl because the data type is volatile */
999 sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
1003 active = smp_started;
1004 error = SYSCTL_OUT(req, &active, sizeof(active));
1010 topo_init_node(struct topo_node *node)
1013 bzero(node, sizeof(*node));
1014 TAILQ_INIT(&node->children);
1018 topo_init_root(struct topo_node *root)
1021 topo_init_node(root);
1022 root->type = TOPO_TYPE_SYSTEM;
1026 * Add a child node with the given ID under the given parent.
1027 * Do nothing if there is already a child with that ID.
1030 topo_add_node_by_hwid(struct topo_node *parent, int hwid,
1031 topo_node_type type, uintptr_t subtype)
1033 struct topo_node *node;
1035 TAILQ_FOREACH_REVERSE(node, &parent->children,
1036 topo_children, siblings) {
1037 if (node->hwid == hwid
1038 && node->type == type && node->subtype == subtype) {
1043 node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
1044 topo_init_node(node);
1045 node->parent = parent;
1048 node->subtype = subtype;
1049 TAILQ_INSERT_TAIL(&parent->children, node, siblings);
1050 parent->nchildren++;
1056 * Find a child node with the given ID under the given parent.
1059 topo_find_node_by_hwid(struct topo_node *parent, int hwid,
1060 topo_node_type type, uintptr_t subtype)
1063 struct topo_node *node;
1065 TAILQ_FOREACH(node, &parent->children, siblings) {
1066 if (node->hwid == hwid
1067 && node->type == type && node->subtype == subtype) {
1076 * Given a node change the order of its parent's child nodes such
1077 * that the node becomes the first child while preserving the cyclic
1078 * order of the children. In other words, the given node is promoted
1082 topo_promote_child(struct topo_node *child)
1084 struct topo_node *next;
1085 struct topo_node *node;
1086 struct topo_node *parent;
1088 parent = child->parent;
1089 next = TAILQ_NEXT(child, siblings);
1090 TAILQ_REMOVE(&parent->children, child, siblings);
1091 TAILQ_INSERT_HEAD(&parent->children, child, siblings);
1093 while (next != NULL) {
1095 next = TAILQ_NEXT(node, siblings);
1096 TAILQ_REMOVE(&parent->children, node, siblings);
1097 TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
1103 * Iterate to the next node in the depth-first search (traversal) of
1104 * the topology tree.
1107 topo_next_node(struct topo_node *top, struct topo_node *node)
1109 struct topo_node *next;
1111 if ((next = TAILQ_FIRST(&node->children)) != NULL)
1114 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1117 while (node != top && (node = node->parent) != top)
1118 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1125 * Iterate to the next node in the depth-first search of the topology tree,
1126 * but without descending below the current node.
1129 topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
1131 struct topo_node *next;
1133 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1136 while (node != top && (node = node->parent) != top)
1137 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1144 * Assign the given ID to the given topology node that represents a logical
1148 topo_set_pu_id(struct topo_node *node, cpuid_t id)
1151 KASSERT(node->type == TOPO_TYPE_PU,
1152 ("topo_set_pu_id: wrong node type: %u", node->type));
1153 KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
1154 ("topo_set_pu_id: cpuset already not empty"));
1156 CPU_SET(id, &node->cpuset);
1157 node->cpu_count = 1;
1160 while ((node = node->parent) != NULL) {
1161 KASSERT(!CPU_ISSET(id, &node->cpuset),
1162 ("logical ID %u is already set in node %p", id, node));
1163 CPU_SET(id, &node->cpuset);
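/*
 * Table driving topo_analyze(): for each topology level, the node type (and
 * optional subtype) that identifies nodes belonging to that level.
 */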
1168 static struct topology_spec {
1169 topo_node_type type;
1172 } topology_level_table[TOPO_LEVEL_COUNT] = {
1173 [TOPO_LEVEL_PKG] = { .type = TOPO_TYPE_PKG, },
1174 [TOPO_LEVEL_GROUP] = { .type = TOPO_TYPE_GROUP, },
1175 [TOPO_LEVEL_CACHEGROUP] = {
1176 .type = TOPO_TYPE_CACHE,
1177 .match_subtype = true,
1178 .subtype = CG_SHARE_L3,
1180 [TOPO_LEVEL_CORE] = { .type = TOPO_TYPE_CORE, },
1181 [TOPO_LEVEL_THREAD] = { .type = TOPO_TYPE_PU, },
1185 topo_analyze_table(struct topo_node *root, int all, enum topo_level level,
1186 struct topo_analysis *results)
1188 struct topology_spec *spec;
1189 struct topo_node *node;
1192 if (level >= TOPO_LEVEL_COUNT)
1195 spec = &topology_level_table[level];
1197 node = topo_next_node(root, root);
1199 while (node != NULL) {
1200 if (node->type != spec->type ||
1201 (spec->match_subtype && node->subtype != spec->subtype)) {
1202 node = topo_next_node(root, node);
1205 if (!all && CPU_EMPTY(&node->cpuset)) {
1206 node = topo_next_nonchild_node(root, node);
1212 if (!topo_analyze_table(node, all, level + 1, results))
1215 node = topo_next_nonchild_node(root, node);
1218 /* No explicit subgroups is essentially one subgroup. */
1222 if (!topo_analyze_table(root, all, level + 1, results))
1226 if (results->entities[level] == -1)
1227 results->entities[level] = count;
1228 else if (results->entities[level] != count)
1235 * Check if the topology is uniform, that is, each package has the same number
1236 * of cores in it and each core has the same number of threads (logical
1237 * processors) in it. If so, calculate the number of packages, the number of
1238 * groups per package, the number of cachegroups per group, and the number of
1239 * logical processors per cachegroup. The 'all' parameter tells whether to
1240 * include administratively disabled logical processors in the analysis.
1243 topo_analyze(struct topo_node *topo_root, int all,
1244 struct topo_analysis *results)
1247 results->entities[TOPO_LEVEL_PKG] = -1;
1248 results->entities[TOPO_LEVEL_CORE] = -1;
1249 results->entities[TOPO_LEVEL_THREAD] = -1;
1250 results->entities[TOPO_LEVEL_GROUP] = -1;
1251 results->entities[TOPO_LEVEL_CACHEGROUP] = -1;
1253 if (!topo_analyze_table(topo_root, all, TOPO_LEVEL_PKG, results))
1256 KASSERT(results->entities[TOPO_LEVEL_PKG] > 0,
1257 ("bug in topology or analysis"));