 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);

static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);

/* This is used in modules that need to work in both SMP and UP. */

/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;

static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");
SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD | CTLTYPE_INT, NULL, 0,
    sysctl_kern_smp_active, "I", "Indicates system is running in SMP mode");
int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");

int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];

 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
struct mtx smp_ipi_mtx;
 * Let the MD SMP code initialize mp_maxid very early if it can.
mp_setmaxid(void *dummy)
	KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
	KASSERT(mp_ncpus > 1 || mp_maxid == 0,
	    ("%s: one CPU but mp_maxid is not zero", __func__));
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__,
	    mp_maxid, mp_ncpus));
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

 * Call the MD SMP initialization code.
mp_start(void *dummy)
	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);

	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
forward_signal(struct thread *td)
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
	if (!forward_signal_enabled)

	/* No need to IPI ourself. */

	ipi_cpu(id, IPI_AST);
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 * - Signals all CPUs in map to stop.
 * - Waits for each to stop.
generic_stop_cpus(cpuset_t map, u_int type)
	char cpusetbuf[CPUSETBUFSIZ];
	static volatile u_int stopping_cpu = NOCPU;
	volatile cpuset_t *cpus;

#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
	    type == IPI_STOP || type == IPI_STOP_HARD,
	    ("%s: invalid stop type", __func__));

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);
#if defined(__amd64__) || defined(__i386__)
	 * When suspending, ensure there are no IPIs in progress.
	 * IPIs that have been issued, but not yet delivered (e.g.
	 * not pending on a vCPU when running under virtualization)
	 * will be lost, violating FreeBSD's assumption of reliable
	if (type == IPI_SUSPEND)
		mtx_lock_spin(&smp_ipi_mtx);
	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
		cpus = &stopped_cpus;

	while (!CPU_SUBSET(cpus, &map)) {
		if (i == 100000000) {
			printf("timeout stopping cpus\n");

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		mtx_unlock_spin(&smp_ipi_mtx);

	stopping_cpu = NOCPU;
stop_cpus(cpuset_t map)
	return (generic_stop_cpus(map, IPI_STOP));

stop_cpus_hard(cpuset_t map)
	return (generic_stop_cpus(map, IPI_STOP_HARD));

#if defined(__amd64__) || defined(__i386__)
suspend_cpus(cpuset_t map)
	return (generic_stop_cpus(map, IPI_SUSPEND));
 * Called by a CPU to restart stopped CPUs.
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
generic_restart_cpus(cpuset_t map, u_int type)
	char cpusetbuf[CPUSETBUFSIZ];
	volatile cpuset_t *cpus;

#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
	    type == IPI_STOP || type == IPI_STOP_HARD,
	    ("%s: invalid stop type", __func__));

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
		cpus = &stopped_cpus;

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(cpus, &map))

restart_cpus(cpuset_t map)
	return (generic_restart_cpus(map, IPI_STOP));

#if defined(__amd64__) || defined(__i386__)
resume_cpus(cpuset_t map)
	return (generic_restart_cpus(map, IPI_SUSPEND));
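
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * that briefly stops every other CPU and then restarts them.  The helper
 * name and the work performed while the CPUs are stopped are assumptions
 * made only for this example.
 */
#if 0
static void
example_stop_and_restart(void)
{
	cpuset_t other;

	other = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other);	/* never ask ourselves to stop */

	stop_cpus(other);	/* send IPI_STOP and spin until all have stopped */
	/* ... inspect or modify state while the other CPUs are spinning ... */
	restart_cpus(other);	/* clear their stopped state and let them resume */
}
#endif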
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
smp_rendezvous_action(void)
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	owepreempt = td->td_owepreempt;
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (smp_rv_setup_func != NULL)
			smp_rv_setup_func(smp_rv_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of the smp_rv_* pseudo-structure will be
	 * accessed by this target CPU after this point; in particular,
	 * the memory pointed to by smp_rv_func_arg.
	 * The release semantic ensures that all accesses performed by
	 * the current CPU are visible when smp_rendezvous_cpus()
	 * returns, by synchronizing with the
	 * atomic_load_acq_int(&smp_rv_waiters[3]).
	atomic_add_rel_int(&smp_rv_waiters[3], 1);

	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (setup_func != NULL)
	if (action_func != NULL)
	if (teardown_func != NULL)

		if (CPU_ISSET(i, &map))
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	 * Signal other processors, which will enter the IPI with
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
		smp_rendezvous_action();

	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * Load acquire synchronizes with the release add in
	 * smp_rendezvous_action(), which ensures that our caller sees
	 * all memory actions done by the called functions on other
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)

	mtx_unlock_spin(&smp_ipi_mtx);
smp_rendezvous(void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
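
/*
 * Illustrative sketch (not part of the original file): a hypothetical use of
 * smp_rendezvous() that runs an action on every CPU and waits for all of
 * them to finish.  Passing NULL for setup_func/teardown_func keeps the
 * corresponding barriers; passing smp_no_rendevous_barrier would skip them.
 */
#if 0
static void
example_count_cpus_action(void *arg)
{
	u_int *counter = arg;

	/* Runs on every CPU, with interrupts disabled. */
	atomic_add_int(counter, 1);
}

static u_int
example_count_cpus(void)
{
	u_int counter;

	counter = 0;
	smp_rendezvous(NULL, example_count_cpus_action, NULL, &counter);
	/* All CPUs have finished; counter is no longer being updated. */
	return (counter);
}
#endif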
static struct cpu_group group[MAXCPU];

	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	 * Check for a fake topology request for debugging purposes.
	switch (smp_topology) {
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		/* quad core, shared l3 among each package, private l2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		/* quad core, 2 dualcore parts on each package share l2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		/* Single-core 2xHTT */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		/* quad core with a shared l3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		/* Default, ask the system what it wants. */

	 * Verify the returned topology.
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	struct cpu_group *top;

	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;

smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];

	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p. mask (%s) child (%s)",
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
smp_topo_1level(int share, int count, int flags)
	struct cpu_group *child;
	struct cpu_group *top;

	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);

smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;

	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,

smp_topo_find(struct cpu_group *top, int cpu)
	struct cpu_group *cg;

	CPU_SETOF(cpu, &mask);
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
		if (cg->cg_children == 0)
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	 * In the !SMP case we just need to ensure the same initial conditions
	if (setup_func != NULL)
	if (action_func != NULL)
	if (teardown_func != NULL)

smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	/* See the comments in the smp_rendezvous_cpus() case. */
	if (setup_func != NULL)
	if (action_func != NULL)
	if (teardown_func != NULL)
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
mp_setvariables_for_up(void *dummy)
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);

smp_no_rendevous_barrier(void *dummy)
	KASSERT((!smp_started), ("smp_no_rendevous called and smp is started"));
 * Wait for the specified idle threads to switch once.  This ensures that even
 * preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

quiesce_all_cpus(const char *wmesg, int prio)
	return (quiesce_cpus(all_cpus, wmesg, prio));
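
/*
 * Illustrative sketch (not part of the original file): a hypothetical use of
 * quiesce_all_cpus() to retire the old value of a global function pointer.
 * Once the call returns, threads that were preempted have cycled through the
 * switch code, so none of them can still be executing through the old
 * pointer.  The hook name and wait message are assumptions for the example.
 */
#if 0
static void (*example_hook)(void);

static void
example_set_hook(void (*new_hook)(void))
{

	example_hook = new_hook;
	quiesce_all_cpus("hookqs", 0);
	/* It is now safe to free resources used only by the old hook. */
}
#endif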
/* Extra care is taken with this sysctl because the data type is volatile */
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
	active = smp_started;
	error = SYSCTL_OUT(req, &active, sizeof(active));