/*-
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"
#ifdef SMP
volatile cpumask_t stopped_cpus;
volatile cpumask_t started_cpus;
cpumask_t idle_cpus_mask;
cpumask_t hlt_cpus_mask;
cpumask_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif
/* This is used in modules that need to work in both SMP and UP. */
cpumask_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;
SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD, &mp_maxcpus, 0,
    "Max number of CPUs that the system was compiled for.");

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN, &smp_disabled, 0,
    "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");
TUNABLE_INT("kern.smp.topology", &smp_topology);
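
/*
 * Illustrative loader.conf settings for the two tunables above (the
 * values shown are examples only):
 *
 *	kern.smp.disabled=1	# boot with just the bootstrap CPU
 *	kern.smp.topology=3	# debug: fake a dual-core, shared-L2 layout
 *
 * See the switch statement in smp_topo() below for the meaning of each
 * topology value.
 */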
#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");
/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];
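
/*
 * Each slot of smp_rv_waiters[] counts the CPUs that have reached the
 * corresponding stage of the rendezvous in smp_rendezvous_action():
 * [0] parameters fetched, [1] setup function done, [2] action function
 * done, [3] finished touching the smp_rv_* variables entirely.
 */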
/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;
/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		all_cpus = PCPU_GET(cpumask);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}
/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_stop_cpus(cpumask_t map, u_int type)
{
	static volatile u_int stopping_cpu = NOCPU;
	int i;

	KASSERT(
#if defined(__amd64__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%x) with %u type", map, type);

	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

	stopping_cpu = NOCPU;
	return (1);
}
int
stop_cpus(cpumask_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpumask_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if defined(__amd64__)
int
suspend_cpus(cpumask_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif
/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/* wait for each to clear its bit */
	while ((stopped_cpus & map) != 0)
		cpu_spinwait();

	return (1);
}
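
/*
 * Illustrative pairing of the two calls above (hypothetical caller; not
 * code from this file): a path that must quiesce the other CPUs, such
 * as a debugger, might do:
 *
 *	stop_cpus(PCPU_GET(other_cpus));
 *	... inspect or modify global state ...
 *	restart_cpus(stopped_cpus);
 *
 * Stopped CPUs spin until their bit appears in started_cpus, so the
 * window between the two calls should be kept short.
 */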
/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;
	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do not
	 * permit rendezvous action routines to schedule threads, and
	 * thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif
	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (smp_rv_setup_func != NULL)
			smp_rv_setup_func(smp_rv_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}
	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}
	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of the smp_rv_* pseudo-structure
	 * will be accessed by this target CPU after this point; in
	 * particular, the memory pointed to by smp_rv_func_arg.
	 */
	atomic_add_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}
void
smp_rendezvous_cpus(cpumask_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int i, ncpus = 0;

	/* See the comments in the !SMP version of this function below. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i)
		if (((1 << i) & map) != 0)
			ncpus++;
	if (ncpus == 0)
		panic("ncpus is 0 with map=0x%x", map);
	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);
	/* Check if the current CPU is in the map */
	if ((map & (1 << curcpu)) != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that the smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{
	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
}
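
/*
 * Illustrative use of smp_rendezvous() (hypothetical callback; not code
 * from this file): run example_action once on every CPU, skipping both
 * the setup and teardown barriers:
 *
 *	static volatile u_int example_count;
 *
 *	static void
 *	example_action(void *arg)
 *	{
 *		atomic_add_int(&example_count, 1);
 *	}
 *
 *	smp_rendezvous(smp_no_rendevous_barrier, example_action,
 *	    smp_no_rendevous_barrier, NULL);
 *
 * Note the distinction made by smp_rendezvous_action(): passing
 * smp_no_rendevous_barrier skips the corresponding barrier entirely,
 * while passing NULL performs the barrier but runs no function.
 */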
static struct cpu_group group[MAXCPU];
struct cpu_group *
smp_topo(void)
{
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* quad core, shared l3 among each package, private l2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* quad core, 2 dualcore parts on each package share l2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* quad core with a shared l3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (top->cg_mask != all_cpus)
		panic("Built bad topology at %p.  CPU mask 0x%X != 0x%X",
		    top, top->cg_mask, all_cpus);
	return (top);
}
struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = ~0U >> (32 - mp_ncpus);
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}
static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	cpumask_t mask;
	int i;

	for (mask = 0, i = 0; i < count; i++, start++)
		mask |= (1 << start);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if ((parent->cg_mask & child->cg_mask) != 0)
			panic("Duplicate children in %p.  mask 0x%X child 0x%X",
			    parent, parent->cg_mask, child->cg_mask);
		parent->cg_mask |= child->cg_mask;
		parent->cg_count += child->cg_count;
	}

	return (start);
}
struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}
struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}
struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpumask_t mask;
	int children;
	int i;

	mask = (1 << cpu);
	cg = top;
	for (;;) {
		if ((cg->cg_mask & mask) == 0)
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if ((cg->cg_mask & mask) != 0)
				break;
	}
	return (NULL);
}
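
/*
 * Illustrative result (hypothetical 4-CPU machine): the call
 * smp_topo_1level(CG_SHARE_L2, 2, 0) builds this cpu_group tree:
 *
 *	top:	cg_mask 0xf, cg_count 4, cg_level CG_SHARE_NONE
 *	child0:	cg_mask 0x3, cg_count 2, cg_level CG_SHARE_L2 (CPUs 0-1)
 *	child1:	cg_mask 0xc, cg_count 2, cg_level CG_SHARE_L2 (CPUs 2-3)
 *
 * smp_topo_find(top, 2) descends from top and returns child1, the leaf
 * group whose cg_mask contains CPU 2.
 */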
#else /* !SMP */

void
smp_rendezvous_cpus(cpumask_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}
void
smp_rendezvous(void (*setup_func)(void *),
	       void (*action_func)(void *),
	       void (*teardown_func)(void *),
	       void *arg)
{

	/* See the comments in smp_rendezvous_cpus() above. */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}
/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	all_cpus = PCPU_GET(cpumask);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT(!smp_started, ("smp_no_rendevous called and smp is started"));
#endif
}