/*-
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");

volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);

static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);

/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD | CTLTYPE_INT, NULL, 0,
    sysctl_kern_smp_active, "I", "Indicates system is running in SMP mode");

int smp_disabled = 0;	/* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");

int smp_cpus = 1;	/* how many cpu's running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{

	cpu_mp_setmaxid();

	KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
	KASSERT(mp_ncpus > 1 || mp_maxid == 0,
	    ("%s: one CPU but mp_maxid is not zero", __func__));
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__,
	    mp_maxid, mp_ncpus));
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}
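
/*
 * Illustrative caller sketch (hypothetical, not part of this file): the
 * signal delivery code marks the thread and then, with the thread lock
 * held, pokes the remote CPU:
 *
 *	thread_lock(td);
 *	signotify(td);
 *	if (TD_IS_RUNNING(td))
 *		forward_signal(td);
 *	thread_unlock(td);
 */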

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

#if defined(__amd64__) || defined(__i386__)
	/*
	 * When suspending, ensure there are no IPIs in progress.
	 * IPIs that have been issued, but not yet delivered (e.g.
	 * not pending on a vCPU when running under virtualization)
	 * will be lost, violating FreeBSD's assumption of reliable
	 * IPI delivery.
	 */
	if (type == IPI_SUSPEND)
		mtx_lock_spin(&smp_ipi_mtx);
#endif

	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		mtx_unlock_spin(&smp_ipi_mtx);
#endif

	stopping_cpu = NOCPU;
	return (1);
}

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}
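
/*
 * Illustrative usage sketch (hypothetical, not part of this file): stop
 * every CPU but the current one, operate on otherwise-shared state, and
 * then restart whatever was stopped:
 *
 *	cpuset_t other_cpus;
 *
 *	other_cpus = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 *	stop_cpus_hard(other_cpus);
 *	... inspect or modify global state ...
 *	restart_cpus(stopped_cpus);
 */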

#if defined(__amd64__) || defined(__i386__)
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(cpus, &map))
		cpu_spinwait();

	return (1);
}

int
restart_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_STOP));
}

#if defined(__amd64__) || defined(__i386__)
int
resume_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (smp_rv_setup_func != NULL)
			smp_rv_setup_func(smp_rv_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of smp_rv_* pseudo-structure will be
	 * accessed by this target CPU after this point; in particular,
	 * memory pointed by smp_rv_func_arg.
	 *
	 * The release semantic ensures that all accesses performed by
	 * the current CPU are visible when smp_rendezvous_cpus()
	 * returns, by synchronizing with the
	 * atomic_load_acq_int(&smp_rv_waiters[3]).
	 */
	atomic_add_rel_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}

void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 *
	 * Load acquire synchronizes with the release add in
	 * smp_rendezvous_action(), which ensures that our caller sees
	 * all memory actions done by the called functions on other
	 * CPUs.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
}
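
/*
 * Illustrative usage sketch (hypothetical, not part of this file): run a
 * handler on every CPU, with no setup or teardown barrier:
 *
 *	static void
 *	flush_local_state(void *arg __unused)
 *	{
 *		... operate on the per-CPU state of curcpu ...
 *	}
 *
 *	smp_rendezvous(smp_no_rendevous_barrier, flush_local_state,
 *	    smp_no_rendevous_barrier, NULL);
 *
 * Passing smp_no_rendevous_barrier skips the corresponding barrier in
 * smp_rendezvous_action() entirely, while NULL runs the barrier without
 * calling a function.
 */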

static struct cpu_group group[MAXCPU * MAX_CACHE_LEVELS + 1];

struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing.  */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2.  */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* Quad core, shared L3 among each package, private L2.  */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* Quad core, 2 dualcore parts on each package share L2.  */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* Quad core with a shared L3, 8 threads sharing L2.  */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	return (top);
}

struct cpu_group *
smp_topo_alloc(u_int count)
{
	static u_int index;
	u_int curr;

	curr = index;
	index += count;
	return (&group[curr]);
}

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}
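
/*
 * For example, with mp_ncpus == 8, smp_topo_1level(CG_SHARE_L2, 2, 0)
 * builds a flat tree of four two-CPU groups, each sharing an L2 cache:
 *
 *	root (CG_SHARE_NONE, CPUs 0-7)
 *	  +- group (CG_SHARE_L2, CPUs 0-1)
 *	  +- group (CG_SHARE_L2, CPUs 2-3)
 *	  +- group (CG_SHARE_L2, CPUs 4-5)
 *	  +- group (CG_SHARE_L2, CPUs 6-7)
 */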

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
}

#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

void
smp_rendezvous(void (*setup_func)(void *),
	       void (*action_func)(void *),
	       void (*teardown_func)(void *),
	       void *arg)
{

	/* See the comments in the smp_rendezvous_cpus() case. */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);

#endif /* SMP */

void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started), ("smp_no_rendevous called and smp is started"));
#endif
}

/*
 * Wait for the specified idle threads to switch once.  This ensures that even
 * preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}

int
quiesce_all_cpus(const char *wmesg, int prio)
{

	return (quiesce_cpus(all_cpus, wmesg, prio));
}
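
/*
 * Illustrative usage sketch (hypothetical, not part of this file): retire
 * an old hook function with no other synchronization:
 *
 *	old_hook = active_hook;
 *	active_hook = new_hook;
 *	quiesce_all_cpus("hookqs", PZERO);
 *	(no CPU can still be executing through 'old_hook' at this point)
 */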

/* Extra care is taken with this sysctl because the data type is volatile */
static int
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
{
	int error, active;

	active = smp_started;
	error = SYSCTL_OUT(req, &active, sizeof(active));
	return (error);
}

void
topo_init_node(struct topo_node *node)
{

	bzero(node, sizeof(*node));
	TAILQ_INIT(&node->children);
}

void
topo_init_root(struct topo_node *root)
{

	topo_init_node(root);
	root->type = TOPO_TYPE_SYSTEM;
}

/*
 * Add a child node with the given ID under the given parent.
 * Do nothing if there is already a child with that ID.
 */
struct topo_node *
topo_add_node_by_hwid(struct topo_node *parent, int hwid,
    topo_node_type type, uintptr_t subtype)
{
	struct topo_node *node;

	TAILQ_FOREACH_REVERSE(node, &parent->children,
	    topo_children, siblings) {
		if (node->hwid == hwid
		    && node->type == type && node->subtype == subtype) {
			return (node);
		}
	}

	node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
	topo_init_node(node);
	node->parent = parent;
	node->hwid = hwid;
	node->type = type;
	node->subtype = subtype;
	TAILQ_INSERT_TAIL(&parent->children, node, siblings);
	parent->nchildren++;

	return (node);
}

/*
 * Find a child node with the given ID under the given parent.
 */
struct topo_node *
topo_find_node_by_hwid(struct topo_node *parent, int hwid,
    topo_node_type type, uintptr_t subtype)
{
	struct topo_node *node;

	TAILQ_FOREACH(node, &parent->children, siblings) {
		if (node->hwid == hwid
		    && node->type == type && node->subtype == subtype) {
			return (node);
		}
	}

	return (NULL);
}

/*
 * Given a node, change the order of its parent's child nodes such
 * that the node becomes the first child while preserving the cyclic
 * order of the children.  In other words, the given node is promoted
 * by rotation.
 */
void
topo_promote_child(struct topo_node *child)
{
	struct topo_node *next;
	struct topo_node *node;
	struct topo_node *parent;

	parent = child->parent;
	next = TAILQ_NEXT(child, siblings);
	TAILQ_REMOVE(&parent->children, child, siblings);
	TAILQ_INSERT_HEAD(&parent->children, child, siblings);

	while (next != NULL) {
		node = next;
		next = TAILQ_NEXT(node, siblings);
		TAILQ_REMOVE(&parent->children, node, siblings);
		TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
		child = node;
	}
}
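
/*
 * For example, if the parent's children are ordered (A, B, C, D),
 * promoting C yields (C, D, A, B): C moves to the head while the cyclic
 * order of the remaining children is preserved.
 */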

/*
 * Iterate to the next node in the depth-first search (traversal) of
 * the topology tree.
 */
struct topo_node *
topo_next_node(struct topo_node *top, struct topo_node *node)
{
	struct topo_node *next;

	if ((next = TAILQ_FIRST(&node->children)) != NULL)
		return (next);

	if ((next = TAILQ_NEXT(node, siblings)) != NULL)
		return (next);

	while ((node = node->parent) != top)
		if ((next = TAILQ_NEXT(node, siblings)) != NULL)
			return (next);

	return (NULL);
}

/*
 * Iterate to the next node in the depth-first search of the topology tree,
 * but without descending below the current node.
 */
struct topo_node *
topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
{
	struct topo_node *next;

	if ((next = TAILQ_NEXT(node, siblings)) != NULL)
		return (next);

	while ((node = node->parent) != top)
		if ((next = TAILQ_NEXT(node, siblings)) != NULL)
			return (next);

	return (NULL);
}

/*
 * Assign the given ID to the given topology node that represents a logical
 * processor.
 */
void
topo_set_pu_id(struct topo_node *node, cpuid_t id)
{

	KASSERT(node->type == TOPO_TYPE_PU,
	    ("topo_set_pu_id: wrong node type: %u", node->type));
	KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
	    ("topo_set_pu_id: cpuset already not empty"));
	node->id = id;
	CPU_SET(id, &node->cpuset);
	node->cpu_count = 1;

	while ((node = node->parent) != NULL) {
		KASSERT(!CPU_ISSET(id, &node->cpuset),
		    ("logical ID %u is already set in node %p", id, node));
		CPU_SET(id, &node->cpuset);
		node->cpu_count++;
	}
}

/*
 * Check if the topology is uniform, that is, each package has the same number
 * of cores in it and each core has the same number of threads (logical
 * processors) in it.  If so, calculate the number of packages, the number of
 * cores per package and the number of logical processors per core.
 * The 'all' parameter tells whether to include administratively disabled
 * logical processors in the analysis.
 */
int
topo_analyze(struct topo_node *topo_root, int all,
    int *pkg_count, int *cores_per_pkg, int *thrs_per_core)
{
	struct topo_node *pkg_node;
	struct topo_node *core_node;
	struct topo_node *pu_node;
	int thrs_per_pkg;
	int cpp_counter;
	int tpc_counter;
	int tpp_counter;

	*pkg_count = 0;
	*cores_per_pkg = -1;
	*thrs_per_core = -1;
	thrs_per_pkg = -1;
	pkg_node = topo_root;
	while (pkg_node != NULL) {
		if (pkg_node->type != TOPO_TYPE_PKG) {
			pkg_node = topo_next_node(topo_root, pkg_node);
			continue;
		}
		if (!all && CPU_EMPTY(&pkg_node->cpuset)) {
			pkg_node = topo_next_nonchild_node(topo_root, pkg_node);
			continue;
		}

		(*pkg_count)++;

		cpp_counter = 0;
		tpp_counter = 0;
		core_node = pkg_node;
		while (core_node != NULL) {
			if (core_node->type == TOPO_TYPE_CORE) {
				if (!all && CPU_EMPTY(&core_node->cpuset)) {
					core_node =
					    topo_next_nonchild_node(pkg_node,
					    core_node);
					continue;
				}

				cpp_counter++;

				tpc_counter = 0;
				pu_node = core_node;
				while (pu_node != NULL) {
					if (pu_node->type == TOPO_TYPE_PU &&
					    (all || !CPU_EMPTY(&pu_node->cpuset)))
						tpc_counter++;
					pu_node = topo_next_node(core_node,
					    pu_node);
				}

				if (*thrs_per_core == -1)
					*thrs_per_core = tpc_counter;
				else if (*thrs_per_core != tpc_counter)
					return (0);
				tpp_counter += tpc_counter;

				core_node = topo_next_nonchild_node(pkg_node,
				    core_node);
			} else {
				/* PU node directly under PKG. */
				if (core_node->type == TOPO_TYPE_PU &&
				    (all || !CPU_EMPTY(&core_node->cpuset)))
					tpp_counter++;
				core_node = topo_next_node(pkg_node,
				    core_node);
			}
		}

		if (*cores_per_pkg == -1)
			*cores_per_pkg = cpp_counter;
		else if (*cores_per_pkg != cpp_counter)
			return (0);
		if (thrs_per_pkg == -1)
			thrs_per_pkg = tpp_counter;
		else if (thrs_per_pkg != tpp_counter)
			return (0);

		pkg_node = topo_next_nonchild_node(topo_root, pkg_node);
	}

	KASSERT(*pkg_count > 0,
	    ("bug in topology or analysis"));
	if (*cores_per_pkg == 0) {
		KASSERT(*thrs_per_core == -1 && thrs_per_pkg > 0,
		    ("bug in topology or analysis"));
		*thrs_per_core = thrs_per_pkg;
	}

	return (1);
}
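
/*
 * For example, a uniform system with two packages, four cores per package
 * and two SMT threads per core yields *pkg_count = 2, *cores_per_pkg = 4
 * and *thrs_per_core = 2, and the function returns 1; if the packages or
 * cores differ in size, it returns 0 instead.
 */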