/*-
 * SPDX-License-Identifier: Beerware
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff.  If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */

#ifndef _SYS_SMP_H_
#define	_SYS_SMP_H_

#include <sys/cpuset.h>
#include <sys/queue.h>

/*
 * Types of nodes in the topological tree.
 */
typedef enum {
	/* No node has this type; can be used in topo API calls. */
	TOPO_TYPE_DUMMY,
	/* Processing unit aka computing unit aka logical CPU. */
	TOPO_TYPE_PU,
	/* Physical subdivision of a package. */
	TOPO_TYPE_CORE,
	/* CPU L1/L2/L3 cache. */
	TOPO_TYPE_CACHE,
	/* Package aka chip, equivalent to socket. */
	TOPO_TYPE_PKG,
	/* NUMA node. */
	TOPO_TYPE_NODE,
	/* Other logical or physical grouping of PUs. */
	/* E.g. PUs on the same die, or PUs sharing an FPU. */
	TOPO_TYPE_GROUP,
	/* The whole system. */
	TOPO_TYPE_SYSTEM
} topo_node_type;
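
/*
 * For illustration, a typical tree on an SMT machine might look like the
 * sketch below; the exact shape is hardware dependent, and this layout is
 * only an assumed common case:
 *
 *	SYSTEM
 *	  PKG
 *	    CACHE (L3)
 *	      CORE
 *	        PU PU
 *	      CORE
 *	        PU PU
 */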

/* Hardware identifier of a topology component. */
typedef unsigned int hwid_t;
/* Logical CPU identifier. */
typedef int cpuid_t;

/* A node in the topology. */
struct topo_node {
	struct topo_node			*parent;
	TAILQ_HEAD(topo_children, topo_node)	children;
	TAILQ_ENTRY(topo_node)			siblings;
	cpuset_t				cpuset;
	topo_node_type				type;
	uintptr_t				subtype;
	hwid_t					hwid;
	cpuid_t					id;
	int					nchildren;
	int					nchildren_busy;
};
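
/*
 * Example (illustrative sketch, not part of the KPI): visiting the direct
 * children of a node with the queue(3) macros used above, counting its
 * logical CPUs.  "node" is assumed to point at a valid topo_node.
 *
 *	struct topo_node *child;
 *	int npus = 0;
 *
 *	TAILQ_FOREACH(child, &node->children, siblings)
 *		if (child->type == TOPO_TYPE_PU)
 *			npus++;
 */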

/*
 * Scheduling topology of a NUMA or SMP system.
 *
 * The top level topology is an array of pointers to groups.  Each group
 * contains a bitmask of cpus in its group or subgroups.  It may also
 * contain a pointer to an array of child groups.
 *
 * The bitmasks at non-leaf groups may be used by consumers who support
 * a smaller depth than the hardware provides.
 *
 * The topology may be omitted on systems where all CPUs are equal.
 */
struct cpu_group {
	struct cpu_group *cg_parent;	/* Our parent group. */
	struct cpu_group *cg_child;	/* Optional children groups. */
	cpuset_t	cg_mask;	/* Mask of cpus in this group. */
	int32_t		cg_count;	/* Count of cpus in this group. */
	int16_t		cg_children;	/* Number of children groups. */
	int8_t		cg_level;	/* Shared cache level. */
	int8_t		cg_flags;	/* Traversal modifiers. */
};

typedef struct cpu_group *cpu_group_t;
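
/*
 * Example (sketch, not part of the KPI): descending from the root group to
 * the smallest group containing a given CPU, using only the fields above.
 * leaf_group_of is a hypothetical helper; smp_topo_find(), declared below,
 * provides this lookup.
 *
 *	static struct cpu_group *
 *	leaf_group_of(struct cpu_group *cg, int cpu)
 *	{
 *		int i;
 *
 *		for (i = 0; i < cg->cg_children; ) {
 *			if (CPU_ISSET(cpu, &cg->cg_child[i].cg_mask)) {
 *				cg = &cg->cg_child[i];
 *				i = 0;
 *			} else
 *				i++;
 *		}
 *		return (cg);
 *	}
 */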

/*
 * Defines common resources for CPUs in the group.  The highest level
 * resource should be used when multiple are shared.
 */
#define	CG_SHARE_NONE	0
#define	CG_SHARE_L1	1
#define	CG_SHARE_L2	2
#define	CG_SHARE_L3	3

#define	MAX_CACHE_LEVELS	CG_SHARE_L3

/*
 * Behavior modifiers for load balancing and affinity.
 */
#define	CG_FLAG_HTT	0x01	/* Schedule the alternate core last. */
#define	CG_FLAG_SMT	0x02	/* Modern SMT; less constrained than HTT. */
#define	CG_FLAG_THREAD	(CG_FLAG_HTT | CG_FLAG_SMT)	/* Any threading. */

/*
 * Convenience routines for building and traversing topologies.
 */
void	topo_init_node(struct topo_node *node);
void	topo_init_root(struct topo_node *root);
struct topo_node *topo_add_node_by_hwid(struct topo_node *parent, int hwid,
	    topo_node_type type, uintptr_t subtype);
struct topo_node *topo_find_node_by_hwid(struct topo_node *parent, int hwid,
	    topo_node_type type, uintptr_t subtype);
void	topo_promote_child(struct topo_node *child);
struct topo_node *topo_next_node(struct topo_node *top,
	    struct topo_node *node);
struct topo_node *topo_next_nonchild_node(struct topo_node *top,
	    struct topo_node *node);
void	topo_set_pu_id(struct topo_node *node, cpuid_t id);
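
/*
 * Example (sketch): how a platform enumerator might record one package with
 * one core and two SMT PUs using the routines above.  The hardware IDs are
 * made up for illustration.
 *
 *	static struct topo_node root;
 *	struct topo_node *pkg, *core, *pu;
 *
 *	topo_init_root(&root);
 *	pkg = topo_add_node_by_hwid(&root, 0, TOPO_TYPE_PKG, 0);
 *	core = topo_add_node_by_hwid(pkg, 0, TOPO_TYPE_CORE, 0);
 *	pu = topo_add_node_by_hwid(core, 0, TOPO_TYPE_PU, 0);
 *	topo_set_pu_id(pu, 0);
 *	pu = topo_add_node_by_hwid(core, 1, TOPO_TYPE_PU, 0);
 *	topo_set_pu_id(pu, 1);
 */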

typedef enum {
	TOPO_LEVEL_PKG = 0,
	/*
	 * Some systems have useful sub-package core organizations.  On these,
	 * a package has one or more subgroups.  Each subgroup contains one or
	 * more cache groups (cores that share a last level cache).
	 */
	TOPO_LEVEL_GROUP,
	TOPO_LEVEL_CACHEGROUP,
	TOPO_LEVEL_CORE,
	TOPO_LEVEL_THREAD,
	TOPO_LEVEL_COUNT	/* Must be last */
} topo_level_t;

struct topo_analysis {
	int entities[TOPO_LEVEL_COUNT];
};

int	topo_analyze(struct topo_node *topo_root, int all,
	    struct topo_analysis *results);
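
/*
 * Example (sketch): summarizing a topology.  Reading the "all" argument as
 * "include disabled PUs" is an assumption based on its name, and the
 * non-zero return is read here as "topology was regular enough to analyze".
 *
 *	struct topo_analysis ta;
 *
 *	if (topo_analyze(topo_root, 1, &ta) != 0)
 *		printf("%d package(s), %d core(s)\n",
 *		    ta.entities[TOPO_LEVEL_PKG],
 *		    ta.entities[TOPO_LEVEL_CORE]);
 */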

#define	TOPO_FOREACH(i, root)	\
	for (i = root; i != NULL; i = topo_next_node(root, i))
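
/*
 * Example: counting every node in the subtree rooted at "root".
 *
 *	struct topo_node *n;
 *	int nnodes = 0;
 *
 *	TOPO_FOREACH(n, root)
 *		nnodes++;
 */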

struct cpu_group *smp_topo(void);
struct cpu_group *smp_topo_alloc(u_int count);
struct cpu_group *smp_topo_none(void);
struct cpu_group *smp_topo_1level(int l1share, int l1count, int l1flags);
struct cpu_group *smp_topo_2level(int l2share, int l2count, int l1share,
	    int l1count, int l1flags);
struct cpu_group *smp_topo_find(struct cpu_group *top, int cpu);
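
/*
 * Example (hypothetical): an MD port with four cores per shared L3 and two
 * SMT threads per core could describe itself from cpu_topo() (declared
 * below) as
 *
 *	return (smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L1, 2,
 *	    CG_FLAG_SMT));
 *
 * while smp_topo_none() is the fallback when all CPUs are equal.
 */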

extern void (*cpustop_restartfunc)(void);
/* The suspend/resume cpusets are x86 only, but minimize ifdefs. */
extern volatile cpuset_t resuming_cpus;	/* woken up cpus in suspend pen */
extern volatile cpuset_t started_cpus;	/* cpus to let out of stop pen */
extern volatile cpuset_t stopped_cpus;	/* cpus in stop pen */
extern volatile cpuset_t suspended_cpus; /* cpus [near] sleeping in susp pen */
extern volatile cpuset_t toresume_cpus;	/* cpus to let out of suspend pen */
extern cpuset_t hlt_cpus_mask;		/* XXX 'mask' is detail in old impl */
extern cpuset_t logical_cpus_mask;

extern u_int mp_maxid;
extern int mp_maxcpus;
extern int mp_ncores;
extern int mp_ncpus;
extern volatile int smp_started;
extern int smp_threads_per_core;

extern cpuset_t all_cpus;
extern cpuset_t cpuset_domain[MAXMEMDOM];	/* CPUs in each NUMA domain. */

/*
 * Macro allowing us to determine whether a CPU is absent at any given
 * time, thus permitting us to configure sparse maps of cpuid-dependent
 * (per-CPU) structures.
 */
#define	CPU_ABSENT(x_cpu)	(!CPU_ISSET(x_cpu, &all_cpus))

/*
 * Macros to iterate over non-absent CPUs.  CPU_FOREACH() takes an
 * integer iterator and iterates over the available set of CPUs.
 * CPU_FIRST() returns the id of the first non-absent CPU.  CPU_NEXT()
 * returns the id of the next non-absent CPU.  It will wrap back to
 * CPU_FIRST() once the end of the list is reached.  The iterators are
 * currently implemented via inline functions.
 */
#define	CPU_FOREACH(i)						\
	for ((i) = 0; (i) <= mp_maxid; (i)++)			\
		if (!CPU_ABSENT((i)))

#define	CPU_FIRST()	cpu_first()
#define	CPU_NEXT(i)	cpu_next((i))
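
/*
 * Example: visiting every present CPU.
 *
 *	u_int cpu;
 *
 *	CPU_FOREACH(cpu)
 *		printf("cpu%u present\n", cpu);
 */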

/*
 * Machine dependent functions used to initialize MP support.
 *
 * cpu_mp_probe() should check to see if MP support is present and return
 * zero if it is not or non-zero if it is.  If MP support is present, then
 * cpu_mp_start() will be called so that MP can be enabled.  This function
 * should do things such as start up secondary processors.  It should also
 * set up mp_ncpus, all_cpus, and smp_cpus.  It should also ensure that
 * smp_started is initialized at the appropriate time.
 * Once cpu_mp_start() returns, machine independent MP startup code will be
 * executed and a simple message will be output to the console.  Finally,
 * cpu_mp_announce() will be called so that machine dependent messages about
 * the MP support may be output to the console if desired.
 *
 * The cpu_mp_setmaxid() function is called very early during the boot
 * process so that the MD code may set mp_maxid to provide an upper bound
 * on CPU IDs that other subsystems may use.  If a platform is not able to
 * determine the exact maximum ID that early, then it may set mp_maxid to
 * MAXCPU - 1.
 */
struct cpu_group *cpu_topo(void);
void	cpu_mp_announce(void);
int	cpu_mp_probe(void);
void	cpu_mp_setmaxid(void);
void	cpu_mp_start(void);
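
/*
 * Example (hypothetical skeleton for an MD port): the early-boot bound uses
 * the documented MAXCPU - 1 fallback, and the probe body assumes mp_ncpus
 * was counted by earlier platform enumeration.
 *
 *	void
 *	cpu_mp_setmaxid(void)
 *	{
 *		mp_maxid = MAXCPU - 1;
 *	}
 *
 *	int
 *	cpu_mp_probe(void)
 *	{
 *		return (mp_ncpus > 1);
 *	}
 */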

void	forward_signal(struct thread *);
int	restart_cpus(cpuset_t);
int	stop_cpus(cpuset_t);
int	stop_cpus_hard(cpuset_t);
#if defined(__amd64__) || defined(__i386__)
int	suspend_cpus(cpuset_t);
int	resume_cpus(cpuset_t);
#endif

void	smp_rendezvous_action(void);
extern struct mtx smp_ipi_mtx;

int	quiesce_all_cpus(const char *, int);
int	quiesce_cpus(cpuset_t, const char *, int);
void	quiesce_all_critical(void);
void	cpus_fence_seq_cst(void);
void	smp_no_rendezvous_barrier(void *);
void	smp_rendezvous(void (*)(void *),
	    void (*)(void *),
	    void (*)(void *),
	    void *arg);
void	smp_rendezvous_cpus(cpuset_t,
	    void (*)(void *),
	    void (*)(void *),
	    void (*)(void *),
	    void *arg);

struct smp_rendezvous_cpus_retry_arg {
	cpuset_t cpus;
};
void	smp_rendezvous_cpus_retry(cpuset_t,
	    void (*)(void *),
	    void (*)(void *),
	    void (*)(void *),
	    void (*)(void *, int),
	    struct smp_rendezvous_cpus_retry_arg *);

void	smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *);
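
/*
 * Example (sketch): running an action on every CPU and waiting for all of
 * them to finish.  smp_no_rendezvous_barrier (declared above) is used to
 * skip the setup and teardown phases; do_work is a hypothetical handler,
 * and curcpu comes from <sys/pcpu.h>.
 *
 *	static int hits[MAXCPU];
 *
 *	static void
 *	do_work(void *arg __unused)
 *	{
 *		hits[curcpu]++;
 *	}
 *
 *	smp_rendezvous(smp_no_rendezvous_barrier, do_work,
 *	    smp_no_rendezvous_barrier, NULL);
 */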

#endif /* _SYS_SMP_H_ */