2 * Copyright (c) 2001 Wind River Systems, Inc.
4 * Written by: John Baldwin <jhb@FreeBSD.org>
6 * Copyright (c) 2009 Jeffrey Roberson <jeff@freebsd.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * This module provides MI support for per-cpu data.
37 * Each architecture determines the mapping of logical CPU IDs to physical
38 * CPUs. The requirements of this mapping are as follows:
39 * - Logical CPU IDs must reside in the range 0 ... MAXCPU - 1.
40 * - The mapping is not required to be dense. That is, there may be
41 * gaps in the mappings.
42 * - The platform sets the value of MAXCPU in <machine/param.h>.
43 * - It is suggested, but not required, that in the non-SMP case, the
44 * platform define MAXCPU to be 1 and define the logical ID of the
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/sysctl.h>
57 #include <sys/malloc.h>
/* Malloc type used for the dynamic per-cpu allocator's bookkeeping. */
/* Fix: description string typo "accouting" -> "accounting". */
64 MALLOC_DEFINE(M_PCPU, "Per-cpu", "Per-cpu resource accounting.");
/* Linkage of a free-extent record into the sorted free list below. */
/* NOTE(review): the enclosing struct dpcpu_free declaration is not visible here. */
69 TAILQ_ENTRY(dpcpu_free) df_link;
/* Per-cpu scratch region that kernel modules allocate from (dpcpu_alloc()). */
72 static DPCPU_DEFINE(char, modspace[DPCPU_MODMIN]);
/* Free extents within modspace, kept address-sorted (see dpcpu_free()). */
73 static TAILQ_HEAD(, dpcpu_free) dpcpu_head = TAILQ_HEAD_INITIALIZER(dpcpu_head);
/* Serializes dpcpu_alloc() and dpcpu_free() against each other. */
74 static struct sx dpcpu_lock;
/* Offset of each CPU's dynamic per-cpu region; 0 when the slot is unused. */
75 uintptr_t dpcpu_off[MAXCPU];
/* Logical CPU id -> struct pcpu map; NULL for ids with no CPU. */
76 struct pcpu *cpuid_to_pcpu[MAXCPU];
/* List of all initialized pcpus, linked through pc_allcpu. */
77 struct cpuhead cpuhead = SLIST_HEAD_INITIALIZER(cpuhead);
80 * Initialize the MI portions of a struct pcpu.
83 pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
/* Logical ids must be in 0 .. MAXCPU-1 (see module header comment). */
87 KASSERT(cpuid >= 0 && cpuid < MAXCPU,
88 ("pcpu_init: invalid cpuid %d", cpuid));
89 pcpu->pc_cpuid = cpuid;
/* NOTE(review): 1 << cpuid is UB once cpuid reaches the width of the
 * shifted type's promotion (int) — presumably MAXCPU keeps this in
 * range; confirm for large-MAXCPU configs. */
90 pcpu->pc_cpumask = 1 << cpuid;
/* Publish this pcpu in the global lookup structures. */
91 cpuid_to_pcpu[cpuid] = pcpu;
92 SLIST_INSERT_HEAD(&cpuhead, pcpu, pc_allcpu);
/* Machine-dependent initialization hook. */
93 cpu_pcpu_init(pcpu, cpuid, size);
/* Start the per-cpu rm-lock queue as an empty, self-linked ring. */
94 pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
95 pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
/* Human-readable CPU name for tracing/debug output. */
97 snprintf(pcpu->pc_name, sizeof(pcpu->pc_name), "CPU %d", cpuid);
/*
 * Attach the dynamic per-cpu region "dpcpu" to cpu "cpuid" and seed it
 * from the static linker-set template.
 */
102 dpcpu_init(void *dpcpu, int cpuid)
106 pcpu = pcpu_find(cpuid);
/* pc_dynamic is the bias added to a DPCPU symbol's link-time address
 * to reach this CPU's private copy. */
107 pcpu->pc_dynamic = (uintptr_t)dpcpu - DPCPU_START;
110 * Initialize defaults from our linker section.
112 memcpy(dpcpu, (void *)DPCPU_START, DPCPU_BYTES);
115 * Place it in the global pcpu offset array.
117 dpcpu_off[cpuid] = pcpu->pc_dynamic;
/*
 * SYSINIT hook: set up the module-space allocator with a single free
 * extent covering all of modspace, and initialize its lock.
 */
121 dpcpu_startup(void *dummy __unused)
123 struct dpcpu_free *df;
125 df = malloc(sizeof(*df), M_PCPU, M_WAITOK | M_ZERO);
126 df->df_start = (uintptr_t)&DPCPU_NAME(modspace);
127 df->df_len = DPCPU_MODMIN;
128 TAILQ_INSERT_HEAD(&dpcpu_head, df, df_link);
129 sx_init(&dpcpu_lock, "dpcpu alloc lock");
/* Run early in SI_SUB_KLD so the kernel linkers can place module sets. */
131 SYSINIT(dpcpu, SI_SUB_KLD, SI_ORDER_FIRST, dpcpu_startup, 0);
134 * First-fit extent based allocator for allocating space in the per-cpu
135 * region reserved for modules. This is only intended for use by the
136 * kernel linkers to place module linker sets.
139 dpcpu_alloc(int size)
141 struct dpcpu_free *df;
/* Round the request up to pointer alignment. */
145 size = roundup2(size, sizeof(void *));
146 sx_xlock(&dpcpu_lock);
/* First fit: scan the address-sorted free list. */
147 TAILQ_FOREACH(df, &dpcpu_head, df_link) {
/* Extent too small for this request. */
148 if (df->df_len < size)
/* Exact fit: hand out the whole extent and unlink its record. */
150 if (df->df_len == size) {
151 s = (void *)df->df_start;
152 TAILQ_REMOVE(&dpcpu_head, df, df_link);
/* Partial fit: carve the allocation from the front of the extent. */
156 s = (void *)df->df_start;
158 df->df_start = df->df_start + size;
161 sx_xunlock(&dpcpu_lock);
167 * Free dynamic per-cpu space at module unload time.
170 dpcpu_free(void *s, int size)
172 struct dpcpu_free *df;
173 struct dpcpu_free *dn;
/* Mirror dpcpu_alloc()'s rounding so sizes match on free. */
177 size = roundup2(size, sizeof(void *));
178 start = (uintptr_t)s;
181 * Free a region of space and merge it with as many neighbors as
182 * possible. Keeping the list sorted simplifies this operation.
184 sx_xlock(&dpcpu_lock);
185 TAILQ_FOREACH(df, &dpcpu_head, df_link) {
/* Past any possible neighbor; stop scanning. */
186 if (df->df_start > end)
189 * If we expand at the end of an entry we may have to
190 * merge it with the one following it as well.
/* Freed region abuts the end of this extent: grow it. */
192 if (df->df_start + df->df_len == start) {
194 dn = TAILQ_NEXT(df, df_link);
/* NOTE(review): dn is NULL when df is the last entry, and
 * dn->df_start below would dereference NULL — verify a NULL
 * check exists in the full source. */
195 if (df->df_start + df->df_len == dn->df_start) {
/* Grown extent now touches its successor: absorb it. */
196 df->df_len += dn->df_len;
197 TAILQ_REMOVE(&dpcpu_head, dn, df_link);
200 sx_xunlock(&dpcpu_lock);
/* Freed region abuts the start of this extent: extend it backward. */
203 if (df->df_start == end) {
204 df->df_start = start;
206 sx_xunlock(&dpcpu_lock);
/* No adjacent extent: insert a new record, keeping the list sorted. */
210 dn = malloc(sizeof(*df), M_PCPU, M_WAITOK | M_ZERO);
211 dn->df_start = start;
/* Insert before the first extent past us, or at the tail if none. */
214 TAILQ_INSERT_BEFORE(df, dn, df_link);
216 TAILQ_INSERT_TAIL(&dpcpu_head, dn, df_link);
217 sx_xunlock(&dpcpu_lock);
221 * Initialize the per-cpu storage from an updated linker-set region.
224 dpcpu_copy(void *s, int size)
/* Copy the updated template into every started CPU's private region. */
230 for (i = 0; i < mp_ncpus; ++i) {
231 dpcpu = dpcpu_off[i];
/* s is a linker-set address, so dpcpu + s is CPU i's copy of it. */
234 memcpy((void *)(dpcpu + (uintptr_t)s), s, size);
/* NOTE(review): this final copy targets only dpcpu_off[0] — it looks
 * like the !SMP branch of an #ifdef not visible here; confirm. */
237 memcpy((void *)(dpcpu_off[0] + (uintptr_t)s), s, size);
242 * Destroy a struct pcpu.
245 pcpu_destroy(struct pcpu *pcpu)
/* Unpublish from every global lookup structure set up by pcpu_init()
 * and dpcpu_init(). */
248 SLIST_REMOVE(&cpuhead, pcpu, pcpu, pc_allcpu);
249 cpuid_to_pcpu[pcpu->pc_cpuid] = NULL;
250 dpcpu_off[pcpu->pc_cpuid] = 0;
254 * Locate a struct pcpu by cpu id.
257 pcpu_find(u_int cpuid)
/* Direct index; returns NULL for ids never registered by pcpu_init(). */
260 return (cpuid_to_pcpu[cpuid]);
/*
 * Sysctl handler: sum a per-cpu int64_t counter (at linker-set offset
 * arg1) across all started CPUs and return the total.
 */
264 sysctl_dpcpu_quad(SYSCTL_HANDLER_ARGS)
271 for (i = 0; i < mp_ncpus; ++i) {
272 dpcpu = dpcpu_off[i];
275 count += *(int64_t *)(dpcpu + (uintptr_t)arg1);
277 return (SYSCTL_OUT(req, &count, sizeof(count)));
/*
 * Sysctl handler: sum a per-cpu long counter (at linker-set offset
 * arg1) across all started CPUs and return the total.
 */
281 sysctl_dpcpu_long(SYSCTL_HANDLER_ARGS)
288 for (i = 0; i < mp_ncpus; ++i) {
289 dpcpu = dpcpu_off[i];
292 count += *(long *)(dpcpu + (uintptr_t)arg1);
294 return (SYSCTL_OUT(req, &count, sizeof(count)));
/*
 * Sysctl handler: sum a per-cpu int counter (at linker-set offset
 * arg1) across all started CPUs and return the total.
 */
298 sysctl_dpcpu_int(SYSCTL_HANDLER_ARGS)
305 for (i = 0; i < mp_ncpus; ++i) {
306 dpcpu = dpcpu_off[i];
309 count += *(int *)(dpcpu + (uintptr_t)arg1);
311 return (SYSCTL_OUT(req, &count, sizeof(count)));
/* DDB command: print each CPU's dynamic per-cpu offset and base address. */
315 DB_SHOW_COMMAND(dpcpu_off, db_show_dpcpu_off)
320 db_printf("dpcpu_off[%2d] = 0x%jx (+ DPCPU_START = %p)\n",
321 id, (uintmax_t)dpcpu_off[id],
322 (void *)(uintptr_t)(dpcpu_off[id] + DPCPU_START));
/* Print the interesting fields of one struct pcpu to the debugger. */
327 show_pcpu(struct pcpu *pc)
331 db_printf("cpuid = %d\n", pc->pc_cpuid);
332 db_printf("dynamic pcpu = %p\n", (void *)pc->pc_dynamic);
333 db_printf("curthread = ");
334 td = pc->pc_curthread;
/* Thread details only when the pointer is set (guard not visible here). */
336 db_printf("%p: pid %d \"%s\"\n", td, td->td_proc->p_pid,
340 db_printf("curpcb = %p\n", pc->pc_curpcb);
341 db_printf("fpcurthread = ");
342 td = pc->pc_fpcurthread;
344 db_printf("%p: pid %d \"%s\"\n", td, td->td_proc->p_pid,
348 db_printf("idlethread = ");
349 td = pc->pc_idlethread;
351 db_printf("%p: tid %d \"%s\"\n", td, td->td_tid, td->td_name);
/* VIMAGE: current thread's vnet (line presumably under #ifdef). */
357 db_printf("curvnet = %p\n", pc->pc_curthread->td_vnet);
/* WITNESS: spinlocks held by this CPU. */
361 db_printf("spin locks held:\n");
362 witness_list_locks(&pc->pc_spinlocks, db_printf);
/* DDB command: show the pcpu of a given CPU id, or the current CPU. */
366 DB_SHOW_COMMAND(pcpu, db_show_pcpu)
/* The argument was parsed as hex; reinterpret its two hex digits as a
 * decimal CPU number (e.g. input "12" -> 0x12 -> 12). */
372 id = ((addr >> 4) % 16) * 10 + (addr % 16);
/* No argument given: default to the CPU we are running on. */
374 id = PCPU_GET(cpuid);
377 db_printf("CPU %d not found\n", id);
/* DDB command: show every CPU's pcpu, noting which CPU is current. */
383 DB_SHOW_ALL_COMMAND(pcpu, db_show_cpu_all)
388 db_printf("Current CPU: %d\n\n", PCPU_GET(cpuid));
/* mp_maxid is the highest valid logical id; the map may have gaps. */
389 for (id = 0; id <= mp_maxid; id++) {
/* "show allpcpu" is an alias for "show all pcpu". */
397 DB_SHOW_ALIAS(allpcpu, db_show_cpu_all);