/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/md_var.h>
#include <machine/setjmp.h>
#include <machine/smp.h>

#include "pic_if.h"
extern struct pcpu __pcpu[MAXCPU];
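/*
 * State shared between the BSP and the APs during bring-up: the BSP
 * publishes its time base in ap_timebase, releases the APs by setting
 * ap_letgo, and each AP increments ap_awake once it is up and running.
 */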
volatile static int ap_awake;
volatile static u_int ap_letgo;
volatile static u_quad_t ap_timebase;
static u_int ipi_msg_cnt[32];
static struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

int longfault(faultbuf, int);
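/*
 * Entry point for an AP once the platform code has started it: record
 * the processor ID register, wait for the BSP to release us, sync the
 * time base, and finally enter the scheduler.
 */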
void
machdep_ap_bootstrap(void)
{

	PCPU_SET(pir, mfspr(SPR_PIR));
	PCPU_SET(awake, 1);
	__asm __volatile("msync; isync");

	/* Spin until the BSP lets us go. */
	while (ap_letgo == 0)
		;

	/* Initialize DEC and TB, sync with the BSP values */
#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(ap_timebase);
#else
	mttb(ap_timebase);
#endif
	decr_ap_init();

	/* Give platform code a chance to do anything necessary */
	platform_smp_ap_init();

	/* Serialize console output and AP count increment */
	mtx_lock_spin(&ap_boot_mtx);
	ap_awake++;
	printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
	mtx_unlock_spin(&ap_boot_mtx);

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Announce ourselves awake, and enter the scheduler */
	sched_throw(NULL);
}
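/*
 * Count the CPUs reported by the platform and derive mp_ncpus and
 * mp_maxid from that count.
 */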
void
cpu_mp_setmaxid(void)
{
	struct cpuref cpuref;
	int error;

	mp_ncpus = 0;
	error = platform_smp_first_cpu(&cpuref);
	while (!error) {
		mp_ncpus++;
		error = platform_smp_next_cpu(&cpuref);
	}
	/* Sanity. */
	if (mp_ncpus == 0)
		mp_ncpus = 1;

	/*
	 * Set the largest cpuid we're going to use. This is necessary
	 * for VM initialization.
	 */
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}
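/*
 * Report whether SMP should be enabled at all; a single-processor
 * configuration stays uniprocessor.
 */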
int
cpu_mp_probe(void)
{

	/*
	 * We're not going to enable SMP if there's only 1 processor.
	 */
	return (mp_ncpus > 1);
}
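/*
 * Set up a struct pcpu and a dynamic per-CPU data area for every usable
 * CPU the platform reports, skipping out-of-range and duplicate IDs.
 */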
void
cpu_mp_start(void)
{
	struct cpuref bsp, cpu;
	struct pcpu *pc;
	int error;

	error = platform_smp_get_bsp(&bsp);
	KASSERT(error == 0, ("Don't know BSP"));
	KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));

	error = platform_smp_first_cpu(&cpu);
	while (!error) {
		if (cpu.cr_cpuid >= MAXCPU) {
			printf("SMP: cpu%d: skipped -- ID out of range\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) {
			printf("SMP: cpu%d: skipped - duplicate ID\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (cpu.cr_cpuid != bsp.cr_cpuid) {
			void *dpcpu;

			pc = &__pcpu[cpu.cr_cpuid];
			dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
			    M_WAITOK | M_ZERO);
			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
			dpcpu_init(dpcpu, cpu.cr_cpuid);
		} else {
			pc = pcpup;
			pc->pc_cpuid = bsp.cr_cpuid;
			pc->pc_bsp = 1;
		}
		pc->pc_hwref = cpu.cr_hwref;
		CPU_SET(pc->pc_cpuid, &all_cpus);
next:
		error = platform_smp_next_cpu(&cpu);
	}
}
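/* Print a one-line summary of each CPU that was set up. */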
void
cpu_mp_announce(void)
{
	struct pcpu *pc;
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc == NULL)
			continue;
		printf("cpu%d: dev=%x", i, (int)pc->pc_hwref);
		if (pc->pc_bsp)
			printf(" (BSP)");
		printf("\n");
	}
}
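/*
 * Late in boot (SI_SUB_SMP), wake each AP via the platform code, wait
 * for it to mark itself awake, publish the BSP's time base and then
 * release all APs by setting ap_letgo.
 */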
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus, timeout;

	if (mp_ncpus <= 1)
		return;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	cpus = 0;
	smp_cpus = 0;
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (!pc->pc_bsp) {
			if (bootverbose)
				printf("Waking up CPU %d (dev=%x)\n",
				    pc->pc_cpuid, (int)pc->pc_hwref);

			platform_smp_start_cpu(pc);

			timeout = 2000;	/* wait 2sec for the AP */
			while (!pc->pc_awake && --timeout > 0)
				DELAY(1000);
		} else {
			PCPU_SET(pir, mfspr(SPR_PIR));
			pc->pc_awake = 1;
		}
		if (pc->pc_awake) {
			if (bootverbose)
				printf("Adding CPU %d, pir=%x, awake=%x\n",
				    pc->pc_cpuid, pc->pc_pir, pc->pc_awake);
			smp_cpus++;
		} else
			CPU_SET(pc->pc_cpuid, &stopped_cpus);
	}

	ap_awake = 1;

	/* Provide our current DEC and TB values for APs */
	ap_timebase = mftb() + 10;
	__asm __volatile("msync; isync");

	/* Let APs continue */
	atomic_store_rel_int(&ap_letgo, 1);

#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(ap_timebase);
#else
	mttb(ap_timebase);
#endif

	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	/* Let the APs get into the scheduler */
	DELAY(10000);

	/* XXX Atomic set operation? */
	smp_started = 1;
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
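/*
 * Interrupt filter for incoming IPIs: read and clear this CPU's pending
 * IPI mask and dispatch each message in turn.
 */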
int
powerpc_ipi_handler(void *arg)
{
	u_int cpuid;
	uint32_t ipimask;
	int msg;

	CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
	if (ipimask == 0)
		return (FILTER_STRAY);
	while ((msg = ffs(ipimask) - 1) != -1) {
		ipimask &= ~(1u << msg);
		ipi_msg_cnt[msg]++;
		switch (msg) {
		case IPI_AST:
			CTR1(KTR_SMP, "%s: IPI_AST", __func__);
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_RENDEZVOUS:
			CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
			smp_rendezvous_action();
			break;
		case IPI_STOP:

			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add such case in the switch.
			 */
			CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
			    __func__);
			cpuid = PCPU_GET(cpuid);
			savectx(&stoppcbs[cpuid]);
			savectx(PCPU_GET(curpcb));
			CPU_SET_ATOMIC(cpuid, &stopped_cpus);
			while (!CPU_ISSET(cpuid, &started_cpus))
				cpu_spinwait();
			CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
			CPU_CLR_ATOMIC(cpuid, &started_cpus);
			CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		}
	}

	return (FILTER_HANDLED);
}
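/* Post an IPI message in the target CPU's mask and trigger it via the PIC. */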
static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
	    pc, pc->pc_cpuid, ipi);

	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
	powerpc_sync();
	PIC_IPI(root_pic, pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}
/* Send an IPI to a set of cpus. */
void
ipi_selected(cpuset_t cpus, int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (CPU_ISSET(pc->pc_cpuid, &cpus))
			ipi_send(pc, ipi);
	}
}
/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	ipi_send(cpuid_to_pcpu[cpu], ipi);
}
/* Send an IPI to all CPUs EXCEPT myself. */
void
ipi_all_but_self(int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup)
			ipi_send(pc, ipi);
	}
}