/*-
 * Copyright (c) 2009 Neelkanth Natu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/clock.h>
#include <machine/smp.h>
#include <machine/hwfunc.h>
#include <machine/intr_machdep.h>
#include <machine/cache.h>
#include <machine/tlb.h>

struct pcb stoppcbs[MAXCPU];

static void *dpcpu;
static struct mtx ap_boot_mtx;

static volatile int aps_ready;
static volatile int mp_naps;
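
/*
 * An IPI is posted in two steps: the sender atomically sets the requested
 * bit(s) in the target's per-CPU pending_ipis word and then raises the
 * platform's single inter-processor doorbell interrupt.  The receiver
 * atomically reads and clears pending_ipis, so back-to-back IPIs of
 * different types coalesce into a single doorbell delivery.
 */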

static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);

	atomic_set_32(&pc->pc_pending_ipis, ipi);
	platform_ipi_send(pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}

void
ipi_all_but_self(int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	ipi_selected(other_cpus, ipi);
}
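
/*
 * For reference: the machine-independent smp_rendezvous() code in
 * kern/subr_smp.c delivers IPI_RENDEZVOUS through ipi_all_but_self()
 * and ipi_selected(); mips_ipi_handler() below dispatches it to
 * smp_rendezvous_action().
 */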

/* Send an IPI to a set of cpus. */
void
ipi_selected(cpuset_t cpus, int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (CPU_ISSET(pc->pc_cpuid, &cpus)) {
			CTR3(KTR_SMP, "%s: pc: %p, ipi: %x\n", __func__, pc,
			    ipi);
			ipi_send(pc, ipi);
		}
	}
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
	ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/*
 * Handle an IPI sent to this processor.
 */
static int
mips_ipi_handler(void *arg)
{
	u_int	cpu, ipi, ipi_bitmap;
	int	bit;

	cpu = PCPU_GET(cpuid);

	platform_ipi_clear();	/* quiesce the pending ipi interrupt */

	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	if (ipi_bitmap == 0)
		return (FILTER_STRAY);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);

	/* Service every pending IPI, lowest bit first. */
	while ((bit = ffs(ipi_bitmap))) {
		bit = bit - 1;
		ipi = 1 << bit;
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			savectx(&stoppcbs[cpu]);
			tlb_save();

			/* Indicate we are stopped */
			CPU_SET_ATOMIC(cpu, &stopped_cpus);

			/* Wait for restart */
			while (!CPU_ISSET(cpu, &started_cpus))
				cpu_spinwait();

			CPU_CLR_ATOMIC(cpu, &started_cpus);
			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
			CTR0(KTR_SMP, "IPI_STOP (restart)");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		default:
			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
		}
	}

	return (FILTER_HANDLED);
}
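
/*
 * mips_ipi_handler() runs as an interrupt filter: FILTER_STRAY reports
 * that the doorbell carried no pending bits for this CPU, while
 * FILTER_HANDLED claims the interrupt as fully serviced.
 */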

static int
start_ap(int cpuid)
{
	int cpus, ms;

	cpus = mp_naps;
	dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);

	mips_sync();

	if (platform_start_ap(cpuid) != 0)
		return (-1);			/* could not start AP */

	for (ms = 0; ms < 5000; ++ms) {
		if (mp_naps > cpus)
			return (0);		/* success */
		else
			DELAY(1000);
	}

	return (-2);				/* timeout initializing AP */
}
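
/*
 * The AP signals that it is alive by incrementing mp_naps from
 * smp_init_secondary() once its per-CPU area is usable; the polling
 * loop above allows it roughly five seconds (5000 x 1ms) to get there.
 */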

void
cpu_mp_setmaxid(void)
{
	cpuset_t cpumask;
	int cpu, last;

	platform_cpu_mask(&cpumask);
	mp_ncpus = 0;
	last = 1;
	while ((cpu = cpusetobj_ffs(&cpumask)) != 0) {
		last = cpu;
		cpu--;
		CPU_CLR(cpu, &cpumask);
		mp_ncpus++;
	}
	if (mp_ncpus <= 0)
		mp_ncpus = 1;

	mp_maxid = min(last, MAXCPU) - 1;
}
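
/*
 * Note that cpusetobj_ffs() returns 1-based bit positions, so "last"
 * ends up one past the highest CPU number and mp_maxid becomes the
 * highest usable zero-based CPU id, capped at MAXCPU - 1.
 */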

void
cpu_mp_announce(void)
{
	/* NOTHING */
}

struct cpu_group *
cpu_topo(void)
{
	return (platform_smp_topo());
}

int
cpu_mp_probe(void)
{
	return (mp_ncpus > 1);
}

void
cpu_mp_start(void)
{
	int error, cpuid;
	cpuset_t cpumask;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_ZERO(&all_cpus);
	platform_cpu_mask(&cpumask);

	while (!CPU_EMPTY(&cpumask)) {
		cpuid = cpusetobj_ffs(&cpumask) - 1;
		CPU_CLR(cpuid, &cpumask);

		if (cpuid >= MAXCPU) {
			printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
			continue;
		}

		if (cpuid != platform_processor_id()) {
			if ((error = start_ap(cpuid)) != 0) {
				printf("AP #%d failed to start: %d\n",
				    cpuid, error);
				continue;
			}
			if (bootverbose)
				printf("AP #%d started!\n", cpuid);
		}
		CPU_SET(cpuid, &all_cpus);
	}
}
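
/*
 * AP bring-up, in order: cpu_mp_start() above kicks each AP through
 * start_ap(); the AP lands in smp_init_secondary() below, initializes
 * its TLB, caches and per-CPU data, increments mp_naps, and spins on
 * aps_ready.  release_aps(), run from SYSINIT at SI_SUB_SMP, installs
 * the IPI handler and sets aps_ready to let the APs enter the scheduler.
 */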

void
smp_init_secondary(u_int32_t cpuid)
{

	/* TLB */
	mips_wr_wired(0);
	tlb_invalidate_all();
	mips_wr_wired(VMWIRED_ENTRIES);

	/*
	 * We assume that the L1 cache on the APs is identical to the one
	 * on the BSP.
	 */
	mips_dcache_wbinv_all();
	mips_icache_sync_all();

	mips_sync();

	mips_wr_entryhi(0);

	pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpuid);

	/* The AP has initialized successfully - allow the BSP to proceed */
	++mp_naps;

	/* Spin until the BSP is ready to release the APs */
	while (!aps_ready)
		;

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));

	if (bootverbose)
		printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));

	if (smp_cpus == mp_ncpus) {
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;
	}

	mtx_unlock_spin(&ap_boot_mtx);

	while (smp_started == 0)
		; /* nothing */

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

static void
release_aps(void *dummy __unused)
{
	int ipi_irq;

	if (mp_ncpus == 1)
		return;

	/* Install the IPI handler on the platform's IPI interrupt line. */
	ipi_irq = platform_ipi_intrnum();
	cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq,
	    INTR_TYPE_MISC | INTR_EXCL, NULL);

	atomic_store_rel_int(&aps_ready, 1);

	while (smp_started == 0)
		; /* nothing */
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);