/*-
 * Copyright (c) 2009 Neelkanth Natu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/clock.h>
#include <machine/smp.h>
#include <machine/hwfunc.h>
#include <machine/intr_machdep.h>
#include <machine/cache.h>
#include <machine/tlb.h>

struct pcb stoppcbs[MAXCPU];

static void *dpcpu;
static struct mtx ap_boot_mtx;

static volatile int aps_ready;
static volatile int mp_naps;

static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);

	/* Post the IPI bit before raising the platform interrupt. */
	atomic_set_32(&pc->pc_pending_ipis, ipi);
	platform_ipi_send(pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}
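
/*
 * Illustrative sketch, not part of the original file: because ipi_send()
 * ORs the IPI bit into pc_pending_ipis rather than queueing, distinct IPIs
 * posted before the target drains its bitmap coalesce into a single pending
 * word (and duplicates of the same IPI collapse into one bit).
 * "example_ipi_coalesce" is a hypothetical name used for illustration only.
 */
static __inline void
example_ipi_coalesce(struct pcpu *pc)
{

	ipi_send(pc, IPI_AST);		/* pending_ipis == IPI_AST */
	ipi_send(pc, IPI_PREEMPT);	/* pending_ipis == IPI_AST | IPI_PREEMPT */
	/* The target's mips_ipi_handler() drains both bits in one pass. */
}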

/* Send an IPI to all CPUs but ourselves. */
void
ipi_all_but_self(u_int ipi)
{

	ipi_selected(PCPU_GET(other_cpus), ipi);
}

/* Send an IPI to a set of CPUs. */
void
ipi_selected(cpumask_t cpus, int ipi)
{
	struct pcpu *pc;

	CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if ((cpus & pc->pc_cpumask) != 0)
			ipi_send(pc, ipi);
	}
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
	ipi_send(cpuid_to_pcpu[cpu], ipi);
}
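
/*
 * Illustrative sketch, not part of the original file: typical use of the
 * three senders above.  "example_ipi_usage" is a hypothetical name.
 */
static __inline void
example_ipi_usage(void)
{

	ipi_cpu(1, IPI_AST);			/* poke CPU 1 only */
	ipi_all_but_self(IPI_PREEMPT);		/* poke every other CPU */
	ipi_selected(0x6, IPI_RENDEZVOUS);	/* poke CPUs 1 and 2 by mask */
}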

/*
 * Handle an IPI sent to this processor.
 */
static int
mips_ipi_handler(void *arg)
{
	int bit, cpu;
	cpumask_t cpumask;
	u_int ipi, ipi_bitmap;

	cpu = PCPU_GET(cpuid);
	cpumask = PCPU_GET(cpumask);

	platform_ipi_clear();	/* quiesce the pending ipi interrupt */

	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	if (ipi_bitmap == 0)
		return (FILTER_STRAY);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);

	/* Service pending IPIs lowest-bit-first until the bitmap is empty. */
	while ((bit = ffs(ipi_bitmap)) != 0) {
		bit = bit - 1;
		ipi = 1 << bit;
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;
		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;
		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			savectx(&stoppcbs[cpu]);
			tlb_save();

			/* Indicate we are stopped */
			atomic_set_int(&stopped_cpus, cpumask);

			/* Wait for restart */
			while ((started_cpus & cpumask) == 0)
				cpu_spinwait();

			atomic_clear_int(&started_cpus, cpumask);
			atomic_clear_int(&stopped_cpus, cpumask);
			CTR0(KTR_SMP, "IPI_STOP (restart)");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		default:
			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
		}
	}

	return (FILTER_HANDLED);
}
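
/*
 * Illustrative sketch, not part of the original file: the ffs() loop in
 * mips_ipi_handler() consumes pending IPIs lowest-bit-first.  The
 * hypothetical helper below isolates that decode step: it removes and
 * returns the lowest pending IPI bit, or 0 when the bitmap is empty.
 */
static __inline u_int
example_next_ipi(u_int *bitmap)
{
	int bit;

	if ((bit = ffs(*bitmap)) == 0)
		return (0);			/* nothing pending */
	*bitmap &= ~(1u << (bit - 1));		/* consume the bit */
	return (1u << (bit - 1));
}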

static int
start_ap(int cpuid)
{
	int cpus, ms;

	cpus = mp_naps;
	dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);

	mips_sync();

	if (platform_start_ap(cpuid) != 0)
		return (-1);			/* could not start AP */

	/* Wait for the AP to bump mp_naps in smp_init_secondary(). */
	for (ms = 0; ms < 5000; ++ms) {
		if (mp_naps > cpus)
			return (0);		/* success */
		else
			DELAY(1000);
	}

	return (-2);				/* timeout initializing AP */
}

void
cpu_mp_setmaxid(void)
{

	mp_ncpus = platform_num_processors();
	if (mp_ncpus <= 0)
		mp_ncpus = 1;

	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}

void
cpu_mp_announce(void)
{
	/* NOTHING */
}

struct cpu_group *
cpu_topo(void)
{

	return (platform_smp_topo());
}

int
cpu_mp_probe(void)
{

	return (mp_ncpus > 1);
}

void
cpu_mp_start(void)
{
	int error, cpuid;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	all_cpus = 1;	/* BSP */
	for (cpuid = 1; cpuid < platform_num_processors(); ++cpuid) {
		if (cpuid >= MAXCPU) {
			printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
			continue;
		}

		if ((error = start_ap(cpuid)) != 0) {
			printf("AP #%d failed to start: %d\n", cpuid, error);
			continue;
		}

		if (bootverbose)
			printf("AP #%d started!\n", cpuid);

		all_cpus |= 1 << cpuid;
	}

	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
}
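
/*
 * Illustrative sketch, not part of the original file: cpumask_t is a flat
 * bitmask here, so a 4-CPU system leaves cpu_mp_start() with all_cpus ==
 * 0xf, and CPU 2 computes other_cpus as 0xf & ~(1 << 2) == 0xb.
 * "example_other_cpus" is a hypothetical name.
 */
static __inline cpumask_t
example_other_cpus(cpumask_t all, int cpuid)
{

	return (all & ~((cpumask_t)1 << cpuid));
}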

void
smp_init_secondary(u_int32_t cpuid)
{

	/* Reset the TLB before wiring the kernel's entries. */
	mips_wr_wired(0);
	tlb_invalidate_all();
	mips_wr_wired(VMWIRED_ENTRIES);

	/*
	 * We assume that the L1 cache on the APs is identical to the one
	 * on the BSP.
	 */
	mips_dcache_wbinv_all();
	mips_icache_sync_all();

	mips_sync();

	mips_wr_entryhi(0);

	pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpuid);

	/* The AP has initialized successfully - allow the BSP to proceed */
	++mp_naps;

	/* Spin until the BSP is ready to release the APs */
	while (aps_ready == 0)
		;

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));

	/* The last AP to check in marks the system as fully started. */
	if (smp_cpus == mp_ncpus) {
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;
	}

	mtx_unlock_spin(&ap_boot_mtx);

	while (smp_started == 0)
		;

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Enter the scheduler. */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
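
/*
 * Illustrative sketch, not part of the original file: the AP/BSP startup
 * handshake above, reduced to its two flags.  An AP announces itself (like
 * ++mp_naps), then spins until released (like aps_ready); the BSP polls for
 * the announcement and publishes the release with a store-release so the
 * AP's earlier initialization is visible.  All "example_*" names are
 * hypothetical.
 */
static volatile int example_alive, example_release;

static __inline void
example_ap_side(void)
{

	atomic_add_int(&example_alive, 1);	/* like ++mp_naps */
	while (example_release == 0)		/* like the aps_ready spin */
		;
}

static __inline void
example_bsp_side(void)
{

	while (example_alive == 0)		/* like start_ap()'s poll loop */
		;
	atomic_store_rel_int(&example_release, 1);	/* like release_aps() */
}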

static void
release_aps(void *dummy __unused)
{
	int ipi_irq;

	if (mp_ncpus == 1)
		return;

	/*
	 * Wire up the inter-processor interrupt before releasing the APs.
	 */
	ipi_irq = platform_ipi_intrnum();
	cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq,
	    INTR_TYPE_MISC | INTR_EXCL | INTR_FAST, NULL);

	atomic_store_rel_int(&aps_ready, 1);

	while (smp_started == 0)
		;
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
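
/*
 * Illustrative sketch, not part of the original file: the platform_*()
 * hooks consumed above are supplied by each MIPS platform port.  The stubs
 * below outline the expected shapes for a hypothetical two-CPU board;
 * EXAMPLE_IPI_IRQ and example_mbox_write() are made-up names.  Kept under
 * #if 0 since the real definitions live in the platform code.
 */
#if 0	/* example only */
int
platform_num_processors(void)
{

	return (2);
}

int
platform_ipi_intrnum(void)
{

	return (EXAMPLE_IPI_IRQ);		/* hypothetical IRQ number */
}

void
platform_ipi_send(int cpuid)
{

	example_mbox_write(cpuid, 1);		/* hypothetical mailbox poke */
}

void
platform_ipi_clear(void)
{

	example_mbox_write(PCPU_GET(cpuid), 0);	/* ack our own mailbox */
}
#endif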