/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sched.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/debug_monitor.h>
#include <machine/machdep.h>
#include <machine/intr.h>
#include <machine/smp.h>
#include <machine/vfp.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_cpu.h>

#include <dev/psci/psci.h>

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define INTR_IPI_NAMELEN        (MAXCOMLEN + 1)
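
/*
 * Per-IPI bookkeeping: the registered handler and its argument, the
 * function (and its cookie) used to deliver the IPI through the
 * interrupt controller, a human-readable name, and per-CPU delivery
 * counters.
 */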
struct intr_ipi {
        intr_ipi_handler_t *    ii_handler;
        void *                  ii_handler_arg;
        intr_ipi_send_t *       ii_send;
        void *                  ii_send_arg;
        char                    ii_name[INTR_IPI_NAMELEN];
        u_long *                ii_count;
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

boolean_t ofw_cpu_reg(phandle_t node, u_int, cell_t *);

extern struct pcpu __pcpu[];

static device_identify_t arm64_cpu_identify;
static device_probe_t arm64_cpu_probe;
static device_attach_t arm64_cpu_attach;

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

static int ipi_handler(void *arg);

struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

static device_t cpu_list[MAXCPU];

/*
 * Not all systems boot from the first CPU in the device tree. To work around
 * this we need to find which CPU we have booted from so when we later
 * enable the secondary CPUs we skip this one.
 */
static int cpu0 = -1;

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

/* Boot-time kernel stacks for the APs; the boot CPU's stack is set up elsewhere. */
uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16);

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];
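
/*
 * A small newbus driver that hangs one arm64_cpu device off each cpu
 * node so the secondary CPUs can be enumerated and recorded in
 * cpu_list[] for later startup.
 */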
static device_method_t arm64_cpu_methods[] = {
        /* Device interface */
        DEVMETHOD(device_identify,      arm64_cpu_identify),
        DEVMETHOD(device_probe,         arm64_cpu_probe),
        DEVMETHOD(device_attach,        arm64_cpu_attach),

        DEVMETHOD_END
};

static devclass_t arm64_cpu_devclass;
static driver_t arm64_cpu_driver = {
        "arm64_cpu",
        arm64_cpu_methods,
        0
};

DRIVER_MODULE(arm64_cpu, cpu, arm64_cpu_driver, arm64_cpu_devclass, 0, 0);

static void
arm64_cpu_identify(driver_t *driver, device_t parent)
{

        if (device_find_child(parent, "arm64_cpu", -1) != NULL)
                return;
        if (BUS_ADD_CHILD(parent, 0, "arm64_cpu", -1) == NULL)
                device_printf(parent, "add child failed\n");
}

static int
arm64_cpu_probe(device_t dev)
{
        u_int cpuid;

        cpuid = device_get_unit(dev);
        if (cpuid >= MAXCPU || cpuid > mp_maxid)
                return (EINVAL);

        device_quiet(dev);
        return (0);
}

static int
arm64_cpu_attach(device_t dev)
{
        const uint32_t *reg;
        size_t reg_size;
        u_int cpuid;
        int i;

        cpuid = device_get_unit(dev);

        if (cpuid >= MAXCPU || cpuid > mp_maxid)
                return (EINVAL);

        KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid));

        reg = cpu_get_cpuid(dev, &reg_size);
        if (reg == NULL)
                return (EINVAL);

        if (bootverbose) {
                device_printf(dev, "register <");
                for (i = 0; i < reg_size; i++)
                        printf("%s%x", (i == 0) ? "" : " ", reg[i]);
                printf(">\n");
        }

        /* Set the device aside to start it later */
        cpu_list[cpuid] = dev;

        return (0);
}
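
/*
 * Called late in boot via SYSINIT (SI_SUB_SMP): install the IPI
 * handlers and then let the parked APs out of their wfe pen.
 */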
static void
release_aps(void *dummy __unused)
{
        int i;

        /* Only release CPUs if they exist */
        if (mp_ncpus == 1)
                return;

        intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
        intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
        intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
        intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
        intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
        intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

        atomic_store_rel_int(&aps_ready, 1);
        /* Wake up the other CPUs; they spin in wfe until aps_ready is set */
        __asm __volatile("sev");

        printf("Release APs\n");

        /* Wait up to 2 seconds (2000 * 1000us) for the APs to check in */
        for (i = 0; i < 2000; i++) {
                if (smp_started)
                        return;
                DELAY(1000);
        }

        printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
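
/*
 * Entry point for the APs: reached from the secondary startup path
 * (mpentry) with this CPU's rotated id once low-level bring-up is done.
 */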
void
init_secondary(uint64_t cpu)
{
        struct pcpu *pcpup;

        pcpup = &__pcpu[cpu];
        /*
         * Set the pcpu pointer with a backup in tpidr_el1 to be
         * loaded when entering the kernel from userland.
         */
        __asm __volatile(
            "mov x18, %0 \n"
            "msr tpidr_el1, %0" :: "r"(pcpup));

        /* Spin until the BSP releases the APs */
        while (!aps_ready)
                __asm __volatile("wfe");

        /* Initialize curthread */
        KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
        pcpup->pc_curthread = pcpup->pc_idlethread;
        pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb;

        /*
         * Identify current CPU. This is necessary to setup
         * affinity registers and to provide support for
         * runtime chip identification.
         */
        identify_cpu();

        intr_pic_init_secondary();

        /* Start per-CPU event timers. */
        cpu_initclocks_ap();

#ifdef VFP
        vfp_init();
#endif

        dbg_monitor_init();

        /* Enable interrupts */
        intr_enable();

        mtx_lock_spin(&ap_boot_mtx);

        atomic_add_rel_32(&smp_cpus, 1);

        if (smp_cpus == mp_ncpus) {
                /* enable IPIs, tlb shootdown, freezes etc */
                atomic_store_rel_int(&smp_started, 1);
        }

        mtx_unlock_spin(&ap_boot_mtx);

        /* Enter the scheduler */
        sched_throw(NULL);

        panic("scheduler returned us to init_secondary");
}

/*
 * Send IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

        KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
        PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 * Set up an IPI handler on the interrupt controller.
 *
 * Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
        struct intr_irqsrc *isrc;
        struct intr_ipi *ii;
        int error;

        KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
        KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

        error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
        if (error != 0)
                return;

        isrc->isrc_handlers++;

        ii = intr_ipi_lookup(ipi);
        KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

        ii->ii_handler = hand;
        ii->ii_handler_arg = arg;
        ii->ii_send = pic_ipi_send;
        ii->ii_send_arg = isrc;
        strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
        ii->ii_count = intr_ipi_setup_counters(name);
}

static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
        struct intr_ipi *ii;

        ii = intr_ipi_lookup(ipi);
        if (ii->ii_count == NULL)
                panic("%s: IPI %u not set up", __func__, ipi);

        ii->ii_send(ii->ii_send_arg, cpus, ipi);
}
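
/*
 * Per-IPI handlers, registered in release_aps() and invoked through
 * intr_ipi_dispatch() when the corresponding IPI arrives.
 */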
static void
ipi_ast(void *dummy __unused)
{

        CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

        CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
        hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{

        CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
        sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

        CTR0(KTR_SMP, "IPI_RENDEZVOUS");
        smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
        u_int cpu;

        CTR0(KTR_SMP, "IPI_STOP");

        cpu = PCPU_GET(cpuid);
        savectx(&stoppcbs[cpu]);

        /* Indicate we are stopped */
        CPU_SET_ATOMIC(cpu, &stopped_cpus);

        /* Wait for restart */
        while (!CPU_ISSET(cpu, &started_cpus))
                cpu_spinwait();

        CPU_CLR_ATOMIC(cpu, &started_cpus);
        CPU_CLR_ATOMIC(cpu, &stopped_cpus);
        CTR0(KTR_SMP, "IPI_STOP (restart)");
}
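
/* Report a flat SMP topology; no cache or cluster sharing is described. */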
struct cpu_group *
cpu_topo(void)
{

        return (smp_topo_none());
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

        /* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
        return (1);
}

static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
        uint64_t target_cpu;
        struct pcpu *pcpup;
        vm_paddr_t pa;
        u_int cpuid;
        int err;

        /* Check we are able to start this cpu */
        if (id > mp_maxid)
                return (0);

        KASSERT(id < MAXCPU, ("Too many CPUs"));

        /* We are already running on cpu 0 */
        if (id == cpu0)
                return (1);

        /*
         * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other
         * CPUs ordered as they are likely grouped into clusters so it can be
         * useful to keep that property, e.g. for the GICv3 driver to send
         * an IPI to all CPUs in the cluster. The net effect is
         * cpuid = (id - cpu0) mod (mp_maxid + 1).
         */
        cpuid = id;
        if (cpuid < cpu0)
                cpuid += mp_maxid + 1;
        cpuid -= cpu0;

        pcpup = &__pcpu[cpuid];
        pcpu_init(pcpup, cpuid, sizeof(struct pcpu));

        dpcpu[cpuid - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
            M_WAITOK | M_ZERO);
        dpcpu_init(dpcpu[cpuid - 1], cpuid);

        /* Build the target MPIDR from the one or two reg cells */
        target_cpu = reg[0];
        if (addr_size == 2) {
                target_cpu <<= 32;
                target_cpu |= reg[1];
        }

        printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
        pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

        err = psci_cpu_on(target_cpu, pa, cpuid);
        if (err != PSCI_RETVAL_SUCCESS) {
                /*
                 * Panic here if INVARIANTS are enabled and PSCI failed to
                 * start the requested CPU. psci_cpu_on returns PSCI_MISSING
                 * to indicate we are unable to use it to start the given CPU.
                 */
                KASSERT(err == PSCI_MISSING,
                    ("Failed to start CPU %u (%lx)\n", id, target_cpu));

                pcpu_destroy(pcpup);
                kmem_free(kernel_arena, (vm_offset_t)dpcpu[cpuid - 1],
                    DPCPU_SIZE);
                dpcpu[cpuid - 1] = NULL;
                /* Notify the user that the CPU failed to start */
                printf("Failed to start CPU %u (%lx)\n", id, target_cpu);
        } else
                CPU_SET(cpuid, &all_cpus);

        return (1);
}

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{

        mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

        CPU_SET(0, &all_cpus);

        switch(arm64_bus_method) {
        case ARM64_BUS_FDT:
                KASSERT(cpu0 >= 0, ("Current CPU was not found"));
                ofw_cpu_early_foreach(cpu_init_fdt, true);
                break;
        default:
                break;
        }
}

/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}

static boolean_t
cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
        uint64_t mpidr_fdt, mpidr_reg;

        if (cpu0 < 0) {
                mpidr_fdt = reg[0];
                if (addr_size == 2) {
                        mpidr_fdt <<= 32;
                        mpidr_fdt |= reg[1];
                }

                mpidr_reg = READ_SPECIALREG(mpidr_el1);

                /* Compare only the affinity fields (Aff3..Aff0) of MPIDR_EL1 */
                if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt)
                        cpu0 = id;
        }

        return (TRUE);
}

void
cpu_mp_setmaxid(void)
{
        int cores;

        if (arm64_bus_method == ARM64_BUS_FDT) {
                cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false);
                if (cores > 0) {
                        cores = MIN(cores, MAXCPU);
                        if (bootverbose)
                                printf("Found %d CPUs in the device tree\n",
                                    cores);
                        mp_ncpus = cores;
                        mp_maxid = cores - 1;
                        return;
                }
        }

        if (bootverbose)
                printf("No CPU data, limiting to 1 core\n");
        mp_ncpus = 1;
        mp_maxid = 0;
}

static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

        if (ipi >= INTR_IPI_COUNT)
                panic("%s: no such IPI %u", __func__, ipi);

        return (&ipi_sources[ipi]);
}

/*
 * Interrupt controller dispatch function for IPIs. It should be called
 * straight from the interrupt controller when the associated interrupt
 * source is learned, or from anybody who has an interrupt source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
        void *arg;
        struct intr_ipi *ii;

        ii = intr_ipi_lookup(ipi);
        if (ii->ii_count == NULL)
                panic("%s: IPI %u not set up", __func__, ipi);

        intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

        /*
         * Supply the handler with the trapframe as its argument
         * if none is registered.
         */
        arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;

        ii->ii_handler(arg);
}

/*
 * Map IPI into the interrupt controller.
 *
 * Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
        boolean_t is_percpu;
        int error;

        if (ipi >= INTR_IPI_COUNT)
                panic("%s: no such IPI %u", __func__, ipi);

        KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

        isrc->isrc_type = INTR_ISRCT_NAMESPACE;
        isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
        isrc->isrc_nspc_num = ipi_next_num;

        error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
        if (error == 0)
                isrc->isrc_dev = intr_irq_root_dev;

        return (error);
}

/*
 * Set up an IPI handler for an interrupt source.
 *
 * Note that there could be more ways to send and receive IPIs on a
 * platform, for example fast interrupts. In that case, one can call
 * this function with the AISHF_NOALLOC flag set and then call
 * intr_ipi_dispatch() when appropriate.
 *
 * Not SMP coherent.
 */
static int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
        struct intr_irqsrc *isrc;
        int error;

        if (filter == NULL)
                return (EINVAL);

        isrc = intr_ipi_lookup(ipi);
        if (isrc->isrc_ipifilter != NULL)
                return (EEXIST);

        if ((flags & AISHF_NOALLOC) == 0) {
                error = ipi_map(isrc, ipi);
                if (error != 0)
                        return (error);
        }

        isrc->isrc_ipifilter = filter;
        isrc->isrc_arg = arg;
        isrc->isrc_handlers = 1;
        isrc->isrc_count = intr_ipi_setup_counters(name);
        isrc->isrc_index = 0; /* it should not be used in IPI case */

        if (isrc->isrc_dev != NULL) {
                PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
                PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
        }
        return (0);
}
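
/*
 * MI IPI send entry points used by the rest of the kernel; each builds
 * a cpuset and hands it to intr_ipi_send().
 */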
void
ipi_all_but_self(u_int ipi)
{
        cpuset_t cpus;

        cpus = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &cpus);
        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
        cpuset_t cpus;

        CPU_ZERO(&cpus);
        CPU_SET(cpu, &cpus);

        CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
        intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        intr_ipi_send(cpus, ipi);
}