/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

32 #include "opt_kstack_pages.h"
33 #include "opt_platform.h"
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
41 #include <sys/domainset.h>
42 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/mutex.h>
49 #include <sys/sched.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_kern.h>
56 #include <vm/vm_map.h>
58 #include <machine/machdep.h>
59 #include <machine/debug_monitor.h>
60 #include <machine/intr.h>
61 #include <machine/smp.h>
63 #include <machine/vfp.h>
#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#define	MP_BOOTSTACK_SIZE	(kstack_pages * PAGE_SIZE)

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

static const struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define	INTR_IPI_NAMELEN	(MAXCOMLEN + 1)

struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;
	void *			ii_handler_arg;
	intr_ipi_send_t *	ii_send;
	void *			ii_send_arg;
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

struct pcb stoppcbs[MAXCPU];

#ifdef FDT
static u_int fdt_cpuid;
#endif

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Stacks for AP initialization, discarded once idle threads are started. */
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

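/*
 * A CPU is identified by the affinity fields of its MPIDR. CPU 0 is always
 * the boot CPU, so comparing against its recorded MPIDR tells us whether
 * target_cpu is the CPU we booted on.
 */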
static bool
is_boot_cpu(uint64_t target_cpu)
{

	return (PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK));
}

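/*
 * Last step of SMP bootstrap: register the IPI handlers with the interrupt
 * controller, release the APs parked in init_secondary(), and wait for
 * smp_started to be set.
 */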
static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (atomic_load_acq_int(&smp_started) != 0) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

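/*
 * Entry point for each AP once released. 'cpu' is the context_id that was
 * handed to the firmware when the CPU was started; buggy firmware may pass
 * garbage, so it is cross-checked against this CPU's MPIDR below.
 */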
void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;
	uint64_t mpidr;

	/*
	 * Verify that the value passed in the 'cpu' argument (aka context_id)
	 * is valid. Some older U-Boot based PSCI implementations are buggy,
	 * they can pass a random value in it.
	 */
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL ||
	    PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) != mpidr) {
		for (cpu = 0; cpu < mp_maxid; cpu++)
			if (cpuid_to_pcpu[cpu] != NULL &&
			    PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) == mpidr)
				break;
		if (cpu >= MAXCPU)
			panic("MPIDR for this CPU is not in pcpu table");
	}

	pcpup = cpuid_to_pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/*
	 * Identify current CPU. This is necessary to setup
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 *
	 * We need this before signalling the CPU is ready to
	 * let the boot CPU use the results.
	 */
	pcpup->pc_midr = get_midr();
	identify_cpu(cpu);

	/* Ensure the stores in identify_cpu have completed */
	atomic_thread_fence_acq_rel();

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

static void
smp_after_idle_runnable(void *arg __unused)
{
	int cpu;

	if (mp_ncpus == 1)
		return;

	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));

	/*
	 * Wait for all APs to handle an interrupt. After that, we know that
	 * the APs have entered the scheduler at least once, so the boot stacks
	 * are safe to free.
	 */
	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
	    smp_no_rendezvous_barrier, NULL);

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL)
			kmem_free((vm_offset_t)bootstacks[cpu],
			    MP_BOOTSTACK_SIZE);
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

/*
 * Send IPI thru interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	/*
	 * Ensure that this CPU's stores will be visible to IPI
	 * recipients before starting to send the interrupts.
	 */
	dsb(ishst);

	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 * Setup IPI handler on interrupt controller.
 *
 * Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);

	PIC_ENABLE_INTR(intr_irq_root_dev, isrc);
}

static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *dom, *root;
	int i;

	root = smp_topo_alloc(1);
	dom = smp_topo_alloc(vm_ndomains);

	root->cg_parent = NULL;
	root->cg_child = dom;
	CPU_COPY(&all_cpus, &root->cg_mask);
	root->cg_count = mp_ncpus;
	root->cg_children = vm_ndomains;
	root->cg_level = CG_SHARE_NONE;
	root->cg_flags = 0;

	/*
	 * Redundant layers will be collapsed by the caller so we don't need a
	 * special case for a single domain.
	 */
	for (i = 0; i < vm_ndomains; i++, dom++) {
		dom->cg_parent = root;
		dom->cg_child = NULL;
		CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
		dom->cg_count = CPU_COUNT(&dom->cg_mask);
		dom->cg_children = 0;
		dom->cg_level = CG_SHARE_L3;
		dom->cg_flags = 0;
	}

	return (root);
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

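/*
 * Ask the PSCI firmware to power on a secondary CPU, passing cpuid as the
 * context_id that the firmware will hand to init_secondary().
 */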
static int
enable_cpu_psci(uint64_t target_cpu, vm_paddr_t entry, u_int cpuid)
{
	int err;

	err = psci_cpu_on(target_cpu, entry, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU. psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    cpuid, target_cpu, err));
		return (EINVAL);
	}

	return (0);
}

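/*
 * Spin-table startup: the CPU is parked by firmware polling a "release
 * address"; writing the physical entry point there (followed by the
 * dsb/sev below) sends it to mpentry.
 */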
static int
enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr)
{
	vm_paddr_t *release_addr;

	release_addr = pmap_mapdev(release_paddr, sizeof(*release_addr));
	if (release_addr == NULL)
		return (ENOMEM);

	*release_addr = entry;
	pmap_unmapdev((vm_offset_t)release_addr, sizeof(*release_addr));

	__asm __volatile(
	    "dsb sy	\n"
	    "sev	\n"
	    ::: "memory");

	return (0);
}

/*
 * Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU,
 * do nothing. Returns true if the CPU is present and running.
 */
static bool
start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
{
	struct pcpu *pcpup;
	vm_offset_t pcpu_mem;
	vm_size_t size;
	vm_paddr_t pa;
	int err, naps;

	/* Check we are able to start this cpu */
	if (cpuid > mp_maxid)
		return (false);

	/* Skip boot CPU */
	if (is_boot_cpu(target_cpu))
		return (true);

	KASSERT(cpuid < MAXCPU, ("Too many CPUs"));

	size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
	pcpu_mem = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
	    M_WAITOK | M_ZERO);
	pmap_disable_promotion(pcpu_mem, size);

	pcpup = (struct pcpu *)pcpu_mem;
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
	pcpup->pc_mpidr_low = target_cpu & CPU_AFF_MASK;
	pcpup->pc_mpidr_high = (target_cpu & CPU_AFF_MASK) >> 32;

	dpcpu[cpuid - 1] = (void *)(pcpup + 1);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = (void *)kmem_malloc_domainset(
	    DOMAINSET_PREF(domain), MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

	/*
	 * A limited set of hardware we support can only do spintables and
	 * remain useful, due to lack of EL3. Thus, we'll usually fall into the
	 * PSCI branch here.
	 */
	MPASS(release_addr == 0 || !psci_present);
	if (release_addr != 0)
		err = enable_cpu_spin(target_cpu, pa, release_addr);
	else
		err = enable_cpu_psci(target_cpu, pa, cpuid);

	if (err != 0) {
		pcpu_destroy(pcpup);
		dpcpu[cpuid - 1] = NULL;
		kmem_free((vm_offset_t)bootstacks[cpuid], MP_BOOTSTACK_SIZE);
		kmem_free(pcpu_mem, size);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;
		return (false);
	}

	/* Wait for the AP to switch to its boot stack. */
	while (atomic_load_int(&aps_started) < naps + 1)
		cpu_spinwait();
	CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
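/*
 * MADT walker callback: start one CPU per GIC CPU interface (GICC) entry,
 * assigning CPU IDs sequentially; 'arg' points at the next free CPU ID.
 */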
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;
	int domain;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		if (is_boot_cpu(intr->ArmMpidr))
			id = 0;
		else
			id = *cpuid;

		domain = 0;
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(intr->Uid);
#endif
		if (start_cpu(id, intr->ArmMpidr, domain, 0)) {
			MPASS(cpuid_to_pcpu[id] != NULL);
			cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid;
			/*
			 * Don't increment for the boot CPU, its CPU ID is
			 * reserved.
			 */
			if (!is_boot_cpu(intr->ArmMpidr))
				(*cpuid)++;
		}
		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}
	/* Boot CPU is always 0 */
	cpuid = 1;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
/*
 * Failure is indicated by failing to populate *release_addr.
 */
static void
populate_release_addr(phandle_t node, vm_paddr_t *release_addr)
{
	pcell_t buf[2];

	if (OF_getencprop(node, "cpu-release-addr", buf, sizeof(buf)) !=
	    sizeof(buf))
		return;

	*release_addr = (((uintptr_t)buf[0] << 32) | buf[1]);
}

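/*
 * Per-CPU-node callback for ofw_cpu_early_foreach(): decode the MPIDR from
 * the "reg" property, pick a startup method, and start the CPU.
 */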
static bool
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	vm_paddr_t release_addr;
	char *enable_method;
	int domain;
	int cpuid;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (is_boot_cpu(target_cpu))
		cpuid = 0;
	else
		cpuid = fdt_cpuid;

	/*
	 * If PSCI is present, we'll always use that -- the cpu_on method is
	 * mandated in both v0.1 and v0.2. We'll check the enable-method if
	 * we don't have PSCI and use spin table if it's provided.
	 */
	release_addr = 0;
	if (!psci_present && cpuid != 0) {
		if (OF_getprop_alloc(node, "enable-method",
		    (void **)&enable_method) <= 0)
			return (false);

		if (strcmp(enable_method, "spin-table") != 0) {
			OF_prop_free(enable_method);
			return (false);
		}

		OF_prop_free(enable_method);
		populate_release_addr(node, &release_addr);
		if (release_addr == 0) {
			printf("Failed to fetch release address for CPU %u",
			    cpuid);
			return (false);
		}
	}

	if (!start_cpu(cpuid, target_cpu, 0, release_addr))
		return (false);

	/*
	 * Don't increment for the boot CPU, its CPU ID is reserved.
	 */
	if (!is_boot_cpu(target_cpu))
		fdt_cpuid++;

	/* Try to read the numa node of this cpu */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	cpuid_to_pcpu[cpuid]->pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(cpuid, &cpuset_domain[domain]);

	return (true);
}

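/*
 * Check the root node's compatible strings against the quirk table before
 * enumerating and starting the CPUs listed in the device tree.
 */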
static void
cpu_init_fdt(void)
{
	phandle_t node;
	int i;

	node = OF_peer(0);
	for (i = 0; fdt_quirks[i].compat != NULL; i++) {
		if (ofw_bus_node_is_compatible(node,
		    fdt_quirks[i].compat) != 0) {
			mp_quirks = fdt_quirks[i].quirks;
		}
	}
	fdt_cpuid = 1;
	ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	uint64_t mpidr;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* CPU 0 is always boot CPU. */
	CPU_SET(0, &all_cpus);
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	cpuid_to_pcpu[0]->pc_mpidr_low = mpidr;
	cpuid_to_pcpu[0]->pc_mpidr_high = mpidr >> 32;

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cpu_init_fdt();
		break;
#endif
	default:
		break;
	}
}

/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

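/*
 * Set mp_ncpus and mp_maxid from the firmware tables, clamped to MAXCPU
 * and optionally reduced by the hw.ncpu tunable.
 */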
void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(NULL, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}

static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 * Interrupt controller dispatch function for IPIs. It should
 * be called straight from the interrupt controller, when the associated
 * interrupt source is learned. Or from anybody who has an interrupt
 * source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply ipi filter with trapframe argument
	 * if none is registered.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}

#ifdef notyet
/*
 * Map IPI into interrupt controller.
 *
 * Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 * Setup IPI handler to interrupt source.
 *
 * Note that there could be more ways how to send and receive IPIs
 * on a platform like fast interrupts for example. In that case,
 * one can call this function with the AISHF_NOALLOC flag set and then
 * call intr_ipi_dispatch() when appropriate.
 *
 * Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

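/* MI ipi_* entry points called by the machine-independent SMP code. */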
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}