/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
 */
/*-
 * Copyright (c) 2002 Jake Burkholder.
 * Copyright (c) 2007 - 2010 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <dev/ofw/openfirm.h>

#include <machine/asi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/ofw_machdep.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tick.h>
#include <machine/tlb.h>
#include <machine/tsb.h>
#include <machine/tte.h>
#include <machine/ver.h>
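
/* Names of the OFW client interface services used to start and stop CPUs. */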
#define	SUNW_STARTCPU	"SUNW,start-cpu"
#define	SUNW_STOPSELF	"SUNW,stop-self"

static ih_func_t cpu_ipi_ast;
static ih_func_t cpu_ipi_hardclock;
static ih_func_t cpu_ipi_preempt;
static ih_func_t cpu_ipi_stop;

/*
 * Argument area used to pass data to non-boot processors as they start up.
 * This must be statically initialized with a known invalid CPU module ID,
 * since the other processors will use it before the boot CPU enters the
 * kernel.
 */
struct cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0, 0 };
struct ipi_cache_args ipi_cache_args;
struct ipi_rd_args ipi_rd_args;
struct ipi_tlb_args ipi_tlb_args;
struct pcb stoppcbs[MAXCPU];
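
/* IPI dispatch functions; set to match the CPU implementation in mp_init(). */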
cpu_ipi_selected_t *cpu_ipi_selected;
cpu_ipi_single_t *cpu_ipi_single;

static vm_offset_t mp_tramp;
static u_int cpuid_to_mid[MAXCPU];
static u_int isjbus;
static volatile cpuset_t shutdown_cpus;

static void ap_count(phandle_t node, u_int mid, u_int cpu_impl);
static void ap_start(phandle_t node, u_int mid, u_int cpu_impl);
static void cpu_mp_unleash(void *v);
static void foreach_ap(phandle_t node, void (*func)(phandle_t node,
    u_int mid, u_int cpu_impl));
static void sun4u_startcpu(phandle_t cpu, void *func, u_long arg);

static cpu_ipi_selected_t cheetah_ipi_selected;
static cpu_ipi_single_t cheetah_ipi_single;
static cpu_ipi_selected_t jalapeno_ipi_selected;
static cpu_ipi_single_t jalapeno_ipi_single;
static cpu_ipi_selected_t spitfire_ipi_selected;
static cpu_ipi_single_t spitfire_ipi_single;

SYSINIT(cpu_mp_unleash, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
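
/*
 * Set up the OFW trampoline the non-boot CPUs will execute from: copy the
 * trampoline code into place, record the locked kernel TLB entries and the
 * address of mp_startup() in it, and pick the IPI implementation matching
 * the CPU implementation.
 */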
void
mp_init(u_int cpu_impl)
{
	struct tte *tp;
	int i;

	mp_tramp = (vm_offset_t)OF_claim(NULL, PAGE_SIZE, PAGE_SIZE);
	if (mp_tramp == (vm_offset_t)-1)
		panic("%s", __func__);
	bcopy(mp_tramp_code, (void *)mp_tramp, mp_tramp_code_len);
	*(vm_offset_t *)(mp_tramp + mp_tramp_tlb_slots) = kernel_tlb_slots;
	*(vm_offset_t *)(mp_tramp + mp_tramp_func) = (vm_offset_t)mp_startup;
	tp = (struct tte *)(mp_tramp + mp_tramp_code_len);
	for (i = 0; i < kernel_tlb_slots; i++) {
		tp[i].tte_vpn = TV_VPN(kernel_tlbs[i].te_va, TS_4M);
		tp[i].tte_data = TD_V | TD_4M | TD_PA(kernel_tlbs[i].te_pa) |
		    TD_L | TD_CP | TD_CV | TD_P | TD_W;
	}
	for (i = 0; i < PAGE_SIZE; i += sizeof(vm_offset_t))
		flush(mp_tramp + i);

	/*
	 * On UP systems cpu_ipi_selected() can be called while
	 * cpu_mp_start() wasn't, so initialize these here.
	 */
	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIi ||
	    cpu_impl == CPU_IMPL_ULTRASPARCIIIip) {
		isjbus = 1;
		cpu_ipi_selected = jalapeno_ipi_selected;
		cpu_ipi_single = jalapeno_ipi_single;
	} else if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		cpu_ipi_selected = cheetah_ipi_selected;
		cpu_ipi_single = cheetah_ipi_single;
	} else {
		cpu_ipi_selected = spitfire_ipi_selected;
		cpu_ipi_single = spitfire_ipi_single;
	}
}
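
/*
 * Apply the given function to every CPU node in the OFW device tree,
 * skipping the boot processor (identified by its module ID).
 */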
static void
foreach_ap(phandle_t node, void (*func)(phandle_t node, u_int mid,
    u_int cpu_impl))
{
	char type[sizeof("cpu")];
	phandle_t child;
	u_int cpuid;
	u_int cpu_impl;

	/* There's no need to traverse the whole OFW tree twice. */
	if (mp_maxid > 0 && mp_ncpus >= mp_maxid + 1)
		return;

	for (; node != 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child > 0)
			foreach_ap(child, func);
		else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, "implementation#", &cpu_impl,
			    sizeof(cpu_impl)) <= 0)
				panic("%s: couldn't determine CPU "
				    "implementation", __func__);
			if (OF_getprop(node, cpu_cpuid_prop(cpu_impl), &cpuid,
			    sizeof(cpuid)) <= 0)
				panic("%s: couldn't determine CPU module ID",
				    __func__);
			if (cpuid == PCPU_GET(mid))
				continue;
			(*func)(node, cpuid, cpu_impl);
		}
	}
}

/*
 * Probe for other CPUs.
 */
void
cpu_mp_setmaxid(void)
{

	CPU_SETOF(curcpu, &all_cpus);
	mp_ncpus = 1;
	mp_maxid = 0;

	foreach_ap(OF_child(OF_peer(0)), ap_count);
}

static void
ap_count(phandle_t node __unused, u_int mid __unused, u_int cpu_impl __unused)
{

	mp_maxid++;
}

int
cpu_mp_probe(void)
{

	return (mp_maxid > 0);
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}
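
/*
 * Ask the firmware to start the given CPU, executing func(arg), using the
 * SUNW,start-cpu client interface service.
 */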
static void
sun4u_startcpu(phandle_t cpu, void *func, u_long arg)
{
	static struct {
		cell_t	name;
		cell_t	nargs;
		cell_t	nreturns;
		cell_t	cpu;
		cell_t	func;
		cell_t	arg;
	} args = {
		(cell_t)SUNW_STARTCPU,
		3,
	};

	args.cpu = cpu;
	args.func = (cell_t)func;
	args.arg = (cell_t)arg;
	ofw_entry(&args);
}

/*
 * Fire up any non-boot processors.
 */
void
cpu_mp_start(void)
{

	intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL);
	intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action,
	    -1, NULL, NULL);
	intr_setup(PIL_STOP, cpu_ipi_stop, -1, NULL, NULL);
	intr_setup(PIL_PREEMPT, cpu_ipi_preempt, -1, NULL, NULL);
	intr_setup(PIL_HARDCLOCK, cpu_ipi_hardclock, -1, NULL, NULL);

	cpuid_to_mid[curcpu] = PCPU_GET(mid);

	foreach_ap(OF_child(OF_peer(0)), ap_start);
	KASSERT(!isjbus || mp_ncpus <= IDR_JALAPENO_MAX_BN_PAIRS,
	    ("%s: can only IPI a maximum of %d JBus-CPUs",
	    __func__, IDR_JALAPENO_MAX_BN_PAIRS));

	smp_active = 1;
}
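
/*
 * Start a single AP: synchronize its (S)TICK timer(s) with the BSP's and
 * set up its per-CPU data before it is unleashed.
 */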
static void
ap_start(phandle_t node, u_int mid, u_int cpu_impl)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	u_int cpuid;
	uint32_t clock;

	if (mp_ncpus > MAXCPU)
		return;

	if (OF_getprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0)
		panic("%s: couldn't determine CPU frequency", __func__);
	if (clock != PCPU_GET(clock))
		tick_et_use_stick = 1;

	csa = &cpu_start_args;
	csa->csa_state = 0;
	sun4u_startcpu(node, (void *)mp_tramp, 0);
	s = intr_disable();
	while (csa->csa_state != CPU_TICKSYNC)
		;
	membar(StoreLoad);
	csa->csa_tick = rd(tick);
	if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		while (csa->csa_state != CPU_STICKSYNC)
			;
		membar(StoreLoad);
		csa->csa_stick = rdstick();
	}
	while (csa->csa_state != CPU_INIT)
		;
	csa->csa_tick = csa->csa_stick = 0;
	intr_restore(s);

	cpuid = mp_ncpus++;
	cpuid_to_mid[cpuid] = mid;
	cpu_identify(csa->csa_ver, clock, cpuid);

	va = kmem_alloc(kernel_map, PCPU_PAGES * PAGE_SIZE);
	pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, cpuid, sizeof(*pc));
	dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), cpuid);
	pc->pc_addr = va;
	pc->pc_clock = clock;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = mid;
	pc->pc_node = node;

	cache_init(pc);

	CPU_SET(cpuid, &all_cpus);
	intr_add_cpu(cpuid);
}

void
cpu_mp_announce(void)
{

}
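
/*
 * Release the APs spinning in the trampoline: hand each one its idle
 * thread, its user TLB context range and locked TTEs for its per-CPU
 * pages, then wait for it to signal CPU_BOOTSTRAP.
 */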
static void
cpu_mp_unleash(void *v)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	vm_paddr_t pa;
	u_int ctx_inc;
	u_int ctx_min;
	int i;

	ctx_min = TLB_CTX_USER_MIN;
	ctx_inc = (TLB_CTX_USER_MAX - 1) / mp_ncpus;
	csa = &cpu_start_args;
	csa->csa_count = mp_ncpus;
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		pc->pc_tlb_ctx = ctx_min;
		pc->pc_tlb_ctx_min = ctx_min;
		pc->pc_tlb_ctx_max = ctx_min + ctx_inc;
		ctx_min += ctx_inc;

		if (pc->pc_cpuid == curcpu)
			continue;
		KASSERT(pc->pc_idlethread != NULL,
		    ("%s: idlethread", __func__));
		pc->pc_curthread = pc->pc_idlethread;
		pc->pc_curpcb = pc->pc_curthread->td_pcb;
		for (i = 0; i < PCPU_PAGES; i++) {
			va = pc->pc_addr + i * PAGE_SIZE;
			pa = pmap_kextract(va);
			if (pa == 0)
				panic("%s: pmap_kextract", __func__);
			csa->csa_ttes[i].tte_vpn = TV_VPN(va, TS_8K);
			csa->csa_ttes[i].tte_data = TD_V | TD_8K | TD_PA(pa) |
			    TD_L | TD_CP | TD_CV | TD_P | TD_W;
		}
		csa->csa_state = 0;
		csa->csa_pcpu = pc->pc_addr;
		csa->csa_mid = pc->pc_mid;
		s = intr_disable();
		while (csa->csa_state != CPU_BOOTSTRAP)
			;
		intr_restore(s);
	}

	membar(StoreLoad);
	csa->csa_count = 0;
	smp_started = 1;
}
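
/*
 * AP entry point, reached from the trampoline via mp_startup(); performs
 * the per-CPU initialization and then signals the BSP before entering
 * the scheduler.
 */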
void
cpu_mp_bootstrap(struct pcpu *pc)
{
	volatile struct cpu_start_args *csa;

	csa = &cpu_start_args;

	/* Do CPU-specific initialization. */
	if (pc->pc_impl >= CPU_IMPL_ULTRASPARCIII)
		cheetah_init(pc->pc_impl);
	else if (pc->pc_impl == CPU_IMPL_SPARC64V)
		zeus_init(pc->pc_impl);

	/*
	 * Enable the caches.  Note that this may include applying
	 * workarounds.
	 */
	cache_enable(pc->pc_impl);

	/*
	 * Clear (S)TICK timer(s) (including NPT) and ensure they are stopped.
	 */
	tick_clear(pc->pc_impl);
	tick_stop(pc->pc_impl);

	/* Set the kernel context. */
	pmap_set_kctx();

	/* Lock the kernel TSB in the TLB if necessary. */
	if (tsb_kernel_ldd_phys == 0)
		pmap_map_tsb();

	/*
	 * Flush all non-locked TLB entries possibly left over by the
	 * firmware.
	 */
	tlb_flush_nonlocked();

	/*
	 * Enable interrupts.
	 * Note that the PIL will be lowered indirectly via sched_throw(NULL)
	 * when the fake spinlock held by the idle thread eventually is
	 * released.
	 */
	wrpr(pstate, 0, PSTATE_KERNEL);

	smp_cpus++;
	KASSERT(curthread != NULL, ("%s: curthread", __func__));
	printf("SMP: AP CPU #%d Launched!\n", curcpu);

	csa->csa_count--;
	membar(StoreLoad);
	csa->csa_state = CPU_BOOTSTRAP;
	while (csa->csa_count != 0)
		;

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Ok, now enter the scheduler. */
	sched_throw(NULL);
}
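
/*
 * Stop all other CPUs for shutdown; they end up parked in a busy loop
 * with interrupts disabled (see the shutdown path in cpu_ipi_stop()).
 */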
void
cpu_mp_shutdown(void)
{
	cpuset_t cpus;
	int i;

	critical_enter();
	shutdown_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &shutdown_cpus);
	cpus = shutdown_cpus;

	/* XXX: Stop all the CPUs which aren't already. */
	if (CPU_CMP(&stopped_cpus, &cpus)) {

		/* cpus is just a flat "on" mask without curcpu. */
		CPU_NAND(&cpus, &stopped_cpus);
		stop_cpus(cpus);
	}
	i = 0;
	while (!CPU_EMPTY(&shutdown_cpus)) {
		if (i++ > 100000) {
			printf("timeout shutting down CPUs.\n");
			break;
		}
	}
	critical_exit();
}
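
/*
 * IPI handlers; these run when the corresponding PIL is triggered on the
 * receiving CPU.  cpu_ipi_ast() is empty as the AST itself is processed
 * on return from the interrupt.
 */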
static void
cpu_ipi_ast(struct trapframe *tf __unused)
{

}

static void
cpu_ipi_stop(struct trapframe *tf __unused)
{
	u_int cpuid;

	CTR2(KTR_SMP, "%s: stopped %d", __func__, curcpu);
	sched_pin();
	savectx(&stoppcbs[curcpu]);
	cpuid = PCPU_GET(cpuid);
	CPU_SET_ATOMIC(cpuid, &stopped_cpus);
	while (!CPU_ISSET(cpuid, &started_cpus)) {
		if (CPU_ISSET(cpuid, &shutdown_cpus)) {
			CPU_CLR_ATOMIC(cpuid, &shutdown_cpus);
			(void)intr_disable();
			for (;;)
				;
		}
	}
	CPU_CLR_ATOMIC(cpuid, &started_cpus);
	CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
	sched_unpin();
	CTR2(KTR_SMP, "%s: restarted %d", __func__, curcpu);
}

static void
cpu_ipi_preempt(struct trapframe *tf)
{

	sched_preempt(curthread);
}

static void
cpu_ipi_hardclock(struct trapframe *tf)
{
	struct trapframe *oldframe;
	struct thread *td;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	hardclockintr();
	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}
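
/*
 * IPI delivery for UltraSPARC I/II (SpitFire-class); multicasts are done
 * as a series of single-target dispatches.
 */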
static void
spitfire_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
	u_int cpu;

	while ((cpu = cpusetobj_ffs(&cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &cpus);
		spitfire_ipi_single(cpu, d0, d1, d2);
	}
}

static void
spitfire_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
{
	register_t s;
	u_long ids;
	u_int mid;
	int i;

	KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	mid = cpuid_to_mid[cpu];
	for (i = 0; i < IPI_RETRIES; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
		    ASI_SDB_INTR_W, 0);
		/*
		 * Workaround for SpitFire erratum #54; do a dummy read
		 * from a SDB internal register before the MEMBAR #Sync
		 * for the write to ASI_SDB_INTR_W (requiring another
		 * MEMBAR #Sync in order to make sure the write has
		 * occurred before the load).
		 */
		membar(Sync);
		(void)ldxa(AA_SDB_CNTL_HIGH, ASI_SDB_CONTROL_R);
		membar(Sync);
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    IDR_BUSY) != 0)
			;
		intr_restore(s);
		if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
			return;
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPU is also
		 * trying to send an IPI.
		 */
		DELAY(2);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI to module 0x%x\n",
		    __func__, mid);
	else
		panic("%s: couldn't send IPI to module 0x%x",
		    __func__, mid);
}
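
/*
 * IPI delivery for UltraSPARC III and up (Cheetah-class) as well as
 * SPARC64 V; the dispatch status register reports BUSY/NACK for several
 * outstanding dispatches at once.
 */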
static void
cheetah_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
{
	register_t s;
	u_long ids;
	u_int mid;
	int i;

	KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
	    IDR_CHEETAH_ALL_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	mid = cpuid_to_mid[cpu];
	for (i = 0; i < IPI_RETRIES; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
		    ASI_SDB_INTR_W, 0);
		membar(Sync);
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    IDR_BUSY) != 0)
			;
		intr_restore(s);
		if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
			return;
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPU is also
		 * trying to send an IPI.
		 */
		DELAY(2);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI to module 0x%x\n",
		    __func__, mid);
	else
		panic("%s: couldn't send IPI to module 0x%x",
		    __func__, mid);
}

static void
cheetah_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
	char pbuf[CPUSETBUFSIZ];
	register_t s;
	u_long ids;
	u_int bnp;
	u_int cpu;
	int i;

	KASSERT(!CPU_ISSET(curcpu, &cpus), ("%s: CPU can't IPI itself",
	    __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
	    IDR_CHEETAH_ALL_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	if (CPU_EMPTY(&cpus))
		return;
	ids = 0;
	for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		bnp = 0;
		for (cpu = 0; cpu < mp_ncpus; cpu++) {
			if (CPU_ISSET(cpu, &cpus)) {
				stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] <<
				    IDC_ITID_SHIFT) | bnp << IDC_BN_SHIFT,
				    ASI_SDB_INTR_W, 0);
				membar(Sync);
				bnp++;
				if (bnp == IDR_CHEETAH_MAX_BN_PAIRS)
					break;
			}
		}
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    IDR_CHEETAH_ALL_BUSY) != 0)
			;
		intr_restore(s);
		bnp = 0;
		for (cpu = 0; cpu < mp_ncpus; cpu++) {
			if (CPU_ISSET(cpu, &cpus)) {
				if ((ids & (IDR_NACK << (2 * bnp))) == 0)
					CPU_CLR(cpu, &cpus);
				bnp++;
			}
		}
		if (CPU_EMPTY(&cpus))
			return;
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPUs are
		 * also trying to send IPIs.
		 */
		DELAY(2 * mp_ncpus);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI (cpus=%s ids=0x%lx)\n",
		    __func__, cpusetobj_strprint(pbuf, &cpus), ids);
	else
		panic("%s: couldn't send IPI (cpus=%s ids=0x%lx)",
		    __func__, cpusetobj_strprint(pbuf, &cpus), ids);
}
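
/*
 * IPI delivery for UltraSPARC IIIi/IIIi+ (Jalapeno, JBus); here the
 * BUSY/NACK pairs in the dispatch status register are indexed by the
 * module ID of the target CPU.
 */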
static void
jalapeno_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
{
	register_t s;
	u_long ids;
	u_int busy, busynack, mid;
	int i;

	KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
	    IDR_CHEETAH_ALL_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	mid = cpuid_to_mid[cpu];
	busy = IDR_BUSY << (2 * mid);
	busynack = (IDR_BUSY | IDR_NACK) << (2 * mid);
	for (i = 0; i < IPI_RETRIES; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
		    ASI_SDB_INTR_W, 0);
		membar(Sync);
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    busy) != 0)
			;
		intr_restore(s);
		if ((ids & busynack) == 0)
			return;
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPU is also
		 * trying to send an IPI.
		 */
		DELAY(2);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI to module 0x%x\n",
		    __func__, mid);
	else
		panic("%s: couldn't send IPI to module 0x%x",
		    __func__, mid);
}

static void
jalapeno_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
	char pbuf[CPUSETBUFSIZ];
	register_t s;
	u_long ids;
	u_int cpu;
	int i;

	KASSERT(!CPU_ISSET(curcpu, &cpus), ("%s: CPU can't IPI itself",
	    __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
	    IDR_CHEETAH_ALL_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	if (CPU_EMPTY(&cpus))
		return;
	ids = 0;
	for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		for (cpu = 0; cpu < mp_ncpus; cpu++) {
			if (CPU_ISSET(cpu, &cpus)) {
				stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] <<
				    IDC_ITID_SHIFT), ASI_SDB_INTR_W, 0);
				membar(Sync);
			}
		}
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    IDR_CHEETAH_ALL_BUSY) != 0)
			;
		intr_restore(s);
		if ((ids &
		    (IDR_CHEETAH_ALL_BUSY | IDR_CHEETAH_ALL_NACK)) == 0)
			return;
		for (cpu = 0; cpu < mp_ncpus; cpu++)
			if (CPU_ISSET(cpu, &cpus))
				if ((ids & (IDR_NACK <<
				    (2 * cpuid_to_mid[cpu]))) == 0)
					CPU_CLR(cpu, &cpus);
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPUs are
		 * also trying to send IPIs.
		 */
		DELAY(2 * mp_ncpus);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI (cpus=%s ids=0x%lx)\n",
		    __func__, cpusetobj_strprint(pbuf, &cpus), ids);
	else
		panic("%s: couldn't send IPI (cpus=%s ids=0x%lx)",
		    __func__, cpusetobj_strprint(pbuf, &cpus), ids);
}