/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
 */
/*-
 * Copyright (c) 2002 Jake Burkholder.
 * Copyright (c) 2007 - 2010 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <dev/ofw/openfirm.h>
#include <machine/asi.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/ofw_machdep.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/tick.h>
#include <machine/tlb.h>
#include <machine/tsb.h>
#include <machine/tte.h>
#include <machine/ver.h>
#define	SUNW_STARTCPU		"SUNW,start-cpu"
#define	SUNW_STOPSELF		"SUNW,stop-self"

static ih_func_t cpu_ipi_ast;
static ih_func_t cpu_ipi_hardclock;
static ih_func_t cpu_ipi_preempt;
static ih_func_t cpu_ipi_stop;
/*
 * Argument area used to pass data to non-boot processors as they start up.
 * This must be statically initialized with a known invalid CPU module ID,
 * since the other processors will use it before the boot CPU enters the
 * MP startup code.
 */
struct cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0, 0 };
struct ipi_cache_args ipi_cache_args;
struct ipi_rd_args ipi_rd_args;
struct ipi_tlb_args ipi_tlb_args;
struct pcb stoppcbs[MAXCPU];
struct mtx ipi_mtx;

cpu_ipi_selected_t *cpu_ipi_selected;
cpu_ipi_single_t *cpu_ipi_single;

static vm_offset_t mp_tramp;
static u_int cpuid_to_mid[MAXCPU];
static int isjbus;
static volatile cpuset_t shutdown_cpus;
static void ap_count(phandle_t node, u_int mid, u_int cpu_impl);
static void ap_start(phandle_t node, u_int mid, u_int cpu_impl);
static void cpu_mp_unleash(void *v);
static void foreach_ap(phandle_t node, void (*func)(phandle_t node,
    u_int mid, u_int cpu_impl));
static void sun4u_startcpu(phandle_t cpu, void *func, u_long arg);

static cpu_ipi_selected_t cheetah_ipi_selected;
static cpu_ipi_single_t cheetah_ipi_single;
static cpu_ipi_selected_t jalapeno_ipi_selected;
static cpu_ipi_single_t jalapeno_ipi_single;
static cpu_ipi_selected_t spitfire_ipi_selected;
static cpu_ipi_single_t spitfire_ipi_single;

SYSINIT(cpu_mp_unleash, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
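/*
 * Claim a page from the firmware for the AP bootstrap trampoline, copy
 * the trampoline code into it and append locked 4MB TTEs describing the
 * kernel mappings, so an AP can map the kernel before it takes over its
 * own trap table.
 */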
void
mp_init(u_int cpu_impl)
{
	struct tte *tp;
	int i;

	mp_tramp = (vm_offset_t)OF_claim(NULL, PAGE_SIZE, PAGE_SIZE);
	if (mp_tramp == (vm_offset_t)-1)
		panic("%s", __func__);
	bcopy(mp_tramp_code, (void *)mp_tramp, mp_tramp_code_len);
	*(vm_offset_t *)(mp_tramp + mp_tramp_tlb_slots) = kernel_tlb_slots;
	*(vm_offset_t *)(mp_tramp + mp_tramp_func) = (vm_offset_t)mp_startup;
	tp = (struct tte *)(mp_tramp + mp_tramp_code_len);
	for (i = 0; i < kernel_tlb_slots; i++) {
		tp[i].tte_vpn = TV_VPN(kernel_tlbs[i].te_va, TS_4M);
		tp[i].tte_data = TD_V | TD_4M | TD_PA(kernel_tlbs[i].te_pa) |
		    TD_L | TD_CP | TD_CV | TD_P | TD_W;
	}
	for (i = 0; i < PAGE_SIZE; i += sizeof(vm_offset_t))
		flush(mp_tramp + i);

	/*
	 * On UP systems cpu_ipi_selected() can be called even though
	 * cpu_mp_start() was not, so initialize these here.
	 */
	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIi ||
	    cpu_impl == CPU_IMPL_ULTRASPARCIIIip) {
		isjbus = 1;
		cpu_ipi_selected = jalapeno_ipi_selected;
		cpu_ipi_single = jalapeno_ipi_single;
	} else if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		cpu_ipi_selected = cheetah_ipi_selected;
		cpu_ipi_single = cheetah_ipi_single;
	} else {
		cpu_ipi_selected = spitfire_ipi_selected;
		cpu_ipi_single = spitfire_ipi_single;
	}
}
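/*
 * Apply func() to every "cpu" node in the OFW device tree, skipping the
 * boot processor (whose module ID matches our own PCPU mid).
 */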
static void
foreach_ap(phandle_t node, void (*func)(phandle_t node, u_int mid,
    u_int cpu_impl))
{
	char type[sizeof("cpu")];
	phandle_t child;
	u_int cpuid;
	u_int cpu_impl;

	/* There's no need to traverse the whole OFW tree twice. */
	if (mp_maxid > 0 && mp_ncpus >= mp_maxid + 1)
		return;

	for (; node != 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child != 0)
			foreach_ap(child, func);
		else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, "implementation#", &cpu_impl,
			    sizeof(cpu_impl)) <= 0)
				panic("%s: couldn't determine CPU "
				    "implementation", __func__);
			if (OF_getprop(node, cpu_cpuid_prop(cpu_impl), &cpuid,
			    sizeof(cpuid)) <= 0)
				panic("%s: couldn't determine CPU module ID",
				    __func__);
			if (cpuid == PCPU_GET(mid))
				continue;
			(*func)(node, cpuid, cpu_impl);
		}
	}
}
/*
 * Probe for other CPUs.
 */
void
cpu_mp_setmaxid(void)
{

	CPU_SETOF(curcpu, &all_cpus);
	mp_ncpus = 1;
	mp_maxid = 0;

	foreach_ap(OF_child(OF_peer(0)), ap_count);
	mp_maxid = mp_ncpus - 1;
}

static void
ap_count(phandle_t node __unused, u_int mid __unused, u_int cpu_impl __unused)
{

	mp_ncpus++;
}

int
cpu_mp_probe(void)
{

	return (mp_maxid > 0);
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}
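/*
 * Ask the firmware to start a CPU via the "SUNW,start-cpu" client
 * interface service, passing it the node of the CPU to start, the entry
 * point and an argument.
 */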
static void
sun4u_startcpu(phandle_t cpu, void *func, u_long arg)
{
	static struct {
		cell_t	name;
		cell_t	nargs;
		cell_t	nreturns;
		cell_t	cpu;
		cell_t	func;
		cell_t	arg;
	} args = {
		(cell_t)SUNW_STARTCPU,
		3,
	};

	args.cpu = cpu;
	args.func = (cell_t)func;
	args.arg = (cell_t)arg;
	ofw_entry(&args);
}
/*
 * Fire up any non-boot processors.
 */
void
cpu_mp_start(void)
{

	mtx_init(&ipi_mtx, "ipi", NULL, MTX_SPIN);

	intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL);
	intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action,
	    -1, NULL, NULL);
	intr_setup(PIL_STOP, cpu_ipi_stop, -1, NULL, NULL);
	intr_setup(PIL_PREEMPT, cpu_ipi_preempt, -1, NULL, NULL);
	intr_setup(PIL_HARDCLOCK, cpu_ipi_hardclock, -1, NULL, NULL);

	cpuid_to_mid[curcpu] = PCPU_GET(mid);

	foreach_ap(OF_child(OF_peer(0)), ap_start);
	KASSERT(!isjbus || mp_ncpus <= IDR_JALAPENO_MAX_BN_PAIRS,
	    ("%s: can only IPI a maximum of %d JBus-CPUs",
	    __func__, IDR_JALAPENO_MAX_BN_PAIRS));

	smp_active = 1;
}
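/*
 * Wake a single AP through the firmware and walk it through the TICK
 * (and, on USIII and SPARC64 V, STICK) synchronization handshake driven
 * by the cpu_start_args state machine, then set up its per-CPU data.
 */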
static void
ap_start(phandle_t node, u_int mid, u_int cpu_impl)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	u_int cpuid;
	uint32_t clock;

	if (mp_ncpus > MAXCPU)
		return;

	if (OF_getprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0)
		panic("%s: couldn't determine CPU frequency", __func__);
	if (clock != PCPU_GET(clock))
		tick_et_use_stick = 1;

	csa = &cpu_start_args;
	csa->csa_state = 0;
	sun4u_startcpu(node, (void *)mp_tramp, 0);
	s = intr_disable();
	while (csa->csa_state != CPU_TICKSYNC)
		;
	membar(StoreLoad);
	csa->csa_tick = rd(tick);
	if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		while (csa->csa_state != CPU_STICKSYNC)
			;
		membar(StoreLoad);
		csa->csa_stick = rdstick();
	}
	while (csa->csa_state != CPU_INIT)
		;
	csa->csa_tick = csa->csa_stick = 0;
	intr_restore(s);

	cpuid = mp_ncpus++;
	cpuid_to_mid[cpuid] = mid;
	cpu_identify(csa->csa_ver, clock, cpuid);

	va = kmem_alloc(kernel_map, PCPU_PAGES * PAGE_SIZE);
	pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, cpuid, sizeof(*pc));
	dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), cpuid);
	pc->pc_addr = va;
	pc->pc_clock = clock;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = mid;
	pc->pc_node = node;

	cache_init(pc);

	CPU_SET(cpuid, &all_cpus);
	intr_add_cpu(cpuid);
}
void
cpu_mp_announce(void)
{

}
static void
cpu_mp_unleash(void *v)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	vm_paddr_t pa;
	u_int ctx_inc;
	u_int ctx_min;
	int i;

	ctx_min = TLB_CTX_USER_MIN;
	ctx_inc = (TLB_CTX_USER_MAX - 1) / mp_ncpus;
	csa = &cpu_start_args;
	csa->csa_count = mp_ncpus;
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		pc->pc_tlb_ctx = ctx_min;
		pc->pc_tlb_ctx_min = ctx_min;
		pc->pc_tlb_ctx_max = ctx_min + ctx_inc;
		ctx_min += ctx_inc;

		if (pc->pc_cpuid == curcpu)
			continue;
		KASSERT(pc->pc_idlethread != NULL,
		    ("%s: idlethread", __func__));
		pc->pc_curthread = pc->pc_idlethread;
		pc->pc_curpcb = pc->pc_curthread->td_pcb;
		for (i = 0; i < PCPU_PAGES; i++) {
			va = pc->pc_addr + i * PAGE_SIZE;
			pa = pmap_kextract(va);
			if (pa == 0)
				panic("%s: pmap_kextract", __func__);
			csa->csa_ttes[i].tte_vpn = TV_VPN(va, TS_8K);
			csa->csa_ttes[i].tte_data = TD_V | TD_8K | TD_PA(pa) |
			    TD_L | TD_CP | TD_CV | TD_P | TD_W;
		}
		csa->csa_state = 0;
		csa->csa_pcpu = pc->pc_addr;
		csa->csa_mid = pc->pc_mid;
		s = intr_disable();
		while (csa->csa_state != CPU_BOOTSTRAP)
			;
		intr_restore(s);
	}

	membar(StoreLoad);
	csa->csa_count = 0;
	smp_started = 1;
}
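/*
 * Entry point for APs once the trampoline has switched them onto the
 * kernel trap table: finish CPU-local initialization and hand the CPU
 * over to the scheduler.
 */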
void
cpu_mp_bootstrap(struct pcpu *pc)
{
	volatile struct cpu_start_args *csa;

	csa = &cpu_start_args;

	/* Do CPU-specific initialization. */
	if (pc->pc_impl >= CPU_IMPL_ULTRASPARCIII)
		cheetah_init(pc->pc_impl);
	else if (pc->pc_impl == CPU_IMPL_SPARC64V)
		zeus_init(pc->pc_impl);

	/*
	 * Enable the caches.  Note that this may include applying
	 * workarounds.
	 */
	cache_enable(pc->pc_impl);

	/*
	 * Clear (S)TICK timer(s) (including NPT) and ensure they are
	 * stopped.
	 */
	tick_clear(pc->pc_impl);
	tick_stop(pc->pc_impl);

	/* Set the kernel context. */
	pmap_set_kctx();

	/* Lock the kernel TSB in the TLB if necessary. */
	if (tsb_kernel_ldd_phys == 0)
		pmap_map_tsb();

	/*
	 * Flush all non-locked TLB entries possibly left over by the
	 * firmware.
	 */
	tlb_flush_nonlocked();

	/*
	 * Enable interrupts.
	 * Note that the PIL will be lowered indirectly via sched_throw(NULL)
	 * when the fake spinlock held by the idle thread is eventually
	 * released.
	 */
	wrpr(pstate, 0, PSTATE_KERNEL);

	smp_cpus++;
	KASSERT(curthread != NULL, ("%s: curthread", __func__));
	printf("SMP: AP CPU #%d Launched!\n", curcpu);

	csa->csa_count--;
	membar(StoreLoad);
	csa->csa_state = CPU_BOOTSTRAP;
	while (csa->csa_count != 0)
		;

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Ok, now enter the scheduler. */
	sched_throw(NULL);
}
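/*
 * Stop all other CPUs for shutdown: each one clears its bit in
 * shutdown_cpus from within cpu_ipi_stop() and then parks itself with
 * interrupts disabled.
 */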
void
cpu_mp_shutdown(void)
{
	cpuset_t cpus;
	int i;

	critical_enter();
	shutdown_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &shutdown_cpus);
	cpus = shutdown_cpus;

	/* XXX: Stop all the CPUs which aren't already. */
	if (CPU_CMP(&stopped_cpus, &cpus)) {

		/* cpus is just a flat "on" mask without curcpu. */
		CPU_NAND(&cpus, &stopped_cpus);
		stop_cpus(cpus);
	}
	i = 0;
	while (!CPU_EMPTY(&shutdown_cpus)) {
		if (i++ > 100000) {
			printf("timeout shutting down CPUs.\n");
			break;
		}
	}
	critical_exit();
}
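/*
 * IPI handlers, dispatched from the PIL_* interrupt levels installed in
 * cpu_mp_start().  cpu_ipi_ast() intentionally has an empty body; the
 * pending AST is processed on return from the interrupt.
 */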
static void
cpu_ipi_ast(struct trapframe *tf)
{

}
static void
cpu_ipi_stop(struct trapframe *tf)
{
	u_int cpuid;

	CTR2(KTR_SMP, "%s: stopped %d", __func__, curcpu);
	sched_pin();
	savectx(&stoppcbs[curcpu]);
	cpuid = PCPU_GET(cpuid);
	CPU_SET_ATOMIC(cpuid, &stopped_cpus);
	while (!CPU_ISSET(cpuid, &started_cpus)) {
		if (CPU_ISSET(cpuid, &shutdown_cpus)) {
			CPU_CLR_ATOMIC(cpuid, &shutdown_cpus);
			(void)intr_disable();
			for (;;)
				;
		}
	}
	CPU_CLR_ATOMIC(cpuid, &started_cpus);
	CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
	sched_unpin();
	CTR2(KTR_SMP, "%s: restarted %d", __func__, curcpu);
}
static void
cpu_ipi_preempt(struct trapframe *tf)
{

	sched_preempt(curthread);
}
static void
cpu_ipi_hardclock(struct trapframe *tf)
{
	struct trapframe *oldframe;
	struct thread *td;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	hardclockintr();
	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}
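/*
 * IPI delivery for UltraSPARC I/II (SpitFire): write the three data
 * words and the target module ID to the interrupt dispatch ASI, then
 * poll the dispatch status register until BUSY clears and retry on NACK.
 */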
static void
spitfire_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
	u_int cpu;

	while ((cpu = cpusetobj_ffs(&cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &cpus);
		spitfire_ipi_single(cpu, d0, d1, d2);
	}
}
static void
spitfire_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
{
	register_t s;
	u_long ids;
	u_int mid;
	int i;

	KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	mid = cpuid_to_mid[cpu];
	for (i = 0; i < IPI_RETRIES; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
		    ASI_SDB_INTR_W, 0);
		/*
		 * Workaround for SpitFire erratum #54; do a dummy read
		 * from a SDB internal register before the MEMBAR #Sync
		 * for the write to ASI_SDB_INTR_W (requiring another
		 * MEMBAR #Sync in order to make sure the write has
		 * occurred before the load).
		 */
		membar(Sync);
		(void)ldxa(AA_SDB_CNTL_HIGH, ASI_SDB_CONTROL_R);
		membar(Sync);
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    IDR_BUSY) != 0)
			;
		intr_restore(s);
		if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
			return;
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPU is also
		 * trying to send an IPI.
		 */
		DELAY(2);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI to module 0x%x\n",
		    __func__, mid);
	else
		panic("%s: couldn't send IPI to module 0x%x",
		    __func__, mid);
}
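/*
 * IPI delivery for UltraSPARC III (Cheetah) and SPARC64 V: the same
 * dispatch scheme as SpitFire, but without the erratum #54 workaround
 * and with a dispatch status register holding a busy/nack bit pair per
 * in-flight dispatch.
 */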
static void
cheetah_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
{
	register_t s;
	u_long ids;
	u_int mid;
	int i;

	KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
	    IDR_CHEETAH_ALL_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	mid = cpuid_to_mid[cpu];
	for (i = 0; i < IPI_RETRIES; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
		    ASI_SDB_INTR_W, 0);
		membar(Sync);
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    IDR_BUSY) != 0)
			;
		intr_restore(s);
		if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
			return;
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPU is also
		 * trying to send an IPI.
		 */
		DELAY(2);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI to module 0x%x\n",
		    __func__, mid);
	else
		panic("%s: couldn't send IPI to module 0x%x",
		    __func__, mid);
}
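/*
 * Multicast variant for Cheetah: dispatch to up to
 * IDR_CHEETAH_MAX_BN_PAIRS targets at once, each occupying one
 * busy/nack pair in the dispatch status register, and retry only the
 * NACKed targets.
 */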
static void
cheetah_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
	char pbuf[CPUSETBUFSIZ];
	register_t s;
	u_long ids;
	u_int bnp;
	u_int cpu;
	int i;

	KASSERT(!CPU_ISSET(curcpu, &cpus), ("%s: CPU can't IPI itself",
	    __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
	    IDR_CHEETAH_ALL_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	if (CPU_EMPTY(&cpus))
		return;
	ids = 0;
	for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		bnp = 0;
		for (cpu = 0; cpu < mp_ncpus; cpu++) {
			if (CPU_ISSET(cpu, &cpus)) {
				stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] <<
				    IDC_ITID_SHIFT) | bnp << IDC_BN_SHIFT,
				    ASI_SDB_INTR_W, 0);
				membar(Sync);
				bnp++;
				if (bnp == IDR_CHEETAH_MAX_BN_PAIRS)
					break;
			}
		}
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    IDR_CHEETAH_ALL_BUSY) != 0)
			;
		intr_restore(s);
		bnp = 0;
		for (cpu = 0; cpu < mp_ncpus; cpu++) {
			if (CPU_ISSET(cpu, &cpus)) {
				if ((ids & (IDR_NACK << (2 * bnp))) == 0)
					CPU_CLR(cpu, &cpus);
				bnp++;
			}
		}
		if (CPU_EMPTY(&cpus))
			return;
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPUs are
		 * also trying to send IPIs.
		 */
		DELAY(2 * mp_ncpus);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI (cpus=%s ids=0x%lx)\n",
		    __func__, cpusetobj_strprint(pbuf, &cpus), ids);
	else
		panic("%s: couldn't send IPI (cpus=%s ids=0x%lx)",
		    __func__, cpusetobj_strprint(pbuf, &cpus), ids);
}
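/*
 * JBus variants for UltraSPARC IIIi/IIIi+ (Jalapeno): the busy/nack
 * pairs in the dispatch status register are indexed by the target's
 * module ID rather than by dispatch slot, hence the JBus limit on the
 * number of CPUs that can be IPIed at once.
 */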
static void
jalapeno_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
{
	register_t s;
	u_long ids;
	u_int busy, busynack, mid;
	int i;

	KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
	    IDR_CHEETAH_ALL_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	mid = cpuid_to_mid[cpu];
	busy = IDR_BUSY << (2 * mid);
	busynack = (IDR_BUSY | IDR_NACK) << (2 * mid);
	for (i = 0; i < IPI_RETRIES; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
		    ASI_SDB_INTR_W, 0);
		membar(Sync);
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    busy) != 0)
			;
		intr_restore(s);
		if ((ids & busynack) == 0)
			return;
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPU is also
		 * trying to send an IPI.
		 */
		DELAY(2);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI to module 0x%x\n",
		    __func__, mid);
	else
		panic("%s: couldn't send IPI to module 0x%x",
		    __func__, mid);
}
static void
jalapeno_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2)
{
	char pbuf[CPUSETBUFSIZ];
	register_t s;
	u_long ids;
	u_int cpu;
	int i;

	KASSERT(!CPU_ISSET(curcpu, &cpus), ("%s: CPU can't IPI itself",
	    __func__));
	KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
	    IDR_CHEETAH_ALL_BUSY) == 0,
	    ("%s: outstanding dispatch", __func__));
	if (CPU_EMPTY(&cpus))
		return;
	ids = 0;
	for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
		s = intr_disable();
		stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
		stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
		stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
		membar(Sync);
		for (cpu = 0; cpu < mp_ncpus; cpu++) {
			if (CPU_ISSET(cpu, &cpus)) {
				stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] <<
				    IDC_ITID_SHIFT), ASI_SDB_INTR_W, 0);
				membar(Sync);
			}
		}
		while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
		    IDR_CHEETAH_ALL_BUSY) != 0)
			;
		intr_restore(s);
		if ((ids &
		    (IDR_CHEETAH_ALL_BUSY | IDR_CHEETAH_ALL_NACK)) == 0)
			return;
		for (cpu = 0; cpu < mp_ncpus; cpu++)
			if (CPU_ISSET(cpu, &cpus))
				if ((ids & (IDR_NACK <<
				    (2 * cpuid_to_mid[cpu]))) == 0)
					CPU_CLR(cpu, &cpus);
		/*
		 * Leave interrupts enabled for a bit before retrying
		 * in order to avoid deadlocks if the other CPUs are
		 * also trying to send IPIs.
		 */
		DELAY(2 * mp_ncpus);
	}
	if (kdb_active != 0 || panicstr != NULL)
		printf("%s: couldn't send IPI (cpus=%s ids=0x%lx)\n",
		    __func__, cpusetobj_strprint(pbuf, &cpus), ids);
	else
		panic("%s: couldn't send IPI (cpus=%s ids=0x%lx)",
		    __func__, cpusetobj_strprint(pbuf, &cpus), ids);
}