2 * Copyright (c) 2001-2005 Marcel Moolenaar
3 * Copyright (c) 2000 Doug Rabson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include "opt_kstack_pages.h"
33 #include <sys/param.h>
34 #include <sys/systm.h>
38 #include <sys/kthread.h>
40 #include <sys/malloc.h>
41 #include <sys/mutex.h>
42 #include <sys/kernel.h>
44 #include <sys/sched.h>
46 #include <sys/sysctl.h>
49 #include <machine/atomic.h>
50 #include <machine/cpu.h>
51 #include <machine/fpu.h>
52 #include <machine/intr.h>
53 #include <machine/mca.h>
54 #include <machine/md_var.h>
55 #include <machine/pal.h>
56 #include <machine/pcb.h>
57 #include <machine/sal.h>
58 #include <machine/smp.h>
62 #include <vm/vm_extern.h>
63 #include <vm/vm_kern.h>
/*
 * File-scope SMP state and helpers.
 * NOTE(review): this chunk is a sampled extraction -- the leading integer
 * on each line is the original file's line number fused into the text,
 * and many intervening lines are missing.
 */
/* Malloc type tag for SMP-related kernel allocations. */
65 MALLOC_DEFINE(M_SMP, "SMP", "SMP related allocations");
/* Entry point executed by application processors (defined below). */
67 void ia64_ap_startup(void);
/* Extract the combined SAPIC id:eid field (bits 16..31) of a cr.lid value. */
69 #define LID_SAPIC(x) ((u_int)((x) >> 16))
/* Extract the 8-bit SAPIC id (bits 24..31) of a cr.lid value. */
70 #define LID_SAPIC_ID(x) ((u_int)((x) >> 24) & 0xff)
/* Extract the 8-bit SAPIC eid (bits 16..23) of a cr.lid value. */
71 #define LID_SAPIC_EID(x) ((u_int)((x) >> 16) & 0xff)
/*
 * Compose a cr.lid-style value from a SAPIC id/eid pair.
 * NOTE(review): the trailing ';' in the expansion and the unparenthesized
 * uses of 'id' and 'eid' are macro-expansion hazards; harmless at the one
 * call site visible below (cpu_mp_add, simple-assignment context), but
 * worth fixing at the definition.
 */
72 #define LID_SAPIC_SET(id,eid) (((id & 0xff) << 8 | (eid & 0xff)) << 16);
/* Mask covering the SAPIC id:eid field within cr.lid. */
73 #define LID_SAPIC_MASK 0xffff0000UL
75 /* Variables used by os_boot_rendez and ia64_ap_startup */
/*
 * BSP/AP startup handshake counters; 'volatile' because they are polled
 * across CPUs (see the ap_delay loop and the ap_awake wait below).
 */
78 volatile int ap_delay;
79 volatile int ap_awake;
/*
 * IPI handler for IPI_AST: bump the per-CPU AST statistics counter and
 * leave a KTR trace.  The AST itself is presumably processed on trap
 * return -- body lines are missing from this view, confirm upstream.
 * (Fragment: return type, braces and any further statements not visible.)
 */
90 ia64_ih_ast(struct thread *td, u_int xiv, struct trapframe *tf)
93 PCPU_INC(md.stats.pcs_nasts);
94 CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
/*
 * IPI handler for the high-FP IPI: count the event, then save the
 * high floating-point register state via ia64_highfp_save_ipi().
 * (Fragment: return type, braces and any further statements not visible.)
 */
99 ia64_ih_highfp(struct thread *td, u_int xiv, struct trapframe *tf)
102 PCPU_INC(md.stats.pcs_nhighfps);
103 ia64_highfp_save_ipi();
/*
 * IPI handler for IPI_PREEMPT: count the event, trace it, and ask the
 * scheduler to preempt the currently running thread on this CPU.
 * (Fragment: return type, braces and any further statements not visible.)
 */
108 ia64_ih_preempt(struct thread *td, u_int xiv, struct trapframe *tf)
111 PCPU_INC(md.stats.pcs_npreempts);
112 CTR1(KTR_SMP, "IPI_PREEMPT, cpuid=%d", PCPU_GET(cpuid));
113 sched_preempt(curthread);
/*
 * IPI handler for IPI_RENDEZVOUS: count the event, trace it, and run
 * this CPU's part of the pending smp_rendezvous() operation.
 * (Fragment: return type, braces and any further statements not visible.)
 */
118 ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf)
121 PCPU_INC(md.stats.pcs_nrdvs);
122 CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
123 smp_rendezvous_action();
/*
 * IPI handler for IPI_STOP (also registered for the NMI-based
 * IPI_STOP_HARD path, see cpu_mp_unleash below): save this CPU's
 * context, advertise ourselves in stopped_cpus, then spin until the
 * BSP sets our bit in started_cpus; finally clear both bits.
 * (Fragment: declaration of 'mybit' and loop body are in missing lines;
 * the while at original line 138 presumably spins on cpu_spinwait or
 * similar -- confirm upstream.)
 */
128 ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf)
132 PCPU_INC(md.stats.pcs_nstops);
133 mybit = PCPU_GET(cpumask);
/* Save register context so a debugger/panic path can inspect this CPU. */
135 savectx(PCPU_PTR(md.pcb));
137 atomic_set_int(&stopped_cpus, mybit);
/* Wait here until the BSP releases us via started_cpus. */
138 while ((started_cpus & mybit) == 0)
140 atomic_clear_int(&started_cpus, mybit);
141 atomic_clear_int(&stopped_cpus, mybit);
149 return smp_topo_none();
/*
 * Kthread body (spawned per awake CPU from cpu_mp_unleash): bind to the
 * CPU encoded in 'arg' and save that CPU's machine-check records.
 *
 * arg: the target cpuid smuggled through the void* (see the matching
 *      (void*)(uintptr_t)pc->pc_cpuid cast at the kproc_create call).
 * (Fragment: 'td' initialization, thread locking around sched_bind, and
 * the function epilogue are in missing lines.)
 */
153 ia64_store_mca_state(void* arg)
155 unsigned int ncpu = (unsigned int)(uintptr_t)arg;
158 /* ia64_mca_save_state() is CPU-sensitive, so bind ourself to our target CPU */
161 sched_bind(td, ncpu);
165 * Get and save the CPU specific MCA records. Should we get the
166 * MCA state for each processor, or just the CMC state?
/* Save both the MCA and the corrected-machine-check (CMC) records. */
168 ia64_mca_save_state(SAL_INFO_MCA);
169 ia64_mca_save_state(SAL_INFO_CMC);
/*
 * C-level entry point for an application processor after the wake-up
 * rendezvous.  Visible steps: publish the pcpu pointer in kernel
 * register k4, program the PTA from this CPU's VHPT allocation, set the
 * default FPSR, wait for release, install the idle thread as curthread,
 * join the ap_awake handshake and mask the interval timer vector.
 * (Fragment: declaration of 'vhpt', the unleash-wait loop at original
 * line ~195, and the final jump into the scheduler are in missing lines.)
 */
175 ia64_ap_startup(void)
/* Make the per-CPU data reachable via ar.k4, as the rest of MD code expects. */
180 ia64_set_k4((intptr_t)pcpup);
182 vhpt = PCPU_GET(md.vhpt);
/*
 * Program the Page Table Address register: VHPT base, enable bit (the
 * trailing +1) and size (pmap_vhpt_log2size in the size field).
 */
184 ia64_set_pta(vhpt + (1 << 8) + (pmap_vhpt_log2size << 2) + 1);
193 ia64_set_fpsr(IA64_FPSR_DEFAULT);
195 /* Wait until it's time for us to be unleashed */
199 /* Initialize curthread. */
200 KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
201 PCPU_SET(curthread, PCPU_GET(idlethread));
/* Tell the BSP we are up; it polls ap_awake in cpu_mp_unleash. */
203 atomic_add_int(&ap_awake, 1);
207 CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
209 /* Mask interval timer interrupts on APs. */
210 ia64_set_itv(0x10000);
/*
 * MI hook: establish mp_ncpus and mp_maxid early in boot by walking the
 * ACPI tables.  mp_ncpus records the true processor count even when it
 * exceeds MAXCPU; mp_maxid is capped so only MAXCPU CPUs are activated.
 * (Fragment: return type and braces are in missing lines.)
 */
220 cpu_mp_setmaxid(void)
224 * Count the number of processors in the system by walking the ACPI
225 * tables. Note that we record the actual number of processors, even
226 * if this is larger than MAXCPU. We only activate MAXCPU processors.
228 mp_ncpus = ia64_count_cpus();
231 * Set the largest cpuid we're going to use. This is necessary for
/* mp_maxid is the highest usable cpuid, hence the -1. */
234 mp_maxid = min(mp_ncpus, MAXCPU) - 1;
/*
 * Fragment of the SMP probe hook (presumably cpu_mp_probe() -- the
 * signature is in missing lines): SMP is viable only with more than one
 * processor and a valid wake-up interrupt vector.
 */
242 * If there's only 1 processor, or we don't have a wake-up vector,
243 * we're not going to enable SMP. Note that no wake-up vector can
244 * also mean that the wake-up mechanism is not supported. In this
245 * case we can have multiple processors, but we simply can't wake
248 return (mp_ncpus > 1 && ia64_ipi_wakeup != 0);
/*
 * Register one processor discovered via ACPI: derive its cr.lid value
 * from the SAPIC id/eid, assign cpuid 0 to the BSP (the CPU whose own
 * cr.lid matches) and the next free id to APs, allocate and initialize
 * its pcpu and dynamic-pcpu areas, and add it to all_cpus.
 * (Fragment: declarations of 'pc', 'dpcpu', 'lid', 'cpuid' and the
 * MAXCPU overflow path are in missing lines.)
 */
252 cpu_mp_add(u_int acpiid, u_int apicid, u_int apiceid)
259 lid = LID_SAPIC_SET(apicid, apiceid);
/* cpuid 0 is reserved for the BSP; everyone else gets smp_cpus++. */
260 cpuid = ((ia64_get_lid() & LID_SAPIC_MASK) == lid) ? 0 : smp_cpus++;
262 KASSERT((all_cpus & (1UL << cpuid)) == 0,
263 ("%s: cpu%d already in CPU map", __func__, acpiid));
266 pc = (struct pcpu *)malloc(sizeof(*pc), M_SMP, M_WAITOK);
267 pcpu_init(pc, cpuid, sizeof(*pc));
/* Dynamic per-CPU storage lives in its own kernel_map allocation. */
268 dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
269 dpcpu_init(dpcpu, cpuid);
273 pc->pc_acpi_id = acpiid;
275 all_cpus |= (1UL << cpuid);
/*
 * Fragment of the CPU announce loop (presumably cpu_mp_announce() --
 * signature in missing lines): print each CPU's ACPI id and SAPIC
 * id/eid.  'pc' is presumably looked up per-iteration in the missing
 * lines between the for and the printf -- confirm upstream.
 */
284 for (i = 0; i <= mp_maxid; i++) {
287 printf("cpu%d: ACPI Id=%x, SAPIC Id=%x, SAPIC Eid=%x",
288 i, pc->pc_acpi_id, LID_SAPIC_ID(pc->pc_md.lid),
289 LID_SAPIC_EID(pc->pc_md.lid));
/*
 * Fragment of the AP bring-up loop (presumably cpu_mp_start() --
 * signature in missing lines): for every registered pcpu, point it at
 * the kernel pmap and compute its "other CPUs" mask; for each AP
 * (cpuid > 0) allocate a VHPT and a kernel stack, send the wake-up IPI
 * and poll ap_awake via the ap_delay countdown, warning if the AP never
 * reports in.
 * (Fragment: ap_delay initialization, the VHPT-failure continue path,
 * the stack hand-off to the AP, and the DELAY inside the do/while are
 * in missing lines.)
 */
305 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
306 pc->pc_md.current_pmap = kernel_pmap;
307 pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
/* The BSP (cpuid 0) is already running; only APs need waking. */
308 if (pc->pc_cpuid > 0) {
310 pc->pc_md.vhpt = pmap_alloc_vhpt();
311 if (pc->pc_md.vhpt == 0) {
312 printf("SMP: WARNING: unable to allocate VHPT"
313 " for cpu%d", pc->pc_cpuid);
316 ap_stack = malloc(KSTACK_PAGES * PAGE_SIZE, M_SMP,
322 printf("SMP: waking up cpu%d\n", pc->pc_cpuid);
324 ipi_send(pc, ia64_ipi_wakeup);
/* Countdown wait for the AP to bump ap_awake in ia64_ap_startup(). */
328 } while (--ap_delay > 0);
329 pc->pc_md.awake = ap_awake;
332 printf("SMP: WARNING: cpu%d did not wake up\n",
/*
 * SYSINIT hook (SI_SUB_SMP, see registration at the bottom of the
 * file): allocate external-interrupt vectors (XIVs) for all IPI types,
 * try to claim the NMI vector for IPI_STOP_HARD (falling back to the
 * regular stop vector packaged as an NMI delivery-mode value), spawn a
 * per-CPU kthread to capture MCA state on each awake CPU, wait for all
 * woken APs, and report any discrepancy between found/usable/woken CPU
 * counts.
 * (Fragment: declarations of 'pc' and 'cpus', the release of the APs,
 * and the interrupt-binding code hinted at by original line 387 are in
 * missing lines.)
 */
340 cpu_mp_unleash(void *dummy)
348 /* Allocate XIVs for IPIs */
349 ia64_ipi_ast = ia64_xiv_alloc(PI_DULL, IA64_XIV_IPI, ia64_ih_ast);
350 ia64_ipi_highfp = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_highfp);
351 ia64_ipi_preempt = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI,
353 ia64_ipi_rndzvs = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_rndzvs);
354 ia64_ipi_stop = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI, ia64_ih_stop);
356 /* Reserve the NMI vector for IPI_STOP_HARD if possible */
/*
 * If reserving vector 2 (NMI) fails (nonzero return), fall back to the
 * normal stop vector; otherwise use 0x400 (DM=NMI encoding).
 */
357 ia64_ipi_nmi = (ia64_xiv_reserve(2, IA64_XIV_IPI, ia64_ih_stop) != 0)
358 ? ia64_ipi_stop : 0x400; /* DM=NMI, Vector=n/a */
362 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
364 if (pc->pc_md.awake) {
/* One "mca %u" kthread per awake CPU; it binds itself to that CPU. */
365 kproc_create(ia64_store_mca_state,
366 (void*)((uintptr_t)pc->pc_cpuid), NULL, 0, 0,
367 "mca %u", pc->pc_cpuid);
/* Wait until every woken AP has joined the handshake. */
375 while (ap_awake != smp_cpus)
378 if (smp_cpus != cpus || cpus != mp_ncpus) {
379 printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
380 mp_ncpus, cpus, smp_cpus);
387 * Now that all CPUs are up and running, bind interrupts to each of
394 * send an IPI to a set of cpus.
/*
 * Walk all pcpus and forward the IPI to each CPU whose mask bit is set
 * in 'cpus'.  (Fragment: return type, braces and the ipi_send call in
 * the loop body are in missing lines.)
 */
397 ipi_selected(cpumask_t cpus, int ipi)
401 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
402 if (cpus & pc->pc_cpumask)
408 * send an IPI to all CPUs EXCEPT myself.
/*
 * (Fragment: the self-exclusion test and the ipi_send call inside the
 * loop are in missing lines.)
 */
411 ipi_all_but_self(int ipi)
415 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
422 * Send an IPI to the specified processor. The lid parameter holds the
423 * cr.lid (CR64) contents of the target processor. Only the id and eid
424 * fields are used here.
/*
 * Deliver vector 'xiv' to 'cpu' by storing into the target's slot in
 * the processor interrupt block (ia64_pib), indexed by the SAPIC
 * id:eid extracted from the target's cached cr.lid.
 * (Fragment: declaration of 'lid', memory-fencing around the store, and
 * the function epilogue are in missing lines.)
 */
427 ipi_send(struct pcpu *cpu, int xiv)
431 KASSERT(xiv != 0, ("ipi_send"));
433 lid = LID_SAPIC(cpu->pc_md.lid);
/* An 8-byte store to the PIB slot triggers the interrupt on the target. */
436 ia64_st8(&(ia64_pib->ib_ipi[lid][0]), xiv);
438 CTR3(KTR_SMP, "ipi_send(%p, %d): cpuid=%d", cpu, xiv, PCPU_GET(cpuid));
441 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);