2 * Copyright (c) 2001-2005 Marcel Moolenaar
3 * Copyright (c) 2000 Doug Rabson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include "opt_kstack_pages.h"
33 #include <sys/param.h>
34 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/kernel.h>
43 #include <sys/sysctl.h>
48 #include <vm/vm_extern.h>
49 #include <vm/vm_kern.h>
51 #include <machine/atomic.h>
52 #include <machine/clock.h>
53 #include <machine/fpu.h>
54 #include <machine/mca.h>
55 #include <machine/md_var.h>
56 #include <machine/pal.h>
57 #include <machine/pcb.h>
58 #include <machine/pmap.h>
59 #include <machine/sal.h>
60 #include <machine/smp.h>
61 #include <i386/include/specialreg.h>
MALLOC_DECLARE(M_PMAP);	/* malloc type for pmap data; declared elsewhere -- presumably pmap.c */

void ia64_ap_startup(void);	/* AP entry point, reached via os_boot_rendez -- see comment below */

extern uint64_t ia64_lapic_address;	/* physical base of the processor interrupt block */

#define LID_SAPIC_ID(x) ((int)((x) >> 24) & 0xff)	/* SAPIC id: bits 24..31 of cr.lid */
#define LID_SAPIC_EID(x) ((int)((x) >> 16) & 0xff)	/* SAPIC eid: bits 16..23 of cr.lid */
/*
 * Compose a cr.lid-format value from a SAPIC id/eid pair: id in bits
 * 24..31, eid in bits 16..23 (the inverse of LID_SAPIC_ID/LID_SAPIC_EID).
 *
 * Fixes relative to the previous definition: the stray trailing ';'
 * (which made the macro unusable in expression context and silently
 * inserted an empty statement at every call site) is removed, the
 * arguments are parenthesized, and the arithmetic is done in unsigned
 * long instead of int -- an id with bit 7 set would otherwise shift
 * into the sign bit of a 32-bit int (undefined behavior) and then
 * sign-extend when compared against the uint64_t cr.lid value.
 */
#define LID_SAPIC_SET(id,eid) ((((id) & 0xffUL) << 8 | ((eid) & 0xffUL)) << 16)
#define LID_SAPIC_MASK 0xffff0000UL	/* id+eid field of cr.lid */
/* Variables used by os_boot_rendez and ia64_ap_startup */
volatile int ap_delay;	/* wake-up timeout, counted down by the BSP in cpu_mp_start */
volatile int ap_awake;	/* number of CPUs that have come on line (compared to smp_cpus) */
static void cpu_mp_unleash(void *);	/* SYSINIT hook that releases the waiting APs */
	ia64_set_k4((intptr_t)pcpup);	/* publish this AP's pcpu pointer in ar.k4 */
	__asm __volatile("mov cr.pta=%0;; srlz.i;;" ::
	    "r" (ap_vhpt + (1<<8) + (pmap_vhpt_log2size<<2) + 1));	/* point cr.pta at this AP's VHPT and serialize */
	ia64_set_fpsr(IA64_FPSR_DEFAULT);	/* start with the default FP status register */
	/* Wait until it's time for us to be unleashed */
	__asm __volatile("ssm psr.i;; srlz.d;;");	/* enable interrupts (set psr.i) */
	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	/* Borrow the pre-created idle thread as this CPU's initial curthread. */
	PCPU_SET(curthread, PCPU_GET(idlethread));
	 * Correct spinlock nesting. The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline(). Since we
	 * don't have any locks and explicitly acquire locks when we need
	 * to, the nesting count will be off by 1.
	curthread->td_md.md_spinlock_count = 0;
	 * Get and save the CPU specific MCA records. Should we get the
	 * MCA state for each processor, or just the CMC state?
	ia64_mca_save_state(SAL_INFO_MCA);
	ia64_mca_save_state(SAL_INFO_CMC);	/* corrected machine-check state too */
	CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
	mtx_lock_spin(&sched_lock);
	binuptime(PCPU_PTR(switchtime));	/* initial switch timestamp for accounting */
	PCPU_SET(switchticks, ticks);
	/* kick off the clock on this AP */
	cpu_throw(NULL, choosethread());	/* switch to the first real thread; does not return */
cpu_mp_setmaxid(void)
	 * Count the number of processors in the system by walking the ACPI
	 * tables. Note that we record the actual number of processors, even
	 * if this is larger than MAXCPU. We only activate MAXCPU processors.
	mp_ncpus = ia64_count_cpus();	/* true hardware count; may exceed MAXCPU */
	 * Set the largest cpuid we're going to use. This is necessary for
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;	/* cpuids run 0..mp_maxid inclusive */
	 * If there's only 1 processor, or we don't have a wake-up vector,
	 * we're not going to enable SMP. Note that no wake-up vector can
	 * also mean that the wake-up mechanism is not supported. In this
	 * case we can have multiple processors, but we simply can't wake
	return (mp_ncpus > 1 && ipi_vector[IPI_AP_WAKEUP] != 0);	/* nonzero iff SMP is viable */
cpu_mp_add(u_int acpiid, u_int apicid, u_int apiceid)
	/* Ignore any processor numbers outside our range */
	if (acpiid > mp_maxid)
	KASSERT((all_cpus & (1UL << acpiid)) == 0,
	    ("%s: cpu%d already in CPU map", __func__, acpiid));
	lid = LID_SAPIC_SET(apicid, apiceid);	/* cr.lid-format id/eid for this CPU */
	if ((ia64_get_lid() & LID_SAPIC_MASK) == lid) {	/* is this entry the BSP itself? */
	    ("%s: the BSP must be cpu0", __func__));
	pc = (struct pcpu *)kmem_alloc(kernel_map, PAGE_SIZE);	/* per-CPU area for an AP */
	pcpu_init(pc, acpiid, PAGE_SIZE);
	all_cpus |= (1UL << acpiid);	/* record the CPU in the all_cpus mask */
	for (i = 0; i <= mp_maxid; i++) {	/* announce each known CPU, cpu0..mp_maxid */
		printf("cpu%d: SAPIC Id=%x, SAPIC Eid=%x", i,
		    LID_SAPIC_ID(pc->pc_lid),
		    LID_SAPIC_EID(pc->pc_lid));
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		pc->pc_current_pmap = kernel_pmap;	/* every CPU starts on the kernel pmap */
		pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;	/* peers = everyone but self */
		if (pc->pc_cpuid > 0) {	/* only APs (not the BSP) need a wake-up */
			ap_stack = malloc(KSTACK_PAGES * PAGE_SIZE, M_PMAP,
			ap_vhpt = pmap_vhpt_base[pc->pc_cpuid];	/* this AP's pre-built VHPT */
			printf("SMP: waking up cpu%d\n", pc->pc_cpuid);
			ipi_send(pc, IPI_AP_WAKEUP);	/* kick the AP out of its rendezvous */
			} while (--ap_delay > 0);	/* spin until the AP shows up or we time out */
			pc->pc_awake = ap_awake;	/* snapshot ap_awake; presumably bumped by the AP on startup -- confirm */
			printf("SMP: WARNING: cpu%d did not wake up\n",
cpu_mp_unleash(void *dummy)
	if (mp_ipi_test != 1)	/* presumably set by the self-test IPI handler -- confirm */
		printf("SMP: WARNING: sending of a test IPI failed\n");
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
	while (ap_awake != smp_cpus)	/* wait for all released APs to check in */
	if (smp_cpus != cpus || cpus != mp_ncpus) {	/* report any CPUs we could not use/wake */
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
 * send an IPI to a set of cpus.
ipi_selected(cpumask_t cpus, int ipi)
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (cpus & pc->pc_cpumask)	/* deliver only to CPUs present in the mask */
 * send an IPI to all CPUs, including myself.
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
 * send an IPI to all CPUs EXCEPT myself.
ipi_all_but_self(int ipi)
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
 * send an IPI to myself.
	ipi_send(pcpup, ipi);	/* pcpup is this CPU's own pcpu, so the IPI targets ourselves */
 * Send an IPI to the specified processor. The lid parameter holds the
 * cr.lid (CR64) contents of the target processor. Only the id and eid
 * fields are used here.
ipi_send(struct pcpu *cpu, int ipi)
	volatile uint64_t *pipi;	/* memory-mapped interrupt-delivery word */
	pipi = __MEMIO_ADDR(ia64_lapic_address |
	    ((cpu->pc_lid & LID_SAPIC_MASK) >> 12));	/* id/eid select the target's page within the interrupt block */
	vector = (uint64_t)(ipi_vector[ipi] & 0xff);	/* low 8 bits are the interrupt vector */
	KASSERT(vector != 0, ("IPI %d is not assigned a vector", ipi));
	CTR3(KTR_SMP, "ipi_send(%p, %ld), cpuid=%d", pipi, vector,
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);	/* release the APs first thing in the SMP subsystem */