/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2008, by Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
#error SMP not supported with CPU_DISABLE_CMPXCHG
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#include <machine/apicreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/pcpu.h>

#include <machine/xen/xen-os.h>
#include <xen/evtchn.h>
#include <xen/xen_intr.h>
#include <xen/hypervisor.h>
#include <xen/interface/vcpu.h>

int	mp_naps;		/* # of application processors (APs) */
int	boot_cpu_id = -1;	/* designated BSP */

extern	struct pcpu __pcpu[];

static int bootAP;
static union descriptor *bootAPgdt;

static char resched_name[NR_CPUS][15];
static char callfunc_name[NR_CPUS][15];

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

typedef void call_data_func_t(uintptr_t, uintptr_t);

static u_int logical_cpus;
static volatile cpumask_t ipi_nmi_pending;

/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually setup
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAX_APIC_ID + 1];
int cpu_apic_ids[MAXCPU];
int apic_cpuids[MAX_APIC_ID + 1];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static int cpu_logical;
static int cpu_cores;

static void	assign_cpu_ids(void);
static void	set_interrupt_apic_ids(void);
int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static u_int	hyperthreading_cpus;
static cpumask_t	hyperthreading_cpus_mask;

extern void Xhypervisor_callback(void);
extern void failsafe_callback(void);
extern void pmap_lazyfix_action(void);

struct cpu_group *
cpu_topo(void)
{
	if (cpu_cores == 0)
		cpu_cores = 1;
	if (cpu_logical == 0)
		cpu_logical = 1;
	if (mp_ncpus % (cpu_cores * cpu_logical) != 0) {
		printf("WARNING: Non-uniform processors.\n");
		printf("WARNING: Using suboptimal topology.\n");
		return (smp_topo_none());
	}

	/*
	 * No multi-core or hyper-threaded.
	 */
	if (cpu_logical * cpu_cores == 1)
		return (smp_topo_none());
	/*
	 * Only HTT no multi-core.
	 */
	if (cpu_logical > 1 && cpu_cores == 1)
		return (smp_topo_1level(CG_SHARE_L1, cpu_logical, CG_FLAG_HTT));
	/*
	 * Only multi-core no HTT.
	 */
	if (cpu_cores > 1 && cpu_logical == 1)
		return (smp_topo_1level(CG_SHARE_NONE, cpu_cores, 0));
	/*
	 * Both HTT and multi-core.
	 */
	return (smp_topo_2level(CG_SHARE_NONE, cpu_cores,
	    CG_SHARE_L1, cpu_logical, CG_FLAG_HTT));
}
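
/*
 * Example: with cpu_cores == 2 and cpu_logical == 2 the code above builds
 * a two-level topology: two cores sharing nothing, each containing two
 * hyperthreads that share an L1 cache and carry CG_FLAG_HTT.
 */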

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	return (basemem);
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > MAX_APIC_ID) {
		panic("SMP: APIC ID %d too high", apic_id);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (mp_ncpus < MAXCPU)
		mp_ncpus++;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}
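
/*
 * cpu_add() is expected to be called once per local APIC during early
 * CPU enumeration (e.g. by the MP table or ACPI MADT parsers), before
 * cpu_mp_start() assigns logical IDs below.
 */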

void
cpu_mp_setmaxid(void)
{

	mp_maxid = MAXCPU - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Setup
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;
	apic_cpuids[boot_cpu_id] = 0;

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	/* Setup the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	set_interrupt_apic_ids();
}

static void
iv_rendezvous(uintptr_t a, uintptr_t b)
{
	smp_rendezvous_action();
}

static void
iv_invltlb(uintptr_t a, uintptr_t b)
{
	xen_tlb_flush();
}

static void
iv_invlpg(uintptr_t a, uintptr_t b)
{
	xen_invlpg(a);
}

static void
iv_invlrng(uintptr_t a, uintptr_t b)
{
	vm_offset_t start = (vm_offset_t)a;
	vm_offset_t end = (vm_offset_t)b;

	while (start < end) {
		xen_invlpg(start);
		start += PAGE_SIZE;
	}
}

static void
iv_invlcache(uintptr_t a, uintptr_t b)
{

	wbinvd();
	atomic_add_int(&smp_tlb_wait, 1);
}

static void
iv_lazypmap(uintptr_t a, uintptr_t b)
{
	pmap_lazyfix_action();
	atomic_add_int(&smp_tlb_wait, 1);
}

/*
 * These start from "IPI offset" APIC_IPI_INTS
 */
static call_data_func_t *ipi_vectors[6] =
{
	iv_rendezvous,
	iv_invltlb,
	iv_invlpg,
	iv_invlrng,
	iv_invlcache,
	iv_lazypmap,
};
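
/*
 * Dispatch example: a CPU that receives a function IPI whose func_id is
 * IPI_INVLPG ends up running
 *	ipi_vectors[IPI_INVLPG - APIC_IPI_INTS](addr, 0)
 * i.e. iv_invlpg(addr, 0), from smp_call_function_interrupt() below.
 */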

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
static int
smp_reschedule_interrupt(void *unused)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
#ifdef COUNT_IPIS
		(*ipi_preempt_counts[cpu])++;
#endif
		sched_preempt(curthread);
	}

	if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
		(*ipi_ast_counts[cpu])++;
#endif
		/* Nothing to do for AST */
	}
	return (FILTER_HANDLED);
}
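
/*
 * Bitmap IPIs such as IPI_PREEMPT and IPI_AST are coalesced: the sender
 * ORs the request into cpu_ipi_pending[cpu] (see ipi_send_cpu() below)
 * and signals RESCHEDULE_VECTOR, so a single upcall here can service
 * several queued requests at once.
 */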

struct _call_data {
	uint16_t func_id;
	uint16_t wait;
	uintptr_t arg1;
	uintptr_t arg2;
	atomic_t started;
	atomic_t finished;
	volatile int unused;
};

static struct _call_data *call_data;

static int
smp_call_function_interrupt(void *unused)
{
	call_data_func_t *func;
	uintptr_t arg1 = call_data->arg1;
	uintptr_t arg2 = call_data->arg2;
	int wait = call_data->wait;
	atomic_t *started = &call_data->started;
	atomic_t *finished = &call_data->finished;

	/* We only handle function IPIs, not bitmap IPIs */
	if (call_data->func_id < APIC_IPI_INTS ||
	    call_data->func_id > IPI_BITMAP_VECTOR)
		panic("invalid function id %u", call_data->func_id);

	func = ipi_vectors[call_data->func_id - APIC_IPI_INTS];
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(arg1, arg2);

	if (wait) {
		mb();
		atomic_inc(finished);
	}
	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
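
/*
 * Handshake with the initiator (see smp_tlb_shootdown()): bumping
 * 'started' tells the sender that its on-stack _call_data has been read
 * and may go out of scope when wait == 0; 'finished' only advances when
 * the sender asked to wait; smp_tlb_wait is what the TLB shootdown
 * initiators actually spin on.
 */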

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x <= MAX_APIC_ID; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf("  cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

static int
xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	unsigned int irq;

	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;

	sprintf(resched_name[cpu], "resched%u", cpu);
	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
	    cpu,
	    resched_name[cpu],
	    smp_reschedule_interrupt,
	    INTR_FAST|INTR_TYPE_TTY|INTR_MPSAFE, &irq);
	if (rc < 0)
		goto fail;

	printf("[XEN] IPI cpu=%d irq=%d vector=RESCHEDULE_VECTOR (%d)\n",
	    cpu, irq, RESCHEDULE_VECTOR);

	per_cpu(resched_irq, cpu) = irq;

	sprintf(callfunc_name[cpu], "callfunc%u", cpu);
	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
	    cpu,
	    callfunc_name[cpu],
	    smp_call_function_interrupt,
	    INTR_FAST|INTR_TYPE_TTY|INTR_MPSAFE, &irq);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = irq;

	printf("[XEN] IPI cpu=%d irq=%d vector=CALL_FUNCTION_VECTOR (%d)\n",
	    cpu, irq, CALL_FUNCTION_VECTOR);

	if ((cpu != 0) && ((rc = ap_cpu_initclocks(cpu)) != 0))
		goto fail;

	return (0);

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu));
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu));
	return (rc);
}

static void
xen_smp_intr_init_cpus(void *unused)
{
	int i;

	for (i = 0; i < mp_ncpus; i++)
		xen_smp_intr_init(i);
}
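
/*
 * Under Xen there is no local APIC for the guest to program: each IPI
 * "vector" is really a per-CPU event channel, bound above via
 * bind_ipi_to_irqhandler(), and ipi_pcpu() signals the target CPU
 * through that channel.
 */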

#define MTOPSIZE (1<<(14 + PAGE_SHIFT))

/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	vm_offset_t addr;
	int	gsel_tss;

	/* bootAP is set in start_ap() to our ID. */
	PCPU_SET(currentldt, _default_ldt);
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[bootAP * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	PCPU_SET(tss_gdt, &gdt[bootAP * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	/*
	 * signal our startup to the BSP.
	 */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

	/* set up FPU state on the AP */
	npxinit();

	/* set up SSE registers */
	enable_sse();

#if 0 && defined(PAE)
	/* Enable the PTE no-execute bit. */
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
	}
#endif

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	/* Init local apic for irq's */

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		hyperthreading_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	/* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		ia32_pause();

	PCPU_SET(curthread, PCPU_GET(idlethread));

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(i);
	}
}

/*
 * Assign logical CPU IDs to local APICs.
 */
static void
assign_cpu_ids(void)
{
	u_int i;

	/* Check for explicitly disabled CPUs. */
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", i)) {
			cpu_info[i].cpu_disabled = 1;
			continue;
		}
	}

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 has already been assigned to the BSP,
	 * so we only have to assign IDs for APs.
	 */
	mp_ncpus = 1;
	for (i = 0; i <= MAX_APIC_ID; i++) {
		if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp ||
		    cpu_info[i].cpu_disabled)
			continue;

		if (mp_ncpus < MAXCPU) {
			cpu_apic_ids[mp_ncpus] = i;
			apic_cpuids[i] = mp_ncpus;
			mp_ncpus++;
		} else
			cpu_info[i].cpu_disabled = 1;
	}
	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));
}

/*
 * start each AP in our list
 */
/* Lowest 1MB is already mapped: don't touch */
#define TMPMAP_START 1
int
start_all_aps(void)
{
	int x, apic_id, cpu;
	struct pcpu *pc;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* set up temporary P==V mapping for AP boot */
	/* XXX this is a hack, we should boot the AP on its own stack/PTD */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		bootAP = cpu;
		bootAPgdt = gdt + (512*cpu);

		/* Get per-cpu data */
		pc = &__pcpu[bootAP];
		pcpu_init(pc, bootAP, sizeof(struct pcpu));
		dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), bootAP);
		pc->pc_apic_id = cpu_apic_ids[bootAP];
		pc->pc_prvspace = pc;
		pc->pc_curthread = 0;

		gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
		gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

		PT_SET_MA(bootAPgdt, xpmap_ptom(VTOP(bootAPgdt)) | PG_V | PG_RW);
		bzero(bootAPgdt, PAGE_SIZE);
		for (x = 0; x < NGDT; x++)
			ssdtosd(&gdt_segs[x], &bootAPgdt[x].sd);
		PT_SET_MA(bootAPgdt, vtomach(bootAPgdt) | PG_V);
#ifdef notyet
		if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) {
			apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
			acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
#ifdef CONFIG_ACPI
			if (acpiid != 0xff)
				x86_acpiid_to_apicid[acpiid] = apicid;
#endif
		}
#endif

		/* attempt to start the Application Processor */
		if (!start_ap(cpu)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

	/* number of APs actually started */
	return (mp_naps);
}

extern uint8_t *pcpu_boot_stack;
extern trap_info_t trap_table[];

static void
smp_trap_init(trap_info_t *trap_ctxt)
{
	const trap_info_t *t;

	for (t = trap_table; t->address; t++) {
		trap_ctxt[t->vector].flags = t->flags;
		trap_ctxt[t->vector].cs = t->cs;
		trap_ctxt[t->vector].address = t->address;
	}
}
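
/*
 * trap_table describes the kernel's trap gates; smp_trap_init() copies
 * it into a new vcpu's context so the hypervisor can install the
 * equivalent of an IDT for that vcpu (see cpu_initialize_context()).
 */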

void
cpu_initialize_context(unsigned int cpu)
{
	/* vcpu_guest_context_t is too large to allocate on the stack.
	 * Hence we allocate statically and protect it with a lock */
	vm_page_t m[NPGPTD + 2];
	static vcpu_guest_context_t ctxt;
	vm_offset_t boot_stack;
	vm_offset_t newPTD;
	vm_paddr_t ma[NPGPTD];
	static int color;
	int i;

	/*
	 * Page 0,[0-3]	PTD
	 * Page 1, [4]	boot stack
	 * Page [5]	PDPT
	 */
	for (i = 0; i < NPGPTD + 2; i++) {
		m[i] = vm_page_alloc(NULL, color++,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);

		pmap_zero_page(m[i]);
	}

	boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
	ma[0] = xpmap_ptom(VM_PAGE_TO_PHYS(m[0]))|PG_V;

#ifdef PAE
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
	for (i = 0; i < NPGPTD; i++) {
		((vm_paddr_t *)boot_stack)[i] =
		ma[i] =
		    xpmap_ptom(VM_PAGE_TO_PHYS(m[i]))|PG_V;
	}
#endif

	/*
	 * Copy cpu0 IdlePTD to new IdlePTD - copying only
	 * kernel mappings
	 */
	pmap_qenter(newPTD, m, 4);

	memcpy((uint8_t *)newPTD + KPTDI*sizeof(vm_paddr_t),
	    (uint8_t *)PTOV(IdlePTD) + KPTDI*sizeof(vm_paddr_t),
	    nkpt*sizeof(vm_paddr_t));

	pmap_qremove(newPTD, 4);
	kmem_free(kernel_map, newPTD, NPGPTD * PAGE_SIZE);
	/*
	 * map actual idle stack to boot_stack
	 */
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD]));

	xen_pgdpt_pin(xpmap_ptom(VM_PAGE_TO_PHYS(m[NPGPTD + 1])));
	vm_page_lock_queues();
	for (i = 0; i < 4; i++) {
		int pdir = (PTDPTDI + i) / NPDEPG;
		int curoffset = (PTDPTDI + i) % NPDEPG;

		xen_queue_pt_update((vm_paddr_t)
		    ((ma[pdir] & ~PG_V) + (curoffset*sizeof(vm_paddr_t))),
		    ma[i]);
	}
	PT_UPDATES_FLUSH();
	vm_page_unlock_queues();

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.flags = VGCF_IN_KERNEL;
	ctxt.user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.fs = GSEL(GPRIV_SEL, SEL_KPL);
	ctxt.user_regs.gs = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.eip = (unsigned long)init_secondary;
	ctxt.user_regs.eflags = PSL_KERNEL | 0x1000; /* IOPL_RING1 */

	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

	smp_trap_init(ctxt.trap_ctxt);

	ctxt.ldt_ents = 0;
	ctxt.gdt_frames[0] =
	    (uint32_t)((uint64_t)vtomach(bootAPgdt) >> PAGE_SHIFT);
	ctxt.gdt_ents = 512;

#ifdef __i386__
	ctxt.user_regs.esp = boot_stack + PAGE_SIZE;

	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = boot_stack + PAGE_SIZE;

	ctxt.event_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.event_callback_eip = (unsigned long)Xhypervisor_callback;
	ctxt.failsafe_callback_cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

	ctxt.ctrlreg[3] = xpmap_ptom(VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
#else /* __x86_64__ */
	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = idle->thread.rsp0;

	ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
	ctxt.syscall_callback_eip = (unsigned long)system_call;

	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));

	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
#endif

	printf("gdtpfn=%lx pdptpfn=%lx\n",
	    ctxt.gdt_frames[0],
	    ctxt.ctrlreg[3] >> PAGE_SHIFT);

	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));

	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int cpus, ms;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	cpu_initialize_context(apic_id);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return (1);	/* return SUCCESS */
		DELAY(1000);
	}
	return (0);		/* return FAILURE */
}
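
/*
 * The "watchpoint" handshake: init_secondary() increments mp_naps as soon
 * as the new vcpu is alive, so start_ap() just polls for mp_naps to move
 * past the value sampled before the VCPUOP_up hypercall.
 */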

/*
 * send an IPI to a specific CPU.
 */
static void
ipi_send_cpu(int cpu, u_int ipi)
{
	u_int bitmap, old_pending, new_pending;

	if (IPI_IS_BITMAPED(ipi)) {
		bitmap = 1 << ipi;
		ipi = IPI_BITMAP_VECTOR;
		do {
			old_pending = cpu_ipi_pending[cpu];
			new_pending = old_pending | bitmap;
		} while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
		    old_pending, new_pending));
		if (!old_pending)
			ipi_pcpu(cpu, RESCHEDULE_VECTOR);
	} else {
		KASSERT(call_data != NULL, ("call_data not set"));
		ipi_pcpu(cpu, CALL_FUNCTION_VECTOR);
	}
}

/*
 * Flush the TLB on all other CPU's
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;
	struct _call_data data;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}

static void
smp_targeted_tlb_shootdown(cpumask_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;
	struct _call_data data;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
	}
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
	}
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
	}
}

void
smp_masked_invltlb(cpumask_t mask)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
	}
}

void
smp_masked_invlpg(cpumask_t mask, vm_offset_t addr)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
	}
}

void
smp_masked_invlpg_range(cpumask_t mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
	}
}
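
/*
 * Typical caller is the pmap layer, e.g. (sketch only; the exact mask
 * computation lives in pmap.c):
 *	smp_masked_invlpg(pmap->pm_active & PCPU_GET(other_cpus), va);
 * so only CPUs that may have the mapping cached get interrupted.
 */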

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(cpumask_t cpus, u_int ipi)
{
	int cpu;

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, cpus);

	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		cpus &= ~(1 << cpu);
		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
		ipi_send_cpu(cpu, ipi);
	}
}

/*
 * send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, 1 << cpu);

	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
	ipi_send_cpu(cpu, ipi);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		atomic_set_int(&ipi_nmi_pending, PCPU_GET(other_cpus));

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	ipi_selected(PCPU_GET(other_cpus), ipi);
}

int
ipi_nmi_handler(void)
{
	cpumask_t cpumask;

	/*
	 * As long as there is not a simple way to know about a NMI's
	 * source, if the bitmask for the current CPU is present in
	 * the global pending bitword an IPI_STOP_HARD has been issued
	 * and should be handled.
	 */
	cpumask = PCPU_GET(cpumask);
	if ((ipi_nmi_pending & cpumask) == 0)
		return (1);

	atomic_clear_int(&ipi_nmi_pending, cpumask);
	cpustop_handler();
	return (0);
}
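
/*
 * Return convention: 0 means the NMI was an IPI_STOP_HARD we consumed
 * (the CPU then parks in cpustop_handler() below); 1 means the NMI was
 * not ours and normal NMI processing should continue.
 */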

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
	int cpu = PCPU_GET(cpuid);
	int cpumask = PCPU_GET(cpumask);

	savectx(&stoppcbs[cpu]);

	/* Indicate that we are stopped */
	atomic_set_int(&stopped_cpus, cpumask);

	/* Wait for restart */
	while (!(started_cpus & cpumask))
		ia32_pause();

	atomic_clear_int(&started_cpus, cpumask);
	atomic_clear_int(&stopped_cpus, cpumask);

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
SYSINIT(start_ipis, SI_SUB_INTR, SI_ORDER_ANY, xen_smp_intr_init_cpus, NULL);