/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
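/*
 * Added background note: CMOS register 0x0f is the BIOS shutdown status
 * byte; the value 0x0a requests a "warm start without memory test", in
 * which the BIOS jumps through the real-mode vector stored at 40:67
 * (WARMBOOT_OFF/WARMBOOT_SEG above).  start_all_aps() points that vector
 * at the AP trampoline as a belt-and-braces measure for CPUs that come
 * out of INIT via the BIOS warm-boot path.
 */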
/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of application processors */
int	boot_cpu_id = -1;	/* designated BSP */

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary holder for double fault stack */
char *doublefault_stack;
/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

#ifdef STOP_NMI
volatile cpumask_t ipi_nmi_pending;

static void	ipi_nmi_selected(u_int32_t cpus);
#endif
/*
 * Local data and functions.
 */

#ifdef STOP_NMI
/*
 * Provide an alternate method of stopping other CPUs.  If another CPU has
 * disabled interrupts the conventional STOP IPI will be blocked.  This
 * NMI-based stop should get through in that case.
 */
static int stop_cpus_with_nmi = 1;
SYSCTL_INT(_debug, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
    &stop_cpus_with_nmi, 0, "");
TUNABLE_INT("debug.stop_cpus_with_nmi", &stop_cpus_with_nmi);
#else
#define	stop_cpus_with_nmi	0
#endif
static u_int logical_cpus;

/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually setup
 * the APs.
 */
struct cpu_info {
        int     cpu_present:1;
        int     cpu_bsp:1;
        int     cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int	hlt_logical_cpus;
static u_int	hyperthreading_cpus;
static cpumask_t	hyperthreading_cpus_mask;
static int	hyperthreading_allowed = 1;
static struct	sysctl_ctx_list logical_cpu_clist;
static u_int	bootMP_size;
static void
mem_range_AP_init(void)
{

        if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
                mem_range_softc.mr_op->initAP(&mem_range_softc);
}
static void
mp_topology(void)
{
        struct cpu_group *group;
        u_int regs[4];
        int apic_id, cpu, groups;

        /* Build the smp_topology map. */
        /* Nothing to do if there is no HTT support. */
        if ((cpu_feature & CPUID_HTT) == 0)
                return;
        logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
        if (logical_cpus <= 1)
                return;
        /* Nothing to do if reported cores are physical cores. */
        if (strcmp(cpu_vendor, "GenuineIntel") == 0 && cpu_high >= 4) {
                cpuid_count(4, 0, regs);
                if ((regs[0] & 0x1f) != 0 &&
                    logical_cpus <= ((regs[0] >> 26) & 0x3f) + 1)
                        return;
        }
        group = &mp_groups[0];
        groups = 1;
        for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
                if (!cpu_info[apic_id].cpu_present)
                        continue;
                /*
                 * If the current group has members and we're not a logical
                 * cpu, create a new group.
                 */
                if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
                        group++;
                        groups++;
                }
                group->cg_count++;
                group->cg_mask |= 1 << cpu;
                cpu++;
        }

        mp_top.ct_count = groups;
        mp_top.ct_group = mp_groups;
        smp_topology = &mp_top;
}
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

        bootMP_size = mptramp_end - mptramp_start;
        boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
        if (((basemem * 1024) - boot_address) < bootMP_size)
                boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
        /* 3 levels of page table pages */
        mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

        return mptramp_pagetables;
}
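/*
 * Added note, a sketch of the resulting low-memory layout (actual
 * addresses depend on basemem):
 *
 *	mptramp_pagetables + 0	PML4 page for the trampoline
 *	mptramp_pagetables + 4K	PDP page
 *	mptramp_pagetables + 8K	PD page (512 x 2MB mappings)
 *	boot_address		real-mode trampoline code/data
 */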
void
cpu_add(u_int apic_id, char boot_cpu)
{

        if (apic_id >= MAXCPU) {
                printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
                    apic_id, MAXCPU - 1);
                return;
        }
        KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
            apic_id));
        cpu_info[apic_id].cpu_present = 1;
        if (boot_cpu) {
                KASSERT(boot_cpu_id == -1,
                    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
                    boot_cpu_id));
                boot_cpu_id = apic_id;
                cpu_info[apic_id].cpu_bsp = 1;
        }
        mp_ncpus++;
        if (apic_id > mp_maxid)
                mp_maxid = apic_id;
        if (bootverbose)
                printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
                    "AP");
}
void
cpu_mp_setmaxid(void)
{

        /*
         * mp_maxid should already be set by calls to cpu_add().
         * Just sanity check its value here.
         */
        if (mp_ncpus == 0)
                KASSERT(mp_maxid == 0,
                    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
        else if (mp_ncpus == 1)
                mp_maxid = 0;
        else
                KASSERT(mp_maxid >= mp_ncpus - 1,
                    ("%s: counters out of sync: max %d, count %d", __func__,
                    mp_maxid, mp_ncpus));
}
int
cpu_mp_probe(void)
{

        /*
         * Always record BSP in CPU map so that the mbuf init code works
         * correctly.
         */
        all_cpus = 1;
        if (mp_ncpus == 0) {
                /*
                 * No CPUs were found, so this must be a UP system.  Setup
                 * the variables to represent a system with a single CPU
                 * with an id of 0.
                 */
                mp_ncpus = 1;
                return (0);
        }

        /* At least one CPU was found. */
        if (mp_ncpus == 1) {
                /*
                 * One CPU was found, so this must be a UP system with
                 * an I/O APIC.
                 */
                mp_maxid = 0;
                return (0);
        }

        /* At least two CPUs were found. */
        return (1);
}
/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
        int i;
        u_int threads_per_cache, p[4];

        /* Initialize the logical ID to APIC ID table. */
        for (i = 0; i < MAXCPU; i++) {
                cpu_apic_ids[i] = -1;
                cpu_ipi_pending[i] = 0;
        }

        /* Install an inter-CPU IPI for TLB invalidation */
        setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
        setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
        setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

        /* Install an inter-CPU IPI for all-CPU rendezvous */
        setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

        /* Install generic inter-CPU IPI handler */
        setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
            SDT_SYSIGT, SEL_KPL, 0);

        /* Install an inter-CPU IPI for CPU stop/restart */
        setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

        /* Set boot_cpu_id if needed. */
        if (boot_cpu_id == -1) {
                boot_cpu_id = PCPU_GET(apic_id);
                cpu_info[boot_cpu_id].cpu_bsp = 1;
        } else
                KASSERT(boot_cpu_id == PCPU_GET(apic_id),
                    ("BSP's APIC ID doesn't match boot_cpu_id"));
        cpu_apic_ids[0] = boot_cpu_id;
        /* Start each Application Processor */
        start_all_aps();

        /* Setup the initial logical CPUs info. */
        logical_cpus = logical_cpus_mask = 0;
        if (cpu_feature & CPUID_HTT)
                logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

        /*
         * Work out if hyperthreading is *really* enabled.  This
         * is made really ugly by the fact that processors lie: dual
         * core processors claim to be hyperthreaded even when they're
         * not, presumably because they want to be treated the same
         * way as HTT with respect to per-cpu software licensing.
         * At the time of writing (May 12, 2005) the only hyperthreaded
         * cpus are from Intel, and Intel's dual-core processors can be
         * identified via the "deterministic cache parameters" cpuid
         * calls.
         */
        /*
         * First determine if this is an Intel processor which claims
         * to have hyperthreading support.
         */
        if ((cpu_feature & CPUID_HTT) &&
            (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
                /*
                 * If the "deterministic cache parameters" cpuid calls
                 * are available, use them.
                 */
                if (cpu_high >= 4) {
                        /* Ask the processor about the L1 cache. */
                        for (i = 0; i < 1; i++) {
                                cpuid_count(4, i, p);
                                threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
                                if (hyperthreading_cpus < threads_per_cache)
                                        hyperthreading_cpus = threads_per_cache;
                                if ((p[0] & 0x1f) == 0)
                                        break;
                        }
                }

                /*
                 * If the deterministic cache parameters are not
                 * available, or if no caches were reported to exist,
                 * just accept what the HTT flag indicated.
                 */
                if (hyperthreading_cpus == 0)
                        hyperthreading_cpus = logical_cpus;
        }
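        /*
         * Added note on the leaf-4 decoding above, per Intel's CPUID
         * documentation: EAX bits 4:0 give the cache type (0 means "no
         * more caches") and EAX bits 25:14 give the maximum number of
         * logical processors sharing the cache, minus one.  Hence the
         * ((p[0] & 0x3ffc000) >> 14) + 1 expression, and the loop's
         * early exit when the type field reads zero.
         */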
        set_interrupt_apic_ids();
}
/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
        int i, x;

        /* List CPUs */
        printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
        for (i = 1, x = 0; x < MAXCPU; x++) {
                if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
                        continue;
                if (cpu_info[x].cpu_disabled)
                        printf("  cpu (AP): APIC ID: %2d (disabled)\n", x);
                else {
                        KASSERT(i < mp_ncpus,
                            ("mp_ncpus and actual cpus are out of whack"));
                        printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
                }
        }
}
/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
        struct pcpu *pc;
        u_int64_t msr, cr0;
        int cpu, gsel_tss;

        /* Set by the startup code for us to use */
        cpu = bootAP;

        /* Init tss */
        common_tss[cpu] = common_tss[0];
        common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */
        common_tss[cpu].tss_iobase = sizeof(struct amd64tss);
        common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

        gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
        ssdtosyssd(&gdt_segs[GPROC0_SEL],
            (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

        lgdt(&r_gdt);			/* does magic intra-segment return */
        /* Get per-cpu data */
        pc = &__pcpu[cpu];

        /* prime data page for it to use */
        pcpu_init(pc, cpu, sizeof(struct pcpu));
        pc->pc_apic_id = cpu_apic_ids[cpu];
        pc->pc_prvspace = pc;
        pc->pc_curthread = 0;
        pc->pc_tssp = &common_tss[cpu];

        wrmsr(MSR_FSBASE, 0);		/* User value */
        wrmsr(MSR_GSBASE, (u_int64_t)pc);
        wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */
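        /*
         * Added note: pointing MSR_GSBASE at the struct pcpu is what makes
         * the PCPU_GET/PCPU_SET accessors work on this AP; they expand to
         * %gs-relative loads and stores.
         */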
        lidt(&r_idt);

        gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
        ltr(gsel_tss);

        /*
         * Set to a known state:
         * Set by mpboot.s: CR0_PG, CR0_PE
         * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
         */
        cr0 = rcr0();
        cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
        load_cr0(cr0);
        /* Set up the fast syscall stuff */
        msr = rdmsr(MSR_EFER) | EFER_SCE;
        wrmsr(MSR_EFER, msr);
        wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
        wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
        msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
            ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
        wrmsr(MSR_STAR, msr);
        wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
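        /*
         * Added note: bits set in MSR_SF_MASK are *cleared* from %rflags
         * on syscall entry, so masking PSL_I here guarantees this AP
         * enters the kernel's syscall path with interrupts disabled.
         */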
        /* Disable local APIC just to be sure. */
        lapic_disable();

        /* signal our startup to the BSP. */
        mp_naps++;

        /* Spin until the BSP releases the AP's. */
        while (!aps_ready)
                ia32_pause();

        /* Initialize the PAT MSR. */
        pmap_init_pat();

        /* set up CPU registers and state */
        cpu_setregs();

        /* set up SSE/NX registers */
        initializecpu();

        /* set up FPU state on the AP */
        fpuinit();

        /* A quick check from sanity claus */
        if (PCPU_GET(apic_id) != lapic_id()) {
                printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
                printf("SMP: actual apic_id = %d\n", lapic_id());
                printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
                panic("cpuid mismatch! boom!!");
        }

        /* Initialize curthread. */
        KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
        PCPU_SET(curthread, PCPU_GET(idlethread));
        mtx_lock_spin(&ap_boot_mtx);

        /* Init local apic for irq's */
        lapic_setup();

        /* Set memory range attributes for this CPU to match the BSP */
        mem_range_AP_init();

        smp_cpus++;

        CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
        printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

        /* Determine if we are a logical CPU. */
        if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
                logical_cpus_mask |= PCPU_GET(cpumask);

        /* Determine if we are a hyperthread. */
        if (hyperthreading_cpus > 1 &&
            PCPU_GET(apic_id) % hyperthreading_cpus != 0)
                hyperthreading_cpus_mask |= PCPU_GET(cpumask);

        /* Build our map of 'other' CPUs. */
        PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
        if (smp_cpus == mp_ncpus) {
                /* enable IPI's, tlb shootdown, freezes etc */
                atomic_store_rel_int(&smp_started, 1);
                smp_active = 1;	/* historic */
        }

        /*
         * Enable global pages TLB extension.
         * This also implicitly flushes the TLB.
         */
        load_cr4(rcr4() | CR4_PGE);

        mtx_unlock_spin(&ap_boot_mtx);

        /* wait until all the AP's are up */
        while (smp_started == 0)
                ia32_pause();
        /* ok, now grab sched_lock and enter the scheduler */
        mtx_lock_spin(&sched_lock);

        /*
         * Correct spinlock nesting.  The idle thread context that we are
         * borrowing was created so that it would start out with a single
         * spin lock (sched_lock) held in fork_trampoline().  Since we've
         * explicitly acquired locks in this function, the nesting count
         * is now 2 rather than 1.  Since we are nested, calling
         * spinlock_exit() will simply adjust the counts without allowing
         * spin lock using code to interrupt us.
         */
        spinlock_exit();
        KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

        PCPU_SET(switchtime, cpu_ticks());
        PCPU_SET(switchticks, ticks);

        cpu_throw(NULL, choosethread());	/* doesn't return */

        panic("scheduler returned us to %s", __func__);
}
/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
        u_int apic_id;

        for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
                if (!cpu_info[apic_id].cpu_present)
                        continue;
                if (cpu_info[apic_id].cpu_bsp)
                        continue;

                /* Don't let hyperthreads service interrupts. */
                if (hyperthreading_cpus > 1 &&
                    apic_id % hyperthreading_cpus != 0)
                        continue;

                intr_add_cpu(apic_id);
        }
}
/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
        vm_offset_t va = boot_address + KERNBASE;
        u_int64_t *pt4, *pt3, *pt2;
        u_int32_t mpbioswarmvec;
        int apic_id, cpu, i;
        u_char mpbiosreason;

        mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

        /* install the AP 1st level boot code */
        pmap_kenter(va, boot_address);
        pmap_invalidate_page(kernel_pmap, va);
        bcopy(mptramp_start, (void *)va, bootMP_size);
        /* Locate the page tables, they'll be below the trampoline */
        pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
        pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
        pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

        /* Create the initial 1GB replicated page tables */
        for (i = 0; i < 512; i++) {
                /* Each slot of the level 4 pages points to the same level 3 page */
                pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
                pt4[i] |= PG_V | PG_RW | PG_U;

                /* Each slot of the level 3 pages points to the same level 2 page */
                pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
                pt3[i] |= PG_V | PG_RW | PG_U;

                /* The level 2 page slots are mapped with 2MB pages for 1GB. */
                pt2[i] = i * (2 * 1024 * 1024);
                pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
        }
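        /*
         * Added note: because every PML4 and PDP slot aliases the same
         * lower-level page, any virtual address resolves into the first
         * 1GB of physical memory.  The trampoline can therefore run with
         * VA == PA both before and immediately after paging is enabled,
         * regardless of where it eventually jumps in the kernel map.
         */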
        /* save the current value of the warm-start vector */
        mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
        outb(CMOS_REG, BIOS_RESET);
        mpbiosreason = inb(CMOS_DATA);

        /* setup a vector to our boot code */
        *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
        *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
        outb(CMOS_REG, BIOS_RESET);
        outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
        /* start each AP */
        for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {

                /* Ignore non-existent CPUs and the BSP. */
                if (!cpu_info[apic_id].cpu_present ||
                    cpu_info[apic_id].cpu_bsp)
                        continue;

                /* Don't use this CPU if it has been disabled by a tunable. */
                if (resource_disabled("lapic", apic_id)) {
                        cpu_info[apic_id].cpu_disabled = 1;
                        continue;
                }

                cpu++;

                /* save APIC ID for this logical ID */
                cpu_apic_ids[cpu] = apic_id;

                /* allocate and set up an idle stack data page */
                bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
                doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);

                bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
                bootAP = cpu;

                /* attempt to start the Application Processor */
                if (!start_ap(apic_id)) {
                        /* restore the warmstart vector */
                        *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
                        panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
                }

                all_cpus |= (1 << cpu);	/* record AP in CPU map */
        }

        /* build our map of 'other' CPUs */
        PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

        /* restore the warmstart vector */
        *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

        outb(CMOS_REG, BIOS_RESET);
        outb(CMOS_DATA, mpbiosreason);

        /* number of APs actually started */
        return mp_naps;
}
/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
        int vector, ms;
        int cpus;

        /* calculate the vector */
        vector = (boot_address >> 12) & 0xff;
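        /*
         * Added note: a STARTUP IPI carries an 8-bit vector V, and the
         * target begins real-mode execution at physical address V << 12.
         * boot_address is page-aligned and below 1MB, so its page frame
         * number fits the vector field exactly.
         */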
        /* used as a watchpoint to signal AP startup */
        cpus = mp_naps;
        /*
         * first we do an INIT/RESET IPI, this INIT IPI might be run, resetting
         * and running the target CPU.  OR this INIT IPI might be latched (P5
         * bug), CPU waiting for STARTUP IPI.  OR this INIT IPI might be
         * ignored.
         */

        /* do an INIT IPI: assert RESET */
        lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
            APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

        /* wait for pending status end */
        lapic_ipi_wait(-1);

        /* do an INIT IPI: deassert RESET */
        lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
            APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

        /* wait for pending status end */
        DELAY(10000);		/* wait ~10mS */
        lapic_ipi_wait(-1);

        /*
         * next we do a STARTUP IPI: the previous INIT IPI might still be
         * latched (P5 bug), this 1st STARTUP would then terminate
         * immediately, and the previously started INIT IPI would continue.  OR
         * the previous INIT IPI has already run, and this STARTUP IPI will
         * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
         * will run.
         */

        /* do a STARTUP IPI */
        lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
            APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
            vector, apic_id);
        lapic_ipi_wait(-1);
        DELAY(200);		/* wait ~200uS */

        /*
         * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
         * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
         * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
         * recognized after hardware RESET or INIT IPI.
         */

        lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
            APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
            vector, apic_id);
        lapic_ipi_wait(-1);
        DELAY(200);		/* wait ~200uS */

        /* Wait up to 5 seconds for it to start. */
        for (ms = 0; ms < 5000; ms++) {
                if (mp_naps > cpus)
                        return 1;	/* return SUCCESS */
                DELAY(1000);
        }
        return 0;		/* return FAILURE */
}
/*
 * Flush the TLB on all other CPU's
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
        u_int ncpu;

        ncpu = mp_ncpus - 1;	/* does not shootdown self */
        if (ncpu < 1)
                return;		/* no other cpus */
        mtx_assert(&smp_ipi_mtx, MA_OWNED);
        smp_tlb_addr1 = addr1;
        smp_tlb_addr2 = addr2;
        atomic_store_rel_int(&smp_tlb_wait, 0);
        ipi_all_but_self(vector);
        while (smp_tlb_wait < ncpu)
                ia32_pause();
}
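/*
 * Added note on the handshake: the initiator publishes the operands in
 * smp_tlb_addr1/smp_tlb_addr2, resets smp_tlb_wait with release
 * semantics, and sends the vector.  Each target's interrupt stub
 * performs the requested invalidation and atomically increments
 * smp_tlb_wait, so the spin above completes once all ncpu other CPUs
 * have acknowledged.
 */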
static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
        int ncpu, othercpus;

        othercpus = mp_ncpus - 1;
        if (mask == (u_int)-1) {
                ncpu = othercpus;
                if (ncpu < 1)
                        return;
        } else {
                mask &= ~PCPU_GET(cpumask);
                if (mask == 0)
                        return;
                ncpu = bitcount32(mask);
                if (ncpu > othercpus) {
                        /* XXX this should be a panic offence */
                        printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
                            ncpu, othercpus);
                        ncpu = othercpus;
                }
                /* XXX should be a panic, implied by mask == 0 above */
                if (ncpu < 1)
                        return;
        }
        mtx_assert(&smp_ipi_mtx, MA_OWNED);
        smp_tlb_addr1 = addr1;
        smp_tlb_addr2 = addr2;
        atomic_store_rel_int(&smp_tlb_wait, 0);
        if (mask == (u_int)-1)
                ipi_all_but_self(vector);
        else
                ipi_selected(mask, vector);
        while (smp_tlb_wait < ncpu)
                ia32_pause();
}
void
smp_cache_flush(void)
{

        if (smp_started)
                smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

        if (smp_started)
                smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

        if (smp_started)
                smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

        if (smp_started)
                smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

        if (smp_started)
                smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

        if (smp_started)
                smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

        if (smp_started)
                smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}
void
ipi_bitmap_handler(struct trapframe frame)
{
        int cpu = PCPU_GET(cpuid);
        u_int ipi_bitmap;

        ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

#ifdef IPI_PREEMPTION
        if (ipi_bitmap & (1 << IPI_PREEMPT)) {
                mtx_lock_spin(&sched_lock);
                /* Don't preempt the idle thread */
                if (curthread->td_priority < PRI_MIN_IDLE) {
                        struct thread *running_thread = curthread;
                        if (running_thread->td_critnest > 1)
                                running_thread->td_owepreempt = 1;
                        else
                                mi_switch(SW_INVOL | SW_PREEMPT, NULL);
                }
                mtx_unlock_spin(&sched_lock);
        }
#endif

        /* Nothing to do for AST */
}
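/*
 * Added note on the bitmap scheme: several low-rate IPI types share the
 * single IPI_BITMAP_VECTOR.  ipi_selected() records the requested type
 * as a bit in the target's cpu_ipi_pending word before raising the
 * vector, and the handler above drains the whole word with one atomic
 * read-and-clear, acting on every type that was posted.
 */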
/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
        int cpu;
        u_int bitmap = 0;
        u_int old_pending;
        u_int new_pending;

        if (IPI_IS_BITMAPED(ipi)) {
                bitmap = 1 << ipi;
                ipi = IPI_BITMAP_VECTOR;
        }

        if (ipi == IPI_STOP && stop_cpus_with_nmi) {
                ipi_nmi_selected(cpus);
                return;
        }

        CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
        while ((cpu = ffs(cpus)) != 0) {
                cpu--;
                cpus &= ~(1 << cpu);

                KASSERT(cpu_apic_ids[cpu] != -1,
                    ("IPI to non-existent CPU %d", cpu));

                if (bitmap) {
                        do {
                                old_pending = cpu_ipi_pending[cpu];
                                new_pending = old_pending | bitmap;
                        } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
                            old_pending, new_pending));
                }

                lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
        }
}
/*
 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

        if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
                ipi_selected(all_cpus, ipi);
                return;
        }
        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

        if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
                ipi_selected(PCPU_GET(other_cpus), ipi);
                return;
        }
        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

        if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
                ipi_selected(PCPU_GET(cpumask), ipi);
                return;
        }
        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}
#ifdef STOP_NMI
/*
 * send NMI IPI to selected CPUs
 */

#define BEFORE_SPIN	1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
        int cpu;
        register_t icrlo;

        icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
            | APIC_TRIGMOD_EDGE;

        CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

        atomic_set_int(&ipi_nmi_pending, cpus);

        while ((cpu = ffs(cpus)) != 0) {
                cpu--;
                cpus &= ~(1 << cpu);

                KASSERT(cpu_apic_ids[cpu] != -1,
                    ("IPI NMI to non-existent CPU %d", cpu));

                /* Wait for an earlier IPI to finish. */
                if (!lapic_ipi_wait(BEFORE_SPIN))
                        panic("ipi_nmi_selected: previous IPI has not cleared");

                lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
        }
}
int
ipi_nmi_handler(void)
{
        int cpumask = PCPU_GET(cpumask);

        if (!(ipi_nmi_pending & cpumask))
                return 1;

        atomic_clear_int(&ipi_nmi_pending, cpumask);
        cpustop_handler();
        return 0;
}

#endif /* STOP_NMI */
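/*
 * Added note: the NMI trap path calls ipi_nmi_handler() on every NMI;
 * the ipi_nmi_pending check above is how a CPU distinguishes a stop
 * request from an unrelated NMI (a nonzero return tells the caller the
 * NMI was not ours and should be handled normally).
 */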
/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
        int cpu = PCPU_GET(cpuid);
        int cpumask = PCPU_GET(cpumask);

        savectx(&stoppcbs[cpu]);

        /* Indicate that we are stopped */
        atomic_set_int(&stopped_cpus, cpumask);

        /* Wait for restart */
        while (!(started_cpus & cpumask))
                ia32_pause();

        atomic_clear_int(&started_cpus, cpumask);
        atomic_clear_int(&stopped_cpus, cpumask);

        if (cpu == 0 && cpustop_restartfunc != NULL) {
                cpustop_restartfunc();
                cpustop_restartfunc = NULL;
        }
}
/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

        mtx_lock_spin(&sched_lock);
        atomic_store_rel_int(&aps_ready, 1);
        while (smp_started == 0)
                ia32_pause();
        mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
        u_int mask;
        int error;

        mask = hlt_cpus_mask;
        error = sysctl_handle_int(oidp, &mask, 0, req);
        if (error || !req->newptr)
                return (error);

        if (logical_cpus_mask != 0 &&
            (mask & logical_cpus_mask) == logical_cpus_mask)
                hlt_logical_cpus = 1;
        else
                hlt_logical_cpus = 0;

        if (! hyperthreading_allowed)
                mask |= hyperthreading_cpus_mask;

        if ((mask & all_cpus) == all_cpus)
                mask &= ~(1<<0);
        hlt_cpus_mask = mask;
        return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");
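/*
 * Added usage example (illustrative): "sysctl machdep.hlt_cpus=4" asks
 * CPU 2 to sit in the hlt loop in mp_grab_cpu_hlt() below.  Note the
 * handler never lets the mask cover every CPU: if it would, bit 0 (the
 * BSP) is cleared so at least one CPU keeps running.
 */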
static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
        int disable, error;

        disable = hlt_logical_cpus;
        error = sysctl_handle_int(oidp, &disable, 0, req);
        if (error || !req->newptr)
                return (error);

        if (disable)
                hlt_cpus_mask |= logical_cpus_mask;
        else
                hlt_cpus_mask &= ~logical_cpus_mask;

        if (! hyperthreading_allowed)
                hlt_cpus_mask |= hyperthreading_cpus_mask;

        if ((hlt_cpus_mask & all_cpus) == all_cpus)
                hlt_cpus_mask &= ~(1<<0);

        hlt_logical_cpus = disable;
        return (error);
}
static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
        int allowed, error;

        allowed = hyperthreading_allowed;
        error = sysctl_handle_int(oidp, &allowed, 0, req);
        if (error || !req->newptr)
                return (error);

        if (allowed)
                hlt_cpus_mask &= ~hyperthreading_cpus_mask;
        else
                hlt_cpus_mask |= hyperthreading_cpus_mask;

        if (logical_cpus_mask != 0 &&
            (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
                hlt_logical_cpus = 1;
        else
                hlt_logical_cpus = 0;

        if ((hlt_cpus_mask & all_cpus) == all_cpus)
                hlt_cpus_mask &= ~(1<<0);

        hyperthreading_allowed = allowed;
        return (error);
}
static void
cpu_hlt_setup(void *dummy __unused)
{

        if (logical_cpus_mask != 0) {
                TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
                    &hlt_logical_cpus);
                sysctl_ctx_init(&logical_cpu_clist);
                SYSCTL_ADD_PROC(&logical_cpu_clist,
                    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
                    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
                    sysctl_hlt_logical_cpus, "IU", "");
                SYSCTL_ADD_UINT(&logical_cpu_clist,
                    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
                    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
                    &logical_cpus_mask, 0, "");

                if (hlt_logical_cpus)
                        hlt_cpus_mask |= logical_cpus_mask;

                /*
                 * If necessary for security purposes, force
                 * hyperthreading off, regardless of the value
                 * of hlt_logical_cpus.
                 */
                if (hyperthreading_cpus_mask) {
                        TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
                            &hyperthreading_allowed);
                        SYSCTL_ADD_PROC(&logical_cpu_clist,
                            SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
                            "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
                            0, 0, sysctl_hyperthreading_allowed, "IU", "");
                        if (! hyperthreading_allowed)
                                hlt_cpus_mask |= hyperthreading_cpus_mask;
                }
        }
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
int
mp_grab_cpu_hlt(void)
{
        u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
        u_int cpuid = PCPU_GET(cpuid);
#endif
        int retval;

#ifdef MP_WATCHDOG
        ap_watchdog(cpuid);
#endif

        retval = mask & hlt_cpus_mask;
        while (mask & hlt_cpus_mask)
                __asm __volatile("sti; hlt" : : : "memory");
        return (retval);
}
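/*
 * Added note on "sti; hlt": STI only takes effect after the following
 * instruction, so no interrupt can slip in between enabling interrupts
 * and halting.  The CPU sleeps in hlt with interrupts deliverable, wakes
 * on the next interrupt, and then re-checks hlt_cpus_mask.
 */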