/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
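
/*
 * Warm-boot handshake, summarized for reference: writing BIOS_WARM (0x0a)
 * into CMOS shutdown-status register BIOS_RESET (0x0f) tells the BIOS that
 * the next CPU reset is a warm start, so instead of running POST it jumps
 * through the real-mode vector at 40:67 (physical 0x467, visible here
 * through the kernel map as WARMBOOT_OFF/WARMBOOT_SEG).  start_all_aps()
 * below uses exactly this sequence:
 *
 *	outb(CMOS_REG, BIOS_RESET);	   select the shutdown-status byte
 *	outb(CMOS_DATA, BIOS_WARM);	   request a warm start
 *	*(u_short *)WARMBOOT_OFF = off;	   real-mode IP of the trampoline
 *	*(u_short *)WARMBOOT_SEG = seg;	   real-mode CS of the trampoline
 */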

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of Application Processors */
int	boot_cpu_id = -1;	/* designated BSP */

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set
 * up the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

static u_int boot_address;

static void	set_logical_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;

static void
mem_range_AP_init(void)
{

	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
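
/*
 * Worked example with hypothetical values: basemem = 639 (KB) gives
 * basemem * 1024 = 0x9fc00.  trunc_page() rounds that down to 0x9f000,
 * leaving 0xc00 bytes for the trampoline; if bootMP_size exceeds that,
 * boot_address drops one more page to 0x9e000.  The three page table
 * pages then sit at 0x9b000-0x9dfff, and the returned address becomes
 * the new effective top of usable base memory.
 */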

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (apic_id > mp_maxid)
		mp_maxid = apic_id;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should already be set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record the BSP in the CPU map so that the mbuf init code
	 * works correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++)
		cpu_apic_ids[i] = -1;

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forwarding hardclock() */
	setidt(IPI_HARDCLOCK, IDTVEC(hardclock), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forwarding statclock() */
	setidt(IPI_STATCLOCK, IDTVEC(statclock), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for forcing an additional software trap */
	setidt(IPI_AST, IDTVEC(cpuast), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
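
	/*
	 * (The >> 16 above extracts bits 23:16 of CPUID leaf 1's EBX,
	 * which report the number of logical processors per physical
	 * package; a value above 1 means HTT siblings are present.)
	 */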

	set_logical_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (cpu_info[x].cpu_present && !cpu_info[x].cpu_bsp) {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	u_int64_t msr, cr0;
	int cpu, gsel_tss;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_rsp0 = 0;	/* not used until after switch */

	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
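
	/*
	 * MSR_STAR layout, for reference: bits 47:32 hold the selector
	 * SYSCALL loads into CS (SS is derived as CS + 8); bits 63:48
	 * seed the user selectors SYSRET reloads on return.  MSR_SF_MASK
	 * lists the rflags bits SYSCALL clears on kernel entry, so
	 * interrupts, traps, and the direction flag stay off until the
	 * kernel explicitly restores them.
	 */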

	/* Disable the local APIC just to be sure. */
	lapic_disable();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (!aps_ready)
		ia32_pause();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu();

	/* set up FPU state on the AP */
	fpuinit();

	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	mtx_lock_spin(&ap_boot_mtx);

	/* Init the local APIC for IRQs */
	lapic_setup();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		logical_cpus_mask |= PCPU_GET(cpumask);

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	/* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
}

/*******************************************************************
 * local functions and data
 */

/*
 * Set the APIC logical IDs.
 *
 * We want to cluster logical CPUs within the same APIC ID cluster.
 * Since logical CPUs are aligned, simply filling in the clusters in
 * APIC ID order works fine.  Note that this does not try to balance
 * the number of CPUs in each cluster. (XXX?)
 */
static void
set_logical_apic_ids(void)
{
	u_int apic_id, cluster, cluster_id;

	/* Force us to allocate cluster 0 at the start. */
	cluster = -1;
	cluster_id = APIC_MAX_INTRACLUSTER_ID;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
			cluster = ioapic_next_logical_cluster();
			cluster_id = 0;
		} else
			cluster_id++;
		if (bootverbose)
			printf("APIC ID: physical %u, logical %u:%u\n",
			    apic_id, cluster, cluster_id);
		lapic_set_logical_id(apic_id, cluster, cluster_id);
	}
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_int64_t *pt4, *pt3, *pt2;
	vm_offset_t va = boot_address + KERNBASE;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
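
	/*
	 * The net effect, worked through: pt2 identity-maps the first 1GB
	 * with 512 2MB pages, and every level 4 and level 3 slot aliases
	 * that same 1GB, so any virtual address resolves to (addr % 1GB)
	 * physical.  The trampoline can therefore enable paging while it
	 * is still executing at its low physical address and then jump to
	 * the kernel's high virtual address using these same page tables.
	 */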

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	cpu = 0;
	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;
		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * This function starts the AP (application processor) identified by the
 * APIC ID 'apic_id'.  It does quite a "song and dance" to accomplish this.
 * This is necessary because of the nuances of the different hardware we
 * might encounter.  It isn't pretty, but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
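
	/*
	 * (A STARTUP IPI's 8-bit vector is a real-mode page number: the AP
	 * begins executing at physical address vector << 12, with
	 * CS = vector << 8 and IP = 0, which is why the trampoline must be
	 * page-aligned and below 1MB in base memory.)
	 */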

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched
	 * (P5 bug), the CPU waiting for a STARTUP IPI.  OR this INIT IPI
	 * might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10mS */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP would
	 * terminate immediately and the previously started INIT IPI would
	 * continue.  OR the previous INIT IPI has already run, and this
	 * STARTUP IPI will run.  OR the previous INIT IPI was ignored, and
	 * this STARTUP IPI will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: it should run IF the previous
	 * STARTUP IPI was cancelled by a latched INIT IPI; otherwise it
	 * will be ignored, as only ONE STARTUP IPI is recognized after a
	 * hardware RESET or INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 50; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(100000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}
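
/*
 * A note on the wait protocol, as a sketch: each target CPU takes the
 * IPI through IDTVEC(invltlb)/IDTVEC(invlpg)/IDTVEC(invlrng) (installed
 * in cpu_mp_start() above), performs the requested flush, and atomically
 * increments smp_tlb_wait; the sender spins here until all ncpu other
 * CPUs have checked in.
 */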

/*
 * This is about as magic as it gets.  fortune(1) has similar code for
 * reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 *	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 *
 * Another variant (also from fortune):
 * #define BITCOUNT(x)	(((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
 * #define BX_(x)	((x) - (((x)>>1)&0x77777777)	\
 *			     - (((x)>>2)&0x33333333)	\
 *			     - (((x)>>3)&0x11111111))
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}
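
/*
 * Why the reduction above works, for reference: each line treats m as a
 * vector of equal-sized fields and adds adjacent pairs in parallel --
 * sixteen 2-bit sums, then eight 4-bit sums, and so on -- so five steps
 * fold 32 one-bit counts into a single 32-bit total.  E.g. popcnt(0xf0)
 * yields 4.
 */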

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}
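
/*
 * Expected usage of the wrappers above (a sketch, not lifted verbatim
 * from pmap): the caller flushes its own TLB locally and holds
 * smp_ipi_mtx across the remote shootdown so that only one CPU runs
 * the wait protocol at a time:
 *
 *	mtx_lock_spin(&smp_ipi_mtx);
 *	invlpg(va);		// flush this CPU's entry
 *	smp_invlpg(va);		// flush everyone else's
 *	mtx_unlock_spin(&smp_ipi_mtx);
 */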

/*
 * For statclock, we send an IPI to all CPUs to have them call this
 * function.
 */
void
forwarded_statclock(struct clockframe frame)
{
	struct thread *td;

	CTR0(KTR_SMP, "forwarded_statclock");
	td = curthread;
	td->td_intr_nesting_level++;
	if (profprocs != 0)
		profclock(&frame);
	if (pscnt == psdiv)
		statclock(&frame);
	td->td_intr_nesting_level--;
}

void
forward_statclock(void)
{
	int map;

	CTR0(KTR_SMP, "forward_statclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_STATCLOCK);
}

/*
 * For each hardclock(), we send an IPI to all other CPUs to have them
 * execute this function.  It would be nice to reduce contention on
 * sched_lock if we could simply peek at the CPU to determine the user/kernel
 * state and call hardclock_process() on the CPU receiving the clock interrupt
 * and then just use a simple IPI to handle any ASTs if needed.
 */
void
forwarded_hardclock(struct clockframe frame)
{
	struct thread *td;

	CTR0(KTR_SMP, "forwarded_hardclock");
	td = curthread;
	td->td_intr_nesting_level++;
	hardclock_process(&frame);
	td->td_intr_nesting_level--;
}

void
forward_hardclock(void)
{
	u_int map;

	CTR0(KTR_SMP, "forward_hardclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
	if (map != 0)
		ipi_selected(map, IPI_HARDCLOCK);
}

/*
 * send an IPI to a set of CPUs.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	int cpu;

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	while ((cpu = ffs(cpus)) != 0) {
		cpu--;
		KASSERT(cpu_apic_ids[cpu] != -1,
		    ("IPI to non-existent CPU %d", cpu));
		lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
		cpus &= ~(1 << cpu);
	}
}

/*
 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	mtx_lock_spin(&sched_lock);
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");
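
/*
 * Example, assuming a hypothetical 4-CPU box: "sysctl machdep.hlt_cpus=6"
 * halts CPUs 1 and 2 (binary 110).  The handler above refuses to halt
 * every CPU at once by clearing bit 0 whenever the new mask would cover
 * all_cpus.
 */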

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}