/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
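/*
 * Editor's note on the warm-boot protocol (a sketch, based on the PC
 * BIOS convention): writing BIOS_WARM into the CMOS shutdown status
 * byte (register BIOS_RESET) makes the BIOS skip POST on the next INIT
 * and jump through the real-mode far pointer stored at 40:67, i.e. at
 * WARMBOOT_OFF/WARMBOOT_SEG above.  native_start_all_aps() points that
 * vector at the AP trampoline before sending the startup IPIs.
 */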
#define GiB(v)			(v ## ULL << 30)
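/* For example, GiB(4) expands to (4ULL << 30), i.e. 0x100000000. */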
#define AP_BOOTPT_SZ		(PAGE_SIZE * 4)
/* Temporary variables for init_secondary()  */
char *doublefault_stack;
char *mce_stack;
char *nmi_stack;
char *dbg_stack;
void *bootpcpu;

extern u_int mptramp_la57;
extern u_int mptramp_nx;
/*
 * Local data and functions.
 */

static int	start_ap(int apic_id, vm_paddr_t boot_address);
/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	mptramp_la57 = la57;
	mptramp_nx = pg_nx != 0;
	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
	mptramp_pagetables = kernel_pmap->pm_cr3;
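	/*
	 * Editor's note: the MPASS above encodes a trampoline
	 * assumption.  The AP starts in 16-bit real mode and loads
	 * mptramp_pagetables into %cr3 while still running 32-bit
	 * code, before long mode is enabled, so the kernel's top-level
	 * page table must live below 4G.
	 */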
	/* Start each Application Processor */
	init_ops.start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Initialize the PCPU area. */
	pc = bootpcpu;
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
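	/*
	 * Editor's note: the mask above rounds the PTI trampoline
	 * stack top down to a 16-byte boundary, matching the amd64
	 * ABI stack alignment expectations.
	 */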
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;
	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;
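	/*
	 * Editor's summary: IST1-IST4 give the #DF, NMI, #MC and #DB
	 * handlers private, known-good stacks regardless of the state
	 * of the interrupted context.  A struct nmi_pcpu is placed at
	 * the top of each stack so a handler can recover its PCPU
	 * pointer (np_pcpu) even when %gs cannot be trusted.
	 */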
	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);			/* does magic intra-segment return */
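	/*
	 * Editor's note: reloading the segment registers as part of
	 * the lgdt() sequence clears the hidden FS/GS base, so the
	 * per-CPU pointer must be re-established through the MSRs
	 * below before any PCPU_*() access.
	 */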
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (uint64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}
/*******************************************************************
 * local functions and data
 */

static void
mp_realloc_pcpu(int cpuid, int domain)
{
	vm_page_t m;
	vm_offset_t oa, na;

	oa = (vm_offset_t)&__pcpu[cpuid];
	if (vm_phys_domain(pmap_kextract(oa)) == domain)
		return;
	m = vm_page_alloc_noobj_domain(domain, 0);
	if (m == NULL)
		return;
	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	pagecopy((void *)oa, (void *)na);
	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
	/* XXX old pcpu page leaked. */
}
/*
 * start each AP in our list
 */
int
native_start_all_aps(void)
{
	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
	pml5_entry_t old_pml45;
	pml4_entry_t *v_pml4;
	pdp_entry_t *v_pdp;
	pd_entry_t *v_pd;
	vm_paddr_t boot_address;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	MPASS(bootMP_size <= PAGE_SIZE);
	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	boot_address = VM_PAGE_TO_PHYS(m_boottramp);
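	/*
	 * Editor's note: the trampoline page must be below 1M because
	 * the AP begins execution in real mode, and it must be
	 * page-aligned because the startup IPI can only express the
	 * entry point as an 8-bit page-frame number (see start_ap()).
	 */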
	/* Create a transient 1:1 mapping of low 4G */
	if (la57) {
		m_pml4 = pmap_page_alloc_below_4g(true);
		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
	} else {
		v_pml4 = &kernel_pmap->pm_pmltop[0];
	}
	m_pdp = pmap_page_alloc_below_4g(true);
	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
	m_pd[0] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
		    X86_PG_M | PG_PS;
	m_pd[1] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
		    X86_PG_A | X86_PG_M | PG_PS;
	m_pd[2] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	m_pd[3] = pmap_page_alloc_below_4g(false);
	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
	for (i = 0; i < NPDEPG; i++)
		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	old_pml45 = kernel_pmap->pm_pmltop[0];
	if (la57) {
		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
	}
	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
	    X86_PG_RW | X86_PG_A | X86_PG_M;
	pmap_invalidate_all(kernel_pmap);
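	/*
	 * Editor's summary of the transient map: PML4 slot 0 points at
	 * one PDP page whose first four entries cover the PDs built
	 * above, each PD mapping 1G of physical memory with 512 2M
	 * (PG_PS) superpages, so virtual == physical for the low 4G
	 * while the APs bootstrap themselves into long mode.
	 */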
	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
	if (bootverbose)
		printf("AP boot address %#lx\n", boot_address);

	/* save the current value of the warm-start vector */
	if (!efi_boot)
		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	if (!efi_boot) {
		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
	}
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* Relocate pcpu areas to the correct domain. */
#if defined(DEV_ACPI) && MAXMEMDOM > 1
	if (vm_ndomains > 1)
		for (cpu = 1; cpu < mp_ncpus; cpu++) {
			apic_id = cpu_apic_ids[cpu];
			domain = acpi_pxm_get_cpu_locality(apic_id);
			mp_realloc_pcpu(cpu, domain);
		}
#endif

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
		domain = 0;
#if defined(DEV_ACPI) && MAXMEMDOM > 1
		domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(DBLFAULT_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		mce_stack = (char *)kmem_malloc(MCE_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootpcpu = &__pcpu[cpu];
		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
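		/*
		 * Editor's note: bootSTK is handed to the trampoline
		 * as the AP's initial stack pointer; the -8 leaves the
		 * slot a call would have consumed, presumably keeping
		 * the stack 16-byte aligned on entry to
		 * init_secondary().
		 */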
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id, boot_address)) {
			/* restore the warmstart vector */
			if (!efi_boot)
				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	if (!efi_boot)
		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* Destroy transient 1:1 mapping */
	kernel_pmap->pm_pmltop[0] = old_pml45;
	invlpg(0);
	if (la57)
		vm_page_free(m_pml4);
	vm_page_free(m_pd[3]);
	vm_page_free(m_pd[2]);
	vm_page_free(m_pd[1]);
	vm_page_free(m_pd[0]);
	vm_page_free(m_pdp);
	vm_page_free(m_boottramp);

	/* number of APs actually started */
	return (mp_naps);
}
/*
 * This function starts the AP (application processor) identified by the
 * APIC ID 'apic_id'.  It does quite a "song and dance" to accomplish
 * this.  This is necessary because of the nuances of the different
 * hardware we might encounter.  It isn't pretty, but it seems to work.
 */
static int
start_ap(int apic_id, vm_paddr_t boot_address)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
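	/*
	 * Editor's note: per the Intel MP startup protocol, the SIPI
	 * vector byte is the physical page-frame number of the entry
	 * point; the AP begins execution at vector << 12, which is why
	 * boot_address must be page-aligned and below 1M.
	 */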
	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}
/*
 * Flush the TLB on other CPUs.
 */

/*
 * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
 * enum to avoid both namespace and ABI issues (with enums).
 */
enum invl_op_codes {
	INVL_OP_TLB = 1,
	INVL_OP_TLB_INVPCID = 2,
	INVL_OP_TLB_INVPCID_PTI = 3,
	INVL_OP_TLB_PCID = 4,
	INVL_OP_PGRNG = 5,
	INVL_OP_PGRNG_INVPCID = 6,
	INVL_OP_PGRNG_PCID = 7,
	INVL_OP_PG = 8,
	INVL_OP_PG_INVPCID = 9,
	INVL_OP_PG_PCID = 10,
	INVL_OP_CACHE = 11,
};
/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;
/*
 * Scoreboard of IPI completion notifications from target to IPI initiator.
 *
 * Each CPU can initiate a shootdown IPI independently of the other
 * CPUs.  The initiator enters a critical section, fills in its local
 * PCPU shootdown info (the pc_smp_tlb_* variables), and then clears
 * the scoreboard generation at location (cpu, my_cpuid) for each
 * target cpu.  After that an IPI is sent to all targets, which scan
 * for zeroed scoreboard generation words.  Upon finding such a word
 * the shootdown data is read from the corresponding cpu's pcpu, and
 * the generation is set.  Meanwhile, the initiator loops waiting for
 * all the zeroed generations in the scoreboard to update.
 */
static uint32_t *invl_scoreboard;
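/*
 * Editor's sketch of the layout: the scoreboard is a square
 * (mp_maxid + 1) x (mp_maxid + 1) matrix of generation words, indexed
 * as
 *
 *	invl_scoreboard[target_cpu * (mp_maxid + 1) + initiator_cpu]
 *
 * (see invl_scoreboard_getcpu() and invl_scoreboard_slot() below).
 * A zero slot means "request pending"; the target acknowledges by
 * storing the initiator's generation number into the slot.
 */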
static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP, SI_ORDER_FIRST, invl_scoreboard_init, NULL);
static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}
/*
 * Used by the pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs that are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU in a critical
 * section while waiting for the remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with smp
 * started, to avoid deadlock with other IPIs that are protected with
 * smp_ipi_mtx spinlock at the initiator side.
 *
 * The function must be called with the thread pinned, and it unpins on
 * completion.
 */
static void
smp_targeted_tlb_shootdown(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
    smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t mask;
	uint32_t generation, *p_cpudone;
	int cpu;
	bool is_all;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
		goto local_cb;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Make a stable copy of the set of CPUs on which the pmap is active.
	 * See if we have to interrupt other CPUs.
	 */
	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
	is_all = CPU_CMP(&mask, &all_cpus) == 0;
	CPU_CLR(curcpu, &mask);
	if (CPU_EMPTY(&mask))
		goto local_cb;

	/*
	 * Initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take smp_ipi_mtx spinlock,
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
	generation = PCPU_GET(smp_tlb_gen);
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
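	/*
	 * Editor's note: generation zero is reserved to mean "request
	 * pending" in the scoreboard, which is why the counter above
	 * wraps from 0 to 1 instead of ever publishing 0.
	 */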
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	CPU_FOREACH_ISSET(cpu, &mask) {
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    PCPU_GET(cpuid), cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (is_all)
		ipi_all_but_self(IPI_INVLOP);
	else
		ipi_selected(mask, IPI_INVLOP);
	curcpu_cb(pmap, addr1, addr2);
	CPU_FOREACH_ISSET(cpu, &mask) {
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}

	/*
	 * Unpin before leaving critical section.  If the thread owes
	 * preemption, this allows scheduler to select thread on any
	 * CPU from its cpuset.
	 */
	sched_unpin();
	critical_exit();

	return;

local_cb:
	critical_enter();
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
	critical_exit();
}
void
smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
}
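/*
 * Editor's usage sketch (hypothetical caller): a pmap-layer routine
 * such as pmap_invalidate_page() would pin the thread and then call,
 * e.g.,
 *
 *	smp_masked_invlpg(va, pmap, pmap_invalidate_page_curcpu_cb);
 *
 * where the callback performs the local invalidation; remote CPUs are
 * reached through the IPI_INVLOP scoreboard machinery above.
 */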
/*
 * Handlers for TLB-related IPIs
 */
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}
static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}
static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap))
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}
static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
			load_cr3(smp_tlb_pmap->pm_cr3 | pcid);
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}
static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}
static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}
static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}
static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
}
static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < addr2);
	}
}
static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
	}
}
static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	wbinvd();
}
static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}
static void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() is between
		 * reading zero scoreboard slot and accessing PCPU of
		 * initiator for pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  It is
		 * safe because the target CPU cannot return to
		 * userspace before the handler finishes.  Only an NMI
		 * can preempt the handler, and the NMI would see the
		 * kernel handler frame and not touch the
		 * not-yet-invalidated user page table.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}