/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif
#define	WARMBOOT_TARGET		0
#define	WARMBOOT_OFF		(KERNBASE + 0x0467)
#define	WARMBOOT_SEG		(KERNBASE + 0x0469)

#define	CMOS_REG		(0x70)
#define	CMOS_DATA		(0x71)
#define	BIOS_RESET		(0x0f)
#define	BIOS_WARM		(0x0a)

#define	GiB(v)			(v ## ULL << 30)

#define	AP_BOOTPT_SZ		(PAGE_SIZE * 3)
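/*
 * The AP bootstrap page table area holds three pages: one PML4, one
 * PDP, and one PD page, filled in by native_start_all_aps() below.
 */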
/* Temporary variables for init_secondary()  */
char *doublefault_stack;
char *mce_stack;
char *nmi_stack;
char *dbg_stack;
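/*
 * Each of these pointers is repointed at a freshly allocated page for
 * every AP in native_start_all_aps() and is consumed by that AP in
 * init_secondary() before the next AP is started.
 */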
/*
 * Local data and functions.
 */

static int	start_ap(int apic_id);
static bool
is_kernel_paddr(vm_paddr_t pa)
{

	return (pa >= trunc_2mpage(btext - KERNBASE) &&
	    pa < round_page(_end - KERNBASE));
}
static bool
is_mpboot_good(vm_paddr_t start, vm_paddr_t end)
{

	return (start + AP_BOOTPT_SZ <= GiB(4) && atop(end) < Maxmem);
}
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
void
mp_bootaddress(vm_paddr_t *physmap, unsigned int *physmap_idx)
{
	vm_paddr_t start, end;
	unsigned int i;
	bool allocated;

	alloc_ap_trampoline(physmap, physmap_idx);

	/*
	 * Find a memory region big enough below the 4GB boundary to
	 * store the initial page tables.  Region must be mapped by
	 * the direct map.
	 *
	 * Note that it needs to be aligned to a page boundary.
	 */
	allocated = false;
	/*
	 * Walk the physmap (start, end) pairs from the top down.  Since
	 * i is unsigned, decrementing it below zero wraps to a value
	 * larger than *physmap_idx and terminates the loop.
	 */
	for (i = *physmap_idx; i <= *physmap_idx; i -= 2) {
		/*
		 * First, try to chomp at the start of the physmap region.
		 * Kernel binary might claim it already.
		 */
		start = round_page(physmap[i]);
		end = start + AP_BOOTPT_SZ;
		if (start < end && end <= physmap[i + 1] &&
		    is_mpboot_good(start, end) &&
		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
			allocated = true;
			physmap[i] = end;
			break;
		}

		/*
		 * Second, try to chomp at the end.  Again, check
		 * against the kernel.
		 */
		end = trunc_page(physmap[i + 1]);
		start = end - AP_BOOTPT_SZ;
		if (start < end && start >= physmap[i] &&
		    is_mpboot_good(start, end) &&
		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
			allocated = true;
			physmap[i + 1] = start;
			break;
		}
	}
	if (allocated) {
		mptramp_pagetables = start;
		if (physmap[i] == physmap[i + 1] && *physmap_idx != 0) {
			memmove(&physmap[i], &physmap[i + 2],
			    sizeof(*physmap) * (*physmap_idx - i + 2));
			*physmap_idx -= 2;
		}
	} else {
		mptramp_pagetables = trunc_page(boot_address) - AP_BOOTPT_SZ;
		if (bootverbose)
			printf(
"Cannot find enough space for the initial AP page tables, placing them at %#x",
			    mptramp_pagetables);
	}
}
/*
 * Initialize the IPI handlers and start up the AP's.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	init_ops.start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Get per-cpu data and save  */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;

	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *)&nmi_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *)&mce_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *)&dbg_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s:	CR0_PG, CR0_PE
	 * Set by cpu_setregs:	CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}
/*******************************************************************
 * local functions and data
 */

#ifdef NUMA
static void
mp_realloc_pcpu(int cpuid, int domain)
{
	vm_page_t m;
	vm_offset_t oa, na;

	oa = (vm_offset_t)&__pcpu[cpuid];
	if (_vm_phys_domain(pmap_kextract(oa)) == domain)
		return;
	m = vm_page_alloc_domain(NULL, 0, domain,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
	if (m == NULL)
		return;
	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	pagecopy((void *)oa, (void *)na);
	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
	/* XXX old pcpu page leaked. */
}
#endif
/*
 * start each AP in our list
 */
int
native_start_all_aps(void)
{
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (uint64_t *)PHYS_TO_DMAP(mptramp_pagetables);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
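	/*
	 * With every PML4 and PDP slot pointing at the same child page,
	 * any virtual address resolves to its low-1GB alias (VA mod
	 * 1GB), so the trampoline can turn on paging while still
	 * executing at its physical address below 1MB.
	 */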
	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* Relocate pcpu areas to the correct domain. */
#ifdef NUMA
	if (vm_ndomains > 1)
		for (cpu = 1; cpu < mp_ncpus; cpu++) {
			apic_id = cpu_apic_ids[cpu];
			domain = acpi_pxm_get_cpu_locality(apic_id);
			mp_realloc_pcpu(cpu, domain);
		}
#endif

	/* start each AP */
	domain = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK |
		    M_ZERO);
		mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return (mp_naps);
}
/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}
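/*
 * Note: the startup vector is the physical page number of the
 * trampoline, which must therefore sit on a page boundary below 1MB;
 * ipi_startup() issues the architected INIT/STARTUP IPI sequence with
 * that vector.
 */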
/*
 * Flush the TLB on other CPU's
 */

/*
 * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
 * enum to avoid both namespace and ABI issues (with enums).
 */
enum invl_op_codes {
	INVL_OP_TLB		= 1,
	INVL_OP_TLB_INVPCID	= 2,
	INVL_OP_TLB_INVPCID_PTI	= 3,
	INVL_OP_TLB_PCID	= 4,
	INVL_OP_PGRNG		= 5,
	INVL_OP_PGRNG_INVPCID	= 6,
	INVL_OP_PGRNG_PCID	= 7,
	INVL_OP_PG		= 8,
	INVL_OP_PG_INVPCID	= 9,
	INVL_OP_PG_PCID		= 10,
	INVL_OP_CACHE		= 11,
};
/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;
/*
 * Scoreboard of IPI completion notifications from target to IPI initiator.
 *
 * Each CPU can initiate a shootdown IPI independently of the other
 * CPUs.  The initiator enters a critical section, fills in its local
 * PCPU shootdown info (the pc_smp_tlb_ variables), and then clears the
 * scoreboard generation at location (cpu, my_cpuid) for each target
 * cpu.  After that, the IPI is sent to all targets, which scan for
 * zeroed scoreboard generation words.  Upon finding such a word, a
 * target reads the shootdown data from the corresponding cpu's pcpu
 * and sets the generation.  Meanwhile, the initiator loops waiting
 * for all of the zeroed generations in the scoreboard to update.
 */
static uint32_t *invl_scoreboard;
static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP, SI_ORDER_FIRST, invl_scoreboard_init, NULL);
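/*
 * Scoreboard slots start at 1, and the per-CPU generation counter
 * skips 0 when it wraps (see smp_targeted_tlb_shootdown()), so a zero
 * slot unambiguously means "request pending".
 */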
static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}
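/*
 * The scoreboard is thus a dense (mp_maxid + 1) x (mp_maxid + 1)
 * matrix: invl_scoreboard_getcpu() returns a target CPU's row, and
 * invl_scoreboard_slot() selects the column belonging to the calling
 * (initiator) CPU.
 */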
/*
 * Used by pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs which are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU while waiting for
 * remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with SMP
 * started, to avoid deadlock with other IPIs that are protected by
 * the smp_ipi_mtx spinlock at the initiator side.
 */
static void
smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t other_cpus, mask1;
	uint32_t generation, *p_cpudone;
	int cpu;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (kdb_active || KERNEL_PANICKED() || !smp_started) {
		curcpu_cb(pmap, addr1, addr2);
		return;
	}

	sched_pin();

	/*
	 * Check for other cpus.  Return if none.
	 */
	if (CPU_ISFULLSET(&mask)) {
		if (mp_ncpus <= 1)
			goto nospinexit;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			goto nospinexit;
	}

	/*
	 * Initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take smp_ipi_mtx spinlock,
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
	generation = PCPU_GET(smp_tlb_gen);
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	mask1 = mask;
	while ((cpu = CPU_FFS(&mask1)) != 0) {
		cpu--;
		CPU_CLR(cpu, &mask1);
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    PCPU_GET(cpuid), cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (CPU_ISFULLSET(&mask)) {
		ipi_all_but_self(IPI_INVLOP);
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	} else {
		other_cpus = mask;
		ipi_selected(mask, IPI_INVLOP);
	}
	curcpu_cb(pmap, addr1, addr2);
	while ((cpu = CPU_FFS(&other_cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &other_cpus);
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}
	critical_exit();
	sched_unpin();
	return;

nospinexit:
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
}
void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2,
    pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(all_cpus, NULL, 0, 0, curcpu_cb,
	    INVL_OP_CACHE);
}
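/*
 * Usage sketch (illustrative only; the callback name below is
 * hypothetical, not part of this file): a pmap routine is expected to
 * pass its local invalidation as the curcpu_cb argument, roughly:
 *
 *	static void
 *	inval_page_cb(pmap_t pmap, vm_offset_t va, vm_offset_t unused)
 *	{
 *		if (pmap == PCPU_GET(curpmap))
 *			invlpg(va);
 *	}
 *	...
 *	smp_masked_invlpg(all_cpus, va, pmap, inval_page_cb);
 */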
/*
 * Handlers for TLB related IPIs
 */
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}
static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}
static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing a global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap))
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}
static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
			load_cr3(smp_tlb_pmap->pm_cr3 | pcid);
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}
static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}
static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}
static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}
static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
}
static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < addr2);
	}
}
static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
	}
}
static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	wbinvd();
}
static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}
void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (scoreboard[initiator_cpu_id] == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() is between
		 * reading zero scoreboard slot and accessing PCPU of
		 * initiator for pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  This
		 * is safe because the target CPU cannot return to
		 * userspace before the handler finishes.  Only an NMI
		 * can preempt the handler, but the NMI would see the
		 * kernel handler frame and would not touch the
		 * not-yet-invalidated user page table.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}