2 * Copyright (c) 2001 Jake Burkholder.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
37 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
44 #include <machine/smp.h>
45 #include <machine/tlb.h>
46 #include <machine/vmparam.h>
/* Per-routine pmap statistics counters for the three demap flavors below. */
48 PMAP_STATS_VAR(tlb_ncontext_demap);
49 PMAP_STATS_VAR(tlb_npage_demap);
50 PMAP_STATS_VAR(tlb_nrange_demap);
/*
 * CPU-model-specific TLB flush entry points.
 * NOTE(review): initialization is not visible in this chunk — presumably set
 * during CPU identification at boot; confirm against the machdep startup code.
 */
52 tlb_flush_nonlocked_t *tlb_flush_nonlocked;
53 tlb_flush_user_t *tlb_flush_user;
56 * Some tlb operations must be atomic, so no interrupt or trap can be allowed
57 * while they are in progress. Traps should not happen, but interrupts need to
58 * be explicitly disabled. critical_enter() cannot be used here, since it only
59 * disables soft interrupts.
/*
 * Demap (invalidate) all TLB entries belonging to pmap pm's context.
 * Dispatches an IPI so that other CPUs with pm active do the same, then
 * flushes the local D-MMU and I-MMU if this CPU has pm active.
 * NOTE(review): the local variable declarations, interrupt disable/restore,
 * and the wait for IPI completion are on lines elided from this extraction.
 */
63 tlb_context_demap(struct pmap *pm)
69 * It is important that we are not interrupted or preempted while
70 * doing the IPIs. The interrupted CPU may hold locks, and since
71 * it will wait for the CPU that sent the IPI, this can lead
72 * to a deadlock when an interrupt comes in on that CPU and its
73 * handler tries to grab one of those locks. This will only happen for
74 * spin locks, but these IPI types are delivered even if normal
75 * interrupts are disabled, so the lock critical section will not
76 * protect the target processor from entering the IPI handler with
79 PMAP_STATS_INC(tlb_ncontext_demap);
/* Kick off the remote demaps first; the cookie is used to wait for them. */
80 cookie = ipi_tlb_context_demap(pm);
/* Only touch the local MMUs if this pmap is active on this CPU. */
82 if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
83 KASSERT(pm->pm_context[curcpu] != -1,
84 ("tlb_context_demap: inactive pmap?"));
/* Demap the whole primary context in both the data and instruction MMUs. */
85 stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
86 stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
/*
 * Demap the single TLB entry mapping virtual address va in pmap pm,
 * locally and (via IPI) on all other CPUs that have pm active.
 * NOTE(review): locals, interrupt disable/restore and the IPI wait are on
 * lines elided from this extraction.
 */
94 tlb_page_demap(struct pmap *pm, vm_offset_t va)
100 PMAP_STATS_INC(tlb_npage_demap);
101 cookie = ipi_tlb_page_demap(pm, va);
103 if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
104 KASSERT(pm->pm_context[curcpu] != -1,
105 ("tlb_page_demap: inactive pmap?"));
/*
 * Kernel mappings live in the nucleus context; user mappings in the
 * primary context.  (The `else` keyword, orig line 108, is elided here.)
 */
106 if (pm == kernel_pmap)
107 flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
109 flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
/* Page demap in both the data and instruction MMUs. */
111 stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
112 stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
/*
 * Demap all TLB entries for the virtual address range [start, end) in pmap
 * pm, one page at a time, locally and (via IPI) on other CPUs with pm
 * active.  NOTE(review): locals, interrupt disable/restore, the loop/if
 * closing braces and the IPI wait continue past this extraction.
 */
120 tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
127 PMAP_STATS_INC(tlb_nrange_demap);
128 cookie = ipi_tlb_range_demap(pm, start, end);
130 if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
131 KASSERT(pm->pm_context[curcpu] != -1,
132 ("tlb_range_demap: inactive pmap?"));
/*
 * Kernel mappings use the nucleus context, user mappings the primary
 * context.  (The `else` keyword, orig line 135, is elided here.)
 */
133 if (pm == kernel_pmap)
134 flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
136 flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
/* Issue one page demap per page in the range, for both D-MMU and I-MMU. */
138 for (va = start; va < end; va += PAGE_SIZE) {
139 stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
140 stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);