2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Berkeley Software Design Inc's name may not be used to endorse or
15 * promote products derived from this software without specific prior
18 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * from BSDI: pmap.c,v 1.28.2.15 2000/04/27 03:10:31 cp Exp
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/queue.h>
43 #include <sys/mutex.h>
45 #include <sys/rwlock.h>
47 #include <sys/sysctl.h>
48 #include <sys/systm.h>
51 #include <vm/vm_param.h>
52 #include <vm/vm_kern.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_object.h>
56 #include <vm/vm_extern.h>
57 #include <vm/vm_pageout.h>
59 #include <machine/cpufunc.h>
60 #include <machine/frame.h>
61 #include <machine/trap.h>
62 #include <machine/pmap.h>
63 #include <machine/smp.h>
64 #include <machine/tlb.h>
65 #include <machine/tsb.h>
66 #include <machine/tte.h>
/*
 * A tte must be exactly (1 << TTE_SHIFT) bytes so that TSB indexing can use
 * shifts instead of multiplies.
 */
68 CTASSERT((1 << TTE_SHIFT) == sizeof(struct tte));
/*
 * NOTE(review): presumably the bucket mask must fit within a 12-bit field
 * used when forming bucket addresses — confirm against tsb.h.
 */
69 CTASSERT(TSB_BUCKET_MASK < (1 << 12));
/*
 * TSB operation counters (lookup/enter per kernel/user pmap, replacements,
 * off-colour enters, and whole-TSB traversals).  The "_oc" variants count
 * enters whose dcache colour differs between va and pa; "_k"/"_u" split
 * kernel vs. user pmaps.  These expand to nothing unless pmap statistics
 * are compiled in.
 */
71 PMAP_STATS_VAR(tsb_nrepl);
72 PMAP_STATS_VAR(tsb_nlookup_k);
73 PMAP_STATS_VAR(tsb_nlookup_u);
74 PMAP_STATS_VAR(tsb_nenter_k);
75 PMAP_STATS_VAR(tsb_nenter_k_oc);
76 PMAP_STATS_VAR(tsb_nenter_u);
77 PMAP_STATS_VAR(tsb_nenter_u_oc);
78 PMAP_STATS_VAR(tsb_nforeach);
/*
 * Kernel TSB descriptor: virtual base of the tte array, the mask used when
 * indexing it, its size in bytes, and its physical base address.
 */
80 struct tte *tsb_kernel;
81 vm_size_t tsb_kernel_mask;
82 vm_size_t tsb_kernel_size;
83 vm_paddr_t tsb_kernel_phys;
/*
 * NOTE(review): name suggests this flags whether the kernel TSB is accessed
 * with physical-address (ldd) loads — confirm against the CPU setup code.
 */
84 u_int tsb_kernel_ldd_phys;
/*
 * Look up the tte mapping virtual address va in pm's TSB.
 * NOTE(review): this excerpt is missing lines (the return-type line, the
 * computation of tp, return statements, and closing braces); the comments
 * below describe only the structure that is visible here.
 */
87 tsb_tte_lookup(pmap_t pm, vm_offset_t va)
94 if (pm == kernel_pmap) {
95 PMAP_STATS_INC(tsb_nlookup_k);
/* Kernel path: probe the direct-mapped kernel TSB entry for va. */
97 if (tte_match(tp, va))
/* User path: caller must hold the pmap lock. */
100 PMAP_LOCK_ASSERT(pm, MA_OWNED);
101 PMAP_STATS_INC(tsb_nlookup_u);
/*
 * Try each supported page size; each size hashes va to its own bucket,
 * and every slot in the bucket is a candidate.
 */
102 for (sz = TS_MIN; sz <= TS_MAX; sz++) {
103 bucket = tsb_vtobucket(pm, sz, va);
104 for (i = 0; i < TSB_BUCKET_SIZE; i++) {
106 if (tte_match(tp, va))
/*
 * Enter a tte mapping va to page m (with page size sz and hardware data
 * bits) into pm's TSB, evicting an existing entry if the bucket is full.
 * NOTE(review): this excerpt is missing lines (declarations, the CTR/log
 * call, else branches, slot-selection details, and the trailing data
 * store/return); comments describe only what is visible.
 */
115 tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long sz, u_long data)
/*
 * Detect virtually-aliased ("off colour") mappings: the dcache colour of
 * the physical page differs from that of va.  Only counted/logged here.
 */
124 if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) {
126 "tsb_tte_enter: off colour va=%#lx pa=%#lx o=%p ot=%d pi=%#lx",
127 va, VM_PAGE_TO_PHYS(m), m->object,
128 m->object ? m->object->type : -1,
130 if (pm == kernel_pmap)
131 PMAP_STATS_INC(tsb_nenter_k_oc);
133 PMAP_STATS_INC(tsb_nenter_u_oc);
/* Caller must hold the global tte list lock and the pmap lock. */
136 rw_assert(&tte_list_global_lock, RA_WLOCKED);
137 PMAP_LOCK_ASSERT(pm, MA_OWNED);
138 if (pm == kernel_pmap) {
139 PMAP_STATS_INC(tsb_nenter_k);
/* Kernel TSB is direct-mapped and must never replace a valid entry. */
140 tp = tsb_kvtotte(va);
141 KASSERT((tp->tte_data & TD_V) == 0,
142 ("tsb_tte_enter: replacing valid kernel mapping"));
145 PMAP_STATS_INC(tsb_nenter_u);
147 bucket = tsb_vtobucket(pm, sz, va);
/*
 * Pick a pseudo-random starting slot from the tick counter, then scan
 * the bucket: prefer an invalid slot, otherwise remember a candidate
 * (an unreferenced entry, it appears) for replacement.
 */
151 b0 = rd(tick) & (TSB_BUCKET_SIZE - 1);
154 if ((bucket[i].tte_data & TD_V) == 0) {
159 if ((bucket[i].tte_data & TD_REF) == 0)
161 else if (rtp == NULL)
164 } while ((i = (i + 1) & (TSB_BUCKET_SIZE - 1)) != b0);
/* Evict the victim: tear down its old mapping and demap its va. */
168 if ((tp->tte_data & TD_V) != 0) {
169 PMAP_STATS_INC(tsb_nrepl);
170 ova = TTE_GET_VA(tp);
171 pmap_remove_tte(pm, NULL, tp, ova);
172 tlb_page_demap(pm, ova);
/* Real (non-fictitious) pages get accounting and cacheability checks. */
176 if ((m->flags & PG_FICTITIOUS) == 0) {
178 if ((m->oflags & VPO_UNMANAGED) == 0) {
179 pm->pm_stats.resident_count++;
/* Link the tte onto the page's mapping list after colour handling. */
182 if (pmap_cache_enter(m, va) != 0)
184 TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
/* Fictitious pages: mark the mapping fake and side-effect (TD_E). */
186 data |= TD_FAKE | TD_E;
188 tp->tte_vpn = TV_VPN(va, sz);
195 * Traverse the tsb of a pmap, calling the callback function for any tte entry
196 * that has a virtual address between start and end. If this function returns 0,
197 * tsb_foreach() terminates.
198 * This is used by pmap_remove(), pmap_protect(), and pmap_copy() in the case
199 * that the number of pages in the range given to them reaches the
200 * dimensions of the tsb size as an optimization.
/*
 * NOTE(review): the function continues beyond this excerpt (the user-pmap
 * branch, callback-abort handling, and closing braces are not visible);
 * comments cover only the visible kernel-pmap scan.
 */
203 tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
204 tsb_callback_t *callback)
212 PMAP_STATS_INC(tsb_nforeach);
213 if (pm1 == kernel_pmap) {
/* Kernel TSB: linear scan over every tte slot. */
215 n = tsb_kernel_size / sizeof(struct tte);
220 for (i = 0; i < n; i++) {
/* Invoke the callback only for valid entries inside [start, end). */
222 if ((tp->tte_data & TD_V) != 0) {
224 if (va >= start && va < end) {
225 if (!callback(pm1, pm2, tp, va))