2 * Copyright (c) 1991 Regents of the University of California.
4 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1994 David Greenman
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 * Manages physical address maps.
46 * In addition to hardware address maps, this module is called upon to
47 * provide software-use-only maps which may or may not be stored in the
48 * same form as hardware maps. These pseudo-maps are used to store
49 * intermediate results from copy operations to and from address spaces.
51 * Since the information managed by this module is also stored by the
52 * logical address mapping module, this module may throw away valid virtual
53 * to physical mappings at almost any time. However, invalidations of
54 * mappings must be done as requested.
56 * In order to cope with hardware architectures which make virtual to
57 * physical map invalidates expensive, this module may delay invalidate or
58 * reduced protection operations until such time as they are actually
59 * necessary. This module is given full information as to which processors
60 * are currently using which maps, and to when physical maps must be made correct.
64 #include "opt_kstack_pages.h"
67 #include <sys/param.h>
68 #include <sys/kernel.h>
71 #include <sys/msgbuf.h>
72 #include <sys/mutex.h>
74 #include <sys/rwlock.h>
76 #include <sys/sysctl.h>
77 #include <sys/systm.h>
78 #include <sys/vmmeter.h>
80 #include <dev/ofw/openfirm.h>
83 #include <vm/vm_param.h>
84 #include <vm/vm_kern.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_extern.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
92 #include <machine/cache.h>
93 #include <machine/frame.h>
94 #include <machine/instr.h>
95 #include <machine/md_var.h>
96 #include <machine/metadata.h>
97 #include <machine/ofw_mem.h>
98 #include <machine/smp.h>
99 #include <machine/tlb.h>
100 #include <machine/tte.h>
101 #include <machine/tsb.h>
102 #include <machine/ver.h>
105 * Virtual address of message buffer
107 struct msgbuf *msgbufp;
110 * Map of physical memory regions
112 vm_paddr_t phys_avail[128];
113 static struct ofw_mem_region mra[128];
114 struct ofw_mem_region sparc64_memreg[128];
116 static struct ofw_map translations[128];
117 static int translations_size;
119 static vm_offset_t pmap_idle_map;
120 static vm_offset_t pmap_temp_map_1;
121 static vm_offset_t pmap_temp_map_2;
124 * First and last available kernel virtual addresses
126 vm_offset_t virtual_avail;
127 vm_offset_t virtual_end;
128 vm_offset_t kernel_vm_end;
130 vm_offset_t vm_max_kernel_address;
135 struct pmap kernel_pmap_store;
138 * Isolate the global TTE list lock from data and other locks to prevent
139 * false sharing within the cache (see also the declaration of struct
142 struct tte_list_lock tte_list_global __aligned(CACHE_LINE_SIZE);
145 * Allocate physical memory for use in pmap_bootstrap.
147 static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size, uint32_t colors);
149 static void pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data);
150 static void pmap_cache_remove(vm_page_t m, vm_offset_t va);
151 static int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2,
152 struct tte *tp, vm_offset_t va);
155 * Map the given physical page at the specified virtual address in the
156 * target pmap with the protection requested. If specified the page
157 * will be wired down.
159 * The page queues and pmap must be locked.
161 static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
162 vm_prot_t prot, boolean_t wired);
164 extern int tl1_dmmu_miss_direct_patch_tsb_phys_1[];
165 extern int tl1_dmmu_miss_direct_patch_tsb_phys_end_1[];
166 extern int tl1_dmmu_miss_patch_asi_1[];
167 extern int tl1_dmmu_miss_patch_quad_ldd_1[];
168 extern int tl1_dmmu_miss_patch_tsb_1[];
169 extern int tl1_dmmu_miss_patch_tsb_2[];
170 extern int tl1_dmmu_miss_patch_tsb_mask_1[];
171 extern int tl1_dmmu_miss_patch_tsb_mask_2[];
172 extern int tl1_dmmu_prot_patch_asi_1[];
173 extern int tl1_dmmu_prot_patch_quad_ldd_1[];
174 extern int tl1_dmmu_prot_patch_tsb_1[];
175 extern int tl1_dmmu_prot_patch_tsb_2[];
176 extern int tl1_dmmu_prot_patch_tsb_mask_1[];
177 extern int tl1_dmmu_prot_patch_tsb_mask_2[];
178 extern int tl1_immu_miss_patch_asi_1[];
179 extern int tl1_immu_miss_patch_quad_ldd_1[];
180 extern int tl1_immu_miss_patch_tsb_1[];
181 extern int tl1_immu_miss_patch_tsb_2[];
182 extern int tl1_immu_miss_patch_tsb_mask_1[];
183 extern int tl1_immu_miss_patch_tsb_mask_2[];
186 * If a user pmap is processed with pmap_remove and the resident count
187 * drops to 0, there are no more pages to remove, so we need not continue.
190 #define PMAP_REMOVE_DONE(pm) \
191 ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
194 * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove()
195 * and pmap_protect() instead of trying each virtual address.
197 #define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE)
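/*
 * Assuming TSB_SIZE counts TTE slots and each slot maps one base page, this
 * threshold is roughly half of the virtual span the TSB can describe; for
 * larger ranges a single sweep over the TSB with tsb_foreach() is expected
 * to be cheaper than a tsb_tte_lookup() for every page in the range.
 */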
199 SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "");
201 PMAP_STATS_VAR(pmap_nenter);
202 PMAP_STATS_VAR(pmap_nenter_update);
203 PMAP_STATS_VAR(pmap_nenter_replace);
204 PMAP_STATS_VAR(pmap_nenter_new);
205 PMAP_STATS_VAR(pmap_nkenter);
206 PMAP_STATS_VAR(pmap_nkenter_oc);
207 PMAP_STATS_VAR(pmap_nkenter_stupid);
208 PMAP_STATS_VAR(pmap_nkremove);
209 PMAP_STATS_VAR(pmap_nqenter);
210 PMAP_STATS_VAR(pmap_nqremove);
211 PMAP_STATS_VAR(pmap_ncache_enter);
212 PMAP_STATS_VAR(pmap_ncache_enter_c);
213 PMAP_STATS_VAR(pmap_ncache_enter_oc);
214 PMAP_STATS_VAR(pmap_ncache_enter_cc);
215 PMAP_STATS_VAR(pmap_ncache_enter_coc);
216 PMAP_STATS_VAR(pmap_ncache_enter_nc);
217 PMAP_STATS_VAR(pmap_ncache_enter_cnc);
218 PMAP_STATS_VAR(pmap_ncache_remove);
219 PMAP_STATS_VAR(pmap_ncache_remove_c);
220 PMAP_STATS_VAR(pmap_ncache_remove_oc);
221 PMAP_STATS_VAR(pmap_ncache_remove_cc);
222 PMAP_STATS_VAR(pmap_ncache_remove_coc);
223 PMAP_STATS_VAR(pmap_ncache_remove_nc);
224 PMAP_STATS_VAR(pmap_nzero_page);
225 PMAP_STATS_VAR(pmap_nzero_page_c);
226 PMAP_STATS_VAR(pmap_nzero_page_oc);
227 PMAP_STATS_VAR(pmap_nzero_page_nc);
228 PMAP_STATS_VAR(pmap_nzero_page_area);
229 PMAP_STATS_VAR(pmap_nzero_page_area_c);
230 PMAP_STATS_VAR(pmap_nzero_page_area_oc);
231 PMAP_STATS_VAR(pmap_nzero_page_area_nc);
232 PMAP_STATS_VAR(pmap_nzero_page_idle);
233 PMAP_STATS_VAR(pmap_nzero_page_idle_c);
234 PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
235 PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
236 PMAP_STATS_VAR(pmap_ncopy_page);
237 PMAP_STATS_VAR(pmap_ncopy_page_c);
238 PMAP_STATS_VAR(pmap_ncopy_page_oc);
239 PMAP_STATS_VAR(pmap_ncopy_page_nc);
240 PMAP_STATS_VAR(pmap_ncopy_page_dc);
241 PMAP_STATS_VAR(pmap_ncopy_page_doc);
242 PMAP_STATS_VAR(pmap_ncopy_page_sc);
243 PMAP_STATS_VAR(pmap_ncopy_page_soc);
245 PMAP_STATS_VAR(pmap_nnew_thread);
246 PMAP_STATS_VAR(pmap_nnew_thread_oc);
248 static inline u_long dtlb_get_data(u_int tlb, u_int slot);
251 * Quick sort callout for comparing memory regions
253 static int mr_cmp(const void *a, const void *b);
254 static int om_cmp(const void *a, const void *b);
257 mr_cmp(const void *a, const void *b)
259 const struct ofw_mem_region *mra;
260 const struct ofw_mem_region *mrb;
264 if (mra->mr_start < mrb->mr_start)
266 else if (mra->mr_start > mrb->mr_start)
273 om_cmp(const void *a, const void *b)
275 const struct ofw_map *oma;
276 const struct ofw_map *omb;
280 if (oma->om_start < omb->om_start)
282 else if (oma->om_start > omb->om_start)
289 dtlb_get_data(u_int tlb, u_int slot)
294 slot = TLB_DAR_SLOT(tlb, slot);
296 * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
297 * work around errata of USIII and beyond.
300 (void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
301 data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
307 * Bootstrap the system enough to run with virtual memory.
310 pmap_bootstrap(u_int cpu_impl)
323 u_int dtlb_slots_avail;
332 * Set the kernel context.
336 colors = dcache_color_ignore != 0 ? 1 : DCACHE_COLORS;
339 * Find out what physical memory is available from the PROM and
340 * initialize the phys_avail array. This must be done before
341 * pmap_bootstrap_alloc is called.
343 if ((pmem = OF_finddevice("/memory")) == -1)
344 OF_panic("%s: finddevice /memory", __func__);
345 if ((sz = OF_getproplen(pmem, "available")) == -1)
346 OF_panic("%s: getproplen /memory/available", __func__);
347 if (sizeof(phys_avail) < sz)
348 OF_panic("%s: phys_avail too small", __func__);
349 if (sizeof(mra) < sz)
350 OF_panic("%s: mra too small", __func__);
352 if (OF_getprop(pmem, "available", mra, sz) == -1)
353 OF_panic("%s: getprop /memory/available", __func__);
355 CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
356 qsort(mra, sz, sizeof (*mra), mr_cmp);
358 getenv_quad("hw.physmem", &physmem);
359 physmem = btoc(physmem);
360 for (i = 0, j = 0; i < sz; i++, j += 2) {
361 CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start,
363 if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) {
364 if (btoc(physsz) < physmem) {
365 phys_avail[j] = mra[i].mr_start;
366 phys_avail[j + 1] = mra[i].mr_start +
367 (ctob(physmem) - physsz);
368 physsz = ctob(physmem);
372 phys_avail[j] = mra[i].mr_start;
373 phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
374 physsz += mra[i].mr_size;
376 physmem = btoc(physsz);
379 * Calculate the size of kernel virtual memory, and the size and mask
380 * for the kernel TSB based on the physical memory size but limited
381 * by the amount of dTLB slots available for locked entries if we have
382 * to lock the TSB in the TLB (given that for spitfire-class CPUs all
383 * of the dt64 slots can hold locked entries but there is no large
384 * dTLB for unlocked ones, we don't use more than half of it for the TSB).
386 * Note that for reasons unknown OpenSolaris doesn't take advantage of
387 * ASI_ATOMIC_QUAD_LDD_PHYS on UltraSPARC-III. However, given that no
388 * public documentation is available for these, the latter just might
389 * not support it, yet.
391 if (cpu_impl == CPU_IMPL_SPARC64V ||
392 cpu_impl >= CPU_IMPL_ULTRASPARCIIIp) {
393 tsb_kernel_ldd_phys = 1;
394 virtsz = roundup(5 / 3 * physsz, PAGE_SIZE_4M <<
395 (PAGE_SHIFT - TTE_SHIFT));
397 dtlb_slots_avail = 0;
398 for (i = 0; i < dtlb_slots; i++) {
399 data = dtlb_get_data(cpu_impl ==
400 CPU_IMPL_ULTRASPARCIII ? TLB_DAR_T16 :
402 if ((data & (TD_V | TD_L)) != (TD_V | TD_L))
406 dtlb_slots_avail -= PCPU_PAGES;
408 if (cpu_impl >= CPU_IMPL_ULTRASPARCI &&
409 cpu_impl < CPU_IMPL_ULTRASPARCIII)
410 dtlb_slots_avail /= 2;
411 virtsz = roundup(physsz, PAGE_SIZE_4M <<
412 (PAGE_SHIFT - TTE_SHIFT));
413 virtsz = MIN(virtsz, (dtlb_slots_avail * PAGE_SIZE_4M) <<
414 (PAGE_SHIFT - TTE_SHIFT));
416 vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
417 tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
418 tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
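/*
 * As a sizing sketch (assuming 8 KB base pages, PAGE_SHIFT == 13, and
 * 16-byte TTEs, TTE_SHIFT == 4): every byte of TSB covers
 * 1 << (PAGE_SHIFT - TTE_SHIFT) == 512 bytes of KVA, so one 8 KB page of
 * TSB holds 512 TTEs and maps 4 MB of kernel virtual address space.
 * tsb_kernel_mask is the corresponding TTE index mask, i.e. the number of
 * TTE slots minus one.
 */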
421 * Allocate the kernel TSB and lock it in the TLB if necessary.
423 pa = pmap_bootstrap_alloc(tsb_kernel_size, colors);
424 if (pa & PAGE_MASK_4M)
425 OF_panic("%s: TSB unaligned", __func__);
426 tsb_kernel_phys = pa;
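/*
 * If the TSB cannot be accessed physically (tsb_kernel_ldd_phys == 0), it
 * is placed in virtual memory just below VM_MIN_KERNEL_ADDRESS and is later
 * mapped with locked TLB entries; otherwise it is reached through the
 * direct map and ASI_ATOMIC_QUAD_LDD_PHYS, leaving the locked dTLB slots
 * free for other uses.
 */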
427 if (tsb_kernel_ldd_phys == 0) {
429 (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
431 bzero(tsb_kernel, tsb_kernel_size);
434 (struct tte *)TLB_PHYS_TO_DIRECT(tsb_kernel_phys);
435 aszero(ASI_PHYS_USE_EC, tsb_kernel_phys, tsb_kernel_size);
439 * Allocate and map the dynamic per-CPU area for the BSP.
441 pa = pmap_bootstrap_alloc(DPCPU_SIZE, colors);
442 dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pa);
445 * Allocate and map the message buffer.
447 pa = pmap_bootstrap_alloc(msgbufsize, colors);
448 msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa);
451 * Patch the TSB addresses and mask as well as the ASIs used to load
452 * it into the trap table.
455 #define LDDA_R_I_R(rd, imm_asi, rs1, rs2) \
456 (EIF_OP(IOP_LDST) | EIF_F3_RD(rd) | EIF_F3_OP3(INS3_LDDA) | \
457 EIF_F3_RS1(rs1) | EIF_F3_I(0) | EIF_F3_IMM_ASI(imm_asi) | \
459 #define OR_R_I_R(rd, imm13, rs1) \
460 (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \
461 EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
462 #define SETHI(rd, imm22) \
463 (EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \
464 EIF_IMM((imm22) >> 10, 22))
465 #define WR_R_I(rd, imm13, rs1) \
466 (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_WR) | \
467 EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
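/*
 * The PATCH_* macros below patch run-time constants into the TLB miss
 * handlers of the trap table. Each one first verifies that the target
 * instruction still matches the expected template (a SETHI, OR, WR or LDDA
 * with a zero immediate, as encoded by the helpers above) and OF_panic()s
 * otherwise, then ORs the actual value into the immediate field(s).
 * PATCH_TSB spreads a 64-bit value across three immediates, bits 63..42,
 * 41..32 and 31..10; the low bits are assumed to be zero here since the TSB
 * address and size are suitably aligned.
 */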
469 #define PATCH_ASI(addr, asi) do { \
470 if (addr[0] != WR_R_I(IF_F3_RD(addr[0]), 0x0, \
471 IF_F3_RS1(addr[0]))) \
472 OF_panic("%s: patched instructions have changed", \
474 addr[0] |= EIF_IMM((asi), 13); \
478 #define PATCH_LDD(addr, asi) do { \
479 if (addr[0] != LDDA_R_I_R(IF_F3_RD(addr[0]), 0x0, \
480 IF_F3_RS1(addr[0]), IF_F3_RS2(addr[0]))) \
481 OF_panic("%s: patched instructions have changed", \
483 addr[0] |= EIF_F3_IMM_ASI(asi); \
487 #define PATCH_TSB(addr, val) do { \
488 if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \
489 addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, \
490 IF_F3_RS1(addr[1])) || \
491 addr[3] != SETHI(IF_F2_RD(addr[3]), 0x0)) \
492 OF_panic("%s: patched instructions have changed", \
494 addr[0] |= EIF_IMM((val) >> 42, 22); \
495 addr[1] |= EIF_IMM((val) >> 32, 10); \
496 addr[3] |= EIF_IMM((val) >> 10, 22); \
502 #define PATCH_TSB_MASK(addr, val) do { \
503 if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \
504 addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, \
505 IF_F3_RS1(addr[1]))) \
506 OF_panic("%s: patched instructions have changed", \
508 addr[0] |= EIF_IMM((val) >> 10, 22); \
509 addr[1] |= EIF_IMM((val), 10); \
514 if (tsb_kernel_ldd_phys == 0) {
516 ldd = ASI_NUCLEUS_QUAD_LDD;
517 off = (vm_offset_t)tsb_kernel;
519 asi = ASI_PHYS_USE_EC;
520 ldd = ASI_ATOMIC_QUAD_LDD_PHYS;
521 off = (vm_offset_t)tsb_kernel_phys;
523 PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_1, tsb_kernel_phys);
524 PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_end_1,
525 tsb_kernel_phys + tsb_kernel_size - 1);
526 PATCH_ASI(tl1_dmmu_miss_patch_asi_1, asi);
527 PATCH_LDD(tl1_dmmu_miss_patch_quad_ldd_1, ldd);
528 PATCH_TSB(tl1_dmmu_miss_patch_tsb_1, off);
529 PATCH_TSB(tl1_dmmu_miss_patch_tsb_2, off);
530 PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_1, tsb_kernel_mask);
531 PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_2, tsb_kernel_mask);
532 PATCH_ASI(tl1_dmmu_prot_patch_asi_1, asi);
533 PATCH_LDD(tl1_dmmu_prot_patch_quad_ldd_1, ldd);
534 PATCH_TSB(tl1_dmmu_prot_patch_tsb_1, off);
535 PATCH_TSB(tl1_dmmu_prot_patch_tsb_2, off);
536 PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_1, tsb_kernel_mask);
537 PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_2, tsb_kernel_mask);
538 PATCH_ASI(tl1_immu_miss_patch_asi_1, asi);
539 PATCH_LDD(tl1_immu_miss_patch_quad_ldd_1, ldd);
540 PATCH_TSB(tl1_immu_miss_patch_tsb_1, off);
541 PATCH_TSB(tl1_immu_miss_patch_tsb_2, off);
542 PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_1, tsb_kernel_mask);
543 PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_2, tsb_kernel_mask);
546 * Enter fake 8k pages for the 4MB kernel pages, so that
547 * pmap_kextract() will work for them.
549 for (i = 0; i < kernel_tlb_slots; i++) {
550 pa = kernel_tlbs[i].te_pa;
551 va = kernel_tlbs[i].te_va;
552 for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
553 tp = tsb_kvtotte(va + off);
554 vpn = TV_VPN(va + off, TS_8K);
555 data = TD_V | TD_8K | TD_PA(pa + off) | TD_REF |
556 TD_SW | TD_CP | TD_CV | TD_P | TD_W;
557 pmap_bootstrap_set_tte(tp, vpn, data);
562 * Set the start and end of KVA. The kernel is loaded starting
563 * at the first available 4MB super page, so we advance to the
564 * end of the last one used for it.
566 virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M;
567 virtual_end = vm_max_kernel_address;
568 kernel_vm_end = vm_max_kernel_address;
571 * Allocate kva space for temporary mappings.
573 pmap_idle_map = virtual_avail;
574 virtual_avail += PAGE_SIZE * colors;
575 pmap_temp_map_1 = virtual_avail;
576 virtual_avail += PAGE_SIZE * colors;
577 pmap_temp_map_2 = virtual_avail;
578 virtual_avail += PAGE_SIZE * colors;
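/*
 * PAGE_SIZE * colors is reserved for each of these maps so that a virtual
 * page of every D-cache color is available; pmap_zero_page(),
 * pmap_copy_page() and pmap_zero_page_idle() later select the page whose
 * color matches the physical page being touched, avoiding illegal aliases
 * in the virtually indexed data cache.
 */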
581 * Allocate a kernel stack with guard page for thread0 and map it
582 * into the kernel TSB. We must ensure that the virtual address is
583 * colored properly for corresponding CPUs, since we're allocating
584 * from phys_avail so the memory won't have an associated vm_page_t.
586 pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, colors);
588 virtual_avail += roundup(KSTACK_GUARD_PAGES, colors) * PAGE_SIZE;
589 kstack0 = virtual_avail;
590 virtual_avail += roundup(KSTACK_PAGES, colors) * PAGE_SIZE;
591 if (dcache_color_ignore == 0)
592 KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys),
593 ("pmap_bootstrap: kstack0 miscolored"));
594 for (i = 0; i < KSTACK_PAGES; i++) {
595 pa = kstack0_phys + i * PAGE_SIZE;
596 va = kstack0 + i * PAGE_SIZE;
597 tp = tsb_kvtotte(va);
598 vpn = TV_VPN(va, TS_8K);
599 data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP |
601 pmap_bootstrap_set_tte(tp, vpn, data);
605 * Calculate the last available physical address.
607 for (i = 0; phys_avail[i + 2] != 0; i += 2)
609 Maxmem = sparc64_btop(phys_avail[i + 1]);
612 * Add the PROM mappings to the kernel TSB.
614 if ((vmem = OF_finddevice("/virtual-memory")) == -1)
615 OF_panic("%s: finddevice /virtual-memory", __func__);
616 if ((sz = OF_getproplen(vmem, "translations")) == -1)
617 OF_panic("%s: getproplen translations", __func__);
618 if (sizeof(translations) < sz)
619 OF_panic("%s: translations too small", __func__);
620 bzero(translations, sz);
621 if (OF_getprop(vmem, "translations", translations, sz) == -1)
622 OF_panic("%s: getprop /virtual-memory/translations",
624 sz /= sizeof(*translations);
625 translations_size = sz;
626 CTR0(KTR_PMAP, "pmap_bootstrap: translations");
627 qsort(translations, sz, sizeof (*translations), om_cmp);
628 for (i = 0; i < sz; i++) {
630 "translation: start=%#lx size=%#lx tte=%#lx",
631 translations[i].om_start, translations[i].om_size,
632 translations[i].om_tte);
633 if ((translations[i].om_tte & TD_V) == 0)
635 if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
636 translations[i].om_start > VM_MAX_PROM_ADDRESS)
638 for (off = 0; off < translations[i].om_size;
640 va = translations[i].om_start + off;
641 tp = tsb_kvtotte(va);
642 vpn = TV_VPN(va, TS_8K);
643 data = ((translations[i].om_tte &
644 ~((TD_SOFT2_MASK << TD_SOFT2_SHIFT) |
645 (cpu_impl >= CPU_IMPL_ULTRASPARCI &&
646 cpu_impl < CPU_IMPL_ULTRASPARCIII ?
647 (TD_DIAG_SF_MASK << TD_DIAG_SF_SHIFT) :
648 (TD_RSVD_CH_MASK << TD_RSVD_CH_SHIFT)) |
649 (TD_SOFT_MASK << TD_SOFT_SHIFT))) | TD_EXEC) +
651 pmap_bootstrap_set_tte(tp, vpn, data);
656 * Get the available physical memory ranges from /memory/reg. These
657 * are only used for kernel dumps, but it may not be wise to do PROM
658 * calls in that situation.
660 if ((sz = OF_getproplen(pmem, "reg")) == -1)
661 OF_panic("%s: getproplen /memory/reg", __func__);
662 if (sizeof(sparc64_memreg) < sz)
663 OF_panic("%s: sparc64_memreg too small", __func__);
664 if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
665 OF_panic("%s: getprop /memory/reg", __func__);
666 sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
669 * Initialize the kernel pmap (which is statically allocated).
673 for (i = 0; i < MAXCPU; i++)
674 pm->pm_context[i] = TLB_CTX_KERNEL;
675 CPU_FILL(&pm->pm_active);
678 * Initialize the global tte list lock.
680 rw_init(&tte_list_global_lock, "tte list global");
683 * Flush all non-locked TLB entries possibly left over by the
686 tlb_flush_nonlocked();
690 * Map the 4MB kernel TSB pages.
700 for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
701 va = (vm_offset_t)tsb_kernel + i;
702 pa = tsb_kernel_phys + i;
703 data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
705 stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) |
706 TLB_TAR_CTX(TLB_CTX_KERNEL));
707 stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
712 * Set the secondary context to be the kernel context (needed for FP block
713 * operations in the kernel).
719 stxa(AA_DMMU_SCXR, ASI_DMMU, (ldxa(AA_DMMU_SCXR, ASI_DMMU) &
720 TLB_CXR_PGSZ_MASK) | TLB_CTX_KERNEL);
725 * Allocate a physical page of memory directly from the phys_avail map.
726 * Can only be called from pmap_bootstrap before avail start and end are calculated.
730 pmap_bootstrap_alloc(vm_size_t size, uint32_t colors)
735 size = roundup(size, PAGE_SIZE * colors);
736 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
737 if (phys_avail[i + 1] - phys_avail[i] < size)
740 phys_avail[i] += size;
743 OF_panic("%s: no suitable region found", __func__);
747 * Set a TTE. This function is intended as a helper when tsb_kernel is
748 * direct-mapped but we haven't taken over the trap table yet, as is the
749 * case when we are taking advantage of ASI_ATOMIC_QUAD_LDD_PHYS to access
753 pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data)
756 if (tsb_kernel_ldd_phys == 0) {
760 stxa((vm_paddr_t)tp + offsetof(struct tte, tte_vpn),
761 ASI_PHYS_USE_EC, vpn);
762 stxa((vm_paddr_t)tp + offsetof(struct tte, tte_data),
763 ASI_PHYS_USE_EC, data);
768 * Initialize a vm_page's machine-dependent fields.
771 pmap_page_init(vm_page_t m)
774 TAILQ_INIT(&m->md.tte_list);
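/*
 * The initial color is derived from the page's physical address: mappings
 * whose virtual color matches it can stay cacheable without creating
 * illegal aliases in the virtually indexed, physically tagged data cache.
 * pmap_cache_enter() and pmap_cache_remove() adjust md.color (setting it to
 * -1 once the page has to be mapped uncacheable) as mappings of different
 * colors come and go.
 */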
775 m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
781 * Initialize the pmap module.
791 for (i = 0; i < translations_size; i++) {
792 addr = translations[i].om_start;
793 size = translations[i].om_size;
794 if ((translations[i].om_tte & TD_V) == 0)
796 if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS)
798 result = vm_map_find(kernel_map, NULL, 0, &addr, size,
799 VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
800 if (result != KERN_SUCCESS || addr != translations[i].om_start)
801 panic("pmap_init: vm_map_find");
806 * Extract the physical page address associated with the given
807 * map/virtual_address pair.
810 pmap_extract(pmap_t pm, vm_offset_t va)
815 if (pm == kernel_pmap)
816 return (pmap_kextract(va));
818 tp = tsb_tte_lookup(pm, va);
822 pa = TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp));
828 * Atomically extract and hold the physical page with the given
829 * pmap and virtual address pair if that mapping permits the given
833 pmap_extract_and_hold(pmap_t pm, vm_offset_t va, vm_prot_t prot)
843 if (pm == kernel_pmap) {
844 if (va >= VM_MIN_DIRECT_ADDRESS) {
846 m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va));
847 (void)vm_page_pa_tryrelock(pm, TLB_DIRECT_TO_PHYS(va),
851 tp = tsb_kvtotte(va);
852 if ((tp->tte_data & TD_V) == 0)
856 tp = tsb_tte_lookup(pm, va);
857 if (tp != NULL && ((tp->tte_data & TD_SW) ||
858 (prot & VM_PROT_WRITE) == 0)) {
859 if (vm_page_pa_tryrelock(pm, TTE_GET_PA(tp), &pa))
861 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
870 * Extract the physical page address associated with the given kernel virtual
874 pmap_kextract(vm_offset_t va)
878 if (va >= VM_MIN_DIRECT_ADDRESS)
879 return (TLB_DIRECT_TO_PHYS(va));
880 tp = tsb_kvtotte(va);
881 if ((tp->tte_data & TD_V) == 0)
883 return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
887 pmap_cache_enter(vm_page_t m, vm_offset_t va)
892 rw_assert(&tte_list_global_lock, RA_WLOCKED);
893 KASSERT((m->flags & PG_FICTITIOUS) == 0,
894 ("pmap_cache_enter: fake page"));
895 PMAP_STATS_INC(pmap_ncache_enter);
897 if (dcache_color_ignore != 0)
901 * Find the color for this virtual address and note the added mapping.
903 color = DCACHE_COLOR(va);
904 m->md.colors[color]++;
907 * If all existing mappings have the same color, the mapping is
910 if (m->md.color == color) {
911 KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
912 ("pmap_cache_enter: cacheable, mappings of other color"));
913 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
914 PMAP_STATS_INC(pmap_ncache_enter_c);
916 PMAP_STATS_INC(pmap_ncache_enter_oc);
921 * If there are no mappings of the other color, and the page still has
922 * the wrong color, this must be a new mapping. Change the color to
923 * match the new mapping, which is cacheable. We must flush the page
924 * from the cache now.
926 if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) {
927 KASSERT(m->md.colors[color] == 1,
928 ("pmap_cache_enter: changing color, not new mapping"));
929 dcache_page_inval(VM_PAGE_TO_PHYS(m));
931 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
932 PMAP_STATS_INC(pmap_ncache_enter_cc);
934 PMAP_STATS_INC(pmap_ncache_enter_coc);
939 * If the mapping is already non-cacheable, just return.
941 if (m->md.color == -1) {
942 PMAP_STATS_INC(pmap_ncache_enter_nc);
946 PMAP_STATS_INC(pmap_ncache_enter_cnc);
949 * Mark all mappings as uncacheable, flush any lines with the other
950 * color out of the dcache, and set the color to none (-1).
952 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
953 atomic_clear_long(&tp->tte_data, TD_CV);
954 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
956 dcache_page_inval(VM_PAGE_TO_PHYS(m));
962 pmap_cache_remove(vm_page_t m, vm_offset_t va)
967 rw_assert(&tte_list_global_lock, RA_WLOCKED);
968 CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
969 m->md.colors[DCACHE_COLOR(va)]);
970 KASSERT((m->flags & PG_FICTITIOUS) == 0,
971 ("pmap_cache_remove: fake page"));
972 PMAP_STATS_INC(pmap_ncache_remove);
974 if (dcache_color_ignore != 0)
977 KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
978 ("pmap_cache_remove: no mappings %d <= 0",
979 m->md.colors[DCACHE_COLOR(va)]));
982 * Find the color for this virtual address and note the removal of
985 color = DCACHE_COLOR(va);
986 m->md.colors[color]--;
989 * If the page is cacheable, just return and keep the same color, even
990 * if there are no longer any mappings.
992 if (m->md.color != -1) {
993 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
994 PMAP_STATS_INC(pmap_ncache_remove_c);
996 PMAP_STATS_INC(pmap_ncache_remove_oc);
1000 KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0,
1001 ("pmap_cache_remove: uncacheable, no mappings of other color"));
1004 * If the page is not cacheable (color is -1), and the number of
1005 * mappings for this color is not zero, just return. There are
1006 * mappings of the other color still, so remain non-cacheable.
1008 if (m->md.colors[color] != 0) {
1009 PMAP_STATS_INC(pmap_ncache_remove_nc);
1014 * The number of mappings for this color is now zero. Recache the
1015 * other colored mappings, and change the page color to the other
1016 * color. There should be no lines in the data cache for this page,
1017 * so flushing should not be needed.
1019 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1020 atomic_set_long(&tp->tte_data, TD_CV);
1021 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1023 m->md.color = DCACHE_OTHER_COLOR(color);
1025 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
1026 PMAP_STATS_INC(pmap_ncache_remove_cc);
1028 PMAP_STATS_INC(pmap_ncache_remove_coc);
1032 * Map a wired page into kernel virtual address space.
1035 pmap_kenter(vm_offset_t va, vm_page_t m)
1042 rw_assert(&tte_list_global_lock, RA_WLOCKED);
1043 PMAP_STATS_INC(pmap_nkenter);
1044 tp = tsb_kvtotte(va);
1045 CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
1046 va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data);
1047 if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) {
1049 "pmap_kenter: off color va=%#lx pa=%#lx o=%p ot=%d pi=%#lx",
1050 va, VM_PAGE_TO_PHYS(m), m->object,
1051 m->object ? m->object->type : -1,
1053 PMAP_STATS_INC(pmap_nkenter_oc);
1055 if ((tp->tte_data & TD_V) != 0) {
1056 om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1057 ova = TTE_GET_VA(tp);
1058 if (m == om && va == ova) {
1059 PMAP_STATS_INC(pmap_nkenter_stupid);
1062 TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
1063 pmap_cache_remove(om, ova);
1065 tlb_page_demap(kernel_pmap, ova);
1067 data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP |
1069 if (pmap_cache_enter(m, va) != 0)
1071 tp->tte_vpn = TV_VPN(va, TS_8K);
1072 tp->tte_data = data;
1073 TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
1077 * Map a wired page into kernel virtual address space. This additionally
1078 * takes a flag argument which is or'ed to the TTE data. This is used by
1079 * sparc64_bus_mem_map().
1080 * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
1081 * to flush entries that might still be in the cache, if applicable.
1084 pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags)
1088 tp = tsb_kvtotte(va);
1089 CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
1090 va, pa, tp, tp->tte_data);
1091 tp->tte_vpn = TV_VPN(va, TS_8K);
1092 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
1096 * Remove a wired page from kernel virtual address space.
1099 pmap_kremove(vm_offset_t va)
1104 rw_assert(&tte_list_global_lock, RA_WLOCKED);
1105 PMAP_STATS_INC(pmap_nkremove);
1106 tp = tsb_kvtotte(va);
1107 CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
1109 if ((tp->tte_data & TD_V) == 0)
1111 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1112 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1113 pmap_cache_remove(m, va);
1118 * Inverse of pmap_kenter_flags, used by bus_space_unmap().
1121 pmap_kremove_flags(vm_offset_t va)
1125 tp = tsb_kvtotte(va);
1126 CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp,
1132 * Map a range of physical addresses into kernel virtual address space.
1134 * The value passed in *virt is a suggested virtual address for the mapping.
1135 * Architectures which can support a direct-mapped physical to virtual region
1136 * can return the appropriate address within that region, leaving '*virt'
1140 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
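/*
 * On sparc64 all of physical memory is reachable through the direct map,
 * so no kernel virtual address space needs to be allocated here and *virt
 * is left untouched.
 */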
1143 return (TLB_PHYS_TO_DIRECT(start));
1147 * Map a list of wired pages into kernel virtual address space. This is
1148 * intended for temporary mappings which do not need page modification or
1149 * references recorded. Existing mappings in the region are overwritten.
1152 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
1156 PMAP_STATS_INC(pmap_nqenter);
1158 rw_wlock(&tte_list_global_lock);
1159 while (count-- > 0) {
1160 pmap_kenter(va, *m);
1164 rw_wunlock(&tte_list_global_lock);
1165 tlb_range_demap(kernel_pmap, sva, va);
1169 * Remove page mappings from kernel virtual address space. Intended for
1170 * temporary mappings entered by pmap_qenter.
1173 pmap_qremove(vm_offset_t sva, int count)
1177 PMAP_STATS_INC(pmap_nqremove);
1179 rw_wlock(&tte_list_global_lock);
1180 while (count-- > 0) {
1184 rw_wunlock(&tte_list_global_lock);
1185 tlb_range_demap(kernel_pmap, sva, va);
1189 * Initialize the pmap associated with process 0.
1192 pmap_pinit0(pmap_t pm)
1197 for (i = 0; i < MAXCPU; i++)
1198 pm->pm_context[i] = TLB_CTX_KERNEL;
1199 CPU_ZERO(&pm->pm_active);
1201 pm->pm_tsb_obj = NULL;
1202 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1206 * Initialize a preallocated and zeroed pmap structure, such as one in a
1207 * vmspace structure.
1210 pmap_pinit(pmap_t pm)
1212 vm_page_t ma[TSB_PAGES];
1219 * Allocate KVA space for the TSB.
1221 if (pm->pm_tsb == NULL) {
1222 pm->pm_tsb = (struct tte *)kmem_alloc_nofault(kernel_map,
1224 if (pm->pm_tsb == NULL) {
1225 PMAP_LOCK_DESTROY(pm);
1231 * Allocate an object for it.
1233 if (pm->pm_tsb_obj == NULL)
1234 pm->pm_tsb_obj = vm_object_allocate(OBJT_PHYS, TSB_PAGES);
1236 for (i = 0; i < MAXCPU; i++)
1237 pm->pm_context[i] = -1;
1238 CPU_ZERO(&pm->pm_active);
1240 VM_OBJECT_LOCK(pm->pm_tsb_obj);
1241 for (i = 0; i < TSB_PAGES; i++) {
1242 m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
1243 VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1244 m->valid = VM_PAGE_BITS_ALL;
1248 VM_OBJECT_UNLOCK(pm->pm_tsb_obj);
1249 pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
1251 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1256 * Release any resources held by the given physical map.
1257 * Called when a pmap initialized by pmap_pinit is being released.
1258 * Should only be called if the map contains no valid mappings.
1261 pmap_release(pmap_t pm)
1269 CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
1270 pm->pm_context[curcpu], pm->pm_tsb);
1271 KASSERT(pmap_resident_count(pm) == 0,
1272 ("pmap_release: resident pages %ld != 0",
1273 pmap_resident_count(pm)));
1276 * After the pmap was freed, it might be reallocated to a new process.
1277 * When switching, this might lead us to wrongly assume that we need
1278 * not switch contexts because the old and new pmap pointers are equal.
1279 * Therefore, make sure that this pmap is not referenced by any PCPU
1280 * pointer any more. This could happen in two cases:
1281 * - A process that referenced the pmap is currently exiting on a CPU.
1282 * However, it is guaranteed to not switch in any more after setting
1283 * its state to PRS_ZOMBIE.
1284 * - A process that referenced this pmap ran on a CPU, but we switched
1285 * to a kernel thread, leaving the pmap pointer unchanged.
1289 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
1290 atomic_cmpset_rel_ptr((uintptr_t *)&pc->pc_pmap,
1291 (uintptr_t)pm, (uintptr_t)NULL);
1295 if (PCPU_GET(pmap) == pm)
1296 PCPU_SET(pmap, NULL);
1300 pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
1301 obj = pm->pm_tsb_obj;
1302 VM_OBJECT_LOCK(obj);
1303 KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
1304 while (!TAILQ_EMPTY(&obj->memq)) {
1305 m = TAILQ_FIRST(&obj->memq);
1308 atomic_subtract_int(&cnt.v_wire_count, 1);
1309 vm_page_free_zero(m);
1311 VM_OBJECT_UNLOCK(obj);
1312 PMAP_LOCK_DESTROY(pm);
1316 * Grow the number of kernel page table entries. Unneeded.
1319 pmap_growkernel(vm_offset_t addr)
1322 panic("pmap_growkernel: can't grow kernel");
1326 pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1332 rw_assert(&tte_list_global_lock, RA_WLOCKED);
1333 data = atomic_readandclear_long(&tp->tte_data);
1334 if ((data & TD_FAKE) == 0) {
1335 m = PHYS_TO_VM_PAGE(TD_PA(data));
1336 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1337 if ((data & TD_WIRED) != 0)
1338 pm->pm_stats.wired_count--;
1339 if ((data & TD_PV) != 0) {
1340 if ((data & TD_W) != 0)
1342 if ((data & TD_REF) != 0)
1343 vm_page_aflag_set(m, PGA_REFERENCED);
1344 if (TAILQ_EMPTY(&m->md.tte_list))
1345 vm_page_aflag_clear(m, PGA_WRITEABLE);
1346 pm->pm_stats.resident_count--;
1348 pmap_cache_remove(m, va);
1351 if (PMAP_REMOVE_DONE(pm))
1357 * Remove the given range of addresses from the specified map.
1360 pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
1365 CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
1366 pm->pm_context[curcpu], start, end);
1367 if (PMAP_REMOVE_DONE(pm))
1369 rw_wlock(&tte_list_global_lock);
1371 if (end - start > PMAP_TSB_THRESH) {
1372 tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
1373 tlb_context_demap(pm);
1375 for (va = start; va < end; va += PAGE_SIZE)
1376 if ((tp = tsb_tte_lookup(pm, va)) != NULL &&
1377 !pmap_remove_tte(pm, NULL, tp, va))
1379 tlb_range_demap(pm, start, end - 1);
1382 rw_wunlock(&tte_list_global_lock);
1386 pmap_remove_all(vm_page_t m)
1393 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1394 ("pmap_remove_all: page %p is not managed", m));
1395 rw_wlock(&tte_list_global_lock);
1396 for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
1397 tpn = TAILQ_NEXT(tp, tte_link);
1398 if ((tp->tte_data & TD_PV) == 0)
1400 pm = TTE_GET_PMAP(tp);
1401 va = TTE_GET_VA(tp);
1403 if ((tp->tte_data & TD_WIRED) != 0)
1404 pm->pm_stats.wired_count--;
1405 if ((tp->tte_data & TD_REF) != 0)
1406 vm_page_aflag_set(m, PGA_REFERENCED);
1407 if ((tp->tte_data & TD_W) != 0)
1409 tp->tte_data &= ~TD_V;
1410 tlb_page_demap(pm, va);
1411 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1412 pm->pm_stats.resident_count--;
1413 pmap_cache_remove(m, va);
1417 vm_page_aflag_clear(m, PGA_WRITEABLE);
1418 rw_wunlock(&tte_list_global_lock);
1422 pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1428 PMAP_LOCK_ASSERT(pm, MA_OWNED);
1429 data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
1430 if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) {
1431 m = PHYS_TO_VM_PAGE(TD_PA(data));
1438 * Set the physical protection on the specified range of this map as requested.
1441 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1446 CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
1447 pm->pm_context[curcpu], sva, eva, prot);
1449 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1450 pmap_remove(pm, sva, eva);
1454 if (prot & VM_PROT_WRITE)
1458 if (eva - sva > PMAP_TSB_THRESH) {
1459 tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
1460 tlb_context_demap(pm);
1462 for (va = sva; va < eva; va += PAGE_SIZE)
1463 if ((tp = tsb_tte_lookup(pm, va)) != NULL)
1464 pmap_protect_tte(pm, NULL, tp, va);
1465 tlb_range_demap(pm, sva, eva - 1);
1471 * Map the given physical page at the specified virtual address in the
1472 * target pmap with the protection requested. If specified the page
1473 * will be wired down.
1476 pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
1477 vm_prot_t prot, boolean_t wired)
1480 rw_wlock(&tte_list_global_lock);
1482 pmap_enter_locked(pm, va, m, prot, wired);
1483 rw_wunlock(&tte_list_global_lock);
1488 * Map the given physical page at the specified virtual address in the
1489 * target pmap with the protection requested. If specified the page
1490 * will be wired down.
1492 * The page queues and pmap must be locked.
1495 pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1503 rw_assert(&tte_list_global_lock, RA_WLOCKED);
1504 PMAP_LOCK_ASSERT(pm, MA_OWNED);
1505 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
1506 VM_OBJECT_LOCKED(m->object),
1507 ("pmap_enter_locked: page %p is not busy", m));
1508 PMAP_STATS_INC(pmap_nenter);
1509 pa = VM_PAGE_TO_PHYS(m);
1512 * If this is a fake page from the device_pager, but it covers actual
1513 * physical memory, convert to the real backing page.
1515 if ((m->flags & PG_FICTITIOUS) != 0) {
1516 real = vm_phys_paddr_to_vm_page(pa);
1522 "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
1523 pm->pm_context[curcpu], m, va, pa, prot, wired);
1526 * If there is an existing mapping, and the physical address has not
1527 * changed, it must be a protection or wiring change.
1529 if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
1530 CTR0(KTR_PMAP, "pmap_enter_locked: update");
1531 PMAP_STATS_INC(pmap_nenter_update);
1534 * Wiring change, just update stats.
1537 if ((tp->tte_data & TD_WIRED) == 0) {
1538 tp->tte_data |= TD_WIRED;
1539 pm->pm_stats.wired_count++;
1542 if ((tp->tte_data & TD_WIRED) != 0) {
1543 tp->tte_data &= ~TD_WIRED;
1544 pm->pm_stats.wired_count--;
1549 * Save the old bits and clear the ones we're interested in.
1551 data = tp->tte_data;
1552 tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W);
1555 * If we're turning off write permissions, sense modify status.
1557 if ((prot & VM_PROT_WRITE) != 0) {
1558 tp->tte_data |= TD_SW;
1560 tp->tte_data |= TD_W;
1561 if ((m->oflags & VPO_UNMANAGED) == 0)
1562 vm_page_aflag_set(m, PGA_WRITEABLE);
1563 } else if ((data & TD_W) != 0)
1567 * If we're turning on execute permissions, flush the icache.
1569 if ((prot & VM_PROT_EXECUTE) != 0) {
1570 if ((data & TD_EXEC) == 0)
1571 icache_page_inval(pa);
1572 tp->tte_data |= TD_EXEC;
1576 * Delete the old mapping.
1578 tlb_page_demap(pm, TTE_GET_VA(tp));
1581 * If there is an existing mapping, but it's for a different
1582 * physical address, delete the old mapping.
1585 CTR0(KTR_PMAP, "pmap_enter_locked: replace");
1586 PMAP_STATS_INC(pmap_nenter_replace);
1587 pmap_remove_tte(pm, NULL, tp, va);
1588 tlb_page_demap(pm, va);
1590 CTR0(KTR_PMAP, "pmap_enter_locked: new");
1591 PMAP_STATS_INC(pmap_nenter_new);
1595 * Now set up the data and install the new mapping.
1597 data = TD_V | TD_8K | TD_PA(pa);
1598 if (pm == kernel_pmap)
1600 if ((prot & VM_PROT_WRITE) != 0) {
1602 if ((m->oflags & VPO_UNMANAGED) == 0)
1603 vm_page_aflag_set(m, PGA_WRITEABLE);
1605 if (prot & VM_PROT_EXECUTE) {
1607 icache_page_inval(pa);
1611 * If it's wired, update stats. We also don't need reference or
1612 * modify tracking for wired mappings, so set the bits now.
1615 pm->pm_stats.wired_count++;
1616 data |= TD_REF | TD_WIRED;
1617 if ((prot & VM_PROT_WRITE) != 0)
1621 tsb_tte_enter(pm, m, va, TS_8K, data);
1626 * Maps a sequence of resident pages belonging to the same object.
1627 * The sequence begins with the given page m_start. This page is
1628 * mapped at the given virtual address start. Each subsequent page is
1629 * mapped at a virtual address that is offset from start by the same
1630 * amount as the page is offset from m_start within the object. The
1631 * last page in the sequence is the page with the largest offset from
1632 * m_start that can be mapped at a virtual address less than the given
1633 * virtual address end. Not every virtual page between start and end
1634 * is mapped; only those for which a resident page exists with the
1635 * corresponding offset from m_start are mapped.
1638 pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
1639 vm_page_t m_start, vm_prot_t prot)
1642 vm_pindex_t diff, psize;
1644 psize = atop(end - start);
1646 rw_wlock(&tte_list_global_lock);
1648 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1649 pmap_enter_locked(pm, start + ptoa(diff), m, prot &
1650 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1651 m = TAILQ_NEXT(m, listq);
1653 rw_wunlock(&tte_list_global_lock);
1658 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
1661 rw_wlock(&tte_list_global_lock);
1663 pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1665 rw_wunlock(&tte_list_global_lock);
1670 pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1671 vm_pindex_t pindex, vm_size_t size)
1674 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1675 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
1676 ("pmap_object_init_pt: non-device object"));
1680 * Change the wiring attribute for a map/virtual-address pair.
1681 * The mapping must already exist in the pmap.
1684 pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
1690 if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
1692 data = atomic_set_long(&tp->tte_data, TD_WIRED);
1693 if ((data & TD_WIRED) == 0)
1694 pm->pm_stats.wired_count++;
1696 data = atomic_clear_long(&tp->tte_data, TD_WIRED);
1697 if ((data & TD_WIRED) != 0)
1698 pm->pm_stats.wired_count--;
1705 pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp,
1711 if ((tp->tte_data & TD_FAKE) != 0)
1713 if (tsb_tte_lookup(dst_pmap, va) == NULL) {
1714 data = tp->tte_data &
1715 ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
1716 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1717 tsb_tte_enter(dst_pmap, m, va, TS_8K, data);
1723 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
1724 vm_size_t len, vm_offset_t src_addr)
1729 if (dst_addr != src_addr)
1731 rw_wlock(&tte_list_global_lock);
1732 if (dst_pmap < src_pmap) {
1733 PMAP_LOCK(dst_pmap);
1734 PMAP_LOCK(src_pmap);
1736 PMAP_LOCK(src_pmap);
1737 PMAP_LOCK(dst_pmap);
1739 if (len > PMAP_TSB_THRESH) {
1740 tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len,
1742 tlb_context_demap(dst_pmap);
1744 for (va = src_addr; va < src_addr + len; va += PAGE_SIZE)
1745 if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
1746 pmap_copy_tte(src_pmap, dst_pmap, tp, va);
1747 tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
1749 rw_wunlock(&tte_list_global_lock);
1750 PMAP_UNLOCK(src_pmap);
1751 PMAP_UNLOCK(dst_pmap);
1755 pmap_zero_page(vm_page_t m)
1761 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1762 ("pmap_zero_page: fake page"));
1763 PMAP_STATS_INC(pmap_nzero_page);
1764 pa = VM_PAGE_TO_PHYS(m);
1765 if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
1766 PMAP_STATS_INC(pmap_nzero_page_c);
1767 va = TLB_PHYS_TO_DIRECT(pa);
1768 cpu_block_zero((void *)va, PAGE_SIZE);
1769 } else if (m->md.color == -1) {
1770 PMAP_STATS_INC(pmap_nzero_page_nc);
1771 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1773 PMAP_STATS_INC(pmap_nzero_page_oc);
1774 PMAP_LOCK(kernel_pmap);
1775 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1776 tp = tsb_kvtotte(va);
1777 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1778 tp->tte_vpn = TV_VPN(va, TS_8K);
1779 cpu_block_zero((void *)va, PAGE_SIZE);
1780 tlb_page_demap(kernel_pmap, va);
1781 PMAP_UNLOCK(kernel_pmap);
1786 pmap_zero_page_area(vm_page_t m, int off, int size)
1792 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1793 ("pmap_zero_page_area: fake page"));
1794 KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
1795 PMAP_STATS_INC(pmap_nzero_page_area);
1796 pa = VM_PAGE_TO_PHYS(m);
1797 if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
1798 PMAP_STATS_INC(pmap_nzero_page_area_c);
1799 va = TLB_PHYS_TO_DIRECT(pa);
1800 bzero((void *)(va + off), size);
1801 } else if (m->md.color == -1) {
1802 PMAP_STATS_INC(pmap_nzero_page_area_nc);
1803 aszero(ASI_PHYS_USE_EC, pa + off, size);
1805 PMAP_STATS_INC(pmap_nzero_page_area_oc);
1806 PMAP_LOCK(kernel_pmap);
1807 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1808 tp = tsb_kvtotte(va);
1809 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1810 tp->tte_vpn = TV_VPN(va, TS_8K);
1811 bzero((void *)(va + off), size);
1812 tlb_page_demap(kernel_pmap, va);
1813 PMAP_UNLOCK(kernel_pmap);
1818 pmap_zero_page_idle(vm_page_t m)
1824 KASSERT((m->flags & PG_FICTITIOUS) == 0,
1825 ("pmap_zero_page_idle: fake page"));
1826 PMAP_STATS_INC(pmap_nzero_page_idle);
1827 pa = VM_PAGE_TO_PHYS(m);
1828 if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
1829 PMAP_STATS_INC(pmap_nzero_page_idle_c);
1830 va = TLB_PHYS_TO_DIRECT(pa);
1831 cpu_block_zero((void *)va, PAGE_SIZE);
1832 } else if (m->md.color == -1) {
1833 PMAP_STATS_INC(pmap_nzero_page_idle_nc);
1834 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1836 PMAP_STATS_INC(pmap_nzero_page_idle_oc);
1837 va = pmap_idle_map + (m->md.color * PAGE_SIZE);
1838 tp = tsb_kvtotte(va);
1839 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1840 tp->tte_vpn = TV_VPN(va, TS_8K);
1841 cpu_block_zero((void *)va, PAGE_SIZE);
1842 tlb_page_demap(kernel_pmap, va);
1847 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
1855 KASSERT((mdst->flags & PG_FICTITIOUS) == 0,
1856 ("pmap_copy_page: fake dst page"));
1857 KASSERT((msrc->flags & PG_FICTITIOUS) == 0,
1858 ("pmap_copy_page: fake src page"));
1859 PMAP_STATS_INC(pmap_ncopy_page);
1860 pdst = VM_PAGE_TO_PHYS(mdst);
1861 psrc = VM_PAGE_TO_PHYS(msrc);
1862 if (dcache_color_ignore != 0 ||
1863 (msrc->md.color == DCACHE_COLOR(psrc) &&
1864 mdst->md.color == DCACHE_COLOR(pdst))) {
1865 PMAP_STATS_INC(pmap_ncopy_page_c);
1866 vdst = TLB_PHYS_TO_DIRECT(pdst);
1867 vsrc = TLB_PHYS_TO_DIRECT(psrc);
1868 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1869 } else if (msrc->md.color == -1 && mdst->md.color == -1) {
1870 PMAP_STATS_INC(pmap_ncopy_page_nc);
1871 ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
1872 } else if (msrc->md.color == -1) {
1873 if (mdst->md.color == DCACHE_COLOR(pdst)) {
1874 PMAP_STATS_INC(pmap_ncopy_page_dc);
1875 vdst = TLB_PHYS_TO_DIRECT(pdst);
1876 ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1879 PMAP_STATS_INC(pmap_ncopy_page_doc);
1880 PMAP_LOCK(kernel_pmap);
1881 vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1882 tp = tsb_kvtotte(vdst);
1884 TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1885 tp->tte_vpn = TV_VPN(vdst, TS_8K);
1886 ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1888 tlb_page_demap(kernel_pmap, vdst);
1889 PMAP_UNLOCK(kernel_pmap);
1891 } else if (mdst->md.color == -1) {
1892 if (msrc->md.color == DCACHE_COLOR(psrc)) {
1893 PMAP_STATS_INC(pmap_ncopy_page_sc);
1894 vsrc = TLB_PHYS_TO_DIRECT(psrc);
1895 ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1898 PMAP_STATS_INC(pmap_ncopy_page_soc);
1899 PMAP_LOCK(kernel_pmap);
1900 vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
1901 tp = tsb_kvtotte(vsrc);
1903 TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1904 tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1905 ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1907 tlb_page_demap(kernel_pmap, vsrc);
1908 PMAP_UNLOCK(kernel_pmap);
1911 PMAP_STATS_INC(pmap_ncopy_page_oc);
1912 PMAP_LOCK(kernel_pmap);
1913 vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1914 tp = tsb_kvtotte(vdst);
1916 TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1917 tp->tte_vpn = TV_VPN(vdst, TS_8K);
1918 vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
1919 tp = tsb_kvtotte(vsrc);
1921 TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1922 tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1923 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1924 tlb_page_demap(kernel_pmap, vdst);
1925 tlb_page_demap(kernel_pmap, vsrc);
1926 PMAP_UNLOCK(kernel_pmap);
1931 * Returns true if the pmap's pv is one of the first
1932 * 16 pvs linked to from this page. This count may
1933 * be changed upwards or downwards in the future; it
1934 * is only necessary that true be returned for a small
1935 * subset of pmaps for proper page aging.
1938 pmap_page_exists_quick(pmap_t pm, vm_page_t m)
1944 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1945 ("pmap_page_exists_quick: page %p is not managed", m));
1948 rw_wlock(&tte_list_global_lock);
1949 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1950 if ((tp->tte_data & TD_PV) == 0)
1952 if (TTE_GET_PMAP(tp) == pm) {
1959 rw_wunlock(&tte_list_global_lock);
1964 * Return the number of managed mappings to the given physical page
1968 pmap_page_wired_mappings(vm_page_t m)
1974 if ((m->oflags & VPO_UNMANAGED) != 0)
1976 rw_wlock(&tte_list_global_lock);
1977 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
1978 if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
1980 rw_wunlock(&tte_list_global_lock);
1985 * Remove all pages from the specified address space; this aids process exit
1986 * speed. This is much faster than pmap_remove in the case of running down
1987 * an entire address space. Only works for the current pmap.
1990 pmap_remove_pages(pmap_t pm)
1996 * Returns TRUE if the given page has a managed mapping.
1999 pmap_page_is_mapped(vm_page_t m)
2005 if ((m->oflags & VPO_UNMANAGED) != 0)
2007 rw_wlock(&tte_list_global_lock);
2008 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
2009 if ((tp->tte_data & TD_PV) != 0) {
2013 rw_wunlock(&tte_list_global_lock);
2018 * Return a count of reference bits for a page, clearing those bits.
2019 * It is not necessary for every reference bit to be cleared, but it
2020 * is necessary that 0 only be returned when there are truly no
2021 * reference bits set.
2023 * XXX: The exact number of bits to check and clear is a matter that
2024 * should be tested and standardized at some point in the future for
2025 * optimal aging of shared pages.
2028 pmap_ts_referenced(vm_page_t m)
2036 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2037 ("pmap_ts_referenced: page %p is not managed", m));
2039 rw_wlock(&tte_list_global_lock);
2040 if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
2043 tpn = TAILQ_NEXT(tp, tte_link);
2044 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
2045 TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
2046 if ((tp->tte_data & TD_PV) == 0)
2048 data = atomic_clear_long(&tp->tte_data, TD_REF);
2049 if ((data & TD_REF) != 0 && ++count > 4)
2051 } while ((tp = tpn) != NULL && tp != tpf);
2053 rw_wunlock(&tte_list_global_lock);
2058 pmap_is_modified(vm_page_t m)
2063 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2064 ("pmap_is_modified: page %p is not managed", m));
2068 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
2069 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
2070 * is clear, no TTEs can have TD_W set.
2072 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2073 if ((m->oflags & VPO_BUSY) == 0 &&
2074 (m->aflags & PGA_WRITEABLE) == 0)
2076 rw_wlock(&tte_list_global_lock);
2077 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2078 if ((tp->tte_data & TD_PV) == 0)
2080 if ((tp->tte_data & TD_W) != 0) {
2085 rw_wunlock(&tte_list_global_lock);
2090 * pmap_is_prefaultable:
2092 * Return whether or not the specified virtual address is eligible
2096 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2101 rv = tsb_tte_lookup(pmap, addr) == NULL;
2107 * Return whether or not the specified physical page was referenced
2108 * in any physical maps.
2111 pmap_is_referenced(vm_page_t m)
2116 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2117 ("pmap_is_referenced: page %p is not managed", m));
2119 rw_wlock(&tte_list_global_lock);
2120 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2121 if ((tp->tte_data & TD_PV) == 0)
2123 if ((tp->tte_data & TD_REF) != 0) {
2128 rw_wunlock(&tte_list_global_lock);
2133 pmap_clear_modify(vm_page_t m)
2138 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2139 ("pmap_clear_modify: page %p is not managed", m));
2140 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2141 KASSERT((m->oflags & VPO_BUSY) == 0,
2142 ("pmap_clear_modify: page %p is busy", m));
2145 * If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set.
2146 * If the object containing the page is locked and the page is not
2147 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
2149 if ((m->aflags & PGA_WRITEABLE) == 0)
2151 rw_wlock(&tte_list_global_lock);
2152 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2153 if ((tp->tte_data & TD_PV) == 0)
2155 data = atomic_clear_long(&tp->tte_data, TD_W);
2156 if ((data & TD_W) != 0)
2157 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
2159 rw_wunlock(&tte_list_global_lock);
2163 pmap_clear_reference(vm_page_t m)
2168 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2169 ("pmap_clear_reference: page %p is not managed", m));
2170 rw_wlock(&tte_list_global_lock);
2171 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2172 if ((tp->tte_data & TD_PV) == 0)
2174 data = atomic_clear_long(&tp->tte_data, TD_REF);
2175 if ((data & TD_REF) != 0)
2176 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
2178 rw_wunlock(&tte_list_global_lock);
2182 pmap_remove_write(vm_page_t m)
2187 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2188 ("pmap_remove_write: page %p is not managed", m));
2191 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
2192 * another thread while the object is locked. Thus, if PGA_WRITEABLE
2193 * is clear, no page table entries need updating.
2195 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2196 if ((m->oflags & VPO_BUSY) == 0 &&
2197 (m->aflags & PGA_WRITEABLE) == 0)
2199 rw_wlock(&tte_list_global_lock);
2200 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2201 if ((tp->tte_data & TD_PV) == 0)
2203 data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
2204 if ((data & TD_W) != 0) {
2206 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
2209 vm_page_aflag_clear(m, PGA_WRITEABLE);
2210 rw_wunlock(&tte_list_global_lock);
2214 pmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *locked_pa)
2222 * Activate a user pmap. The pmap must be activated before its address space
2223 * can be accessed in any way.
2226 pmap_activate(struct thread *td)
2233 vm = td->td_proc->p_vmspace;
2234 pm = vmspace_pmap(vm);
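/*
 * Pick a TLB context number for this pmap on the current CPU. Context
 * numbers are handed out from a per-CPU range; once the per-CPU maximum is
 * reached, allocation wraps back to the per-CPU minimum. The chosen context
 * is remembered in pm_context[] and written into the primary context
 * register below.
 */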
2236 context = PCPU_GET(tlb_ctx);
2237 if (context == PCPU_GET(tlb_ctx_max)) {
2239 context = PCPU_GET(tlb_ctx_min);
2241 PCPU_SET(tlb_ctx, context + 1);
2243 pm->pm_context[curcpu] = context;
2245 CPU_SET_ATOMIC(PCPU_GET(cpuid), &pm->pm_active);
2246 atomic_store_ptr((uintptr_t *)PCPU_PTR(pmap), (uintptr_t)pm);
2248 CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
2252 stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
2253 stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
2254 stxa(AA_DMMU_PCXR, ASI_DMMU, (ldxa(AA_DMMU_PCXR, ASI_DMMU) &
2255 TLB_CXR_PGSZ_MASK) | context);
2261 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
2267 * Increase the starting virtual address of the given mapping if a
2268 * different alignment might result in more superpage mappings.
2271 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
2272 vm_offset_t *addr, vm_size_t size)