 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
 * Manages physical address maps.
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps. These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time. However, invalidations of
 * mappings must be done as requested.
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary. This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
#include "opt_kstack_pages.h"
#include "opt_msgbuf.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <dev/ofw/openfirm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <machine/cache.h>
#include <machine/frame.h>
#include <machine/instr.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/ofw_mem.h>
#include <machine/smp.h>
#include <machine/tlb.h>
#include <machine/tte.h>
#include <machine/tsb.h>
#include <machine/ver.h>
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#include "opt_sched.h"
#error "sparc64 only works with SCHED_4BSD which uses a global scheduler lock."
extern struct mtx sched_lock;
 * Virtual address of message buffer
struct msgbuf *msgbufp;
 * Map of physical memory regions
vm_paddr_t phys_avail[128];
static struct ofw_mem_region mra[128];
struct ofw_mem_region sparc64_memreg[128];
static struct ofw_map translations[128];
static int translations_size;
static vm_offset_t pmap_idle_map;
static vm_offset_t pmap_temp_map_1;
static vm_offset_t pmap_temp_map_2;
 * First and last available kernel virtual addresses
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;
vm_offset_t vm_max_kernel_address;
struct pmap kernel_pmap_store;
 * Allocate physical memory for use in pmap_bootstrap.
static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size, uint32_t colors);
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested. If specified the page
 * will be wired down.
 * The page queues and pmap must be locked.
static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired);
extern int tl1_immu_miss_patch_1[];
extern int tl1_immu_miss_patch_2[];
extern int tl1_dmmu_miss_patch_1[];
extern int tl1_dmmu_miss_patch_2[];
extern int tl1_dmmu_prot_patch_1[];
extern int tl1_dmmu_prot_patch_2[];
 * If a user pmap is processed with pmap_remove and the resident count
 * drops to 0, there are no more pages to remove, so we
#define PMAP_REMOVE_DONE(pm) \
        ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
 * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove()
 * and pmap_protect() instead of trying each virtual address.
#define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE)
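/*
 * Illustrative sketch (not part of the original code): pmap_remove() and
 * pmap_protect() below choose between a full TSB scan and per-page lookups
 * based on this threshold, roughly:
 *
 *        if (end - start > PMAP_TSB_THRESH)
 *                tsb_foreach(pm, NULL, start, end, callback);
 *        else
 *                for (va = start; va < end; va += PAGE_SIZE)
 *                        if ((tp = tsb_tte_lookup(pm, va)) != NULL)
 *                                callback(pm, NULL, tp, va);
 *
 * For large ranges it is cheaper to walk the bounded TSB once than to probe
 * every page-sized virtual address in the range.
 */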
SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "");
PMAP_STATS_VAR(pmap_nenter);
PMAP_STATS_VAR(pmap_nenter_update);
PMAP_STATS_VAR(pmap_nenter_replace);
PMAP_STATS_VAR(pmap_nenter_new);
PMAP_STATS_VAR(pmap_nkenter);
PMAP_STATS_VAR(pmap_nkenter_oc);
PMAP_STATS_VAR(pmap_nkenter_stupid);
PMAP_STATS_VAR(pmap_nkremove);
PMAP_STATS_VAR(pmap_nqenter);
PMAP_STATS_VAR(pmap_nqremove);
PMAP_STATS_VAR(pmap_ncache_enter);
PMAP_STATS_VAR(pmap_ncache_enter_c);
PMAP_STATS_VAR(pmap_ncache_enter_oc);
PMAP_STATS_VAR(pmap_ncache_enter_cc);
PMAP_STATS_VAR(pmap_ncache_enter_coc);
PMAP_STATS_VAR(pmap_ncache_enter_nc);
PMAP_STATS_VAR(pmap_ncache_enter_cnc);
PMAP_STATS_VAR(pmap_ncache_remove);
PMAP_STATS_VAR(pmap_ncache_remove_c);
PMAP_STATS_VAR(pmap_ncache_remove_oc);
PMAP_STATS_VAR(pmap_ncache_remove_cc);
PMAP_STATS_VAR(pmap_ncache_remove_coc);
PMAP_STATS_VAR(pmap_ncache_remove_nc);
PMAP_STATS_VAR(pmap_nzero_page);
PMAP_STATS_VAR(pmap_nzero_page_c);
PMAP_STATS_VAR(pmap_nzero_page_oc);
PMAP_STATS_VAR(pmap_nzero_page_nc);
PMAP_STATS_VAR(pmap_nzero_page_area);
PMAP_STATS_VAR(pmap_nzero_page_area_c);
PMAP_STATS_VAR(pmap_nzero_page_area_oc);
PMAP_STATS_VAR(pmap_nzero_page_area_nc);
PMAP_STATS_VAR(pmap_nzero_page_idle);
PMAP_STATS_VAR(pmap_nzero_page_idle_c);
PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
PMAP_STATS_VAR(pmap_ncopy_page);
PMAP_STATS_VAR(pmap_ncopy_page_c);
PMAP_STATS_VAR(pmap_ncopy_page_oc);
PMAP_STATS_VAR(pmap_ncopy_page_nc);
PMAP_STATS_VAR(pmap_ncopy_page_dc);
PMAP_STATS_VAR(pmap_ncopy_page_doc);
PMAP_STATS_VAR(pmap_ncopy_page_sc);
PMAP_STATS_VAR(pmap_ncopy_page_soc);
PMAP_STATS_VAR(pmap_nnew_thread);
PMAP_STATS_VAR(pmap_nnew_thread_oc);
static inline u_long dtlb_get_data(u_int slot);
 * Quick sort callout for comparing memory regions
static int mr_cmp(const void *a, const void *b);
static int om_cmp(const void *a, const void *b);
mr_cmp(const void *a, const void *b)
        const struct ofw_mem_region *mra;
        const struct ofw_mem_region *mrb;
        if (mra->mr_start < mrb->mr_start)
        else if (mra->mr_start > mrb->mr_start)
om_cmp(const void *a, const void *b)
        const struct ofw_map *oma;
        const struct ofw_map *omb;
        if (oma->om_start < omb->om_start)
        else if (oma->om_start > omb->om_start)
dtlb_get_data(u_int slot)
         * We read ASI_DTLB_DATA_ACCESS_REG twice in order to work
         * around errata of USIII and beyond.
        (void)ldxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG);
        return (ldxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG));
 * Bootstrap the system enough to run with virtual memory.
pmap_bootstrap(u_int cpu_impl)
        u_int dtlb_slots_avail;
        colors = dcache_color_ignore != 0 ? 1 : DCACHE_COLORS;
         * Find out what physical memory is available from the PROM and
         * initialize the phys_avail array. This must be done before
         * pmap_bootstrap_alloc is called.
        if ((pmem = OF_finddevice("/memory")) == -1)
                panic("pmap_bootstrap: finddevice /memory");
        if ((sz = OF_getproplen(pmem, "available")) == -1)
                panic("pmap_bootstrap: getproplen /memory/available");
        if (sizeof(phys_avail) < sz)
                panic("pmap_bootstrap: phys_avail too small");
        if (sizeof(mra) < sz)
                panic("pmap_bootstrap: mra too small");
        if (OF_getprop(pmem, "available", mra, sz) == -1)
                panic("pmap_bootstrap: getprop /memory/available");
        CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
        qsort(mra, sz, sizeof (*mra), mr_cmp);
        getenv_quad("hw.physmem", &physmem);
        physmem = btoc(physmem);
        for (i = 0, j = 0; i < sz; i++, j += 2) {
                CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start,
                if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) {
                        if (btoc(physsz) < physmem) {
                                phys_avail[j] = mra[i].mr_start;
                                phys_avail[j + 1] = mra[i].mr_start +
                                    (ctob(physmem) - physsz);
                                physsz = ctob(physmem);
                phys_avail[j] = mra[i].mr_start;
                phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
                physsz += mra[i].mr_size;
        physmem = btoc(physsz);
         * Calculate the size of kernel virtual memory, and the size and mask
         * for the kernel TSB based on the physical memory size but limited
         * by the amount of dTLB slots available for locked entries (given
         * that for spitfire-class CPUs all of the dt64 slots can hold locked
         * entries but there is no large dTLB for unlocked ones, we don't use
         * more than half of it for locked entries).
        dtlb_slots_avail = 0;
        for (i = 0; i < dtlb_slots; i++) {
                data = dtlb_get_data(i);
                if ((data & (TD_V | TD_L)) != (TD_V | TD_L))
        dtlb_slots_avail -= PCPU_PAGES;
        if (cpu_impl >= CPU_IMPL_ULTRASPARCI &&
            cpu_impl < CPU_IMPL_ULTRASPARCIII)
                dtlb_slots_avail /= 2;
        virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
            (dtlb_slots_avail * PAGE_SIZE_4M) << (PAGE_SHIFT - TTE_SHIFT));
        vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
        tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
        tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
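        /*
         * Illustrative reading of the arithmetic above (not part of the
         * original source): each TSB entry is 1 << TTE_SHIFT bytes and maps
         * one 1 << PAGE_SHIFT byte page, so a TSB of tsb_kernel_size bytes
         * covers
         *
         *        virtsz = tsb_kernel_size << (PAGE_SHIFT - TTE_SHIFT)
         *
         * bytes of kernel virtual address space, and tsb_kernel_mask is the
         * index mask for its tsb_kernel_size >> TTE_SHIFT entries.
         */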
         * Allocate the kernel TSB and lock it in the TLB.
        pa = pmap_bootstrap_alloc(tsb_kernel_size, colors);
        if (pa & PAGE_MASK_4M)
                panic("pmap_bootstrap: tsb unaligned\n");
        tsb_kernel_phys = pa;
        tsb_kernel = (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
        bzero(tsb_kernel, tsb_kernel_size);
         * Allocate and map the dynamic per-CPU area for the BSP.
        pa = pmap_bootstrap_alloc(DPCPU_SIZE, colors);
        dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pa);
         * Allocate and map the message buffer.
        pa = pmap_bootstrap_alloc(MSGBUF_SIZE, colors);
        msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa);
         * Patch the virtual address and the tsb mask into the trap table.
#define SETHI(rd, imm22) \
        (EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \
        EIF_IMM((imm22) >> 10, 22))
#define OR_R_I_R(rd, imm13, rs1) \
        (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \
        EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
#define PATCH(addr) do { \
        if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \
            addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, IF_F3_RS1(addr[1])) || \
            addr[2] != SETHI(IF_F2_RD(addr[2]), 0x0)) \
                panic("pmap_bootstrap: patched instructions have changed"); \
        addr[0] |= EIF_IMM((tsb_kernel_mask) >> 10, 22); \
        addr[1] |= EIF_IMM(tsb_kernel_mask, 10); \
        addr[2] |= EIF_IMM(((vm_offset_t)tsb_kernel) >> 10, 22); \
        PATCH(tl1_immu_miss_patch_1);
        PATCH(tl1_immu_miss_patch_2);
        PATCH(tl1_dmmu_miss_patch_1);
        PATCH(tl1_dmmu_miss_patch_2);
        PATCH(tl1_dmmu_prot_patch_1);
        PATCH(tl1_dmmu_prot_patch_2);
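        /*
         * Illustrative sketch (not part of the original code): each patch
         * site in the trap table is assumed to hold a three-instruction
         * template of the form
         *
         *        sethi   %hi(0), %rX      ! addr[0], receives tsb_kernel_mask
         *        or      %rX, %lo(0), %rX ! addr[1], receives tsb_kernel_mask
         *        sethi   %hi(0), %rY      ! addr[2], receives the tsb_kernel VA
         *
         * where the destination registers are whatever the existing encoding
         * already names.  PATCH() first verifies that the template still
         * looks like this (so a changed trap table is caught at boot) and
         * then OR's the immediate fields of tsb_kernel_mask and tsb_kernel
         * into the encoded instructions.
         */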
         * Enter fake 8k pages for the 4MB kernel pages, so that
         * pmap_kextract() will work for them.
        for (i = 0; i < kernel_tlb_slots; i++) {
                pa = kernel_tlbs[i].te_pa;
                va = kernel_tlbs[i].te_va;
                for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
                        tp = tsb_kvtotte(va + off);
                        tp->tte_vpn = TV_VPN(va + off, TS_8K);
                        tp->tte_data = TD_V | TD_8K | TD_PA(pa + off) |
                            TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W;
         * Set the start and end of KVA. The kernel is loaded starting
         * at the first available 4MB super page, so we advance to the
         * end of the last one used for it.
        virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M;
        virtual_end = vm_max_kernel_address;
        kernel_vm_end = vm_max_kernel_address;
         * Allocate kva space for temporary mappings.
        pmap_idle_map = virtual_avail;
        virtual_avail += PAGE_SIZE * colors;
        pmap_temp_map_1 = virtual_avail;
        virtual_avail += PAGE_SIZE * colors;
        pmap_temp_map_2 = virtual_avail;
        virtual_avail += PAGE_SIZE * colors;
         * Allocate a kernel stack with guard page for thread0 and map it
         * into the kernel TSB. We must ensure that the virtual address is
         * colored properly for corresponding CPUs, since we're allocating
         * from phys_avail so the memory won't have an associated vm_page_t.
        pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, colors);
        virtual_avail += roundup(KSTACK_GUARD_PAGES, colors) * PAGE_SIZE;
        kstack0 = virtual_avail;
        virtual_avail += roundup(KSTACK_PAGES, colors) * PAGE_SIZE;
        if (dcache_color_ignore == 0)
                KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys),
                    ("pmap_bootstrap: kstack0 miscolored"));
        for (i = 0; i < KSTACK_PAGES; i++) {
                pa = kstack0_phys + i * PAGE_SIZE;
                va = kstack0 + i * PAGE_SIZE;
                tp = tsb_kvtotte(va);
                tp->tte_vpn = TV_VPN(va, TS_8K);
                tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW |
                    TD_CP | TD_CV | TD_P | TD_W;
         * Calculate the last available physical address.
        for (i = 0; phys_avail[i + 2] != 0; i += 2)
        Maxmem = sparc64_btop(phys_avail[i + 1]);
         * Add the PROM mappings to the kernel TSB.
        if ((vmem = OF_finddevice("/virtual-memory")) == -1)
                panic("pmap_bootstrap: finddevice /virtual-memory");
        if ((sz = OF_getproplen(vmem, "translations")) == -1)
                panic("pmap_bootstrap: getproplen translations");
        if (sizeof(translations) < sz)
                panic("pmap_bootstrap: translations too small");
        bzero(translations, sz);
        if (OF_getprop(vmem, "translations", translations, sz) == -1)
                panic("pmap_bootstrap: getprop /virtual-memory/translations");
        sz /= sizeof(*translations);
        translations_size = sz;
        CTR0(KTR_PMAP, "pmap_bootstrap: translations");
        qsort(translations, sz, sizeof (*translations), om_cmp);
        for (i = 0; i < sz; i++) {
                    "translation: start=%#lx size=%#lx tte=%#lx",
                    translations[i].om_start, translations[i].om_size,
                    translations[i].om_tte);
                if ((translations[i].om_tte & TD_V) == 0)
                if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
                    translations[i].om_start > VM_MAX_PROM_ADDRESS)
                for (off = 0; off < translations[i].om_size;
                        va = translations[i].om_start + off;
                        tp = tsb_kvtotte(va);
                        tp->tte_vpn = TV_VPN(va, TS_8K);
                            ((translations[i].om_tte &
                            ~((TD_SOFT2_MASK << TD_SOFT2_SHIFT) |
                            (cpu_impl >= CPU_IMPL_ULTRASPARCI &&
                            cpu_impl < CPU_IMPL_ULTRASPARCIII ?
                            (TD_DIAG_SF_MASK << TD_DIAG_SF_SHIFT) :
                            (TD_RSVD_CH_MASK << TD_RSVD_CH_SHIFT)) |
                            (TD_SOFT_MASK << TD_SOFT_SHIFT))) | TD_EXEC) +
         * Get the available physical memory ranges from /memory/reg. These
         * are only used for kernel dumps, but it may not be wise to do PROM
         * calls in that situation.
        if ((sz = OF_getproplen(pmem, "reg")) == -1)
                panic("pmap_bootstrap: getproplen /memory/reg");
        if (sizeof(sparc64_memreg) < sz)
                panic("pmap_bootstrap: sparc64_memreg too small");
        if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
                panic("pmap_bootstrap: getprop /memory/reg");
        sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
         * Initialize the kernel pmap (which is statically allocated).
         * NOTE: PMAP_LOCK_INIT() is needed as part of the initialization
         * but sparc64 startup is not ready to initialize mutexes yet.
         * It is called in machdep.c.
        for (i = 0; i < MAXCPU; i++)
                pm->pm_context[i] = TLB_CTX_KERNEL;
         * Flush all non-locked TLB entries possibly left over by the
        tlb_flush_nonlocked();
         * Map the 4MB TSB pages.
        for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
                va = (vm_offset_t)tsb_kernel + i;
                pa = tsb_kernel_phys + i;
                data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
                stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) |
                    TLB_TAR_CTX(TLB_CTX_KERNEL));
                stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
         * Set the secondary context to be the kernel context (needed for
         * FP block operations in the kernel).
        stxa(AA_DMMU_SCXR, ASI_DMMU, (ldxa(AA_DMMU_SCXR, ASI_DMMU) &
            TLB_CXR_PGSZ_MASK) | TLB_CTX_KERNEL);
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from pmap_bootstrap before avail start and end are
pmap_bootstrap_alloc(vm_size_t size, uint32_t colors)
        size = roundup(size, PAGE_SIZE * colors);
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                if (phys_avail[i + 1] - phys_avail[i] < size)
                phys_avail[i] += size;
        panic("pmap_bootstrap_alloc");
 * Initialize a vm_page's machine-dependent fields.
pmap_page_init(vm_page_t m)
        TAILQ_INIT(&m->md.tte_list);
        m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
 * Initialize the pmap module.
        for (i = 0; i < translations_size; i++) {
                addr = translations[i].om_start;
                size = translations[i].om_size;
                if ((translations[i].om_tte & TD_V) == 0)
                if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS)
                result = vm_map_find(kernel_map, NULL, 0, &addr, size,
                    VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
                if (result != KERN_SUCCESS || addr != translations[i].om_start)
                        panic("pmap_init: vm_map_find");
 * Extract the physical page address associated with the given
 * map/virtual_address pair.
pmap_extract(pmap_t pm, vm_offset_t va)
        if (pm == kernel_pmap)
                return (pmap_kextract(va));
        tp = tsb_tte_lookup(pm, va);
        pa = TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp));
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
pmap_extract_and_hold(pmap_t pm, vm_offset_t va, vm_prot_t prot)
        vm_page_lock_queues();
        if (pm == kernel_pmap) {
                if (va >= VM_MIN_DIRECT_ADDRESS) {
                        m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va));
                        tp = tsb_kvtotte(va);
                        if ((tp->tte_data & TD_V) == 0)
                tp = tsb_tte_lookup(pm, va);
        if (tp != NULL && ((tp->tte_data & TD_SW) ||
            (prot & VM_PROT_WRITE) == 0)) {
                m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
        vm_page_unlock_queues();
        if (pm != kernel_pmap)
 * Extract the physical page address associated with the given kernel virtual
pmap_kextract(vm_offset_t va)
        if (va >= VM_MIN_DIRECT_ADDRESS)
                return (TLB_DIRECT_TO_PHYS(va));
        tp = tsb_kvtotte(va);
        if ((tp->tte_data & TD_V) == 0)
        return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
pmap_cache_enter(vm_page_t m, vm_offset_t va)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        KASSERT((m->flags & PG_FICTITIOUS) == 0,
            ("pmap_cache_enter: fake page"));
        PMAP_STATS_INC(pmap_ncache_enter);
        if (dcache_color_ignore != 0)
         * Find the color for this virtual address and note the added mapping.
        color = DCACHE_COLOR(va);
        m->md.colors[color]++;
         * If all existing mappings have the same color, the mapping is
        if (m->md.color == color) {
                KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
                    ("pmap_cache_enter: cacheable, mappings of other color"));
                if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
                        PMAP_STATS_INC(pmap_ncache_enter_c);
                        PMAP_STATS_INC(pmap_ncache_enter_oc);
         * If there are no mappings of the other color, and the page still has
         * the wrong color, this must be a new mapping. Change the color to
         * match the new mapping, which is cacheable. We must flush the page
         * from the cache now.
        if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) {
                KASSERT(m->md.colors[color] == 1,
                    ("pmap_cache_enter: changing color, not new mapping"));
                dcache_page_inval(VM_PAGE_TO_PHYS(m));
                if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
                        PMAP_STATS_INC(pmap_ncache_enter_cc);
                        PMAP_STATS_INC(pmap_ncache_enter_coc);
         * If the mapping is already non-cacheable, just return.
        if (m->md.color == -1) {
                PMAP_STATS_INC(pmap_ncache_enter_nc);
        PMAP_STATS_INC(pmap_ncache_enter_cnc);
         * Mark all mappings as uncacheable, flush any lines with the other
         * color out of the dcache, and set the color to none (-1).
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                atomic_clear_long(&tp->tte_data, TD_CV);
                tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
        dcache_page_inval(VM_PAGE_TO_PHYS(m));
pmap_cache_remove(vm_page_t m, vm_offset_t va)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
            m->md.colors[DCACHE_COLOR(va)]);
        KASSERT((m->flags & PG_FICTITIOUS) == 0,
            ("pmap_cache_remove: fake page"));
        PMAP_STATS_INC(pmap_ncache_remove);
        if (dcache_color_ignore != 0)
        KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
            ("pmap_cache_remove: no mappings %d <= 0",
            m->md.colors[DCACHE_COLOR(va)]));
         * Find the color for this virtual address and note the removal of
        color = DCACHE_COLOR(va);
        m->md.colors[color]--;
         * If the page is cacheable, just return and keep the same color, even
         * if there are no longer any mappings.
        if (m->md.color != -1) {
                if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
                        PMAP_STATS_INC(pmap_ncache_remove_c);
                        PMAP_STATS_INC(pmap_ncache_remove_oc);
        KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0,
            ("pmap_cache_remove: uncacheable, no mappings of other color"));
         * If the page is not cacheable (color is -1), and the number of
         * mappings for this color is not zero, just return. There are
         * mappings of the other color still, so remain non-cacheable.
        if (m->md.colors[color] != 0) {
                PMAP_STATS_INC(pmap_ncache_remove_nc);
         * The number of mappings for this color is now zero. Recache the
         * other colored mappings, and change the page color to the other
         * color. There should be no lines in the data cache for this page,
         * so flushing should not be needed.
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                atomic_set_long(&tp->tte_data, TD_CV);
                tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
        m->md.color = DCACHE_OTHER_COLOR(color);
        if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
                PMAP_STATS_INC(pmap_ncache_remove_cc);
                PMAP_STATS_INC(pmap_ncache_remove_coc);
 * Map a wired page into kernel virtual address space.
pmap_kenter(vm_offset_t va, vm_page_t m)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        PMAP_STATS_INC(pmap_nkenter);
        tp = tsb_kvtotte(va);
        CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
            va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data);
        if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) {
                    "pmap_kenter: off color va=%#lx pa=%#lx o=%p ot=%d pi=%#lx",
                    va, VM_PAGE_TO_PHYS(m), m->object,
                    m->object ? m->object->type : -1,
                PMAP_STATS_INC(pmap_nkenter_oc);
        if ((tp->tte_data & TD_V) != 0) {
                om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
                ova = TTE_GET_VA(tp);
                if (m == om && va == ova) {
                        PMAP_STATS_INC(pmap_nkenter_stupid);
                TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
                pmap_cache_remove(om, ova);
                tlb_page_demap(kernel_pmap, ova);
        data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP |
        if (pmap_cache_enter(m, va) != 0)
        tp->tte_vpn = TV_VPN(va, TS_8K);
        TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
 * Map a wired page into kernel virtual address space. This additionally
 * takes a flag argument which is OR'ed into the TTE data. This is used by
 * sparc64_bus_mem_map().
 * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
 * to flush entries that might still be in the cache, if applicable.
pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags)
        tp = tsb_kvtotte(va);
        CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
            va, pa, tp, tp->tte_data);
        tp->tte_vpn = TV_VPN(va, TS_8K);
        tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
 * Remove a wired page from kernel virtual address space.
pmap_kremove(vm_offset_t va)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        PMAP_STATS_INC(pmap_nkremove);
        tp = tsb_kvtotte(va);
        CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
        if ((tp->tte_data & TD_V) == 0)
        m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
        TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
        pmap_cache_remove(m, va);
 * Inverse of pmap_kenter_flags, used by bus_space_unmap().
pmap_kremove_flags(vm_offset_t va)
        tp = tsb_kvtotte(va);
        CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp,
 * Map a range of physical addresses into kernel virtual address space.
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
        return (TLB_PHYS_TO_DIRECT(start));
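/*
 * Illustrative note (not part of the original code): sparc64 keeps a direct
 * mapping of physical memory, so pmap_map() can ignore the suggested '*virt'
 * hint and simply hand back the direct-mapped alias of 'start'.  A
 * hypothetical caller might use it roughly like this:
 *
 *        vm_offset_t va = virtual_avail;
 *        vm_offset_t mapped = pmap_map(&va, pa_start, pa_end, VM_PROT_ALL);
 *        -- 'mapped' is TLB_PHYS_TO_DIRECT(pa_start); 'va' is left unchanged.
 */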
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
        PMAP_STATS_INC(pmap_nqenter);
        if (!(locked = mtx_owned(&vm_page_queue_mtx)))
                vm_page_lock_queues();
        while (count-- > 0) {
                pmap_kenter(va, *m);
                vm_page_unlock_queues();
        tlb_range_demap(kernel_pmap, sva, va);
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by pmap_qenter.
pmap_qremove(vm_offset_t sva, int count)
        PMAP_STATS_INC(pmap_nqremove);
        if (!(locked = mtx_owned(&vm_page_queue_mtx)))
                vm_page_lock_queues();
        while (count-- > 0) {
                vm_page_unlock_queues();
        tlb_range_demap(kernel_pmap, sva, va);
 * Initialize the pmap associated with process 0.
pmap_pinit0(pmap_t pm)
        for (i = 0; i < MAXCPU; i++)
                pm->pm_context[i] = TLB_CTX_KERNEL;
        pm->pm_tsb_obj = NULL;
        bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 * Initialize a preallocated and zeroed pmap structure, such as one in a
 * vmspace structure.
pmap_pinit(pmap_t pm)
        vm_page_t ma[TSB_PAGES];
         * Allocate KVA space for the TSB.
        if (pm->pm_tsb == NULL) {
                pm->pm_tsb = (struct tte *)kmem_alloc_nofault(kernel_map,
                if (pm->pm_tsb == NULL) {
                        PMAP_LOCK_DESTROY(pm);
         * Allocate an object for it.
        if (pm->pm_tsb_obj == NULL)
                pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, TSB_PAGES);
        mtx_lock_spin(&sched_lock);
        for (i = 0; i < MAXCPU; i++)
                pm->pm_context[i] = -1;
        mtx_unlock_spin(&sched_lock);
        VM_OBJECT_LOCK(pm->pm_tsb_obj);
        for (i = 0; i < TSB_PAGES; i++) {
                m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
                    VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
                m->valid = VM_PAGE_BITS_ALL;
        VM_OBJECT_UNLOCK(pm->pm_tsb_obj);
        pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
        bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
pmap_release(pmap_t pm)
        CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
            pm->pm_context[curcpu], pm->pm_tsb);
        KASSERT(pmap_resident_count(pm) == 0,
            ("pmap_release: resident pages %ld != 0",
            pmap_resident_count(pm)));
         * After the pmap was freed, it might be reallocated to a new process.
         * When switching, this might lead us to wrongly assume that we need
         * not switch contexts because old and new pmap pointers are equal.
         * Therefore, make sure that this pmap is not referenced by any PCPU
         * pointer any more. This could happen in two cases:
         * - A process that referenced the pmap is currently exiting on a CPU.
         *   However, it is guaranteed to not switch in any more after setting
         *   its state to PRS_ZOMBIE.
         * - A process that referenced this pmap ran on a CPU, but we switched
         *   to a kernel thread, leaving the pmap pointer unchanged.
        mtx_lock_spin(&sched_lock);
        SLIST_FOREACH(pc, &cpuhead, pc_allcpu)
                if (pc->pc_pmap == pm)
        mtx_unlock_spin(&sched_lock);
        obj = pm->pm_tsb_obj;
        VM_OBJECT_LOCK(obj);
        KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
        while (!TAILQ_EMPTY(&obj->memq)) {
                m = TAILQ_FIRST(&obj->memq);
                vm_page_lock_queues();
                if (vm_page_sleep_if_busy(m, FALSE, "pmaprl"))
                KASSERT(m->hold_count == 0,
                    ("pmap_release: freeing held tsb page"));
                atomic_subtract_int(&cnt.v_wire_count, 1);
                vm_page_free_zero(m);
                vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(obj);
        pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
        PMAP_LOCK_DESTROY(pm);
 * Grow the number of kernel page table entries. Unneeded.
pmap_growkernel(vm_offset_t addr)
        panic("pmap_growkernel: can't grow kernel");
pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        data = atomic_readandclear_long(&tp->tte_data);
        if ((data & TD_FAKE) == 0) {
                m = PHYS_TO_VM_PAGE(TD_PA(data));
                TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
                if ((data & TD_WIRED) != 0)
                        pm->pm_stats.wired_count--;
                if ((data & TD_PV) != 0) {
                        if ((data & TD_W) != 0)
                        if ((data & TD_REF) != 0)
                                vm_page_flag_set(m, PG_REFERENCED);
                        if (TAILQ_EMPTY(&m->md.tte_list))
                                vm_page_flag_clear(m, PG_WRITEABLE);
                        pm->pm_stats.resident_count--;
                pmap_cache_remove(m, va);
        if (PMAP_REMOVE_DONE(pm))
 * Remove the given range of addresses from the specified map.
pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
        CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
            pm->pm_context[curcpu], start, end);
        if (PMAP_REMOVE_DONE(pm))
        vm_page_lock_queues();
        if (end - start > PMAP_TSB_THRESH) {
                tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
                tlb_context_demap(pm);
                for (va = start; va < end; va += PAGE_SIZE)
                        if ((tp = tsb_tte_lookup(pm, va)) != NULL &&
                            !pmap_remove_tte(pm, NULL, tp, va))
                tlb_range_demap(pm, start, end - 1);
        vm_page_unlock_queues();
pmap_remove_all(vm_page_t m)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
                tpn = TAILQ_NEXT(tp, tte_link);
                if ((tp->tte_data & TD_PV) == 0)
                pm = TTE_GET_PMAP(tp);
                va = TTE_GET_VA(tp);
                if ((tp->tte_data & TD_WIRED) != 0)
                        pm->pm_stats.wired_count--;
                if ((tp->tte_data & TD_REF) != 0)
                        vm_page_flag_set(m, PG_REFERENCED);
                if ((tp->tte_data & TD_W) != 0)
                tp->tte_data &= ~TD_V;
                tlb_page_demap(pm, va);
                TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
                pm->pm_stats.resident_count--;
                pmap_cache_remove(m, va);
        vm_page_flag_clear(m, PG_WRITEABLE);
pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
        data = atomic_clear_long(&tp->tte_data, TD_REF | TD_SW | TD_W);
        if ((data & TD_PV) != 0) {
                m = PHYS_TO_VM_PAGE(TD_PA(data));
                if ((data & TD_REF) != 0)
                        vm_page_flag_set(m, PG_REFERENCED);
                if ((data & TD_W) != 0)
 * Set the physical protection on the specified range of this map as requested.
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
        CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
            pm->pm_context[curcpu], sva, eva, prot);
        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                pmap_remove(pm, sva, eva);
        if (prot & VM_PROT_WRITE)
        vm_page_lock_queues();
        if (eva - sva > PMAP_TSB_THRESH) {
                tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
                tlb_context_demap(pm);
                for (va = sva; va < eva; va += PAGE_SIZE)
                        if ((tp = tsb_tte_lookup(pm, va)) != NULL)
                                pmap_protect_tte(pm, NULL, tp, va);
                tlb_range_demap(pm, sva, eva - 1);
        vm_page_unlock_queues();
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested. If specified the page
 * will be wired down.
pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
        vm_page_lock_queues();
        pmap_enter_locked(pm, va, m, prot, wired);
        vm_page_unlock_queues();
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested. If specified the page
 * will be wired down.
 * The page queues and pmap must be locked.
pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        PMAP_LOCK_ASSERT(pm, MA_OWNED);
        PMAP_STATS_INC(pmap_nenter);
        pa = VM_PAGE_TO_PHYS(m);
         * If this is a fake page from the device_pager, but it covers actual
         * physical memory, convert to the real backing page.
        if ((m->flags & PG_FICTITIOUS) != 0) {
                for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                        if (pa >= phys_avail[i] && pa <= phys_avail[i + 1]) {
                                m = PHYS_TO_VM_PAGE(pa);
            "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
            pm->pm_context[curcpu], m, va, pa, prot, wired);
         * If there is an existing mapping, and the physical address has not
         * changed, it must be a protection or wiring change.
        if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
                CTR0(KTR_PMAP, "pmap_enter_locked: update");
                PMAP_STATS_INC(pmap_nenter_update);
                 * Wiring change, just update stats.
                        if ((tp->tte_data & TD_WIRED) == 0) {
                                tp->tte_data |= TD_WIRED;
                                pm->pm_stats.wired_count++;
                        if ((tp->tte_data & TD_WIRED) != 0) {
                                tp->tte_data &= ~TD_WIRED;
                                pm->pm_stats.wired_count--;
                 * Save the old bits and clear the ones we're interested in.
                data = tp->tte_data;
                tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W);
                 * If we're turning off write permissions, sense modify status.
                if ((prot & VM_PROT_WRITE) != 0) {
                        tp->tte_data |= TD_SW;
                                tp->tte_data |= TD_W;
                        vm_page_flag_set(m, PG_WRITEABLE);
                } else if ((data & TD_W) != 0)
                 * If we're turning on execute permissions, flush the icache.
                if ((prot & VM_PROT_EXECUTE) != 0) {
                        if ((data & TD_EXEC) == 0)
                                icache_page_inval(pa);
                        tp->tte_data |= TD_EXEC;
                 * Delete the old mapping.
                tlb_page_demap(pm, TTE_GET_VA(tp));
                 * If there is an existing mapping, but it's for a different
                 * physical address, delete the old mapping.
                        CTR0(KTR_PMAP, "pmap_enter_locked: replace");
                        PMAP_STATS_INC(pmap_nenter_replace);
                        pmap_remove_tte(pm, NULL, tp, va);
                        tlb_page_demap(pm, va);
                        CTR0(KTR_PMAP, "pmap_enter_locked: new");
                        PMAP_STATS_INC(pmap_nenter_new);
                 * Now set up the data and install the new mapping.
                data = TD_V | TD_8K | TD_PA(pa);
                if (pm == kernel_pmap)
                if ((prot & VM_PROT_WRITE) != 0) {
                        vm_page_flag_set(m, PG_WRITEABLE);
                if (prot & VM_PROT_EXECUTE) {
                        icache_page_inval(pa);
                 * If it's wired, update stats. We also don't need reference or
                 * modify tracking for wired mappings, so set the bits now.
                        pm->pm_stats.wired_count++;
                        data |= TD_REF | TD_WIRED;
                        if ((prot & VM_PROT_WRITE) != 0)
                tsb_tte_enter(pm, m, va, TS_8K, data);
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
        vm_pindex_t diff, psize;
        psize = atop(end - start);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                pmap_enter_locked(pm, start + ptoa(diff), m, prot &
                    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
        pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
            ("pmap_object_init_pt: non-device object"));
 * Change the wiring attribute for a map/virtual-address pair.
 * The mapping must already exist in the pmap.
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
        if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
                        data = atomic_set_long(&tp->tte_data, TD_WIRED);
                        if ((data & TD_WIRED) == 0)
                                pm->pm_stats.wired_count++;
                        data = atomic_clear_long(&tp->tte_data, TD_WIRED);
                        if ((data & TD_WIRED) != 0)
                                pm->pm_stats.wired_count--;
pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp,
        if ((tp->tte_data & TD_FAKE) != 0)
        if (tsb_tte_lookup(dst_pmap, va) == NULL) {
                data = tp->tte_data &
                    ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
                m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
                tsb_tte_enter(dst_pmap, m, va, TS_8K, data);
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
        if (dst_addr != src_addr)
        vm_page_lock_queues();
        if (dst_pmap < src_pmap) {
                PMAP_LOCK(dst_pmap);
                PMAP_LOCK(src_pmap);
                PMAP_LOCK(src_pmap);
                PMAP_LOCK(dst_pmap);
        if (len > PMAP_TSB_THRESH) {
                tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len,
                tlb_context_demap(dst_pmap);
                for (va = src_addr; va < src_addr + len; va += PAGE_SIZE)
                        if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
                                pmap_copy_tte(src_pmap, dst_pmap, tp, va);
                tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
        vm_page_unlock_queues();
        PMAP_UNLOCK(src_pmap);
        PMAP_UNLOCK(dst_pmap);
pmap_zero_page(vm_page_t m)
        KASSERT((m->flags & PG_FICTITIOUS) == 0,
            ("pmap_zero_page: fake page"));
        PMAP_STATS_INC(pmap_nzero_page);
        pa = VM_PAGE_TO_PHYS(m);
        if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
                PMAP_STATS_INC(pmap_nzero_page_c);
                va = TLB_PHYS_TO_DIRECT(pa);
                cpu_block_zero((void *)va, PAGE_SIZE);
        } else if (m->md.color == -1) {
                PMAP_STATS_INC(pmap_nzero_page_nc);
                aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
                PMAP_STATS_INC(pmap_nzero_page_oc);
                PMAP_LOCK(kernel_pmap);
                va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
                tp = tsb_kvtotte(va);
                tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
                tp->tte_vpn = TV_VPN(va, TS_8K);
                cpu_block_zero((void *)va, PAGE_SIZE);
                tlb_page_demap(kernel_pmap, va);
                PMAP_UNLOCK(kernel_pmap);
pmap_zero_page_area(vm_page_t m, int off, int size)
        KASSERT((m->flags & PG_FICTITIOUS) == 0,
            ("pmap_zero_page_area: fake page"));
        KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
        PMAP_STATS_INC(pmap_nzero_page_area);
        pa = VM_PAGE_TO_PHYS(m);
        if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
                PMAP_STATS_INC(pmap_nzero_page_area_c);
                va = TLB_PHYS_TO_DIRECT(pa);
                bzero((void *)(va + off), size);
        } else if (m->md.color == -1) {
                PMAP_STATS_INC(pmap_nzero_page_area_nc);
                aszero(ASI_PHYS_USE_EC, pa + off, size);
                PMAP_STATS_INC(pmap_nzero_page_area_oc);
                PMAP_LOCK(kernel_pmap);
                va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
                tp = tsb_kvtotte(va);
                tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
                tp->tte_vpn = TV_VPN(va, TS_8K);
                bzero((void *)(va + off), size);
                tlb_page_demap(kernel_pmap, va);
                PMAP_UNLOCK(kernel_pmap);
pmap_zero_page_idle(vm_page_t m)
        KASSERT((m->flags & PG_FICTITIOUS) == 0,
            ("pmap_zero_page_idle: fake page"));
        PMAP_STATS_INC(pmap_nzero_page_idle);
        pa = VM_PAGE_TO_PHYS(m);
        if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
                PMAP_STATS_INC(pmap_nzero_page_idle_c);
                va = TLB_PHYS_TO_DIRECT(pa);
                cpu_block_zero((void *)va, PAGE_SIZE);
        } else if (m->md.color == -1) {
                PMAP_STATS_INC(pmap_nzero_page_idle_nc);
                aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
                PMAP_STATS_INC(pmap_nzero_page_idle_oc);
                va = pmap_idle_map + (m->md.color * PAGE_SIZE);
                tp = tsb_kvtotte(va);
                tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
                tp->tte_vpn = TV_VPN(va, TS_8K);
                cpu_block_zero((void *)va, PAGE_SIZE);
                tlb_page_demap(kernel_pmap, va);
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
        KASSERT((mdst->flags & PG_FICTITIOUS) == 0,
            ("pmap_copy_page: fake dst page"));
        KASSERT((msrc->flags & PG_FICTITIOUS) == 0,
            ("pmap_copy_page: fake src page"));
        PMAP_STATS_INC(pmap_ncopy_page);
        pdst = VM_PAGE_TO_PHYS(mdst);
        psrc = VM_PAGE_TO_PHYS(msrc);
        if (dcache_color_ignore != 0 ||
            (msrc->md.color == DCACHE_COLOR(psrc) &&
            mdst->md.color == DCACHE_COLOR(pdst))) {
                PMAP_STATS_INC(pmap_ncopy_page_c);
                vdst = TLB_PHYS_TO_DIRECT(pdst);
                vsrc = TLB_PHYS_TO_DIRECT(psrc);
                cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
        } else if (msrc->md.color == -1 && mdst->md.color == -1) {
                PMAP_STATS_INC(pmap_ncopy_page_nc);
                ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
        } else if (msrc->md.color == -1) {
                if (mdst->md.color == DCACHE_COLOR(pdst)) {
                        PMAP_STATS_INC(pmap_ncopy_page_dc);
                        vdst = TLB_PHYS_TO_DIRECT(pdst);
                        ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
                        PMAP_STATS_INC(pmap_ncopy_page_doc);
                        PMAP_LOCK(kernel_pmap);
                        vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
                        tp = tsb_kvtotte(vdst);
                            TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
                        tp->tte_vpn = TV_VPN(vdst, TS_8K);
                        ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
                        tlb_page_demap(kernel_pmap, vdst);
                        PMAP_UNLOCK(kernel_pmap);
        } else if (mdst->md.color == -1) {
                if (msrc->md.color == DCACHE_COLOR(psrc)) {
                        PMAP_STATS_INC(pmap_ncopy_page_sc);
                        vsrc = TLB_PHYS_TO_DIRECT(psrc);
                        ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
                        PMAP_STATS_INC(pmap_ncopy_page_soc);
                        PMAP_LOCK(kernel_pmap);
                        vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
                        tp = tsb_kvtotte(vsrc);
                            TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
                        tp->tte_vpn = TV_VPN(vsrc, TS_8K);
                        ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
                        tlb_page_demap(kernel_pmap, vsrc);
                        PMAP_UNLOCK(kernel_pmap);
                PMAP_STATS_INC(pmap_ncopy_page_oc);
                PMAP_LOCK(kernel_pmap);
                vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
                tp = tsb_kvtotte(vdst);
                    TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
                tp->tte_vpn = TV_VPN(vdst, TS_8K);
                vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
                tp = tsb_kvtotte(vsrc);
                    TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
                tp->tte_vpn = TV_VPN(vsrc, TS_8K);
                cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
                tlb_page_demap(kernel_pmap, vdst);
                tlb_page_demap(kernel_pmap, vsrc);
                PMAP_UNLOCK(kernel_pmap);
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page. This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
pmap_page_exists_quick(pmap_t pm, vm_page_t m)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                if ((tp->tte_data & TD_PV) == 0)
                if (TTE_GET_PMAP(tp) == pm)
 * Return the number of managed mappings to the given physical page
pmap_page_wired_mappings(vm_page_t m)
        if ((m->flags & PG_FICTITIOUS) != 0)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
                if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
 * Remove all pages from specified address space, this aids process exit
 * speeds. This is much faster than pmap_remove in the case of running down
 * an entire address space. Only works for the current pmap.
pmap_remove_pages(pmap_t pm)
 * Returns TRUE if the given page has a managed mapping.
pmap_page_is_mapped(vm_page_t m)
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
                if ((tp->tte_data & TD_PV) != 0)
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
pmap_ts_referenced(vm_page_t m)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
        if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
                        tpn = TAILQ_NEXT(tp, tte_link);
                        TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
                        TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
                        if ((tp->tte_data & TD_PV) == 0)
                        data = atomic_clear_long(&tp->tte_data, TD_REF);
                        if ((data & TD_REF) != 0 && ++count > 4)
                } while ((tp = tpn) != NULL && tp != tpf);
pmap_is_modified(vm_page_t m)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                if ((tp->tte_data & TD_PV) == 0)
                if ((tp->tte_data & TD_W) != 0)
 * pmap_is_prefaultable:
 * Return whether or not the specified virtual address is eligible
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
        rv = tsb_tte_lookup(pmap, addr) == NULL;
pmap_clear_modify(vm_page_t m)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                if ((tp->tte_data & TD_PV) == 0)
                data = atomic_clear_long(&tp->tte_data, TD_W);
                if ((data & TD_W) != 0)
                        tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
pmap_clear_reference(vm_page_t m)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                if ((tp->tte_data & TD_PV) == 0)
                data = atomic_clear_long(&tp->tte_data, TD_REF);
                if ((data & TD_REF) != 0)
                        tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
pmap_remove_write(vm_page_t m)
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
            (m->flags & PG_WRITEABLE) == 0)
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                if ((tp->tte_data & TD_PV) == 0)
                data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
                if ((data & TD_W) != 0) {
                        tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
        vm_page_flag_clear(m, PG_WRITEABLE);
pmap_mincore(pmap_t pm, vm_offset_t addr)
 * Activate a user pmap. The pmap must be activated before its address space
 * can be accessed in any way.
pmap_activate(struct thread *td)
        vm = td->td_proc->p_vmspace;
        pm = vmspace_pmap(vm);
        mtx_lock_spin(&sched_lock);
        context = PCPU_GET(tlb_ctx);
        if (context == PCPU_GET(tlb_ctx_max)) {
                context = PCPU_GET(tlb_ctx_min);
        PCPU_SET(tlb_ctx, context + 1);
        pm->pm_context[curcpu] = context;
        pm->pm_active |= PCPU_GET(cpumask);
        stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
        stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
        stxa(AA_DMMU_PCXR, ASI_DMMU, (ldxa(AA_DMMU_PCXR, ASI_DMMU) &
            TLB_CXR_PGSZ_MASK) | context);
        mtx_unlock_spin(&sched_lock);
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)