2 * Copyright (c) 1991 Regents of the University of California.
4 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1994 David Greenman
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
38 * from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
39 * JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
43 * Manages physical address maps.
45 * In addition to hardware address maps, this
46 * module is called upon to provide software-use-only
47 * maps which may or may not be stored in the same
48 * form as hardware maps. These pseudo-maps are
49 * used to store intermediate results from copy
50 * operations to and from address spaces.
52 * Since the information managed by this module is
53 * also stored by the logical address mapping module,
54 * this module may throw away valid virtual-to-physical
55 * mappings at almost any time. However, invalidations
56 * of virtual-to-physical mappings must be done as requested.
59 * In order to cope with hardware architectures which
60 * make virtual-to-physical map invalidates expensive,
61 * this module may delay invalidation or protection-reduction
62 * operations until such time as they are actually
63 * necessary. This module is given full information as
64 * to which processors are currently using which maps,
65 * and to when physical maps must be made correct.
68 #include <sys/cdefs.h>
69 __FBSDID("$FreeBSD$");
73 #include <sys/param.h>
74 #include <sys/systm.h>
76 #include <sys/msgbuf.h>
77 #include <sys/vmmeter.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_phys.h>
88 #include <sys/mutex.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_extern.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_pager.h>
98 #include <sys/sched.h>
103 #include <machine/cache.h>
104 #include <machine/md_var.h>
105 #include <machine/tlb.h>
109 #ifndef PMAP_SHPGPERPROC
110 #define PMAP_SHPGPERPROC 200
113 #if !defined(DIAGNOSTIC)
114 #define PMAP_INLINE __inline
120 * Get PDEs and PTEs for user/kernel address space
122 #define pmap_seg_index(v) (((v) >> SEGSHIFT) & (NPDEPG - 1))
123 #define pmap_pde_index(v) (((v) >> PDRSHIFT) & (NPDEPG - 1))
124 #define pmap_pte_index(v) (((v) >> PAGE_SHIFT) & (NPTEPG - 1))
125 #define pmap_pde_pindex(v) ((v) >> PDRSHIFT)
128 #define NUPDE (NPDEPG * NPDEPG)
129 #define NUSERPGTBLS (NUPDE + NPDEPG)
131 #define NUPDE (NPDEPG)
132 #define NUSERPGTBLS (NUPDE)
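/*
 * Illustrative sketch (not part of the original file): how the index macros
 * above slice a virtual address during a software page-table walk.  On the
 * three-level (n64) layout the chain is segment table -> page directory page
 * -> page table page; on the two-level (32-bit) layout the directory level
 * collapses away.  This is what pmap_segmap()/pmap_pdpe_to_pde()/
 * pmap_pde_to_pte() below implement.
 */
#if 0	/* example only, n64 layout */
	pd_entry_t *pdpe, *pde;
	pt_entry_t *pte;

	pdpe = &pmap->pm_segtab[pmap_seg_index(va)];	/* segment table slot */
	pde = (pd_entry_t *)*pdpe;			/* page directory page */
	pte = (pt_entry_t *)pde[pmap_pde_index(va)];	/* page table page */
	pte = &pte[pmap_pte_index(va)];			/* leaf PTE for va */
#endif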
135 #define is_kernel_pmap(x) ((x) == kernel_pmap)
137 struct pmap kernel_pmap_store;
138 pd_entry_t *kernel_segmap;
140 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
141 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
144 unsigned pmap_max_asid; /* max ASID supported by the system */
146 #define PMAP_ASID_RESERVED 0
148 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
150 static void pmap_asid_alloc(pmap_t pmap);
153 * Data for the pv entry allocation mechanism
155 static uma_zone_t pvzone;
156 static struct vm_object pvzone_obj;
157 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
159 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
160 static pv_entry_t get_pv_entry(pmap_t locked_pmap);
161 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
162 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
164 static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
165 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
166 vm_page_t m, vm_prot_t prot, vm_page_t mpte);
167 static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
168 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
169 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
170 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
171 vm_offset_t va, vm_page_t m);
172 static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
173 static void pmap_invalidate_all(pmap_t pmap);
174 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
175 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);
177 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
178 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
179 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
180 static pt_entry_t init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
183 static void pmap_invalidate_page_action(void *arg);
184 static void pmap_invalidate_all_action(void *arg);
185 static void pmap_update_page_action(void *arg);
190 * This structure is for high memory (memory above 512Meg in 32 bit) support.
191 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
192 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
194 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
195 * access a highmem physical address on a CPU, we map the physical address to
196 * the reserved virtual address for the CPU in the kernel pagetable. This is
197 * done with interrupts disabled (although a spinlock and sched_pin would be sufficient).
200 struct local_sysmaps {
203 uint16_t valid1, valid2;
205 static struct local_sysmaps sysmap_lmem[MAXCPU];
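/*
 * Illustrative sketch (not part of the original file): the typical use of the
 * per-CPU lmem mappings when a page is not direct-mappable.  pmap_lmem_map1()
 * installs the page at this CPU's reserved VA with interrupts disabled, and
 * pmap_lmem_unmap() tears the mapping down and restores the interrupt state;
 * pmap_zero_page() and pmap_copy_page() below follow this pattern.
 */
#if 0	/* example only; 'phys' is a non-direct-mappable physical address */
	vm_offset_t va;

	va = pmap_lmem_map1(phys);		/* interrupts now disabled */
	bzero((caddr_t)va, PAGE_SIZE);
	mips_dcache_wbinv_range(va, PAGE_SIZE);
	pmap_lmem_unmap();			/* restores saved interrupt state */
#endif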
208 pmap_alloc_lmem_map(void)
212 for (i = 0; i < MAXCPU; i++) {
213 sysmap_lmem[i].base = virtual_avail;
214 virtual_avail += PAGE_SIZE * 2;
215 sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
219 static __inline vm_offset_t
220 pmap_lmem_map1(vm_paddr_t phys)
222 struct local_sysmaps *sysm;
223 pt_entry_t *pte, npte;
228 intr = intr_disable();
229 cpu = PCPU_GET(cpuid);
230 sysm = &sysmap_lmem[cpu];
231 sysm->saved_intr = intr;
233 npte = TLBLO_PA_TO_PFN(phys) |
234 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
235 pte = pmap_pte(kernel_pmap, va);
241 static __inline vm_offset_t
242 pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
244 struct local_sysmaps *sysm;
245 pt_entry_t *pte, npte;
246 vm_offset_t va1, va2;
250 intr = intr_disable();
251 cpu = PCPU_GET(cpuid);
252 sysm = &sysmap_lmem[cpu];
253 sysm->saved_intr = intr;
255 va2 = sysm->base + PAGE_SIZE;
256 npte = TLBLO_PA_TO_PFN(phys1) |
257 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
258 pte = pmap_pte(kernel_pmap, va1);
260 npte = TLBLO_PA_TO_PFN(phys2) |
261 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
262 pte = pmap_pte(kernel_pmap, va2);
270 pmap_lmem_unmap(void)
272 struct local_sysmaps *sysm;
276 cpu = PCPU_GET(cpuid);
277 sysm = &sysmap_lmem[cpu];
278 pte = pmap_pte(kernel_pmap, sysm->base);
280 tlb_invalidate_address(kernel_pmap, sysm->base);
283 pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
285 tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
288 intr_restore(sysm->saved_intr);
290 #else /* __mips_n64 */
293 pmap_alloc_lmem_map(void)
297 static __inline vm_offset_t
298 pmap_lmem_map1(vm_paddr_t phys)
304 static __inline vm_offset_t
305 pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
311 static __inline vm_offset_t
312 pmap_lmem_unmap(void)
317 #endif /* !__mips_n64 */
320 * Page table entry lookup routines.
322 static __inline pd_entry_t *
323 pmap_segmap(pmap_t pmap, vm_offset_t va)
326 return (&pmap->pm_segtab[pmap_seg_index(va)]);
330 static __inline pd_entry_t *
331 pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
335 pde = (pd_entry_t *)*pdpe;
336 return (&pde[pmap_pde_index(va)]);
339 static __inline pd_entry_t *
340 pmap_pde(pmap_t pmap, vm_offset_t va)
344 pdpe = pmap_segmap(pmap, va);
345 if (pdpe == NULL || *pdpe == NULL)
348 return (pmap_pdpe_to_pde(pdpe, va));
351 static __inline pd_entry_t *
352 pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
359 pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va)
362 return (pmap_segmap(pmap, va));
366 static __inline pt_entry_t *
367 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
371 pte = (pt_entry_t *)*pde;
372 return (&pte[pmap_pte_index(va)]);
376 pmap_pte(pmap_t pmap, vm_offset_t va)
380 pde = pmap_pde(pmap, va);
381 if (pde == NULL || *pde == NULL)
384 return (pmap_pde_to_pte(pde, va));
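/*
 * Illustrative sketch (not part of the original file): the usual way callers
 * consume the lookup chain above.  pmap_pte() may return NULL when no page
 * table page is allocated for the address, so both the pointer and PTE_V must
 * be checked; pmap_extract() below is the canonical example.
 */
#if 0	/* example only */
	pt_entry_t *pte;
	vm_paddr_t pa;

	pte = pmap_pte(pmap, va);
	if (pte != NULL && pte_test(pte, PTE_V))
		pa = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
#endif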
388 pmap_steal_memory(vm_size_t size)
390 vm_paddr_t bank_size, pa;
393 size = round_page(size);
394 bank_size = phys_avail[1] - phys_avail[0];
395 while (size > bank_size) {
398 for (i = 0; phys_avail[i + 2]; i += 2) {
399 phys_avail[i] = phys_avail[i + 2];
400 phys_avail[i + 1] = phys_avail[i + 3];
403 phys_avail[i + 1] = 0;
405 panic("pmap_steal_memory: out of memory");
406 bank_size = phys_avail[1] - phys_avail[0];
410 phys_avail[0] += size;
411 if (MIPS_DIRECT_MAPPABLE(pa) == 0)
412 panic("Out of memory below 512Meg?");
413 va = MIPS_PHYS_TO_DIRECT(pa);
414 bzero((caddr_t)va, size);
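/*
 * Note (added for clarity): phys_avail[] is a zero-terminated list of
 * (start, end) physical address pairs.  pmap_steal_memory() carves the
 * requested size off the front of the first pair and, when a pair is
 * exhausted, shifts the remaining pairs down.  A sketch of walking the list,
 * mirroring the loops used during bootstrap below:
 */
#if 0	/* example only */
	int i;

	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		printf("bank %d: %#jx - %#jx\n", i / 2,
		    (uintmax_t)phys_avail[i], (uintmax_t)phys_avail[i + 1]);
#endif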
419 * Bootstrap the system enough to run with virtual memory. This
420 * assumes that the phys_avail array has been initialized.
423 pmap_create_kernel_pagetable(void)
435 * Allocate segment table for the kernel
437 kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);
440 * Allocate second level page tables for the kernel
443 npde = howmany(NKPT, NPDEPG);
444 pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
447 ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);
450 * The R[4-7]?00 stores only one copy of the Global bit in the
451 * translation lookaside buffer for each 2 page entry. Thus invalid
452 * entries must have the Global bit set so when Entry LO and Entry HI
453 * G bits are anded together they will produce a global bit to store in the TLB.
456 for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
460 for (i = 0, npt = nkpt; npt > 0; i++) {
461 kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
462 pde = (pd_entry_t *)kernel_segmap[i];
464 for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
465 pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
468 for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
469 kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
472 PMAP_LOCK_INIT(kernel_pmap);
473 kernel_pmap->pm_segtab = kernel_segmap;
474 CPU_FILL(&kernel_pmap->pm_active);
475 TAILQ_INIT(&kernel_pmap->pm_pvlist);
476 kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
477 kernel_pmap->pm_asid[0].gen = 0;
478 kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
485 int need_local_mappings = 0;
489 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
491 * Keep the memory aligned on page boundary.
493 phys_avail[i] = round_page(phys_avail[i]);
494 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
498 if (phys_avail[i - 2] > phys_avail[i]) {
501 ptemp[0] = phys_avail[i + 0];
502 ptemp[1] = phys_avail[i + 1];
504 phys_avail[i + 0] = phys_avail[i - 2];
505 phys_avail[i + 1] = phys_avail[i - 1];
507 phys_avail[i - 2] = ptemp[0];
508 phys_avail[i - 1] = ptemp[1];
514 * In 32 bit, we may have memory which cannot be mapped directly.
515 * This memory will need temporary mapping before it can be
518 if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
519 need_local_mappings = 1;
522 * Copy the phys_avail[] array before we start stealing memory from it.
524 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
525 physmem_desc[i] = phys_avail[i];
526 physmem_desc[i + 1] = phys_avail[i + 1];
529 Maxmem = atop(phys_avail[i - 1]);
532 printf("Physical memory chunk(s):\n");
533 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
536 size = phys_avail[i + 1] - phys_avail[i];
537 printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
538 (uintmax_t) phys_avail[i],
539 (uintmax_t) phys_avail[i + 1] - 1,
540 (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
542 printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
545 * Steal the message buffer from the beginning of memory.
547 msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
548 msgbufinit(msgbufp, msgbufsize);
551 * Steal thread0 kstack.
553 kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);
555 virtual_avail = VM_MIN_KERNEL_ADDRESS;
556 virtual_end = VM_MAX_KERNEL_ADDRESS;
560 * Steal some virtual address space to map the pcpu area.
562 virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
563 pcpup = (struct pcpu *)virtual_avail;
564 virtual_avail += PAGE_SIZE * 2;
567 * Initialize the wired TLB entry mapping the pcpu region for
568 * the BSP at 'pcpup'. Up until this point we were operating
569 * with the 'pcpup' for the BSP pointing to a virtual address
570 * in KSEG0 so there was no need for a TLB mapping.
572 mips_pcpu_tlb_init(PCPU_ADDR(0));
575 printf("pcpu is available at virtual address %p.\n", pcpup);
578 if (need_local_mappings)
579 pmap_alloc_lmem_map();
580 pmap_create_kernel_pagetable();
581 pmap_max_asid = VMNUM_PIDS;
587 * Initialize a vm_page's machine-dependent fields.
590 pmap_page_init(vm_page_t m)
593 TAILQ_INIT(&m->md.pv_list);
594 m->md.pv_list_count = 0;
599 * Initialize the pmap module.
600 * Called by vm_init, to initialize any structures that the pmap
601 * system needs to map virtual memory.
602 * pmap_init has been enhanced to support, in a fairly consistent
603 * way, discontiguous physical memory.
610 * Initialize the address space (zone) for the pv entries. Set a
611 * high water mark so that the system can recover from excessive
612 * numbers of pv entries.
614 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
615 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
616 pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count;
617 pv_entry_high_water = 9 * (pv_entry_max / 10);
618 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
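/*
 * Worked example (added for clarity; maxproc and the page count are
 * hypothetical values): with the default PMAP_SHPGPERPROC of 200,
 * maxproc = 1000 and 65536 physical pages,
 *
 *	pv_entry_max        = 200 * 1000 + 65536 = 265536
 *	pv_entry_high_water = 9 * (265536 / 10)  = 238977
 *
 * so the pagedaemon is woken once roughly 90% of the pv entry budget is used
 * (see get_pv_entry() below).
 */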
621 /***************************************************
622 * Low level helper routines.....
623 ***************************************************/
626 pmap_invalidate_all_local(pmap_t pmap)
630 cpuid = PCPU_GET(cpuid);
632 if (pmap == kernel_pmap) {
633 tlb_invalidate_all();
636 if (CPU_ISSET(cpuid, &pmap->pm_active))
637 tlb_invalidate_all_user(pmap);
639 pmap->pm_asid[cpuid].gen = 0;
644 pmap_invalidate_all(pmap_t pmap)
647 smp_rendezvous(0, pmap_invalidate_all_action, 0, pmap);
651 pmap_invalidate_all_action(void *arg)
654 pmap_invalidate_all_local((pmap_t)arg);
658 pmap_invalidate_all(pmap_t pmap)
661 pmap_invalidate_all_local(pmap);
666 pmap_invalidate_page_local(pmap_t pmap, vm_offset_t va)
670 cpuid = PCPU_GET(cpuid);
672 if (is_kernel_pmap(pmap)) {
673 tlb_invalidate_address(pmap, va);
676 if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
678 else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
679 pmap->pm_asid[cpuid].gen = 0;
682 tlb_invalidate_address(pmap, va);
686 struct pmap_invalidate_page_arg {
692 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
694 struct pmap_invalidate_page_arg arg;
698 smp_rendezvous(0, pmap_invalidate_page_action, 0, &arg);
702 pmap_invalidate_page_action(void *arg)
704 struct pmap_invalidate_page_arg *p = arg;
706 pmap_invalidate_page_local(p->pmap, p->va);
710 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
713 pmap_invalidate_page_local(pmap, va);
718 pmap_update_page_local(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
722 cpuid = PCPU_GET(cpuid);
724 if (is_kernel_pmap(pmap)) {
725 tlb_update(pmap, va, pte);
728 if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
730 else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
731 pmap->pm_asid[cpuid].gen = 0;
734 tlb_update(pmap, va, pte);
738 struct pmap_update_page_arg {
745 pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
747 struct pmap_update_page_arg arg;
752 smp_rendezvous(0, pmap_update_page_action, 0, &arg);
756 pmap_update_page_action(void *arg)
758 struct pmap_update_page_arg *p = arg;
760 pmap_update_page_local(p->pmap, p->va, p->pte);
764 pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
767 pmap_update_page_local(pmap, va, pte);
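/*
 * Illustrative sketch (not part of the original file): the pattern callers
 * follow after editing a PTE in software.  When an existing mapping is
 * modified in place, the new value is pushed to the TLB with
 * pmap_update_page(); when a mapping is destroyed, the stale entry is flushed
 * with pmap_invalidate_page().  pmap_protect() and pmap_remove_page() below
 * are the canonical users.
 */
#if 0	/* example only: downgrade a mapping to read-only */
	pt_entry_t *pte;

	pte = pmap_pte(pmap, va);
	pte_clear(pte, PTE_D);
	pte_set(pte, PTE_RO);
	pmap_update_page(pmap, va, *pte);
#endif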
772 * Routine: pmap_extract
774 * Extract the physical page address associated
775 * with the given map/virtual_address pair.
778 pmap_extract(pmap_t pmap, vm_offset_t va)
781 vm_offset_t retval = 0;
784 pte = pmap_pte(pmap, va);
786 retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
793 * Routine: pmap_extract_and_hold
795 * Atomically extract and hold the physical page
796 * with the given pmap and virtual address pair
797 * if that mapping permits the given protection.
800 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
810 pte = *pmap_pte(pmap, va);
811 if (pte != 0 && pte_test(&pte, PTE_V) &&
812 (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) {
813 if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa))
816 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte));
824 /***************************************************
825 * Low level mapping routines.....
826 ***************************************************/
829 * add a wired page to the kva
832 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
835 pt_entry_t opte, npte;
838 printf("pmap_kenter: va: %p -> pa: %p\n", (void *)va, (void *)pa);
841 pte = pmap_pte(kernel_pmap, va);
843 npte = TLBLO_PA_TO_PFN(pa) | attr | PTE_D | PTE_V | PTE_G;
845 if (pte_test(&opte, PTE_V) && opte != npte)
846 pmap_update_page(kernel_pmap, va, npte);
850 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
853 KASSERT(is_cacheable_mem(pa),
854 ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));
856 pmap_kenter_attr(va, pa, PTE_C_CACHE);
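/*
 * Illustrative sketch (not part of the original file): pairing pmap_kenter()
 * with pmap_kremove() to wire a physical page into a reserved KVA page and
 * tear it down again ('va' and 'pa' are assumed to be a page-aligned kernel
 * virtual address and a cacheable physical address).
 */
#if 0	/* example only */
	pmap_kenter(va, pa);
	bzero((caddr_t)va, PAGE_SIZE);
	pmap_kremove(va);
#endif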
860 * remove a page from the kernel pagetables
862 /* PMAP_INLINE */ void
863 pmap_kremove(vm_offset_t va)
868 * Write back all caches from the page being destroyed
870 mips_dcache_wbinv_range_index(va, PAGE_SIZE);
872 pte = pmap_pte(kernel_pmap, va);
874 pmap_invalidate_page(kernel_pmap, va);
878 * Used to map a range of physical addresses into kernel
879 * virtual address space.
881 * The value passed in '*virt' is a suggested virtual address for
882 * the mapping. Architectures which can support a direct-mapped
883 * physical to virtual region can return the appropriate address
884 * within that region, leaving '*virt' unchanged. Other
885 * architectures should map the pages starting at '*virt' and
886 * update '*virt' with the first usable address after the mapped
889 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
892 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
896 if (MIPS_DIRECT_MAPPABLE(end - 1))
897 return (MIPS_PHYS_TO_DIRECT(start));
900 while (start < end) {
901 pmap_kenter(va, start);
910 * Add a list of wired pages to the kva
911 * this routine is only used for temporary
912 * kernel mappings that do not need to have
913 * page modification or references recorded.
914 * Note that old mappings are simply written
915 * over. The page *must* be wired.
918 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
921 vm_offset_t origva = va;
923 for (i = 0; i < count; i++) {
924 pmap_flush_pvcache(m[i]);
925 pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
929 mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
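/*
 * Illustrative sketch (not part of the original file): a typical temporary
 * mapping of an array of wired pages with pmap_qenter(), undone with
 * pmap_qremove() below ('kva', 'pages' and 'npages' are hypothetical).
 */
#if 0	/* example only */
	pmap_qenter(kva, pages, npages);
	bzero((caddr_t)kva, npages * PAGE_SIZE);
	pmap_qremove(kva, npages);
#endif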
933 * this routine jerks page mappings from the
934 * kernel -- it is meant only for temporary mappings.
937 pmap_qremove(vm_offset_t va, int count)
940 * No need to wb/inv caches here,
941 * pmap_kremove will do it for us
944 while (count-- > 0) {
950 /***************************************************
951 * Page table page management routines.....
952 ***************************************************/
955 * Decrements a page table page's wire count, which is used to record the
956 * number of valid page table entries within the page. If the wire count
957 * drops to zero, then the page table page is unmapped. Returns TRUE if the
958 * page table page was unmapped and FALSE otherwise.
960 static PMAP_INLINE boolean_t
961 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
965 if (m->wire_count == 0) {
966 _pmap_unwire_ptp(pmap, va, m);
973 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
977 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
979 * unmap the page table page
982 if (m->pindex < NUPDE)
983 pde = pmap_pde(pmap, va);
985 pde = pmap_segmap(pmap, va);
987 pde = pmap_pde(pmap, va);
990 pmap->pm_stats.resident_count--;
993 if (m->pindex < NUPDE) {
998 * Recursively decrement next level pagetable refcount
1000 pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
1001 pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
1002 pmap_unwire_ptp(pmap, va, pdpg);
1005 if (pmap->pm_ptphint == m)
1006 pmap->pm_ptphint = NULL;
1009 * If the page is finally unwired, simply free it.
1011 vm_page_free_zero(m);
1012 atomic_subtract_int(&cnt.v_wire_count, 1);
1016 * After removing a page table entry, this routine is used to
1017 * conditionally free the page, and manage the hold/wire counts.
1020 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
1025 if (va >= VM_MAXUSER_ADDRESS)
1029 ptepindex = pmap_pde_pindex(va);
1030 if (pmap->pm_ptphint &&
1031 (pmap->pm_ptphint->pindex == ptepindex)) {
1032 mpte = pmap->pm_ptphint;
1034 pteva = *pmap_pde(pmap, va);
1035 mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pteva));
1036 pmap->pm_ptphint = mpte;
1039 return (pmap_unwire_ptp(pmap, va, mpte));
1043 pmap_pinit0(pmap_t pmap)
1047 PMAP_LOCK_INIT(pmap);
1048 pmap->pm_segtab = kernel_segmap;
1049 CPU_ZERO(&pmap->pm_active);
1050 pmap->pm_ptphint = NULL;
1051 for (i = 0; i < MAXCPU; i++) {
1052 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1053 pmap->pm_asid[i].gen = 0;
1055 PCPU_SET(curpmap, pmap);
1056 TAILQ_INIT(&pmap->pm_pvlist);
1057 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1061 pmap_grow_direct_page_cache()
1065 vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
1067 vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
1072 pmap_alloc_direct_page(unsigned int index, int req)
1076 m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req);
1080 if ((m->flags & PG_ZERO) == 0)
1084 atomic_add_int(&cnt.v_wire_count, 1);
1090 * Initialize a preallocated and zeroed pmap structure,
1091 * such as one in a vmspace structure.
1094 pmap_pinit(pmap_t pmap)
1100 PMAP_LOCK_INIT(pmap);
1103 * allocate the page directory page
1105 while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
1106 pmap_grow_direct_page_cache();
1108 ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
1109 pmap->pm_segtab = (pd_entry_t *)ptdva;
1110 CPU_ZERO(&pmap->pm_active);
1111 pmap->pm_ptphint = NULL;
1112 for (i = 0; i < MAXCPU; i++) {
1113 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1114 pmap->pm_asid[i].gen = 0;
1116 TAILQ_INIT(&pmap->pm_pvlist);
1117 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1123 * this routine is called if the page table page is not
1127 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
1132 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1133 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1134 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1137 * Find or fabricate a new pagetable page
1139 if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
1140 if (flags & M_WAITOK) {
1142 vm_page_unlock_queues();
1143 pmap_grow_direct_page_cache();
1144 vm_page_lock_queues();
1149 * Indicate the need to retry. While waiting, the page
1150 * table page may have been allocated.
1156 * Map the pagetable page into the process address space, if it
1157 * isn't already there.
1159 pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1162 if (ptepindex >= NUPDE) {
1163 pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
1165 pd_entry_t *pdep, *pde;
1166 int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
1167 int pdeindex = ptepindex & (NPDEPG - 1);
1170 pdep = &pmap->pm_segtab[segindex];
1171 if (*pdep == NULL) {
1172 /* recurse for allocating page dir */
1173 if (_pmap_allocpte(pmap, NUPDE + segindex,
1175 /* alloc failed, release current */
1177 atomic_subtract_int(&cnt.v_wire_count, 1);
1178 vm_page_free_zero(m);
1182 pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
1185 /* Next level entry */
1186 pde = (pd_entry_t *)*pdep;
1187 pde[pdeindex] = (pd_entry_t)pageva;
1188 pmap->pm_ptphint = m;
1191 pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
1193 pmap->pm_stats.resident_count++;
1196 * Set the page table hint
1198 pmap->pm_ptphint = m;
1203 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1209 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1210 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1211 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1214 * Calculate pagetable page index
1216 ptepindex = pmap_pde_pindex(va);
1219 * Get the page directory entry
1221 pde = pmap_pde(pmap, va);
1224 * If the page table page is mapped, we just increment the hold
1225 * count, and activate it.
1227 if (pde != NULL && *pde != NULL) {
1229 * In order to get the page table page, try the hint first.
1231 if (pmap->pm_ptphint &&
1232 (pmap->pm_ptphint->pindex == ptepindex)) {
1233 m = pmap->pm_ptphint;
1235 m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
1236 pmap->pm_ptphint = m;
1241 * Here if the pte page isn't mapped, or if it has been
1244 m = _pmap_allocpte(pmap, ptepindex, flags);
1245 if (m == NULL && (flags & M_WAITOK))
1252 /***************************************************
1253 * Pmap allocation/deallocation routines.
1254 ***************************************************/
1257 * - Merged pmap_release and pmap_release_free_page. When pmap_release is
1258 * called only the page directory page(s) can be left in the pmap pte
1259 * object, since all page table pages will have been freed by
1260 * pmap_remove_pages and pmap_remove. In addition, there can only be one
1261 * reference to the pmap and the page directory is wired, so the page(s)
1262 * can never be busy. So all there is to do is clear the magic mappings
1263 * from the page directory and free the page(s).
1268 * Release any resources held by the given physical map.
1269 * Called when a pmap initialized by pmap_pinit is being released.
1270 * Should only be called if the map contains no valid mappings.
1273 pmap_release(pmap_t pmap)
1278 KASSERT(pmap->pm_stats.resident_count == 0,
1279 ("pmap_release: pmap resident count %ld != 0",
1280 pmap->pm_stats.resident_count));
1282 ptdva = (vm_offset_t)pmap->pm_segtab;
1283 ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
1285 ptdpg->wire_count--;
1286 atomic_subtract_int(&cnt.v_wire_count, 1);
1287 vm_page_free_zero(ptdpg);
1288 PMAP_LOCK_DESTROY(pmap);
1292 * grow the number of kernel page table entries, if needed
1295 pmap_growkernel(vm_offset_t addr)
1298 pd_entry_t *pde, *pdpe;
1302 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1303 addr = roundup2(addr, NBSEG);
1304 if (addr - 1 >= kernel_map->max_offset)
1305 addr = kernel_map->max_offset;
1306 while (kernel_vm_end < addr) {
1307 pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
1310 /* new intermediate page table entry */
1311 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
1313 panic("pmap_growkernel: no memory to grow kernel");
1314 *pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1315 continue; /* try again */
1318 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
1320 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1321 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1322 kernel_vm_end = kernel_map->max_offset;
1329 * This index is bogus, but out of the way
1331 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
1333 panic("pmap_growkernel: no memory to grow kernel");
1335 *pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1338 * The R[4-7]?00 stores only one copy of the Global bit in
1339 * the translation lookaside buffer for each 2 page entry.
1340 * Thus invalid entries must have the Global bit set so when
1341 * Entry LO and Entry HI G bits are anded together they will
1342 * produce a global bit to store in the tlb.
1344 pte = (pt_entry_t *)*pde;
1345 for (i = 0; i < NPTEPG; i++)
1348 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1349 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1350 kernel_vm_end = kernel_map->max_offset;
1356 /***************************************************
1357 * page management routines.
1358 ***************************************************/
1361 * free the pv_entry back to the free list
1363 static PMAP_INLINE void
1364 free_pv_entry(pv_entry_t pv)
1368 uma_zfree(pvzone, pv);
1372 * get a new pv_entry, allocating a block from the system
1374 * the memory allocation is performed bypassing the malloc code
1375 * because of the possibility of allocations at interrupt time.
1378 get_pv_entry(pmap_t locked_pmap)
1380 static const struct timeval printinterval = { 60, 0 };
1381 static struct timeval lastprint;
1382 struct vpgqueues *vpq;
1383 pt_entry_t *pte, oldpte;
1385 pv_entry_t allocated_pv, next_pv, pv;
1389 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1390 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1391 allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
1392 if (allocated_pv != NULL) {
1394 if (pv_entry_count > pv_entry_high_water)
1395 pagedaemon_wakeup();
1397 return (allocated_pv);
1400 * Reclaim pv entries: At first, destroy mappings to inactive
1401 * pages. After that, if a pv entry is still needed, destroy
1402 * mappings to active pages.
1404 if (ratecheck(&lastprint, &printinterval))
1405 printf("Approaching the limit on PV entries, "
1406 "increase the vm.pmap.shpgperproc tunable.\n");
1407 vpq = &vm_page_queues[PQ_INACTIVE];
1409 TAILQ_FOREACH(m, &vpq->pl, pageq) {
1410 if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
1412 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
1415 /* Avoid deadlock and lock recursion. */
1416 if (pmap > locked_pmap)
1418 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
1420 pmap->pm_stats.resident_count--;
1421 pte = pmap_pte(pmap, va);
1422 KASSERT(pte != NULL, ("pte"));
1424 if (is_kernel_pmap(pmap))
1428 KASSERT(!pte_test(&oldpte, PTE_W),
1429 ("wired pte for unwired page"));
1430 if (m->md.pv_flags & PV_TABLE_REF)
1431 vm_page_aflag_set(m, PGA_REFERENCED);
1432 if (pte_test(&oldpte, PTE_D))
1434 pmap_invalidate_page(pmap, va);
1435 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1436 m->md.pv_list_count--;
1437 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1438 pmap_unuse_pt(pmap, va, pv->pv_ptem);
1439 if (pmap != locked_pmap)
1441 if (allocated_pv == NULL)
1446 if (TAILQ_EMPTY(&m->md.pv_list)) {
1447 vm_page_aflag_clear(m, PGA_WRITEABLE);
1448 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1451 if (allocated_pv == NULL) {
1452 if (vpq == &vm_page_queues[PQ_INACTIVE]) {
1453 vpq = &vm_page_queues[PQ_ACTIVE];
1456 panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
1458 return (allocated_pv);
1464 * Move pmap_collect() out of the machine-dependent code, rename it
1465 * to reflect its new location, and add page queue and flag locking.
1467 * Notes: (1) alpha, i386, and ia64 had identical implementations
1468 * of pmap_collect() in terms of machine-independent interfaces;
1469 * (2) sparc64 doesn't require it; (3) powerpc had it as a TODO.
1471 * MIPS implementation was identical to alpha [Junos 8.2]
1475 * If it is the first entry on the list, it is actually
1476 * in the header and we must copy the following entry up
1477 * to the header. Otherwise we must search the list for
1478 * the entry. In either case we free the now unused entry.
1482 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1486 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1487 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1488 if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
1489 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
1490 if (pmap == pv->pv_pmap && va == pv->pv_va)
1494 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1495 if (va == pv->pv_va)
1500 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
1501 pvh->pv_list_count--;
1502 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1508 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1512 pv = pmap_pvh_remove(pvh, pmap, va);
1513 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
1514 (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)),
1520 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1523 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1524 pmap_pvh_free(&m->md, pmap, va);
1525 if (TAILQ_EMPTY(&m->md.pv_list))
1526 vm_page_aflag_clear(m, PGA_WRITEABLE);
1530 * Conditionally create a pv entry.
1533 pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
1538 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1539 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1540 if (pv_entry_count < pv_entry_high_water &&
1541 (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
1546 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1547 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1548 m->md.pv_list_count++;
1555 * pmap_remove_pte: do the things to unmap a page in a process
1558 pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
1564 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1565 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1568 if (is_kernel_pmap(pmap))
1573 if (pte_test(&oldpte, PTE_W))
1574 pmap->pm_stats.wired_count -= 1;
1576 pmap->pm_stats.resident_count -= 1;
1577 pa = TLBLO_PTE_TO_PA(oldpte);
1579 if (page_is_managed(pa)) {
1580 m = PHYS_TO_VM_PAGE(pa);
1581 if (pte_test(&oldpte, PTE_D)) {
1582 KASSERT(!pte_test(&oldpte, PTE_RO),
1583 ("%s: modified page not writable: va: %p, pte: %#jx",
1584 __func__, (void *)va, (uintmax_t)oldpte));
1587 if (m->md.pv_flags & PV_TABLE_REF)
1588 vm_page_aflag_set(m, PGA_REFERENCED);
1589 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1591 pmap_remove_entry(pmap, m, va);
1593 return (pmap_unuse_pt(pmap, va, NULL));
1597 * Remove a single page from a process address space
1600 pmap_remove_page(struct pmap *pmap, vm_offset_t va)
1604 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1605 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1606 ptq = pmap_pte(pmap, va);
1609 * if there is no pte for this address, just skip it!!!
1611 if (!ptq || !pte_test(ptq, PTE_V)) {
1616 * Write back all caches from the page being destroyed
1618 mips_dcache_wbinv_range_index(va, PAGE_SIZE);
1621 * get a local va for mappings for this pmap.
1623 (void)pmap_remove_pte(pmap, ptq, va);
1624 pmap_invalidate_page(pmap, va);
1630 * Remove the given range of addresses from the specified map.
1632 * It is assumed that the start and end are properly
1633 * rounded to the page size.
1636 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
1638 vm_offset_t va_next;
1639 pd_entry_t *pde, *pdpe;
1645 if (pmap->pm_stats.resident_count == 0)
1648 vm_page_lock_queues();
1652 * special handling of removing one page. a very common operation
1653 * and easy to short circuit some code.
1655 if ((sva + PAGE_SIZE) == eva) {
1656 pmap_remove_page(pmap, sva);
1659 for (; sva < eva; sva = va_next) {
1660 pdpe = pmap_segmap(pmap, sva);
1663 va_next = (sva + NBSEG) & ~SEGMASK;
1669 va_next = (sva + NBPDR) & ~PDRMASK;
1673 pde = pmap_pdpe_to_pde(pdpe, sva);
1678 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
1679 pte++, sva += PAGE_SIZE) {
1680 pmap_remove_page(pmap, sva);
1684 vm_page_unlock_queues();
1689 * Routine: pmap_remove_all
1691 * Removes this physical page from
1692 * all physical maps in which it resides.
1693 * Reflects back modify bits to the pager.
1696 * Original versions of this routine were very
1697 * inefficient because they iteratively called
1698 * pmap_remove (slow...)
1702 pmap_remove_all(vm_page_t m)
1705 pt_entry_t *pte, tpte;
1707 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1708 ("pmap_remove_all: page %p is not managed", m));
1709 vm_page_lock_queues();
1711 if (m->md.pv_flags & PV_TABLE_REF)
1712 vm_page_aflag_set(m, PGA_REFERENCED);
1714 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1715 PMAP_LOCK(pv->pv_pmap);
1718 * If it's last mapping writeback all caches from
1719 * the page being destroyed
1721 if (m->md.pv_list_count == 1)
1722 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
1724 pv->pv_pmap->pm_stats.resident_count--;
1726 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
1729 if (is_kernel_pmap(pv->pv_pmap))
1734 if (pte_test(&tpte, PTE_W))
1735 pv->pv_pmap->pm_stats.wired_count--;
1738 * Update the vm_page_t clean and reference bits.
1740 if (pte_test(&tpte, PTE_D)) {
1741 KASSERT(!pte_test(&tpte, PTE_RO),
1742 ("%s: modified page not writable: va: %p, pte: %#jx",
1743 __func__, (void *)pv->pv_va, (uintmax_t)tpte));
1746 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1748 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1749 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1750 m->md.pv_list_count--;
1751 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
1752 PMAP_UNLOCK(pv->pv_pmap);
1756 vm_page_aflag_clear(m, PGA_WRITEABLE);
1757 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1758 vm_page_unlock_queues();
1762 * Set the physical protection on the
1763 * specified range of this map as requested.
1766 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1769 pd_entry_t *pde, *pdpe;
1770 vm_offset_t va_next;
1775 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1776 pmap_remove(pmap, sva, eva);
1779 if (prot & VM_PROT_WRITE)
1782 vm_page_lock_queues();
1784 for (; sva < eva; sva = va_next) {
1789 pdpe = pmap_segmap(pmap, sva);
1792 va_next = (sva + NBSEG) & ~SEGMASK;
1798 va_next = (sva + NBPDR) & ~PDRMASK;
1802 pde = pmap_pdpe_to_pde(pdpe, sva);
1803 if (pde == NULL || *pde == NULL)
1808 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1811 /* Skip invalid PTEs */
1812 if (!pte_test(pte, PTE_V))
1815 pa = TLBLO_PTE_TO_PA(pbits);
1816 if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) {
1817 m = PHYS_TO_VM_PAGE(pa);
1819 m->md.pv_flags &= ~PV_TABLE_MOD;
1821 pte_clear(&pbits, PTE_D);
1822 pte_set(&pbits, PTE_RO);
1824 if (pbits != *pte) {
1826 pmap_update_page(pmap, sva, pbits);
1830 vm_page_unlock_queues();
1835 * Insert the given physical page (p) at
1836 * the specified virtual address (v) in the
1837 * target physical map with the protection requested.
1839 * If specified, the page will be wired down, meaning
1840 * that the related pte can not be reclaimed.
1842 * NB: This is the only routine which MAY NOT lazy-evaluate
1843 * or lose information. That is, this routine must actually
1844 * insert this page into the given map NOW.
1847 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
1848 vm_prot_t prot, boolean_t wired)
1852 pt_entry_t origpte, newpte;
1861 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
1862 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
1863 ("pmap_enter: page %p is not busy", m));
1867 vm_page_lock_queues();
1871 * In the case that a page table page is not resident, we are
1874 if (va < VM_MAXUSER_ADDRESS) {
1875 mpte = pmap_allocpte(pmap, va, M_WAITOK);
1877 pte = pmap_pte(pmap, va);
1880 * Page Directory table entry not valid, we need a new PT page
1883 panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
1884 (void *)pmap->pm_segtab, (void *)va);
1886 pa = VM_PAGE_TO_PHYS(m);
1889 opa = TLBLO_PTE_TO_PA(origpte);
1892 * Mapping has not changed, must be protection or wiring change.
1894 if (pte_test(&origpte, PTE_V) && opa == pa) {
1896 * Wiring change, just update stats. We don't worry about
1897 * wiring PT pages as they remain resident as long as there
1898 * are valid mappings in them. Hence, if a user page is
1899 * wired, the PT page will be also.
1901 if (wired && !pte_test(&origpte, PTE_W))
1902 pmap->pm_stats.wired_count++;
1903 else if (!wired && pte_test(&origpte, PTE_W))
1904 pmap->pm_stats.wired_count--;
1906 KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
1907 ("%s: modified page not writable: va: %p, pte: %#jx",
1908 __func__, (void *)va, (uintmax_t)origpte));
1911 * Remove extra pte reference
1916 if (page_is_managed(opa)) {
1925 * Mapping has changed, invalidate old range and fall through to
1926 * handle validating new mapping.
1929 if (pte_test(&origpte, PTE_W))
1930 pmap->pm_stats.wired_count--;
1932 if (page_is_managed(opa)) {
1933 om = PHYS_TO_VM_PAGE(opa);
1934 pv = pmap_pvh_remove(&om->md, pmap, va);
1938 KASSERT(mpte->wire_count > 0,
1939 ("pmap_enter: missing reference to page table page,"
1940 " va: %p", (void *)va));
1943 pmap->pm_stats.resident_count++;
1946 * Enter on the PV list if part of our managed memory. Note that we
1947 * raise IPL while manipulating pv_table since pmap_enter can be
1948 * called at interrupt time.
1950 if ((m->oflags & VPO_UNMANAGED) == 0) {
1951 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
1952 ("pmap_enter: managed mapping within the clean submap"));
1954 pv = get_pv_entry(pmap);
1958 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1959 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1960 m->md.pv_list_count++;
1961 } else if (pv != NULL)
1965 * Increment counters
1968 pmap->pm_stats.wired_count++;
1971 if ((access & VM_PROT_WRITE) != 0)
1972 m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF;
1973 rw = init_pte_prot(va, m, prot);
1976 printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa);
1979 * Now validate mapping with desired protection/wiring.
1981 newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;
1983 if (is_cacheable_mem(pa))
1984 newpte |= PTE_C_CACHE;
1986 newpte |= PTE_C_UNCACHED;
1991 if (is_kernel_pmap(pmap))
1995 * if the mapping or permission bits are different, we need to
1998 if (origpte != newpte) {
1999 if (pte_test(&origpte, PTE_V)) {
2001 if (page_is_managed(opa) && (opa != pa)) {
2002 if (om->md.pv_flags & PV_TABLE_REF)
2003 vm_page_aflag_set(om, PGA_REFERENCED);
2005 ~(PV_TABLE_REF | PV_TABLE_MOD);
2007 if (pte_test(&origpte, PTE_D)) {
2008 KASSERT(!pte_test(&origpte, PTE_RO),
2009 ("pmap_enter: modified page not writable:"
2010 " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
2011 if (page_is_managed(opa))
2014 if (page_is_managed(opa) &&
2015 TAILQ_EMPTY(&om->md.pv_list))
2016 vm_page_aflag_clear(om, PGA_WRITEABLE);
2021 pmap_update_page(pmap, va, newpte);
2024 * Sync I & D caches for executable pages. Do this only if the
2025 * target pmap belongs to the current process. Otherwise, an
2026 * unresolvable TLB miss may occur.
2028 if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
2029 (prot & VM_PROT_EXECUTE)) {
2030 mips_icache_sync_range(va, PAGE_SIZE);
2031 mips_dcache_wbinv_range(va, PAGE_SIZE);
2033 vm_page_unlock_queues();
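/*
 * Summary sketch (added for clarity, not in the original file): a freshly
 * constructed PTE for a cacheable, writable, wired user page might end up as
 *
 *	newpte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_C_CACHE | PTE_W;
 *
 * PTE_V marks the entry valid, PTE_D makes it writable (the MIPS "dirty"
 * bit), PTE_C_CACHE selects the cacheable attribute, and PTE_W records the
 * software wiring.  Kernel mappings additionally carry PTE_G so the entry is
 * global rather than tagged with an ASID.
 */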
2038 * this code makes some *MAJOR* assumptions:
2039 * 1. Current pmap & pmap exists.
2042 * 4. No page table pages.
2043 * but is *MUCH* faster than pmap_enter...
2047 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2050 vm_page_lock_queues();
2052 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
2053 vm_page_unlock_queues();
2058 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2059 vm_prot_t prot, vm_page_t mpte)
2064 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2065 (m->oflags & VPO_UNMANAGED) != 0,
2066 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2067 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2068 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2071 * In the case that a page table page is not resident, we are
2074 if (va < VM_MAXUSER_ADDRESS) {
2079 * Calculate pagetable page index
2081 ptepindex = pmap_pde_pindex(va);
2082 if (mpte && (mpte->pindex == ptepindex)) {
2086 * Get the page directory entry
2088 pde = pmap_pde(pmap, va);
2091 * If the page table page is mapped, we just
2092 * increment the hold count, and activate it.
2094 if (pde && *pde != 0) {
2095 if (pmap->pm_ptphint &&
2096 (pmap->pm_ptphint->pindex == ptepindex)) {
2097 mpte = pmap->pm_ptphint;
2099 mpte = PHYS_TO_VM_PAGE(
2100 MIPS_DIRECT_TO_PHYS(*pde));
2101 pmap->pm_ptphint = mpte;
2105 mpte = _pmap_allocpte(pmap, ptepindex,
2115 pte = pmap_pte(pmap, va);
2116 if (pte_test(pte, PTE_V)) {
2125 * Enter on the PV list if part of our managed memory.
2127 if ((m->oflags & VPO_UNMANAGED) == 0 &&
2128 !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
2130 pmap_unwire_ptp(pmap, va, mpte);
2137 * Increment counters
2139 pmap->pm_stats.resident_count++;
2141 pa = VM_PAGE_TO_PHYS(m);
2144 * Now validate mapping with RO protection
2146 *pte = TLBLO_PA_TO_PFN(pa) | PTE_V;
2148 if (is_cacheable_mem(pa))
2149 *pte |= PTE_C_CACHE;
2151 *pte |= PTE_C_UNCACHED;
2153 if (is_kernel_pmap(pmap))
2158 * Sync I & D caches. Do this only if the target pmap
2159 * belongs to the current process. Otherwise, an
2160 * unresolvable TLB miss may occur. */
2161 if (pmap == &curproc->p_vmspace->vm_pmap) {
2163 mips_icache_sync_range(va, PAGE_SIZE);
2164 mips_dcache_wbinv_range(va, PAGE_SIZE);
2171 * Make a temporary mapping for a physical address. This is only intended
2172 * to be used for panic dumps.
2174 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2177 pmap_kenter_temporary(vm_paddr_t pa, int i)
2182 printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
2185 if (MIPS_DIRECT_MAPPABLE(pa)) {
2186 va = MIPS_PHYS_TO_DIRECT(pa);
2188 #ifndef __mips_n64 /* XXX : to be converted to new style */
2191 struct local_sysmaps *sysm;
2192 pt_entry_t *pte, npte;
2194 /* If this is used other than for dumps, we may need to leave
2195 * interrupts disabled on return. If crash dumps don't work when
2196 * we get to this point, we might want to consider this (leaving things
2197 * disabled as a starting point ;-)
2199 intr = intr_disable();
2200 cpu = PCPU_GET(cpuid);
2201 sysm = &sysmap_lmem[cpu];
2202 /* Since this is for the debugger, no locks or any other fun */
2203 npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
2204 pte = pmap_pte(kernel_pmap, sysm->base);
2207 pmap_update_page(kernel_pmap, sysm->base, npte);
2212 return ((void *)va);
2216 pmap_kenter_temporary_free(vm_paddr_t pa)
2218 #ifndef __mips_n64 /* XXX : to be converted to new style */
2221 struct local_sysmaps *sysm;
2224 if (MIPS_DIRECT_MAPPABLE(pa)) {
2225 /* nothing to do for this case */
2228 #ifndef __mips_n64 /* XXX : to be converted to new style */
2229 cpu = PCPU_GET(cpuid);
2230 sysm = &sysmap_lmem[cpu];
2234 intr = intr_disable();
2235 pte = pmap_pte(kernel_pmap, sysm->base);
2237 pmap_invalidate_page(kernel_pmap, sysm->base);
2245 * Moved the code to Machine Independent
2246 * vm_map_pmap_enter()
2250 * Maps a sequence of resident pages belonging to the same object.
2251 * The sequence begins with the given page m_start. This page is
2252 * mapped at the given virtual address start. Each subsequent page is
2253 * mapped at a virtual address that is offset from start by the same
2254 * amount as the page is offset from m_start within the object. The
2255 * last page in the sequence is the page with the largest offset from
2256 * m_start that can be mapped at a virtual address less than the given
2257 * virtual address end. Not every virtual page between start and end
2258 * is mapped; only those for which a resident page exists with the
2259 * corresponding offset from m_start are mapped.
2262 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2263 vm_page_t m_start, vm_prot_t prot)
2266 vm_pindex_t diff, psize;
2268 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
2269 psize = atop(end - start);
2272 vm_page_lock_queues();
2274 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2275 mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2277 m = TAILQ_NEXT(m, listq);
2279 vm_page_unlock_queues();
2284 * pmap_object_init_pt preloads the ptes for a given object
2285 * into the specified pmap. This eliminates the blast of soft
2286 * faults on process startup and immediately after an mmap.
2289 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2290 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2292 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2293 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2294 ("pmap_object_init_pt: non-device object"));
2298 * Routine: pmap_change_wiring
2299 * Function: Change the wiring attribute for a map/virtual-address
2301 * In/out conditions:
2302 * The mapping must already exist in the pmap.
2305 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
2313 pte = pmap_pte(pmap, va);
2315 if (wired && !pte_test(pte, PTE_W))
2316 pmap->pm_stats.wired_count++;
2317 else if (!wired && pte_test(pte, PTE_W))
2318 pmap->pm_stats.wired_count--;
2321 * Wiring is not a hardware characteristic so there is no need to
2325 pte_set(pte, PTE_W);
2327 pte_clear(pte, PTE_W);
2332 * Copy the range specified by src_addr/len
2333 * from the source map to the range dst_addr/len
2334 * in the destination map.
2336 * This routine is only advisory and need not do anything.
2340 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2341 vm_size_t len, vm_offset_t src_addr)
2346 * pmap_zero_page zeros the specified hardware page by mapping
2347 * the page into KVM and using bzero to clear its contents.
2349 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2352 pmap_zero_page(vm_page_t m)
2355 vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2357 if (MIPS_DIRECT_MAPPABLE(phys)) {
2358 va = MIPS_PHYS_TO_DIRECT(phys);
2359 bzero((caddr_t)va, PAGE_SIZE);
2360 mips_dcache_wbinv_range(va, PAGE_SIZE);
2362 va = pmap_lmem_map1(phys);
2363 bzero((caddr_t)va, PAGE_SIZE);
2364 mips_dcache_wbinv_range(va, PAGE_SIZE);
2370 * pmap_zero_page_area zeros the specified hardware page by mapping
2371 * the page into KVM and using bzero to clear its contents.
2373 * off and size may not cover an area beyond a single hardware page.
2376 pmap_zero_page_area(vm_page_t m, int off, int size)
2379 vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2381 if (MIPS_DIRECT_MAPPABLE(phys)) {
2382 va = MIPS_PHYS_TO_DIRECT(phys);
2383 bzero((char *)(caddr_t)va + off, size);
2384 mips_dcache_wbinv_range(va + off, size);
2386 va = pmap_lmem_map1(phys);
2387 bzero((char *)va + off, size);
2388 mips_dcache_wbinv_range(va + off, size);
2394 pmap_zero_page_idle(vm_page_t m)
2397 vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2399 if (MIPS_DIRECT_MAPPABLE(phys)) {
2400 va = MIPS_PHYS_TO_DIRECT(phys);
2401 bzero((caddr_t)va, PAGE_SIZE);
2402 mips_dcache_wbinv_range(va, PAGE_SIZE);
2404 va = pmap_lmem_map1(phys);
2405 bzero((caddr_t)va, PAGE_SIZE);
2406 mips_dcache_wbinv_range(va, PAGE_SIZE);
2412 * pmap_copy_page copies the specified (machine independent)
2413 * page by mapping the page into virtual memory and using
2414 * bcopy to copy the page, one machine dependent page at a
2417 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2420 pmap_copy_page(vm_page_t src, vm_page_t dst)
2422 vm_offset_t va_src, va_dst;
2423 vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
2424 vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
2426 if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
2427 /* easy case, all can be accessed via KSEG0 */
2429 * Flush all caches for VA that are mapped to this page
2430 * to make sure that data in SDRAM is up to date
2432 pmap_flush_pvcache(src);
2433 mips_dcache_wbinv_range_index(
2434 MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
2435 va_src = MIPS_PHYS_TO_DIRECT(phys_src);
2436 va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
2437 bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
2438 mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2440 va_src = pmap_lmem_map2(phys_src, phys_dst);
2441 va_dst = va_src + PAGE_SIZE;
2442 bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
2443 mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2448 int unmapped_buf_allowed;
2451 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2452 vm_offset_t b_offset, int xfersize)
2456 vm_offset_t a_pg_offset, b_pg_offset;
2457 vm_paddr_t a_phys, b_phys;
2460 while (xfersize > 0) {
2461 a_pg_offset = a_offset & PAGE_MASK;
2462 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2463 a_m = ma[a_offset >> PAGE_SHIFT];
2464 a_phys = VM_PAGE_TO_PHYS(a_m);
2465 b_pg_offset = b_offset & PAGE_MASK;
2466 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2467 b_m = mb[b_offset >> PAGE_SHIFT];
2468 b_phys = VM_PAGE_TO_PHYS(b_m);
2469 if (MIPS_DIRECT_MAPPABLE(a_phys) &&
2470 MIPS_DIRECT_MAPPABLE(b_phys)) {
2471 pmap_flush_pvcache(a_m);
2472 mips_dcache_wbinv_range_index(
2473 MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
2474 a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
2476 b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
2478 bcopy(a_cp, b_cp, cnt);
2479 mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2481 a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
2482 b_cp = (char *)a_cp + PAGE_SIZE;
2483 a_cp += a_pg_offset;
2484 b_cp += b_pg_offset;
2485 bcopy(a_cp, b_cp, cnt);
2486 mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2496 * Returns true if the pmap's pv is one of the first
2497 * 16 pvs linked to from this page. This count may
2498 * be changed upwards or downwards in the future; it
2499 * is only necessary that true be returned for a small
2500 * subset of pmaps for proper page aging.
2503 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2509 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2510 ("pmap_page_exists_quick: page %p is not managed", m));
2512 vm_page_lock_queues();
2513 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2514 if (pv->pv_pmap == pmap) {
2522 vm_page_unlock_queues();
2527 * Remove all pages from specified address space
2528 * this aids process exit speeds. Also, this code
2529 * is special cased for current process only, but
2530 * can have the more generic (and slightly slower)
2531 * mode enabled. This is much faster than pmap_remove
2532 * in the case of running down an entire address space.
2535 pmap_remove_pages(pmap_t pmap)
2537 pt_entry_t *pte, tpte;
2541 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2542 printf("warning: pmap_remove_pages called with non-current pmap\n");
2545 vm_page_lock_queues();
2547 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv != NULL; pv = npv) {
2549 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2550 if (!pte_test(pte, PTE_V))
2551 panic("pmap_remove_pages: page on pm_pvlist has no pte");
2555 * We cannot remove wired pages from a process' mapping at this time
2557 if (pte_test(&tpte, PTE_W)) {
2558 npv = TAILQ_NEXT(pv, pv_plist);
2561 *pte = is_kernel_pmap(pmap) ? PTE_G : 0;
2563 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
2565 ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
2567 pv->pv_pmap->pm_stats.resident_count--;
2570 * Update the vm_page_t clean and reference bits.
2572 if (pte_test(&tpte, PTE_D)) {
2575 npv = TAILQ_NEXT(pv, pv_plist);
2576 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2578 m->md.pv_list_count--;
2579 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2580 if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
2581 vm_page_aflag_clear(m, PGA_WRITEABLE);
2583 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
2586 pmap_invalidate_all(pmap);
2588 vm_page_unlock_queues();
2592 * pmap_testbit tests bits in pte's
2593 * note that the testbit/changebit routines are inline,
2594 * and a lot of things are evaluated at compile time.
2597 pmap_testbit(vm_page_t m, int bit)
2601 boolean_t rv = FALSE;
2603 if (m->oflags & VPO_UNMANAGED)
2606 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
2609 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2610 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2611 PMAP_LOCK(pv->pv_pmap);
2612 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2613 rv = pte_test(pte, bit);
2614 PMAP_UNLOCK(pv->pv_pmap);
/*
 * this routine is used to clear dirty bits in PTEs
 */
static __inline void
pmap_changebit(vm_page_t m, int bit, boolean_t setem)
{
	pv_entry_t pv;
	pt_entry_t *pte;

	if (m->oflags & VPO_UNMANAGED)
		return;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 * If setting RO do we need to clear the VAC?
	 */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		if (setem) {
			*pte |= bit;
			pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
		} else {
			pt_entry_t pbits = *pte;

			if (pbits & bit) {
				if (bit == PTE_D)
					*pte = (pbits & ~PTE_D) | PTE_RO;
				else
					*pte = pbits & ~bit;
				pmap_update_page(pv->pv_pmap, pv->pv_va,
				    *pte);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	if (!setem && bit == PTE_D)
		vm_page_aflag_clear(m, PGA_WRITEABLE);
}
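/*
 * Clearing PTE_D above also sets PTE_RO in the PTE, so the next store to the
 * page raises a TLB modified exception; pmap_emulate_modified() below then
 * re-sets PTE_D and records PV_TABLE_MOD.  This is how the dirty bit is
 * emulated in software, since the MIPS TLB has no hardware-updated modify
 * bit.
 */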
/*
 * pmap_page_wired_mappings:
 *
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pv;
	pmap_t pmap;
	pt_entry_t *pte;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = pv->pv_pmap;
		PMAP_LOCK(pmap);
		pte = pmap_pte(pmap, pv->pv_va);
		if (pte_test(pte, PTE_W))
			count++;
		PMAP_UNLOCK(pmap);
	}
	vm_page_unlock_queues();
	return (count);
}
/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{
	pv_entry_t pv, npv;
	vm_offset_t va;
	pt_entry_t *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;

	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 */
	vm_page_lock_queues();
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
		npv = TAILQ_NEXT(pv, pv_list);
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		if (pte == NULL || !pte_test(pte, PTE_V))
			panic("page on pm_pvlist has no pte");
		va = pv->pv_va;
		pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_EXECUTE);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}
/*
 * pmap_ts_referenced:
 *
 * Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	if (m->md.pv_flags & PV_TABLE_REF) {
		vm_page_lock_queues();
		m->md.pv_flags &= ~PV_TABLE_REF;
		vm_page_unlock_queues();
		return (1);
	}
	return (0);
}
/*
 * pmap_is_modified:
 *
 * Return whether or not the specified physical page was modified
 * in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have PTE_D set.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	vm_page_lock_queues();
	if (m->md.pv_flags & PV_TABLE_MOD)
		rv = TRUE;
	else
		rv = pmap_testbit(m, PTE_D);
	vm_page_unlock_queues();
	return (rv);
}
/*
 * pmap_is_prefaultable:
 *
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	boolean_t rv;

	rv = FALSE;
	PMAP_LOCK(pmap);
	pde = pmap_pde(pmap, addr);
	if (pde != NULL && *pde != 0) {
		pte = pmap_pde_to_pte(pde, addr);
		rv = (*pte == 0);
	}
	PMAP_UNLOCK(pmap);
	return (rv);
}
/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("pmap_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
	 * If the object containing the page is locked and the page is not
	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	if (m->md.pv_flags & PV_TABLE_MOD) {
		pmap_changebit(m, PTE_D, FALSE);
		m->md.pv_flags &= ~PV_TABLE_MOD;
	}
	vm_page_unlock_queues();
}
/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
}
/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_reference: page %p is not managed", m));
	vm_page_lock_queues();
	if (m->md.pv_flags & PV_TABLE_REF) {
		m->md.pv_flags &= ~PV_TABLE_REF;
	}
	vm_page_unlock_queues();
}
/*
 * Miscellaneous support routines follow
 */
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;

	/*
	 * KSEG1 maps only the first 512M of the physical address space.
	 * For pa beyond that we must build a proper mapping with
	 * pmap_kenter_attr().
	 */
	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));

	offset = pa & PAGE_MASK;
	size = roundup(size + offset, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
	pa = trunc_page(pa);
	for (tmpva = va; size > 0;) {
		pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}
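/*
 * Usage sketch (the addresses are hypothetical): a driver mapping a 4 KB
 * register window at physical 0x1f000000 would do
 *
 *	void *regs = pmap_mapdev(0x1f000000, 0x1000);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, 0x1000);
 *
 * On a 32-bit kernel that address is below 512M, so pmap_mapdev() simply
 * returns the uncached KSEG1 alias and the matching pmap_unmapdev() is a
 * no-op for it.
 */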
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/* If the address is within KSEG1 then there is nothing to do */
	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
		return;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(size + offset, PAGE_SIZE);
	kmem_free(kernel_map, base, size);
}
/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	vm_page_t m;
	int val;
	boolean_t managed;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? *ptep : 0;
	if (!pte_test(&pte, PTE_V)) {
		val = 0;
		goto out;
	}
	val = MINCORE_INCORE;
	if (pte_test(&pte, PTE_D))
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	pa = TLBLO_PTE_TO_PA(pte);
	managed = page_is_managed(pa);
	if (managed) {
		/*
		 * This may falsely report the given address as
		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
		 * per-PTE reference information, it is impossible to
		 * determine if the address is MINCORE_REFERENCED.
		 */
		m = PHYS_TO_VM_PAGE(pa);
		if ((m->aflags & PGA_REFERENCED) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
out:
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}
void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	struct proc *p = td->td_proc;
	u_int cpuid;

	critical_enter();

	pmap = vmspace_pmap(p->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);

	if (oldpmap)
		CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
	pmap_asid_alloc(pmap);
	if (td == curthread) {
		PCPU_SET(segbase, pmap->pm_segtab);
		mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
	}

	PCPU_SET(curpmap, pmap);
	critical_exit();
}
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}
/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBSEG)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & SEGMASK;
	if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
	    (*addr & SEGMASK) == superpage_offset)
		return;
	if ((*addr & SEGMASK) < superpage_offset)
		*addr = (*addr & ~SEGMASK) + superpage_offset;
	else
		*addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
}
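/*
 * Worked example (assuming 4 KB pages and NBSEG == 4 MB): for a sufficiently
 * large mapping of an object at offset 0x401000, superpage_offset is 0x1000.
 * If the caller proposed *addr == 0x20000000, the hint is moved to 0x20001000
 * so that the virtual address and the pager offset agree modulo NBSEG, which
 * is what later allows the region to be mapped with superpages.
 */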
/*
 * Increase the starting virtual address of the given mapping so
 * that it is aligned to not be the second page in a TLB entry.
 * This routine assumes that the length is appropriately-sized so
 * that the allocation does not share a TLB entry at all if required.
 */
void
pmap_align_tlb(vm_offset_t *addr)
{
	if ((*addr & PAGE_SIZE) == 0)
		return;
	*addr += PAGE_SIZE;
}
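/*
 * Each MIPS TLB entry maps an even/odd pair of virtual pages, so an
 * allocation that starts on an odd page would share its first TLB entry
 * with whatever precedes it.  Bumping such a start address by one page puts
 * it on an even page boundary and gives the allocation TLB entries of its
 * own.
 */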
DB_SHOW_COMMAND(ptable, ddb_pid_dump)
{
	pmap_t pmap;
	struct thread *td = NULL;
	struct proc *p;
	int i, j, k;
	vm_paddr_t pa;
	vm_offset_t va;

	if (have_addr) {
		td = db_lookup_thread(addr, TRUE);
		if (td == NULL) {
			db_printf("Invalid pid or tid");
			return;
		}
		p = td->td_proc;
		if (p->p_vmspace == NULL) {
			db_printf("No vmspace for process");
			return;
		}
		pmap = vmspace_pmap(p->p_vmspace);
	} else
		pmap = kernel_pmap;

	db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
	    pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
	    pmap->pm_asid[0].gen);
	for (i = 0; i < NPDEPG; i++) {
		pd_entry_t *pdpe;
		pt_entry_t *pde;
		pt_entry_t pte;

		pdpe = (pd_entry_t *)pmap->pm_segtab[i];
		if (pdpe == NULL)
			continue;
		db_printf("[%4d] %p\n", i, pdpe);
#ifdef __mips_n64
		for (j = 0; j < NPDEPG; j++) {
			pde = (pt_entry_t *)pdpe[j];
			if (pde == NULL)
				continue;
			db_printf("\t[%4d] %p\n", j, pde);
#else
		{
			j = 0;
			pde = (pt_entry_t *)pdpe;
#endif
			for (k = 0; k < NPTEPG; k++) {
				pte = pde[k];
				if (pte == 0 || !pte_test(&pte, PTE_V))
					continue;
				pa = TLBLO_PTE_TO_PA(pte);
				va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) |
				    (k << PAGE_SHIFT);
				db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
				    k, (void *)va, (uintmax_t)pte, (uintmax_t)pa);
			}
		}
	}
}
static void pads(pmap_t pm);
void pmap_pvdump(vm_offset_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
	unsigned va, i, j;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPTEPG; i++)
		if (pm->pm_segtab[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap &&
				    va >= VM_MAXUSER_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pte_test(ptep, PTE_V))
					printf("%x:%x ", va, *(int *)ptep);
			}
}
void
pmap_pvdump(vm_offset_t pa)
{
	register pv_entry_t pv;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
	    pv = TAILQ_NEXT(pv, pv_list)) {
		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
		pads(pv->pv_pmap);
	}
	printf(" ");
}
/*
 * Allocate TLB address space tag (called ASID or TLBPID) and return it.
 * It takes almost as much or more time to search the TLB for a
 * specific ASID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new ASID, we just take the next number. When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over. ASID zero is reserved for kernel use.
 */
static void
pmap_asid_alloc(pmap_t pmap)
{
	if (pmap->pm_asid[PCPU_GET(cpuid)].asid != PMAP_ASID_RESERVED &&
	    pmap->pm_asid[PCPU_GET(cpuid)].gen == PCPU_GET(asid_generation))
		return;		/* The current ASID is still valid. */

	if (PCPU_GET(next_asid) == pmap_max_asid) {
		tlb_invalidate_all_user(NULL);
		PCPU_SET(asid_generation,
		    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
		if (PCPU_GET(asid_generation) == 0) {
			PCPU_SET(asid_generation, 1);
		}
		PCPU_SET(next_asid, 1);	/* 0 means invalid */
	}
	pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
	pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
	PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
}
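/*
 * Illustration (the numbers are hypothetical): with pmap_max_asid == 256,
 * ASIDs 1..255 are handed out in order.  When next_asid reaches the limit,
 * the user TLB entries are flushed, asid_generation is bumped and next_asid
 * restarts at 1; any pmap still carrying an ASID from the old generation
 * fails the generation check above and gets a fresh ASID the next time it
 * is activated.
 */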
static boolean_t
page_is_managed(vm_paddr_t pa)
{
	vm_offset_t pgnum = atop(pa);

	if (pgnum >= first_page) {
		vm_page_t m;

		m = PHYS_TO_VM_PAGE(pa);
		if (m == NULL)
			return (FALSE);
		if ((m->oflags & VPO_UNMANAGED) == 0)
			return (TRUE);
	}
	return (FALSE);
}
static pt_entry_t
init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
	pt_entry_t rw;

	if (!(prot & VM_PROT_WRITE))
		rw = PTE_V | PTE_RO | PTE_C_CACHE;
	else if ((m->oflags & VPO_UNMANAGED) == 0) {
		if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
			rw = PTE_V | PTE_D | PTE_C_CACHE;
		else
			rw = PTE_V | PTE_C_CACHE;
		vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		/* Needn't emulate a modified bit for unmanaged pages. */
		rw = PTE_V | PTE_D | PTE_C_CACHE;
	return (rw);
}
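/*
 * A managed page that has not yet been seen dirty (no PV_TABLE_MOD) is given
 * a writable mapping without PTE_D, so the first store traps and is
 * accounted for by pmap_emulate_modified() below.  Unmanaged pages skip the
 * emulation and get PTE_D up front.
 */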
/*
 * pmap_emulate_modified : do dirty bit emulation
 *
 * On SMP, update just the local TLB, other CPUs will update their
 * TLBs from the PTE lazily, if they get the exception.
 * Returns 0 in case of success, 1 if the page is read only and we
 * need to fault.
 */
int
pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
{
	vm_page_t m;
	pt_entry_t *pte;
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte == NULL)
		panic("pmap_emulate_modified: can't find PTE");
#ifdef SMP
	/* It is possible that some other CPU changed m-bit */
	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
		pmap_update_page_local(pmap, va, *pte);
		PMAP_UNLOCK(pmap);
		return (0);
	}
#else
	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
		panic("pmap_emulate_modified: invalid pte");
#endif
	if (pte_test(pte, PTE_RO)) {
		/* write to read only page in the kernel */
		PMAP_UNLOCK(pmap);
		return (1);
	}
	pte_set(pte, PTE_D);
	pmap_update_page_local(pmap, va, *pte);
	pa = TLBLO_PTE_TO_PA(*pte);
	if (!page_is_managed(pa))
		panic("pmap_emulate_modified: unmanaged page");
	m = PHYS_TO_VM_PAGE(pa);
	m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
	PMAP_UNLOCK(pmap);
	return (0);
}
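/*
 * This is reached from the TLB modified exception path: a return of 0 means
 * the PTE has been upgraded (PTE_D set) and the faulting store can simply be
 * retried, while a return of 1 means the mapping really is read-only and the
 * caller must fall back to the normal fault handling.
 */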
/*
 * Routine:	pmap_kextract
 * Function:
 *	Extract the physical page address associated with the given
 *	kernel virtual address.
 */
/* PMAP_INLINE */ vm_offset_t
pmap_kextract(vm_offset_t va)
{
	int mapped;

	/*
	 * First, the direct-mapped regions.
	 */
#if defined(__mips_n64)
	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
		return (MIPS_XKPHYS_TO_PHYS(va));
#endif
	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
		return (MIPS_KSEG0_TO_PHYS(va));

	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
		return (MIPS_KSEG1_TO_PHYS(va));

	/*
	 * User virtual addresses.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		pt_entry_t *ptep;

		if (curproc && curproc->p_vmspace) {
			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
			if (ptep) {
				return (TLBLO_PTE_TO_PA(*ptep) |
				    (va & PAGE_MASK));
			}
			return (0);
		}
	}

	/*
	 * Should be kernel virtual here, otherwise fail
	 */
	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
#if defined(__mips_n64)
	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
#endif

	/*
	 * Kernel virtual.
	 */
	if (mapped) {
		pt_entry_t *ptep;

		/* Is the kernel pmap initialized? */
		if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
			/* It's inside the virtual address range */
			ptep = pmap_pte(kernel_pmap, va);
			if (ptep) {
				return (TLBLO_PTE_TO_PA(*ptep) |
				    (va & PAGE_MASK));
			}
		}
		return (0);
	}

	panic("%s for unknown address space %p.", __func__, (void *)va);
}
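/*
 * Example (illustrative only): for a direct-mapped buffer the extraction is
 * pure arithmetic,
 *
 *	vm_offset_t pa = pmap_kextract((vm_offset_t)MIPS_PHYS_TO_KSEG0(0x100000));
 *
 * which returns 0x100000, whereas a KSEG2 (or XKSEG) address is looked up in
 * the kernel page tables instead.
 */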
static void
pmap_flush_pvcache(vm_page_t m)
{
	pv_entry_t pv;

	if (m != NULL) {
		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
		    pv = TAILQ_NEXT(pv, pv_list)) {
			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
		}
	}
}