/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 * from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
 * JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
 */
/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>

#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <sys/mutex.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <sys/sched.h>

#include <machine/cache.h>
#include <machine/md_var.h>
#include <machine/tlb.h>

#ifndef PMAP_SHPGPERPROC
#define	PMAP_SHPGPERPROC 200
#endif

#if !defined(DIAGNOSTIC)
#define	PMAP_INLINE __inline
#else
#define	PMAP_INLINE
#endif
/*
 * Get PDEs and PTEs for user/kernel address space
 *
 * XXX The & for pmap_segshift() is wrong, as is the fact that it doesn't
 * trim off gratuitous bits of the address space.  By having the &
 * there, we break defining NUSERPGTBLS below because the address space
 * is defined such that it ends immediately after NPDEPG*NPTEPG*PAGE_SIZE,
 * so we end up getting NUSERPGTBLS of 0.
 */
#define	pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
#define	pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
#define	pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
#define	pmap_pde_pindex(v)	((v) >> PDRSHIFT)

#ifdef __mips_n64
#define	NUPDE			(NPDEPG * NPDEPG)
#define	NUSERPGTBLS		(NUPDE + NPDEPG)
#else
#define	NUPDE			(NPDEPG)
#define	NUSERPGTBLS		(NUPDE)
#endif
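/*
 * Illustrative sketch, not part of the original file: how the lookup
 * macros above decompose a virtual address.  The constants assumed here
 * are the usual 4K-page values (PAGE_SHIFT 12, NPTEPG/NPDEPG 1024, so
 * PDRSHIFT 22); the demo is compiled out behind a hypothetical
 * PMAP_INDEX_DEMO guard.
 */
#ifdef PMAP_INDEX_DEMO
static void
pmap_index_demo(void)
{
    vm_offset_t va = 0x00403012;

    /* 0x00403012 >> 22 == 1: second entry in the page directory. */
    printf("pde index: %d\n", (int)pmap_pde_index(va));
    /* (0x00403012 >> 12) & 1023 == 3: fourth pte in that table. */
    printf("pte index: %d\n", (int)pmap_pte_index(va));
    /* The low 12 bits are the byte offset within the page: 0x12. */
    printf("page offset: 0x%x\n", (int)(va & PAGE_MASK));
}
#endif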
#define	is_kernel_pmap(x)	((x) == kernel_pmap)

struct pmap kernel_pmap_store;
pd_entry_t *kernel_segmap;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static int nkpt;
unsigned pmap_max_asid;		/* max ASID supported by the system */

#define	PMAP_ASID_RESERVED	0

vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static void pmap_asid_alloc(pmap_t pmap);

/*
 * Data for the pv entry allocation mechanism
 */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
    vm_offset_t va, vm_page_t m);
static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
static void pmap_invalidate_all(pmap_t pmap);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
static int init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
static vm_page_t pmap_alloc_pte_page(unsigned int index, int req);
static void pmap_grow_pte_page_cache(void);

#ifdef SMP
static void pmap_invalidate_page_action(void *arg);
static void pmap_invalidate_all_action(void *arg);
static void pmap_update_page_action(void *arg);
#endif
#if !defined(__mips_n64)
/*
 * This structure is for high memory (memory above 512Meg in 32 bit)
 * systems.  This memory area does not have direct mapping, so we need
 * a mechanism to do temporary per-CPU mappings to access these
 * physical addresses.
 *
 * At bootup we reserve 2 virtual pages per CPU for mapping highmem pages.
 * To access a highmem physical address on a CPU, we will disable
 * interrupts and add the mapping from the reserved virtual address for
 * the CPU to the physical address in the kernel pagetable.
 */
struct local_sysmaps {
    vm_offset_t	base;
    register_t	saved_intr;
    uint16_t	valid1, valid2;
};
static struct local_sysmaps sysmap_lmem[MAXCPU];
static void
pmap_alloc_lmem_map(void)
{
    int i;

    for (i = 0; i < MAXCPU; i++) {
        sysmap_lmem[i].base = virtual_avail;
        virtual_avail += PAGE_SIZE * 2;
        sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
    }
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{
    struct local_sysmaps *sysm;
    pt_entry_t *pte, npte;
    vm_offset_t va;
    register_t intr;
    int cpu;

    intr = intr_disable();
    cpu = PCPU_GET(cpuid);
    sysm = &sysmap_lmem[cpu];
    sysm->saved_intr = intr;
    va = sysm->base;
    npte = TLBLO_PA_TO_PFN(phys) |
        PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
    pte = pmap_pte(kernel_pmap, va);
    *pte = npte;
    sysm->valid1 = 1;
    return (va);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{
    struct local_sysmaps *sysm;
    pt_entry_t *pte, npte;
    vm_offset_t va1, va2;
    register_t intr;
    int cpu;

    intr = intr_disable();
    cpu = PCPU_GET(cpuid);
    sysm = &sysmap_lmem[cpu];
    sysm->saved_intr = intr;
    va1 = sysm->base;
    va2 = sysm->base + PAGE_SIZE;
    npte = TLBLO_PA_TO_PFN(phys1) |
        PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
    pte = pmap_pte(kernel_pmap, va1);
    *pte = npte;
    npte = TLBLO_PA_TO_PFN(phys2) |
        PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
    pte = pmap_pte(kernel_pmap, va2);
    *pte = npte;
    sysm->valid1 = sysm->valid2 = 1;
    return (va1);
}

static __inline void
pmap_lmem_unmap(void)
{
    struct local_sysmaps *sysm;
    pt_entry_t *pte;
    int cpu;

    cpu = PCPU_GET(cpuid);
    sysm = &sysmap_lmem[cpu];
    pte = pmap_pte(kernel_pmap, sysm->base);
    *pte = PTE_G;
    tlb_invalidate_address(kernel_pmap, sysm->base);
    sysm->valid1 = 0;
    if (sysm->valid2) {
        pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
        *pte = PTE_G;
        tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
        sysm->valid2 = 0;
    }
    intr_restore(sysm->saved_intr);
}
#else  /* __mips_n64 */

static __inline void
pmap_alloc_lmem_map(void)
{
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{

    return (0);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{

    return (0);
}

static __inline vm_offset_t
pmap_lmem_unmap(void)
{

    return (0);
}
#endif /* !__mips_n64 */
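/*
 * Illustrative sketch, not part of the original file: the intended
 * calling pattern for the pair above.  pmap_lmem_map1() disables
 * interrupts and installs a temporary per-CPU mapping; the caller
 * touches the page, then must call pmap_lmem_unmap() to tear the
 * mapping down and restore interrupts.  pmap_zero_page() later in this
 * file follows exactly this shape.  Compiled out behind a hypothetical
 * PMAP_LMEM_DEMO guard.
 */
#ifdef PMAP_LMEM_DEMO
static void
pmap_lmem_zero_demo(vm_paddr_t phys)
{
    vm_offset_t va;

    va = pmap_lmem_map1(phys);	/* map one high page, interrupts off */
    bzero((caddr_t)va, PAGE_SIZE);	/* safe: the window is per-CPU */
    mips_dcache_wbinv_range(va, PAGE_SIZE);
    pmap_lmem_unmap();		/* unmap and restore interrupts */
}
#endif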
/*
 * Page table entry lookup routines.
 */
static __inline pd_entry_t *
pmap_segmap(pmap_t pmap, vm_offset_t va)
{

    return (&pmap->pm_segtab[pmap_seg_index(va)]);
}

#ifdef __mips_n64
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{
    pd_entry_t *pde;

    pde = (pd_entry_t *)*pdpe;
    return (&pde[pmap_pde_index(va)]);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{
    pd_entry_t *pdpe;

    pdpe = pmap_segmap(pmap, va);
    if (pdpe == NULL || *pdpe == NULL)
        return (NULL);

    return (pmap_pdpe_to_pde(pdpe, va));
}
#else
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{

    return (pdpe);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{

    return (pmap_segmap(pmap, va));
}
#endif

static __inline pt_entry_t *
pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
{
    pt_entry_t *pte;

    pte = (pt_entry_t *)*pde;
    return (&pte[pmap_pte_index(va)]);
}

pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
    pd_entry_t *pde;

    pde = pmap_pde(pmap, va);
    if (pde == NULL || *pde == NULL)
        return (NULL);

    return (pmap_pde_to_pte(pde, va));
}
static vm_offset_t
pmap_steal_memory(vm_size_t size)
{
    vm_size_t bank_size;
    vm_offset_t pa, va;

    size = round_page(size);

    bank_size = phys_avail[1] - phys_avail[0];
    while (size > bank_size) {
        int i;

        for (i = 0; phys_avail[i + 2]; i += 2) {
            phys_avail[i] = phys_avail[i + 2];
            phys_avail[i + 1] = phys_avail[i + 3];
        }
        phys_avail[i] = 0;
        phys_avail[i + 1] = 0;
        if (!phys_avail[0])
            panic("pmap_steal_memory: out of memory");
        bank_size = phys_avail[1] - phys_avail[0];
    }

    pa = phys_avail[0];
    phys_avail[0] += size;
    if (MIPS_DIRECT_MAPPABLE(pa) == 0)
        panic("Out of memory below 512Meg?");
    va = MIPS_PHYS_TO_DIRECT(pa);
    bzero((caddr_t)va, size);
    return (va);
}
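/*
 * Illustrative usage note, not from the original file: early boot code
 * grabs zeroed, direct-mapped memory with pmap_steal_memory() before
 * the page allocator exists.  pmap_bootstrap() below does exactly this
 * for the message buffer and thread0's kstack, e.g.:
 *
 *	msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
 *
 * The returned va lies in the direct map, so no pmap_kenter() is
 * needed and the memory is already zeroed.
 */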
/*
 * Bootstrap the system enough to run with virtual memory.  This
 * assumes that the phys_avail array has been initialized.
 */
static void
pmap_create_kernel_pagetable(void)
{
    int i, j;
    vm_offset_t ptaddr;
    pt_entry_t *pte;
#ifdef __mips_n64
    pd_entry_t *pde;
    vm_offset_t pdaddr;
    int npt, npde;
#endif

    /*
     * Allocate segment table for the kernel
     */
    kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);

    /*
     * Allocate second level page tables for the kernel
     */
#ifdef __mips_n64
    npde = howmany(NKPT, NPDEPG);
    pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
#endif
    nkpt = NKPT;
    ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);

    /*
     * The R[4-7]?00 stores only one copy of the Global bit in the
     * translation lookaside buffer for each 2 page entry.  Thus invalid
     * entries must have the Global bit set so when Entry LO and Entry HI
     * G bits are ANDed together they will produce a global bit to store
     * in the tlb.
     */
    for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
        *pte = PTE_G;

#ifdef __mips_n64
    for (i = 0, npt = nkpt; npt > 0; i++) {
        kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
        pde = (pd_entry_t *)kernel_segmap[i];

        for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
            pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
    }
#else
    for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
        kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
#endif

    PMAP_LOCK_INIT(kernel_pmap);
    kernel_pmap->pm_segtab = kernel_segmap;
    kernel_pmap->pm_active = ~0;
    TAILQ_INIT(&kernel_pmap->pm_pvlist);
    kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
    kernel_pmap->pm_asid[0].gen = 0;
    kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
}
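/*
 * Illustrative arithmetic, not from the original file: with 4K pages
 * and 1024 PTEs per page-table page, each of the nkpt pages wired in
 * above maps NPTEPG * PAGE_SIZE = 1024 * 4096 = 4MB of KVA, so
 * kernel_vm_end advances by nkpt * 4MB past VM_MIN_KERNEL_ADDRESS.
 */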
void
pmap_bootstrap(void)
{
    int i;
    int need_local_mappings = 0;

    /* Sort. */
    for (i = 0; phys_avail[i + 1] != 0; i += 2) {
        /*
         * Keep the memory aligned on page boundary.
         */
        phys_avail[i] = round_page(phys_avail[i]);
        phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);

        if (i < 2)
            continue;
        if (phys_avail[i - 2] > phys_avail[i]) {
            vm_paddr_t ptemp[2];

            ptemp[0] = phys_avail[i + 0];
            ptemp[1] = phys_avail[i + 1];

            phys_avail[i + 0] = phys_avail[i - 2];
            phys_avail[i + 1] = phys_avail[i - 1];

            phys_avail[i - 2] = ptemp[0];
            phys_avail[i - 1] = ptemp[1];
        }

        /*
         * In 32 bit, we may have memory which cannot be mapped
         * directly.  This memory will need temporary mapping
         * before it can be accessed.
         */
        if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1]))
            need_local_mappings = 1;
    }

    /*
     * Copy the phys_avail[] array before we start stealing memory from it.
     */
    for (i = 0; phys_avail[i + 1] != 0; i += 2) {
        physmem_desc[i] = phys_avail[i];
        physmem_desc[i + 1] = phys_avail[i + 1];
    }

    Maxmem = atop(phys_avail[i - 1]);

    if (bootverbose) {
        printf("Physical memory chunk(s):\n");
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
            vm_paddr_t size;

            size = phys_avail[i + 1] - phys_avail[i];
            printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
                (uintmax_t) phys_avail[i],
                (uintmax_t) phys_avail[i + 1] - 1,
                (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
        }
        printf("Maxmem is 0x%0lx\n", ptoa(Maxmem));
    }

    /*
     * Steal the message buffer from the beginning of memory.
     */
    msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
    msgbufinit(msgbufp, MSGBUF_SIZE);

    /*
     * Steal thread0 kstack.
     */
    kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);

    virtual_avail = VM_MIN_KERNEL_ADDRESS;
    virtual_end = VM_MAX_KERNEL_ADDRESS;

#ifdef SMP
    /*
     * Steal some virtual address space to map the pcpu area.
     */
    virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
    pcpup = (struct pcpu *)virtual_avail;
    virtual_avail += PAGE_SIZE * 2;

    /*
     * Initialize the wired TLB entry mapping the pcpu region for
     * the BSP at 'pcpup'.  Up until this point we were operating
     * with the 'pcpup' for the BSP pointing to a virtual address
     * in KSEG0 so there was no need for a TLB mapping.
     */
    mips_pcpu_tlb_init(PCPU_ADDR(0));

    if (bootverbose)
        printf("pcpu is available at virtual address %p.\n", pcpup);
#endif

    if (need_local_mappings)
        pmap_alloc_lmem_map();
    pmap_create_kernel_pagetable();
    pmap_max_asid = VMNUM_PIDS;
}
/*
 *	Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

    TAILQ_INIT(&m->md.pv_list);
    m->md.pv_list_count = 0;
    m->md.pv_flags = 0;
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 *	pmap_init has been enhanced to support in a fairly consistent
 *	way, discontiguous physical memory.
 */
void
pmap_init(void)
{

    /*
     * Initialize the address space (zone) for the pv entries.  Set a
     * high water mark so that the system can recover from excessive
     * numbers of pv entries.
     */
    pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
        NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
    pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count;
    pv_entry_high_water = 9 * (pv_entry_max / 10);
    uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
}
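/*
 * Illustrative arithmetic, not from the original file: with the default
 * PMAP_SHPGPERPROC of 200, a hypothetical maxproc of 1000, and 16384
 * managed pages, pv_entry_max = 200 * 1000 + 16384 = 216384.  The high
 * water mark is 9 * (216384 / 10) = 194742 (note the integer division),
 * past which get_pv_entry() wakes the pagedaemon and starts reclaiming.
 */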
/***************************************************
 * Low level helper routines.....
 ***************************************************/

static __inline void
pmap_invalidate_all_local(pmap_t pmap)
{

    if (pmap == kernel_pmap) {
        tlb_invalidate_all();
        return;
    }
    if (pmap->pm_active & PCPU_GET(cpumask))
        tlb_invalidate_all_user(pmap);
    else
        pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
}

#ifdef SMP
static void
pmap_invalidate_all(pmap_t pmap)
{

    smp_rendezvous(0, pmap_invalidate_all_action, 0, pmap);
}

static void
pmap_invalidate_all_action(void *arg)
{

    pmap_invalidate_all_local((pmap_t)arg);
}
#else
static void
pmap_invalidate_all(pmap_t pmap)
{

    pmap_invalidate_all_local(pmap);
}
#endif
static __inline void
pmap_invalidate_page_local(pmap_t pmap, vm_offset_t va)
{

    if (is_kernel_pmap(pmap)) {
        tlb_invalidate_address(pmap, va);
        return;
    }
    if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
        return;
    else if (!(pmap->pm_active & PCPU_GET(cpumask))) {
        pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
        return;
    }
    tlb_invalidate_address(pmap, va);
}

#ifdef SMP
struct pmap_invalidate_page_arg {
    pmap_t pmap;
    vm_offset_t va;
};

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
    struct pmap_invalidate_page_arg arg;

    arg.pmap = pmap;
    arg.va = va;
    smp_rendezvous(0, pmap_invalidate_page_action, 0, &arg);
}

static void
pmap_invalidate_page_action(void *arg)
{
    struct pmap_invalidate_page_arg *p = arg;

    pmap_invalidate_page_local(p->pmap, p->va);
}
#else
static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

    pmap_invalidate_page_local(pmap, va);
}
#endif

static __inline void
pmap_update_page_local(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{

    if (is_kernel_pmap(pmap)) {
        tlb_update(pmap, va, pte);
        return;
    }
    if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
        return;
    else if (!(pmap->pm_active & PCPU_GET(cpumask))) {
        pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
        return;
    }
    tlb_update(pmap, va, pte);
}

#ifdef SMP
struct pmap_update_page_arg {
    pmap_t pmap;
    vm_offset_t va;
    pt_entry_t pte;
};

static void
pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{
    struct pmap_update_page_arg arg;

    arg.pmap = pmap;
    arg.va = va;
    arg.pte = pte;
    smp_rendezvous(0, pmap_update_page_action, 0, &arg);
}

static void
pmap_update_page_action(void *arg)
{
    struct pmap_update_page_arg *p = arg;

    pmap_update_page_local(p->pmap, p->va, p->pte);
}
#else
static void
pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{

    pmap_update_page_local(pmap, va, pte);
}
#endif
/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
    pt_entry_t *pte;
    vm_offset_t retval = 0;

    PMAP_LOCK(pmap);
    pte = pmap_pte(pmap, va);
    if (pte) {
        retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
    }
    PMAP_UNLOCK(pmap);
    return (retval);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
    pt_entry_t pte;
    vm_page_t m;

    m = NULL;
    vm_page_lock_queues();
    PMAP_LOCK(pmap);
    pte = *pmap_pte(pmap, va);
    if (pte != 0 && pte_test(&pte, PTE_V) &&
        (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) {
        m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte));
        vm_page_hold(m);
    }
    vm_page_unlock_queues();
    PMAP_UNLOCK(pmap);
    return (m);
}
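/*
 * Illustrative sketch, not from the original file: translating a kernel
 * virtual address back to physical with pmap_extract().  A return value
 * of 0 indicates that no valid mapping exists.  Compiled out behind a
 * hypothetical PMAP_EXTRACT_DEMO guard.
 */
#ifdef PMAP_EXTRACT_DEMO
static void
pmap_extract_demo(vm_offset_t va)
{
    vm_paddr_t pa;

    pa = pmap_extract(kernel_pmap, va);
    if (pa != 0)
        printf("va %p -> pa 0x%jx\n", (void *)va, (uintmax_t)pa);
}
#endif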
/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * add a wired page to the kva
 */
void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
{
    pt_entry_t *pte;
    pt_entry_t opte, npte;

#ifdef PMAP_DEBUG
    printf("pmap_kenter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
#endif
    npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | attr;

    pte = pmap_pte(kernel_pmap, va);
    opte = *pte;
    *pte = npte;
    if (pte_test(&opte, PTE_V) && opte != npte)
        pmap_update_page(kernel_pmap, va, npte);
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

    KASSERT(is_cacheable_mem(pa),
        ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));

    pmap_kenter_attr(va, pa, PTE_C_CACHE);
}

/*
 * remove a page from the kernel pagetables
 */
 /* PMAP_INLINE */ void
pmap_kremove(vm_offset_t va)
{
    pt_entry_t *pte;

    /*
     * Write back all caches from the page being destroyed
     */
    mips_dcache_wbinv_range_index(va, PAGE_SIZE);

    pte = pmap_pte(kernel_pmap, va);
    *pte = PTE_G;
    pmap_invalidate_page(kernel_pmap, va);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping.  Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged.  Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 *
 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
    vm_offset_t va, sva;

    if (MIPS_DIRECT_MAPPABLE(end))
        return (MIPS_PHYS_TO_DIRECT(start));

    va = sva = *virt;
    while (start < end) {
        pmap_kenter(va, start);
        va += PAGE_SIZE;
        start += PAGE_SIZE;
    }
    *virt = va;
    return (sva);
}

/*
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
    int i;
    vm_offset_t origva = va;

    for (i = 0; i < count; i++) {
        pmap_flush_pvcache(m[i]);
        pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
        va += PAGE_SIZE;
    }

    mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
}
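/*
 * Illustrative sketch, not from the original file: the classic
 * pmap_qenter()/pmap_qremove() pairing for a temporary kernel window
 * over a run of wired pages.  The kva and page array here are
 * hypothetical; the demo is compiled out behind a PMAP_QENTER_DEMO
 * guard.
 */
#ifdef PMAP_QENTER_DEMO
static void
pmap_qenter_demo(vm_offset_t kva, vm_page_t *pages, int npages)
{

    pmap_qenter(kva, pages, npages);	/* map the run at kva */
    /* ... access the pages through kva ... */
    pmap_qremove(kva, npages);		/* tear the mappings down */
}
#endif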
/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
    /*
     * No need to wb/inv caches here,
     *   pmap_kremove will do it for us
     */

    while (count-- > 0) {
        pmap_kremove(va);
        va += PAGE_SIZE;
    }
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * Simplify the reference counting of page table pages.  Specifically, use
 * the page table page's wired count rather than its hold count to contain
 * the reference count.
 */

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
    --m->wire_count;
    if (m->wire_count == 0)
        return (_pmap_unwire_pte_hold(pmap, va, m));
    else
        return (0);
}

static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
    pd_entry_t *pde;

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    /*
     * unmap the page table page
     */
#ifdef __mips_n64
    if (m->pindex < NUPDE)
        pde = pmap_pde(pmap, va);
    else
        pde = pmap_segmap(pmap, va);
#else
    pde = pmap_pde(pmap, va);
#endif
    *pde = 0;
    pmap->pm_stats.resident_count--;

#ifdef __mips_n64
    if (m->pindex < NUPDE) {
        pd_entry_t *pdp;
        vm_page_t pdpg;

        /*
         * Recursively decrement next level pagetable refcount
         */
        pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
        pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
        pmap_unwire_pte_hold(pmap, va, pdpg);
    }
#endif
    if (pmap->pm_ptphint == m)
        pmap->pm_ptphint = NULL;

    /*
     * If the page is finally unwired, simply free it.
     */
    vm_page_free_zero(m);
    atomic_subtract_int(&cnt.v_wire_count, 1);
    return (1);
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
{
    unsigned ptepindex;
    pd_entry_t pteva;

    if (va >= VM_MAXUSER_ADDRESS)
        return (0);

    if (mpte == NULL) {
        ptepindex = pmap_pde_pindex(va);
        if (pmap->pm_ptphint &&
            (pmap->pm_ptphint->pindex == ptepindex)) {
            mpte = pmap->pm_ptphint;
        } else {
            pteva = *pmap_pde(pmap, va);
            mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pteva));
            pmap->pm_ptphint = mpte;
        }
    }
    return (pmap_unwire_pte_hold(pmap, va, mpte));
}
void
pmap_pinit0(pmap_t pmap)
{
    int i;

    PMAP_LOCK_INIT(pmap);
    pmap->pm_segtab = kernel_segmap;
    pmap->pm_active = 0;
    pmap->pm_ptphint = NULL;
    for (i = 0; i < MAXCPU; i++) {
        pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
        pmap->pm_asid[i].gen = 0;
    }
    PCPU_SET(curpmap, pmap);
    TAILQ_INIT(&pmap->pm_pvlist);
    bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

static void
pmap_grow_pte_page_cache()
{

#ifdef __mips_n64
    vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
#else
    vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
#endif
}

static vm_page_t
pmap_alloc_pte_page(unsigned int index, int req)
{
    vm_page_t m;

    m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req);
    if (m == NULL)
        return (NULL);

    if ((m->flags & PG_ZERO) == 0)
        pmap_zero_page(m);

    m->pindex = index;
    atomic_add_int(&cnt.v_wire_count, 1);
    m->wire_count = 1;
    return (m);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
    vm_offset_t ptdva;
    vm_page_t ptdpg;
    int i;

    PMAP_LOCK_INIT(pmap);

    /*
     * allocate the page directory page
     */
    while ((ptdpg = pmap_alloc_pte_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
        pmap_grow_pte_page_cache();

    ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
    pmap->pm_segtab = (pd_entry_t *)ptdva;
    pmap->pm_active = 0;
    pmap->pm_ptphint = NULL;
    for (i = 0; i < MAXCPU; i++) {
        pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
        pmap->pm_asid[i].gen = 0;
    }
    TAILQ_INIT(&pmap->pm_pvlist);
    bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
    return (1);
}
/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
{
    vm_offset_t pageva;
    vm_page_t m;

    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
        ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

    /*
     * Find or fabricate a new pagetable page
     */
    if ((m = pmap_alloc_pte_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
        if (flags & M_WAITOK) {
            PMAP_UNLOCK(pmap);
            vm_page_unlock_queues();
            pmap_grow_pte_page_cache();
            vm_page_lock_queues();
            PMAP_LOCK(pmap);
        }

        /*
         * Indicate the need to retry.  While waiting, the page
         * table page may have been allocated.
         */
        return (NULL);
    }

    /*
     * Map the pagetable page into the process address space, if it
     * isn't already there.
     */
    pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));

#ifdef __mips_n64
    if (ptepindex >= NUPDE) {
        pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
    } else {
        pd_entry_t *pdep, *pde;
        int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
        int pdeindex = ptepindex & (NPDEPG - 1);
        vm_page_t pg;

        pdep = &pmap->pm_segtab[segindex];
        if (*pdep == NULL) {
            /* recurse for allocating page dir */
            if (_pmap_allocpte(pmap, NUPDE + segindex,
                flags) == NULL) {
                /* alloc failed, release current */
                --m->wire_count;
                atomic_subtract_int(&cnt.v_wire_count, 1);
                vm_page_free_zero(m);
                return (NULL);
            }
        }
        pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
        pg->wire_count++;

        /* Next level entry */
        pde = (pd_entry_t *)*pdep;
        pde[pdeindex] = (pd_entry_t)pageva;
        pmap->pm_ptphint = m;
    }
#else
    pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
#endif
    pmap->pm_stats.resident_count++;

    /*
     * Set the page table hint
     */
    pmap->pm_ptphint = m;
    return (m);
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
{
    unsigned ptepindex;
    pd_entry_t *pde;
    vm_page_t m;

    KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
        (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
        ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

    /*
     * Calculate pagetable page index
     */
    ptepindex = pmap_pde_pindex(va);
retry:
    /*
     * Get the page directory entry
     */
    pde = pmap_pde(pmap, va);

    /*
     * If the page table page is mapped, we just increment the hold
     * count, and activate it.
     */
    if (pde != NULL && *pde != NULL) {
        /*
         * In order to get the page table page, try the hint first.
         */
        if (pmap->pm_ptphint &&
            (pmap->pm_ptphint->pindex == ptepindex)) {
            m = pmap->pm_ptphint;
        } else {
            m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
            pmap->pm_ptphint = m;
        }
        m->wire_count++;
    } else {
        /*
         * Here if the pte page isn't mapped, or if it has been
         * deallocated.
         */
        m = _pmap_allocpte(pmap, ptepindex, flags);
        if (m == NULL && (flags & M_WAITOK))
            goto retry;
    }
    return (m);
}
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * - Merged pmap_release and pmap_release_free_page.  When pmap_release is
 *   called only the page directory page(s) can be left in the pmap pte
 *   object, since all page table pages will have been freed by
 *   pmap_remove_pages and pmap_remove.  In addition, there can only be one
 *   reference to the pmap and the page directory is wired, so the page(s)
 *   can never be busy.  So all there is to do is clear the magic mappings
 *   from the page directory and free the page(s).
 */

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
    vm_offset_t ptdva;
    vm_page_t ptdpg;

    KASSERT(pmap->pm_stats.resident_count == 0,
        ("pmap_release: pmap resident count %ld != 0",
        pmap->pm_stats.resident_count));

    ptdva = (vm_offset_t)pmap->pm_segtab;
    ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));

    ptdpg->wire_count--;
    atomic_subtract_int(&cnt.v_wire_count, 1);
    vm_page_free_zero(ptdpg);
    PMAP_LOCK_DESTROY(pmap);
}
/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
    vm_page_t nkpg;
    pd_entry_t *pde, *pdpe;
    pt_entry_t *pte;
    int i;

    mtx_assert(&kernel_map->system_mtx, MA_OWNED);
    addr = roundup2(addr, NBSEG);
    if (addr - 1 >= kernel_map->max_offset)
        addr = kernel_map->max_offset;
    while (kernel_vm_end < addr) {
        pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
#ifdef __mips_n64
        if (*pdpe == 0) {
            /* new intermediate page table entry */
            nkpg = pmap_alloc_pte_page(nkpt, VM_ALLOC_INTERRUPT);
            if (nkpg == NULL)
                panic("pmap_growkernel: no memory to grow kernel");
            *pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
            continue; /* try again */
        }
#endif
        pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
        if (*pde != 0) {
            kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
            if (kernel_vm_end - 1 >= kernel_map->max_offset) {
                kernel_vm_end = kernel_map->max_offset;
                break;
            }
            continue;
        }

        /*
         * This index is bogus, but out of the way
         */
        nkpg = pmap_alloc_pte_page(nkpt, VM_ALLOC_INTERRUPT);
        if (nkpg == NULL)
            panic("pmap_growkernel: no memory to grow kernel");
        nkpt++;
        *pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));

        /*
         * The R[4-7]?00 stores only one copy of the Global bit in
         * the translation lookaside buffer for each 2 page entry.
         * Thus invalid entries must have the Global bit set so when
         * Entry LO and Entry HI G bits are ANDed together they will
         * produce a global bit to store in the tlb.
         */
        pte = (pt_entry_t *)*pde;
        for (i = 0; i < NPTEPG; i++)
            pte[i] = PTE_G;

        kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
        if (kernel_vm_end - 1 >= kernel_map->max_offset) {
            kernel_vm_end = kernel_map->max_offset;
            break;
        }
    }
}
/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{

    pv_entry_count--;
    uma_zfree(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry(pmap_t locked_pmap)
{
    static const struct timeval printinterval = { 60, 0 };
    static struct timeval lastprint;
    struct vpgqueues *vpq;
    pt_entry_t *pte, oldpte;
    pmap_t pmap;
    pv_entry_t allocated_pv, next_pv, pv;
    vm_offset_t va;
    vm_page_t m;

    PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
    if (allocated_pv != NULL) {
        pv_entry_count++;
        if (pv_entry_count > pv_entry_high_water)
            pagedaemon_wakeup();
        else
            return (allocated_pv);
    }
    /*
     * Reclaim pv entries: At first, destroy mappings to inactive
     * pages.  After that, if a pv entry is still needed, destroy
     * mappings to active pages.
     */
    if (ratecheck(&lastprint, &printinterval))
        printf("Approaching the limit on PV entries, "
            "increase the vm.pmap.shpgperproc tunable.\n");
    vpq = &vm_page_queues[PQ_INACTIVE];
retry:
    TAILQ_FOREACH(m, &vpq->pl, pageq) {
        if (m->hold_count || m->busy)
            continue;
        TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
            va = pv->pv_va;
            pmap = pv->pv_pmap;
            /* Avoid deadlock and lock recursion. */
            if (pmap > locked_pmap)
                PMAP_LOCK(pmap);
            else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
                continue;
            pmap->pm_stats.resident_count--;
            pte = pmap_pte(pmap, va);
            KASSERT(pte != NULL, ("pte"));
            oldpte = *pte;
            if (is_kernel_pmap(pmap))
                *pte = PTE_G;
            else
                *pte = 0;
            KASSERT(!pte_test(&oldpte, PTE_W),
                ("wired pte for unwired page"));
            if (m->md.pv_flags & PV_TABLE_REF)
                vm_page_flag_set(m, PG_REFERENCED);
            if (pte_test(&oldpte, PTE_D))
                vm_page_dirty(m);
            pmap_invalidate_page(pmap, va);
            TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
            m->md.pv_list_count--;
            TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
            pmap_unuse_pt(pmap, va, pv->pv_ptem);
            if (pmap != locked_pmap)
                PMAP_UNLOCK(pmap);
            if (allocated_pv == NULL)
                allocated_pv = pv;
            else
                free_pv_entry(pv);
        }
        if (TAILQ_EMPTY(&m->md.pv_list)) {
            vm_page_flag_clear(m, PG_WRITEABLE);
            m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
        }
    }
    if (allocated_pv == NULL) {
        if (vpq == &vm_page_queues[PQ_INACTIVE]) {
            vpq = &vm_page_queues[PQ_ACTIVE];
            goto retry;
        }
        panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
    }
    return (allocated_pv);
}
/*
 * Move pmap_collect() out of the machine-dependent code, rename it
 * to reflect its new location, and add page queue and flag locking.
 *
 * Notes: (1) alpha, i386, and ia64 had identical implementations
 * of pmap_collect() in terms of machine-independent interfaces;
 * (2) sparc64 doesn't require it; (3) powerpc had it as a TODO.
 *
 * MIPS implementation was identical to alpha [Junos 8.2]
 */

/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
static pv_entry_t
pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
    pv_entry_t pv;

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
        TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
            if (pmap == pv->pv_pmap && va == pv->pv_va)
                break;
        }
    } else {
        TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
            if (va == pv->pv_va)
                break;
        }
    }
    if (pv != NULL) {
        TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
        pvh->pv_list_count--;
        TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
    }
    return (pv);
}

static void
pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
    pv_entry_t pv;

    pv = pmap_pvh_remove(pvh, pmap, va);
    KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
        (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)),
        (u_long)va));
    free_pv_entry(pv);
}

static void
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    pmap_pvh_free(&m->md, pmap, va);
    if (TAILQ_EMPTY(&m->md.pv_list))
        vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
    vm_page_t m)
{
    pv_entry_t pv;

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    if (pv_entry_count < pv_entry_high_water &&
        (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
        pv_entry_count++;
        pv->pv_va = va;
        pv->pv_pmap = pmap;
        pv->pv_ptem = mpte;
        TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
        TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
        m->md.pv_list_count++;
        return (TRUE);
    } else
        return (FALSE);
}
/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
{
    pt_entry_t oldpte;
    vm_page_t m;
    vm_offset_t pa;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    PMAP_LOCK_ASSERT(pmap, MA_OWNED);

    oldpte = *ptq;
    if (is_kernel_pmap(pmap))
        *ptq = PTE_G;
    else
        *ptq = 0;

    if (pte_test(&oldpte, PTE_W))
        pmap->pm_stats.wired_count -= 1;

    pmap->pm_stats.resident_count -= 1;
    pa = TLBLO_PTE_TO_PA(oldpte);

    if (page_is_managed(pa)) {
        m = PHYS_TO_VM_PAGE(pa);
        if (pte_test(&oldpte, PTE_D)) {
            KASSERT(!pte_test(&oldpte, PTE_RO),
                ("%s: modified page not writable: va: %p, pte: 0x%x",
                __func__, (void *)va, oldpte));
            vm_page_dirty(m);
        }
        if (m->md.pv_flags & PV_TABLE_REF)
            vm_page_flag_set(m, PG_REFERENCED);
        m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);

        pmap_remove_entry(pmap, m, va);
    }
    return (pmap_unuse_pt(pmap, va, NULL));
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va)
{
    pt_entry_t *ptq;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    ptq = pmap_pte(pmap, va);

    /*
     * if there is no pte for this address, just skip it!!!
     */
    if (!ptq || !pte_test(ptq, PTE_V)) {
        return;
    }

    /*
     * Write back all caches from the page being destroyed
     */
    mips_dcache_wbinv_range_index(va, PAGE_SIZE);

    /*
     * get a local va for mappings for this pmap.
     */
    (void)pmap_remove_pte(pmap, ptq, va);
    pmap_invalidate_page(pmap, va);
}
/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
{
    vm_offset_t va_next;
    pd_entry_t *pde, *pdpe;
    pt_entry_t *pte;

    if (pmap == NULL)
        return;

    if (pmap->pm_stats.resident_count == 0)
        return;

    vm_page_lock_queues();
    PMAP_LOCK(pmap);

    /*
     * Special handling of removing one page.  A very common operation
     * and easy to short circuit some code.
     */
    if ((sva + PAGE_SIZE) == eva) {
        pmap_remove_page(pmap, sva);
        goto out;
    }
    for (; sva < eva; sva = va_next) {
        pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
        if (*pdpe == 0) {
            va_next = (sva + NBSEG) & ~SEGMASK;
            if (va_next < sva)
                va_next = eva;
            continue;
        }
#endif
        va_next = (sva + NBPDR) & ~PDRMASK;
        if (va_next < sva)
            va_next = eva;

        pde = pmap_pdpe_to_pde(pdpe, sva);
        if (*pde == NULL)
            continue;
        if (va_next > eva)
            va_next = eva;
        for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
            pte++, sva += PAGE_SIZE) {
            pmap_remove_page(pmap, sva);
        }
    }
out:
    vm_page_unlock_queues();
    PMAP_UNLOCK(pmap);
}

/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 *	Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */
void
pmap_remove_all(vm_page_t m)
{
    pv_entry_t pv;
    pt_entry_t *pte, tpte;

    KASSERT((m->flags & PG_FICTITIOUS) == 0,
        ("pmap_remove_all: page %p is fictitious", m));
    vm_page_lock_queues();

    if (m->md.pv_flags & PV_TABLE_REF)
        vm_page_flag_set(m, PG_REFERENCED);

    while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
        PMAP_LOCK(pv->pv_pmap);

        /*
         * If it's last mapping writeback all caches from
         * the page being destroyed
         */
        if (m->md.pv_list_count == 1)
            mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);

        pv->pv_pmap->pm_stats.resident_count--;

        pte = pmap_pte(pv->pv_pmap, pv->pv_va);

        tpte = *pte;
        if (is_kernel_pmap(pv->pv_pmap))
            *pte = PTE_G;
        else
            *pte = 0;

        if (pte_test(&tpte, PTE_W))
            pv->pv_pmap->pm_stats.wired_count--;

        /*
         * Update the vm_page_t clean and reference bits.
         */
        if (pte_test(&tpte, PTE_D)) {
            KASSERT(!pte_test(&tpte, PTE_RO),
                ("%s: modified page not writable: va: %p, pte: 0x%x",
                __func__, (void *)pv->pv_va, tpte));
            vm_page_dirty(m);
        }
        pmap_invalidate_page(pv->pv_pmap, pv->pv_va);

        TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
        TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
        m->md.pv_list_count--;
        pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
        PMAP_UNLOCK(pv->pv_pmap);
        free_pv_entry(pv);
    }

    vm_page_flag_clear(m, PG_WRITEABLE);
    m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
    vm_page_unlock_queues();
}
/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
    pt_entry_t *pte;
    pd_entry_t *pde, *pdpe;
    vm_offset_t va_next;

    if (pmap == NULL)
        return;

    if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
        pmap_remove(pmap, sva, eva);
        return;
    }
    if (prot & VM_PROT_WRITE)
        return;

    vm_page_lock_queues();
    PMAP_LOCK(pmap);
    for (; sva < eva; sva = va_next) {
        pt_entry_t pbits;
        vm_page_t m;
        vm_paddr_t pa;

        pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
        if (*pdpe == 0) {
            va_next = (sva + NBSEG) & ~SEGMASK;
            if (va_next < sva)
                va_next = eva;
            continue;
        }
#endif
        va_next = (sva + NBPDR) & ~PDRMASK;
        if (va_next < sva)
            va_next = eva;

        pde = pmap_pdpe_to_pde(pdpe, sva);
        if (pde == NULL || *pde == NULL)
            continue;
        if (va_next > eva)
            va_next = eva;

        for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
             sva += PAGE_SIZE) {

            /* Skip invalid PTEs */
            if (!pte_test(pte, PTE_V))
                continue;
            pbits = *pte;
            pa = TLBLO_PTE_TO_PA(pbits);
            if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) {
                m = PHYS_TO_VM_PAGE(pa);
                vm_page_dirty(m);
                m->md.pv_flags &= ~PV_TABLE_MOD;
            }
            pte_clear(&pbits, PTE_D);
            pte_set(&pbits, PTE_RO);

            if (pbits != *pte) {
                *pte = pbits;
                pmap_update_page(pmap, sva, pbits);
            }
        }
    }
    vm_page_unlock_queues();
    PMAP_UNLOCK(pmap);
}
/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
    vm_offset_t pa, opa;
    pt_entry_t *pte;
    pt_entry_t origpte, newpte;
    pv_entry_t pv;
    vm_page_t mpte, om;
    int rw;

    if (pmap == NULL)
        return;

    va &= ~PAGE_MASK;
    KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
        (m->oflags & VPO_BUSY) != 0,
        ("pmap_enter: page %p is not busy", m));

    mpte = NULL;

    vm_page_lock_queues();
    PMAP_LOCK(pmap);

    /*
     * In the case that a page table page is not resident, we are
     * creating it here.
     */
    if (va < VM_MAXUSER_ADDRESS) {
        mpte = pmap_allocpte(pmap, va, M_WAITOK);
    }
    pte = pmap_pte(pmap, va);

    /*
     * Page Directory table entry not valid, we need a new PT page
     */
    if (pte == NULL) {
        panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
            (void *)pmap->pm_segtab, (void *)va);
    }
    pa = VM_PAGE_TO_PHYS(m);
    om = NULL;
    origpte = *pte;
    opa = TLBLO_PTE_TO_PA(origpte);

    /*
     * Mapping has not changed, must be protection or wiring change.
     */
    if (pte_test(&origpte, PTE_V) && opa == pa) {
        /*
         * Wiring change, just update stats.  We don't worry about
         * wiring PT pages as they remain resident as long as there
         * are valid mappings in them.  Hence, if a user page is
         * wired, the PT page will be also.
         */
        if (wired && !pte_test(&origpte, PTE_W))
            pmap->pm_stats.wired_count++;
        else if (!wired && pte_test(&origpte, PTE_W))
            pmap->pm_stats.wired_count--;

        KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
            ("%s: modified page not writable: va: %p, pte: 0x%x",
            __func__, (void *)va, origpte));

        /*
         * Remove extra pte reference
         */
        if (mpte)
            mpte->wire_count--;

        if (page_is_managed(opa)) {
            om = m;
        }
        goto validate;
    }

    pv = NULL;

    /*
     * Mapping has changed, invalidate old range and fall through to
     * handle validating new mapping.
     */
    if (opa) {
        if (pte_test(&origpte, PTE_W))
            pmap->pm_stats.wired_count--;

        if (page_is_managed(opa)) {
            om = PHYS_TO_VM_PAGE(opa);
            pv = pmap_pvh_remove(&om->md, pmap, va);
        }
        if (mpte != NULL) {
            mpte->wire_count--;
            KASSERT(mpte->wire_count > 0,
                ("pmap_enter: missing reference to page table page,"
                " va: %p", (void *)va));
        }
    } else
        pmap->pm_stats.resident_count++;

    /*
     * Enter on the PV list if part of our managed memory.  Note that we
     * raise IPL while manipulating pv_table since pmap_enter can be
     * called at interrupt time.
     */
    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
        KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
            ("pmap_enter: managed mapping within the clean submap"));
        if (pv == NULL)
            pv = get_pv_entry(pmap);
        pv->pv_va = va;
        pv->pv_pmap = pmap;
        pv->pv_ptem = mpte;
        TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
        TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
        m->md.pv_list_count++;
    } else if (pv != NULL)
        free_pv_entry(pv);

    /*
     * Increment counters
     */
    if (wired)
        pmap->pm_stats.wired_count++;

validate:
    if ((access & VM_PROT_WRITE) != 0)
        m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF;
    rw = init_pte_prot(va, m, prot);

#ifdef PMAP_DEBUG
    printf("pmap_enter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
#endif
    /*
     * Now validate mapping with desired protection/wiring.
     */
    newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;

    if (is_cacheable_mem(pa))
        newpte |= PTE_C_CACHE;
    else
        newpte |= PTE_C_UNCACHED;

    if (wired)
        newpte |= PTE_W;

    if (is_kernel_pmap(pmap))
        newpte |= PTE_G;

    /*
     * if the mapping or permission bits are different, we need to
     * update the pte.
     */
    if (origpte != newpte) {
        if (pte_test(&origpte, PTE_V)) {
            *pte = newpte;
            if (page_is_managed(opa) && (opa != pa)) {
                if (om->md.pv_flags & PV_TABLE_REF)
                    vm_page_flag_set(om, PG_REFERENCED);
                om->md.pv_flags &=
                    ~(PV_TABLE_REF | PV_TABLE_MOD);
            }
            if (pte_test(&origpte, PTE_D)) {
                KASSERT(!pte_test(&origpte, PTE_RO),
                    ("pmap_enter: modified page not writable:"
                    " va: %p, pte: 0x%x", (void *)va, origpte));
                if (page_is_managed(opa))
                    vm_page_dirty(om);
            }
            if (page_is_managed(opa) &&
                TAILQ_EMPTY(&om->md.pv_list))
                vm_page_flag_clear(om, PG_WRITEABLE);
        } else {
            *pte = newpte;
        }
    }
    pmap_update_page(pmap, va, newpte);

    /*
     * Sync I & D caches for executable pages.  Do this only if the
     * target pmap belongs to the current process.  Otherwise, an
     * unresolvable TLB miss may occur.
     */
    if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
        (prot & VM_PROT_EXECUTE)) {
        mips_icache_sync_range(va, PAGE_SIZE);
        mips_dcache_wbinv_range(va, PAGE_SIZE);
    }
    vm_page_unlock_queues();
    PMAP_UNLOCK(pmap);
}
/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * but is *MUCH* faster than pmap_enter...
 */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

    vm_page_lock_queues();
    PMAP_LOCK(pmap);
    (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
    vm_page_unlock_queues();
    PMAP_UNLOCK(pmap);
}

static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{
    pt_entry_t *pte;
    vm_offset_t pa;

    KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
        (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
        ("pmap_enter_quick_locked: managed mapping within the clean submap"));
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    PMAP_LOCK_ASSERT(pmap, MA_OWNED);

    /*
     * In the case that a page table page is not resident, we are
     * creating it here.
     */
    if (va < VM_MAXUSER_ADDRESS) {
        pd_entry_t *pde;
        unsigned ptepindex;

        /*
         * Calculate pagetable page index
         */
        ptepindex = pmap_pde_pindex(va);
        if (mpte && (mpte->pindex == ptepindex)) {
            mpte->wire_count++;
        } else {
            /*
             * Get the page directory entry
             */
            pde = pmap_pde(pmap, va);

            /*
             * If the page table page is mapped, we just
             * increment the hold count, and activate it.
             */
            if (pde && *pde != 0) {
                if (pmap->pm_ptphint &&
                    (pmap->pm_ptphint->pindex == ptepindex)) {
                    mpte = pmap->pm_ptphint;
                } else {
                    mpte = PHYS_TO_VM_PAGE(
                        MIPS_DIRECT_TO_PHYS(*pde));
                    pmap->pm_ptphint = mpte;
                }
                mpte->wire_count++;
            } else {
                mpte = _pmap_allocpte(pmap, ptepindex,
                    M_NOWAIT);
                if (mpte == NULL)
                    return (mpte);
            }
        }
    } else {
        mpte = NULL;
    }

    pte = pmap_pte(pmap, va);
    if (pte_test(pte, PTE_V)) {
        if (mpte != NULL) {
            mpte->wire_count--;
            mpte = NULL;
        }
        return (mpte);
    }

    /*
     * Enter on the PV list if part of our managed memory.
     */
    if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
        !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
        if (mpte != NULL) {
            pmap_unwire_pte_hold(pmap, va, mpte);
            mpte = NULL;
        }
        return (mpte);
    }

    /*
     * Increment counters
     */
    pmap->pm_stats.resident_count++;

    pa = VM_PAGE_TO_PHYS(m);

    /*
     * Now validate mapping with RO protection
     */
    *pte = TLBLO_PA_TO_PFN(pa) | PTE_V;

    if (is_cacheable_mem(pa))
        *pte |= PTE_C_CACHE;
    else
        *pte |= PTE_C_UNCACHED;

    if (is_kernel_pmap(pmap))
        *pte |= PTE_G;
    else {
        /*
         * Sync I & D caches.  Do this only if the target pmap
         * belongs to the current process.  Otherwise, an
         * unresolvable TLB miss may occur.
         */
        if (pmap == &curproc->p_vmspace->vm_pmap) {
            va &= ~PAGE_MASK;
            mips_icache_sync_range(va, PAGE_SIZE);
            mips_dcache_wbinv_range(va, PAGE_SIZE);
        }
    }
    return (mpte);
}
/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 *
 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
    vm_offset_t va;

    if (i != 0)
        printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
            __func__);

    if (MIPS_DIRECT_MAPPABLE(pa)) {
        va = MIPS_PHYS_TO_DIRECT(pa);
    } else {
#ifndef __mips_n64    /* XXX : to be converted to new style */
        int cpu;
        register_t intr;
        struct local_sysmaps *sysm;
        pt_entry_t *pte, npte;

        /* If this is used other than for dumps, we may need to leave
         * interrupts disabled on return.  If crash dumps don't work when
         * we get to this point, we might want to consider this (leaving things
         * disabled as a starting point ;-)
         */
        intr = intr_disable();
        cpu = PCPU_GET(cpuid);
        sysm = &sysmap_lmem[cpu];
        /* Since this is for the debugger, no locks or any other fun */
        npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
        pte = pmap_pte(kernel_pmap, sysm->base);
        *pte = npte;
        sysm->valid1 = 1;
        pmap_update_page(kernel_pmap, sysm->base, npte);
        va = sysm->base;
        intr_restore(intr);
#endif
    }
    return ((void *)va);
}
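/*
 * Illustrative sketch, not from the original file: dump code would map
 * each frame just long enough to copy it out, then release the window.
 * 'dump_write' is hypothetical; the demo is compiled out behind a
 * PMAP_KTEMP_DEMO guard.
 */
#ifdef PMAP_KTEMP_DEMO
static void
pmap_ktemp_demo(vm_paddr_t frame)
{
    void *p;

    p = pmap_kenter_temporary(frame, 0);	/* only page 0 is supported */
    /* dump_write(p, PAGE_SIZE); */
    pmap_kenter_temporary_free(frame);	/* tear the window down */
}
#endif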
void
pmap_kenter_temporary_free(vm_paddr_t pa)
{
#ifndef __mips_n64    /* XXX : to be converted to new style */
    int cpu;
    register_t intr;
    struct local_sysmaps *sysm;
#endif

    if (MIPS_DIRECT_MAPPABLE(pa)) {
        /* nothing to do for this case */
        return;
    }
#ifndef __mips_n64    /* XXX : to be converted to new style */
    cpu = PCPU_GET(cpuid);
    sysm = &sysmap_lmem[cpu];
    if (sysm->valid1) {
        pt_entry_t *pte;

        intr = intr_disable();
        pte = pmap_pte(kernel_pmap, sysm->base);
        *pte = PTE_G;
        pmap_invalidate_page(kernel_pmap, sysm->base);
        intr_restore(intr);
        sysm->valid1 = 0;
    }
#endif
}
/*
 * Moved the code to Machine Independent
 *	 vm_map_pmap_enter()
 */

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
    vm_page_t m, mpte;
    vm_pindex_t diff, psize;

    VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
    psize = atop(end - start);
    mpte = NULL;
    m = m_start;
    vm_page_lock_queues();
    PMAP_LOCK(pmap);
    while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
        mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
            prot, mpte);
        m = TAILQ_NEXT(m, listq);
    }
    vm_page_unlock_queues();
    PMAP_UNLOCK(pmap);
}
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
    KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
        ("pmap_object_init_pt: non-device object"));
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
    pt_entry_t *pte;

    if (pmap == NULL)
        return;

    PMAP_LOCK(pmap);
    pte = pmap_pte(pmap, va);

    if (wired && !pte_test(pte, PTE_W))
        pmap->pm_stats.wired_count++;
    else if (!wired && pte_test(pte, PTE_W))
        pmap->pm_stats.wired_count--;

    /*
     * Wiring is not a hardware characteristic so there is no need to
     * invalidate TLB.
     */
    if (wired)
        pte_set(pte, PTE_W);
    else
        pte_clear(pte, PTE_W);
    PMAP_UNLOCK(pmap);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{
}
/*
 *	pmap_zero_page zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 *
 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void
pmap_zero_page(vm_page_t m)
{
    vm_offset_t va;
    vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

    if (MIPS_DIRECT_MAPPABLE(phys)) {
        va = MIPS_PHYS_TO_DIRECT(phys);
        bzero((caddr_t)va, PAGE_SIZE);
        mips_dcache_wbinv_range(va, PAGE_SIZE);
    } else {
        va = pmap_lmem_map1(phys);
        bzero((caddr_t)va, PAGE_SIZE);
        mips_dcache_wbinv_range(va, PAGE_SIZE);
        pmap_lmem_unmap();
    }
}

/*
 *	pmap_zero_page_area zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 *
 *	off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
    vm_offset_t va;
    vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

    if (MIPS_DIRECT_MAPPABLE(phys)) {
        va = MIPS_PHYS_TO_DIRECT(phys);
        bzero((char *)(caddr_t)va + off, size);
        mips_dcache_wbinv_range(va + off, size);
    } else {
        va = pmap_lmem_map1(phys);
        bzero((char *)va + off, size);
        mips_dcache_wbinv_range(va + off, size);
        pmap_lmem_unmap();
    }
}

void
pmap_zero_page_idle(vm_page_t m)
{
    vm_offset_t va;
    vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

    if (MIPS_DIRECT_MAPPABLE(phys)) {
        va = MIPS_PHYS_TO_DIRECT(phys);
        bzero((caddr_t)va, PAGE_SIZE);
        mips_dcache_wbinv_range(va, PAGE_SIZE);
    } else {
        va = pmap_lmem_map1(phys);
        bzero((caddr_t)va, PAGE_SIZE);
        mips_dcache_wbinv_range(va, PAGE_SIZE);
        pmap_lmem_unmap();
    }
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 *
 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
    vm_offset_t va_src, va_dst;
    vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
    vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);

    if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
        /* easy case, all can be accessed via KSEG0 */
        /*
         * Flush all caches for VA that are mapped to this page
         * to make sure that data in SDRAM is up to date
         */
        pmap_flush_pvcache(src);
        mips_dcache_wbinv_range_index(
            MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
        va_src = MIPS_PHYS_TO_DIRECT(phys_src);
        va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
        bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
        mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
    } else {
        va_src = pmap_lmem_map2(phys_src, phys_dst);
        va_dst = va_src + PAGE_SIZE;
        bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
        mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
        pmap_lmem_unmap();
    }
}
/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
    pv_entry_t pv;
    int loops = 0;
    boolean_t rv;

    KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
        ("pmap_page_exists_quick: page %p is not managed", m));
    rv = FALSE;
    vm_page_lock_queues();
    TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
        if (pv->pv_pmap == pmap) {
            rv = TRUE;
            break;
        }
        loops++;
        if (loops >= 16)
            break;
    }
    vm_page_unlock_queues();
    return (rv);
}

/*
 * Remove all pages from specified address space
 * this aids process exit speeds.  Also, this code
 * is special cased for current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove
 * in the case of running down an entire address space.
 */
void
pmap_remove_pages(pmap_t pmap)
{
    pt_entry_t *pte, tpte;
    pv_entry_t pv, npv;
    vm_page_t m;

    if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
        printf("warning: pmap_remove_pages called with non-current pmap\n");
        return;
    }
    vm_page_lock_queues();
    PMAP_LOCK(pmap);
    for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv != NULL; pv = npv) {

        pte = pmap_pte(pv->pv_pmap, pv->pv_va);
        if (!pte_test(pte, PTE_V))
            panic("pmap_remove_pages: page on pm_pvlist has no pte");
        tpte = *pte;

        /*
         * We cannot remove wired pages from a process' mapping at this time
         */
        if (pte_test(&tpte, PTE_W)) {
            npv = TAILQ_NEXT(pv, pv_plist);
            continue;
        }
        *pte = is_kernel_pmap(pmap) ? PTE_G : 0;

        m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
        KASSERT(m != NULL,
            ("pmap_remove_pages: bad tpte %x", tpte));

        pv->pv_pmap->pm_stats.resident_count--;

        /*
         * Update the vm_page_t clean and reference bits.
         */
        if (pte_test(&tpte, PTE_D)) {
            vm_page_dirty(m);
        }
        npv = TAILQ_NEXT(pv, pv_plist);
        TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);

        m->md.pv_list_count--;
        TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
        if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
            vm_page_flag_clear(m, PG_WRITEABLE);
        }
        pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
        free_pv_entry(pv);
    }
    pmap_invalidate_all(pmap);
    PMAP_UNLOCK(pmap);
    vm_page_unlock_queues();
}
/*
 * pmap_testbit tests bits in pte's
 * note that the testbit/changebit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
    pv_entry_t pv;
    pt_entry_t *pte;
    boolean_t rv = FALSE;

    if (m->flags & PG_FICTITIOUS)
        return (rv);

    if (TAILQ_FIRST(&m->md.pv_list) == NULL)
        return (rv);

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
        PMAP_LOCK(pv->pv_pmap);
        pte = pmap_pte(pv->pv_pmap, pv->pv_va);
        rv = pte_test(pte, bit);
        PMAP_UNLOCK(pv->pv_pmap);
        if (rv)
            break;
    }
    return (rv);
}

/*
 * this routine is used to clear dirty bits in ptes
 */
static __inline void
pmap_changebit(vm_page_t m, int bit, boolean_t setem)
{
    pv_entry_t pv;
    pt_entry_t *pte;

    if (m->flags & PG_FICTITIOUS)
        return;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    /*
     * Loop over all current mappings setting/clearing as appropriate.
     * If setting RO, do we need to clear the VAC?
     */
    TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
        PMAP_LOCK(pv->pv_pmap);
        pte = pmap_pte(pv->pv_pmap, pv->pv_va);
        if (setem) {
            *pte |= bit;
            pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
        } else {
            pt_entry_t pbits = *pte;

            if (pbits != 0) {
                if (bit == PTE_D) {
                    if (pbits & PTE_D)
                        vm_page_dirty(m);
                    *pte = (pbits & ~PTE_D) | PTE_RO;
                } else {
                    *pte = pbits & ~bit;
                }
                pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
            }
        }
        PMAP_UNLOCK(pv->pv_pmap);
    }
    if (!setem && bit == PTE_D)
        vm_page_flag_clear(m, PG_WRITEABLE);
}
2618 * pmap_page_wired_mappings:
2620 * Return the number of managed mappings to the given physical page
2624 pmap_page_wired_mappings(vm_page_t m)
2632 if ((m->flags & PG_FICTITIOUS) != 0)
2634 vm_page_lock_queues();
2635 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2638 pte = pmap_pte(pmap, pv->pv_va);
2639 if (pte_test(pte, PTE_W))
2643 vm_page_unlock_queues();
2648 * Clear the write and modified bits in each of the given page's mappings.
2651 pmap_remove_write(vm_page_t m)
2657 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2658 if ((m->flags & PG_WRITEABLE) == 0)
2662 * Loop over all current mappings, setting/clearing as appropriate.
2664 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
2665 npv = TAILQ_NEXT(pv, pv_plist);
2666 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2667 if (pte == NULL || !pte_test(pte, PTE_V))
2668 panic("page on pm_pvlist has no pte");
2671 pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
2672 VM_PROT_READ | VM_PROT_EXECUTE);
2674 vm_page_flag_clear(m, PG_WRITEABLE);
2678 * pmap_ts_referenced:
2680 * Return the count of reference bits for a page, clearing all of them.
2683 pmap_ts_referenced(vm_page_t m)
2686 if (m->flags & PG_FICTITIOUS)
2689 if (m->md.pv_flags & PV_TABLE_REF) {
2690 m->md.pv_flags &= ~PV_TABLE_REF;
2699 * Return whether or not the specified physical page was modified
2700 * in any physical maps.
2703 pmap_is_modified(vm_page_t m)
2706 if (m->flags & PG_FICTITIOUS)
2709 if (m->md.pv_flags & PV_TABLE_MOD)
2712 return (pmap_testbit(m, PTE_D));
2718 * pmap_is_prefaultable:
2720 * Return whether or not the specified virtual address is eligible
2721 * for prefault.
2724 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2732 pde = pmap_pde(pmap, addr);
2733 if (pde != NULL && *pde != 0) {
2734 pte = pmap_pde_to_pte(pde, addr);
2742 * Clear the modify bits on the specified physical page.
2745 pmap_clear_modify(vm_page_t m)
2747 if (m->flags & PG_FICTITIOUS)
2749 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2750 if (m->md.pv_flags & PV_TABLE_MOD) {
2751 pmap_changebit(m, PTE_D, FALSE);
2752 m->md.pv_flags &= ~PV_TABLE_MOD;
2757 * pmap_clear_reference:
2759 * Clear the reference bit on the specified physical page.
2762 pmap_clear_reference(vm_page_t m)
2764 if (m->flags & PG_FICTITIOUS)
2767 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2768 if (m->md.pv_flags & PV_TABLE_REF) {
2769 m->md.pv_flags &= ~PV_TABLE_REF;
2774 * Miscellaneous support routines follow
2785 * Map a set of physical memory pages into the kernel virtual
2786 * address space. Return a pointer to where it is mapped. This
2787 * routine is intended to be used for mapping device memory,
2790 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
2793 pmap_mapdev(vm_offset_t pa, vm_size_t size)
2795 vm_offset_t va, tmpva, offset;
2798 * KSEG1 maps only the first 512MB of the physical address space.
2799 * For pa > 0x20000000 we must create a proper mapping using pmap_kenter.
2801 if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
2802 return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
2804 offset = pa & PAGE_MASK;
2805 size = roundup(size + offset, PAGE_SIZE);
2807 va = kmem_alloc_nofault(kernel_map, size);
2809 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2810 pa = trunc_page(pa);
2811 for (tmpva = va; size > 0;) {
2812 pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
2819 return ((void *)(va + offset));
2823 pmap_unmapdev(vm_offset_t va, vm_size_t size)
2826 vm_offset_t base, offset, tmpva;
2828 /* If the address is within KSEG1 then there is nothing to do */
2829 if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
2832 base = trunc_page(va);
2833 offset = va & PAGE_MASK;
2834 size = roundup(size + offset, PAGE_SIZE);
2835 for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
2836 pmap_kremove(tmpva);
2837 kmem_free(kernel_map, base, size);
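/*
 * A minimal usage sketch for the two routines above (illustration only,
 * not part of the original file; the physical address is a made-up
 * example of a device register window):
 */
#if 0
static void
example_map_device(void)
{
	void *regs;

	/* Map one page of device registers, uncached. */
	regs = pmap_mapdev((vm_offset_t)0x1f000000, PAGE_SIZE);
	/* ... device register accesses through 'regs' ... */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}
#endif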
2842 * perform the pmap work for mincore
2845 pmap_mincore(pmap_t pmap, vm_offset_t addr)
2847 pt_entry_t *ptep, pte;
2852 ptep = pmap_pte(pmap, addr);
2853 pte = (ptep != NULL) ? *ptep : 0;
2856 if (pte_test(&pte, PTE_V)) {
2859 val = MINCORE_INCORE;
2860 pa = TLBLO_PTE_TO_PA(pte);
2861 if (!page_is_managed(pa))
2864 m = PHYS_TO_VM_PAGE(pa);
2869 if (pte_test(&pte, PTE_D))
2870 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
2872 * Modified by someone else
2875 vm_page_lock_queues();
2876 if (m->dirty || pmap_is_modified(m))
2877 val |= MINCORE_MODIFIED_OTHER;
2878 vm_page_unlock_queues();
2881 * Referenced by us or someone else
2883 vm_page_lock_queues();
2884 if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
2885 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
2886 vm_page_flag_set(m, PG_REFERENCED);
2888 vm_page_unlock_queues();
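/*
 * The MINCORE_* bits computed above surface to userland through the
 * mincore(2) system call, one status byte per page.  A hedged userland
 * sketch (illustration only; the helper name is made up):
 */
#if 0
#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>

static int
example_resident(void *addr, size_t len)
{
	size_t ps = (size_t)getpagesize();
	char *vec = malloc((len + ps - 1) / ps);
	int rc = -1;

	if (vec != NULL && mincore(addr, len, vec) == 0)
		rc = vec[0] & MINCORE_INCORE;	/* is the first page resident? */
	free(vec);
	return (rc);
}
#endif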
2895 pmap_activate(struct thread *td)
2897 pmap_t pmap, oldpmap;
2898 struct proc *p = td->td_proc;
2902 pmap = vmspace_pmap(p->p_vmspace);
2903 oldpmap = PCPU_GET(curpmap);
2906 atomic_clear_32(&oldpmap->pm_active, PCPU_GET(cpumask));
2907 atomic_set_32(&pmap->pm_active, PCPU_GET(cpumask));
2908 pmap_asid_alloc(pmap);
2909 if (td == curthread) {
2910 PCPU_SET(segbase, pmap->pm_segtab);
2911 mips_wr_entryhi(pmap->pm_asid[PCPU_GET(cpuid)].asid);
2914 PCPU_SET(curpmap, pmap);
2919 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
2924 * Increase the starting virtual address of the given mapping if a
2925 * different alignment might result in more superpage mappings.
2928 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
2929 vm_offset_t *addr, vm_size_t size)
2931 vm_offset_t superpage_offset;
2935 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
2936 offset += ptoa(object->pg_color);
2937 superpage_offset = offset & SEGMASK;
2938 if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
2939 (*addr & SEGMASK) == superpage_offset)
2941 if ((*addr & SEGMASK) < superpage_offset)
2942 *addr = (*addr & ~SEGMASK) + superpage_offset;
2944 *addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
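/*
 * Worked example (hypothetical numbers, assuming NBSEG == 0x100000,
 * SEGMASK == 0xfffff, and a size of at least one full segment): for
 * offset 0x180000, superpage_offset is 0x80000; a requested *addr of
 * 0x20010000 has (*addr & SEGMASK) == 0x10000 < 0x80000, so it is moved
 * up to (0x20000000 + 0x80000) == 0x20080000.  *addr and offset then
 * agree modulo NBSEG, so whole segments of the mapping can be promoted.
 */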
2948 * Increase the starting virtual address of the given mapping so
2949 * that it is aligned to not be the second page in a TLB entry.
2950 * This routine assumes that the length is appropriately sized so
2951 * that, if required, the allocation does not share a TLB entry at all.
2954 pmap_align_tlb(vm_offset_t *addr)
2956 if ((*addr & PAGE_SIZE) == 0)
2957 	return;
2958 *addr += PAGE_SIZE;
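/*
 * Background for the test above: a MIPS TLB entry maps an even/odd pair
 * of virtual pages, so an allocation starting on an odd page (PAGE_SIZE
 * bit set) would share its first TLB entry with whatever precedes it;
 * bumping the start to the next even page avoids that.
 */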
2963 DB_SHOW_COMMAND(ptable, ddb_pid_dump)
2966 struct thread *td = NULL;
2973 td = db_lookup_thread(addr, TRUE);
2975 db_printf("Invalid pid or tid");
2979 if (p->p_vmspace == NULL) {
2980 db_printf("No vmspace for process");
2983 pmap = vmspace_pmap(p->p_vmspace);
2987 db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
2988 pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
2989 pmap->pm_asid[0].gen);
2990 for (i = 0; i < NPDEPG; i++) {
2995 pdpe = (pd_entry_t *)pmap->pm_segtab[i];
2998 db_printf("[%4d] %p\n", i, pdpe);
3000 for (j = 0; j < NPDEPG; j++) {
3001 pde = (pt_entry_t *)pdpe[j];
3004 db_printf("\t[%4d] %p\n", j, pde);
3008 pde = (pt_entry_t *)pdpe;
3010 for (k = 0; k < NPTEPG; k++) {
3012 if (pte == 0 || !pte_test(&pte, PTE_V))
3014 pa = TLBLO_PTE_TO_PA(pte);
3015 va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT);
3016 db_printf("\t\t[%04d] va: %p pte: %8x pa:%lx\n",
3017 k, (void *)va, pte, (u_long)pa);
3026 static void pads(pmap_t pm);
3027 void pmap_pvdump(vm_offset_t pa);
3029 /* print address space of pmap */
3036 if (pm == kernel_pmap)
3038 for (i = 0; i < NPTEPG; i++)
3039 if (pm->pm_segtab[i])
3040 for (j = 0; j < NPTEPG; j++) {
3041 va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
3042 if (pm == kernel_pmap && va < KERNBASE)
3044 if (pm != kernel_pmap &&
3045 va >= VM_MAXUSER_ADDRESS)
3047 ptep = pmap_pte(pm, va);
3048 if (pmap_pte_v(ptep))
3049 printf("%x:%x ", va, *(int *)ptep);
3055 pmap_pvdump(vm_offset_t pa)
3057 register pv_entry_t pv;
3060 printf("pa %x", pa);
3061 m = PHYS_TO_VM_PAGE(pa);
3062 for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3063 pv = TAILQ_NEXT(pv, pv_list)) {
3064 printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
3075 * Allocate TLB address space tag (called ASID or TLBPID) and return it.
3076 * It takes almost as much time, or more, to search the TLB for a
3077 * specific ASID and flush those entries as it does to flush the entire TLB.
3078 * Therefore, when we allocate a new ASID, we just take the next number. When
3079 * we run out of numbers, we flush the TLB, increment the generation count
3080 * and start over. ASID zero is reserved for kernel use.
3083 pmap_asid_alloc(pmap_t pmap)
3086 if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
3087     pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
3089 if (PCPU_GET(next_asid) == pmap_max_asid) {
3090 tlb_invalidate_all_user(NULL);
3091 PCPU_SET(asid_generation,
3092 (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
3093 if (PCPU_GET(asid_generation) == 0) {
3094 PCPU_SET(asid_generation, 1);
3096 PCPU_SET(next_asid, 1); /* 0 means invalid */
3098 pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
3099 pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
3100 PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
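/*
 * Worked example of the scheme above (hypothetical pmap_max_asid of
 * 256): ASIDs 1..255 are handed out sequentially; the request that
 * finds next_asid == 256 flushes all user TLB entries, increments
 * asid_generation, and restarts at ASID 1.  Any pmap still caching an
 * ASID from the old generation fails the generation check and is given
 * a fresh ASID the next time it is activated.
 */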
3105 page_is_managed(vm_offset_t pa)
3107 vm_offset_t pgnum = mips_btop(pa);
3109 if (pgnum >= first_page) {
3112 m = PHYS_TO_VM_PAGE(pa);
3115 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
3122 init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
3126 if (!(prot & VM_PROT_WRITE))
3127 rw = PTE_V | PTE_RO | PTE_C_CACHE;
3128 else if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
3129 if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
3130 rw = PTE_V | PTE_D | PTE_C_CACHE;
3132 rw = PTE_V | PTE_C_CACHE;
3133 vm_page_flag_set(m, PG_WRITEABLE);
3135 /* Needn't emulate a modified bit for unmanaged pages. */
3136 rw = PTE_V | PTE_D | PTE_C_CACHE;
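/*
 * Managed, writable pages thus start without PTE_D unless already known
 * to be modified; the first store takes a TLB-modified exception and
 * pmap_emulate_modified() sets PTE_D, emulating a hardware dirty bit.
 */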
3141 * pmap_emulate_modified: do dirty-bit emulation
3143 * On SMP, update just the local TLB; other CPUs will update their
3144 * TLBs from the PTE lazily if they take the exception.
3145 * Returns 0 in case of success, 1 if the page is read-only and we
3146 * need to take a fault.
3149 pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
3156 pte = pmap_pte(pmap, va);
3158 panic("pmap_emulate_modified: can't find PTE");
3160 /* It is possible that some other CPU changed m-bit */
3161 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
3162 pmap_update_page_local(pmap, va, *pte);
3167 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
3168 panic("pmap_emulate_modified: invalid pte");
3170 if (pte_test(pte, PTE_RO)) {
3171 /* write to read only page in the kernel */
3175 pte_set(pte, PTE_D);
3176 pmap_update_page_local(pmap, va, *pte);
3177 pa = TLBLO_PTE_TO_PA(*pte);
3178 if (!page_is_managed(pa))
3179 panic("pmap_emulate_modified: unmanaged page");
3180 m = PHYS_TO_VM_PAGE(pa);
3181 m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
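/*
 * Usage sketch (illustrative): the TLB-modified exception handler is
 * expected to invoke this routine roughly as
 *
 *	if (pmap_emulate_modified(pmap, badvaddr) != 0)
 *		... deliver a protection fault to the process ...
 *
 * so the first store to a clean, writable page only sets PTE_D and
 * refills the TLB instead of entering the full VM fault path.
 */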
3187 * Routine: pmap_kextract
3189 * Extract the physical page address associated
3190 * with the given kernel virtual address.
3192 /* PMAP_INLINE */ vm_offset_t
3193 pmap_kextract(vm_offset_t va)
3198 * First, the direct-mapped regions.
3200 #if defined(__mips_n64)
3201 if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
3202 return (MIPS_XKPHYS_TO_PHYS(va));
3204 if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
3205 return (MIPS_KSEG0_TO_PHYS(va));
3207 if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
3208 return (MIPS_KSEG1_TO_PHYS(va));
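/*
 * Worked example: KSEG0 and KSEG1 both direct-map physical address
 * zero, so va 0x80001000 (KSEG0, cached) and va 0xa0001000 (KSEG1,
 * uncached) both extract to pa 0x1000 without a page-table walk.
 */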
3211 * User virtual addresses.
3213 if (va < VM_MAXUSER_ADDRESS) {
3216 if (curproc && curproc->p_vmspace) {
3217 ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
3219 return (TLBLO_PTE_TO_PA(*ptep) |
3227 * Should be kernel virtual here, otherwise fail
3229 mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
3230 #if defined(__mips_n64)
3231 mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
3240 /* Is the kernel pmap initialized? */
3241 if (kernel_pmap->pm_active) {
3242 /* It's inside the virtual address range */
3243 ptep = pmap_pte(kernel_pmap, va);
3245 return (TLBLO_PTE_TO_PA(*ptep) |
3252 panic("%s for unknown address space %p.", __func__, (void *)va);
3257 pmap_flush_pvcache(vm_page_t m)
3262 for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3263 pv = TAILQ_NEXT(pv, pv_list)) {
3264 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);