/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 * from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
 * JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
 */
/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduce-protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */
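/*
 * Added note (commentary, not part of the original file): a concrete
 * instance of the lazy invalidation described above is this port's ASID
 * handling.  Rather than flushing a remote TLB immediately, the
 * pmap_invalidate_{page,all}_action() routines below simply set
 * pmap->pm_asid[cpuid].gen = 0 when the pmap is not active on that CPU;
 * the stale entries become unreachable the next time the pmap is
 * activated there and pmap_asid_alloc() hands out a fresh ASID.
 */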
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_msgbuf.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <vm/vm_param.h>
#include <sys/mutex.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <sys/sched.h>
#include <machine/cache.h>
#include <machine/md_var.h>
#if defined(DIAGNOSTIC)
#define	PMAP_DIAGNOSTIC
#endif

#ifndef PMAP_SHPGPERPROC
#define	PMAP_SHPGPERPROC 200
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define	PMAP_INLINE __inline
#else
#define	PMAP_INLINE
#endif
/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_segtab[(vm_offset_t)(v) >> SEGSHIFT]))
#define	segtab_pde(m, v)	(m[(vm_offset_t)(v) >> SEGSHIFT])

#define	pmap_pte_w(pte)	((*(int *)pte & PTE_W) != 0)
#define	pmap_pde_v(pte)	((*(int *)pte) != 0)
#define	pmap_pte_m(pte)	((*(int *)pte & PTE_M) != 0)
#define	pmap_pte_v(pte)	((*(int *)pte & PTE_V) != 0)

#define	pmap_pte_set_w(pte, v)	((v) ? (*(int *)pte |= PTE_W) : (*(int *)pte &= ~PTE_W))
#define	pmap_pte_set_prot(pte, v)	((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

#define	MIPS_SEGSIZE	(1L << SEGSHIFT)
#define	mips_segtrunc(va)	((va) & ~(MIPS_SEGSIZE - 1))
#define	pmap_TLB_invalidate_all()	MIPS_TBIAP()
#define	pmap_va_asid(pmap, va)	((va) | ((pmap)->pm_asid[PCPU_GET(cpuid)].asid << VMTLB_PID_SHIFT))
#define	is_kernel_pmap(x)	((x) == kernel_pmap)
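/*
 * Illustration (added note; assumes the usual 32-bit MIPS configuration
 * of 4K pages and a two-level table -- the exact shift values live in
 * pmap.h/param.h): a virtual address is resolved in two steps,
 *
 *	pde = pmap->pm_segtab[va >> SEGSHIFT];		(pmap_pde)
 *	pte = pde + vad_to_pte_offset(va);		(pmap_pte)
 *
 * so each segment-table slot covers MIPS_SEGSIZE = 1 << SEGSHIFT bytes
 * of address space, and the low PAGE_MASK bits of 'va' never touch the
 * tables at all.
 */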
struct pmap kernel_pmap_store;
pd_entry_t *kernel_segmap;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

unsigned pmap_max_asid;		/* max ASID supported by the system */

#define	PMAP_ASID_RESERVED	0

vm_offset_t kernel_vm_end;

static void pmap_asid_alloc(pmap_t pmap);

/*
 * Data for the pv entry allocation mechanism
 */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

struct fpage fpages_shared[FPAGES_SHARED];

struct sysmaps sysmaps_pcpu[MAXCPU];
static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap);
static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);

static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_testbit(vm_page_t m, int bit);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
    vm_page_t m, boolean_t wired);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
    vm_offset_t va, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
static int init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
static void pmap_TLB_invalidate_kernel(vm_offset_t);
static void pmap_TLB_update_kernel(vm_offset_t, pt_entry_t);
static void pmap_init_fpage(void);

static void pmap_invalidate_page_action(void *arg);
static void pmap_invalidate_all_action(void *arg);
static void pmap_update_page_action(void *arg);
struct local_sysmaps {
	struct mtx lock;
	pt_entry_t CMAP1;
	pt_entry_t CMAP2;
	caddr_t CADDR1;
	caddr_t CADDR2;
	uint16_t valid1, valid2;
};

/*
 * This structure is for large memory
 * above 512Meg.  We can't (in 32 bit mode)
 * just use the direct mapped MIPS_CACHED_TO_PHYS()
 * macros since we can't see the memory and must
 * map it in when we need to access it.  In 64
 * bit mode this goes away.
 */
static struct local_sysmaps sysmap_lmem[MAXCPU];
caddr_t virtual_sys_start = (caddr_t)0;
static pd_entry_t
pmap_segmap(pmap_t pmap, vm_offset_t va)
{
	if (pmap->pm_segtab)
		return (pmap->pm_segtab[((vm_offset_t)(va) >> SEGSHIFT)]);
	else
		return ((pd_entry_t)0);
}

/*
 * Extract the page table entry associated
 * with the given map/virtual_address pair.
 */
pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pdeaddr;

	if (pmap) {
		pdeaddr = (pt_entry_t *)pmap_segmap(pmap, va);
		if (pdeaddr)
			return (pdeaddr + vad_to_pte_offset(va));
	}
	return ((pt_entry_t *)0);
}
static vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_size_t bank_size;
	vm_offset_t pa, va;
	int i;

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		for (i = 0; phys_avail[i + 2]; i += 2) {
			phys_avail[i] = phys_avail[i + 2];
			phys_avail[i + 1] = phys_avail[i + 3];
		}
		phys_avail[i + 1] = 0;
		if (phys_avail[0] == 0)
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;
	if (pa >= MIPS_KSEG0_LARGEST_PHYS) {
		panic("Out of memory below 512Meg?");
	}
	va = MIPS_PHYS_TO_CACHED(pa);
	bzero((caddr_t)va, size);
	return (va);
}
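/*
 * Usage sketch (added note): during pmap_bootstrap() below, this boot-time
 * allocator hands out the message buffer, thread0's kstack, the kernel
 * segment table, and the initial second-level page tables, e.g.
 *
 *	msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
 *
 * Every caller runs before the VM system is up, which is why memory is
 * carved straight out of phys_avail[].
 */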
/*
 * Bootstrap the system enough to run with virtual memory.  This
 * assumes that the phys_avail array has been initialized.
 */
void
pmap_bootstrap(void)
{
	vm_size_t size;
	pt_entry_t *pgtab, *pte;
	int i, j;
	int memory_larger_than_512meg = 0;

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		/*
		 * Keep the memory aligned on a page boundary.
		 */
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);

		if (phys_avail[i + 1] >= MIPS_KSEG0_LARGEST_PHYS)
			memory_larger_than_512meg++;
	}

	/*
	 * Keep the phys_avail[] chunks sorted by address.
	 */
	for (i = 2; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i - 2] > phys_avail[i]) {
			vm_paddr_t ptemp[2];

			ptemp[0] = phys_avail[i + 0];
			ptemp[1] = phys_avail[i + 1];

			phys_avail[i + 0] = phys_avail[i - 2];
			phys_avail[i + 1] = phys_avail[i - 1];

			phys_avail[i - 2] = ptemp[0];
			phys_avail[i - 1] = ptemp[1];
		}
	}

	/*
	 * Copy the phys_avail[] array before we start stealing memory from it.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		physmem_desc[i] = phys_avail[i];
		physmem_desc[i + 1] = phys_avail[i + 1];
	}

	Maxmem = atop(phys_avail[i - 1]);
	printf("Physical memory chunk(s):\n");
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		size = phys_avail[i + 1] - phys_avail[i];
		printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
		    (uintmax_t)phys_avail[i],
		    (uintmax_t)phys_avail[i + 1] - 1,
		    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
	}
	printf("Maxmem is 0x%0lx\n", ptoa(Maxmem));

	/*
	 * Steal the message buffer from the beginning of memory.
	 */
	msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
	msgbufinit(msgbufp, MSGBUF_SIZE);

	/*
	 * Steal thread0 kstack.
	 */
	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);

	virtual_avail = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/*
	 * Steal some virtual address space to map the pcpu area.
	 */
	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
	pcpup = (struct pcpu *)virtual_avail;
	virtual_avail += PAGE_SIZE * 2;

	/*
	 * Initialize the wired TLB entry mapping the pcpu region for
	 * the BSP at 'pcpup'.  Up until this point we were operating
	 * with the 'pcpup' for the BSP pointing to a virtual address
	 * in KSEG0 so there was no need for a TLB mapping.
	 */
	mips_pcpu_tlb_init(PCPU_ADDR(0));

	printf("pcpu is available at virtual address %p.\n", pcpup);

	/*
	 * Steal some virtual space that will not be in kernel_segmap.  This
	 * va memory space will be used to map in kernel pages that are
	 * outside the 512Meg region.  Note that we only do this steal when
	 * we do have memory in this region; that way, for systems with
	 * smaller memory we don't "steal" any va ranges :-)
	 */
	if (memory_larger_than_512meg) {
		for (i = 0; i < MAXCPU; i++) {
			sysmap_lmem[i].CMAP1 = PTE_G;
			sysmap_lmem[i].CMAP2 = PTE_G;
			sysmap_lmem[i].CADDR1 = (caddr_t)virtual_avail;
			virtual_avail += PAGE_SIZE;
			sysmap_lmem[i].CADDR2 = (caddr_t)virtual_avail;
			virtual_avail += PAGE_SIZE;
			sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
			PMAP_LGMEM_LOCK_INIT(&sysmap_lmem[i]);
		}
	}
	virtual_sys_start = (caddr_t)virtual_avail;
	/*
	 * Allocate segment table for the kernel
	 */
	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);

	/*
	 * Allocate second level page tables for the kernel
	 */
	nkpt = NKPT;
	if (memory_larger_than_512meg) {
		/*
		 * If we have a large memory system we CANNOT afford to hit
		 * pmap_growkernel() and allocate memory, since we MAY end
		 * up with a page that is NOT mappable.  For that reason we
		 * grab more up front.  Normally NKPT is 120 (YMMV; see
		 * pmap.h); this gives us 480Meg of kernel virtual addresses
		 * at the cost of 120 pages (each page gets us 4Meg).  Since
		 * the kernel starts at virtual_avail, we can use this to
		 * calculate how many entries are left from there to the end
		 * of the segmap; we want to allocate all of it, which works
		 * out to the range above 0xC0000000 - 0xFFFFFFFF, i.e.
		 * about 256 entries or so instead of the 120.
		 */
		nkpt = (PAGE_SIZE / sizeof(pd_entry_t)) - (virtual_avail >> SEGSHIFT);
	}
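	/*
	 * Worked numbers for the calculation above (added note; assumes 4K
	 * pages and a 4-byte pd_entry_t, i.e. PAGE_SIZE / sizeof(pd_entry_t)
	 * == 1024 segmap slots): each second-level page maps NPTEPG *
	 * PAGE_SIZE = 4Meg of KVA, so the default 120 pages cover 480Meg,
	 * while mapping everything from virtual_avail (around 0xC0000000)
	 * to the top of the address space takes roughly 256 of the 1024
	 * slots.
	 */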
	pgtab = (pt_entry_t *)pmap_steal_memory(PAGE_SIZE * nkpt);

	/*
	 * The R[4-7]?00 stores only one copy of the Global bit in the
	 * translation lookaside buffer for each 2 page entry.  Thus invalid
	 * entries must have the Global bit set so that when Entry LO and
	 * Entry HI G bits are ANDed together they will produce a global bit
	 * to store in the TLB.
	 */
	for (i = 0, pte = pgtab; i < (nkpt * NPTEPG); i++, pte++)
		*pte = PTE_G;

	/*
	 * The segment table contains the KVA of the pages in the second
	 * level page table.
	 */
	for (i = 0, j = (virtual_avail >> SEGSHIFT); i < nkpt; i++, j++)
		kernel_segmap[j] = (pd_entry_t)(pgtab + (i * NPTEPG));
	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_segtab = kernel_segmap;
	kernel_pmap->pm_active = ~0;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
	kernel_pmap->pm_asid[0].gen = 0;
	pmap_max_asid = VMNUM_PIDS;
}
/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{
	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_list_count = 0;
}
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 * pmap_init has been enhanced to support, in a fairly consistent
 * way, discontiguous physical memory.
 */
void
pmap_init(void)
{
	if (need_wired_tlb_page_pool)
		pmap_init_fpage();

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
}
/***************************************************
 * Low level helper routines.....
 ***************************************************/

#if defined(PMAP_DIAGNOSTIC)
/*
 * This code checks for non-writeable/modified pages.
 * This should be an invalid condition.
 */
static int
pmap_nw_modified(pt_entry_t pte)
{
	if ((pte & (PTE_M | PTE_RO)) == (PTE_M | PTE_RO))
		return (1);
	else
		return (0);
}
#endif
static void
pmap_invalidate_all(pmap_t pmap)
{
	smp_rendezvous(0, pmap_invalidate_all_action, 0, (void *)pmap);
}

static void
pmap_invalidate_all_action(void *arg)
{
	pmap_t pmap = (pmap_t)arg;

	if (pmap->pm_active & PCPU_GET(cpumask)) {
		pmap_TLB_invalidate_all();
	} else
		pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
}
struct pmap_invalidate_page_arg {
	pmap_t pmap;
	vm_offset_t va;
};

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	struct pmap_invalidate_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	smp_rendezvous(0, pmap_invalidate_page_action, 0, (void *)&arg);
}

static void
pmap_invalidate_page_action(void *arg)
{
	pmap_t pmap = ((struct pmap_invalidate_page_arg *)arg)->pmap;
	vm_offset_t va = ((struct pmap_invalidate_page_arg *)arg)->va;

	if (is_kernel_pmap(pmap)) {
		pmap_TLB_invalidate_kernel(va);
		return;
	}
	if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
		return;
	else if (!(pmap->pm_active & PCPU_GET(cpumask))) {
		pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
		return;
	}
	va = pmap_va_asid(pmap, (va & ~PGOFSET));
	MachTLBFlushAddr(va);
}

static void
pmap_TLB_invalidate_kernel(vm_offset_t va)
{
	u_int pid;

	MachTLBGetPID(pid);
	va = va | (pid << VMTLB_PID_SHIFT);
	MachTLBFlushAddr(va);
}
struct pmap_update_page_arg {
	pmap_t pmap;
	vm_offset_t va;
	pt_entry_t pte;
};

static void
pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{
	struct pmap_update_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	arg.pte = pte;
	smp_rendezvous(0, pmap_update_page_action, 0, (void *)&arg);
}

static void
pmap_update_page_action(void *arg)
{
	pmap_t pmap = ((struct pmap_update_page_arg *)arg)->pmap;
	vm_offset_t va = ((struct pmap_update_page_arg *)arg)->va;
	pt_entry_t pte = ((struct pmap_update_page_arg *)arg)->pte;

	if (is_kernel_pmap(pmap)) {
		pmap_TLB_update_kernel(va, pte);
		return;
	}
	if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
		return;
	else if (!(pmap->pm_active & PCPU_GET(cpumask))) {
		pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
		return;
	}
	va = pmap_va_asid(pmap, va);
	MachTLBUpdate(va, pte);
}

static void
pmap_TLB_update_kernel(vm_offset_t va, pt_entry_t pte)
{
	u_int pid;

	MachTLBGetPID(pid);
	va = va | (pid << VMTLB_PID_SHIFT);
	MachTLBUpdate(va, pte);
}
/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	vm_offset_t retval = 0;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte)
		retval = mips_tlbpfn_to_paddr(*pte) | (va & PAGE_MASK);
	PMAP_UNLOCK(pmap);
	return (retval);
}
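/*
 * Added note: the physical address above is just the page frame number
 * stored in the PTE, shifted back into place and OR'd with the page
 * offset bits of 'va'.  For example (hypothetical values, 4K pages):
 * va = 0x00402abc with a PTE naming frame 0x1234 yields
 * pa = (0x1234 << PAGE_SHIFT) | 0xabc = 0x01234abc.
 */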
/*
 * Routine:	pmap_extract_and_hold
 * Function:
 *	Atomically extract and hold the physical page
 *	with the given pmap and virtual address pair
 *	if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pt_entry_t pte;
	vm_page_t m;

	m = NULL;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pte = *pmap_pte(pmap, va);
	if (pte != 0 && pmap_pte_v(&pte) &&
	    ((pte & PTE_RW) || (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pte));
		vm_page_hold(m);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	return (m);
}
/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * add a wired page to the kva
 */
/* PMAP_INLINE */ void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	register pt_entry_t *pte;
	pt_entry_t npte, opte;

	printf("pmap_kenter: va: 0x%08x -> pa: 0x%08x\n", va, pa);

	npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W;

	if (is_cacheable_mem(pa))
		npte |= PTE_CACHE;
	else
		npte |= PTE_UNCACHED;

	pte = pmap_pte(kernel_pmap, va);
	opte = *pte;
	*pte = npte;

	pmap_update_page(kernel_pmap, va, npte);
}
/*
 * remove a page from the kernel pagetables
 */
/* PMAP_INLINE */ void
pmap_kremove(vm_offset_t va)
{
	register pt_entry_t *pte;

	/*
	 * Write back all caches from the page being destroyed
	 */
	mips_dcache_wbinv_range_index(va, NBPG);

	pte = pmap_pte(kernel_pmap, va);
	*pte = PTE_G;
	pmap_invalidate_page(kernel_pmap, va);
}
/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping.  Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged.  Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
	vm_offset_t va, sva;

	va = sva = *virt;
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	*virt = va;
	return (sva);
}
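/*
 * Added note: this port takes the second path described above -- it
 * consumes KVA starting at '*virt' via pmap_kenter() and advances '*virt'
 * past the mapped run.  A hypothetical caller mapping two pages would
 * look like:
 *
 *	vm_offset_t va = virtual_avail, sva;
 *	sva = pmap_map(&va, pa, pa + 2 * PAGE_SIZE, VM_PROT_READ);
 *	(now va has advanced by 2 * PAGE_SIZE; sva is the first mapped VA)
 */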
/*
 * Add a list of wired pages to the kva.
 * This routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;
	vm_offset_t origva = va;

	for (i = 0; i < count; i++) {
		pmap_flush_pvcache(m[i]);
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
		va += PAGE_SIZE;
	}

	mips_dcache_wbinv_range_index(origva, PAGE_SIZE * count);
}
/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	/*
	 * No need to wb/inv caches here,
	 * pmap_kremove will do it for us
	 */
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
}
/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * floating pages (FPAGES) management routines
 *
 * FPAGES are the reserved virtual memory areas which can be
 * mapped to any physical memory.  This gets used typically
 * in the following functions:
 */

/*
 * Create the floating pages, aka FPAGES!
 */
static void
pmap_init_fpage(void)
{
	vm_offset_t kva;
	int i, j;
	struct sysmaps *sysmaps;

	/*
	 * We allocate a total of (FPAGES*MAXCPU + FPAGES_SHARED + 1) pages
	 * at first.  FPAGES & FPAGES_SHARED should be EVEN.  Then we'll
	 * adjust 'kva' to be even-page aligned so that the fpage area can
	 * be wired in the TLB with a single TLB entry.
	 */
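	/*
	 * Worked example (added note; the FPAGES and FPAGES_SHARED values
	 * live in machine/pmap.h and are hypothetical here): with
	 * FPAGES = 2 and FPAGES_SHARED = 2 on a kernel built with
	 * MAXCPU = 2, the request below is (2*2 + 1 + 2) = 7 pages; one
	 * page is then given back so that the surviving run begins on an
	 * even page boundary, which is what lets a single paired TLB entry
	 * wire the whole area.
	 */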
	kva = kmem_alloc_nofault(kernel_map,
	    (FPAGES * MAXCPU + 1 + FPAGES_SHARED) * PAGE_SIZE);
	if ((void *)kva == NULL)
		panic("pmap_init_fpage: fpage allocation failed");

	/*
	 * Make the start address an even page number so we can wire down
	 * the fpage area in the tlb with a single tlb entry.
	 */
	if ((((vm_offset_t)kva) >> PGSHIFT) & 1) {
		/*
		 * 'kva' is not even-page aligned.  Adjust it and free the
		 * first page which is unused.
		 */
		kmem_free(kernel_map, (vm_offset_t)kva, NBPG);
		kva = ((vm_offset_t)kva) + NBPG;
	} else {
		/*
		 * 'kva' is even page aligned.  We don't need the last page;
		 * free it.
		 */
		kmem_free(kernel_map, ((vm_offset_t)kva) + FSPACE, NBPG);
	}

	for (i = 0; i < MAXCPU; i++) {
		sysmaps = &sysmaps_pcpu[i];
		mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);

		/* Assign FPAGES pages to the CPU */
		for (j = 0; j < FPAGES; j++)
			sysmaps->fp[j].kva = kva + (j) * PAGE_SIZE;
		kva = ((vm_offset_t)kva) + (FPAGES * PAGE_SIZE);
	}

	/*
	 * An additional 2 pages are needed, one for pmap_zero_page_idle()
	 * and one for coredump.  These pages are shared by all CPUs.
	 */
	fpages_shared[PMAP_FPAGE3].kva = kva;
	fpages_shared[PMAP_FPAGE_KENTER_TEMP].kva = kva + PAGE_SIZE;
}
/*
 * Map the page to the fpage virtual address as specified through the fpage id
 */
vm_offset_t
pmap_map_fpage(vm_paddr_t pa, struct fpage *fp, boolean_t check_unmaped)
{
	vm_offset_t kva;
	register pt_entry_t *pte;
	pt_entry_t npte;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Check if the fpage is free
	 */
	if (fp->state) {
		if (check_unmaped == TRUE)
			pmap_unmap_fpage(pa, fp);
		else
			panic("pmap_map_fpage: fpage is busy");
	}
	fp->state = TRUE;
	kva = fp->kva;

	npte = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
	pte = pmap_pte(kernel_pmap, kva);
	*pte = npte;

	pmap_TLB_update_kernel(kva, npte);

	return (kva);
}
/*
 * Unmap the page from the fpage virtual address as specified through the fpage id
 */
void
pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp)
{
	vm_offset_t kva;
	register pt_entry_t *pte;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Check if the fpage is busy
	 */
	if (!(fp->state)) {
		panic("pmap_unmap_fpage: fpage is free");
	}
	kva = fp->kva;

	pte = pmap_pte(kernel_pmap, kva);
	*pte = PTE_G;
	pmap_TLB_invalidate_kernel(kva);

	fp->state = FALSE;

	/*
	 * Should there be any flush operation at the end?
	 */
}
/*
 * Simplify the reference counting of page table pages.  Specifically, use
 * the page table page's wired count rather than its hold count to contain
 * the reference count.
 */

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{
	/*
	 * unmap the page table page
	 */
	pmap->pm_segtab[m->pindex] = 0;
	--pmap->pm_stats.resident_count;

	if (pmap->pm_ptphint == m)
		pmap->pm_ptphint = NULL;

	/*
	 * If the page is finally unwired, simply free it.
	 */
	vm_page_free_zero(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
	return (1);
}
static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{
	--m->wire_count;
	if (m->wire_count == 0)
		return (_pmap_unwire_pte_hold(pmap, m));
	else
		return (0);
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
{
	unsigned ptepindex;
	pd_entry_t pteva;

	if (va >= VM_MAXUSER_ADDRESS)
		return (0);

	if (mpte == NULL) {
		ptepindex = (va >> SEGSHIFT);
		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			mpte = pmap->pm_ptphint;
		} else {
			pteva = *pmap_pde(pmap, va);
			mpte = PHYS_TO_VM_PAGE(MIPS_CACHED_TO_PHYS(pteva));
			pmap->pm_ptphint = mpte;
		}
	}
	return pmap_unwire_pte_hold(pmap, mpte);
}
void
pmap_pinit0(pmap_t pmap)
{
	int i;

	PMAP_LOCK_INIT(pmap);
	pmap->pm_segtab = kernel_segmap;
	pmap->pm_active = 0;
	pmap->pm_ptphint = NULL;
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
	vm_page_t ptdpg;
	int i, req;

	PMAP_LOCK_INIT(pmap);

	req = VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO;

#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool)
		req |= VM_ALLOC_WIRED_TLB_PG_POOL;
#endif
	/*
	 * allocate the page directory page
	 */
	while ((ptdpg = vm_page_alloc(NULL, NUSERPGTBLS, req)) == NULL)
		VM_WAIT;

	ptdpg->valid = VM_PAGE_BITS_ALL;

	pmap->pm_segtab = (pd_entry_t *)
	    MIPS_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(ptdpg));
	if ((ptdpg->flags & PG_ZERO) == 0)
		bzero(pmap->pm_segtab, PAGE_SIZE);

	pmap->pm_active = 0;
	pmap->pm_ptphint = NULL;
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	return (1);
}
/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
{
	vm_offset_t pteva, ptepa;
	vm_page_t m;
	int req;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	req = VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool)
		req |= VM_ALLOC_WIRED_TLB_PG_POOL;
#endif
	/*
	 * Find or fabricate a new pagetable page
	 */
	if ((m = vm_page_alloc(NULL, ptepindex, req)) == NULL) {
		if (flags & M_WAITOK) {
			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}
		/*
		 * Indicate the need to retry.  While waiting, the page
		 * table page may have been allocated.
		 */
		return (NULL);
	}
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	KASSERT(m->queue == PQ_NONE,
	    ("_pmap_allocpte: %p->queue != PQ_NONE", m));

	/*
	 * Map the pagetable page into the process address space, if it
	 * isn't already there.
	 */
	pmap->pm_stats.resident_count++;

	ptepa = VM_PAGE_TO_PHYS(m);
	pteva = MIPS_PHYS_TO_CACHED(ptepa);
	pmap->pm_segtab[ptepindex] = (pd_entry_t)pteva;

	/*
	 * Set the page table hint
	 */
	pmap->pm_ptphint = m;

	/*
	 * Kernel page tables are allocated in pmap_bootstrap() or
	 * pmap_growkernel().
	 */
	if (is_kernel_pmap(pmap))
		panic("_pmap_allocpte() called for kernel pmap\n");

	m->valid = VM_PAGE_BITS_ALL;
	vm_page_flag_clear(m, PG_ZERO);
	return (m);
}
static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
{
	unsigned ptepindex;
	vm_offset_t pteva;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

retry:
	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> SEGSHIFT;

	/*
	 * Get the page directory entry
	 */
	pteva = (vm_offset_t)pmap->pm_segtab[ptepindex];

	/*
	 * If the page table page is mapped, we just increment the hold
	 * count, and activate it.
	 */
	if (pteva) {
		/*
		 * In order to get the page table page, try the hint first.
		 */
		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			m = pmap->pm_ptphint;
		} else {
			m = PHYS_TO_VM_PAGE(MIPS_CACHED_TO_PHYS(pteva));
			pmap->pm_ptphint = m;
		}
		m->wire_count++;
	} else {
		/*
		 * Here if the pte page isn't mapped, or if it has been
		 * deallocated.
		 */
		m = _pmap_allocpte(pmap, ptepindex, flags);
		if (m == NULL && (flags & M_WAITOK))
			goto retry;
	}
	return (m);
}
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * - Merged pmap_release and pmap_release_free_page.  When pmap_release is
 *   called only the page directory page(s) can be left in the pmap pte
 *   object, since all page table pages will have been freed by
 *   pmap_remove_pages and pmap_remove.  In addition, there can only be one
 *   reference to the pmap and the page directory is wired, so the page(s)
 *   can never be busy.  So all there is to do is clear the magic mappings
 *   from the page directory and free the page(s).
 */

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	vm_page_t ptdpg;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	ptdpg = PHYS_TO_VM_PAGE(MIPS_CACHED_TO_PHYS(pmap->pm_segtab));
	ptdpg->wire_count--;
	atomic_subtract_int(&cnt.v_wire_count, 1);
	vm_page_free_zero(ptdpg);
}
/*
 * grow the number of kernel page table entries, if needed
 */
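/*
 * Added note: the expansion below proceeds in whole-segment steps of
 * NPTEPG * PAGE_SIZE bytes (4Meg with 4K pages); the expression
 *
 *	kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
 *	    ~(PAGE_SIZE * NPTEPG - 1);
 *
 * rounds up to the next segment boundary, since one newly allocated page
 * of PTEs populates exactly one kernel_segmap slot.
 */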
void
pmap_growkernel(vm_offset_t addr)
{
	vm_offset_t ptppaddr;
	vm_page_t nkpg;
	pt_entry_t *pte;
	int i, req;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	if (kernel_vm_end == 0) {
		kernel_vm_end = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
		while (segtab_pde(kernel_segmap, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
			    ~(PAGE_SIZE * NPTEPG - 1);
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
		}
	}
	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	if (addr - 1 >= kernel_map->max_offset)
		addr = kernel_map->max_offset;
	while (kernel_vm_end < addr) {
		if (segtab_pde(kernel_segmap, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
			    ~(PAGE_SIZE * NPTEPG - 1);
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
			continue;
		}
		/*
		 * This index is bogus, but out of the way
		 */
		req = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
		if (need_wired_tlb_page_pool)
			req |= VM_ALLOC_WIRED_TLB_PG_POOL;
#endif
		nkpg = vm_page_alloc(NULL, nkpt, req);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");

		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		if (ptppaddr >= MIPS_KSEG0_LARGEST_PHYS) {
			/*
			 * We need to do something here, but I am not sure
			 * what.  We can access anything in the 0 - 512Meg
			 * region, but if we get a page to go in the kernel
			 * segmap that is outside of that range we really
			 * need to have another mapping beyond the temporary
			 * ones I have.  Not sure how to do this yet.
			 * FIXME FIXME.
			 */
			panic("Gak, can't handle a k-page table outside of lower 512Meg");
		}
		pte = (pt_entry_t *)MIPS_PHYS_TO_CACHED(ptppaddr);
		segtab_pde(kernel_segmap, kernel_vm_end) = (pd_entry_t)pte;

		/*
		 * The R[4-7]?00 stores only one copy of the Global bit in
		 * the translation lookaside buffer for each 2 page entry.
		 * Thus invalid entries must have the Global bit set so that
		 * when Entry LO and Entry HI G bits are ANDed together they
		 * will produce a global bit to store in the tlb.
		 */
		for (i = 0; i < NPTEPG; i++, pte++)
			*pte = PTE_G;

		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
		    ~(PAGE_SIZE * NPTEPG - 1);
		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
			kernel_vm_end = kernel_map->max_offset;
			break;
		}
	}
}
/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
	pv_entry_count--;
	uma_zfree(pvzone, pv);
}
/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry(pmap_t locked_pmap)
{
	static const struct timeval printinterval = { 60, 0 };
	static struct timeval lastprint;
	struct vpgqueues *vpq;
	pt_entry_t *pte, oldpte;
	pmap_t pmap;
	pv_entry_t allocated_pv, next_pv, pv;
	vm_offset_t va;
	vm_page_t m;

	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
	if (allocated_pv != NULL) {
		pv_entry_count++;
		if (pv_entry_count > pv_entry_high_water)
			pagedaemon_wakeup();
		else
			return (allocated_pv);
	}
	/*
	 * Reclaim pv entries: At first, destroy mappings to inactive
	 * pages.  After that, if a pv entry is still needed, destroy
	 * mappings to active pages.
	 */
	if (ratecheck(&lastprint, &printinterval))
		printf("Approaching the limit on PV entries, "
		    "increase the vm.pmap.shpgperproc tunable.\n");
	vpq = &vm_page_queues[PQ_INACTIVE];
retry:
	TAILQ_FOREACH(m, &vpq->pl, pageq) {
		if (m->hold_count || m->busy)
			continue;
		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
			va = pv->pv_va;
			pmap = pv->pv_pmap;
			/* Avoid deadlock and lock recursion. */
			if (pmap > locked_pmap)
				PMAP_LOCK(pmap);
			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
				continue;
			pmap->pm_stats.resident_count--;
			pte = pmap_pte(pmap, va);
			KASSERT(pte != NULL, ("pte"));
			oldpte = loadandclear((u_int *)pte);
			if (is_kernel_pmap(pmap))
				*pte = PTE_G;
			KASSERT((oldpte & PTE_W) == 0,
			    ("wired pte for unwired page"));
			if (m->md.pv_flags & PV_TABLE_REF)
				vm_page_flag_set(m, PG_REFERENCED);
			if (oldpte & PTE_M)
				vm_page_dirty(m);
			pmap_invalidate_page(pmap, va);
			TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
			m->md.pv_list_count--;
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			if (TAILQ_EMPTY(&m->md.pv_list)) {
				vm_page_flag_clear(m, PG_WRITEABLE);
				m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
			}
			pmap_unuse_pt(pmap, va, pv->pv_ptem);
			if (pmap != locked_pmap)
				PMAP_UNLOCK(pmap);
			if (allocated_pv == NULL)
				allocated_pv = pv;
			else
				free_pv_entry(pv);
		}
	}
	if (allocated_pv == NULL) {
		if (vpq == &vm_page_queues[PQ_INACTIVE]) {
			vpq = &vm_page_queues[PQ_ACTIVE];
			goto retry;
		}
		panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
	}
	return (allocated_pv);
}
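/*
 * Added note on the reclaim pass above: it scans PQ_INACTIVE first and
 * only falls back to PQ_ACTIVE when that yields nothing.  The
 * 'pmap > locked_pmap' comparison establishes a global lock order
 * (ascending pmap address), so two CPUs reclaiming against each other
 * cannot deadlock; locks that would be taken "out of order" are only
 * attempted via PMAP_TRYLOCK and skipped on failure.
 */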
/*
 * Move pmap_collect() out of the machine-dependent code, rename it
 * to reflect its new location, and add page queue and flag locking.
 *
 * Notes: (1) alpha, i386, and ia64 had identical implementations
 * of pmap_collect() in terms of machine-independent interfaces;
 * (2) sparc64 doesn't require it; (3) powerpc had it as a TODO.
 *
 * MIPS implementation was identical to alpha [Junos 8.2]
 */
/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
static void
pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va)
{
	pv_entry_t pv;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
			if (va == pv->pv_va)
				break;
		}
	}
	KASSERT(pv != NULL, ("pmap_remove_entry: pv not found"));
	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count--;
	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
		vm_page_flag_clear(m, PG_WRITEABLE);

	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
	free_pv_entry(pv);
}
/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m,
    boolean_t wired)
{
	pv_entry_t pv;

	pv = get_pv_entry(pmap);
	pv->pv_va = va;
	pv->pv_pmap = pmap;
	pv->pv_ptem = mpte;
	pv->pv_wired = wired;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count++;
}
/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
    vm_page_t m)
{
	pv_entry_t pv;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (pv_entry_count < pv_entry_high_water &&
	    (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
		pv_entry_count++;
		pv->pv_va = va;
		pv->pv_pmap = pmap;
		pv->pv_ptem = mpte;
		pv->pv_wired = FALSE;
		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count++;
		return (TRUE);
	} else
		return (FALSE);
}
/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
{
	pt_entry_t oldpte;
	vm_page_t m;
	vm_offset_t pa;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	oldpte = loadandclear((u_int *)ptq);
	if (is_kernel_pmap(pmap))
		*ptq = PTE_G;

	if (oldpte & PTE_W)
		pmap->pm_stats.wired_count -= 1;

	pmap->pm_stats.resident_count -= 1;
	pa = mips_tlbpfn_to_paddr(oldpte);

	if (page_is_managed(pa)) {
		m = PHYS_TO_VM_PAGE(pa);
		if (oldpte & PTE_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified(oldpte)) {
				printf(
				    "pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
				    va, oldpte);
			}
#endif
			vm_page_dirty(m);
		}
		if (m->md.pv_flags & PV_TABLE_REF)
			vm_page_flag_set(m, PG_REFERENCED);
		m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);

		pmap_remove_entry(pmap, m, va);
	}
	return pmap_unuse_pt(pmap, va, NULL);
}
/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va)
{
	register pt_entry_t *ptq;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	ptq = pmap_pte(pmap, va);

	/*
	 * if there is no pte for this address, just skip it!!!
	 */
	if (!ptq || !pmap_pte_v(ptq)) {
		return;
	}

	/*
	 * Write back all caches from the page being destroyed
	 */
	mips_dcache_wbinv_range_index(va, NBPG);

	/*
	 * get a local va for mappings for this pmap.
	 */
	(void)pmap_remove_pte(pmap, ptq, va);
	pmap_invalidate_page(pmap, va);
}
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t va, nva;

	if (pmap == NULL)
		return;

	if (pmap->pm_stats.resident_count == 0)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);

	/*
	 * special handling of removing one page.  a very common operation
	 * and easy to short circuit some code.
	 */
	if ((sva + PAGE_SIZE) == eva) {
		pmap_remove_page(pmap, sva);
		goto out;
	}
	for (va = sva; va < eva; va = nva) {
		if (!*pmap_pde(pmap, va)) {
			nva = mips_segtrunc(va + MIPS_SEGSIZE);
			continue;
		}
		pmap_remove_page(pmap, va);
		nva = va + PAGE_SIZE;
	}
out:
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}
/*
 * Routine:	pmap_remove_all
 * Function:
 *	Removes this physical page from
 *	all physical maps in which it resides.
 *	Reflects back modify bits to the pager.
 *
 * Notes:
 *	Original versions of this routine were very
 *	inefficient because they iteratively called
 *	pmap_remove (slow...)
 */
void
pmap_remove_all(vm_page_t m)
{
	register pv_entry_t pv;
	register pt_entry_t *pte, tpte;

	KASSERT((m->flags & PG_FICTITIOUS) == 0,
	    ("pmap_remove_all: page %p is fictitious", m));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	if (m->md.pv_flags & PV_TABLE_REF)
		vm_page_flag_set(m, PG_REFERENCED);

	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		PMAP_LOCK(pv->pv_pmap);

		/*
		 * If it's the last mapping, write back all caches from
		 * the page being destroyed.
		 */
		if (m->md.pv_list_count == 1)
			mips_dcache_wbinv_range_index(pv->pv_va, NBPG);

		pv->pv_pmap->pm_stats.resident_count--;

		pte = pmap_pte(pv->pv_pmap, pv->pv_va);

		tpte = loadandclear((u_int *)pte);
		if (is_kernel_pmap(pv->pv_pmap))
			*pte = PTE_G;

		if (tpte & PTE_W)
			pv->pv_pmap->pm_stats.wired_count--;

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PTE_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified(tpte)) {
				printf(
				    "pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
				    pv->pv_va, tpte);
			}
#endif
			vm_page_dirty(m);
		}
		pmap_invalidate_page(pv->pv_pmap, pv->pv_va);

		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count--;
		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
		PMAP_UNLOCK(pv->pv_pmap);
		free_pv_entry(pv);
	}

	vm_page_flag_clear(m, PG_WRITEABLE);
	m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
}
/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pt_entry_t *pte;

	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	while (sva < eva) {
		pt_entry_t pbits, obits;
		vm_page_t m;
		vm_offset_t pa;

		/*
		 * If segment table entry is empty, skip this segment.
		 */
		if (!*pmap_pde(pmap, sva)) {
			sva = mips_segtrunc(sva + MIPS_SEGSIZE);
			continue;
		}
		/*
		 * If pte is invalid, skip this page
		 */
		pte = pmap_pte(pmap, sva);
		if (!pmap_pte_v(pte)) {
			sva += PAGE_SIZE;
			continue;
		}
retry:
		obits = pbits = *pte;
		pa = mips_tlbpfn_to_paddr(pbits);

		if (page_is_managed(pa)) {
			m = PHYS_TO_VM_PAGE(pa);
			if (m->md.pv_flags & PV_TABLE_REF) {
				vm_page_flag_set(m, PG_REFERENCED);
				m->md.pv_flags &= ~PV_TABLE_REF;
			}
			if (pbits & PTE_M) {
				vm_page_dirty(m);
				m->md.pv_flags &= ~PV_TABLE_MOD;
			}
		}
		pbits = (pbits & ~PTE_M) | PTE_RO;

		if (pbits != *pte) {
			if (!atomic_cmpset_int((u_int *)pte, obits, pbits))
				goto retry;
			pmap_update_page(pmap, sva, pbits);
		}
		sva += PAGE_SIZE;
	}
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();
}
/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB: This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	vm_offset_t pa, opa;
	register pt_entry_t *pte;
	pt_entry_t origpte, newpte;
	vm_page_t mpte, om;
	int rw;

	if (pmap == NULL)
		return;

#ifdef PMAP_DIAGNOSTIC
	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
#endif

	mpte = om = NULL;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not resident, we are
	 * creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		mpte = pmap_allocpte(pmap, va, M_WAITOK);
	}
	pte = pmap_pte(pmap, va);

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory, pdir=%p, va=%p\n",
		    (void *)pmap->pm_segtab, (void *)va);
	}
	pa = VM_PAGE_TO_PHYS(m);
	origpte = *pte;
	opa = mips_tlbpfn_to_paddr(origpte);

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if ((origpte & PTE_V) && (opa == pa)) {
		/*
		 * Wiring change, just update stats.  We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them.  Hence, if a user page is
		 * wired, the PT page will be also.
		 */
		if (wired && ((origpte & PTE_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PTE_W))
			pmap->pm_stats.wired_count--;

#if defined(PMAP_DIAGNOSTIC)
		if (pmap_nw_modified(origpte)) {
			printf(
			    "pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
			    va, origpte);
		}
#endif

		/*
		 * Remove extra pte reference
		 */
		if (mpte)
			mpte->wire_count--;

		/*
		 * We might be turning off write access to the page, so we
		 * go ahead and sense modify status.
		 */
		if (page_is_managed(opa)) {
			om = m;
		}
		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		if (origpte & PTE_W)
			pmap->pm_stats.wired_count--;

		if (page_is_managed(opa)) {
			om = PHYS_TO_VM_PAGE(opa);
			pmap_remove_entry(pmap, om, va);
		}
		if (mpte != NULL) {
			mpte->wire_count--;
			KASSERT(mpte->wire_count > 0,
			    ("pmap_enter: missing reference to page table page,"
			    " va: %p", (void *)va));
		}
	} else
		pmap->pm_stats.resident_count++;

	/*
	 * Enter on the PV list if part of our managed memory.  Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
		    ("pmap_enter: managed mapping within the clean submap"));
		pmap_insert_entry(pmap, va, mpte, m, wired);
	}
	/*
	 * Increment counters
	 */
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	if ((access & VM_PROT_WRITE) != 0)
		m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF;
	rw = init_pte_prot(va, m, prot);

	printf("pmap_enter: va: 0x%08x -> pa: 0x%08x\n", va, pa);

	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = mips_paddr_to_tlbpfn(pa) | rw | PTE_V;

	if (is_cacheable_mem(pa))
		newpte |= PTE_CACHE;
	else
		newpte |= PTE_UNCACHED;

	if (wired)
		newpte |= PTE_W;

	if (is_kernel_pmap(pmap)) {
		newpte |= PTE_G;
	}

	/*
	 * if the mapping or permission bits are different, we need to
	 * update the pte.
	 */
	if (origpte != newpte) {
		if (origpte & PTE_V) {
			*pte = newpte;
			if (page_is_managed(opa) && (opa != pa)) {
				if (om->md.pv_flags & PV_TABLE_REF)
					vm_page_flag_set(om, PG_REFERENCED);
				om->md.pv_flags &=
				    ~(PV_TABLE_REF | PV_TABLE_MOD);
			}
			if (origpte & PTE_M) {
				KASSERT((origpte & PTE_RW),
				    ("pmap_enter: modified page not writable:"
				    " va: %p, pte: 0x%lx", (void *)va, origpte));
				if (page_is_managed(opa))
					vm_page_dirty(om);
			}
		} else {
			*pte = newpte;
		}
	}
	pmap_update_page(pmap, va, newpte);

	/*
	 * Sync I & D caches for executable pages.  Do this only if the
	 * target pmap belongs to the current process.  Otherwise, an
	 * unresolvable TLB miss may occur.
	 */
	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
	    (prot & VM_PROT_EXECUTE)) {
		mips_icache_sync_range(va, NBPG);
		mips_dcache_wbinv_range(va, NBPG);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}
/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * but is *MUCH* faster than pmap_enter...
 */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
	PMAP_LOCK(pmap);
	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
	PMAP_UNLOCK(pmap);
}
static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{
	pt_entry_t *pte;
	vm_offset_t pa;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * In the case that a page table page is not resident, we are
	 * creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		unsigned ptepindex;
		vm_offset_t pteva;

		/*
		 * Calculate pagetable page index
		 */
		ptepindex = va >> SEGSHIFT;
		if (mpte && (mpte->pindex == ptepindex)) {
			mpte->wire_count++;
		} else {
			/*
			 * Get the page directory entry
			 */
			pteva = (vm_offset_t)pmap->pm_segtab[ptepindex];

			/*
			 * If the page table page is mapped, we just
			 * increment the hold count, and activate it.
			 */
			if (pteva) {
				if (pmap->pm_ptphint &&
				    (pmap->pm_ptphint->pindex == ptepindex)) {
					mpte = pmap->pm_ptphint;
				} else {
					mpte = PHYS_TO_VM_PAGE(
					    MIPS_CACHED_TO_PHYS(pteva));
					pmap->pm_ptphint = mpte;
				}
				mpte->wire_count++;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex,
				    M_NOWAIT);
				if (mpte == NULL)
					return (mpte);
			}
		}
	} else {
		mpte = NULL;
	}

	pte = pmap_pte(pmap, va);
	if (pmap_pte_v(pte)) {
		if (mpte != NULL) {
			mpte->wire_count--;
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
		if (mpte != NULL) {
			pmap_unwire_pte_hold(pmap, mpte);
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * Now validate mapping with RO protection
	 */
	*pte = mips_paddr_to_tlbpfn(pa) | PTE_V;

	if (is_cacheable_mem(pa))
		*pte |= PTE_CACHE;
	else
		*pte |= PTE_UNCACHED;

	if (is_kernel_pmap(pmap))
		*pte |= PTE_G;

	/*
	 * Sync I & D caches.  Do this only if the target pmap
	 * belongs to the current process.  Otherwise, an
	 * unresolvable TLB miss may occur.
	 */
	if (pmap == &curproc->p_vmspace->vm_pmap) {
		mips_icache_sync_range(va, NBPG);
		mips_dcache_wbinv_range(va, NBPG);
	}
	return (mpte);
}
/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	vm_offset_t va;
	int int_level;

	if (i != 0)
		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
		    __func__);

#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		va = pmap_map_fpage(pa, &fpages_shared[PMAP_FPAGE_KENTER_TEMP],
		    FALSE);
	} else
#endif
	if (pa < MIPS_KSEG0_LARGEST_PHYS) {
		va = MIPS_PHYS_TO_CACHED(pa);
	} else {
		int cpu;
		struct local_sysmaps *sysm;

		/*
		 * If this is used other than for dumps, we may need to leave
		 * interrupts disabled on return.  If crash dumps don't work
		 * when we get to this point, we might want to consider this
		 * (leaving things disabled as a starting point ;-)
		 */
		int_level = disableintr();
		cpu = PCPU_GET(cpuid);
		sysm = &sysmap_lmem[cpu];
		/* Since this is for the debugger, no locks or any other fun */
		sysm->CMAP1 = mips_paddr_to_tlbpfn(pa) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
		sysm->valid1 = 1;
		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
		va = (vm_offset_t)sysm->CADDR1;
		restoreintr(int_level);
	}
	return ((void *)va);
}
void
pmap_kenter_temporary_free(vm_paddr_t pa)
{
	int cpu;
	int int_level;
	struct local_sysmaps *sysm;

	if (pa < MIPS_KSEG0_LARGEST_PHYS) {
		/* nothing to do for this case */
		return;
	}
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	if (sysm->valid1) {
		int_level = disableintr();
		pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
		restoreintr(int_level);
		sysm->valid1 = 0;
	}
}
/*
 * Moved the code to Machine Independent
 * vm_map_pmap_enter()
 */

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;

	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
	psize = atop(end - start);
	mpte = NULL;
	m = m_start;
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
		    prot, mpte);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pmap);
}
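/*
 * Added example (hypothetical values, 4K pages): with start = 0x20000000
 * and an object whose resident pages have pindex 5, 6 and 9, m_start at
 * pindex 5 maps to 0x20000000, pindex 6 to 0x20001000, and pindex 9 to
 * 0x20004000 (diff = 4) -- provided 0x20004000 is still below 'end'; the
 * gap at pindexes 7-8 is simply left unmapped.
 */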
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("pmap_object_init_pt: non-device object"));
}
/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	register pt_entry_t *pte;

	if (pmap == NULL)
		return;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate the TLB.
	 */
	pmap_pte_set_w(pte, wired);
	PMAP_UNLOCK(pmap);
}
/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{
}
/*
 * pmap_zero_page zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
	int int_level;

#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		struct fpage *fp1;
		struct sysmaps *sysmaps;

		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
		mtx_lock(&sysmaps->lock);

		fp1 = &sysmaps->fp[PMAP_FPAGE1];
		va = pmap_map_fpage(phys, fp1, FALSE);
		bzero((caddr_t)va, PAGE_SIZE);
		pmap_unmap_fpage(phys, fp1);

		mtx_unlock(&sysmaps->lock);
		/*
		 * Should you do cache flush?
		 */
	} else
#endif
	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
		va = MIPS_PHYS_TO_CACHED(phys);

		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	} else {
		int cpu;
		struct local_sysmaps *sysm;

		cpu = PCPU_GET(cpuid);
		sysm = &sysmap_lmem[cpu];
		PMAP_LGMEM_LOCK(sysm);

		int_level = disableintr();
		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
		sysm->valid1 = 1;
		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
		bzero(sysm->CADDR1, PAGE_SIZE);
		pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
		restoreintr(int_level);
		sysm->CMAP1 = 0;
		sysm->valid1 = 0;
		PMAP_LGMEM_UNLOCK(sysm);
	}
}
/*
 * pmap_zero_page_area zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
	int int_level;

#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		struct fpage *fp1;
		struct sysmaps *sysmaps;

		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
		mtx_lock(&sysmaps->lock);

		fp1 = &sysmaps->fp[PMAP_FPAGE1];
		va = pmap_map_fpage(phys, fp1, FALSE);
		bzero((caddr_t)va + off, size);
		pmap_unmap_fpage(phys, fp1);

		mtx_unlock(&sysmaps->lock);
	} else
#endif
	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
		va = MIPS_PHYS_TO_CACHED(phys);
		bzero((char *)(caddr_t)va + off, size);
		mips_dcache_wbinv_range(va + off, size);
	} else {
		int cpu;
		struct local_sysmaps *sysm;

		cpu = PCPU_GET(cpuid);
		sysm = &sysmap_lmem[cpu];
		PMAP_LGMEM_LOCK(sysm);
		int_level = disableintr();

		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
		sysm->valid1 = 1;
		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
		bzero((char *)sysm->CADDR1 + off, size);
		pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
		restoreintr(int_level);
		sysm->CMAP1 = 0;
		sysm->valid1 = 0;
		PMAP_LGMEM_UNLOCK(sysm);
	}
}
void
pmap_zero_page_idle(vm_page_t m)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
	int int_level;

#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		va = pmap_map_fpage(phys, &fpages_shared[PMAP_FPAGE3], FALSE);
		bzero((caddr_t)va, PAGE_SIZE);
		pmap_unmap_fpage(phys, &fpages_shared[PMAP_FPAGE3]);
	} else
#endif
	if (phys < MIPS_KSEG0_LARGEST_PHYS) {
		va = MIPS_PHYS_TO_CACHED(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	} else {
		int cpu;
		struct local_sysmaps *sysm;

		cpu = PCPU_GET(cpuid);
		sysm = &sysmap_lmem[cpu];
		PMAP_LGMEM_LOCK(sysm);
		int_level = disableintr();

		sysm->CMAP1 = mips_paddr_to_tlbpfn(phys) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
		sysm->valid1 = 1;
		pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
		bzero(sysm->CADDR1, PAGE_SIZE);
		pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
		restoreintr(int_level);
		sysm->CMAP1 = 0;
		sysm->valid1 = 0;
		PMAP_LGMEM_UNLOCK(sysm);
	}
}
/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
	vm_offset_t va_src, va_dst;
	vm_paddr_t phy_src = VM_PAGE_TO_PHYS(src);
	vm_paddr_t phy_dst = VM_PAGE_TO_PHYS(dst);
	register_t int_level;

#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	if (need_wired_tlb_page_pool) {
		struct fpage *fp1, *fp2;
		struct sysmaps *sysmaps;

		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
		mtx_lock(&sysmaps->lock);

		fp1 = &sysmaps->fp[PMAP_FPAGE1];
		fp2 = &sysmaps->fp[PMAP_FPAGE2];

		va_src = pmap_map_fpage(phy_src, fp1, FALSE);
		va_dst = pmap_map_fpage(phy_dst, fp2, FALSE);

		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);

		pmap_unmap_fpage(phy_src, fp1);
		pmap_unmap_fpage(phy_dst, fp2);

		mtx_unlock(&sysmaps->lock);
		/*
		 * Should you flush the cache?
		 */
	} else
#endif
	if ((phy_src < MIPS_KSEG0_LARGEST_PHYS) && (phy_dst < MIPS_KSEG0_LARGEST_PHYS)) {
		/* easy case, all can be accessed via KSEG0 */
		/*
		 * Flush all caches for VA that are mapped to this page
		 * to make sure that data in SDRAM is up to date.
		 */
		pmap_flush_pvcache(src);
		mips_dcache_wbinv_range_index(
		    MIPS_PHYS_TO_CACHED(phy_dst), NBPG);
		va_src = MIPS_PHYS_TO_CACHED(phy_src);
		va_dst = MIPS_PHYS_TO_CACHED(phy_dst);
		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
	} else {
		int cpu;
		struct local_sysmaps *sysm;

		cpu = PCPU_GET(cpuid);
		sysm = &sysmap_lmem[cpu];
		PMAP_LGMEM_LOCK(sysm);
		int_level = disableintr();
		if (phy_src < MIPS_KSEG0_LARGEST_PHYS) {
			/* one side needs mapping - dest */
			va_src = MIPS_PHYS_TO_CACHED(phy_src);
			sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
			pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
			sysm->valid2 = 1;
			va_dst = (vm_offset_t)sysm->CADDR2;
		} else if (phy_dst < MIPS_KSEG0_LARGEST_PHYS) {
			/* one side needs mapping - src */
			va_dst = MIPS_PHYS_TO_CACHED(phy_dst);
			sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
			pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
			va_src = (vm_offset_t)sysm->CADDR1;
			sysm->valid1 = 1;
		} else {
			/* all need mapping */
			sysm->CMAP1 = mips_paddr_to_tlbpfn(phy_src) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
			sysm->CMAP2 = mips_paddr_to_tlbpfn(phy_dst) | PTE_RW | PTE_V | PTE_G | PTE_W | PTE_CACHE;
			pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR1, sysm->CMAP1);
			pmap_TLB_update_kernel((vm_offset_t)sysm->CADDR2, sysm->CMAP2);
			sysm->valid1 = sysm->valid2 = 1;
			va_src = (vm_offset_t)sysm->CADDR1;
			va_dst = (vm_offset_t)sysm->CADDR2;
		}
		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
		if (sysm->valid1) {
			pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR1);
			sysm->CMAP1 = 0;
			sysm->valid1 = 0;
		}
		if (sysm->valid2) {
			pmap_TLB_invalidate_kernel((vm_offset_t)sysm->CADDR2);
			sysm->CMAP2 = 0;
			sysm->valid2 = 0;
		}
		restoreintr(int_level);
		PMAP_LGMEM_UNLOCK(sysm);
	}
}
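
/*
 * Note on the CMAP1/CMAP2 paths above: each CPU owns a pair of temporary
 * kernel mappings (CADDR1/CADDR2) for pages that lie beyond the range
 * directly addressable through KSEG0.  Interrupts stay disabled while a
 * CMAP entry is valid so the wired TLB entry cannot be evicted mid-copy,
 * and the entry is invalidated again before the per-CPU lock is dropped.
 */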
/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;

	if (m->flags & PG_FICTITIOUS)
		return (FALSE);

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			return (TRUE);
		}
		loops++;
		if (loops >= 16)
			break;
	}
	return (FALSE);
}
/*
 * Remove all pages from the specified address space;
 * this aids process exit speeds.  Also, this code
 * is special cased for the current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove
 * in the case of running down an entire address space.
 */
void
pmap_remove_pages(pmap_t pmap)
{
	pt_entry_t *pte, tpte;
	pv_entry_t pv, npv;
	vm_page_t m;

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
		printf("warning: pmap_remove_pages called with non-current pmap\n");
		return;
	}
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	/* XXX need to be TAILQ_FOREACH_SAFE? */
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {

		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		if (!pmap_pte_v(pte))
			panic("pmap_remove_pages: page on pm_pvlist has no pte");
		tpte = *pte;

		/*
		 * We cannot remove wired pages from a process' mapping
		 * at this time.
		 */
		if (tpte & PTE_W) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}
		*pte = is_kernel_pmap(pmap) ? PTE_G : 0;

		m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(tpte));

		KASSERT(m < &vm_page_array[vm_page_array_size],
		    ("pmap_remove_pages: bad tpte %lx", tpte));

		pv->pv_pmap->pm_stats.resident_count--;

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PTE_M)
			vm_page_dirty(m);

		npv = TAILQ_NEXT(pv, pv_plist);
		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);

		m->md.pv_list_count--;
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
			vm_page_flag_clear(m, PG_WRITEABLE);
		}
		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
		free_pv_entry(pv);
	}
	pmap_invalidate_all(pmap);
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();
}
/*
 * pmap_testbit tests bits in pte's.
 * Note that the testbit/changebit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	boolean_t rv = FALSE;

	if (m->flags & PG_FICTITIOUS)
		return (rv);

	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
		return (rv);

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
			continue;
		}
#endif
		PMAP_LOCK(pv->pv_pmap);
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		rv = (*pte & bit) != 0;
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	return (rv);
}
/*
 * This routine is used to modify bits in ptes.
 */
static __inline void
pmap_changebit(vm_page_t m, int bit, boolean_t setem)
{
	register pv_entry_t pv;
	register pt_entry_t *pte;

	if (m->flags & PG_FICTITIOUS)
		return;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 * If setting RO, do we need to clear the VAC?
	 */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
			continue;
		}
#endif
		PMAP_LOCK(pv->pv_pmap);
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		if (setem) {
			*(int *)pte |= bit;
			pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
		} else {
			vm_offset_t pbits = *(vm_offset_t *)pte;

			if (pbits & bit) {
				if (bit == PTE_RW) {
					if (pbits & PTE_M)
						vm_page_dirty(m);
					*(int *)pte = (pbits & ~(PTE_M | PTE_RW)) |
					    PTE_RO;
				} else {
					*(int *)pte = pbits & ~bit;
				}
				pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	if (!setem && bit == PTE_RW)
		vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
 * pmap_page_wired_mappings:
 *
 *	Return the number of managed mappings to the given physical page
 *	that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pv;
	int count;

	count = 0;
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (count);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
		if (pv->pv_wired)
			count++;
	return (count);
}
/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{
	pv_entry_t pv, npv;
	vm_offset_t va;
	pt_entry_t *pte;

	if ((m->flags & PG_WRITEABLE) == 0)
		return;

	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 */
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
		/*
		 * The page's pv chain is linked via pv_list; pv_plist is
		 * the per-pmap chain and would walk the wrong list here.
		 */
		npv = TAILQ_NEXT(pv, pv_list);
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);

		if ((pte == NULL) || !mips_pg_v(*pte))
			panic("page on pv_list has no pte");

		va = pv->pv_va;
		pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_EXECUTE);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
 * pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	if (m->flags & PG_FICTITIOUS)
		return (0);

	if (m->md.pv_flags & PV_TABLE_REF) {
		m->md.pv_flags &= ~PV_TABLE_REF;
		return (1);
	}
	return (0);
}
/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	if (m->flags & PG_FICTITIOUS)
		return (FALSE);

	if (m->md.pv_flags & PV_TABLE_MOD)
		return (TRUE);
	else
		return (pmap_testbit(m, PTE_M));
}
/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pt_entry_t *pte;
	boolean_t rv;

	rv = FALSE;
	PMAP_LOCK(pmap);
	if (*pmap_pde(pmap, addr)) {
		pte = pmap_pte(pmap, addr);
		rv = (*pte == 0);
	}
	PMAP_UNLOCK(pmap);
	return (rv);
}
/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	if (m->flags & PG_FICTITIOUS)
		return;
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->md.pv_flags & PV_TABLE_MOD) {
		pmap_changebit(m, PTE_M, FALSE);
		m->md.pv_flags &= ~PV_TABLE_MOD;
	}
}
/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
	if (m->flags & PG_FICTITIOUS)
		return;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->md.pv_flags & PV_TABLE_REF) {
		m->md.pv_flags &= ~PV_TABLE_REF;
	}
}
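
/*
 * MIPS has no hardware-maintained reference or modify bits; PV_TABLE_REF
 * and PV_TABLE_MOD in md.pv_flags are software state, set from the TLB
 * exception path (see pmap_set_modified() below) and cleared here.
 * pmap_changebit() strips PTE_M/PTE_RW so the next store retraps and the
 * modify state can be re-recorded.
 */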
/*
 * Miscellaneous support routines follow
 */

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;

	/*
	 * KSEG1 maps only the first 512M of the physical address space.
	 * For pa > 0x20000000 we must build a proper mapping using
	 * pmap_kenter().
	 */
	if ((pa + size - 1) < MIPS_KSEG0_LARGEST_PHYS)
		return ((void *)MIPS_PHYS_TO_KSEG1(pa));

	offset = pa & PAGE_MASK;
	size = roundup(size + offset, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
	pa = trunc_page(pa);
	for (tmpva = va; size > 0;) {
		pmap_kenter(tmpva, pa);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset, tmpva;

	/* If the address is within KSEG1 then there is nothing to do */
	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
		return;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(size + offset, PAGE_SIZE);
	for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
		pmap_kremove(tmpva);
	kmem_free(kernel_map, base, size);
}
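
/*
 * Illustrative (hypothetical) driver usage of the pair above; 'sc_physbase'
 * and 'sc_size' are made-up softc fields, not part of this file:
 *
 *	csr = pmap_mapdev(sc->sc_physbase, sc->sc_size);
 *	... access device registers through csr ...
 *	pmap_unmapdev((vm_offset_t)csr, sc->sc_size);
 */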
/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	pt_entry_t *ptep, pte;
	vm_page_t m;
	int val = 0;

	PMAP_LOCK(pmap);
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? *ptep : 0;
	PMAP_UNLOCK(pmap);

	if (mips_pg_v(pte)) {
		vm_offset_t pa;

		val = MINCORE_INCORE;
		pa = mips_tlbpfn_to_paddr(pte);
		if (!page_is_managed(pa))
			return (val);

		m = PHYS_TO_VM_PAGE(pa);

		/*
		 * Modified by us
		 */
		if (pte & PTE_M)
			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
		else {
			/*
			 * Modified by someone
			 */
			vm_page_lock_queues();
			if (m->dirty || pmap_is_modified(m))
				val |= MINCORE_MODIFIED_OTHER;
			vm_page_unlock_queues();
		}
		/*
		 * Referenced by us or someone
		 */
		vm_page_lock_queues();
		if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
			vm_page_flag_set(m, PG_REFERENCED);
		}
		vm_page_unlock_queues();
	}
	return (val);
}
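
/*
 * Make the specified thread's address space current on this CPU: mark the
 * pmap active, ensure it holds a valid ASID for the current generation,
 * and, for curthread, load the segment-table base and the TLB PID.
 */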
void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	struct proc *p = td->td_proc;

	critical_enter();

	pmap = vmspace_pmap(p->p_vmspace);
	oldpmap = PCPU_GET(curpmap);

	if (oldpmap)
		atomic_clear_32(&oldpmap->pm_active, PCPU_GET(cpumask));
	atomic_set_32(&pmap->pm_active, PCPU_GET(cpumask));
	pmap_asid_alloc(pmap);
	if (td == curthread) {
		PCPU_SET(segbase, pmap->pm_segtab);
		MachSetPID(pmap->pm_asid[PCPU_GET(cpuid)].asid);
	}
	PCPU_SET(curpmap, pmap);
	critical_exit();
}
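
/*
 * pmap_sync_icache: synchronize the instruction cache after code has been
 * written into the given range.  This port currently does no work here;
 * the empty body below is intentional.
 */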
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}
/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBSEG)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & SEGOFSET;
	if (size - ((NBSEG - superpage_offset) & SEGOFSET) < NBSEG ||
	    (*addr & SEGOFSET) == superpage_offset)
		return;
	if ((*addr & SEGOFSET) < superpage_offset)
		*addr = (*addr & ~SEGOFSET) + superpage_offset;
	else
		*addr = ((*addr + SEGOFSET) & ~SEGOFSET) + superpage_offset;
}
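
/*
 * Debug helper: find the process with the given pid and dump every valid
 * user mapping in its pmap, returning the number of PTEs visited.
 */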
int pmap_pid_dump(int pid);

int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_pid != pid)
			continue;
		if (p->p_vmspace) {
			int i, j;

			printf("vmspace is %p\n",
			    p->p_vmspace);
			pmap = vmspace_pmap(p->p_vmspace);
			printf("pmap asid:%x generation:%x\n",
			    pmap->pm_asid[0].asid,
			    pmap->pm_asid[0].gen);
			for (i = 0; i < NUSERPGTBLS; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				unsigned base = i << SEGSHIFT;

				pde = &pmap->pm_segtab[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < 1024; j++) {
						vm_offset_t va = base +
						    (j << PAGE_SHIFT);
						vm_offset_t pa;
						vm_page_t m;

						pte = pmap_pte(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							pa = mips_tlbpfn_to_paddr(*pte);
							m = PHYS_TO_VM_PAGE(pa);
							printf("va: %p, pt: %p, h: %d, w: %d, f: 0x%x\n",
							    (void *)va, (void *)pa, m->hold_count,
							    m->wire_count, m->flags);
							npte++;
						}
					}
				}
			}
		} else {
			printf("Process pid:%d has no vm_space\n", pid);
		}
		break;
	}
	sx_sunlock(&allproc_lock);
	return (npte);
}
static void pads(pmap_t pm);
void pmap_pvdump(vm_offset_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
	unsigned va, i, j;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPTEPG; i++)
		if (pm->pm_segtab[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap &&
				    va >= VM_MAXUSER_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *(int *)ptep);
			}
}
void
pmap_pvdump(vm_offset_t pa)
{
	register pv_entry_t pv;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
	    pv = TAILQ_NEXT(pv, pv_list)) {
		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
		pads(pv->pv_pmap);
	}
	printf("\n");
}
/*
 * Allocate a TLB address space tag (called an ASID or TLBPID) and return it.
 * It takes almost as much or more time to search the TLB for a
 * specific ASID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new ASID, we just take the next number.  When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over.  ASID zero is reserved for kernel use.
 */
static void
pmap_asid_alloc(pmap_t pmap)
{
	if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
	    pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
		if (PCPU_GET(next_asid) == pmap_max_asid) {
			MIPS_TBIAP();
			PCPU_SET(asid_generation,
			    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
			if (PCPU_GET(asid_generation) == 0) {
				PCPU_SET(asid_generation, 1);
			}
			PCPU_SET(next_asid, 1);	/* 0 means invalid */
		}
		pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
		pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
		PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
	}

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW | PDB_TLBPID)) {
		if (curproc)
			printf("pmap_asid_alloc: curproc %d '%s' ",
			    curproc->p_pid, curproc->p_comm);
		else
			printf("pmap_asid_alloc: curproc <none> ");
		printf("segtab %p asid %d\n", pmap->pm_segtab,
		    pmap->pm_asid[PCPU_GET(cpuid)].asid);
	}
#endif
}
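
/*
 * Return 1 if the physical address is covered by vm_page_array and the
 * backing page is neither fictitious nor unmanaged; otherwise return 0.
 */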
int
page_is_managed(vm_offset_t pa)
{
	vm_offset_t pgnum = mips_btop(pa);

	if (pgnum >= first_page && (pgnum < (first_page + vm_page_array_size))) {
		vm_page_t m;

		m = PHYS_TO_VM_PAGE(pa);
		if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
			return (1);
	}
	return (0);
}
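
/*
 * Compute the PTE protection bits for a new mapping.  On MIPS the TLB "D"
 * (dirty) bit is what makes a page writable; a clean-writable entry (the
 * PTE_CWPAGE case below) leaves D clear so the first store takes a TLB
 * modified exception, letting the modify state be recorded before the
 * page is dirtied.
 */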
static int
init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
	int rw = 0;

	if (!(prot & VM_PROT_WRITE))
		rw = PTE_ROPAGE;
	else {
		if (va >= VM_MIN_KERNEL_ADDRESS) {
			/*
			 * Don't bother to trap on kernel writes, just
			 * record page as dirty.
			 */
			rw = PTE_RWPAGE;
			vm_page_dirty(m);
		} else if ((m->md.pv_flags & PV_TABLE_MOD) ||
		    m->dirty == VM_PAGE_BITS_ALL)
			rw = PTE_RWPAGE;
		else
			rw = PTE_CWPAGE;
		vm_page_flag_set(m, PG_WRITEABLE);
	}
	return (rw);
}
/*
 * pmap_page_is_free:
 *
 *	Called when a page is freed to allow pmap to clean up
 *	any extra state associated with the page.  In this case
 *	clear modified/referenced bits.
 */
void
pmap_page_is_free(vm_page_t m)
{
	m->md.pv_flags = 0;
}
/*
 * pmap_set_modified:
 *
 *	Sets the page modified and reference bits for the specified page.
 */
void
pmap_set_modified(vm_offset_t pa)
{
	PHYS_TO_VM_PAGE(pa)->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
}
#include <machine/db_machdep.h>

/*
 * Dump the translation buffer (TLB) in readable form.
 */
void
db_dump_tlb(int first, int last)
{
	struct tlb tlb;
	int tlbno;

	tlbno = first;
	while (tlbno <= last) {
		MachTLBRead(tlbno, &tlb);
		if (tlb.tlb_lo0 & PTE_V || tlb.tlb_lo1 & PTE_V) {
			printf("TLB %2d vad 0x%08x ", tlbno, (tlb.tlb_hi & 0xffffff00));
		} else {
			printf("TLB*%2d vad 0x%08x ", tlbno, (tlb.tlb_hi & 0xffffff00));
		}
		printf("0=0x%08x ", pfn_to_vad(tlb.tlb_lo0));
		printf("%c", tlb.tlb_lo0 & PTE_M ? 'M' : ' ');
		printf("%c", tlb.tlb_lo0 & PTE_G ? 'G' : ' ');
		printf(" atr %x ", (tlb.tlb_lo0 >> 3) & 7);
		printf("1=0x%08x ", pfn_to_vad(tlb.tlb_lo1));
		printf("%c", tlb.tlb_lo1 & PTE_M ? 'M' : ' ');
		printf("%c", tlb.tlb_lo1 & PTE_G ? 'G' : ' ');
		printf(" atr %x ", (tlb.tlb_lo1 >> 3) & 7);
		printf(" sz=%x pid=%x\n", tlb.tlb_mask,
		    (tlb.tlb_hi & 0x000000ff));
		tlbno++;
	}
}

#include <sys/kernel.h>
#include <ddb/ddb.h>

DB_SHOW_COMMAND(tlb, ddb_dump_tlb)
{
	db_dump_tlb(0, num_tlbentries - 1);
}
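
/*
 * With DDB compiled in, the command above is available from the debugger
 * prompt as "show tlb"; it dumps every hardware TLB entry.
 */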
/*
 * Routine:	pmap_kextract
 * Function:
 *	Extract the physical page address associated with the given
 *	kernel virtual address.
 */
/* PMAP_INLINE */ vm_offset_t
pmap_kextract(vm_offset_t va)
{
	vm_offset_t pa = 0;

	if (va < MIPS_CACHED_MEMORY_ADDR) {
		/* user virtual address */
		pt_entry_t *ptep;

		if (curproc && curproc->p_vmspace) {
			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
			pa = mips_tlbpfn_to_paddr(*ptep) |
			    (va & PAGE_MASK);
		}
	} else if (va >= MIPS_CACHED_MEMORY_ADDR &&
	    va < MIPS_UNCACHED_MEMORY_ADDR)
		pa = MIPS_CACHED_TO_PHYS(va);
	else if (va >= MIPS_UNCACHED_MEMORY_ADDR &&
	    va < MIPS_KSEG2_START)
		pa = MIPS_UNCACHED_TO_PHYS(va);
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
	else if (need_wired_tlb_page_pool && ((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va < (VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET))))
		pa = MIPS_CACHED_TO_PHYS(va);
#endif
	else if (va >= MIPS_KSEG2_START && va < VM_MAX_KERNEL_ADDRESS) {
		pt_entry_t *ptep;

		/* Is the kernel pmap initialized? */
		if (kernel_pmap->pm_active) {
			if (va >= (vm_offset_t)virtual_sys_start) {
				/* It's inside the virtual address range */
				ptep = pmap_pte(kernel_pmap, va);
				if (ptep)
					pa = mips_tlbpfn_to_paddr(*ptep) |
					    (va & PAGE_MASK);
			} else {
				int i;

				/*
				 * It's inside the special mapping area.  I
				 * don't think this should happen, but if it
				 * does I want it to all work right :-)
				 * Note: if it does happen, we assume the
				 * caller has the lock?  FIXME, this needs
				 * to be checked - RRS.
				 */
				for (i = 0; i < MAXCPU; i++) {
					if ((sysmap_lmem[i].valid1) && ((vm_offset_t)sysmap_lmem[i].CADDR1 == va)) {
						pa = mips_tlbpfn_to_paddr(sysmap_lmem[i].CMAP1);
						break;
					}
					if ((sysmap_lmem[i].valid2) && ((vm_offset_t)sysmap_lmem[i].CADDR2 == va)) {
						pa = mips_tlbpfn_to_paddr(sysmap_lmem[i].CMAP2);
						break;
					}
				}
			}
		}
	}
	return (pa);
}
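
/*
 * Write back and invalidate the data cache at every virtual address the
 * page is currently mapped; the index-based flush ensures all cache lines
 * for the page are covered even on virtually-indexed caches.
 */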
void
pmap_flush_pvcache(vm_page_t m)
{
	pv_entry_t pv;

	if (m != NULL) {
		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
		    pv = TAILQ_NEXT(pv, pv_list)) {
			mips_dcache_wbinv_range_index(pv->pv_va, NBPG);
		}
	}
}