2 * Copyright (c) 1991 Regents of the University of California.
4 * Copyright (c) 1994 John S. Dyson
6 * Copyright (c) 1994 David Greenman
8 * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
46 * Copyright (c) 2003 Networks Associates Technology, Inc.
47 * All rights reserved.
49 * This software was developed for the FreeBSD Project by Jake Burkholder,
50 * Safeport Network Services, and Network Associates Laboratories, the
51 * Security Research Division of Network Associates, Inc. under
52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
53 * CHATS research program.
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
67 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
81 * Manages physical address maps.
83 * In addition to hardware address maps, this
84 * module is called upon to provide software-use-only
85 * maps which may or may not be stored in the same
86 * form as hardware maps. These pseudo-maps are
87 * used to store intermediate results from copy
88 * operations to and from address spaces.
90 * Since the information managed by this module is
91 * also stored by the logical address mapping module,
92 * this module may throw away valid virtual-to-physical
93 * mappings at almost any time. However, invalidations
94 * of virtual-to-physical mappings must be done as requested.
97 * In order to cope with hardware architectures which
98 * make virtual-to-physical map invalidates expensive,
99 * this module may delay invalidation or reduced-protection
100 * operations until such time as they are actually
101 * necessary. This module is given full information as
102 * to which processors are currently using which maps,
103 * and to when physical maps must be made correct.
107 #include "opt_pmap.h"
108 #include "opt_msgbuf.h"
110 #include "opt_xbox.h"
112 #include <sys/param.h>
113 #include <sys/systm.h>
114 #include <sys/kernel.h>
116 #include <sys/lock.h>
117 #include <sys/malloc.h>
118 #include <sys/mman.h>
119 #include <sys/msgbuf.h>
120 #include <sys/mutex.h>
121 #include <sys/proc.h>
122 #include <sys/sf_buf.h>
124 #include <sys/vmmeter.h>
125 #include <sys/sched.h>
126 #include <sys/sysctl.h>
132 #include <vm/vm_param.h>
133 #include <vm/vm_kern.h>
134 #include <vm/vm_page.h>
135 #include <vm/vm_map.h>
136 #include <vm/vm_object.h>
137 #include <vm/vm_extern.h>
138 #include <vm/vm_pageout.h>
139 #include <vm/vm_pager.h>
140 #include <vm/vm_reserv.h>
143 #include <machine/cpu.h>
144 #include <machine/cputypes.h>
145 #include <machine/md_var.h>
146 #include <machine/pcb.h>
147 #include <machine/specialreg.h>
149 #include <machine/smp.h>
153 #include <machine/xbox.h>
156 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
157 #define CPU_ENABLE_SSE
160 #ifndef PMAP_SHPGPERPROC
161 #define PMAP_SHPGPERPROC 200
164 #if !defined(DIAGNOSTIC)
165 #define PMAP_INLINE __gnu89_inline
172 #define PV_STAT(x) do { x ; } while (0)
174 #define PV_STAT(x) do { } while (0)
177 #define pa_index(pa) ((pa) >> PDRSHIFT)
178 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
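/*
 * Illustrative note (added; not part of the original file): pa_index()
 * selects the 2/4MB superpage that a physical address falls in, so
 * pa_to_pvh() yields the pv list head tracking superpage mappings of that
 * region.  For example, assuming the non-PAE value PDRSHIFT == 22,
 * pa_index(0x01234567) == 4, i.e. the fifth entry of pv_table.
 */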
181 * Get PDEs and PTEs for user/kernel address space
183 #define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
184 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
186 #define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
187 #define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
188 #define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
189 #define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0)
190 #define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
192 #define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
193 atomic_clear_int((u_int *)(pte), PG_W))
194 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
196 struct pmap kernel_pmap_store;
197 LIST_HEAD(pmaplist, pmap);
198 static struct pmaplist allpmaps;
199 static struct mtx allpmaps_lock;
201 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
202 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
203 int pgeflag = 0; /* PG_G or-in */
204 int pseflag = 0; /* PG_PS or-in */
207 vm_offset_t kernel_vm_end;
208 extern u_int32_t KERNend;
212 static uma_zone_t pdptzone;
215 static int pat_works; /* Is page attribute table sane? */
217 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
219 static int pg_ps_enabled;
220 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RD, &pg_ps_enabled, 0,
221 "Are large page mappings enabled?");
224 * Data for the pv entry allocation mechanism
226 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
227 static struct md_page *pv_table;
228 static int shpgperproc = PMAP_SHPGPERPROC;
230 struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */
231 int pv_maxchunks; /* How many chunks we have KVA for */
232 vm_offset_t pv_vafree; /* freelist stored in the PTE */
235 * All those kernel PT submaps that BSD is so fond of
244 static struct sysmaps sysmaps_pcpu[MAXCPU];
245 pt_entry_t *CMAP1 = 0;
246 static pt_entry_t *CMAP3;
247 caddr_t CADDR1 = 0, ptvmmap = 0;
248 static caddr_t CADDR3;
249 struct msgbuf *msgbufp = 0;
254 static caddr_t crashdumpmap;
256 static pt_entry_t *PMAP1 = 0, *PMAP2;
257 static pt_entry_t *PADDR1 = 0, *PADDR2;
260 static int PMAP1changedcpu;
261 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
263 "Number of times pmap_pte_quick changed CPU with same PMAP1");
265 static int PMAP1changed;
266 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
268 "Number of times pmap_pte_quick changed PMAP1");
269 static int PMAP1unchanged;
270 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
272 "Number of times pmap_pte_quick didn't change PMAP1");
273 static struct mtx PMAP2mutex;
275 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
276 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
277 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
278 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
279 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
280 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
281 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
283 static int pmap_pvh_wired_mappings(struct md_page *pvh, int count);
285 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
286 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
288 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
289 vm_page_t m, vm_prot_t prot, vm_page_t mpte);
290 static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
291 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
292 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
293 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
294 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
295 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
296 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
297 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
299 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
300 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
302 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
304 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
305 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
307 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
309 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
310 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
313 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
315 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
316 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
317 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
318 static void pmap_pte_release(pt_entry_t *pte);
319 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
320 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
322 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
325 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
326 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
329 * If you get an error here, then you set KVA_PAGES wrong! See the
330 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
331 * a multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
333 CTASSERT(KERNBASE % (1 << 24) == 0);
336 * Move the kernel virtual free pointer to the next
337 * 4MB. This is used to help improve performance
338 * by using a large (4MB) page for much of the kernel
339 * (.text, .data, .bss)
342 pmap_kmem_choose(vm_offset_t addr)
344 vm_offset_t newaddr = addr;
347 if (cpu_feature & CPUID_PSE)
348 newaddr = (addr + PDRMASK) & ~PDRMASK;
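/*
 * Worked example (added; not part of the original file), assuming the
 * non-PAE value PDRMASK == 0x3fffff (4MB - 1):
 *
 *   addr    = 0xc0531000
 *   newaddr = (0xc0531000 + 0x3fffff) & ~0x3fffff = 0xc0800000
 *
 * i.e. the kernel virtual free pointer is rounded up to the next 4MB
 * boundary so that a single large page can map the kernel image.
 */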
354 * Bootstrap the system enough to run with virtual memory.
356 * On the i386 this is called after mapping has already been enabled
357 * and just syncs the pmap module with what has already been done.
358 * [We can't call it easily with mapping off since the kernel is not
359 * mapped with PA == VA, hence we would have to relocate every address
360 * from the linked base (virtual) address "KERNBASE" to the actual
361 * (physical) address starting relative to 0]
364 pmap_bootstrap(vm_paddr_t firstaddr)
367 pt_entry_t *pte, *unused;
368 struct sysmaps *sysmaps;
372 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
373 * large. It should instead be correctly calculated in locore.s and
374 * not based on 'first' (which is a physical address, not a virtual
375 * address, for the start of unused physical memory). The kernel
376 * page tables are NOT double mapped and thus should not be included
377 * in this calculation.
379 virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
380 virtual_avail = pmap_kmem_choose(virtual_avail);
382 virtual_end = VM_MAX_KERNEL_ADDRESS;
385 * Initialize the kernel pmap (which is statically allocated).
387 PMAP_LOCK_INIT(kernel_pmap);
388 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
390 kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
392 kernel_pmap->pm_root = NULL;
393 kernel_pmap->pm_active = -1; /* don't allow deactivation */
394 TAILQ_INIT(&kernel_pmap->pm_pvchunk);
395 LIST_INIT(&allpmaps);
396 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
397 mtx_lock_spin(&allpmaps_lock);
398 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
399 mtx_unlock_spin(&allpmaps_lock);
403 * Reserve some special page table entries/VA space for temporary
406 #define SYSMAP(c, p, v, n) \
407 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
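/*
 * Illustrative expansion (added; not part of the original file): a use
 * such as SYSMAP(caddr_t, CMAP1, CADDR1, 1) becomes
 *
 *   CADDR1 = (caddr_t)va; va += (1 * PAGE_SIZE); CMAP1 = pte; pte += 1;
 *
 * i.e. it hands out one page of KVA and remembers the PTE that maps it.
 */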
413 * CMAP1/CMAP2 are used for zeroing and copying pages.
414 * CMAP3 is used for the idle process page zeroing.
416 for (i = 0; i < MAXCPU; i++) {
417 sysmaps = &sysmaps_pcpu[i];
418 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
419 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
420 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
422 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
423 SYSMAP(caddr_t, CMAP3, CADDR3, 1)
429 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
432 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
434 SYSMAP(caddr_t, unused, ptvmmap, 1)
437 * msgbufp is used to map the system message buffer.
439 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
442 * ptemap is used for pmap_pte_quick
444 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
445 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);
447 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
454 * Leave in place an identity mapping (virt == phys) for the low 1 MB
455 * physical memory region that is used by the ACPI wakeup code. This
456 * mapping must not have PG_G set.
459 /* FIXME: This is gross, but needed for the XBOX. Since we are in such
460 * an early stage, we cannot yet neatly map video memory ... :-(
461 * Better fixes are very welcome! */
462 if (!arch_i386_is_xbox)
464 for (i = 1; i < NKPT; i++)
467 /* Initialize the PAT MSR if present. */
470 /* Turn on PG_G on kernel page(s) */
482 /* Bail if this CPU doesn't implement PAT. */
483 if (!(cpu_feature & CPUID_PAT))
486 if (cpu_vendor_id != CPU_VENDOR_INTEL ||
487 (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
489 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
490 * Program 4 and 5 as WP and WC.
491 * Leave 6 and 7 as UC- and UC.
493 pat_msr = rdmsr(MSR_PAT);
494 pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
495 pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
496 PAT_VALUE(5, PAT_WRITE_COMBINING);
500 * Due to some Intel errata, we can only safely use the lower 4
501 * PAT entries. Thus, just replace PAT Index 2 with WC instead
504 * Intel Pentium III Processor Specification Update
505 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
508 * Intel Pentium IV Processor Specification Update
509 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
511 pat_msr = rdmsr(MSR_PAT);
512 pat_msr &= ~PAT_MASK(2);
513 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
516 wrmsr(MSR_PAT, pat_msr);
520 * Set PG_G on kernel pages. Only the BSP calls this when SMP is turned on.
527 vm_offset_t va, endva;
534 endva = KERNBASE + KERNend;
537 va = KERNBASE + KERNLOAD;
539 pdir = kernel_pmap->pm_pdir[KPTDI+i];
541 kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
542 invltlb(); /* Play it safe, invltlb() every time */
547 va = (vm_offset_t)btext;
552 invltlb(); /* Play it safe, invltlb() every time */
559 * Initialize a vm_page's machine-dependent fields.
562 pmap_page_init(vm_page_t m)
565 TAILQ_INIT(&m->md.pv_list);
566 m->md.pat_mode = PAT_WRITE_BACK;
571 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
574 /* Inform UMA that this allocator uses kernel_map/object. */
575 *flags = UMA_SLAB_KERNEL;
576 return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
577 0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
582 * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
584 * - Must deal with pages in order to ensure that none of the PG_* bits
585 * are ever set, PG_V in particular.
586 * - Assumes we can write to ptes without pte_store() atomic ops, even
587 * on PAE systems. This should be ok.
588 * - Assumes nothing will ever test these addresses for 0 to indicate
589 * no mapping instead of correctly checking PG_V.
590 * - Assumes a vm_offset_t will fit in a pte (true for i386).
591 * Because PG_V is never set, there can be no mappings to invalidate.
594 pmap_ptelist_alloc(vm_offset_t *head)
601 return (va); /* Out of memory */
605 panic("pmap_ptelist_alloc: va with PG_V set!");
611 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
616 panic("pmap_ptelist_free: freeing va with PG_V set!");
618 *pte = *head; /* virtual! PG_V is 0 though */
623 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
629 for (i = npages - 1; i >= 0; i--) {
630 va = (vm_offset_t)base + i * PAGE_SIZE;
631 pmap_ptelist_free(head, va);
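/*
 * Illustrative sketch (added; not part of the original file) of the
 * freelist shape built by the three functions above: *head holds the
 * first free VA, and each free VA's own PTE slot holds the next free VA,
 * so allocation and freeing are simple list pops and pushes:
 *
 *   alloc: va = *head; *head = *vtopte(va); *vtopte(va) = 0;
 *   free:  *vtopte(va) = *head; *head = va;
 */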
637 * Initialize the pmap module.
638 * Called by vm_init, to initialize any structures that the pmap
639 * system needs to map virtual memory.
649 * Initialize the vm page array entries for the kernel pmap's
652 for (i = 0; i < nkpt; i++) {
653 mpte = PHYS_TO_VM_PAGE(PTD[i + KPTDI] & PG_FRAME);
654 KASSERT(mpte >= vm_page_array &&
655 mpte < &vm_page_array[vm_page_array_size],
656 ("pmap_init: page table page is out of range"));
657 mpte->pindex = i + KPTDI;
658 mpte->phys_addr = PTD[i + KPTDI] & PG_FRAME;
662 * Initialize the address space (zone) for the pv entries. Set a
663 * high water mark so that the system can recover from excessive
664 * numbers of pv entries.
666 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
667 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
668 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
669 pv_entry_max = roundup(pv_entry_max, _NPCPV);
670 pv_entry_high_water = 9 * (pv_entry_max / 10);
673 * Are large page mappings enabled?
675 TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
678 * Calculate the size of the pv head table for superpages.
680 for (i = 0; phys_avail[i + 1]; i += 2);
681 pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;
684 * Allocate memory for the pv head table for superpages.
686 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
688 pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
689 for (i = 0; i < pv_npg; i++)
690 TAILQ_INIT(&pv_table[i].pv_list);
692 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
693 pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
694 PAGE_SIZE * pv_maxchunks);
695 if (pv_chunkbase == NULL)
696 panic("pmap_init: not enough kvm for pv chunks");
697 pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
699 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
700 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
701 UMA_ZONE_VM | UMA_ZONE_NOFREE);
702 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
707 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
708 "Max number of PV entries");
709 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
710 "Page share factor per proc");
712 SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
713 "2/4MB page mapping counters");
715 static u_long pmap_pde_demotions;
716 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
717 &pmap_pde_demotions, 0, "2/4MB page demotions");
719 static u_long pmap_pde_mappings;
720 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
721 &pmap_pde_mappings, 0, "2/4MB page mappings");
723 static u_long pmap_pde_p_failures;
724 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
725 &pmap_pde_p_failures, 0, "2/4MB page promotion failures");
727 static u_long pmap_pde_promotions;
728 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
729 &pmap_pde_promotions, 0, "2/4MB page promotions");
731 /***************************************************
732 * Low level helper routines.....
733 ***************************************************/
736 * Determine the appropriate bits to set in a PTE or PDE for a specified caching mode.
740 pmap_cache_bits(int mode, boolean_t is_pde)
742 int pat_flag, pat_index, cache_bits;
744 /* The PAT bit is different for PTE's and PDE's. */
745 pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
747 /* If we don't support PAT, map extended modes to older ones. */
748 if (!(cpu_feature & CPUID_PAT)) {
750 case PAT_UNCACHEABLE:
751 case PAT_WRITE_THROUGH:
755 case PAT_WRITE_COMBINING:
756 case PAT_WRITE_PROTECTED:
757 mode = PAT_UNCACHEABLE;
762 /* Map the caching mode to a PAT index. */
765 case PAT_UNCACHEABLE:
768 case PAT_WRITE_THROUGH:
777 case PAT_WRITE_COMBINING:
780 case PAT_WRITE_PROTECTED:
784 panic("Unknown caching mode %d\n", mode);
789 case PAT_UNCACHEABLE:
790 case PAT_WRITE_PROTECTED:
793 case PAT_WRITE_THROUGH:
799 case PAT_WRITE_COMBINING:
803 panic("Unknown caching mode %d\n", mode);
807 /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
810 cache_bits |= pat_flag;
812 cache_bits |= PG_NC_PCD;
814 cache_bits |= PG_NC_PWT;
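/*
 * Worked example (added; not part of the original file): if the PAT index
 * chosen above were 5 (binary 101), the code sets the PAT flag (index
 * bit 2) and PG_NC_PWT (index bit 0), but not PG_NC_PCD (index bit 1).
 */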
819 * For SMP, these functions have to use the IPI mechanism for coherence.
821 * N.B.: Before calling any of the following TLB invalidation functions,
822 * the calling processor must ensure that all stores updating a non-
823 * kernel page table are globally performed. Otherwise, another
824 * processor could cache an old, pre-update entry without being
825 * invalidated. This can happen one of two ways: (1) The pmap becomes
826 * active on another processor after its pm_active field is checked by
827 * one of the following functions but before a store updating the page
828 * table is globally performed. (2) The pmap becomes active on another
829 * processor before its pm_active field is checked but due to
830 * speculative loads one of the following functions stills reads the
831 * pmap as inactive on the other processor.
833 * The kernel page table is exempt because its pm_active field is
834 * immutable. The kernel page table is always active on every
838 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
844 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
848 cpumask = PCPU_GET(cpumask);
849 other_cpus = PCPU_GET(other_cpus);
850 if (pmap->pm_active & cpumask)
852 if (pmap->pm_active & other_cpus)
853 smp_masked_invlpg(pmap->pm_active & other_cpus, va);
859 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
866 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
867 for (addr = sva; addr < eva; addr += PAGE_SIZE)
869 smp_invlpg_range(sva, eva);
871 cpumask = PCPU_GET(cpumask);
872 other_cpus = PCPU_GET(other_cpus);
873 if (pmap->pm_active & cpumask)
874 for (addr = sva; addr < eva; addr += PAGE_SIZE)
876 if (pmap->pm_active & other_cpus)
877 smp_masked_invlpg_range(pmap->pm_active & other_cpus,
884 pmap_invalidate_all(pmap_t pmap)
890 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
894 cpumask = PCPU_GET(cpumask);
895 other_cpus = PCPU_GET(other_cpus);
896 if (pmap->pm_active & cpumask)
898 if (pmap->pm_active & other_cpus)
899 smp_masked_invltlb(pmap->pm_active & other_cpus);
905 pmap_invalidate_cache(void)
915 * Normal, non-SMP, 486+ invalidation functions.
916 * We inline these within pmap.c for speed.
919 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
922 if (pmap == kernel_pmap || pmap->pm_active)
927 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
931 if (pmap == kernel_pmap || pmap->pm_active)
932 for (addr = sva; addr < eva; addr += PAGE_SIZE)
937 pmap_invalidate_all(pmap_t pmap)
940 if (pmap == kernel_pmap || pmap->pm_active)
945 pmap_invalidate_cache(void)
953 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
956 KASSERT((sva & PAGE_MASK) == 0,
957 ("pmap_invalidate_cache_range: sva not page-aligned"));
958 KASSERT((eva & PAGE_MASK) == 0,
959 ("pmap_invalidate_cache_range: eva not page-aligned"));
961 if (cpu_feature & CPUID_SS)
962 ; /* If "Self Snoop" is supported, do nothing. */
963 else if (cpu_feature & CPUID_CLFSH) {
966 * Otherwise, do per-cache line flush. Use the mfence
967 * instruction to ensure that previous stores are
968 * included in the write-back. The processor
969 * propagates flush to other processors in the cache coherence domain.
973 for (; sva < eva; sva += cpu_clflush_line_size)
979 * No targeted cache flush methods are supported by the CPU, so
980 * globally invalidate the cache as a last resort.
982 pmap_invalidate_cache();
987 * Are we current address space or kernel? N.B. We return FALSE when
988 * a pmap's page table is in use because a kernel thread is borrowing
989 * it. The borrowed page table can change spontaneously, making any
990 * dependence on its continued use subject to a race condition.
993 pmap_is_current(pmap_t pmap)
996 return (pmap == kernel_pmap ||
997 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
998 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
1002 * If the given pmap is not the current or kernel pmap, the returned pte must
1003 * be released by passing it to pmap_pte_release().
1006 pmap_pte(pmap_t pmap, vm_offset_t va)
1011 pde = pmap_pde(pmap, va);
1015 /* are we current address space or kernel? */
1016 if (pmap_is_current(pmap))
1017 return (vtopte(va));
1018 mtx_lock(&PMAP2mutex);
1019 newpf = *pde & PG_FRAME;
1020 if ((*PMAP2 & PG_FRAME) != newpf) {
1021 *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
1022 pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
1024 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
1030 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte pointer to be invalid.
1033 static __inline void
1034 pmap_pte_release(pt_entry_t *pte)
1037 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
1038 mtx_unlock(&PMAP2mutex);
1041 static __inline void
1042 invlcaddr(void *caddr)
1045 invlpg((u_int)caddr);
1049 * Super fast pmap_pte routine best used when scanning
1050 * the pv lists. This eliminates many coarse-grained
1051 * invltlb calls. Note that many of the pv list
1052 * scans are across different pmaps. It is very wasteful
1053 * to do an entire invltlb for checking a single mapping.
1055 * If the given pmap is not the current pmap, vm_page_queue_mtx
1056 * must be held and curthread pinned to a CPU.
1059 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
1064 pde = pmap_pde(pmap, va);
1068 /* are we current address space or kernel? */
1069 if (pmap_is_current(pmap))
1070 return (vtopte(va));
1071 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1072 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
1073 newpf = *pde & PG_FRAME;
1074 if ((*PMAP1 & PG_FRAME) != newpf) {
1075 *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
1077 PMAP1cpu = PCPU_GET(cpuid);
1083 if (PMAP1cpu != PCPU_GET(cpuid)) {
1084 PMAP1cpu = PCPU_GET(cpuid);
1090 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
1096 * Routine: pmap_extract
1098 * Extract the physical page address associated
1099 * with the given map/virtual_address pair.
1102 pmap_extract(pmap_t pmap, vm_offset_t va)
1110 pde = pmap->pm_pdir[va >> PDRSHIFT];
1112 if ((pde & PG_PS) != 0)
1113 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
1115 pte = pmap_pte(pmap, va);
1116 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
1117 pmap_pte_release(pte);
1125 * Routine: pmap_extract_and_hold
1127 * Atomically extract and hold the physical page
1128 * with the given pmap and virtual address pair
1129 * if that mapping permits the given protection.
1132 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1139 vm_page_lock_queues();
1141 pde = *pmap_pde(pmap, va);
1144 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
1145 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
1151 pte = *pmap_pte_quick(pmap, va);
1153 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
1154 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
1160 vm_page_unlock_queues();
1165 /***************************************************
1166 * Low level mapping routines.....
1167 ***************************************************/
1170 * Add a wired page to the kva.
1171 * Note: not SMP coherent.
1174 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1179 pte_store(pte, pa | PG_RW | PG_V | pgeflag);
1182 static __inline void
1183 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
1188 pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
1192 * Remove a page from the kernel pagetables.
1193 * Note: not SMP coherent.
1196 pmap_kremove(vm_offset_t va)
1205 * Used to map a range of physical addresses into kernel
1206 * virtual address space.
1208 * The value passed in '*virt' is a suggested virtual address for
1209 * the mapping. Architectures which can support a direct-mapped
1210 * physical to virtual region can return the appropriate address
1211 * within that region, leaving '*virt' unchanged. Other
1212 * architectures should map the pages starting at '*virt' and
1213 * update '*virt' with the first usable address after the mapped
1217 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1219 vm_offset_t va, sva;
1222 while (start < end) {
1223 pmap_kenter(va, start);
1227 pmap_invalidate_range(kernel_pmap, sva, va);
1234 * Add a list of wired pages to the kva.
1235 * This routine is only used for temporary
1236 * kernel mappings that do not need to have
1237 * page modification or references recorded.
1238 * Note that old mappings are simply written
1239 * over. The page *must* be wired.
1240 * Note: SMP coherent. Uses a ranged shootdown IPI.
1243 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1245 pt_entry_t *endpte, oldpte, *pte;
1249 endpte = pte + count;
1250 while (pte < endpte) {
1252 pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag |
1253 pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
1257 if ((oldpte & PG_V) != 0)
1258 pmap_invalidate_range(kernel_pmap, sva, sva + count *
1263 * This routine tears out page mappings from the
1264 * kernel -- it is meant only for temporary mappings.
1265 * Note: SMP coherent. Uses a ranged shootdown IPI.
1268 pmap_qremove(vm_offset_t sva, int count)
1273 while (count-- > 0) {
1277 pmap_invalidate_range(kernel_pmap, sva, va);
1280 /***************************************************
1281 * Page table page management routines.....
1282 ***************************************************/
1283 static __inline void
1284 pmap_free_zero_pages(vm_page_t free)
1288 while (free != NULL) {
1291 /* Preserve the page's PG_ZERO setting. */
1292 vm_page_free_toq(m);
1297 * Schedule the specified unused page table page to be freed. Specifically,
1298 * add the page to the specified list of pages that will be released to the
1299 * physical memory manager after the TLB has been updated.
1301 static __inline void
1302 pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
1306 m->flags |= PG_ZERO;
1308 m->flags &= ~PG_ZERO;
1314 * Inserts the specified page table page into the specified pmap's collection
1315 * of idle page table pages. Each of a pmap's page table pages is responsible
1316 * for mapping a distinct range of virtual addresses. The pmap's collection is
1317 * ordered by this virtual address range.
1320 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
1324 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1325 root = pmap->pm_root;
1330 root = vm_page_splay(mpte->pindex, root);
1331 if (mpte->pindex < root->pindex) {
1332 mpte->left = root->left;
1335 } else if (mpte->pindex == root->pindex)
1336 panic("pmap_insert_pt_page: pindex already inserted");
1338 mpte->right = root->right;
1343 pmap->pm_root = mpte;
1347 * Looks for a page table page mapping the specified virtual address in the
1348 * specified pmap's collection of idle page table pages. Returns NULL if there
1349 * is no page table page corresponding to the specified virtual address.
1352 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
1355 vm_pindex_t pindex = va >> PDRSHIFT;
1357 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1358 if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
1359 mpte = vm_page_splay(pindex, mpte);
1360 if ((pmap->pm_root = mpte)->pindex != pindex)
1367 * Removes the specified page table page from the specified pmap's collection
1368 * of idle page table pages. The specified page table page must be a member of
1369 * the pmap's collection.
1372 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
1376 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1377 if (mpte != pmap->pm_root)
1378 vm_page_splay(mpte->pindex, pmap->pm_root);
1379 if (mpte->left == NULL)
1382 root = vm_page_splay(mpte->pindex, mpte->left);
1383 root->right = mpte->right;
1385 pmap->pm_root = root;
1389 * This routine unholds page table pages, and if the hold count
1390 * drops to zero, then it decrements the wire count.
1393 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
1397 if (m->wire_count == 0)
1398 return _pmap_unwire_pte_hold(pmap, m, free);
1404 _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
1409 * unmap the page table page
1411 pmap->pm_pdir[m->pindex] = 0;
1412 --pmap->pm_stats.resident_count;
1415 * This is a release store so that the ordinary store unmapping
1416 * the page table page is globally performed before TLB shoot-
1419 atomic_subtract_rel_int(&cnt.v_wire_count, 1);
1422 * Do an invltlb to make the invalidated mapping
1423 * take effect immediately.
1425 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
1426 pmap_invalidate_page(pmap, pteva);
1429 * Put page on a list so that it is released after
1430 * *ALL* TLB shootdown is done
1432 pmap_add_delayed_free_list(m, free, TRUE);
1438 * After removing a page table entry, this routine is used to
1439 * conditionally free the page, and manage the hold/wire counts.
1442 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
1447 if (va >= VM_MAXUSER_ADDRESS)
1449 ptepde = *pmap_pde(pmap, va);
1450 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
1451 return pmap_unwire_pte_hold(pmap, mpte, free);
1455 pmap_pinit0(pmap_t pmap)
1458 PMAP_LOCK_INIT(pmap);
1459 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
1461 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
1463 pmap->pm_root = NULL;
1464 pmap->pm_active = 0;
1465 PCPU_SET(curpmap, pmap);
1466 TAILQ_INIT(&pmap->pm_pvchunk);
1467 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1468 mtx_lock_spin(&allpmaps_lock);
1469 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1470 mtx_unlock_spin(&allpmaps_lock);
1474 * Initialize a preallocated and zeroed pmap structure,
1475 * such as one in a vmspace structure.
1478 pmap_pinit(pmap_t pmap)
1480 vm_page_t m, ptdpg[NPGPTD];
1485 PMAP_LOCK_INIT(pmap);
1488 * No need to allocate page table space yet but we do need a valid
1489 * page directory table.
1491 if (pmap->pm_pdir == NULL) {
1492 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
1495 if (pmap->pm_pdir == NULL) {
1496 PMAP_LOCK_DESTROY(pmap);
1500 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
1501 KASSERT(((vm_offset_t)pmap->pm_pdpt &
1502 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
1503 ("pmap_pinit: pdpt misaligned"));
1504 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
1505 ("pmap_pinit: pdpt above 4g"));
1507 pmap->pm_root = NULL;
1509 KASSERT(pmap->pm_root == NULL,
1510 ("pmap_pinit: pmap has reserved page table page(s)"));
1513 * allocate the page directory page(s)
1515 for (i = 0; i < NPGPTD;) {
1516 m = vm_page_alloc(NULL, color++,
1517 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1526 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
1528 for (i = 0; i < NPGPTD; i++) {
1529 if ((ptdpg[i]->flags & PG_ZERO) == 0)
1530 bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
1533 mtx_lock_spin(&allpmaps_lock);
1534 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1535 mtx_unlock_spin(&allpmaps_lock);
1536 /* Wire in kernel global address entries. */
1537 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
1539 /* install self-referential address mapping entry(s) */
1540 for (i = 0; i < NPGPTD; i++) {
1541 pa = VM_PAGE_TO_PHYS(ptdpg[i]);
1542 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
1544 pmap->pm_pdpt[i] = pa | PG_V;
1548 pmap->pm_active = 0;
1549 TAILQ_INIT(&pmap->pm_pvchunk);
1550 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1556 * this routine is called if the page table page is not mapped correctly.
1560 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
1565 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1566 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1567 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1570 * Allocate a page table page.
1572 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1573 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1574 if (flags & M_WAITOK) {
1576 vm_page_unlock_queues();
1578 vm_page_lock_queues();
1583 * Indicate the need to retry. While waiting, the page table
1584 * page may have been allocated.
1588 if ((m->flags & PG_ZERO) == 0)
1592 * Map the pagetable page into the process address space, if
1593 * it isn't already there.
1596 pmap->pm_stats.resident_count++;
1598 ptepa = VM_PAGE_TO_PHYS(m);
1599 pmap->pm_pdir[ptepindex] =
1600 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
1606 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1612 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1613 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1614 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1617 * Calculate pagetable page index
1619 ptepindex = va >> PDRSHIFT;
1622 * Get the page directory entry
1624 ptepa = pmap->pm_pdir[ptepindex];
1627 * This supports switching from a 4MB page to a normal 4K page.
1630 if (ptepa & PG_PS) {
1631 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
1632 ptepa = pmap->pm_pdir[ptepindex];
1636 * If the page table page is mapped, we just increment the
1637 * hold count, and activate it.
1640 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
1644 * Here if the pte page isn't mapped, or if it has been deallocated.
1647 m = _pmap_allocpte(pmap, ptepindex, flags);
1648 if (m == NULL && (flags & M_WAITOK))
1655 /***************************************************
1656 * Pmap allocation/deallocation routines.
1657 ***************************************************/
1661 * Deal with an SMP shootdown of other users of the pmap that we are
1662 * trying to dispose of. This can be a bit hairy.
1664 static cpumask_t *lazymask;
1665 static u_int lazyptd;
1666 static volatile u_int lazywait;
1668 void pmap_lazyfix_action(void);
1671 pmap_lazyfix_action(void)
1673 cpumask_t mymask = PCPU_GET(cpumask);
1676 (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
1678 if (rcr3() == lazyptd)
1679 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1680 atomic_clear_int(lazymask, mymask);
1681 atomic_store_rel_int(&lazywait, 1);
1685 pmap_lazyfix_self(cpumask_t mymask)
1688 if (rcr3() == lazyptd)
1689 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1690 atomic_clear_int(lazymask, mymask);
1695 pmap_lazyfix(pmap_t pmap)
1697 cpumask_t mymask, mask;
1700 while ((mask = pmap->pm_active) != 0) {
1702 mask = mask & -mask; /* Find least significant set bit */
1703 mtx_lock_spin(&smp_ipi_mtx);
1705 lazyptd = vtophys(pmap->pm_pdpt);
1707 lazyptd = vtophys(pmap->pm_pdir);
1709 mymask = PCPU_GET(cpumask);
1710 if (mask == mymask) {
1711 lazymask = &pmap->pm_active;
1712 pmap_lazyfix_self(mymask);
1714 atomic_store_rel_int((u_int *)&lazymask,
1715 (u_int)&pmap->pm_active);
1716 atomic_store_rel_int(&lazywait, 0);
1717 ipi_selected(mask, IPI_LAZYPMAP);
1718 while (lazywait == 0) {
1724 mtx_unlock_spin(&smp_ipi_mtx);
1726 printf("pmap_lazyfix: spun for 50000000\n");
1733 * Cleaning up on uniprocessor is easy. For various reasons, we're
1734 * unlikely to have to even execute this code, including the fact
1735 * that the cleanup is deferred until the parent does a wait(2), which
1736 * means that another userland process has run.
1739 pmap_lazyfix(pmap_t pmap)
1743 cr3 = vtophys(pmap->pm_pdir);
1744 if (cr3 == rcr3()) {
1745 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1746 pmap->pm_active &= ~(PCPU_GET(cpumask));
1752 * Release any resources held by the given physical map.
1753 * Called when a pmap initialized by pmap_pinit is being released.
1754 * Should only be called if the map contains no valid mappings.
1757 pmap_release(pmap_t pmap)
1759 vm_page_t m, ptdpg[NPGPTD];
1762 KASSERT(pmap->pm_stats.resident_count == 0,
1763 ("pmap_release: pmap resident count %ld != 0",
1764 pmap->pm_stats.resident_count));
1765 KASSERT(pmap->pm_root == NULL,
1766 ("pmap_release: pmap has reserved page table page(s)"));
1769 mtx_lock_spin(&allpmaps_lock);
1770 LIST_REMOVE(pmap, pm_list);
1771 mtx_unlock_spin(&allpmaps_lock);
1773 for (i = 0; i < NPGPTD; i++)
1774 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
1777 bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
1778 sizeof(*pmap->pm_pdir));
1780 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
1782 for (i = 0; i < NPGPTD; i++) {
1785 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
1786 ("pmap_release: got wrong ptd page"));
1789 atomic_subtract_int(&cnt.v_wire_count, 1);
1790 vm_page_free_zero(m);
1792 PMAP_LOCK_DESTROY(pmap);
1796 kvm_size(SYSCTL_HANDLER_ARGS)
1798 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
1800 return sysctl_handle_long(oidp, &ksize, 0, req);
1802 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1803 0, 0, kvm_size, "IU", "Size of KVM");
1806 kvm_free(SYSCTL_HANDLER_ARGS)
1808 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1810 return sysctl_handle_long(oidp, &kfree, 0, req);
1812 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1813 0, 0, kvm_free, "IU", "Amount of KVM free");
1816 * grow the number of kernel page table entries, if needed
1819 pmap_growkernel(vm_offset_t addr)
1822 vm_paddr_t ptppaddr;
1827 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1828 if (kernel_vm_end == 0) {
1829 kernel_vm_end = KERNBASE;
1831 while (pdir_pde(PTD, kernel_vm_end)) {
1832 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1834 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1835 kernel_vm_end = kernel_map->max_offset;
1840 addr = roundup2(addr, PAGE_SIZE * NPTEPG);
1841 if (addr - 1 >= kernel_map->max_offset)
1842 addr = kernel_map->max_offset;
1843 while (kernel_vm_end < addr) {
1844 if (pdir_pde(PTD, kernel_vm_end)) {
1845 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1846 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1847 kernel_vm_end = kernel_map->max_offset;
1853 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
1854 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1857 panic("pmap_growkernel: no memory to grow kernel");
1861 if ((nkpg->flags & PG_ZERO) == 0)
1862 pmap_zero_page(nkpg);
1863 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1864 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1865 pdir_pde(PTD, kernel_vm_end) = newpdir;
1867 mtx_lock_spin(&allpmaps_lock);
1868 LIST_FOREACH(pmap, &allpmaps, pm_list) {
1869 pde = pmap_pde(pmap, kernel_vm_end);
1870 pde_store(pde, newpdir);
1872 mtx_unlock_spin(&allpmaps_lock);
1873 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1874 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1875 kernel_vm_end = kernel_map->max_offset;
1882 /***************************************************
1883 * page management routines.
1884 ***************************************************/
1886 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1887 CTASSERT(_NPCM == 11);
1889 static __inline struct pv_chunk *
1890 pv_to_chunk(pv_entry_t pv)
1893 return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
1896 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1898 #define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */
1899 #define PC_FREE10 0x0000fffful /* Free values for index 10 */
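/*
 * Illustrative arithmetic (added; not part of the original file): the
 * free bitmap spans 10 full 32-bit words plus 16 bits of the 11th word,
 * i.e. 10 * 32 + 16 = 336 pv entries per page-sized pv_chunk, which is
 * why PC_FREE10 is 0x0000ffff and _NPCM is 11.
 */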
1901 static uint32_t pc_freemask[11] = {
1902 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1903 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1904 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1905 PC_FREE0_9, PC_FREE10
1908 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1909 "Current number of pv entries");
1912 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1914 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1915 "Current number of pv entry chunks");
1916 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1917 "Current number of pv entry chunks allocated");
1918 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1919 "Current number of pv entry chunks frees");
1920 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1921 "Number of times tried to get a chunk page but failed.");
1923 static long pv_entry_frees, pv_entry_allocs;
1924 static int pv_entry_spare;
1926 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1927 "Current number of pv entry frees");
1928 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1929 "Current number of pv entry allocs");
1930 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1931 "Current number of spare pv entries");
1933 static int pmap_collect_inactive, pmap_collect_active;
1935 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
1936 "Current number times pmap_collect called on inactive queue");
1937 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
1938 "Current number times pmap_collect called on active queue");
1942 * We are in a serious low memory condition. Resort to
1943 * drastic measures to free some pages so we can allocate
1944 * another pv entry chunk. This is normally called to
1945 * unmap inactive pages, and if necessary, active pages.
1948 pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
1950 struct md_page *pvh;
1953 pt_entry_t *pte, tpte;
1954 pv_entry_t next_pv, pv;
1959 TAILQ_FOREACH(m, &vpq->pl, pageq) {
1960 if (m->hold_count || m->busy)
1962 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
1965 /* Avoid deadlock and lock recursion. */
1966 if (pmap > locked_pmap)
1968 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
1970 pmap->pm_stats.resident_count--;
1971 pde = pmap_pde(pmap, va);
1972 KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found"
1973 " a 4mpage in page %p's pv list", m));
1974 pte = pmap_pte_quick(pmap, va);
1975 tpte = pte_load_clear(pte);
1976 KASSERT((tpte & PG_W) == 0,
1977 ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
1979 vm_page_flag_set(m, PG_REFERENCED);
1980 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
1983 pmap_unuse_pt(pmap, va, &free);
1984 pmap_invalidate_page(pmap, va);
1985 pmap_free_zero_pages(free);
1986 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1987 if (TAILQ_EMPTY(&m->md.pv_list)) {
1988 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1989 if (TAILQ_EMPTY(&pvh->pv_list))
1990 vm_page_flag_clear(m, PG_WRITEABLE);
1992 free_pv_entry(pmap, pv);
1993 if (pmap != locked_pmap)
2002 * free the pv_entry back to the free list
2005 free_pv_entry(pmap_t pmap, pv_entry_t pv)
2008 struct pv_chunk *pc;
2009 int idx, field, bit;
2011 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2012 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2013 PV_STAT(pv_entry_frees++);
2014 PV_STAT(pv_entry_spare++);
2016 pc = pv_to_chunk(pv);
2017 idx = pv - &pc->pc_pventry[0];
2020 pc->pc_map[field] |= 1ul << bit;
2021 /* move to head of list */
2022 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2023 for (idx = 0; idx < _NPCM; idx++)
2024 if (pc->pc_map[idx] != pc_freemask[idx]) {
2025 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2028 PV_STAT(pv_entry_spare -= _NPCPV);
2029 PV_STAT(pc_chunk_count--);
2030 PV_STAT(pc_chunk_frees++);
2031 /* entire chunk is free, return it */
2032 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2033 pmap_qremove((vm_offset_t)pc, 1);
2034 vm_page_unwire(m, 0);
2036 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
2040 * get a new pv_entry, allocating a block from the system
2044 get_pv_entry(pmap_t pmap, int try)
2046 static const struct timeval printinterval = { 60, 0 };
2047 static struct timeval lastprint;
2048 static vm_pindex_t colour;
2049 struct vpgqueues *pq;
2052 struct pv_chunk *pc;
2055 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2056 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2057 PV_STAT(pv_entry_allocs++);
2059 if (pv_entry_count > pv_entry_high_water)
2060 if (ratecheck(&lastprint, &printinterval))
2061 printf("Approaching the limit on PV entries, consider "
2062 "increasing either the vm.pmap.shpgperproc or the "
2063 "vm.pmap.pv_entry_max tunable.\n");
2066 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2068 for (field = 0; field < _NPCM; field++) {
2069 if (pc->pc_map[field]) {
2070 bit = bsfl(pc->pc_map[field]);
2074 if (field < _NPCM) {
2075 pv = &pc->pc_pventry[field * 32 + bit];
2076 pc->pc_map[field] &= ~(1ul << bit);
2077 /* If this was the last item, move it to tail */
2078 for (field = 0; field < _NPCM; field++)
2079 if (pc->pc_map[field] != 0) {
2080 PV_STAT(pv_entry_spare--);
2081 return (pv); /* not full, return */
2083 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2084 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2085 PV_STAT(pv_entry_spare--);
2090 * Access to the ptelist "pv_vafree" is synchronized by the page
2091 * queues lock. If "pv_vafree" is currently non-empty, it will
2092 * remain non-empty until pmap_ptelist_alloc() completes.
2094 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
2095 &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
2096 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
2099 PV_STAT(pc_chunk_tryfail++);
2103 * Reclaim pv entries: At first, destroy mappings to
2104 * inactive pages. After that, if a pv chunk entry
2105 * is still needed, destroy mappings to active pages.
2108 PV_STAT(pmap_collect_inactive++);
2109 pq = &vm_page_queues[PQ_INACTIVE];
2110 } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
2111 PV_STAT(pmap_collect_active++);
2112 pq = &vm_page_queues[PQ_ACTIVE];
2114 panic("get_pv_entry: increase vm.pmap.shpgperproc");
2115 pmap_collect(pmap, pq);
2118 PV_STAT(pc_chunk_count++);
2119 PV_STAT(pc_chunk_allocs++);
2121 pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
2122 pmap_qenter((vm_offset_t)pc, &m, 1);
2124 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */
2125 for (field = 1; field < _NPCM; field++)
2126 pc->pc_map[field] = pc_freemask[field];
2127 pv = &pc->pc_pventry[0];
2128 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2129 PV_STAT(pv_entry_spare += _NPCPV - 1);
2133 static __inline pv_entry_t
2134 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2138 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2139 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
2140 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2141 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
2149 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2151 struct md_page *pvh;
2153 vm_offset_t va_last;
2156 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2157 KASSERT((pa & PDRMASK) == 0,
2158 ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
2161 * Transfer the 4mpage's pv entry for this mapping to the first page's pv list.
2164 pvh = pa_to_pvh(pa);
2165 va = trunc_4mpage(va);
2166 pv = pmap_pvh_remove(pvh, pmap, va);
2167 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
2168 m = PHYS_TO_VM_PAGE(pa);
2169 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2170 /* Instantiate the remaining NPTEPG - 1 pv entries. */
2171 va_last = va + NBPDR - PAGE_SIZE;
2174 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2175 ("pmap_pv_demote_pde: page %p is not managed", m));
2177 pmap_insert_entry(pmap, va, m);
2178 } while (va < va_last);
2182 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2184 struct md_page *pvh;
2186 vm_offset_t va_last;
2189 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2190 KASSERT((pa & PDRMASK) == 0,
2191 ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
2194 * Transfer the first page's pv entry for this mapping to the
2195 * 4mpage's pv list. Aside from avoiding the cost of a call
2196 * to get_pv_entry(), a transfer avoids the possibility that
2197 * get_pv_entry() calls pmap_collect() and that pmap_collect()
2198 * removes one of the mappings that is being promoted.
2200 m = PHYS_TO_VM_PAGE(pa);
2201 va = trunc_4mpage(va);
2202 pv = pmap_pvh_remove(&m->md, pmap, va);
2203 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
2204 pvh = pa_to_pvh(pa);
2205 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
2206 /* Free the remaining NPTEPG - 1 pv entries. */
2207 va_last = va + NBPDR - PAGE_SIZE;
2211 pmap_pvh_free(&m->md, pmap, va);
2212 } while (va < va_last);
2216 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2220 pv = pmap_pvh_remove(pvh, pmap, va);
2221 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2222 free_pv_entry(pmap, pv);
2226 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
2228 struct md_page *pvh;
2230 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2231 pmap_pvh_free(&m->md, pmap, va);
2232 if (TAILQ_EMPTY(&m->md.pv_list)) {
2233 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2234 if (TAILQ_EMPTY(&pvh->pv_list))
2235 vm_page_flag_clear(m, PG_WRITEABLE);
2240 * Create a pv entry for page at pa for (pmap, va).
2244 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2248 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2249 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2250 pv = get_pv_entry(pmap, FALSE);
2252 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2256 * Conditionally create a pv entry.
2259 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2263 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2264 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2265 if (pv_entry_count < pv_entry_high_water &&
2266 (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2268 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2275 * Create the pv entries for each of the pages within a superpage.
2278 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2280 struct md_page *pvh;
2283 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2284 if (pv_entry_count < pv_entry_high_water &&
2285 (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2287 pvh = pa_to_pvh(pa);
2288 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
2295 * Fills a page table page with mappings to consecutive physical pages.
2298 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
2302 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
2304 newpte += PAGE_SIZE;
2309 * Tries to demote a 2- or 4MB page mapping. If demotion fails, the
2310 * 2- or 4MB page mapping is invalidated.
2313 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2315 pd_entry_t newpde, oldpde;
2316 pmap_t allpmaps_entry;
2317 pt_entry_t *firstpte, newpte;
2319 vm_page_t free, mpte;
2321 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2323 KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
2324 ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
2325 mpte = pmap_lookup_pt_page(pmap, va);
2327 pmap_remove_pt_page(pmap, mpte);
2329 KASSERT((oldpde & PG_W) == 0,
2330 ("pmap_demote_pde: page table page for a wired mapping"
2334 * Invalidate the 2- or 4MB page mapping and return
2335 * "failure" if the mapping was never accessed or the
2336 * allocation of the new page table page fails.
2338 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
2339 va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
2340 VM_ALLOC_WIRED)) == NULL) {
2342 pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
2343 pmap_invalidate_page(pmap, trunc_4mpage(va));
2344 pmap_free_zero_pages(free);
2345 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
2346 " in pmap %p", va, pmap);
2349 if (va < VM_MAXUSER_ADDRESS)
2350 pmap->pm_stats.resident_count++;
2352 mptepa = VM_PAGE_TO_PHYS(mpte);
2355 * Temporarily map the page table page (mpte) into the kernel's
2356 * address space at either PADDR1 or PADDR2.
2358 if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) {
2359 if ((*PMAP1 & PG_FRAME) != mptepa) {
2360 *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
2362 PMAP1cpu = PCPU_GET(cpuid);
2368 if (PMAP1cpu != PCPU_GET(cpuid)) {
2369 PMAP1cpu = PCPU_GET(cpuid);
2377 mtx_lock(&PMAP2mutex);
2378 if ((*PMAP2 & PG_FRAME) != mptepa) {
2379 *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
2380 pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
2384 newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
2385 KASSERT((oldpde & PG_A) != 0,
2386 ("pmap_demote_pde: oldpde is missing PG_A"));
2387 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
2388 ("pmap_demote_pde: oldpde is missing PG_M"));
2389 newpte = oldpde & ~PG_PS;
2390 if ((newpte & PG_PDE_PAT) != 0)
2391 newpte ^= PG_PDE_PAT | PG_PTE_PAT;
2394 * If the page table page is new, initialize it.
2396 if (mpte->wire_count == 1) {
2397 mpte->wire_count = NPTEPG;
2398 pmap_fill_ptp(firstpte, newpte);
2400 KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
2401 ("pmap_demote_pde: firstpte and newpte map different physical"
2405 * If the mapping has changed attributes, update the page table
2408 if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
2409 pmap_fill_ptp(firstpte, newpte);
2412 * Demote the mapping. This pmap is locked. The old PDE has
2413 * PG_A set. If the old PDE has PG_RW set, it also has PG_M
2414 * set. Thus, there is no danger of a race with another
2415 * processor changing the setting of PG_A and/or PG_M between
2416 * the read above and the store below.
2418 if (pmap == kernel_pmap) {
2420 * A harmless race exists between this loop and the bcopy()
2421 * in pmap_pinit() that initializes the kernel segment of
2422 * the new page table. Specifically, that bcopy() may copy
2423 * the new PDE from the PTD, which is first in allpmaps, to
2424 * the new page table before this loop updates that new page table.
2427 mtx_lock_spin(&allpmaps_lock);
2428 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
2429 pde = pmap_pde(allpmaps_entry, va);
2430 KASSERT(*pde == newpde || (*pde & PG_PTE_PROMOTE) ==
2431 (oldpde & PG_PTE_PROMOTE),
2432 ("pmap_demote_pde: pde was %#jx, expected %#jx",
2433 (uintmax_t)*pde, (uintmax_t)oldpde));
2434 pde_store(pde, newpde);
2436 mtx_unlock_spin(&allpmaps_lock);
2438 pde_store(pde, newpde);
2439 if (firstpte == PADDR2)
2440 mtx_unlock(&PMAP2mutex);
2443 * Invalidate the recursive mapping of the page table page.
2445 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
2448 * Demote the pv entry. This depends on the earlier demotion
2449 * of the mapping. Specifically, the (re)creation of a per-
2450 * page pv entry might trigger the execution of pmap_collect(),
2451 * which might reclaim a newly (re)created per-page pv entry
2452 * and destroy the associated mapping. In order to destroy
2453 * the mapping, the PDE must have already changed from mapping
2454 * the 2mpage to referencing the page table page.
2456 if ((oldpde & PG_MANAGED) != 0)
2457 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
2459 pmap_pde_demotions++;
2460 CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
2461 " in pmap %p", va, pmap);
2466 * pmap_remove_pde: unmap a 2- or 4MB superpage within a process's address space
2469 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
2472 struct md_page *pvh;
2474 vm_offset_t eva, va;
2477 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2478 KASSERT((sva & PDRMASK) == 0,
2479 ("pmap_remove_pde: sva is not 4mpage aligned"));
2480 oldpde = pte_load_clear(pdq);
2482 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
2485 * Machines that don't support invlpg also don't support PG_G.
2489 pmap_invalidate_page(kernel_pmap, sva);
2490 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2491 if (oldpde & PG_MANAGED) {
2492 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
2493 pmap_pvh_free(pvh, pmap, sva);
2495 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
2496 va < eva; va += PAGE_SIZE, m++) {
2497 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
2500 vm_page_flag_set(m, PG_REFERENCED);
2501 if (TAILQ_EMPTY(&m->md.pv_list) &&
2502 TAILQ_EMPTY(&pvh->pv_list))
2503 vm_page_flag_clear(m, PG_WRITEABLE);
2506 if (pmap == kernel_pmap) {
2507 if (!pmap_demote_pde(pmap, pdq, sva))
2508 panic("pmap_remove_pde: failed demotion");
2510 mpte = pmap_lookup_pt_page(pmap, sva);
2512 pmap_remove_pt_page(pmap, mpte);
2513 pmap->pm_stats.resident_count--;
2514 KASSERT(mpte->wire_count == NPTEPG,
2515 ("pmap_remove_pde: pte page wire count error"));
2516 mpte->wire_count = 0;
2517 pmap_add_delayed_free_list(mpte, free, FALSE);
2518 atomic_subtract_int(&cnt.v_wire_count, 1);
2524 * pmap_remove_pte: unmap a single 4KB page within a process's address space
2527 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
2532 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2533 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2534 oldpte = pte_load_clear(ptq);
2536 pmap->pm_stats.wired_count -= 1;
2538 * Machines that don't support invlpg also don't support PG_G.
2542 pmap_invalidate_page(kernel_pmap, va);
2543 pmap->pm_stats.resident_count -= 1;
2544 if (oldpte & PG_MANAGED) {
2545 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
2546 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2549 vm_page_flag_set(m, PG_REFERENCED);
2550 pmap_remove_entry(pmap, m, va);
2552 return (pmap_unuse_pt(pmap, va, free));
2556 * Remove a single page from a process address space
2559 pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
2563 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2564 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
2565 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2566 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
2568 pmap_remove_pte(pmap, pte, va, free);
2569 pmap_invalidate_page(pmap, va);
2573 * Remove the given range of addresses from the specified map.
2575 * It is assumed that the start and end are properly
2576 * rounded to the page size.
2579 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2584 vm_page_t free = NULL;
2588 * Perform an unsynchronized read. This is, however, safe.
2590 if (pmap->pm_stats.resident_count == 0)
2595 vm_page_lock_queues();
2600 * Special-case the removal of a single page: this is a very
2601 * common operation, and some code can be short-circuited.
2604 if ((sva + PAGE_SIZE == eva) &&
2605 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
2606 pmap_remove_page(pmap, sva, &free);
2610 for (; sva < eva; sva = pdnxt) {
2614 * Calculate index for next page table.
2616 pdnxt = (sva + NBPDR) & ~PDRMASK;
2619 if (pmap->pm_stats.resident_count == 0)
2622 pdirindex = sva >> PDRSHIFT;
2623 ptpaddr = pmap->pm_pdir[pdirindex];
2626 * Weed out invalid mappings. Note: we assume that the page
2627 * directory table is always allocated, and in kernel virtual.
2633 * Check for large page.
2635 if ((ptpaddr & PG_PS) != 0) {
2637 * Are we removing the entire large page? If not,
2638 * demote the mapping and fall through.
2640 if (sva + NBPDR == pdnxt && eva >= pdnxt) {
2642 * The TLB entry for a PG_G mapping is
2643 * invalidated by pmap_remove_pde().
2645 if ((ptpaddr & PG_G) == 0)
2647 pmap_remove_pde(pmap,
2648 &pmap->pm_pdir[pdirindex], sva, &free);
2650 } else if (!pmap_demote_pde(pmap,
2651 &pmap->pm_pdir[pdirindex], sva)) {
2652 /* The large page mapping was destroyed. */
2658 * Limit our scan to either the end of the va represented
2659 * by the current page table page, or to the end of the
2660 * range being removed.
2665 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
2671 * The TLB entry for a PG_G mapping is invalidated
2672 * by pmap_remove_pte().
2674 if ((*pte & PG_G) == 0)
2676 if (pmap_remove_pte(pmap, pte, sva, &free))
2683 pmap_invalidate_all(pmap);
2684 vm_page_unlock_queues();
2686 pmap_free_zero_pages(free);
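/*
 * Illustrative sketch (not part of the original source): pmap_remove()
 * above assumes that "sva" and "eva" are already page aligned, so a
 * hypothetical caller starting from an arbitrary byte range would round
 * the bounds outward first.  EX_PAGE_MASK and ex_remove_range are
 * stand-in names; the real code uses trunc_page()/round_page() for the
 * same purpose.
 */
#define	EX_PAGE_MASK	(4096u - 1)

static void
ex_remove_range(pmap_t pmap, vm_offset_t start, vm_offset_t length)
{
	vm_offset_t sva, eva;

	sva = start & ~(vm_offset_t)EX_PAGE_MASK;		/* trunc to page */
	eva = (start + length + EX_PAGE_MASK) & ~(vm_offset_t)EX_PAGE_MASK;
	pmap_remove(pmap, sva, eva);
}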
2690 * Routine: pmap_remove_all
2692 * Removes this physical page from
2693 * all physical maps in which it resides.
2694 * Reflects back modify bits to the pager.
2697 * Original versions of this routine were very
2698 * inefficient because they iteratively called
2699 * pmap_remove (slow...)
2703 pmap_remove_all(vm_page_t m)
2705 struct md_page *pvh;
2708 pt_entry_t *pte, tpte;
2713 KASSERT((m->flags & PG_FICTITIOUS) == 0,
2714 ("pmap_remove_all: page %p is fictitious", m));
2715 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2717 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2718 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2722 pde = pmap_pde(pmap, va);
2723 (void)pmap_demote_pde(pmap, pde, va);
2726 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2729 pmap->pm_stats.resident_count--;
2730 pde = pmap_pde(pmap, pv->pv_va);
2731 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
2732 " a 4mpage in page %p's pv list", m));
2733 pte = pmap_pte_quick(pmap, pv->pv_va);
2734 tpte = pte_load_clear(pte);
2736 pmap->pm_stats.wired_count--;
2738 vm_page_flag_set(m, PG_REFERENCED);
2741 * Update the vm_page_t clean and reference bits.
2743 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2746 pmap_unuse_pt(pmap, pv->pv_va, &free);
2747 pmap_invalidate_page(pmap, pv->pv_va);
2748 pmap_free_zero_pages(free);
2749 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2750 free_pv_entry(pmap, pv);
2753 vm_page_flag_clear(m, PG_WRITEABLE);
2758 * pmap_protect_pde: do the things to protect a 4mpage in a process
2761 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
2763 pd_entry_t newpde, oldpde;
2764 vm_offset_t eva, va;
2766 boolean_t anychanged;
2768 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2769 KASSERT((sva & PDRMASK) == 0,
2770 ("pmap_protect_pde: sva is not 4mpage aligned"));
2773 oldpde = newpde = *pde;
2774 if (oldpde & PG_MANAGED) {
2776 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
2777 va < eva; va += PAGE_SIZE, m++) {
2779 * In contrast to the analogous operation on a 4KB page
2780 * mapping, the mapping's PG_A flag is not cleared and
2781 * the page's PG_REFERENCED flag is not set. The
2782 * reason is that pmap_demote_pde() expects that a 2/4MB
2783 * page mapping with a stored page table page has PG_A set.
2786 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
2790 if ((prot & VM_PROT_WRITE) == 0)
2791 newpde &= ~(PG_RW | PG_M);
2793 if ((prot & VM_PROT_EXECUTE) == 0)
2796 if (newpde != oldpde) {
2797 if (!pde_cmpset(pde, oldpde, newpde))
2800 pmap_invalidate_page(pmap, sva);
2804 return (anychanged);
2808 * Set the physical protection on the
2809 * specified range of this map as requested.
2812 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2819 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2820 pmap_remove(pmap, sva, eva);
2825 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
2826 (VM_PROT_WRITE|VM_PROT_EXECUTE))
2829 if (prot & VM_PROT_WRITE)
2835 vm_page_lock_queues();
2838 for (; sva < eva; sva = pdnxt) {
2839 pt_entry_t obits, pbits;
2842 pdnxt = (sva + NBPDR) & ~PDRMASK;
2846 pdirindex = sva >> PDRSHIFT;
2847 ptpaddr = pmap->pm_pdir[pdirindex];
2850 * Weed out invalid mappings. Note: we assume that the page
2851 * directory table is always allocated, and in kernel virtual.
2857 * Check for large page.
2859 if ((ptpaddr & PG_PS) != 0) {
2861 * Are we protecting the entire large page? If not,
2862 * demote the mapping and fall through.
2864 if (sva + NBPDR == pdnxt && eva >= pdnxt) {
2866 * The TLB entry for a PG_G mapping is
2867 * invalidated by pmap_protect_pde().
2869 if (pmap_protect_pde(pmap,
2870 &pmap->pm_pdir[pdirindex], sva, prot))
2873 } else if (!pmap_demote_pde(pmap,
2874 &pmap->pm_pdir[pdirindex], sva)) {
2875 /* The large page mapping was destroyed. */
2883 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
2889 * Regardless of whether a pte is 32 or 64 bits in
2890 * size, PG_RW, PG_A, and PG_M are among the least
2891 * significant 32 bits.
2893 obits = pbits = *pte;
2894 if ((pbits & PG_V) == 0)
2896 if (pbits & PG_MANAGED) {
2899 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
2900 vm_page_flag_set(m, PG_REFERENCED);
2903 if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2905 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
2910 if ((prot & VM_PROT_WRITE) == 0)
2911 pbits &= ~(PG_RW | PG_M);
2913 if ((prot & VM_PROT_EXECUTE) == 0)
2917 if (pbits != obits) {
2919 if (!atomic_cmpset_64(pte, obits, pbits))
2922 if (!atomic_cmpset_int((u_int *)pte, obits,
2927 pmap_invalidate_page(pmap, sva);
2935 pmap_invalidate_all(pmap);
2936 vm_page_unlock_queues();
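/*
 * Illustrative sketch (not part of the original source): as noted in
 * pmap_protect() above, PG_RW, PG_A, and PG_M all live in the low 32
 * bits of a PTE even under PAE, so write permission can be revoked with
 * a 32-bit compare-and-set retry loop on the PTE's low word.
 * ex_clear_rw_m is a hypothetical stand-alone version of that loop.
 */
static void
ex_clear_rw_m(pt_entry_t *pte)
{
	u_int obits, pbits;

	do {
		obits = pbits = *(u_int *)pte;	/* snapshot the low word */
		pbits &= ~(PG_RW | PG_M);	/* drop write + dirty bits */
	} while (pbits != obits &&
	    !atomic_cmpset_int((u_int *)pte, obits, pbits));
}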
2941 * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are
2942 * within a single page table page (PTP) to a single 2- or 4MB page mapping.
2943 * For promotion to occur, two conditions must be met: (1) the 4KB page
2944 * mappings must map aligned, contiguous physical memory and (2) the 4KB page
2945 * mappings must have identical characteristics.
2947 * Managed (PG_MANAGED) mappings within the kernel address space are not
2948 * promoted. The reason is that kernel PDEs are replicated in each pmap but
2949 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel pmap.
2953 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2956 pmap_t allpmaps_entry;
2957 pt_entry_t *firstpte, oldpte, pa, *pte;
2958 vm_offset_t oldpteva;
2961 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2964 * Examine the first PTE in the specified PTP. Abort if this PTE is
2965 * either invalid, unused, or does not map the first 4KB physical page
2966 * within a 2- or 4MB page.
2968 firstpte = vtopte(trunc_4mpage(va));
2971 if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
2972 pmap_pde_p_failures++;
2973 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
2974 " in pmap %p", va, pmap);
2977 if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
2978 pmap_pde_p_failures++;
2979 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
2980 " in pmap %p", va, pmap);
2983 if ((newpde & (PG_M | PG_RW)) == PG_RW) {
2985 * When PG_M is already clear, PG_RW can be cleared without
2986 * a TLB invalidation.
2988 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
2995 * Examine each of the other PTEs in the specified PTP. Abort if this
2996 * PTE maps an unexpected 4KB physical page or does not have identical
2997 * characteristics to the first PTE.
2999 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
3000 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
3003 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
3004 pmap_pde_p_failures++;
3005 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3006 " in pmap %p", va, pmap);
3009 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
3011 * When PG_M is already clear, PG_RW can be cleared
3012 * without a TLB invalidation.
3014 if (!atomic_cmpset_int((u_int *)pte, oldpte,
3018 oldpteva = (oldpte & PG_FRAME & PDRMASK) |
3020 CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
3021 " in pmap %p", oldpteva, pmap);
3023 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
3024 pmap_pde_p_failures++;
3025 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3026 " in pmap %p", va, pmap);
3033 * Save the page table page in its current state until the PDE
3034 * mapping the superpage is demoted by pmap_demote_pde() or
3035 * destroyed by pmap_remove_pde().
3037 mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
3038 KASSERT(mpte >= vm_page_array &&
3039 mpte < &vm_page_array[vm_page_array_size],
3040 ("pmap_promote_pde: page table page is out of range"));
3041 KASSERT(mpte->pindex == va >> PDRSHIFT,
3042 ("pmap_promote_pde: page table page's pindex is wrong"));
3043 pmap_insert_pt_page(pmap, mpte);
3046 * Promote the pv entries.
3048 if ((newpde & PG_MANAGED) != 0)
3049 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
3052 * Propagate the PAT index to its proper position.
3054 if ((newpde & PG_PTE_PAT) != 0)
3055 newpde ^= PG_PDE_PAT | PG_PTE_PAT;
3058 * Map the superpage.
3060 if (pmap == kernel_pmap) {
3061 mtx_lock_spin(&allpmaps_lock);
3062 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
3063 pde = pmap_pde(allpmaps_entry, va);
3064 pde_store(pde, PG_PS | newpde);
3066 mtx_unlock_spin(&allpmaps_lock);
3068 pde_store(pde, PG_PS | newpde);
3070 pmap_pde_promotions++;
3071 CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
3072 " in pmap %p", va, pmap);
3076 * Insert the given physical page (p) at
3077 * the specified virtual address (v) in the
3078 * target physical map with the protection requested.
3080 * If specified, the page will be wired down, meaning
3081 * that the related pte can not be reclaimed.
3083 * NB: This is the only routine which MAY NOT lazy-evaluate
3084 * or lose information. That is, this routine must actually
3085 * insert this page into the given map NOW.
3088 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
3089 vm_prot_t prot, boolean_t wired)
3095 pt_entry_t origpte, newpte;
3099 va = trunc_page(va);
3100 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
3101 KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
3102 ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
3106 vm_page_lock_queues();
3111 * In the case that a page table page is not
3112 * resident, we are creating it here.
3114 if (va < VM_MAXUSER_ADDRESS) {
3115 mpte = pmap_allocpte(pmap, va, M_WAITOK);
3118 pde = pmap_pde(pmap, va);
3119 if ((*pde & PG_PS) != 0)
3120 panic("pmap_enter: attempted pmap_enter on 4MB page");
3121 pte = pmap_pte_quick(pmap, va);
3124 * The page directory table entry is not valid; we need a new PT page.
3127 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
3128 (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
3131 pa = VM_PAGE_TO_PHYS(m);
3134 opa = origpte & PG_FRAME;
3137 * Mapping has not changed, must be protection or wiring change.
3139 if (origpte && (opa == pa)) {
3141 * Wiring change, just update stats. We don't worry about
3142 * wiring PT pages as they remain resident as long as there
3143 * are valid mappings in them. Hence, if a user page is wired,
3144 * the PT page will be also.
3146 if (wired && ((origpte & PG_W) == 0))
3147 pmap->pm_stats.wired_count++;
3148 else if (!wired && (origpte & PG_W))
3149 pmap->pm_stats.wired_count--;
3152 * Remove extra pte reference
3158 * We might be turning off write access to the page,
3159 * so we go ahead and sense modify status.
3161 if (origpte & PG_MANAGED) {
3168 * Mapping has changed, invalidate old range and fall through to
3169 * handle validating new mapping.
3173 pmap->pm_stats.wired_count--;
3174 if (origpte & PG_MANAGED) {
3175 om = PHYS_TO_VM_PAGE(opa);
3176 pmap_remove_entry(pmap, om, va);
3180 KASSERT(mpte->wire_count > 0,
3181 ("pmap_enter: missing reference to page table page,"
3185 pmap->pm_stats.resident_count++;
3188 * Enter on the PV list if part of our managed memory.
3190 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
3191 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
3192 ("pmap_enter: managed mapping within the clean submap"));
3193 pmap_insert_entry(pmap, va, m);
3198 * Increment counters
3201 pmap->pm_stats.wired_count++;
3205 * Now validate mapping with desired protection/wiring.
3207 newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
3208 if ((prot & VM_PROT_WRITE) != 0) {
3210 vm_page_flag_set(m, PG_WRITEABLE);
3213 if ((prot & VM_PROT_EXECUTE) == 0)
3218 if (va < VM_MAXUSER_ADDRESS)
3220 if (pmap == kernel_pmap)
3224 * if the mapping or permission bits are different, we need
3225 * to update the pte.
3227 if ((origpte & ~(PG_M|PG_A)) != newpte) {
3229 if ((access & VM_PROT_WRITE) != 0)
3231 if (origpte & PG_V) {
3233 origpte = pte_load_store(pte, newpte);
3234 if (origpte & PG_A) {
3235 if (origpte & PG_MANAGED)
3236 vm_page_flag_set(om, PG_REFERENCED);
3237 if (opa != VM_PAGE_TO_PHYS(m))
3240 if ((origpte & PG_NX) == 0 &&
3241 (newpte & PG_NX) != 0)
3245 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3246 if ((origpte & PG_MANAGED) != 0)
3248 if ((prot & VM_PROT_WRITE) == 0)
3252 pmap_invalidate_page(pmap, va);
3254 pte_store(pte, newpte);
3258 * If both the page table page and the reservation are fully
3259 * populated, then attempt promotion.
3261 if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
3262 pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
3263 pmap_promote_pde(pmap, pde, va);
3266 vm_page_unlock_queues();
3271 * Tries to create a 2- or 4MB page mapping. Returns TRUE if successful and
3272 * FALSE otherwise. Fails if (1) a page table page cannot be allocated without
3273 * blocking, (2) a mapping already exists at the specified virtual address, or
3274 * (3) a pv entry cannot be allocated without reclaiming another pv entry.
3277 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3279 pd_entry_t *pde, newpde;
3281 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3282 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3283 pde = pmap_pde(pmap, va);
3285 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3286 " in pmap %p", va, pmap);
3289 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
3291 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
3292 newpde |= PG_MANAGED;
3295 * Abort this mapping if its PV entry could not be created.
3297 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
3298 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3299 " in pmap %p", va, pmap);
3304 if ((prot & VM_PROT_EXECUTE) == 0)
3307 if (va < VM_MAXUSER_ADDRESS)
3311 * Increment counters.
3313 pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
3316 * Map the superpage.
3318 pde_store(pde, newpde);
3320 pmap_pde_mappings++;
3321 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3322 " in pmap %p", va, pmap);
3327 * Maps a sequence of resident pages belonging to the same object.
3328 * The sequence begins with the given page m_start. This page is
3329 * mapped at the given virtual address start. Each subsequent page is
3330 * mapped at a virtual address that is offset from start by the same
3331 * amount as the page is offset from m_start within the object. The
3332 * last page in the sequence is the page with the largest offset from
3333 * m_start that can be mapped at a virtual address less than the given
3334 * virtual address end. Not every virtual page between start and end
3335 * is mapped; only those for which a resident page exists with the
3336 * corresponding offset from m_start are mapped.
3339 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3340 vm_page_t m_start, vm_prot_t prot)
3344 vm_pindex_t diff, psize;
3346 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
3347 psize = atop(end - start);
3351 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3352 va = start + ptoa(diff);
3353 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
3354 (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
3355 pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
3356 pmap_enter_pde(pmap, va, m, prot))
3357 m = &m[NBPDR / PAGE_SIZE - 1];
3359 mpte = pmap_enter_quick_locked(pmap, va, m, prot,
3361 m = TAILQ_NEXT(m, listq);
3367 * this code makes some *MAJOR* assumptions:
3368 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
3371 * 4. No page table pages.
3372 * but is *MUCH* faster than pmap_enter...
3376 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3380 (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
3385 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3386 vm_prot_t prot, vm_page_t mpte)
3392 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3393 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
3394 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3395 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3396 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3399 * In the case that a page table page is not
3400 * resident, we are creating it here.
3402 if (va < VM_MAXUSER_ADDRESS) {
3407 * Calculate pagetable page index
3409 ptepindex = va >> PDRSHIFT;
3410 if (mpte && (mpte->pindex == ptepindex)) {
3414 * Get the page directory entry
3416 ptepa = pmap->pm_pdir[ptepindex];
3419 * If the page table page is mapped, we just increment
3420 * the hold count, and activate it.
3425 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
3428 mpte = _pmap_allocpte(pmap, ptepindex,
3439 * This call to vtopte makes the assumption that we are
3440 * entering the page into the current pmap. In order to support
3441 * quick entry into any pmap, one would likely use pmap_pte_quick.
3442 * But that isn't as quick as vtopte.
3454 * Enter on the PV list if part of our managed memory.
3456 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
3457 !pmap_try_insert_pv_entry(pmap, va, m)) {
3460 if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
3461 pmap_invalidate_page(pmap, va);
3462 pmap_free_zero_pages(free);
3471 * Increment counters
3473 pmap->pm_stats.resident_count++;
3475 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
3477 if ((prot & VM_PROT_EXECUTE) == 0)
3482 * Now validate mapping with RO protection
3484 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
3485 pte_store(pte, pa | PG_V | PG_U);
3487 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
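/*
 * Illustrative sketch (not part of the original source): the vtopte()
 * shortcut mentioned in pmap_enter_quick_locked() above relies on the
 * current pmap's page tables being visible as one linear array of PTEs
 * at a fixed kernel virtual address (the recursive mapping).  Under
 * that assumption, the PTE for a virtual address is found by simple
 * indexing; EX_PAGE_SHIFT and the pte_base parameter are hypothetical
 * stand-ins for the real constants.
 */
#define	EX_PAGE_SHIFT	12

static pt_entry_t *
ex_vtopte(pt_entry_t *pte_base, vm_offset_t va)
{

	/* Index the linear PTE window by the virtual page number. */
	return (pte_base + (va >> EX_PAGE_SHIFT));
}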
3492 * Make a temporary mapping for a physical address. This is only intended
3493 * to be used for panic dumps.
3496 pmap_kenter_temporary(vm_paddr_t pa, int i)
3500 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
3501 pmap_kenter(va, pa);
3503 return ((void *)crashdumpmap);
3507 * This code maps large physical mmap regions into the
3508 * processor address space. Note that some shortcuts
3509 * are taken, but the code works.
3512 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3513 vm_pindex_t pindex, vm_size_t size)
3516 vm_paddr_t pa, ptepa;
3520 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
3521 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3522 ("pmap_object_init_pt: non-device object"));
3524 (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
3525 if (!vm_object_populate(object, pindex, pindex + atop(size)))
3527 p = vm_page_lookup(object, pindex);
3528 KASSERT(p->valid == VM_PAGE_BITS_ALL,
3529 ("pmap_object_init_pt: invalid page %p", p));
3530 pat_mode = p->md.pat_mode;
3533 * Abort the mapping if the first page is not physically
3534 * aligned to a 2/4MB page boundary.
3536 ptepa = VM_PAGE_TO_PHYS(p);
3537 if (ptepa & (NBPDR - 1))
3541 * Skip the first page. Abort the mapping if the rest of
3542 * the pages are not physically contiguous or have differing
3543 * memory attributes.
3545 p = TAILQ_NEXT(p, listq);
3546 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
3548 KASSERT(p->valid == VM_PAGE_BITS_ALL,
3549 ("pmap_object_init_pt: invalid page %p", p));
3550 if (pa != VM_PAGE_TO_PHYS(p) ||
3551 pat_mode != p->md.pat_mode)
3553 p = TAILQ_NEXT(p, listq);
3557 * Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and
3558 * "size" is a multiple of 2/4M, adding the PAT setting to
3559 * "pa" will not affect the termination of this loop.
3562 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
3563 size; pa += NBPDR) {
3564 pde = pmap_pde(pmap, addr);
3566 pde_store(pde, pa | PG_PS | PG_M | PG_A |
3567 PG_U | PG_RW | PG_V);
3568 pmap->pm_stats.resident_count += NBPDR /
3570 pmap_pde_mappings++;
3572 /* Else continue on if the PDE is already valid. */
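/*
 * Illustrative sketch (not part of the original source): the comment in
 * pmap_object_init_pt() above claims that OR-ing the PAT/cache bits
 * into "pa" cannot change when the mapping loop terminates.  Because
 * those bits are far smaller than NBPDR and both "ptepa" and "size" are
 * NBPDR multiples, the loop still runs exactly size / NBPDR iterations.
 * ex_count_pde_iters is a hypothetical restatement that only counts.
 */
static unsigned int
ex_count_pde_iters(vm_paddr_t ptepa, vm_size_t size, vm_paddr_t cache_bits)
{
	vm_paddr_t pa;
	unsigned int iters;

	iters = 0;
	for (pa = ptepa | cache_bits; pa < ptepa + size; pa += NBPDR)
		iters++;
	/* Equals size / NBPDR as long as cache_bits < NBPDR. */
	return (iters);
}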
3580 * Routine: pmap_change_wiring
3581 * Function: Change the wiring attribute for a map/virtual-address pair.
3583 * In/out conditions:
3584 * The mapping must already exist in the pmap.
3587 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
3591 boolean_t are_queues_locked;
3593 are_queues_locked = FALSE;
3596 pde = pmap_pde(pmap, va);
3597 if ((*pde & PG_PS) != 0) {
3598 if (!wired != ((*pde & PG_W) == 0)) {
3599 if (!are_queues_locked) {
3600 are_queues_locked = TRUE;
3601 if (!mtx_trylock(&vm_page_queue_mtx)) {
3603 vm_page_lock_queues();
3607 if (!pmap_demote_pde(pmap, pde, va))
3608 panic("pmap_change_wiring: demotion failed");
3612 pte = pmap_pte(pmap, va);
3614 if (wired && !pmap_pte_w(pte))
3615 pmap->pm_stats.wired_count++;
3616 else if (!wired && pmap_pte_w(pte))
3617 pmap->pm_stats.wired_count--;
3620 * Wiring is not a hardware characteristic, so there is no need to invalidate the TLB here.
3623 pmap_pte_set_w(pte, wired);
3624 pmap_pte_release(pte);
3626 if (are_queues_locked)
3627 vm_page_unlock_queues();
3634 * Copy the range specified by src_addr/len
3635 * from the source map to the range dst_addr/len
3636 * in the destination map.
3638 * This routine is only advisory and need not do anything.
3642 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
3643 vm_offset_t src_addr)
3647 vm_offset_t end_addr = src_addr + len;
3650 if (dst_addr != src_addr)
3653 if (!pmap_is_current(src_pmap))
3656 vm_page_lock_queues();
3657 if (dst_pmap < src_pmap) {
3658 PMAP_LOCK(dst_pmap);
3659 PMAP_LOCK(src_pmap);
3661 PMAP_LOCK(src_pmap);
3662 PMAP_LOCK(dst_pmap);
3665 for (addr = src_addr; addr < end_addr; addr = pdnxt) {
3666 pt_entry_t *src_pte, *dst_pte;
3667 vm_page_t dstmpte, srcmpte;
3668 pd_entry_t srcptepaddr;
3671 KASSERT(addr < UPT_MIN_ADDRESS,
3672 ("pmap_copy: invalid to pmap_copy page tables"));
3674 pdnxt = (addr + NBPDR) & ~PDRMASK;
3677 ptepindex = addr >> PDRSHIFT;
3679 srcptepaddr = src_pmap->pm_pdir[ptepindex];
3680 if (srcptepaddr == 0)
3683 if (srcptepaddr & PG_PS) {
3684 if (dst_pmap->pm_pdir[ptepindex] == 0 &&
3685 ((srcptepaddr & PG_MANAGED) == 0 ||
3686 pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
3688 dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
3690 dst_pmap->pm_stats.resident_count +=
3696 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
3697 KASSERT(srcmpte->wire_count > 0,
3698 ("pmap_copy: source page table page is unused"));
3700 if (pdnxt > end_addr)
3703 src_pte = vtopte(addr);
3704 while (addr < pdnxt) {
3708 * we only virtual copy managed pages
3710 if ((ptetemp & PG_MANAGED) != 0) {
3711 dstmpte = pmap_allocpte(dst_pmap, addr,
3713 if (dstmpte == NULL)
3715 dst_pte = pmap_pte_quick(dst_pmap, addr);
3716 if (*dst_pte == 0 &&
3717 pmap_try_insert_pv_entry(dst_pmap, addr,
3718 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
3720 * Clear the wired, modified, and
3721 * accessed (referenced) bits during the copy.
3724 *dst_pte = ptetemp & ~(PG_W | PG_M |
3726 dst_pmap->pm_stats.resident_count++;
3729 if (pmap_unwire_pte_hold(dst_pmap,
3731 pmap_invalidate_page(dst_pmap,
3733 pmap_free_zero_pages(free);
3737 if (dstmpte->wire_count >= srcmpte->wire_count)
3746 vm_page_unlock_queues();
3747 PMAP_UNLOCK(src_pmap);
3748 PMAP_UNLOCK(dst_pmap);
3751 static __inline void
3752 pagezero(void *page)
3754 #if defined(I686_CPU)
3755 if (cpu_class == CPUCLASS_686) {
3756 #if defined(CPU_ENABLE_SSE)
3757 if (cpu_feature & CPUID_SSE2)
3758 sse2_pagezero(page);
3761 i686_pagezero(page);
3764 bzero(page, PAGE_SIZE);
3768 * pmap_zero_page zeros the specified hardware page by mapping
3769 * the page into KVM and using bzero to clear its contents.
3772 pmap_zero_page(vm_page_t m)
3774 struct sysmaps *sysmaps;
3776 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
3777 mtx_lock(&sysmaps->lock);
3778 if (*sysmaps->CMAP2)
3779 panic("pmap_zero_page: CMAP2 busy");
3781 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
3782 pmap_cache_bits(m->md.pat_mode, 0);
3783 invlcaddr(sysmaps->CADDR2);
3784 pagezero(sysmaps->CADDR2);
3785 *sysmaps->CMAP2 = 0;
3787 mtx_unlock(&sysmaps->lock);
3791 * pmap_zero_page_area zeros the specified hardware page by mapping
3792 * the page into KVM and using bzero to clear its contents.
3794 * off and size may not cover an area beyond a single hardware page.
3797 pmap_zero_page_area(vm_page_t m, int off, int size)
3799 struct sysmaps *sysmaps;
3801 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
3802 mtx_lock(&sysmaps->lock);
3803 if (*sysmaps->CMAP2)
3804 panic("pmap_zero_page_area: CMAP2 busy");
3806 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
3807 pmap_cache_bits(m->md.pat_mode, 0);
3808 invlcaddr(sysmaps->CADDR2);
3809 if (off == 0 && size == PAGE_SIZE)
3810 pagezero(sysmaps->CADDR2);
3812 bzero((char *)sysmaps->CADDR2 + off, size);
3813 *sysmaps->CMAP2 = 0;
3815 mtx_unlock(&sysmaps->lock);
3819 * pmap_zero_page_idle zeros the specified hardware page by mapping
3820 * the page into KVM and using bzero to clear its contents. This
3821 * is intended to be called from the vm_pagezero process only and outside of Giant.
3825 pmap_zero_page_idle(vm_page_t m)
3829 panic("pmap_zero_page_idle: CMAP3 busy");
3831 *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
3832 pmap_cache_bits(m->md.pat_mode, 0);
3840 * pmap_copy_page copies the specified (machine independent)
3841 * page by mapping the page into virtual memory and using
3842 * bcopy to copy the page, one machine dependent page at a time.
3846 pmap_copy_page(vm_page_t src, vm_page_t dst)
3848 struct sysmaps *sysmaps;
3850 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
3851 mtx_lock(&sysmaps->lock);
3852 if (*sysmaps->CMAP1)
3853 panic("pmap_copy_page: CMAP1 busy");
3854 if (*sysmaps->CMAP2)
3855 panic("pmap_copy_page: CMAP2 busy");
3857 invlpg((u_int)sysmaps->CADDR1);
3858 invlpg((u_int)sysmaps->CADDR2);
3859 *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
3860 pmap_cache_bits(src->md.pat_mode, 0);
3861 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
3862 pmap_cache_bits(dst->md.pat_mode, 0);
3863 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
3864 *sysmaps->CMAP1 = 0;
3865 *sysmaps->CMAP2 = 0;
3867 mtx_unlock(&sysmaps->lock);
3871 * Returns true if the pmap's pv is one of the first
3872 * 16 pvs linked to from this page. This count may
3873 * be changed upwards or downwards in the future; it
3874 * is only necessary that true be returned for a small
3875 * subset of pmaps for proper page aging.
3878 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3880 struct md_page *pvh;
3884 if (m->flags & PG_FICTITIOUS)
3887 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3888 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3889 if (PV_PMAP(pv) == pmap) {
3897 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3898 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
3899 if (PV_PMAP(pv) == pmap)
3910 * pmap_page_wired_mappings:
3912 * Return the number of managed mappings to the given physical page that are wired.
3916 pmap_page_wired_mappings(vm_page_t m)
3921 if ((m->flags & PG_FICTITIOUS) != 0)
3923 count = pmap_pvh_wired_mappings(&m->md, count);
3924 return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
3928 * pmap_pvh_wired_mappings:
3930 * Return the updated number "count" of managed mappings that are wired.
3933 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
3939 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3941 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
3944 pte = pmap_pte_quick(pmap, pv->pv_va);
3945 if ((*pte & PG_W) != 0)
3954 * Returns TRUE if the given page is mapped individually or as part of
3955 * a 4mpage. Otherwise, returns FALSE.
3958 pmap_page_is_mapped(vm_page_t m)
3960 struct md_page *pvh;
3962 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
3964 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3965 if (TAILQ_EMPTY(&m->md.pv_list)) {
3966 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3967 return (!TAILQ_EMPTY(&pvh->pv_list));
3973 * Remove all pages from the specified address space;
3974 * this aids process exit speeds. Also, this code
3975 * is special cased for current process only, but
3976 * can have the more generic (and slightly slower)
3977 * mode enabled. This is much faster than pmap_remove
3978 * in the case of running down an entire address space.
3981 pmap_remove_pages(pmap_t pmap)
3983 pt_entry_t *pte, tpte;
3984 vm_page_t free = NULL;
3985 vm_page_t m, mpte, mt;
3987 struct md_page *pvh;
3988 struct pv_chunk *pc, *npc;
3991 uint32_t inuse, bitmask;
3994 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
3995 printf("warning: pmap_remove_pages called with non-current pmap\n");
3998 vm_page_lock_queues();
4001 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4003 for (field = 0; field < _NPCM; field++) {
4004 inuse = (~(pc->pc_map[field])) & pc_freemask[field];
4005 while (inuse != 0) {
4007 bitmask = 1UL << bit;
4008 idx = field * 32 + bit;
4009 pv = &pc->pc_pventry[idx];
4012 pte = pmap_pde(pmap, pv->pv_va);
4014 if ((tpte & PG_PS) == 0) {
4015 pte = vtopte(pv->pv_va);
4016 tpte = *pte & ~PG_PTE_PAT;
4021 "TPTE at %p IS ZERO @ VA %08x\n",
4027 * We cannot remove wired pages from a process' mapping at this time
4034 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
4035 KASSERT(m->phys_addr == (tpte & PG_FRAME),
4036 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4037 m, (uintmax_t)m->phys_addr,
4040 KASSERT(m < &vm_page_array[vm_page_array_size],
4041 ("pmap_remove_pages: bad tpte %#jx",
4047 * Update the vm_page_t clean/reference bits.
4049 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
4050 if ((tpte & PG_PS) != 0) {
4051 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4058 PV_STAT(pv_entry_frees++);
4059 PV_STAT(pv_entry_spare++);
4061 pc->pc_map[field] |= bitmask;
4062 if ((tpte & PG_PS) != 0) {
4063 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
4064 pvh = pa_to_pvh(tpte & PG_PS_FRAME);
4065 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
4066 if (TAILQ_EMPTY(&pvh->pv_list)) {
4067 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4068 if (TAILQ_EMPTY(&mt->md.pv_list))
4069 vm_page_flag_clear(mt, PG_WRITEABLE);
4071 mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
4073 pmap_remove_pt_page(pmap, mpte);
4074 pmap->pm_stats.resident_count--;
4075 KASSERT(mpte->wire_count == NPTEPG,
4076 ("pmap_remove_pages: pte page wire count error"));
4077 mpte->wire_count = 0;
4078 pmap_add_delayed_free_list(mpte, &free, FALSE);
4079 atomic_subtract_int(&cnt.v_wire_count, 1);
4082 pmap->pm_stats.resident_count--;
4083 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
4084 if (TAILQ_EMPTY(&m->md.pv_list)) {
4085 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4086 if (TAILQ_EMPTY(&pvh->pv_list))
4087 vm_page_flag_clear(m, PG_WRITEABLE);
4089 pmap_unuse_pt(pmap, pv->pv_va, &free);
4094 PV_STAT(pv_entry_spare -= _NPCPV);
4095 PV_STAT(pc_chunk_count--);
4096 PV_STAT(pc_chunk_frees++);
4097 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4098 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
4099 pmap_qremove((vm_offset_t)pc, 1);
4100 vm_page_unwire(m, 0);
4102 pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
4106 pmap_invalidate_all(pmap);
4107 vm_page_unlock_queues();
4109 pmap_free_zero_pages(free);
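/*
 * Illustrative sketch (not part of the original source): the pv chunk
 * scan in pmap_remove_pages() above walks a 32-bit "inuse" mask,
 * extracting one set bit at a time to locate each allocated pv entry.
 * ex_foreach_setbit shows the same find-lowest-set-bit iteration over a
 * plain mask; the callback name and signature are hypothetical.
 */
static void
ex_foreach_setbit(uint32_t inuse, void (*visit)(int bit))
{
	uint32_t bitmask;
	int bit;

	while (inuse != 0) {
		/* Isolate the lowest set bit and compute its index. */
		bitmask = inuse & (~inuse + 1);
		for (bit = 0; (bitmask >> bit) != 1; bit++)
			;
		visit(bit);
		inuse &= ~bitmask;	/* clear it and continue */
	}
}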
4115 * Return whether or not the specified physical page was modified
4116 * in any physical maps.
4119 pmap_is_modified(vm_page_t m)
4122 if (m->flags & PG_FICTITIOUS)
4124 if (pmap_is_modified_pvh(&m->md))
4126 return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4130 * Returns TRUE if any of the given mappings were used to modify
4131 * physical memory. Otherwise, returns FALSE. Both page and 2mpage
4132 * mappings are supported.
4135 pmap_is_modified_pvh(struct md_page *pvh)
4142 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4145 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
4148 pte = pmap_pte_quick(pmap, pv->pv_va);
4149 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
4159 * pmap_is_prefaultable:
4161 * Return whether or not the specified virtual address is eligible for prefaulting.
4165 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4173 pde = pmap_pde(pmap, addr);
4174 if (*pde != 0 && (*pde & PG_PS) == 0) {
4183 * Clear the write and modified bits in each of the given page's mappings.
4186 pmap_remove_write(vm_page_t m)
4188 struct md_page *pvh;
4189 pv_entry_t next_pv, pv;
4192 pt_entry_t oldpte, *pte;
4195 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4196 if ((m->flags & PG_FICTITIOUS) != 0 ||
4197 (m->flags & PG_WRITEABLE) == 0)
4200 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4201 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
4205 pde = pmap_pde(pmap, va);
4206 if ((*pde & PG_RW) != 0)
4207 (void)pmap_demote_pde(pmap, pde, va);
4210 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4213 pde = pmap_pde(pmap, pv->pv_va);
4214 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found"
4215 " a 4mpage in page %p's pv list", m));
4216 pte = pmap_pte_quick(pmap, pv->pv_va);
4219 if ((oldpte & PG_RW) != 0) {
4221 * Regardless of whether a pte is 32 or 64 bits
4222 * in size, PG_RW and PG_M are among the least
4223 * significant 32 bits.
4225 if (!atomic_cmpset_int((u_int *)pte, oldpte,
4226 oldpte & ~(PG_RW | PG_M)))
4228 if ((oldpte & PG_M) != 0)
4230 pmap_invalidate_page(pmap, pv->pv_va);
4234 vm_page_flag_clear(m, PG_WRITEABLE);
4239 * pmap_ts_referenced:
4241 * Return a count of reference bits for a page, clearing those bits.
4242 * It is not necessary for every reference bit to be cleared, but it
4243 * is necessary that 0 only be returned when there are truly no
4244 * reference bits set.
4246 * XXX: The exact number of bits to check and clear is a matter that
4247 * should be tested and standardized at some point in the future for
4248 * optimal aging of shared pages.
4251 pmap_ts_referenced(vm_page_t m)
4253 struct md_page *pvh;
4254 pv_entry_t pv, pvf, pvn;
4256 pd_entry_t oldpde, *pde;
4261 if (m->flags & PG_FICTITIOUS)
4264 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4265 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4266 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
4270 pde = pmap_pde(pmap, va);
4272 if ((oldpde & PG_A) != 0) {
4273 if (pmap_demote_pde(pmap, pde, va)) {
4274 if ((oldpde & PG_W) == 0) {
4276 * Remove the mapping to a single page
4277 * so that a subsequent access may
4278 * repromote. Since the underlying
4279 * page table page is fully populated,
4280 * this removal never frees a page table page.
4283 va += VM_PAGE_TO_PHYS(m) - (oldpde &
4285 pmap_remove_page(pmap, va, NULL);
4296 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4299 pvn = TAILQ_NEXT(pv, pv_list);
4300 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
4301 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
4304 pde = pmap_pde(pmap, pv->pv_va);
4305 KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
4306 " found a 4mpage in page %p's pv list", m));
4307 pte = pmap_pte_quick(pmap, pv->pv_va);
4308 if ((*pte & PG_A) != 0) {
4309 atomic_clear_int((u_int *)pte, PG_A);
4310 pmap_invalidate_page(pmap, pv->pv_va);
4316 } while ((pv = pvn) != NULL && pv != pvf);
4323 * Clear the modify bits on the specified physical page.
4326 pmap_clear_modify(vm_page_t m)
4328 struct md_page *pvh;
4329 pv_entry_t next_pv, pv;
4331 pd_entry_t oldpde, *pde;
4332 pt_entry_t oldpte, *pte;
4335 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4336 if ((m->flags & PG_FICTITIOUS) != 0)
4339 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4340 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
4344 pde = pmap_pde(pmap, va);
4346 if ((oldpde & PG_RW) != 0) {
4347 if (pmap_demote_pde(pmap, pde, va)) {
4348 if ((oldpde & PG_W) == 0) {
4350 * Write protect the mapping to a
4351 * single page so that a subsequent
4352 * write access may repromote.
4354 va += VM_PAGE_TO_PHYS(m) - (oldpde &
4356 pte = pmap_pte_quick(pmap, va);
4358 if ((oldpte & PG_V) != 0) {
4360 * Regardless of whether a pte is 32 or 64 bits
4361 * in size, PG_RW and PG_M are among the least
4362 * significant 32 bits.
4364 while (!atomic_cmpset_int((u_int *)pte,
4366 oldpte & ~(PG_M | PG_RW)))
4369 pmap_invalidate_page(pmap, va);
4376 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4379 pde = pmap_pde(pmap, pv->pv_va);
4380 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
4381 " a 4mpage in page %p's pv list", m));
4382 pte = pmap_pte_quick(pmap, pv->pv_va);
4383 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
4385 * Regardless of whether a pte is 32 or 64 bits
4386 * in size, PG_M is among the least significant
4389 atomic_clear_int((u_int *)pte, PG_M);
4390 pmap_invalidate_page(pmap, pv->pv_va);
4398 * pmap_clear_reference:
4400 * Clear the reference bit on the specified physical page.
4403 pmap_clear_reference(vm_page_t m)
4405 struct md_page *pvh;
4406 pv_entry_t next_pv, pv;
4408 pd_entry_t oldpde, *pde;
4412 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4413 if ((m->flags & PG_FICTITIOUS) != 0)
4416 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4417 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
4421 pde = pmap_pde(pmap, va);
4423 if ((oldpde & PG_A) != 0) {
4424 if (pmap_demote_pde(pmap, pde, va)) {
4426 * Remove the mapping to a single page so
4427 * that a subsequent access may repromote.
4428 * Since the underlying page table page is
4429 * fully populated, this removal never frees
4430 * a page table page.
4432 va += VM_PAGE_TO_PHYS(m) - (oldpde &
4434 pmap_remove_page(pmap, va, NULL);
4439 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4442 pde = pmap_pde(pmap, pv->pv_va);
4443 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
4444 " a 4mpage in page %p's pv list", m));
4445 pte = pmap_pte_quick(pmap, pv->pv_va);
4446 if ((*pte & PG_A) != 0) {
4448 * Regardless of whether a pte is 32 or 64 bits
4449 * in size, PG_A is among the least significant
4452 atomic_clear_int((u_int *)pte, PG_A);
4453 pmap_invalidate_page(pmap, pv->pv_va);
4461 * Miscellaneous support routines follow
4464 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
4465 static __inline void
4466 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
4471 * The cache mode bits are all in the low 32-bits of the
4472 * PTE, so we can just spin on updating the low 32-bits.
4475 opte = *(u_int *)pte;
4476 npte = (opte & ~PG_PTE_CACHE) | cache_bits;
4478 } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
4481 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */
4482 static __inline void
4483 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
4488 * The cache mode bits are all in the low 32-bits of the
4489 * PDE, so we can just spin on updating the low 32-bits.
4492 opde = *(u_int *)pde;
4493 npde = (opde & ~PG_PDE_CACHE) | cache_bits;
4495 } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
4499 * Map a set of physical memory pages into the kernel virtual
4500 * address space. Return a pointer to where it is mapped. This
4501 * routine is intended to be used for mapping device memory, NOT real memory.
4505 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
4507 vm_offset_t va, offset;
4510 offset = pa & PAGE_MASK;
4511 size = roundup(offset + size, PAGE_SIZE);
4514 if (pa < KERNLOAD && pa + size <= KERNLOAD)
4517 va = kmem_alloc_nofault(kernel_map, size);
4519 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
4521 for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
4522 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
4523 pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
4524 pmap_invalidate_cache_range(va, va + size);
4525 return ((void *)(va + offset));
4529 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
4532 return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
4536 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
4539 return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
4543 pmap_unmapdev(vm_offset_t va, vm_size_t size)
4545 vm_offset_t base, offset, tmpva;
4547 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
4549 base = trunc_page(va);
4550 offset = va & PAGE_MASK;
4551 size = roundup(offset + size, PAGE_SIZE);
4552 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
4553 pmap_kremove(tmpva);
4554 pmap_invalidate_range(kernel_pmap, va, tmpva);
4555 kmem_free(kernel_map, base, size);
4559 * Sets the memory attribute for the specified page.
4562 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
4564 struct sysmaps *sysmaps;
4565 vm_offset_t sva, eva;
4567 m->md.pat_mode = ma;
4568 if ((m->flags & PG_FICTITIOUS) != 0)
4572 * If "m" is a normal page, flush it from the cache.
4573 * See pmap_invalidate_cache_range().
4575 * First, try to find an existing mapping of the page by sf
4576 * buffer. sf_buf_invalidate_cache() modifies mapping and
4577 * flushes the cache.
4579 if (sf_buf_invalidate_cache(m))
4583 * If page is not mapped by sf buffer, but CPU does not
4584 * support self snoop, map the page transient and do
4585 * invalidation. In the worst case, whole cache is flushed by
4586 * pmap_invalidate_cache_range().
4588 if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) {
4589 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
4590 mtx_lock(&sysmaps->lock);
4591 if (*sysmaps->CMAP2)
4592 panic("pmap_page_set_memattr: CMAP2 busy");
4594 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
4595 PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
4596 invlcaddr(sysmaps->CADDR2);
4597 sva = (vm_offset_t)sysmaps->CADDR2;
4598 eva = sva + PAGE_SIZE;
4600 sva = eva = 0; /* gcc */
4601 pmap_invalidate_cache_range(sva, eva);
4603 *sysmaps->CMAP2 = 0;
4605 mtx_unlock(&sysmaps->lock);
4610 * Changes the specified virtual address range's memory type to that given by
4611 * the parameter "mode". The specified virtual address range must be
4612 * completely contained within the kernel map.
4614 * Returns zero if the change completed successfully, and either EINVAL or
4615 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
4616 * of the virtual address range was not mapped, and ENOMEM is returned if
4617 * there was insufficient memory available to complete the change.
4620 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
4622 vm_offset_t base, offset, tmpva;
4625 int cache_bits_pte, cache_bits_pde;
4628 base = trunc_page(va);
4629 offset = va & PAGE_MASK;
4630 size = roundup(offset + size, PAGE_SIZE);
4633 * Only supported on kernel virtual addresses above the recursive map.
4635 if (base < VM_MIN_KERNEL_ADDRESS)
4638 cache_bits_pde = pmap_cache_bits(mode, 1);
4639 cache_bits_pte = pmap_cache_bits(mode, 0);
4643 * Pages that aren't mapped aren't supported. Also break down
4644 * 2/4MB pages into 4KB pages if required.
4646 PMAP_LOCK(kernel_pmap);
4647 for (tmpva = base; tmpva < base + size; ) {
4648 pde = pmap_pde(kernel_pmap, tmpva);
4650 PMAP_UNLOCK(kernel_pmap);
4655 * If the current 2/4MB page already has
4656 * the required memory type, then we need not
4657 * demote this page. Just increment tmpva to
4658 * the next 2/4MB page frame.
4660 if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
4661 tmpva = trunc_4mpage(tmpva) + NBPDR;
4666 * If the current offset aligns with a 2/4MB
4667 * page frame and there is at least 2/4MB left
4668 * within the range, then we need not break
4669 * down this page into 4KB pages.
4671 if ((tmpva & PDRMASK) == 0 &&
4672 tmpva + PDRMASK < base + size) {
4676 if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
4677 PMAP_UNLOCK(kernel_pmap);
4681 pte = vtopte(tmpva);
4683 PMAP_UNLOCK(kernel_pmap);
4688 PMAP_UNLOCK(kernel_pmap);
4691 * Ok, all the pages exist, so run through them updating their
4692 * cache mode if required.
4694 for (tmpva = base; tmpva < base + size; ) {
4695 pde = pmap_pde(kernel_pmap, tmpva);
4697 if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
4698 pmap_pde_attr(pde, cache_bits_pde);
4701 tmpva = trunc_4mpage(tmpva) + NBPDR;
4703 pte = vtopte(tmpva);
4704 if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
4705 pmap_pte_attr(pte, cache_bits_pte);
4713 * Flush the CPU caches to make sure that no stale data remains
4714 * cached for the range whose attributes were just changed.
4717 pmap_invalidate_range(kernel_pmap, base, tmpva);
4718 pmap_invalidate_cache_range(base, tmpva);
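/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * driver-style caller of pmap_mapdev_attr() and pmap_change_attr().
 * Per the comment above, the range handed to pmap_change_attr() must
 * lie within the kernel map; EINVAL means part of the range was
 * unmapped and ENOMEM means a needed page table page could not be
 * allocated.  The device address and size here are made up.
 */
static void *
ex_map_uncacheable(vm_paddr_t dev_pa, vm_size_t dev_size)
{
	void *va;

	va = pmap_mapdev_attr(dev_pa, dev_size, PAT_UNCACHEABLE);
	/*
	 * Later, the same range could be switched to write-back with
	 *   pmap_change_attr((vm_offset_t)va, dev_size, PAT_WRITE_BACK);
	 * checking for a 0 (success), EINVAL, or ENOMEM return value.
	 */
	return (va);
}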
4724 * perform the pmap work for mincore
4727 pmap_mincore(pmap_t pmap, vm_offset_t addr)
4730 pt_entry_t *ptep, pte;
4736 pdep = pmap_pde(pmap, addr);
4738 if (*pdep & PG_PS) {
4740 val = MINCORE_SUPER;
4741 /* Compute the physical address of the 4KB page. */
4742 pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
4745 ptep = pmap_pte(pmap, addr);
4747 pmap_pte_release(ptep);
4748 pa = pte & PG_FRAME;
4757 val |= MINCORE_INCORE;
4758 if ((pte & PG_MANAGED) == 0)
4761 m = PHYS_TO_VM_PAGE(pa);
4766 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
4767 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
4770 * Modified by someone else
4772 vm_page_lock_queues();
4773 if (m->dirty || pmap_is_modified(m))
4774 val |= MINCORE_MODIFIED_OTHER;
4775 vm_page_unlock_queues();
4781 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
4784 * Referenced by someone else
4786 vm_page_lock_queues();
4787 if ((m->flags & PG_REFERENCED) ||
4788 pmap_ts_referenced(m)) {
4789 val |= MINCORE_REFERENCED_OTHER;
4790 vm_page_flag_set(m, PG_REFERENCED);
4792 vm_page_unlock_queues();
4799 pmap_activate(struct thread *td)
4801 pmap_t pmap, oldpmap;
4805 pmap = vmspace_pmap(td->td_proc->p_vmspace);
4806 oldpmap = PCPU_GET(curpmap);
4808 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
4809 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
4811 oldpmap->pm_active &= ~1;
4812 pmap->pm_active |= 1;
4815 cr3 = vtophys(pmap->pm_pdpt);
4817 cr3 = vtophys(pmap->pm_pdir);
4820 * pmap_activate is for the current thread on the current cpu
4822 td->td_pcb->pcb_cr3 = cr3;
4824 PCPU_SET(curpmap, pmap);
4829 * Increase the starting virtual address of the given mapping if a
4830 * different alignment might result in more superpage mappings.
4833 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
4834 vm_offset_t *addr, vm_size_t size)
4836 vm_offset_t superpage_offset;
4840 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
4841 offset += ptoa(object->pg_color);
4842 superpage_offset = offset & PDRMASK;
4843 if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
4844 (*addr & PDRMASK) == superpage_offset)
4846 if ((*addr & PDRMASK) < superpage_offset)
4847 *addr = (*addr & ~PDRMASK) + superpage_offset;
4849 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
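/*
 * Illustrative sketch (not part of the original source): a worked
 * example of the adjustment above, assuming 4MB superpages
 * (NBPDR = 0x400000, PDRMASK = 0x3fffff).  If the object offset is
 * 0x00253000, then superpage_offset = 0x253000.  For a proposed *addr
 * of 0x30100000, (*addr & PDRMASK) = 0x100000 is below the superpage
 * offset, so the address is advanced to 0x30253000; the mapping then
 * starts at the same offset within a superpage as the object does,
 * which is what makes later promotions possible.  ex_align_superpage
 * is a hypothetical restatement of the final adjustment.
 */
static vm_offset_t
ex_align_superpage(vm_offset_t addr, vm_offset_t superpage_offset)
{

	if ((addr & PDRMASK) == superpage_offset)
		return (addr);
	if ((addr & PDRMASK) < superpage_offset)
		return ((addr & ~PDRMASK) + superpage_offset);
	return (((addr + PDRMASK) & ~PDRMASK) + superpage_offset);
}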
4853 #if defined(PMAP_DEBUG)
4854 pmap_pid_dump(int pid)
4861 sx_slock(&allproc_lock);
4862 FOREACH_PROC_IN_SYSTEM(p) {
4863 if (p->p_pid != pid)
4869 pmap = vmspace_pmap(p->p_vmspace);
4870 for (i = 0; i < NPDEPTD; i++) {
4873 vm_offset_t base = i << PDRSHIFT;
4875 pde = &pmap->pm_pdir[i];
4876 if (pde && pmap_pde_v(pde)) {
4877 for (j = 0; j < NPTEPG; j++) {
4878 vm_offset_t va = base + (j << PAGE_SHIFT);
4879 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
4884 sx_sunlock(&allproc_lock);
4887 pte = pmap_pte(pmap, va);
4888 if (pte && pmap_pte_v(pte)) {
4892 m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
4893 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
4894 va, pa, m->hold_count, m->wire_count, m->flags);
4909 sx_sunlock(&allproc_lock);
4916 static void pads(pmap_t pm);
4917 void pmap_pvdump(vm_offset_t pa);
4919 /* print the address space of a pmap */
4927 if (pm == kernel_pmap)
4929 for (i = 0; i < NPDEPTD; i++)
4931 for (j = 0; j < NPTEPG; j++) {
4932 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
4933 if (pm == kernel_pmap && va < KERNBASE)
4935 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
4937 ptep = pmap_pte(pm, va);
4938 if (pmap_pte_v(ptep))
4939 printf("%x:%x ", va, *ptep);
4945 pmap_pvdump(vm_paddr_t pa)
4951 printf("pa %x", pa);
4952 m = PHYS_TO_VM_PAGE(pa);
4953 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4955 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);