2 * SPDX-License-Identifier: BSD-4-Clause
4 * Copyright (c) 1991 Regents of the University of California.
6 * Copyright (c) 1994 John S. Dyson
8 * Copyright (c) 1994 David Greenman
10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11 * All rights reserved.
13 * This code is derived from software contributed to Berkeley by
14 * the Systems Programming Group of the University of Utah Computer
15 * Science Department and William Jolitz of UUNET Technologies Inc.
17 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. All advertising materials mentioning features or use of this software
26 * must display the following acknowledgement:
27 * This product includes software developed by the University of
28 * California, Berkeley and its contributors.
29 * 4. Neither the name of the University nor the names of its contributors
30 * may be used to endorse or promote products derived from this software
31 * without specific prior written permission.
33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
45 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
48 * Copyright (c) 2003 Networks Associates Technology, Inc.
49 * All rights reserved.
50 * Copyright (c) 2018 The FreeBSD Foundation
51 * All rights reserved.
53 * This software was developed for the FreeBSD Project by Jake Burkholder,
54 * Safeport Network Services, and Network Associates Laboratories, the
55 * Security Research Division of Network Associates, Inc. under
56 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
57 * CHATS research program.
59 * Portions of this software were developed by
60 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
61 * the FreeBSD Foundation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
66 * 1. Redistributions of source code must retain the above copyright
67 * notice, this list of conditions and the following disclaimer.
68 * 2. Redistributions in binary form must reproduce the above copyright
69 * notice, this list of conditions and the following disclaimer in the
70 * documentation and/or other materials provided with the distribution.
72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
85 #include <sys/cdefs.h>
86 __FBSDID("$FreeBSD$");
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/kernel.h>
97 #include <sys/vmmeter.h>
98 #include <sys/sysctl.h>
99 #include <machine/bootinfo.h>
100 #include <machine/cpu.h>
101 #include <machine/cputypes.h>
102 #include <machine/md_var.h>
105 #include <machine/intr_machdep.h>
106 #include <x86/apicvar.h>
108 #include <x86/ifunc.h>
110 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
111 "VM/pmap parameters");
113 #include <machine/vmparam.h>
115 #include <vm/vm_page.h>
117 #include <machine/pmap_base.h>
119 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
120 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
122 int unmapped_buf_allowed = 1;
126 u_long physfree; /* phys addr of next free page */
127 u_long vm86phystk; /* PA of vm86/bios stack */
128 u_long vm86paddr; /* address of vm86 region */
129 int vm86pa; /* phys addr of vm86 region */
130 u_long KERNend; /* phys addr end of kernel (just after bss) */
131 u_long KPTphys; /* phys addr of kernel page tables */
133 vm_offset_t kernel_vm_end;
135 int i386_pmap_VM_NFREEORDER;
136 int i386_pmap_VM_LEVEL_0_ORDER;
137 int i386_pmap_PDRSHIFT;
140 SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
142 "Is page attribute table fully functional?");
144 int pg_ps_enabled = 1;
145 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
147 "Are large page mappings enabled?");
149 int pv_entry_max = 0;
150 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
152 "Max number of PV entries");
154 int pv_entry_count = 0;
155 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
157 "Current number of pv entries");
159 #ifndef PMAP_SHPGPERPROC
160 #define PMAP_SHPGPERPROC 200
163 int shpgperproc = PMAP_SHPGPERPROC;
164 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
166 "Page share factor per proc");
168 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
169 "2/4MB page mapping counters");
171 u_long pmap_pde_demotions;
172 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
173 &pmap_pde_demotions, 0,
174 "2/4MB page demotions");
176 u_long pmap_pde_mappings;
177 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
178 &pmap_pde_mappings, 0,
179 "2/4MB page mappings");
181 u_long pmap_pde_p_failures;
182 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
183 &pmap_pde_p_failures, 0,
184 "2/4MB page promotion failures");
186 u_long pmap_pde_promotions;
187 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
188 &pmap_pde_promotions, 0,
189 "2/4MB page promotions");
193 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
195 "Number of times pmap_pte_quick changed CPU with same PMAP1");
199 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
201 "Number of times pmap_pte_quick changed PMAP1");
203 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
205 "Number of times pmap_pte_quick didn't change PMAP1");
208 kvm_size(SYSCTL_HANDLER_ARGS)
212 ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
213 return (sysctl_handle_long(oidp, &ksize, 0, req));
215 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
216 0, 0, kvm_size, "IU",
220 kvm_free(SYSCTL_HANDLER_ARGS)
224 kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
225 return (sysctl_handle_long(oidp, &kfree, 0, req));
227 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
228 0, 0, kvm_free, "IU",
229 "Amount of KVM free");
232 int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
233 long pv_entry_frees, pv_entry_allocs;
236 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
238 "Current number of pv entry chunks");
239 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
241 "Current number of pv entry chunks allocated");
242 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
244 "Current number of pv entry chunks frees");
245 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
246 &pc_chunk_tryfail, 0,
247 "Number of times tried to get a chunk page but failed.");
248 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
250 "Current number of pv entry frees");
251 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
253 "Current number of pv entry allocs");
254 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
256 "Current number of spare pv entries");
259 struct pmap kernel_pmap_store;
260 static struct pmap_methods *pmap_methods_ptr;
263 sysctl_kmaps(SYSCTL_HANDLER_ARGS)
265 return (pmap_methods_ptr->pm_sysctl_kmaps(oidp, arg1, arg2, req));
267 SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
268 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
269 NULL, 0, sysctl_kmaps, "A",
270 "Dump kernel address layout");
273 * Initialize a vm_page's machine-dependent fields.
276 pmap_page_init(vm_page_t m)
279 TAILQ_INIT(&m->md.pv_list);
280 m->md.pat_mode = PAT_WRITE_BACK;
290 static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
292 static void pmap_invalidate_cache_range_all(vm_offset_t sva,
296 pmap_flush_page(vm_page_t m)
299 pmap_methods_ptr->pm_flush_page(m);
302 DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t))
305 if ((cpu_feature & CPUID_SS) != 0)
306 return (pmap_invalidate_cache_range_selfsnoop);
307 if ((cpu_feature & CPUID_CLFSH) != 0)
308 return (pmap_force_invalidate_cache_range);
309 return (pmap_invalidate_cache_range_all);
312 #define PMAP_CLFLUSH_THRESHOLD (2 * 1024 * 1024)
315 pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
318 KASSERT((sva & PAGE_MASK) == 0,
319 ("pmap_invalidate_cache_range: sva not page-aligned"));
320 KASSERT((eva & PAGE_MASK) == 0,
321 ("pmap_invalidate_cache_range: eva not page-aligned"));
325 pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
328 pmap_invalidate_cache_range_check_align(sva, eva);
332 pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
335 sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
336 if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
338 * The supplied range is bigger than 2MB.
339 * Globally invalidate cache.
341 pmap_invalidate_cache();
347 * XXX: Some CPUs fault, hang, or trash the local APIC
348 * registers if we use CLFLUSH on the local APIC
349 * range. The local APIC is always uncached, so we
350 * don't need to flush for that range anyway.
352 if (pmap_kextract(sva) == lapic_paddr)
356 if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
358 * Do per-cache line flush. Use the sfence
359 * instruction to insure that previous stores are
360 * included in the write-back. The processor
361 * propagates flush to other processors in the cache
365 for (; sva < eva; sva += cpu_clflush_line_size)
370 * Writes are ordered by CLFLUSH on Intel CPUs.
372 if (cpu_vendor_id != CPU_VENDOR_INTEL)
374 for (; sva < eva; sva += cpu_clflush_line_size)
376 if (cpu_vendor_id != CPU_VENDOR_INTEL)
382 pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
385 pmap_invalidate_cache_range_check_align(sva, eva);
386 pmap_invalidate_cache();
390 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
394 if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
395 (cpu_feature & CPUID_CLFSH) == 0) {
396 pmap_invalidate_cache();
398 for (i = 0; i < count; i++)
399 pmap_flush_page(pages[i]);
404 pmap_ksetrw(vm_offset_t va)
407 pmap_methods_ptr->pm_ksetrw(va);
411 pmap_remap_lower(bool enable)
414 pmap_methods_ptr->pm_remap_lower(enable);
418 pmap_remap_lowptdi(bool enable)
421 pmap_methods_ptr->pm_remap_lowptdi(enable);
425 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
426 vm_offset_t *addr, vm_size_t size)
429 return (pmap_methods_ptr->pm_align_superpage(object, offset,
434 pmap_quick_enter_page(vm_page_t m)
437 return (pmap_methods_ptr->pm_quick_enter_page(m));
441 pmap_quick_remove_page(vm_offset_t addr)
444 return (pmap_methods_ptr->pm_quick_remove_page(addr));
448 pmap_trm_alloc(size_t size, int flags)
451 return (pmap_methods_ptr->pm_trm_alloc(size, flags));
455 pmap_trm_free(void *addr, size_t size)
458 pmap_methods_ptr->pm_trm_free(addr, size);
462 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
467 pmap_get_map_low(void)
470 return (pmap_methods_ptr->pm_get_map_low());
474 pmap_get_vm_maxuser_address(void)
477 return (pmap_methods_ptr->pm_get_vm_maxuser_address());
481 pmap_kextract(vm_offset_t va)
484 return (pmap_methods_ptr->pm_kextract(va));
488 pmap_pg_frame(vm_paddr_t pa)
491 return (pmap_methods_ptr->pm_pg_frame(pa));
495 pmap_sf_buf_map(struct sf_buf *sf)
498 pmap_methods_ptr->pm_sf_buf_map(sf);
502 pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
505 pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
512 return (pmap_methods_ptr->pm_get_kcr3());
516 pmap_get_cr3(pmap_t pmap)
519 return (pmap_methods_ptr->pm_get_cr3(pmap));
523 pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
526 return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
530 pmap_basemem_setup(u_int basemem)
533 pmap_methods_ptr->pm_basemem_setup(basemem);
540 pmap_methods_ptr->pm_set_nx();
544 pmap_bios16_enter(void)
547 return (pmap_methods_ptr->pm_bios16_enter());
551 pmap_bios16_leave(void *handle)
554 pmap_methods_ptr->pm_bios16_leave(handle);
558 pmap_bootstrap(vm_paddr_t firstaddr)
561 pmap_methods_ptr->pm_bootstrap(firstaddr);
565 pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
568 return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
572 pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
575 return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
579 pmap_ps_enabled(pmap_t pmap)
582 return (pmap_methods_ptr->pm_ps_enabled(pmap));
586 pmap_pinit0(pmap_t pmap)
589 pmap_methods_ptr->pm_pinit0(pmap);
593 pmap_pinit(pmap_t pmap)
596 return (pmap_methods_ptr->pm_pinit(pmap));
600 pmap_activate(struct thread *td)
603 pmap_methods_ptr->pm_activate(td);
607 pmap_activate_boot(pmap_t pmap)
610 pmap_methods_ptr->pm_activate_boot(pmap);
614 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
617 pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
621 pmap_clear_modify(vm_page_t m)
624 pmap_methods_ptr->pm_clear_modify(m);
628 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
631 return (pmap_methods_ptr->pm_change_attr(va, size, mode));
635 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
638 return (pmap_methods_ptr->pm_mincore(pmap, addr, pap));
642 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
643 vm_offset_t src_addr)
646 pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
650 pmap_copy_page(vm_page_t src, vm_page_t dst)
653 pmap_methods_ptr->pm_copy_page(src, dst);
657 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
658 vm_offset_t b_offset, int xfersize)
661 pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
665 pmap_zero_page(vm_page_t m)
668 pmap_methods_ptr->pm_zero_page(m);
672 pmap_zero_page_area(vm_page_t m, int off, int size)
675 pmap_methods_ptr->pm_zero_page_area(m, off, size);
679 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
680 u_int flags, int8_t psind)
683 return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
687 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
688 vm_page_t m_start, vm_prot_t prot)
691 pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
695 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
698 pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
702 pmap_kenter_temporary(vm_paddr_t pa, int i)
705 return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
709 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
710 vm_pindex_t pindex, vm_size_t size)
713 pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
717 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
720 pmap_methods_ptr->pm_unwire(pmap, sva, eva);
724 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
727 return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
731 pmap_page_wired_mappings(vm_page_t m)
734 return (pmap_methods_ptr->pm_page_wired_mappings(m));
738 pmap_page_is_mapped(vm_page_t m)
741 return (pmap_methods_ptr->pm_page_is_mapped(m));
745 pmap_remove_pages(pmap_t pmap)
748 pmap_methods_ptr->pm_remove_pages(pmap);
752 pmap_is_modified(vm_page_t m)
755 return (pmap_methods_ptr->pm_is_modified(m));
759 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
762 return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
766 pmap_is_referenced(vm_page_t m)
769 return (pmap_methods_ptr->pm_is_referenced(m));
773 pmap_remove_write(vm_page_t m)
776 pmap_methods_ptr->pm_remove_write(m);
780 pmap_ts_referenced(vm_page_t m)
783 return (pmap_methods_ptr->pm_ts_referenced(m));
787 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
790 return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
795 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
798 return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
803 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
806 return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
810 pmap_unmapdev(vm_offset_t va, vm_size_t size)
813 pmap_methods_ptr->pm_unmapdev(va, size);
817 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
820 pmap_methods_ptr->pm_page_set_memattr(m, ma);
824 pmap_extract(pmap_t pmap, vm_offset_t va)
827 return (pmap_methods_ptr->pm_extract(pmap, va));
831 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
834 return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
838 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
841 return (pmap_methods_ptr->pm_map(virt, start, end, prot));
845 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
848 pmap_methods_ptr->pm_qenter(sva, ma, count);
852 pmap_qremove(vm_offset_t sva, int count)
855 pmap_methods_ptr->pm_qremove(sva, count);
859 pmap_release(pmap_t pmap)
862 pmap_methods_ptr->pm_release(pmap);
866 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
869 pmap_methods_ptr->pm_remove(pmap, sva, eva);
873 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
876 pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
880 pmap_remove_all(vm_page_t m)
883 pmap_methods_ptr->pm_remove_all(m);
890 pmap_methods_ptr->pm_init();
897 pmap_methods_ptr->pm_init_pat();
901 pmap_growkernel(vm_offset_t addr)
904 pmap_methods_ptr->pm_growkernel(addr);
908 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
911 pmap_methods_ptr->pm_invalidate_page(pmap, va);
915 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
918 pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
922 pmap_invalidate_all(pmap_t pmap)
925 pmap_methods_ptr->pm_invalidate_all(pmap);
929 pmap_invalidate_cache(void)
932 pmap_methods_ptr->pm_invalidate_cache();
936 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
939 pmap_methods_ptr->pm_kenter(va, pa);
943 pmap_kremove(vm_offset_t va)
946 pmap_methods_ptr->pm_kremove(va);
949 extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
951 SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
959 init_static_kenv((char *)bootinfo.bi_envp, 0);
960 pae_mode = (cpu_feature & CPUID_PAE) != 0;
962 TUNABLE_INT_FETCH("vm.pmap.pae_mode", &pae_mode);
964 pmap_methods_ptr = &pmap_pae_methods;
967 pmap_methods_ptr = &pmap_nopae_methods;