/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

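/*
 * pmap_base.c contains the pmap entry points shared by the PAE and
 * non-PAE i386 pmap implementations.  Most of the functions below are
 * thin wrappers that forward to the implementation selected at early
 * boot through pmap_methods_ptr; see pmap_cold() at the end of this
 * file for how that selection is made.
 */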
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/pmap_base.h>

vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */

int unmapped_buf_allowed = 1;

int pti;

u_long physfree;        /* phys addr of next free page */
u_long vm86phystk;      /* PA of vm86/bios stack */
u_long vm86paddr;       /* address of vm86 region */
int vm86pa;             /* phys addr of vm86 region */
u_long KERNend;         /* phys addr end of kernel (just after bss) */
u_long KPTphys;         /* phys addr of kernel page tables */
caddr_t ptvmmap = 0;
vm_offset_t kernel_vm_end;

int i386_pmap_VM_NFREEORDER;
int i386_pmap_VM_LEVEL_0_ORDER;
int i386_pmap_PDRSHIFT;

int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
    &pat_works, 0,
    "Is page attribute table fully functional?");

int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

int pv_entry_max = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
    &pv_entry_max, 0,
    "Max number of PV entries");

int pv_entry_count = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
    &pv_entry_count, 0,
    "Current number of pv entries");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

int shpgperproc = PMAP_SHPGPERPROC;
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
    &shpgperproc, 0,
    "Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2/4MB page mapping counters");

u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0,
    "2/4MB page demotions");

u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0,
    "2/4MB page mappings");

u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0,
    "2/4MB page promotion failures");

u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0,
    "2/4MB page promotions");

#ifdef SMP
int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif

int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");

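/*
 * Report the total size of the kernel virtual address space, and the
 * amount past kernel_vm_end that pmap_growkernel() has not yet grown
 * into.
 */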
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
        unsigned long ksize;

        ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
        return (sysctl_handle_long(oidp, &ksize, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "LU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
        unsigned long kfree;

        kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
        return (sysctl_handle_long(oidp, &kfree, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "LU",
    "Amount of KVM free");

#ifdef PV_STATS
int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
long pv_entry_frees, pv_entry_allocs;
int pv_entry_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
    &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
    &pc_chunk_allocs, 0,
    "Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
    &pc_chunk_frees, 0,
    "Number of pv entry chunks freed");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
    &pc_chunk_tryfail, 0,
    "Number of times tried to get a chunk page but failed.");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
    &pv_entry_frees, 0,
    "Current number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0,
    "Current number of pv entry allocs");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
    &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif

struct pmap kernel_pmap_store;
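/* Method table of the pmap implementation chosen in pmap_cold(). */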
static struct pmap_methods *pmap_methods_ptr;

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

        TAILQ_INIT(&m->md.pv_list);
        m->md.pat_mode = PAT_WRITE_BACK;
}

void
invltlb_glob(void)
{

        invltlb();
}

static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
    vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
    vm_offset_t eva);

void
pmap_flush_page(vm_page_t m)
{

        pmap_methods_ptr->pm_flush_page(m);
}

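/*
 * Select the cache invalidation routine once, at ifunc resolution time,
 * based on CPU features: self-snooping CPUs need no explicit flush,
 * CLFLUSH-capable CPUs flush one cache line at a time, and anything
 * else falls back to invalidating the entire cache.
 */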
DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t),
    static)
{

        if ((cpu_feature & CPUID_SS) != 0)
                return (pmap_invalidate_cache_range_selfsnoop);
        if ((cpu_feature & CPUID_CLFSH) != 0)
                return (pmap_force_invalidate_cache_range);
        return (pmap_invalidate_cache_range_all);
}

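/*
 * For ranges of 2MB and up, a full cache invalidation is assumed to be
 * cheaper than stepping through the range one CLFLUSH at a time.
 */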
#define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)

static void
pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
{

        KASSERT((sva & PAGE_MASK) == 0,
            ("pmap_invalidate_cache_range: sva not page-aligned"));
        KASSERT((eva & PAGE_MASK) == 0,
            ("pmap_invalidate_cache_range: eva not page-aligned"));
}

static void
pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
{

        pmap_invalidate_cache_range_check_align(sva, eva);
}

void
pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

        sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
        if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
                /*
                 * The supplied range is bigger than 2MB.
                 * Globally invalidate cache.
                 */
                pmap_invalidate_cache();
                return;
        }

#ifdef DEV_APIC
        /*
         * XXX: Some CPUs fault, hang, or trash the local APIC
         * registers if we use CLFLUSH on the local APIC
         * range.  The local APIC is always uncached, so we
         * don't need to flush for that range anyway.
         */
        if (pmap_kextract(sva) == lapic_paddr)
                return;
#endif

        if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
                /*
                 * Do per-cache line flush.  Use the sfence
                 * instruction to ensure that previous stores are
                 * included in the write-back.  The processor
                 * propagates flush to other processors in the cache
                 * coherence domain.
                 */
                sfence();
                for (; sva < eva; sva += cpu_clflush_line_size)
                        clflushopt(sva);
                sfence();
        } else {
                /*
                 * Writes are ordered by CLFLUSH on Intel CPUs.
                 */
                if (cpu_vendor_id != CPU_VENDOR_INTEL)
                        mfence();
                for (; sva < eva; sva += cpu_clflush_line_size)
                        clflush(sva);
                if (cpu_vendor_id != CPU_VENDOR_INTEL)
                        mfence();
        }
}

static void
pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
{

        pmap_invalidate_cache_range_check_align(sva, eva);
        pmap_invalidate_cache();
}

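/*
 * Flush a set of pages from the cache, invalidating the whole cache
 * when the set is large or when CLFLUSH is not available.
 */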
void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
        int i;

        if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
            (cpu_feature & CPUID_CLFSH) == 0) {
                pmap_invalidate_cache();
        } else {
                for (i = 0; i < count; i++)
                        pmap_flush_page(pages[i]);
        }
}

void
pmap_ksetrw(vm_offset_t va)
{

        pmap_methods_ptr->pm_ksetrw(va);
}

void
pmap_remap_lower(bool enable)
{

        pmap_methods_ptr->pm_remap_lower(enable);
}

void
pmap_remap_lowptdi(bool enable)
{

        pmap_methods_ptr->pm_remap_lowptdi(enable);
}

void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

        pmap_methods_ptr->pm_align_superpage(object, offset, addr, size);
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

        return (pmap_methods_ptr->pm_quick_enter_page(m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

        pmap_methods_ptr->pm_quick_remove_page(addr);
}

void *
pmap_trm_alloc(size_t size, int flags)
{

        return (pmap_methods_ptr->pm_trm_alloc(size, flags));
}

void
pmap_trm_free(void *addr, size_t size)
{

        pmap_methods_ptr->pm_trm_free(addr, size);
}

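/* x86 keeps instruction fetch coherent with stores, so this is a no-op. */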
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

vm_offset_t
pmap_get_map_low(void)
{

        return (pmap_methods_ptr->pm_get_map_low());
}

vm_offset_t
pmap_get_vm_maxuser_address(void)
{

        return (pmap_methods_ptr->pm_get_vm_maxuser_address());
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

        return (pmap_methods_ptr->pm_kextract(va));
}

vm_paddr_t
pmap_pg_frame(vm_paddr_t pa)
{

        return (pmap_methods_ptr->pm_pg_frame(pa));
}

void
pmap_sf_buf_map(struct sf_buf *sf)
{

        pmap_methods_ptr->pm_sf_buf_map(sf);
}

void
pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
{

        pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
}

u_int
pmap_get_kcr3(void)
{

        return (pmap_methods_ptr->pm_get_kcr3());
}

u_int
pmap_get_cr3(pmap_t pmap)
{

        return (pmap_methods_ptr->pm_get_cr3(pmap));
}

caddr_t
pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
{

        return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
}

void
pmap_basemem_setup(u_int basemem)
{

        pmap_methods_ptr->pm_basemem_setup(basemem);
}

void
pmap_set_nx(void)
{

        pmap_methods_ptr->pm_set_nx();
}

void *
pmap_bios16_enter(void)
{

        return (pmap_methods_ptr->pm_bios16_enter());
}

void
pmap_bios16_leave(void *handle)
{

        pmap_methods_ptr->pm_bios16_leave(handle);
}

void
pmap_bootstrap(vm_paddr_t firstaddr)
{

        pmap_methods_ptr->pm_bootstrap(firstaddr);
}

boolean_t
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{

        return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
}

int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
{

        return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
}

bool
pmap_ps_enabled(pmap_t pmap)
{

        return (pmap_methods_ptr->pm_ps_enabled(pmap));
}

void
pmap_pinit0(pmap_t pmap)
{

        pmap_methods_ptr->pm_pinit0(pmap);
}

int
pmap_pinit(pmap_t pmap)
{

        return (pmap_methods_ptr->pm_pinit(pmap));
}

void
pmap_activate(struct thread *td)
{

        pmap_methods_ptr->pm_activate(td);
}

void
pmap_activate_boot(pmap_t pmap)
{

        pmap_methods_ptr->pm_activate_boot(pmap);
}

void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{

        pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

        pmap_methods_ptr->pm_clear_modify(m);
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{

        return (pmap_methods_ptr->pm_change_attr(va, size, mode));
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

        return (pmap_methods_ptr->pm_mincore(pmap, addr, locked_pa));
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{

        pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

        pmap_methods_ptr->pm_copy_page(src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

        pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_zero_page(vm_page_t m)
{

        pmap_methods_ptr->pm_zero_page(m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

        pmap_methods_ptr->pm_zero_page_area(m, off, size);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

        return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

        pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

        pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
}

void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{

        return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

        pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

        pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

        return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
}

int
pmap_page_wired_mappings(vm_page_t m)
{

        return (pmap_methods_ptr->pm_page_wired_mappings(m));
}

boolean_t
pmap_page_is_mapped(vm_page_t m)
{

        return (pmap_methods_ptr->pm_page_is_mapped(m));
}

void
pmap_remove_pages(pmap_t pmap)
{

        pmap_methods_ptr->pm_remove_pages(pmap);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

        return (pmap_methods_ptr->pm_is_modified(m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

        return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

        return (pmap_methods_ptr->pm_is_referenced(m));
}

void
pmap_remove_write(vm_page_t m)
{

        pmap_methods_ptr->pm_remove_write(m);
}

int
pmap_ts_referenced(vm_page_t m)
{

        return (pmap_methods_ptr->pm_ts_referenced(m));
}

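/*
 * Device mappings: pmap_mapdev() maps with PAT_UNCACHEABLE, as is
 * appropriate for device registers, while pmap_mapbios() uses
 * PAT_WRITE_BACK, since firmware tables live in ordinary RAM.
 */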
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{

        return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

        return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

        return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

        pmap_methods_ptr->pm_unmapdev(va, size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

        pmap_methods_ptr->pm_page_set_memattr(m, ma);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

        return (pmap_methods_ptr->pm_extract(pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

        return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

        return (pmap_methods_ptr->pm_map(virt, start, end, prot));
}

void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{

        pmap_methods_ptr->pm_qenter(sva, ma, count);
}

void
pmap_qremove(vm_offset_t sva, int count)
{

        pmap_methods_ptr->pm_qremove(sva, count);
}

void
pmap_release(pmap_t pmap)
{

        pmap_methods_ptr->pm_release(pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

        pmap_methods_ptr->pm_remove(pmap, sva, eva);
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{

        pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
}

void
pmap_remove_all(vm_page_t m)
{

        pmap_methods_ptr->pm_remove_all(m);
}

void
pmap_init(void)
{

        pmap_methods_ptr->pm_init();
}

void
pmap_init_pat(void)
{

        pmap_methods_ptr->pm_init_pat();
}

void
pmap_growkernel(vm_offset_t addr)
{

        pmap_methods_ptr->pm_growkernel(addr);
}

void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

        pmap_methods_ptr->pm_invalidate_page(pmap, va);
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

        pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
}

void
pmap_invalidate_all(pmap_t pmap)
{

        pmap_methods_ptr->pm_invalidate_all(pmap);
}

void
pmap_invalidate_cache(void)
{

        pmap_methods_ptr->pm_invalidate_cache();
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

        pmap_methods_ptr->pm_kenter(va, pa);
}

void
pmap_kremove(vm_offset_t va)
{

        pmap_methods_ptr->pm_kremove(va);
}

extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
int pae_mode;
SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pae_mode, 0,
    "Is PAE mode enabled?");

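/*
 * Called very early in boot to choose between the PAE and non-PAE pmap
 * implementations.  PAE is used whenever the CPU supports it, unless
 * overridden by the vm.pmap.pae_mode loader tunable.
 */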
void
pmap_cold(void)
{

        init_static_kenv((char *)bootinfo.bi_envp, 0);
        pae_mode = (cpu_feature & CPUID_PAE) != 0;
        if (pae_mode)
                TUNABLE_INT_FETCH("vm.pmap.pae_mode", &pae_mode);
        if (pae_mode) {
                pmap_methods_ptr = &pmap_pae_methods;
                pmap_pae_cold();
        } else {
                pmap_methods_ptr = &pmap_nopae_methods;
                pmap_nopae_cold();
        }
}