sys/i386/i386/pmap_base.c
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/pmap_base.h>

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

int unmapped_buf_allowed = 1;

int pti;

u_long physfree;	/* phys addr of next free page */
u_long vm86phystk;	/* PA of vm86/bios stack */
u_long vm86paddr;	/* address of vm86 region */
int vm86pa;		/* phys addr of vm86 region */
u_long KERNend;		/* phys addr end of kernel (just after bss) */
u_long KPTphys;		/* phys addr of kernel page tables */
caddr_t ptvmmap = 0;
vm_offset_t kernel_vm_end;

int i386_pmap_VM_NFREEORDER;
int i386_pmap_VM_LEVEL_0_ORDER;
int i386_pmap_PDRSHIFT;

int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
    &pat_works, 1,
    "Is page attribute table fully functional?");

int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

int pv_entry_max = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
    &pv_entry_max, 0,
    "Max number of PV entries");

int pv_entry_count = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
    &pv_entry_count, 0,
    "Current number of pv entries");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

int shpgperproc = PMAP_SHPGPERPROC;
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
    &shpgperproc, 0,
    "Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2/4MB page mapping counters");

u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0,
    "2/4MB page demotions");

u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0,
    "2/4MB page mappings");

u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0,
    "2/4MB page promotion failures");

u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0,
    "2/4MB page promotions");

#ifdef SMP
int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif

int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");

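/*
 * Sysctl reporting helpers: vm.kvm_size is the span of the kernel
 * virtual address space, and vm.kvm_free is the part beyond the
 * current end of the kernel map (kernel_vm_end).
 */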
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize;

	ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
	return (sysctl_handle_long(oidp, &ksize, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "IU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree;

	kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
	return (sysctl_handle_long(oidp, &kfree, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "IU",
    "Amount of KVM free");

#ifdef PV_STATS
int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
long pv_entry_frees, pv_entry_allocs;
int pv_entry_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
    &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
    &pc_chunk_allocs, 0,
    "Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
    &pc_chunk_frees, 0,
    "Total number of pv entry chunk frees");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
    &pc_chunk_tryfail, 0,
    "Number of failed pv entry chunk page allocations");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
    &pv_entry_frees, 0,
    "Current number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0,
    "Current number of pv entry allocs");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
    &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif

struct pmap kernel_pmap_store;
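/*
 * Every pmap entry point below simply forwards through this method
 * table; pmap_cold() points it at either the PAE or the non-PAE
 * implementation before any of these functions can be called.
 */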
static struct pmap_methods *pmap_methods_ptr;

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pat_mode = PAT_WRITE_BACK;
}

void
invltlb_glob(void)
{

	invltlb();
}

static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
    vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
    vm_offset_t eva);

void
pmap_flush_page(vm_page_t m)
{

	pmap_methods_ptr->pm_flush_page(m);
}

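/*
 * Resolved once via the linker ifunc mechanism: self-snooping CPUs
 * (CPUID_SS) keep caches coherent across attribute changes and need
 * no flush, CPUs with CLFLUSH can flush by cache line, and anything
 * else falls back to invalidating the whole cache.
 */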
DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t),
    static)
{

	if ((cpu_feature & CPUID_SS) != 0)
		return (pmap_invalidate_cache_range_selfsnoop);
	if ((cpu_feature & CPUID_CLFSH) != 0)
		return (pmap_force_invalidate_cache_range);
	return (pmap_invalidate_cache_range_all);
}

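/*
 * Ranges at least this large are presumed cheaper to handle with a
 * full cache invalidation than with one CLFLUSH per line.
 */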
#define PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)

static void
pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));
}

static void
pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
}

void
pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
	if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
		/*
		 * The supplied range is bigger than 2MB.
		 * Globally invalidate cache.
		 */
		pmap_invalidate_cache();
		return;
	}

#ifdef DEV_APIC
	/*
	 * XXX: Some CPUs fault, hang, or trash the local APIC
	 * registers if we use CLFLUSH on the local APIC
	 * range.  The local APIC is always uncached, so we
	 * don't need to flush for that range anyway.
	 */
	if (pmap_kextract(sva) == lapic_paddr)
		return;
#endif

	if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
		/*
		 * Do per-cache line flush.  Use the sfence
		 * instruction to ensure that previous stores are
		 * included in the write-back.  The processor
		 * propagates the flush to other processors in the
		 * cache coherence domain.
		 */
		sfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflushopt(sva);
		sfence();
	} else {
		/*
		 * Writes are ordered by CLFLUSH on Intel CPUs.
		 */
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		if (cpu_vendor_id != CPU_VENDOR_INTEL)
			mfence();
	}
}

static void
pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
{

	pmap_invalidate_cache_range_check_align(sva, eva);
	pmap_invalidate_cache();
}

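/*
 * Flush a batch of pages, falling back to a full cache invalidation
 * when the batch exceeds the CLFLUSH threshold or CLFLUSH is absent.
 */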
void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	int i;

	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0) {
		pmap_invalidate_cache();
	} else {
		for (i = 0; i < count; i++)
			pmap_flush_page(pages[i]);
	}
}

void
pmap_ksetrw(vm_offset_t va)
{

	pmap_methods_ptr->pm_ksetrw(va);
}

void
pmap_remap_lower(bool enable)
{

	pmap_methods_ptr->pm_remap_lower(enable);
}

void
pmap_remap_lowptdi(bool enable)
{

	pmap_methods_ptr->pm_remap_lowptdi(enable);
}

void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	return (pmap_methods_ptr->pm_align_superpage(object, offset,
	    addr, size));
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

	return (pmap_methods_ptr->pm_quick_enter_page(m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_quick_remove_page(addr));
}

void *
pmap_trm_alloc(size_t size, int flags)
{

	return (pmap_methods_ptr->pm_trm_alloc(size, flags));
}

void
pmap_trm_free(void *addr, size_t size)
{

	pmap_methods_ptr->pm_trm_free(addr, size);
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
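	/*
	 * Empty on x86: the hardware keeps the instruction cache
	 * coherent with stores, so no explicit synchronization is
	 * needed here.
	 */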
}

vm_offset_t
pmap_get_map_low(void)
{

	return (pmap_methods_ptr->pm_get_map_low());
}

vm_offset_t
pmap_get_vm_maxuser_address(void)
{

	return (pmap_methods_ptr->pm_get_vm_maxuser_address());
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	return (pmap_methods_ptr->pm_kextract(va));
}

vm_paddr_t
pmap_pg_frame(vm_paddr_t pa)
{

	return (pmap_methods_ptr->pm_pg_frame(pa));
}

void
pmap_sf_buf_map(struct sf_buf *sf)
{

	pmap_methods_ptr->pm_sf_buf_map(sf);
}

void
pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
{

	pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
}

u_int
pmap_get_kcr3(void)
{

	return (pmap_methods_ptr->pm_get_kcr3());
}

u_int
pmap_get_cr3(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_get_cr3(pmap));
}

caddr_t
pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
{

	return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
}

void
pmap_basemem_setup(u_int basemem)
{

	pmap_methods_ptr->pm_basemem_setup(basemem);
}

void
pmap_set_nx(void)
{

	pmap_methods_ptr->pm_set_nx();
}

void *
pmap_bios16_enter(void)
{

	return (pmap_methods_ptr->pm_bios16_enter());
}

void
pmap_bios16_leave(void *handle)
{

	pmap_methods_ptr->pm_bios16_leave(handle);
}

void
pmap_bootstrap(vm_paddr_t firstaddr)
{

	pmap_methods_ptr->pm_bootstrap(firstaddr);
}

boolean_t
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{

	return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
}

int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
{

	return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
}

bool
pmap_ps_enabled(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_ps_enabled(pmap));
}

void
pmap_pinit0(pmap_t pmap)
{

	pmap_methods_ptr->pm_pinit0(pmap);
}

int
pmap_pinit(pmap_t pmap)
{

	return (pmap_methods_ptr->pm_pinit(pmap));
}

void
pmap_activate(struct thread *td)
{

	pmap_methods_ptr->pm_activate(td);
}

void
pmap_activate_boot(pmap_t pmap)
{

	pmap_methods_ptr->pm_activate_boot(pmap);
}

void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{

	pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

	pmap_methods_ptr->pm_clear_modify(m);
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_change_attr(va, size, mode));
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	return (pmap_methods_ptr->pm_mincore(pmap, addr, locked_pa));
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{

	pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	pmap_methods_ptr->pm_copy_page(src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_zero_page(vm_page_t m)
{

	pmap_methods_ptr->pm_zero_page(m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	pmap_methods_ptr->pm_zero_page_area(m, off, size);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
}

void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{

	return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
}

int
pmap_page_wired_mappings(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_wired_mappings(m));
}

boolean_t
pmap_page_is_mapped(vm_page_t m)
{

	return (pmap_methods_ptr->pm_page_is_mapped(m));
}

void
pmap_remove_pages(pmap_t pmap)
{

	pmap_methods_ptr->pm_remove_pages(pmap);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_modified(m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_is_referenced(m));
}

void
pmap_remove_write(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_write(m);
}

int
pmap_ts_referenced(vm_page_t m)
{

	return (pmap_methods_ptr->pm_ts_referenced(m));
}

void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

	return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	pmap_methods_ptr->pm_unmapdev(va, size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	pmap_methods_ptr->pm_page_set_memattr(m, ma);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	return (pmap_methods_ptr->pm_extract(pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	return (pmap_methods_ptr->pm_map(virt, start, end, prot));
}

void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{

	pmap_methods_ptr->pm_qenter(sva, ma, count);
}

void
pmap_qremove(vm_offset_t sva, int count)
{

	pmap_methods_ptr->pm_qremove(sva, count);
}

void
pmap_release(pmap_t pmap)
{

	pmap_methods_ptr->pm_release(pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_remove(pmap, sva, eva);
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{

	pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
}

void
pmap_remove_all(vm_page_t m)
{

	pmap_methods_ptr->pm_remove_all(m);
}

void
pmap_init(void)
{

	pmap_methods_ptr->pm_init();
}

void
pmap_init_pat(void)
{

	pmap_methods_ptr->pm_init_pat();
}

void
pmap_growkernel(vm_offset_t addr)
{

	pmap_methods_ptr->pm_growkernel(addr);
}

void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	pmap_methods_ptr->pm_invalidate_page(pmap, va);
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
}

void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_methods_ptr->pm_invalidate_all(pmap);
}

void
pmap_invalidate_cache(void)
{

	pmap_methods_ptr->pm_invalidate_cache();
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	pmap_methods_ptr->pm_kenter(va, pa);
}

void
pmap_kremove(vm_offset_t va)
{

	pmap_methods_ptr->pm_kremove(va);
}

extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
int pae_mode;
SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RD,
    &pae_mode, 1,
    "Whether PAE mode is active");

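/*
 * Called at early boot to select the PAE or non-PAE method table
 * based on CPUID and let the chosen implementation set up the
 * initial mappings.
 */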
void
pmap_cold(void)
{

	if ((cpu_feature & CPUID_PAE) != 0) {
		pae_mode = 1;
		pmap_methods_ptr = &pmap_pae_methods;
		pmap_pae_cold();
	} else {
		pmap_methods_ptr = &pmap_nopae_methods;
		pmap_nopae_cold();
	}
}