/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Portions of this software were developed by
 * Konstantin Belousov <kib@FreeBSD.org> under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_pmap.h"
#include "opt_smp.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#ifdef DEV_APIC
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#endif
#include <x86/ifunc.h>

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM/pmap parameters");

#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/pmap_base.h>

vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */

int unmapped_buf_allowed = 1;

int pti;

u_long physfree;        /* phys addr of next free page */
u_long vm86phystk;      /* PA of vm86/bios stack */
u_long vm86paddr;       /* address of vm86 region */
int vm86pa;             /* phys addr of vm86 region */
u_long KERNend;         /* phys addr end of kernel (just after bss) */
u_long KPTphys;         /* phys addr of kernel page tables */
caddr_t ptvmmap = 0;
vm_offset_t kernel_vm_end;

int i386_pmap_VM_NFREEORDER;
int i386_pmap_VM_LEVEL_0_ORDER;
int i386_pmap_PDRSHIFT;

int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD,
    &pat_works, 0,
    "Is page attribute table fully functional?");

int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

int pv_entry_max = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD,
    &pv_entry_max, 0,
    "Max number of PV entries");

int pv_entry_count = 0;
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD,
    &pv_entry_count, 0,
    "Current number of pv entries");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

int shpgperproc = PMAP_SHPGPERPROC;
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD,
    &shpgperproc, 0,
    "Page share factor per proc");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "2/4MB page mapping counters");

u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0,
    "2/4MB page demotions");

u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0,
    "2/4MB page mappings");

u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0,
    "2/4MB page promotion failures");

u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0,
    "2/4MB page promotions");

#ifdef SMP
int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte_quick changed CPU with same PMAP1");
#endif

int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte_quick changed PMAP1");
int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte_quick didn't change PMAP1");

static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
        unsigned long ksize;

        ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
        return (sysctl_handle_long(oidp, &ksize, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_size, "IU",
    "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
        unsigned long kfree;

        kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
        return (sysctl_handle_long(oidp, &kfree, 0, req));
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, kvm_free, "IU",
    "Amount of KVM free");
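
/*
 * Illustrative usage note: both handlers above are read-only and are
 * queried from userland as "sysctl vm.kvm_size" and "sysctl vm.kvm_free".
 * kvm_size is the fixed span of the kernel virtual address space, while
 * kvm_free is the portion beyond kernel_vm_end that has not yet been
 * claimed by pmap_growkernel().
 */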

#ifdef PV_STATS
int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
long pv_entry_frees, pv_entry_allocs;
int pv_entry_spare;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD,
    &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD,
    &pc_chunk_allocs, 0,
    "Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD,
    &pc_chunk_frees, 0,
    "Current number of pv entry chunks freed");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD,
    &pc_chunk_tryfail, 0,
    "Number of failed attempts to allocate a pv entry chunk page");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD,
    &pv_entry_frees, 0,
    "Current number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD,
    &pv_entry_allocs, 0,
    "Current number of pv entry allocs");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD,
    &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif

struct pmap kernel_pmap_store;
static struct pmap_methods *pmap_methods_ptr;
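
/*
 * Every pmap entry point below is a thin wrapper that dispatches through
 * pmap_methods_ptr, which pmap_cold() sets to either pmap_pae_methods or
 * pmap_nopae_methods depending on whether the CPU supports PAE.  A sketch
 * of the pattern, using a hypothetical pm_foo slot (the real slots are
 * declared in <machine/pmap_base.h>):
 *
 *      void
 *      pmap_foo(pmap_t pmap)
 *      {
 *              pmap_methods_ptr->pm_foo(pmap);
 *      }
 */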

static int
sysctl_kmaps(SYSCTL_HANDLER_ARGS)
{
        return (pmap_methods_ptr->pm_sysctl_kmaps(oidp, arg1, arg2, req));
}
SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kmaps, "A",
    "Dump kernel address layout");

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

        TAILQ_INIT(&m->md.pv_list);
        m->md.pat_mode = PAT_WRITE_BACK;
}

void
invltlb_glob(void)
{

        invltlb();
}

static void pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva,
    vm_offset_t eva);
static void pmap_invalidate_cache_range_all(vm_offset_t sva,
    vm_offset_t eva);

void
pmap_flush_page(vm_page_t m)
{

        pmap_methods_ptr->pm_flush_page(m);
}

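/*
 * Resolved once at boot via an ifunc: on CPUs that advertise self-snoop
 * (CPUID_SS) the caches stay coherent and the range flush degenerates to
 * an alignment check; otherwise per-line CLFLUSH is used when available,
 * and a full cache invalidation as the last resort.
 */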
DEFINE_IFUNC(, void, pmap_invalidate_cache_range, (vm_offset_t, vm_offset_t))
{

        if ((cpu_feature & CPUID_SS) != 0)
                return (pmap_invalidate_cache_range_selfsnoop);
        if ((cpu_feature & CPUID_CLFSH) != 0)
                return (pmap_force_invalidate_cache_range);
        return (pmap_invalidate_cache_range_all);
}

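/*
 * Ranges of at least this size (2MB) are flushed by invalidating the
 * entire cache instead of walking them one cache line at a time.
 */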
#define PMAP_CLFLUSH_THRESHOLD  (2 * 1024 * 1024)

static void
pmap_invalidate_cache_range_check_align(vm_offset_t sva, vm_offset_t eva)
{

        KASSERT((sva & PAGE_MASK) == 0,
            ("pmap_invalidate_cache_range: sva not page-aligned"));
        KASSERT((eva & PAGE_MASK) == 0,
            ("pmap_invalidate_cache_range: eva not page-aligned"));
}

static void
pmap_invalidate_cache_range_selfsnoop(vm_offset_t sva, vm_offset_t eva)
{

        pmap_invalidate_cache_range_check_align(sva, eva);
}

void
pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

        sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
        if (eva - sva >= PMAP_CLFLUSH_THRESHOLD) {
                /*
                 * The supplied range is bigger than 2MB.
                 * Globally invalidate cache.
                 */
                pmap_invalidate_cache();
                return;
        }

#ifdef DEV_APIC
        /*
         * XXX: Some CPUs fault, hang, or trash the local APIC
         * registers if we use CLFLUSH on the local APIC
         * range.  The local APIC is always uncached, so we
         * don't need to flush for that range anyway.
         */
        if (pmap_kextract(sva) == lapic_paddr)
                return;
#endif

        if ((cpu_stdext_feature & CPUID_STDEXT_CLFLUSHOPT) != 0) {
                /*
                 * Do per-cache line flush.  Use the sfence
                 * instruction to ensure that previous stores are
                 * included in the write-back.  The processor
                 * propagates flush to other processors in the cache
                 * coherence domain.
                 */
                sfence();
                for (; sva < eva; sva += cpu_clflush_line_size)
                        clflushopt(sva);
                sfence();
        } else {
                /*
                 * Writes are ordered by CLFLUSH on Intel CPUs.
                 */
                if (cpu_vendor_id != CPU_VENDOR_INTEL)
                        mfence();
                for (; sva < eva; sva += cpu_clflush_line_size)
                        clflush(sva);
                if (cpu_vendor_id != CPU_VENDOR_INTEL)
                        mfence();
        }
}

static void
pmap_invalidate_cache_range_all(vm_offset_t sva, vm_offset_t eva)
{

        pmap_invalidate_cache_range_check_align(sva, eva);
        pmap_invalidate_cache();
}

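/*
 * Flush a batch of pages, applying the same 2MB cutoff as above: once the
 * batch reaches PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE pages (512 with 4KB
 * pages), or if CLFLUSH is not available at all, a single full cache
 * invalidation is used instead of per-page flushes.
 */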
void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
        int i;

        if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
            (cpu_feature & CPUID_CLFSH) == 0) {
                pmap_invalidate_cache();
        } else {
                for (i = 0; i < count; i++)
                        pmap_flush_page(pages[i]);
        }
}

void
pmap_ksetrw(vm_offset_t va)
{

        pmap_methods_ptr->pm_ksetrw(va);
}

void
pmap_remap_lower(bool enable)
{

        pmap_methods_ptr->pm_remap_lower(enable);
}

void
pmap_remap_lowptdi(bool enable)
{

        pmap_methods_ptr->pm_remap_lowptdi(enable);
}

void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

        pmap_methods_ptr->pm_align_superpage(object, offset, addr, size);
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{

        return (pmap_methods_ptr->pm_quick_enter_page(m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{

        pmap_methods_ptr->pm_quick_remove_page(addr);
}

void *
pmap_trm_alloc(size_t size, int flags)
{

        return (pmap_methods_ptr->pm_trm_alloc(size, flags));
}

void
pmap_trm_free(void *addr, size_t size)
{

        pmap_methods_ptr->pm_trm_free(addr, size);
}

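/*
 * Deliberately empty: on x86 the instruction cache is kept coherent with
 * data stores by hardware, so no explicit synchronization is needed here.
 */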
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

vm_offset_t
pmap_get_map_low(void)
{

        return (pmap_methods_ptr->pm_get_map_low());
}

vm_offset_t
pmap_get_vm_maxuser_address(void)
{

        return (pmap_methods_ptr->pm_get_vm_maxuser_address());
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

        return (pmap_methods_ptr->pm_kextract(va));
}

vm_paddr_t
pmap_pg_frame(vm_paddr_t pa)
{

        return (pmap_methods_ptr->pm_pg_frame(pa));
}

void
pmap_sf_buf_map(struct sf_buf *sf)
{

        pmap_methods_ptr->pm_sf_buf_map(sf);
}

void
pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma)
{

        pmap_methods_ptr->pm_cp_slow0_map(kaddr, plen, ma);
}

u_int
pmap_get_kcr3(void)
{

        return (pmap_methods_ptr->pm_get_kcr3());
}

u_int
pmap_get_cr3(pmap_t pmap)
{

        return (pmap_methods_ptr->pm_get_cr3(pmap));
}

caddr_t
pmap_cmap3(vm_paddr_t pa, u_int pte_flags)
{

        return (pmap_methods_ptr->pm_cmap3(pa, pte_flags));
}

void
pmap_basemem_setup(u_int basemem)
{

        pmap_methods_ptr->pm_basemem_setup(basemem);
}

void
pmap_set_nx(void)
{

        pmap_methods_ptr->pm_set_nx();
}

void *
pmap_bios16_enter(void)
{

        return (pmap_methods_ptr->pm_bios16_enter());
}

void
pmap_bios16_leave(void *handle)
{

        pmap_methods_ptr->pm_bios16_leave(handle);
}

void
pmap_bootstrap(vm_paddr_t firstaddr)
{

        pmap_methods_ptr->pm_bootstrap(firstaddr);
}

boolean_t
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{

        return (pmap_methods_ptr->pm_is_valid_memattr(pmap, mode));
}

int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
{

        return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
}

bool
pmap_ps_enabled(pmap_t pmap)
{

        return (pmap_methods_ptr->pm_ps_enabled(pmap));
}

void
pmap_pinit0(pmap_t pmap)
{

        pmap_methods_ptr->pm_pinit0(pmap);
}

int
pmap_pinit(pmap_t pmap)
{

        return (pmap_methods_ptr->pm_pinit(pmap));
}

void
pmap_activate(struct thread *td)
{

        pmap_methods_ptr->pm_activate(td);
}

void
pmap_activate_boot(pmap_t pmap)
{

        pmap_methods_ptr->pm_activate_boot(pmap);
}

void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{

        pmap_methods_ptr->pm_advise(pmap, sva, eva, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

        pmap_methods_ptr->pm_clear_modify(m);
}

int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{

        return (pmap_methods_ptr->pm_change_attr(va, size, mode));
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{

        return (pmap_methods_ptr->pm_mincore(pmap, addr, pap));
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{

        pmap_methods_ptr->pm_copy(dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

        pmap_methods_ptr->pm_copy_page(src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

        pmap_methods_ptr->pm_copy_pages(ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_zero_page(vm_page_t m)
{

        pmap_methods_ptr->pm_zero_page(m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

        pmap_methods_ptr->pm_zero_page_area(m, off, size);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{

        return (pmap_methods_ptr->pm_enter(pmap, va, m, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

        pmap_methods_ptr->pm_enter_object(pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

        pmap_methods_ptr->pm_enter_quick(pmap, va, m, prot);
}

void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{

        return (pmap_methods_ptr->pm_kenter_temporary(pa, i));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

        pmap_methods_ptr->pm_object_init_pt(pmap, addr, object, pindex, size);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

        pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

        return (pmap_methods_ptr->pm_page_exists_quick(pmap, m));
}

int
pmap_page_wired_mappings(vm_page_t m)
{

        return (pmap_methods_ptr->pm_page_wired_mappings(m));
}

boolean_t
pmap_page_is_mapped(vm_page_t m)
{

        return (pmap_methods_ptr->pm_page_is_mapped(m));
}

void
pmap_remove_pages(pmap_t pmap)
{

        pmap_methods_ptr->pm_remove_pages(pmap);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

        return (pmap_methods_ptr->pm_is_modified(m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

        return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

        return (pmap_methods_ptr->pm_is_referenced(m));
}

void
pmap_remove_write(vm_page_t m)
{

        pmap_methods_ptr->pm_remove_write(m);
}

int
pmap_ts_referenced(vm_page_t m)
{

        return (pmap_methods_ptr->pm_ts_referenced(m));
}

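/*
 * Device and BIOS mappings.  pmap_mapdev() maps device memory with
 * PAT_UNCACHEABLE, the safe default for memory-mapped I/O registers,
 * while pmap_mapbios() uses PAT_WRITE_BACK, since firmware tables live
 * in ordinary RAM.  Both are convenience wrappers around the _attr
 * variant below.
 */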
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{

        return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
            MAPDEV_SETATTR));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

        return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
            MAPDEV_SETATTR));
}

void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

        return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

        pmap_methods_ptr->pm_unmapdev(va, size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

        pmap_methods_ptr->pm_page_set_memattr(m, ma);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

        return (pmap_methods_ptr->pm_extract(pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

        return (pmap_methods_ptr->pm_extract_and_hold(pmap, va, prot));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

        return (pmap_methods_ptr->pm_map(virt, start, end, prot));
}

void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{

        pmap_methods_ptr->pm_qenter(sva, ma, count);
}

void
pmap_qremove(vm_offset_t sva, int count)
{

        pmap_methods_ptr->pm_qremove(sva, count);
}

void
pmap_release(pmap_t pmap)
{

        pmap_methods_ptr->pm_release(pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

        pmap_methods_ptr->pm_remove(pmap, sva, eva);
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{

        pmap_methods_ptr->pm_protect(pmap, sva, eva, prot);
}

void
pmap_remove_all(vm_page_t m)
{

        pmap_methods_ptr->pm_remove_all(m);
}

void
pmap_init(void)
{

        pmap_methods_ptr->pm_init();
}

void
pmap_init_pat(void)
{

        pmap_methods_ptr->pm_init_pat();
}

void
pmap_growkernel(vm_offset_t addr)
{

        pmap_methods_ptr->pm_growkernel(addr);
}

void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

        pmap_methods_ptr->pm_invalidate_page(pmap, va);
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

        pmap_methods_ptr->pm_invalidate_range(pmap, sva, eva);
}

void
pmap_invalidate_all(pmap_t pmap)
{

        pmap_methods_ptr->pm_invalidate_all(pmap);
}

void
pmap_invalidate_cache(void)
{

        pmap_methods_ptr->pm_invalidate_cache();
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

        pmap_methods_ptr->pm_kenter(va, pa);
}

void
pmap_kremove(vm_offset_t va)
{

        pmap_methods_ptr->pm_kremove(va);
}

extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
int pae_mode;
SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &pae_mode, 0,
    "Is PAE mode enabled?");

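/*
 * Early boot entry point: pick the PAE or non-PAE method table based on
 * CPUID and let the chosen implementation build the initial page tables.
 * PAE may be disabled on capable hardware by setting the loader tunable
 * "vm.pmap.pae_mode" to 0; it is fetched by hand here (note
 * CTLFLAG_NOFETCH above) because this code runs before the normal
 * tunable processing.
 */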
void
pmap_cold(void)
{

        init_static_kenv((char *)bootinfo.bi_envp, 0);
        pae_mode = (cpu_feature & CPUID_PAE) != 0;
        if (pae_mode)
                TUNABLE_INT_FETCH("vm.pmap.pae_mode", &pae_mode);
        if (pae_mode) {
                pmap_methods_ptr = &pmap_pae_methods;
                pmap_pae_cold();
        } else {
                pmap_methods_ptr = &pmap_nopae_methods;
                pmap_nopae_cold();
        }
}