2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2005 Peter Grehan
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * Dispatch MI pmap calls to the appropriate MMU implementation
35 * through a previously registered kernel object.
37 * Before pmap_bootstrap() can be called, a CPU module must have
38 * called pmap_mmu_install(). This may be called multiple times:
39 * the highest priority call will be installed as the default
40 * MMU handler when pmap_bootstrap() is called.
42 * It is required that mutex_init() be called before pmap_bootstrap(),
43 * as the PMAP layer makes extensive use of mutexes.
46 #include <sys/param.h>
47 #include <sys/kernel.h>
50 #include <sys/kerneldump.h>
52 #include <sys/mutex.h>
53 #include <sys/systm.h>
56 #include <vm/vm_page.h>
58 #include <machine/dump.h>
59 #include <machine/md_var.h>
60 #include <machine/mmuvar.h>
61 #include <machine/smp.h>
65 static mmu_def_t *mmu_def_impl;
67 static struct mmu_kobj mmu_kernel_obj;
68 static struct kobj_ops mmu_kernel_kops;
73 struct pmap kernel_pmap_store;
75 vm_offset_t msgbuf_phys;
77 vm_offset_t kernel_vm_end;
78 vm_paddr_t phys_avail[PHYS_AVAIL_SZ];
79 vm_offset_t virtual_avail;
80 vm_offset_t virtual_end;
82 int pmap_bootstrapped;
/*
 * Order pvo entries by virtual address for the per-pmap red-black tree.
 * Returns <0, 0, or >0 in the usual comparator convention.
 */
int
pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
{
	if (PVO_VADDR(a) < PVO_VADDR(b))
		return (-1);
	else if (PVO_VADDR(a) > PVO_VADDR(b))
		return (1);
	return (0);
}
RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
99 pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
102 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
104 MMU_ADVISE(mmu_obj, pmap, start, end, advice);
108 pmap_clear_modify(vm_page_t m)
111 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
112 MMU_CLEAR_MODIFY(mmu_obj, m);
116 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
117 vm_size_t len, vm_offset_t src_addr)
120 CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
121 src_pmap, dst_addr, len, src_addr);
122 MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
126 pmap_copy_page(vm_page_t src, vm_page_t dst)
129 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
130 MMU_COPY_PAGE(mmu_obj, src, dst);
134 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
135 vm_offset_t b_offset, int xfersize)
138 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
139 a_offset, mb, b_offset, xfersize);
140 MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
144 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
145 u_int flags, int8_t psind)
148 CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
149 p, prot, flags, psind);
150 return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
154 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
155 vm_page_t m_start, vm_prot_t prot)
158 CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
160 MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
164 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
167 CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
168 MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
172 pmap_extract(pmap_t pmap, vm_offset_t va)
175 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
176 return (MMU_EXTRACT(mmu_obj, pmap, va));
180 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
183 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
184 return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
188 pmap_growkernel(vm_offset_t va)
191 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
192 MMU_GROWKERNEL(mmu_obj, va);
199 CTR1(KTR_PMAP, "%s()", __func__);
204 pmap_is_modified(vm_page_t m)
207 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
208 return (MMU_IS_MODIFIED(mmu_obj, m));
212 pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
215 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
216 return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
220 pmap_is_referenced(vm_page_t m)
223 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
224 return (MMU_IS_REFERENCED(mmu_obj, m));
228 pmap_ts_referenced(vm_page_t m)
231 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
232 return (MMU_TS_REFERENCED(mmu_obj, m));
236 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
239 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
241 return (MMU_MAP(mmu_obj, virt, start, end, prot));
245 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
246 vm_pindex_t pindex, vm_size_t size)
249 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
250 object, pindex, size);
251 MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
255 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
258 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
259 return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
263 pmap_page_init(vm_page_t m)
266 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
267 MMU_PAGE_INIT(mmu_obj, m);
271 pmap_page_wired_mappings(vm_page_t m)
274 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
275 return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
279 pmap_pinit(pmap_t pmap)
282 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
283 MMU_PINIT(mmu_obj, pmap);
288 pmap_pinit0(pmap_t pmap)
291 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
292 MMU_PINIT0(mmu_obj, pmap);
296 pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
299 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
301 MMU_PROTECT(mmu_obj, pmap, start, end, prot);
305 pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
308 CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
309 MMU_QENTER(mmu_obj, start, m, count);
313 pmap_qremove(vm_offset_t start, int count)
316 CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
317 MMU_QREMOVE(mmu_obj, start, count);
321 pmap_release(pmap_t pmap)
324 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
325 MMU_RELEASE(mmu_obj, pmap);
329 pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
332 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
333 MMU_REMOVE(mmu_obj, pmap, start, end);
337 pmap_remove_all(vm_page_t m)
340 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
341 MMU_REMOVE_ALL(mmu_obj, m);
345 pmap_remove_pages(pmap_t pmap)
348 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
349 MMU_REMOVE_PAGES(mmu_obj, pmap);
353 pmap_remove_write(vm_page_t m)
356 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
357 MMU_REMOVE_WRITE(mmu_obj, m);
361 pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
364 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
365 MMU_UNWIRE(mmu_obj, pmap, start, end);
369 pmap_zero_page(vm_page_t m)
372 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
373 MMU_ZERO_PAGE(mmu_obj, m);
377 pmap_zero_page_area(vm_page_t m, int off, int size)
380 CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
381 MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
385 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
388 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
389 return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
393 pmap_activate(struct thread *td)
396 CTR2(KTR_PMAP, "%s(%p)", __func__, td);
397 MMU_ACTIVATE(mmu_obj, td);
401 pmap_deactivate(struct thread *td)
404 CTR2(KTR_PMAP, "%s(%p)", __func__, td);
405 MMU_DEACTIVATE(mmu_obj, td);
409 * Increase the starting virtual address of the given mapping if a
410 * different alignment might result in more superpage mappings.
413 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
414 vm_offset_t *addr, vm_size_t size)
417 CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
419 MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
423 * Routines used in machine-dependent code
426 pmap_bootstrap(vm_offset_t start, vm_offset_t end)
428 mmu_obj = &mmu_kernel_obj;
431 * Take care of compiling the selected class, and
432 * then statically initialise the MMU object
434 kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
435 kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);
437 MMU_BOOTSTRAP(mmu_obj, start, end);
441 pmap_cpu_bootstrap(int ap)
444 * No KTR here because our console probably doesn't work yet
447 return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
451 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
454 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
455 return (MMU_MAPDEV(mmu_obj, pa, size));
459 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
462 CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
463 return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
467 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
470 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
471 return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
475 pmap_unmapdev(vm_offset_t va, vm_size_t size)
478 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
479 MMU_UNMAPDEV(mmu_obj, va, size);
483 pmap_kextract(vm_offset_t va)
486 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
487 return (MMU_KEXTRACT(mmu_obj, va));
491 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
494 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
495 MMU_KENTER(mmu_obj, va, pa);
499 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
502 CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
503 MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
507 pmap_kremove(vm_offset_t va)
510 CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
511 return (MMU_KREMOVE(mmu_obj, va));
515 pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr,
516 size_t ulen, size_t *klen)
519 CTR2(KTR_PMAP, "%s(%p)", __func__, uaddr);
520 return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
524 pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
527 CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
528 return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
532 pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
535 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
536 return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
540 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
543 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
544 return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
548 dumpsys_map_chunk(vm_paddr_t pa, size_t sz, void **va)
551 CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
552 return (MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va));
556 dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
559 CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
560 return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
564 dumpsys_pa_init(void)
567 CTR1(KTR_PMAP, "%s()", __func__);
568 return (MMU_SCAN_INIT(mmu_obj));
572 pmap_quick_enter_page(vm_page_t m)
574 CTR2(KTR_PMAP, "%s(%p)", __func__, m);
575 return (MMU_QUICK_ENTER_PAGE(mmu_obj, m));
579 pmap_quick_remove_page(vm_offset_t addr)
581 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
582 MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
586 pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
588 CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
589 return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
593 * MMU install routines. Highest priority wins, equal priority also
594 * overrides allowing last-set to win.
596 SET_DECLARE(mmu_set, mmu_def_t);
599 pmap_mmu_install(char *name, int prio)
601 mmu_def_t **mmupp, *mmup;
602 static int curr_prio = 0;
605 * Try and locate the MMU kobj corresponding to the name
607 SET_FOREACH(mmupp, mmu_set) {
611 !strcmp(mmup->name, name) &&
612 (prio >= curr_prio || mmu_def_impl == NULL)) {
622 int unmapped_buf_allowed;
625 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
629 case VM_MEMATTR_DEFAULT:
630 case VM_MEMATTR_UNCACHEABLE:
631 case VM_MEMATTR_CACHEABLE:
632 case VM_MEMATTR_WRITE_COMBINING:
633 case VM_MEMATTR_WRITE_BACK:
634 case VM_MEMATTR_WRITE_THROUGH:
635 case VM_MEMATTR_PREFETCHABLE: