/*-
 * Copyright (c) 2005 Peter Grehan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Dispatch MI pmap calls to the appropriate MMU implementation
 * through a previously registered kernel object.
 *
 * Before pmap_bootstrap() can be called, a CPU module must have
 * called pmap_mmu_install(). This may be called multiple times:
 * the highest priority call will be installed as the default
 * MMU handler when pmap_bootstrap() is called.
 *
 * It is required that mutex_init() be called before pmap_bootstrap(),
 * as the PMAP layer makes extensive use of mutexes.
 */
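
/*
 * Illustrative sketch (not code from this file): an MMU implementation
 * exports its method table as a kobj class with MMU_DEF(), and a CPU
 * module then nominates it by name with a probe-style priority before
 * pmap_bootstrap() runs.  The class, method-table and priority names
 * below are assumptions for the example, not requirements:
 *
 *	// In the MMU implementation, e.g. mmu_oea.c:
 *	MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
 *
 *	// In CPU/platform early startup code:
 *	pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
 */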
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>
#include <machine/smp.h>
static mmu_def_t	*mmu_def_impl;

static mmu_t		mmu_obj;
static struct mmu_kobj	mmu_kernel_obj;
static struct kobj_ops	mmu_kernel_kops;
struct pmap kernel_pmap_store;

struct msgbuf *msgbufp;
vm_offset_t msgbuf_phys;

vm_offset_t kernel_vm_end;
vm_offset_t phys_avail[PHYS_AVAIL_SZ];
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

int pmap_bootstrapped;
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %u)", __func__, pmap, va, wired);
	MMU_CHANGE_WIRING(mmu_obj, pmap, va, wired);
}
void
pmap_clear_modify(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_MODIFY(mmu_obj, m);
}
void
pmap_clear_reference(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_REFERENCE(mmu_obj, m);
}
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
	    src_pmap, dst_addr, len, src_addr);
	MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
	MMU_COPY_PAGE(mmu_obj, src, dst);
}
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
	    a_offset, mb, b_offset, xfersize);
	MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
    vm_prot_t prot, boolean_t wired)
{

	CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
	    access, p, prot, wired);
	MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
}
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
	    end, m_start, prot);
	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_EXTRACT(mmu_obj, pmap, va));
}
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}
void
pmap_growkernel(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_GROWKERNEL(mmu_obj, va);
}
void
pmap_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	MMU_INIT(mmu_obj);
}
boolean_t
pmap_is_modified(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_MODIFIED(mmu_obj, m));
}
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}
boolean_t
pmap_is_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_REFERENCED(mmu_obj, m));
}
int
pmap_ts_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_TS_REFERENCED(mmu_obj, m));
}
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
	    prot);
	return (MMU_MAP(mmu_obj, virt, start, end, prot));
}
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
	    object, pindex, size);
	MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}
void
pmap_page_init(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_PAGE_INIT(mmu_obj, m);
}
int
pmap_page_wired_mappings(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}
int
pmap_pinit(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT(mmu_obj, pmap);
	return (1);
}
void
pmap_pinit0(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT0(mmu_obj, pmap);
}
void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
	    prot);
	MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}
void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
	MMU_QENTER(mmu_obj, start, m, count);
}
void
pmap_qremove(vm_offset_t start, int count)
{

	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
	MMU_QREMOVE(mmu_obj, start, count);
}
void
pmap_release(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_RELEASE(mmu_obj, pmap);
}
void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_REMOVE(mmu_obj, pmap, start, end);
}
void
pmap_remove_all(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_ALL(mmu_obj, m);
}
void
pmap_remove_pages(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_REMOVE_PAGES(mmu_obj, pmap);
}
void
pmap_remove_write(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_WRITE(mmu_obj, m);
}
void
pmap_zero_page(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE(mmu_obj, m);
}
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}
void
pmap_zero_page_idle(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
	return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}
void
pmap_activate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_ACTIVATE(mmu_obj, td);
}
void
pmap_deactivate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_DEACTIVATE(mmu_obj, td);
}
/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
	    size);
	MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}
/*
 * Routines used in machine-dependent code
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
	mmu_obj = &mmu_kernel_obj;

	/*
	 * Take care of compiling the selected class, and
	 * then statically initialise the MMU object
	 */
	kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
	kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);

	MMU_BOOTSTRAP(mmu_obj, start, end);
}
void
pmap_cpu_bootstrap(int ap)
{
	/*
	 * No KTR here because our console probably doesn't work yet
	 */

	return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
}
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_MAPDEV(mmu_obj, pa, size));
}
void *
pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t attr)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
	return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
	return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
}
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	MMU_UNMAPDEV(mmu_obj, va, size);
}
vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KEXTRACT(mmu_obj, va));
}
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
	MMU_KENTER(mmu_obj, va, pa);
}
void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
	MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}
int
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
	return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
}
void *
pmap_dumpsys_map(struct pmap_md *md, vm_size_t ofs, vm_size_t *sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, *sz);
	return (MMU_DUMPSYS_MAP(mmu_obj, md, ofs, sz));
}
void
pmap_dumpsys_unmap(struct pmap_md *md, vm_size_t ofs, vm_offset_t va)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, va);
	return (MMU_DUMPSYS_UNMAP(mmu_obj, md, ofs, va));
}
struct pmap_md *
pmap_scan_md(struct pmap_md *prev)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, prev);
	return (MMU_SCAN_MD(mmu_obj, prev));
}
/*
 * MMU install routines. Highest priority wins, equal priority also
 * overrides allowing last-set to win.
 */
SET_DECLARE(mmu_set, mmu_def_t);
int
pmap_mmu_install(char *name, int prio)
{
	mmu_def_t	**mmupp, *mmup;
	static int	curr_prio = 0;

	/*
	 * Try and locate the MMU kobj corresponding to the name
	 */
	SET_FOREACH(mmupp, mmu_set) {
		mmup = *mmupp;

		if (mmup->name &&
		    !strcmp(mmup->name, name) &&
		    (prio >= curr_prio || mmu_def_impl == NULL)) {
			curr_prio = prio;
			mmu_def_impl = mmup;
			return (TRUE);
		}
	}

	return (FALSE);
}
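
/*
 * Usage sketch (the priority constants are illustrative assumptions,
 * not taken from this file): a generic handler installed first can
 * later be displaced by a more specific one carrying a higher
 * priority, and an equal priority also wins, so the last caller at a
 * given level takes effect:
 *
 *	pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
 *	pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_SPECIFIC);	// wins
 */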
int unmapped_buf_allowed;