2 # Copyright (c) 2005 Peter Grehan
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions
8 # 1. Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 # 2. Redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution.
14 # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
31 #include <sys/mutex.h>
32 #include <sys/systm.h>
35 #include <vm/vm_page.h>
37 #include <machine/mmuvar.h>
40 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
41 * @brief A set of methods required by all MMU implementations. These
42 * are basically direct call-thru's from the pmap machine-dependent
44 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
52 # Default implementations of some methods
55 static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
56 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
61 static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
66 static void mmu_null_init(mmu_t mmu)
71 static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
77 static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
78 vm_offset_t addr, vm_object_t object, vm_pindex_t index,
84 static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
89 static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
94 static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
100 static void mmu_null_deactivate(struct thread *td)
105 static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
106 vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
111 static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
112 vm_size_t size, vm_memattr_t ma)
114 return MMU_MAPDEV(mmu, pa, size);
117 static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
118 vm_paddr_t pa, vm_memattr_t ma)
120 MMU_KENTER(mmu, va, pa);
123 static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
129 static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
130 vm_size_t sz, vm_memattr_t mode)
135 static size_t mmu_null_scan_pmap(mmu_t mmu)
140 static void *mmu_null_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
145 static void * mmu_null_dump_pmap(mmu_t mmu, void *ctx, void *buf,
154 * @brief Apply the given advice to the specified range of addresses within
155 * the given pmap. Depending on the advice, clear the referenced and/or
156 * modified flags in each mapping and set the mapped page's dirty field.
158 * @param _pmap physical map
159 * @param _start virtual range start
160 * @param _end virtual range end
161 * @param _advice advice to apply
173 * @brief Clear the 'modified' bit on the given physical page
175 * @param _pg physical page
177 METHOD void clear_modify {
184 * @brief Clear the write and modified bits in each of the given
185 * physical page's mappings
187 * @param _pg physical page
189 METHOD void remove_write {
196 * @brief Copy the address range given by the source physical map, virtual
197 * address and length to the destination physical map and virtual address.
198 * This routine is optional (xxx default null implementation ?)
200 * @param _dst_pmap destination physical map
201 * @param _src_pmap source physical map
202 * @param _dst_addr destination virtual address
203 * @param _len size of range
204 * @param _src_addr source virtual address
210 vm_offset_t _dst_addr;
212 vm_offset_t _src_addr;
213 } DEFAULT mmu_null_copy;
217 * @brief Copy the source physical page to the destination physical page
219 * @param _src source physical page
220 * @param _dst destination physical page
222 METHOD void copy_page {
228 METHOD void copy_pages {
231 vm_offset_t _a_offset;
233 vm_offset_t _b_offset;
238 * @brief Create a mapping between a virtual/physical address pair in the
239 * passed physical map with the specified protection and wiring
241 * @param _pmap physical map
242 * @param _va mapping virtual address
243 * @param _p mapping physical page
244 * @param _prot mapping page protection
245 * @param _flags pmap_enter flags
246 * @param _psind superpage size index
260 * @brief Maps a sequence of resident pages belonging to the same object.
262 * @param _pmap physical map
263 * @param _start virtual range start
264 * @param _end virtual range end
265 * @param _m_start physical page mapped at start
266 * @param _prot mapping page protection
268 METHOD void enter_object {
279 * @brief A faster entry point for page mapping where it is possible
280 * to short-circuit some of the tests in pmap_enter.
282 * @param _pmap physical map (and also currently active pmap)
283 * @param _va mapping virtual address
284 * @param _pg mapping physical page
285 * @param _prot new page protection - used to see if page is exec.
287 METHOD void enter_quick {
297 * @brief Reverse map the given virtual address, returning the physical
298 * page associated with the address if a mapping exists.
300 * @param _pmap physical map
301 * @param _va mapping virtual address
303 * @retval 0 No mapping found
304 * @retval addr The mapping physical address
306 METHOD vm_paddr_t extract {
314 * @brief Reverse map the given virtual address, returning the
315 * physical page if found. The page must be held (by calling
316 * vm_page_hold) if the page protection matches the given protection
318 * @param _pmap physical map
319 * @param _va mapping virtual address
320 * @param _prot protection used to determine if physical page
323 * @retval NULL No mapping found
324 * @retval page Pointer to physical page. Held if protections match
326 METHOD vm_page_t extract_and_hold {
335 * @brief Increase kernel virtual address space to the given virtual address.
336 * Not really required for PowerPC, so optional unless the MMU implementation
339 * @param _va new upper limit for kernel virtual address space
341 METHOD void growkernel {
344 } DEFAULT mmu_null_growkernel;
348 * @brief Called from vm_mem_init. Zone allocation is available at
349 * this stage so a convenient time to create zones. This routine is
350 * for MMU-implementation convenience and is optional.
354 } DEFAULT mmu_null_init;
358 * @brief Return if the page has been marked by MMU hardware to have been
361 * @param _pg physical page to test
363 * @retval boolean TRUE if page has been modified
365 METHOD boolean_t is_modified {
372 * @brief Return whether the specified virtual address is a candidate to be
373 * prefaulted in. This routine is optional.
375 * @param _pmap physical map
376 * @param _va virtual address to test
378 * @retval boolean TRUE if the address is a candidate.
380 METHOD boolean_t is_prefaultable {
384 } DEFAULT mmu_null_is_prefaultable;
388 * @brief Return whether or not the specified physical page was referenced
389 * in any physical maps.
391 * @param _pg physical page
393 * @retval boolean TRUE if page has been referenced
395 METHOD boolean_t is_referenced {
402 * @brief Return a count of referenced bits for a page, clearing those bits.
403 * Not all referenced bits need to be cleared, but it is necessary that 0
404 * only be returned when there are none set.
406 * @param _m physical page
408 * @retval int count of referenced bits
410 METHOD int ts_referenced {
417 * @brief Map the requested physical address range into kernel virtual
418 * address space. The value in _virt is taken as a hint. The virtual
419 * address of the range is returned, or NULL if the mapping could not
420 * be created. The range can be direct-mapped if that is supported.
422 * @param *_virt Hint for start virtual address, and also return
424 * @param _start physical address range start
425 * @param _end physical address range end
426 * @param _prot protection of range (currently ignored)
428 * @retval NULL could not map the area
429 * @retval addr, *_virt mapping start virtual address
431 METHOD vm_offset_t map {
441 * @brief Used to create a contiguous set of read-only mappings for a
442 * given object to try and eliminate a cascade of on-demand faults as
443 * the object is accessed sequentially. This routine is optional.
445 * @param _pmap physical map
446 * @param _addr mapping start virtual address
447 * @param _object device-backed V.M. object to be mapped
448 * @param _pindex page-index within object of mapping start
449 * @param _size size in bytes of mapping
451 METHOD void object_init_pt {
458 } DEFAULT mmu_null_object_init_pt;
462 * @brief Used to determine if the specified page has a mapping for the
463 * given physical map, by scanning the list of reverse-mappings from the
464 * page. The list is scanned to a maximum of 16 entries.
466 * @param _pmap physical map
467 * @param _pg physical page
469 * @retval bool TRUE if the physical map was found in the first 16
470 * reverse-map list entries off the physical page.
472 METHOD boolean_t page_exists_quick {
480 * @brief Initialise the machine-dependent section of the physical page
481 * data structure. This routine is optional.
483 * @param _pg physical page
485 METHOD void page_init {
488 } DEFAULT mmu_null_page_init;
492 * @brief Count the number of managed mappings to the given physical
493 * page that are wired.
495 * @param _pg physical page
497 * @retval int the number of wired, managed mappings to the
498 * given physical page
500 METHOD int page_wired_mappings {
507 * @brief Initialise a physical map data structure
509 * @param _pmap physical map
518 * @brief Initialise the physical map for process 0, the initial process
520 * XXX default to pinit ?
522 * @param _pmap physical map
531 * @brief Set the protection for physical pages in the given virtual address
532 * range to the given value.
534 * @param _pmap physical map
535 * @param _start virtual range start
536 * @param _end virtual range end
537 * @param _prot new page protection
539 METHOD void protect {
549 * @brief Create a mapping in kernel virtual address space for the given array
550 * of wired physical pages.
552 * @param _start mapping virtual address start
553 * @param *_m array of physical page pointers
554 * @param _count array elements
565 * @brief Remove the temporary mappings created by qenter.
567 * @param _start mapping virtual address start
568 * @param _count number of pages in mapping
570 METHOD void qremove {
578 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
579 * should be no existing mappings for the physical map at this point
581 * @param _pmap physical map
583 METHOD void release {
590 * @brief Remove all mappings in the given physical map for the start/end
591 * virtual address range. The range will be page-aligned.
593 * @param _pmap physical map
594 * @param _start mapping virtual address start
595 * @param _end mapping virtual address end
606 * @brief Traverse the reverse-map list off the given physical page and
607 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
609 * @param _pg physical page
611 METHOD void remove_all {
618 * @brief Remove all mappings in the given start/end virtual address range
619 * for the given physical map. Similar to the remove method, but it is used
620 * when tearing down all mappings in an address space. This method is
621 * optional, since pmap_remove will be called for each valid vm_map in
622 * the address space later.
624 * @param _pmap physical map
625 * @param _start mapping virtual address start
626 * @param _end mapping virtual address end
628 METHOD void remove_pages {
631 } DEFAULT mmu_null_remove_pages;
635 * @brief Clear the wired attribute from the mappings for the specified range
636 * of addresses in the given pmap.
638 * @param _pmap physical map
639 * @param _start virtual range start
640 * @param _end virtual range end
651 * @brief Zero a physical page. It is not assumed that the page is mapped,
652 * so a temporary (or direct) mapping may need to be used.
654 * @param _pg physical page
656 METHOD void zero_page {
663 * @brief Zero a portion of a physical page, starting at a given offset and
664 * for a given size (multiples of 512 bytes for 4k pages).
666 * @param _pg physical page
667 * @param _off byte offset from start of page
668 * @param _size size of area to zero
670 METHOD void zero_page_area {
679 * @brief Extract mincore(2) information from a mapping.
681 * @param _pmap physical map
682 * @param _addr page virtual address
683 * @param _pa page physical address
685 * @retval 0 no result
686 * @retval non-zero mincore(2) flag values
693 } DEFAULT mmu_null_mincore;
697 * @brief Perform any operations required to allow a physical map to be used
698 * before its address space is accessed.
700 * @param _td thread associated with physical map
702 METHOD void activate {
708 * @brief Perform any operations required to deactivate a physical map,
709 * for instance as it is context-switched out.
711 * @param _td thread associated with physical map
713 METHOD void deactivate {
716 } DEFAULT mmu_null_deactivate;
719 * @brief Return a hint for the best virtual address to map a tentative
720 * virtual address range in a given VM object. The default is to just
721 * return the given tentative start address.
723 * @param _obj VM backing object
724 * @param _offset starting offset within the VM object
725 * @param _addr initial guess at virtual address
726 * @param _size size of virtual address range
728 METHOD void align_superpage {
731 vm_ooffset_t _offset;
734 } DEFAULT mmu_null_align_superpage;
740 * INTERNAL INTERFACES
744 * @brief Bootstrap the VM system. At the completion of this routine, the
745 * kernel will be running in its own address space with full control over
748 * @param _start start of reserved memory (obsolete ???)
749 * @param _end end of reserved memory (obsolete ???)
750 * XXX I think the intent of these was to allow
751 * the memory used by kernel text+data+bss and
752 * loader variables/load-time kld's to be carved out
753 * of available physical mem.
756 METHOD void bootstrap {
763 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
764 * for alternate CPUs on SMP systems.
766 * @param _ap Set to 1 if the CPU being set up is an AP
769 METHOD void cpu_bootstrap {
776 * @brief Create a kernel mapping for a given physical address range.
777 * Called by bus code on behalf of device drivers. The mapping does not
778 * have to be a virtual address: it can be a direct-mapped physical address
779 * if that is supported by the MMU.
781 * @param _pa start physical address
782 * @param _size size in bytes of mapping
784 * @retval addr address of mapping.
786 METHOD void * mapdev {
793 * @brief Create a kernel mapping for a given physical address range.
794 * Called by bus code on behalf of device drivers. The mapping does not
795 * have to be a virtual address: it can be a direct-mapped physical address
796 * if that is supported by the MMU.
798 * @param _pa start physical address
799 * @param _size size in bytes of mapping
800 * @param _attr cache attributes
802 * @retval addr address of mapping.
804 METHOD void * mapdev_attr {
809 } DEFAULT mmu_null_mapdev_attr;
812 * @brief Change cache control attributes for a page. Should modify all
813 * mappings for that page.
815 * @param _m page to modify
816 * @param _ma new cache control attributes
818 METHOD void page_set_memattr {
822 } DEFAULT mmu_null_page_set_memattr;
825 * @brief Remove the mapping created by mapdev. Called when a driver
828 * @param _va Mapping address returned from mapdev
829 * @param _size size in bytes of mapping
831 METHOD void unmapdev {
838 * @brief Provide a kernel-space pointer that can be used to access the
839 * given userland address. The kernel accessible length returned in klen
840 * may be less than the requested length of the userland buffer (ulen). If
841 * so, retry with a higher address to get access to the later parts of the
842 * buffer. Returns EFAULT if no mapping can be made, else zero.
844 * @param _pm PMAP for the user pointer.
845 * @param _uaddr Userland address to map.
846 * @param _kaddr Corresponding kernel address.
847 * @param _ulen Length of user buffer.
848 * @param _klen Available subset of ulen with _kaddr.
850 METHOD int map_user_ptr {
853 volatile const void *_uaddr;
860 * @brief Decode a kernel pointer, as visible to the current thread,
861 * by setting whether it corresponds to a user or kernel address and
862 * the address in the respective memory maps to which the address as
863 * seen in the kernel corresponds. This is essentially the inverse of
864 * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling.
865 * Returns 0 on success or EFAULT if the address could not be mapped.
867 METHOD int decode_kernel_ptr {
871 vm_offset_t *decoded_addr;
875 * @brief Reverse-map a kernel virtual address
877 * @param _va kernel virtual address to reverse-map
879 * @retval pa physical address corresponding to mapping
881 METHOD vm_paddr_t kextract {
888 * @brief Map a wired page into kernel virtual address space
890 * @param _va mapping virtual address
891 * @param _pa mapping physical address
900 * @brief Map a wired page into kernel virtual address space
902 * @param _va mapping virtual address
903 * @param _pa mapping physical address
904 * @param _ma mapping cache control attributes
906 METHOD void kenter_attr {
911 } DEFAULT mmu_null_kenter_attr;
914 * @brief Unmap a wired page from kernel virtual address space
916 * @param _va mapped virtual address
918 METHOD void kremove {
924 * @brief Determine if the given physical address range has been direct-mapped.
926 * @param _pa physical address start
927 * @param _size physical address range size
929 * @retval bool TRUE if the range is direct-mapped.
931 METHOD boolean_t dev_direct_mapped {
939 * @brief Enforce instruction cache coherency. Typically called after a
940 * region of memory has been modified and before execution of or within
941 * that region is attempted. Setting breakpoints in a process through
942 * ptrace(2) is one example of when the instruction cache needs to be
945 * @param _pm the physical map of the virtual address
946 * @param _va the virtual address of the modified region
947 * @param _sz the size of the modified region
949 METHOD void sync_icache {
958 * @brief Create temporary memory mapping for use by dumpsys().
960 * @param _pa The physical page to map.
961 * @param _sz The requested size of the mapping.
962 * @param _va The virtual address of the mapping.
964 METHOD void dumpsys_map {
973 * @brief Remove temporary dumpsys() mapping.
975 * @param _pa The physical page to map.
976 * @param _sz The requested size of the mapping.
977 * @param _va The virtual address of the mapping.
979 METHOD void dumpsys_unmap {
988 * @brief Initialize memory chunks for dumpsys.
990 METHOD void scan_init {
995 * @brief Scan kernel PMAP, adding mapped physical pages to dump.
997 * @retval pmap_size Number of bytes used by all PTE entries.
999 METHOD size_t scan_pmap {
1001 } DEFAULT mmu_null_scan_pmap;
1004 * @brief Initialize a PMAP dump.
1006 * @param _blkpgs Size of a dump block, in pages.
1008 * @retval ctx Dump context, used by dump_pmap.
1010 METHOD void * dump_pmap_init {
1013 } DEFAULT mmu_null_dump_pmap_init;
1016 * @brief Dump a block of PTEs.
1017 * The size of the dump block is specified in dump_pmap_init and
1018 * the 'buf' argument must be big enough to hold a full block.
1019 * If the page table resides in regular memory, then the 'buf'
1020 * argument is ignored and a pointer to the specified dump block
1021 * is returned instead, avoiding memory copy. Else, the buffer is
1022 * filled with PTEs and the buffer pointer itself is returned.
1023 * In the end, the cursor in 'ctx' is adjusted to point to the next block.
1025 * @param _ctx Dump context, retrieved from dump_pmap_init.
1026 * @param _buf Buffer to hold the dump block contents.
1027 * @param _nbytes Number of bytes dumped.
1029 * @retval NULL No more blocks to dump.
1030 * @retval buf Pointer to dumped data (may be different than _buf).
1032 METHOD void * dump_pmap {
1037 } DEFAULT mmu_null_dump_pmap;
1040 * @brief Create a temporary thread-local KVA mapping of a single page.
1042 * @param _pg The physical page to map
1044 * @retval addr The temporary KVA
1046 METHOD vm_offset_t quick_enter_page {
1052 * @brief Undo a mapping created by quick_enter_page
1054 * @param _va The mapped KVA
1056 METHOD void quick_remove_page {
1062 * @brief Change the specified virtual address range's memory type.
1064 * @param _va The virtual base address to change
1066 * @param _sz Size of the region to change
1068 * @param _mode New mode to set on the VA range
1070 * @retval error 0 on success, EINVAL or ENOMEM on error.
1072 METHOD int change_attr {
1077 } DEFAULT mmu_null_change_attr;
1080 * @brief Initialize the page array.
1082 * @param _pages The number of pages to be accounted by the array.
1084 METHOD void page_array_startup {