2 # Copyright (c) 2005 Peter Grehan
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions
8 # 1. Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 # 2. Redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution.
14 # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
31 #include <sys/mutex.h>
32 #include <sys/systm.h>
35 #include <vm/vm_page.h>
37 #include <machine/mmuvar.h>
40 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
41 * @brief A set of methods required by all MMU implementations. These
42 * are basically direct call-thru's from the pmap machine-dependent
44 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
51 # Default implementations of some methods
54 static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
55 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
60 static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
65 static void mmu_null_init(mmu_t mmu)
70 static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
76 static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
77 vm_offset_t addr, vm_object_t object, vm_pindex_t index,
83 static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
88 static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
93 static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
99 static void mmu_null_deactivate(struct thread *td)
104 static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
105 vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
110 static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
111 vm_size_t size, vm_memattr_t ma)
113 return MMU_MAPDEV(mmu, pa, size);
116 static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
117 vm_paddr_t pa, vm_memattr_t ma)
119 MMU_KENTER(mmu, va, pa);
122 static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
128 static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
129 vm_size_t sz, vm_memattr_t mode)
134 static size_t mmu_null_scan_pmap(mmu_t mmu)
139 static void *mmu_null_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
144 static void * mmu_null_dump_pmap(mmu_t mmu, void *ctx, void *buf,
153 * @brief Apply the given advice to the specified range of addresses within
154 * the given pmap. Depending on the advice, clear the referenced and/or
155 * modified flags in each mapping and set the mapped page's dirty field.
157 * @param _pmap physical map
158 * @param _start virtual range start
159 * @param _end virtual range end
160 * @param _advice advice to apply
172 * @brief Clear the 'modified' bit on the given physical page
174 * @param _pg physical page
176 METHOD void clear_modify {
183 * @brief Clear the write and modified bits in each of the given
184 * physical page's mappings
186 * @param _pg physical page
188 METHOD void remove_write {
195 * @brief Copy the address range given by the source physical map, virtual
196 * address and length to the destination physical map and virtual address.
197 * This routine is optional (xxx default null implementation ?)
199 * @param _dst_pmap destination physical map
200 * @param _src_pmap source physical map
201 * @param _dst_addr destination virtual address
202 * @param _len size of range
203 * @param _src_addr source virtual address
209 vm_offset_t _dst_addr;
211 vm_offset_t _src_addr;
212 } DEFAULT mmu_null_copy;
216 * @brief Copy the source physical page to the destination physical page
218 * @param _src source physical page
219 * @param _dst destination physical page
221 METHOD void copy_page {
227 METHOD void copy_pages {
230 vm_offset_t _a_offset;
232 vm_offset_t _b_offset;
237 * @brief Create a mapping between a virtual/physical address pair in the
238 * passed physical map with the specified protection and wiring
240 * @param _pmap physical map
241 * @param _va mapping virtual address
242 * @param _p mapping physical page
243 * @param _prot mapping page protection
244 * @param _flags pmap_enter flags
245 * @param _psind superpage size index
259 * @brief Maps a sequence of resident pages belonging to the same object.
261 * @param _pmap physical map
262 * @param _start virtual range start
263 * @param _end virtual range end
264 * @param _m_start physical page mapped at start
265 * @param _prot mapping page protection
267 METHOD void enter_object {
278 * @brief A faster entry point for page mapping where it is possible
279 * to short-circuit some of the tests in pmap_enter.
281 * @param _pmap physical map (and also currently active pmap)
282 * @param _va mapping virtual address
283 * @param _pg mapping physical page
284 * @param _prot new page protection - used to see if page is exec.
286 METHOD void enter_quick {
296 * @brief Reverse map the given virtual address, returning the physical
297 * page associated with the address if a mapping exists.
299 * @param _pmap physical map
300 * @param _va mapping virtual address
302 * @retval 0 No mapping found
303 * @retval addr The mapping physical address
305 METHOD vm_paddr_t extract {
313 * @brief Reverse map the given virtual address, returning the
314 * physical page if found. The page must be held (by calling
315 * vm_page_hold) if the page protection matches the given protection
317 * @param _pmap physical map
318 * @param _va mapping virtual address
319 * @param _prot protection used to determine if physical page
322 * @retval NULL No mapping found
323 * @retval page Pointer to physical page. Held if protections match
325 METHOD vm_page_t extract_and_hold {
334 * @brief Increase kernel virtual address space to the given virtual address.
335 * Not really required for PowerPC, so optional unless the MMU implementation
338 * @param _va new upper limit for kernel virtual address space
340 METHOD void growkernel {
343 } DEFAULT mmu_null_growkernel;
347 * @brief Called from vm_mem_init. Zone allocation is available at
348 * this stage so a convenient time to create zones. This routine is
349 * for MMU-implementation convenience and is optional.
353 } DEFAULT mmu_null_init;
357 * @brief Return if the page has been marked by MMU hardware to have been
360 * @param _pg physical page to test
362 * @retval boolean TRUE if page has been modified
364 METHOD boolean_t is_modified {
371 * @brief Return whether the specified virtual address is a candidate to be
372 * prefaulted in. This routine is optional.
374 * @param _pmap physical map
375 * @param _va virtual address to test
377 * @retval boolean TRUE if the address is a candidate.
379 METHOD boolean_t is_prefaultable {
383 } DEFAULT mmu_null_is_prefaultable;
387 * @brief Return whether or not the specified physical page was referenced
388 * in any physical maps.
390 * @param _pg		physical page
392 * @retval boolean TRUE if page has been referenced
394 METHOD boolean_t is_referenced {
401 * @brief Return a count of referenced bits for a page, clearing those bits.
402 * Not all referenced bits need to be cleared, but it is necessary that 0
403 * only be returned when there are none set.
405 * @param _m		physical page
407 * @retval int count of referenced bits
409 METHOD int ts_referenced {
416 * @brief Map the requested physical address range into kernel virtual
417 * address space. The value in _virt is taken as a hint. The virtual
418 * address of the range is returned, or NULL if the mapping could not
419 * be created. The range can be direct-mapped if that is supported.
421 * @param *_virt Hint for start virtual address, and also return
423 * @param _start physical address range start
424 * @param _end physical address range end
425 * @param _prot protection of range (currently ignored)
427 * @retval NULL could not map the area
428 * @retval addr, *_virt mapping start virtual address
430 METHOD vm_offset_t map {
440 * @brief Used to create a contiguous set of read-only mappings for a
441 * given object to try and eliminate a cascade of on-demand faults as
442 * the object is accessed sequentially. This routine is optional.
444 * @param _pmap physical map
445 * @param _addr mapping start virtual address
446 * @param _object device-backed V.M. object to be mapped
447 * @param _pindex page-index within object of mapping start
448 * @param _size size in bytes of mapping
450 METHOD void object_init_pt {
457 } DEFAULT mmu_null_object_init_pt;
461 * @brief Used to determine if the specified page has a mapping for the
462 * given physical map, by scanning the list of reverse-mappings from the
463 * page. The list is scanned to a maximum of 16 entries.
465 * @param _pmap physical map
466 * @param _pg physical page
468 * @retval bool TRUE if the physical map was found in the first 16
469 * reverse-map list entries off the physical page.
471 METHOD boolean_t page_exists_quick {
479 * @brief Initialise the machine-dependent section of the physical page
480 * data structure. This routine is optional.
482 * @param _pg physical page
484 METHOD void page_init {
487 } DEFAULT mmu_null_page_init;
491 * @brief Count the number of managed mappings to the given physical
492 * page that are wired.
494 * @param _pg physical page
496 * @retval int the number of wired, managed mappings to the
497 * given physical page
499 METHOD int page_wired_mappings {
506 * @brief Initialise a physical map data structure
508 * @param _pmap physical map
517 * @brief Initialise the physical map for process 0, the initial process
519 * XXX default to pinit ?
521 * @param _pmap physical map
530 * @brief Set the protection for physical pages in the given virtual address
531 * range to the given value.
533 * @param _pmap physical map
534 * @param _start virtual range start
535 * @param _end virtual range end
536 * @param _prot new page protection
538 METHOD void protect {
548 * @brief Create a mapping in kernel virtual address space for the given array
549 * of wired physical pages.
551 * @param _start mapping virtual address start
552 * @param *_m array of physical page pointers
553 * @param _count array elements
564 * @brief Remove the temporary mappings created by qenter.
566 * @param _start mapping virtual address start
567 * @param _count number of pages in mapping
569 METHOD void qremove {
577 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
578 * should be no existing mappings for the physical map at this point
580 * @param _pmap physical map
582 METHOD void release {
589 * @brief Remove all mappings in the given physical map for the start/end
590 * virtual address range. The range will be page-aligned.
592 * @param _pmap physical map
593 * @param _start mapping virtual address start
594 * @param _end mapping virtual address end
605 * @brief Traverse the reverse-map list off the given physical page and
606 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
608 * @param _pg physical page
610 METHOD void remove_all {
617 * @brief Remove all mappings in the given start/end virtual address range
618 * for the given physical map. Similar to the remove method, but is used
619 * when tearing down all mappings in an address space. This method is
620 * optional, since pmap_remove will be called for each valid vm_map in
621 * the address space later.
623 * @param _pmap physical map
624 * @param _start mapping virtual address start
625 * @param _end mapping virtual address end
627 METHOD void remove_pages {
630 } DEFAULT mmu_null_remove_pages;
634 * @brief Clear the wired attribute from the mappings for the specified range
635 * of addresses in the given pmap.
637 * @param _pmap physical map
638 * @param _start virtual range start
639 * @param _end virtual range end
650 * @brief Zero a physical page. It is not assumed that the page is mapped,
651 * so a temporary (or direct) mapping may need to be used.
653 * @param _pg physical page
655 METHOD void zero_page {
662 * @brief Zero a portion of a physical page, starting at a given offset and
663 * for a given size (multiples of 512 bytes for 4k pages).
665 * @param _pg physical page
666 * @param _off byte offset from start of page
667 * @param _size size of area to zero
669 METHOD void zero_page_area {
678 * @brief Extract mincore(2) information from a mapping.
680 * @param _pmap physical map
681 * @param _addr page virtual address
682 * @param _pa page physical address
684 * @retval 0 no result
685 * @retval non-zero mincore(2) flag values
692 } DEFAULT mmu_null_mincore;
696 * @brief Perform any operations required to allow a physical map to be used
697 * before its address space is accessed.
699 * @param _td thread associated with physical map
701 METHOD void activate {
707 * @brief Perform any operations required to deactivate a physical map,
708 * for instance as it is context-switched out.
710 * @param _td thread associated with physical map
712 METHOD void deactivate {
715 } DEFAULT mmu_null_deactivate;
718 * @brief Return a hint for the best virtual address to map a tentative
719 * virtual address range in a given VM object. The default is to just
720 * return the given tentative start address.
722 * @param _obj VM backing object
723 * @param _offset starting offset with the VM object
724 * @param _addr initial guess at virtual address
725 * @param _size size of virtual address range
727 METHOD void align_superpage {
730 vm_ooffset_t _offset;
733 } DEFAULT mmu_null_align_superpage;
739 * INTERNAL INTERFACES
743 * @brief Bootstrap the VM system. At the completion of this routine, the
744 * kernel will be running in its own address space with full control over
747 * @param _start start of reserved memory (obsolete ???)
748 * @param _end end of reserved memory (obsolete ???)
749 * XXX I think the intent of these was to allow
750 * the memory used by kernel text+data+bss and
751 * loader variables/load-time kld's to be carved out
752 * of available physical mem.
755 METHOD void bootstrap {
762 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
763 * for alternate CPUs on SMP systems.
765 * @param _ap Set to 1 if the CPU being set up is an AP
768 METHOD void cpu_bootstrap {
775 * @brief Create a kernel mapping for a given physical address range.
776 * Called by bus code on behalf of device drivers. The mapping does not
777 * have to be a virtual address: it can be a direct-mapped physical address
778 * if that is supported by the MMU.
780 * @param _pa start physical address
781 * @param _size size in bytes of mapping
783 * @retval addr address of mapping.
785 METHOD void * mapdev {
792 * @brief Create a kernel mapping for a given physical address range.
793 * Called by bus code on behalf of device drivers. The mapping does not
794 * have to be a virtual address: it can be a direct-mapped physical address
795 * if that is supported by the MMU.
797 * @param _pa start physical address
798 * @param _size size in bytes of mapping
799 * @param _attr cache attributes
801 * @retval addr address of mapping.
803 METHOD void * mapdev_attr {
808 } DEFAULT mmu_null_mapdev_attr;
811 * @brief Change cache control attributes for a page. Should modify all
812 * mappings for that page.
814 * @param _m page to modify
815 * @param _ma new cache control attributes
817 METHOD void page_set_memattr {
821 } DEFAULT mmu_null_page_set_memattr;
824 * @brief Remove the mapping created by mapdev. Called when a driver
827 * @param _va Mapping address returned from mapdev
828 * @param _size size in bytes of mapping
830 METHOD void unmapdev {
837 * @brief Provide a kernel-space pointer that can be used to access the
838 * given userland address. The kernel accessible length returned in klen
839 * may be less than the requested length of the userland buffer (ulen). If
840 * so, retry with a higher address to get access to the later parts of the
841 * buffer. Returns EFAULT if no mapping can be made, else zero.
843 * @param _pm PMAP for the user pointer.
844 * @param _uaddr Userland address to map.
845 * @param _kaddr Corresponding kernel address.
846 * @param _ulen Length of user buffer.
847 * @param _klen Available subset of ulen with _kaddr.
849 METHOD int map_user_ptr {
852 volatile const void *_uaddr;
859 * @brief Decode a kernel pointer, as visible to the current thread,
860 * by setting whether it corresponds to a user or kernel address and
861 * the address in the respective memory maps to which the address as
862 * seen in the kernel corresponds. This is essentially the inverse of
863 * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling.
864 * Returns 0 on success or EFAULT if the address could not be mapped.
866 METHOD int decode_kernel_ptr {
870 vm_offset_t *decoded_addr;
874 * @brief Reverse-map a kernel virtual address
876 * @param _va kernel virtual address to reverse-map
878 * @retval pa physical address corresponding to mapping
880 METHOD vm_paddr_t kextract {
887 * @brief Map a wired page into kernel virtual address space
889 * @param _va mapping virtual address
890 * @param _pa mapping physical address
899 * @brief Map a wired page into kernel virtual address space
901 * @param _va mapping virtual address
902 * @param _pa mapping physical address
903 * @param _ma mapping cache control attributes
905 METHOD void kenter_attr {
910 } DEFAULT mmu_null_kenter_attr;
913 * @brief Unmap a wired page from kernel virtual address space
915 * @param _va mapped virtual address
917 METHOD void kremove {
923 * @brief Determine if the given physical address range has been direct-mapped.
925 * @param _pa physical address start
926 * @param _size physical address range size
928 * @retval bool TRUE if the range is direct-mapped.
930 METHOD boolean_t dev_direct_mapped {
938 * @brief Enforce instruction cache coherency. Typically called after a
939 * region of memory has been modified and before execution of or within
940 * that region is attempted. Setting breakpoints in a process through
941 * ptrace(2) is one example of when the instruction cache needs to be
944 * @param _pm the physical map of the virtual address
945 * @param _va the virtual address of the modified region
946 * @param _sz the size of the modified region
948 METHOD void sync_icache {
957 * @brief Create temporary memory mapping for use by dumpsys().
959 * @param _pa The physical page to map.
960 * @param _sz The requested size of the mapping.
961 * @param _va The virtual address of the mapping.
963 METHOD void dumpsys_map {
972 * @brief Remove temporary dumpsys() mapping.
974 * @param _pa The physical page to map.
975 * @param _sz The requested size of the mapping.
976 * @param _va The virtual address of the mapping.
978 METHOD void dumpsys_unmap {
987 * @brief Initialize memory chunks for dumpsys.
989 METHOD void scan_init {
994 * @brief Scan kernel PMAP, adding mapped physical pages to dump.
996 * @retval pmap_size Number of bytes used by all PTE entries.
998 METHOD size_t scan_pmap {
1000 } DEFAULT mmu_null_scan_pmap;
1003 * @brief Initialize a PMAP dump.
1005 * @param _blkpgs Size of a dump block, in pages.
1007 * @retval ctx Dump context, used by dump_pmap.
1009 METHOD void * dump_pmap_init {
1012 } DEFAULT mmu_null_dump_pmap_init;
1015 * @brief Dump a block of PTEs.
1016 * The size of the dump block is specified in dump_pmap_init and
1017 * the 'buf' argument must be big enough to hold a full block.
1018 * If the page table resides in regular memory, then the 'buf'
1019 * argument is ignored and a pointer to the specified dump block
1020 * is returned instead, avoiding memory copy. Else, the buffer is
1021 * filled with PTEs and the buffer pointer itself is returned.
1022 * In the end, the cursor in 'ctx' is adjusted to point to the next block.
1024 * @param _ctx Dump context, retrieved from dump_pmap_init.
1025 * @param _buf Buffer to hold the dump block contents.
1026 * @param _nbytes Number of bytes dumped.
1028 * @retval NULL No more blocks to dump.
1029 * @retval buf Pointer to dumped data (may be different than _buf).
1031 METHOD void * dump_pmap {
1036 } DEFAULT mmu_null_dump_pmap;
1039 * @brief Create a temporary thread-local KVA mapping of a single page.
1041 * @param _pg The physical page to map
1043 * @retval addr The temporary KVA
1045 METHOD vm_offset_t quick_enter_page {
1051 * @brief Undo a mapping created by quick_enter_page
1053 * @param _va The mapped KVA
1055 METHOD void quick_remove_page {
1061 * @brief Change the specified virtual address range's memory type.
1063 * @param _va The virtual base address to change
1065 * @param _sz Size of the region to change
1067 * @param _mode New mode to set on the VA range
1069 * @retval error 0 on success, EINVAL or ENOMEM on error.
1071 METHOD int change_attr {
1076 } DEFAULT mmu_null_change_attr;