# Copyright (c) 2005 Peter Grehan
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm_page.h>

#include <machine/mmuvar.h>

 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are basically direct call-throughs from the pmap machine-dependent
 * code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
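
/*
 * Illustrative sketch (not part of this interface): the machine-independent
 * pmap entry points are thin wrappers that dispatch into these methods via
 * the generated MMU_*() macros.  The mmu_obj handle name is an assumption
 * based on the PowerPC pmap dispatch code and may differ.
 *
 *	vm_paddr_t
 *	pmap_extract(pmap_t pmap, vm_offset_t va)
 *	{
 *		return (MMU_EXTRACT(mmu_obj, pmap, va));
 *	}
 */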

# Default implementations of some methods

static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)

static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)

static void mmu_null_init(mmu_t mmu)

static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
    vm_offset_t va)

static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
    vm_size_t size)

static void mmu_null_page_init(mmu_t mmu, vm_page_t m)

static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)

static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_paddr_t *locked_pa)

static void mmu_null_deactivate(struct thread *td)

static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)

static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
    vm_size_t size, vm_memattr_t ma)
{
	return MMU_MAPDEV(mmu, pa, size);
}

static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
    vm_paddr_t pa, vm_memattr_t ma)
{
	MMU_KENTER(mmu, va, pa);
}

static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
    vm_memattr_t ma)

static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
    vm_size_t sz, vm_memattr_t mode)
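
/*
 * Illustrative sketch (not part of this interface): an MMU implementation
 * provides its methods in a KObj method table and registers it with the
 * MMU_DEF() macro from <machine/mmuvar.h>.  The mymmu_* names are
 * placeholders; consult an existing implementation for the exact
 * MMU_DEF() arguments.
 *
 *	static mmu_method_t mymmu_methods[] = {
 *		MMUMETHOD(mmu_bootstrap,	mymmu_bootstrap),
 *		MMUMETHOD(mmu_enter,		mymmu_enter),
 *		MMUMETHOD(mmu_extract,		mymmu_extract),
 *		MMUMETHOD(mmu_remove,		mymmu_remove),
 *		{ 0, 0 }
 *	};
 *
 *	MMU_DEF(mymmu, "mymmu", mymmu_methods, 0);
 */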

 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap. Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 * @param _pmap physical map
 * @param _start virtual range start
 * @param _end virtual range end
 * @param _advice advice to apply

 * @brief Clear the 'modified' bit on the given physical page
 * @param _pg physical page
METHOD void clear_modify {

 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 * @param _pg physical page
METHOD void remove_write {

 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; the default is a null implementation.
 * @param _dst_pmap destination physical map
 * @param _src_pmap source physical map
 * @param _dst_addr destination virtual address
 * @param _len size of range
 * @param _src_addr source virtual address
	vm_offset_t _dst_addr;
	vm_offset_t _src_addr;
} DEFAULT mmu_null_copy;

 * @brief Copy the source physical page to the destination physical page
 * @param _src source physical page
 * @param _dst destination physical page
METHOD void copy_page {

METHOD void copy_pages {
	vm_offset_t _a_offset;
	vm_offset_t _b_offset;

 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 * @param _pmap physical map
 * @param _va mapping virtual address
 * @param _p mapping physical page
 * @param _prot mapping page protection
 * @param _flags pmap_enter flags
 * @param _psind superpage size index

 * @brief Maps a sequence of resident pages belonging to the same object.
 * @param _pmap physical map
 * @param _start virtual range start
 * @param _end virtual range end
 * @param _m_start physical page mapped at start
 * @param _prot mapping page protection
METHOD void enter_object {

 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 * @param _pmap physical map (and also currently active pmap)
 * @param _va mapping virtual address
 * @param _pg mapping physical page
 * @param _prot new page protection - used to see if the page is executable
METHOD void enter_quick {

 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 * @param _pmap physical map
 * @param _va mapping virtual address
 * @retval 0 No mapping found
 * @retval addr The mapping physical address
METHOD vm_paddr_t extract {

 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection
 * @param _pmap physical map
 * @param _va mapping virtual address
 * @param _prot protection used to determine if physical page
 * @retval NULL No mapping found
 * @retval page Pointer to physical page. Held if protections match
METHOD vm_page_t extract_and_hold {
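
/*
 * Illustrative usage sketch (assumptions noted): keep the page resident
 * while it is examined; vm_page_unhold() is assumed to be the primitive
 * that releases the hold taken by this method.
 *
 *	vm_page_t m;
 *
 *	m = pmap_extract_and_hold(pmap, va, VM_PROT_WRITE);
 *	if (m != NULL) {
 *		... examine or copy the page ...
 *		vm_page_unhold(m);
 *	}
 */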

 * @brief Increase kernel virtual address space to the given virtual address.
 * Not really required for PowerPC, so optional unless the MMU implementation
 * @param _va new upper limit for kernel virtual address space
METHOD void growkernel {
} DEFAULT mmu_null_growkernel;

 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so it is a convenient time to create zones. This routine is
 * for MMU-implementation convenience and is optional.
} DEFAULT mmu_null_init;

 * @brief Return if the page has been marked by MMU hardware to have been
 * modified.
 * @param _pg physical page to test
 * @retval boolean TRUE if page has been modified
METHOD boolean_t is_modified {

 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 * @param _pmap physical map
 * @param _va virtual address to test
 * @retval boolean TRUE if the address is a candidate.
METHOD boolean_t is_prefaultable {
} DEFAULT mmu_null_is_prefaultable;

 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 * @param _pg physical page
 * @retval boolean TRUE if page has been referenced
METHOD boolean_t is_referenced {

 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 * @param _m physical page
 * @retval int count of referenced bits
METHOD int ts_referenced {

 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 * @param *_virt Hint for start virtual address, and also return value
 * @param _start physical address range start
 * @param _end physical address range end
 * @param _prot protection of range (currently ignored)
 * @retval NULL could not map the area
 * @retval addr, *_virt mapping start virtual address
METHOD vm_offset_t map {
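
/*
 * Illustrative sketch (pa_start, pa_end and the use of virtual_avail are
 * assumptions about typical startup-time usage): map a physical range
 * into KVA and advance the KVA cursor that was passed by reference.
 *
 *	vm_offset_t va = virtual_avail;
 *	void *p;
 *
 *	p = (void *)pmap_map(&va, pa_start, pa_end,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;
 */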

 * @brief Used to create a contiguous set of read-only mappings for a
 * given object, to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 * @param _pmap physical map
 * @param _addr mapping start virtual address
 * @param _object device-backed V.M. object to be mapped
 * @param _pindex page-index within object of mapping start
 * @param _size size in bytes of mapping
METHOD void object_init_pt {
} DEFAULT mmu_null_object_init_pt;

 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 * @param _pmap physical map
 * @param _pg physical page
 * @retval bool TRUE if the physical map was found in the first 16
 * reverse-map list entries off the physical page.
METHOD boolean_t page_exists_quick {

 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 * @param _pg physical page
METHOD void page_init {
} DEFAULT mmu_null_page_init;

 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 * @param _pg physical page
 * @retval int the number of wired, managed mappings to the
 * given physical page
METHOD int page_wired_mappings {

 * @brief Initialise a physical map data structure
 * @param _pmap physical map

 * @brief Initialise the physical map for process 0, the initial process
 * XXX default to pinit ?
 * @param _pmap physical map

 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 * @param _pmap physical map
 * @param _start virtual range start
 * @param _end virtual range end
 * @param _prot new page protection
METHOD void protect {

 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages. (A qenter/qremove usage sketch follows the
 * qremove method below.)
 * @param _start mapping virtual address start
 * @param *_m array of physical page pointers
 * @param _count array elements

 * @brief Remove the temporary mappings created by qenter.
 * @param _start mapping virtual address start
 * @param _count number of pages in mapping
METHOD void qremove {
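
/*
 * Illustrative usage sketch (the wired page array ma and the page count
 * npages are assumed to be set up by the caller): map an array of wired
 * pages into a contiguous KVA window, then tear the window down.
 *
 *	vm_offset_t va;
 *
 *	va = kva_alloc(npages * PAGE_SIZE);
 *	pmap_qenter(va, ma, npages);
 *	... access the pages through va ...
 *	pmap_qremove(va, npages);
 *	kva_free(va, npages * PAGE_SIZE);
 */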

 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
 * should be no existing mappings for the physical map at this point
 * @param _pmap physical map
METHOD void release {

 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 * @param _pmap physical map
 * @param _start mapping virtual address start
 * @param _end mapping virtual address end

 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 * @param _pg physical page
METHOD void remove_all {

 * @brief Remove all mappings in the given start/end virtual address range
 * for the given physical map. Similar to the remove method, but it is used
 * when tearing down all mappings in an address space. This method is
 * optional, since pmap_remove will be called for each valid vm_map in
 * the address space later.
 * @param _pmap physical map
 * @param _start mapping virtual address start
 * @param _end mapping virtual address end
METHOD void remove_pages {
} DEFAULT mmu_null_remove_pages;

 * @brief Clear the wired attribute from the mappings for the specified range
 * of addresses in the given pmap.
 * @param _pmap physical map
 * @param _start virtual range start
 * @param _end virtual range end

 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 * @param _pg physical page
METHOD void zero_page {
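
/*
 * Illustrative sketch: the machine-independent VM zeroes a freshly
 * allocated page through this method when the allocator did not hand
 * back a pre-zeroed one (the PG_ZERO convention).
 *
 *	if ((m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */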

 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 * @param _pg physical page
 * @param _off byte offset from start of page
 * @param _size size of area to zero
METHOD void zero_page_area {

 * @brief Extract mincore(2) information from a mapping.
 * @param _pmap physical map
 * @param _addr page virtual address
 * @param _locked_pa page physical address
 * @retval 0 no result
 * @retval non-zero mincore(2) flag values
	vm_paddr_t *_locked_pa;
} DEFAULT mmu_null_mincore;

 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 * @param _td thread associated with physical map
METHOD void activate {

 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 * @param _td thread associated with physical map
METHOD void deactivate {
} DEFAULT mmu_null_deactivate;

 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 * @param _obj VM backing object
 * @param _offset starting offset within the VM object
 * @param _addr initial guess at virtual address
 * @param _size size of virtual address range
METHOD void align_superpage {
	vm_ooffset_t _offset;
} DEFAULT mmu_null_align_superpage;

 * INTERNAL INTERFACES

 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 * @param _start start of reserved memory (obsolete ???)
 * @param _end end of reserved memory (obsolete ???)
 *	XXX I think the intent of these was to allow
 *	the memory used by kernel text+data+bss and
 *	loader variables/load-time kld's to be carved out
 *	of available physical mem.
METHOD void bootstrap {

 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 * @param _ap Set to 1 if the CPU being set up is an AP
METHOD void cpu_bootstrap {

 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU. (A mapdev/unmapdev usage sketch follows
 * the unmapdev method below.)
 * @param _pa start physical address
 * @param _size size in bytes of mapping
 * @retval addr address of mapping.
METHOD void * mapdev {

 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 * @param _pa start physical address
 * @param _size size in bytes of mapping
 * @param _attr cache attributes
 * @retval addr address of mapping.
METHOD void * mapdev_attr {
} DEFAULT mmu_null_mapdev_attr;

 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 * @param _m page to modify
 * @param _ma new cache control attributes
METHOD void page_set_memattr {
} DEFAULT mmu_null_page_set_memattr;

 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 * @param _va Mapping address returned from mapdev
 * @param _size size in bytes of mapping
METHOD void unmapdev {
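
/*
 * Illustrative usage sketch (not part of this interface): a driver maps a
 * register window at attach time and removes the mapping on detach.  The
 * pa and size values are placeholders.
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(pa, size);
 *	if (regs == NULL)
 *		return (ENXIO);
 *	... access device registers through regs ...
 *	pmap_unmapdev((vm_offset_t)regs, size);
 */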

 * @brief Provide a kernel-space pointer that can be used to access the
 * given userland address. The kernel accessible length returned in klen
 * may be less than the requested length of the userland buffer (ulen). If
 * so, retry with a higher address to get access to the later parts of the
 * buffer. Returns EFAULT if no mapping can be made, else zero.
 * @param _pm PMAP for the user pointer.
 * @param _uaddr Userland address to map.
 * @param _kaddr Corresponding kernel address.
 * @param _ulen Length of user buffer.
 * @param _klen Length of the buffer subset accessible via _kaddr.
METHOD int map_user_ptr {
	volatile const void *_uaddr;
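
/*
 * Illustrative sketch (assumptions noted): copy a userland buffer into
 * the kernel by repeatedly mapping it with pmap_map_user_ptr() and
 * advancing by the kernel-accessible length (klen) returned on each
 * call.  The copyin_sketch() name is hypothetical; the real consumers
 * are the PowerPC copyin/copyout routines.  A non-zero return is the
 * EFAULT from a failed mapping attempt.
 *
 *	int
 *	copyin_sketch(pmap_t pm, const void *uaddr, void *kbuf, size_t ulen)
 *	{
 *		const char *up = uaddr;
 *		char *kp = kbuf;
 *		void *kaddr;
 *		size_t klen;
 *		int error;
 *
 *		while (ulen > 0) {
 *			error = pmap_map_user_ptr(pm, up, &kaddr, ulen, &klen);
 *			if (error != 0)
 *				return (error);
 *			memcpy(kp, kaddr, klen);
 *			up += klen;
 *			kp += klen;
 *			ulen -= klen;
 *		}
 *		return (0);
 *	}
 */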

 * @brief Decode a kernel pointer, as visible to the current thread, by
 * setting whether it corresponds to a user or kernel address and, in the
 * respective memory map, the address to which the kernel-visible address
 * corresponds. This is essentially the inverse of
 * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling.
 * Returns 0 on success or EFAULT if the address could not be mapped.
METHOD int decode_kernel_ptr {
	vm_offset_t *decoded_addr;

 * @brief Reverse-map a kernel virtual address
 * @param _va kernel virtual address to reverse-map
 * @retval pa physical address corresponding to mapping
METHOD vm_paddr_t kextract {

 * @brief Map a wired page into kernel virtual address space
 * @param _va mapping virtual address
 * @param _pa mapping physical address

 * @brief Map a wired page into kernel virtual address space
 * @param _va mapping virtual address
 * @param _pa mapping physical address
 * @param _ma mapping cache control attributes
METHOD void kenter_attr {
} DEFAULT mmu_null_kenter_attr;

 * @brief Unmap a wired page from kernel virtual address space
 * @param _va mapped virtual address
METHOD void kremove {

 * @brief Determine if the given physical address range has been direct-mapped.
 * @param _pa physical address start
 * @param _size physical address range size
 * @retval bool TRUE if the range is direct-mapped.
METHOD boolean_t dev_direct_mapped {

 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 * @param _pm the physical map of the virtual address
 * @param _va the virtual address of the modified region
 * @param _sz the size of the modified region
METHOD void sync_icache {
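
/*
 * Illustrative sketch (target-process lookup elided): after storing a
 * breakpoint instruction in another process's text, make the change
 * visible to instruction fetch.
 *
 *	pmap_sync_icache(vmspace_pmap(p->p_vmspace), va, sizeof(uint32_t));
 */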

 * @brief Create temporary memory mapping for use by dumpsys().
 * @param _pa The physical page to map.
 * @param _sz The requested size of the mapping.
 * @param _va The virtual address of the mapping.
METHOD void dumpsys_map {

 * @brief Remove temporary dumpsys() mapping.
 * @param _pa The physical page to map.
 * @param _sz The requested size of the mapping.
 * @param _va The virtual address of the mapping.
METHOD void dumpsys_unmap {

 * @brief Initialize memory chunks for dumpsys.
METHOD void scan_init {

 * @brief Create a temporary thread-local KVA mapping of a single page.
 * @param _pg The physical page to map
 * @retval addr The temporary KVA
METHOD vm_offset_t quick_enter_page {

 * @brief Undo a mapping created by quick_enter_page
 * @param _va The mapped KVA
METHOD void quick_remove_page {
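
/*
 * Illustrative usage sketch: a short-lived, per-thread mapping used to
 * touch one physical page without allocating permanent KVA.
 *
 *	vm_offset_t qva;
 *
 *	qva = pmap_quick_enter_page(m);
 *	bzero((void *)qva, PAGE_SIZE);
 *	pmap_quick_remove_page(qva);
 */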

 * @brief Change the specified virtual address range's memory type.
 * @param _va The virtual base address to change
 * @param _sz Size of the region to change
 * @param _mode New mode to set on the VA range
 * @retval error 0 on success, EINVAL or ENOMEM on error.
METHOD int change_attr {
} DEFAULT mmu_null_change_attr;