2 # Copyright (c) 2005 Peter Grehan
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions
8 # 1. Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 # 2. Redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution.
14 # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
31 #include <sys/mutex.h>
32 #include <sys/systm.h>
35 #include <vm/vm_page.h>
37 #include <machine/mmuvar.h>
40 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
41 * @brief A set of methods required by all MMU implementations. These
42 * are basically direct call-thrus from the pmap machine-dependent
44 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
51 # Default implementations of some methods
54 static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
55 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
60 static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
65 static void mmu_null_init(mmu_t mmu)
70 static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
76 static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
77 vm_offset_t addr, vm_object_t object, vm_pindex_t index,
83 static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
88 static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
93 static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
94 vm_paddr_t *locked_pa)
99 static void mmu_null_deactivate(struct thread *td)
104 static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
105 vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
110 static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
111 vm_size_t size, vm_memattr_t ma)
113 return MMU_MAPDEV(mmu, pa, size);
116 static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
117 vm_offset_t pa, vm_memattr_t ma)
119 MMU_KENTER(mmu, va, pa);
122 static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
131 * @brief Apply the given advice to the specified range of addresses within
132 * the given pmap. Depending on the advice, clear the referenced and/or
133 * modified flags in each mapping and set the mapped page's dirty field.
135 * @param _pmap physical map
136 * @param _start virtual range start
137 * @param _end virtual range end
138 * @param _advice advice to apply
150 * @brief Clear the 'modified' bit on the given physical page
152 * @param _pg physical page
154 METHOD void clear_modify {
161 * @brief Clear the write and modified bits in each of the given
162 * physical page's mappings
164 * @param _pg physical page
166 METHOD void remove_write {
173 * @brief Copy the address range given by the source physical map, virtual
174 * address and length to the destination physical map and virtual address.
175 * This routine is optional (xxx default null implementation ?)
177 * @param _dst_pmap destination physical map
178 * @param _src_pmap source physical map
179 * @param _dst_addr destination virtual address
180 * @param _len size of range
181 * @param _src_addr source virtual address
187 vm_offset_t _dst_addr;
189 vm_offset_t _src_addr;
190 } DEFAULT mmu_null_copy;
194 * @brief Copy the source physical page to the destination physical page
196 * @param _src source physical page
197 * @param _dst destination physical page
199 METHOD void copy_page {
205 METHOD void copy_pages {
208 vm_offset_t _a_offset;
210 vm_offset_t _b_offset;
215 * @brief Create a mapping between a virtual/physical address pair in the
216 * passed physical map with the specified protection and wiring
218 * @param _pmap physical map
219 * @param _va mapping virtual address
220 * @param _p mapping physical page
221 * @param _prot mapping page protection
222 * @param _flags pmap_enter flags
223 * @param _psind superpage size index
237 * @brief Maps a sequence of resident pages belonging to the same object.
239 * @param _pmap physical map
240 * @param _start virtual range start
241 * @param _end virtual range end
242 * @param _m_start physical page mapped at start
243 * @param _prot mapping page protection
245 METHOD void enter_object {
256 * @brief A faster entry point for page mapping where it is possible
257 * to short-circuit some of the tests in pmap_enter.
259 * @param _pmap physical map (and also currently active pmap)
260 * @param _va mapping virtual address
261 * @param _pg mapping physical page
262 * @param _prot new page protection - used to see if page is exec.
264 METHOD void enter_quick {
274 * @brief Reverse map the given virtual address, returning the physical
275 * page associated with the address if a mapping exists.
277 * @param _pmap physical map
278 * @param _va mapping virtual address
280 * @retval 0 No mapping found
281 * @retval addr The mapping physical address
283 METHOD vm_paddr_t extract {
291 * @brief Reverse map the given virtual address, returning the
292 * physical page if found. The page must be held (by calling
293 * vm_page_hold) if the page protection matches the given protection
295 * @param _pmap physical map
296 * @param _va mapping virtual address
297 * @param _prot protection used to determine if physical page
300 * @retval NULL No mapping found
301 * @retval page Pointer to physical page. Held if protections match
303 METHOD vm_page_t extract_and_hold {
312 * @brief Increase kernel virtual address space to the given virtual address.
313 * Not really required for PowerPC, so optional unless the MMU implementation
316 * @param _va new upper limit for kernel virtual address space
318 METHOD void growkernel {
321 } DEFAULT mmu_null_growkernel;
325 * @brief Called from vm_mem_init. Zone allocation is available at
326 * this stage so a convenient time to create zones. This routine is
327 * for MMU-implementation convenience and is optional.
331 } DEFAULT mmu_null_init;
335 * @brief Return if the page has been marked by MMU hardware to have been
338 * @param _pg physical page to test
340 * @retval boolean TRUE if page has been modified
342 METHOD boolean_t is_modified {
349 * @brief Return whether the specified virtual address is a candidate to be
350 * prefaulted in. This routine is optional.
352 * @param _pmap physical map
353 * @param _va virtual address to test
355 * @retval boolean TRUE if the address is a candidate.
357 METHOD boolean_t is_prefaultable {
361 } DEFAULT mmu_null_is_prefaultable;
365 * @brief Return whether or not the specified physical page was referenced
366 * in any physical maps.
368 * @param _pg physical page
370 * @retval boolean TRUE if page has been referenced
372 METHOD boolean_t is_referenced {
379 * @brief Return a count of referenced bits for a page, clearing those bits.
380 * Not all referenced bits need to be cleared, but it is necessary that 0
381 * only be returned when there are none set.
383 * @param _m physical page
385 * @retval int count of referenced bits
387 METHOD int ts_referenced {
394 * @brief Map the requested physical address range into kernel virtual
395 * address space. The value in _virt is taken as a hint. The virtual
396 * address of the range is returned, or NULL if the mapping could not
397 * be created. The range can be direct-mapped if that is supported.
399 * @param *_virt Hint for start virtual address, and also return
401 * @param _start physical address range start
402 * @param _end physical address range end
403 * @param _prot protection of range (currently ignored)
405 * @retval NULL could not map the area
406 * @retval addr, *_virt mapping start virtual address
408 METHOD vm_offset_t map {
418 * @brief Used to create a contiguous set of read-only mappings for a
419 * given object to try and eliminate a cascade of on-demand faults as
420 * the object is accessed sequentially. This routine is optional.
422 * @param _pmap physical map
423 * @param _addr mapping start virtual address
424 * @param _object device-backed V.M. object to be mapped
425 * @param _pindex page-index within object of mapping start
426 * @param _size size in bytes of mapping
428 METHOD void object_init_pt {
435 } DEFAULT mmu_null_object_init_pt;
439 * @brief Used to determine if the specified page has a mapping for the
440 * given physical map, by scanning the list of reverse-mappings from the
441 * page. The list is scanned to a maximum of 16 entries.
443 * @param _pmap physical map
444 * @param _pg physical page
446 * @retval bool TRUE if the physical map was found in the first 16
447 * reverse-map list entries off the physical page.
449 METHOD boolean_t page_exists_quick {
457 * @brief Initialise the machine-dependent section of the physical page
458 * data structure. This routine is optional.
460 * @param _pg physical page
462 METHOD void page_init {
465 } DEFAULT mmu_null_page_init;
469 * @brief Count the number of managed mappings to the given physical
470 * page that are wired.
472 * @param _pg physical page
474 * @retval int the number of wired, managed mappings to the
475 * given physical page
477 METHOD int page_wired_mappings {
484 * @brief Initialise a physical map data structure
486 * @param _pmap physical map
495 * @brief Initialise the physical map for process 0, the initial process
497 * XXX default to pinit ?
499 * @param _pmap physical map
508 * @brief Set the protection for physical pages in the given virtual address
509 * range to the given value.
511 * @param _pmap physical map
512 * @param _start virtual range start
513 * @param _end virtual range end
514 * @param _prot new page protection
516 METHOD void protect {
526 * @brief Create a mapping in kernel virtual address space for the given array
527 * of wired physical pages.
529 * @param _start mapping virtual address start
530 * @param *_m array of physical page pointers
531 * @param _count array elements
542 * @brief Remove the temporary mappings created by qenter.
544 * @param _start mapping virtual address start
545 * @param _count number of pages in mapping
547 METHOD void qremove {
555 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
556 * should be no existing mappings for the physical map at this point
558 * @param _pmap physical map
560 METHOD void release {
567 * @brief Remove all mappings in the given physical map for the start/end
568 * virtual address range. The range will be page-aligned.
570 * @param _pmap physical map
571 * @param _start mapping virtual address start
572 * @param _end mapping virtual address end
583 * @brief Traverse the reverse-map list off the given physical page and
584 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
586 * @param _pg physical page
588 METHOD void remove_all {
595 * @brief Remove all mappings in the given start/end virtual address range
596 * for the given physical map. Similar to the remove method, but it is used
597 * when tearing down all mappings in an address space. This method is
598 * optional, since pmap_remove will be called for each valid vm_map in
599 * the address space later.
601 * @param _pmap physical map
602 * @param _start mapping virtual address start
603 * @param _end mapping virtual address end
605 METHOD void remove_pages {
608 } DEFAULT mmu_null_remove_pages;
612 * @brief Clear the wired attribute from the mappings for the specified range
613 * of addresses in the given pmap.
615 * @param _pmap physical map
616 * @param _start virtual range start
617 * @param _end virtual range end
628 * @brief Zero a physical page. It is not assumed that the page is mapped,
629 * so a temporary (or direct) mapping may need to be used.
631 * @param _pg physical page
633 METHOD void zero_page {
640 * @brief Zero a portion of a physical page, starting at a given offset and
641 * for a given size (multiples of 512 bytes for 4k pages).
643 * @param _pg physical page
644 * @param _off byte offset from start of page
645 * @param _size size of area to zero
647 METHOD void zero_page_area {
656 * @brief Called from the idle loop to zero pages. XXX I think locking
657 * constraints might be different here compared to zero_page.
659 * @param _pg physical page
661 METHOD void zero_page_idle {
668 * @brief Extract mincore(2) information from a mapping.
670 * @param _pmap physical map
671 * @param _addr page virtual address
672 * @param _locked_pa page physical address
674 * @retval 0 no result
675 * @retval non-zero mincore(2) flag values
681 vm_paddr_t *_locked_pa;
682 } DEFAULT mmu_null_mincore;
686 * @brief Perform any operations required to allow a physical map to be used
687 * before its address space is accessed.
689 * @param _td thread associated with physical map
691 METHOD void activate {
697 * @brief Perform any operations required to deactivate a physical map,
698 * for instance as it is context-switched out.
700 * @param _td thread associated with physical map
702 METHOD void deactivate {
705 } DEFAULT mmu_null_deactivate;
708 * @brief Return a hint for the best virtual address to map a tentative
709 * virtual address range in a given VM object. The default is to just
710 * return the given tentative start address.
712 * @param _obj VM backing object
713 * @param _offset starting offset within the VM object
714 * @param _addr initial guess at virtual address
715 * @param _size size of virtual address range
717 METHOD void align_superpage {
720 vm_ooffset_t _offset;
723 } DEFAULT mmu_null_align_superpage;
729 * INTERNAL INTERFACES
733 * @brief Bootstrap the VM system. At the completion of this routine, the
734 * kernel will be running in its own address space with full control over
737 * @param _start start of reserved memory (obsolete ???)
738 * @param _end end of reserved memory (obsolete ???)
739 * XXX I think the intent of these was to allow
740 * the memory used by kernel text+data+bss and
741 * loader variables/load-time kld's to be carved out
742 * of available physical mem.
745 METHOD void bootstrap {
752 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
753 * for alternate CPUs on SMP systems.
755 * @param _ap Set to 1 if the CPU being set up is an AP
758 METHOD void cpu_bootstrap {
765 * @brief Create a kernel mapping for a given physical address range.
766 * Called by bus code on behalf of device drivers. The mapping does not
767 * have to be a virtual address: it can be a direct-mapped physical address
768 * if that is supported by the MMU.
770 * @param _pa start physical address
771 * @param _size size in bytes of mapping
773 * @retval addr address of mapping.
775 METHOD void * mapdev {
782 * @brief Create a kernel mapping for a given physical address range.
783 * Called by bus code on behalf of device drivers. The mapping does not
784 * have to be a virtual address: it can be a direct-mapped physical address
785 * if that is supported by the MMU.
787 * @param _pa start physical address
788 * @param _size size in bytes of mapping
789 * @param _attr cache attributes
791 * @retval addr address of mapping.
793 METHOD void * mapdev_attr {
798 } DEFAULT mmu_null_mapdev_attr;
801 * @brief Change cache control attributes for a page. Should modify all
802 * mappings for that page.
804 * @param _m page to modify
805 * @param _ma new cache control attributes
807 METHOD void page_set_memattr {
811 } DEFAULT mmu_null_page_set_memattr;
814 * @brief Remove the mapping created by mapdev. Called when a driver
817 * @param _va Mapping address returned from mapdev
818 * @param _size size in bytes of mapping
820 METHOD void unmapdev {
828 * @brief Reverse-map a kernel virtual address
830 * @param _va kernel virtual address to reverse-map
832 * @retval pa physical address corresponding to mapping
834 METHOD vm_paddr_t kextract {
841 * @brief Map a wired page into kernel virtual address space
843 * @param _va mapping virtual address
844 * @param _pa mapping physical address
853 * @brief Map a wired page into kernel virtual address space
855 * @param _va mapping virtual address
856 * @param _pa mapping physical address
857 * @param _ma mapping cache control attributes
859 METHOD void kenter_attr {
864 } DEFAULT mmu_null_kenter_attr;
867 * @brief Determine if the given physical address range has been direct-mapped.
869 * @param _pa physical address start
870 * @param _size physical address range size
872 * @retval bool TRUE if the range is direct-mapped.
874 METHOD boolean_t dev_direct_mapped {
882 * @brief Enforce instruction cache coherency. Typically called after a
883 * region of memory has been modified and before execution of or within
884 * that region is attempted. Setting breakpoints in a process through
885 * ptrace(2) is one example of when the instruction cache needs to be
888 * @param _pm the physical map of the virtual address
889 * @param _va the virtual address of the modified region
890 * @param _sz the size of the modified region
892 METHOD void sync_icache {
901 * @brief Create temporary memory mapping for use by dumpsys().
903 * @param _pa The physical page to map.
904 * @param _sz The requested size of the mapping.
905 * @param _va The virtual address of the mapping.
907 METHOD void dumpsys_map {
916 * @brief Remove temporary dumpsys() mapping.
918 * @param _pa The physical page to map.
919 * @param _sz The requested size of the mapping.
920 * @param _va The virtual address of the mapping.
922 METHOD void dumpsys_unmap {
931 * @brief Initialize memory chunks for dumpsys.
933 METHOD void scan_init {