2 # Copyright (c) 2005 Peter Grehan
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions
8 # 1. Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 # 2. Redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution.
14 # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
31 #include <sys/mutex.h>
32 #include <sys/systm.h>
35 #include <vm/vm_page.h>
37 #include <machine/mmuvar.h>
40 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
41 * @brief A set of methods required by all MMU implementations. These
42 * are basically direct call-thru's from the pmap machine-dependent
44 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
51 # Default implementations of some methods
54 static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
55 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
60 static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
65 static void mmu_null_init(mmu_t mmu)
70 static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
76 static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
77 vm_offset_t addr, vm_object_t object, vm_pindex_t index,
83 static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
88 static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
93 static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
98 static void mmu_null_deactivate(struct thread *td)
103 static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
104 vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
109 static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
117 * @brief Change the wiring attribute for the page in the given physical
118 * map and virtual address.
120 * @param _pmap physical map of page
121 * @param _va page virtual address
122 * @param _wired TRUE to increment wired count, FALSE to decrement
124 METHOD void change_wiring {
133 * @brief Clear the 'modified' bit on the given physical page
135 * @param _pg physical page
137 METHOD void clear_modify {
144 * @brief Clear the 'referenced' bit on the given physical page
146 * @param _pg physical page
148 METHOD void clear_reference {
155 * @brief Clear the write and modified bits in each of the given
156 * physical page's mappings
158 * @param _pg physical page
160 METHOD void remove_write {
167 * @brief Copy the address range given by the source physical map, virtual
168 * address and length to the destination physical map and virtual address.
169 * This routine is optional (xxx default null implementation ?)
171 * @param _dst_pmap destination physical map
172 * @param _src_pmap source physical map
173 * @param _dst_addr destination virtual address
174 * @param _len size of range
175 * @param _src_addr source virtual address
181 vm_offset_t _dst_addr;
183 vm_offset_t _src_addr;
184 } DEFAULT mmu_null_copy;
188 * @brief Copy the source physical page to the destination physical page
190 * @param _src source physical page
191 * @param _dst destination physical page
193 METHOD void copy_page {
201 * @brief Create a mapping between a virtual/physical address pair in the
202 * passed physical map with the specified protection and wiring
204 * @param _pmap physical map
205 * @param _va mapping virtual address
206 * @param _p mapping physical page
207 * @param _prot mapping page protection
208 * @param _wired TRUE if page will be wired
221 * @brief Maps a sequence of resident pages belonging to the same object.
223 * @param _pmap physical map
224 * @param _start virtual range start
225 * @param _end virtual range end
226 * @param _m_start physical page mapped at start
227 * @param _prot mapping page protection
229 METHOD void enter_object {
240 * @brief A faster entry point for page mapping where it is possible
241 * to short-circuit some of the tests in pmap_enter.
243 * @param _pmap physical map (and also currently active pmap)
244 * @param _va mapping virtual address
245 * @param _pg mapping physical page
246 * @param _prot new page protection - used to see if page is exec.
248 METHOD void enter_quick {
258 * @brief Reverse map the given virtual address, returning the physical
259 * page associated with the address if a mapping exists.
261 * @param _pmap physical map
262 * @param _va mapping virtual address
264 * @retval 0 No mapping found
265 * @retval addr The mapping physical address
267 METHOD vm_paddr_t extract {
275 * @brief Reverse map the given virtual address, returning the
276 * physical page if found. The page must be held (by calling
277 * vm_page_hold) if the page protection matches the given protection
279 * @param _pmap physical map
280 * @param _va mapping virtual address
281 * @param _prot protection used to determine if physical page
284 * @retval NULL No mapping found
285 * @retval page Pointer to physical page. Held if protections match
287 METHOD vm_page_t extract_and_hold {
296 * @brief Increase kernel virtual address space to the given virtual address.
297 * Not really required for PowerPC, so optional unless the MMU implementation
300 * @param _va new upper limit for kernel virtual address space
302 METHOD void growkernel {
305 } DEFAULT mmu_null_growkernel;
309 * @brief Called from vm_mem_init. Zone allocation is available at
310 * this stage so a convenient time to create zones. This routine is
311 * for MMU-implementation convenience and is optional.
315 } DEFAULT mmu_null_init;
319 * @brief Return if the page has been marked by MMU hardware to have been
322 * @param _pg physical page to test
324 * @retval boolean TRUE if page has been modified
326 METHOD boolean_t is_modified {
333 * @brief Return whether the specified virtual address is a candidate to be
334 * prefaulted in. This routine is optional.
336 * @param _pmap physical map
337 * @param _va virtual address to test
339 * @retval boolean TRUE if the address is a candidate.
341 METHOD boolean_t is_prefaultable {
345 } DEFAULT mmu_null_is_prefaultable;
349 * @brief Return a count of referenced bits for a page, clearing those bits.
350 * Not all referenced bits need to be cleared, but it is necessary that 0
351 * only be returned when there are none set.
353 * @param _m	physical page
355 * @retval int count of referenced bits
357 METHOD boolean_t ts_referenced {
364 * @brief Map the requested physical address range into kernel virtual
365 * address space. The value in _virt is taken as a hint. The virtual
366 * address of the range is returned, or NULL if the mapping could not
367 * be created. The range can be direct-mapped if that is supported.
369 * @param *_virt Hint for start virtual address, and also return
371 * @param _start physical address range start
372 * @param _end physical address range end
373 * @param _prot protection of range (currently ignored)
375 * @retval NULL could not map the area
376 * @retval addr, *_virt mapping start virtual address
378 METHOD vm_offset_t map {
388 * @brief Used to create a contiguous set of read-only mappings for a
389 # given object to try to eliminate a cascade of on-demand faults as
390 * the object is accessed sequentially. This routine is optional.
392 * @param _pmap physical map
393 * @param _addr mapping start virtual address
394 * @param _object device-backed V.M. object to be mapped
395 * @param _pindex page-index within object of mapping start
396 * @param _size size in bytes of mapping
398 METHOD void object_init_pt {
405 } DEFAULT mmu_null_object_init_pt;
409 * @brief Used to determine if the specified page has a mapping for the
410 * given physical map, by scanning the list of reverse-mappings from the
411 * page. The list is scanned to a maximum of 16 entries.
413 * @param _pmap physical map
414 * @param _pg physical page
416 * @retval bool TRUE if the physical map was found in the first 16
417 * reverse-map list entries off the physical page.
419 METHOD boolean_t page_exists_quick {
427 * @brief Initialise the machine-dependent section of the physical page
428 * data structure. This routine is optional.
430 * @param _pg physical page
432 METHOD void page_init {
435 } DEFAULT mmu_null_page_init;
439 * @brief Count the number of managed mappings to the given physical
440 * page that are wired.
442 * @param _pg physical page
444 * @retval int the number of wired, managed mappings to the
445 * given physical page
447 METHOD int page_wired_mappings {
454 * @brief Initialise a physical map data structure
456 * @param _pmap physical map
465 * @brief Initialise the physical map for process 0, the initial process
467 * XXX default to pinit ?
469 * @param _pmap physical map
478 * @brief Set the protection for physical pages in the given virtual address
479 * range to the given value.
481 * @param _pmap physical map
482 * @param _start virtual range start
483 * @param _end virtual range end
484 * @param _prot new page protection
486 METHOD void protect {
496 * @brief Create a mapping in kernel virtual address space for the given array
497 * of wired physical pages.
499 * @param _start mapping virtual address start
500 * @param *_m array of physical page pointers
501 * @param _count array elements
512 * @brief Remove the temporary mappings created by qenter.
514 * @param _start mapping virtual address start
515 * @param _count number of pages in mapping
517 METHOD void qremove {
525 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
526 * should be no existing mappings for the physical map at this point
528 * @param _pmap physical map
530 METHOD void release {
537 * @brief Remove all mappings in the given physical map for the start/end
538 * virtual address range. The range will be page-aligned.
540 * @param _pmap physical map
541 * @param _start mapping virtual address start
542 * @param _end mapping virtual address end
553 * @brief Traverse the reverse-map list off the given physical page and
554 * remove all mappings. Clear the PG_WRITEABLE attribute from the page.
556 * @param _pg physical page
558 METHOD void remove_all {
565 * @brief Remove all mappings in the given start/end virtual address range
566 * for the given physical map. Similar to the remove method, but it is used
567 * when tearing down all mappings in an address space. This method is
568 * optional, since pmap_remove will be called for each valid vm_map in
569 * the address space later.
571 * @param _pmap physical map
572 * @param _start mapping virtual address start
573 * @param _end mapping virtual address end
575 METHOD void remove_pages {
578 } DEFAULT mmu_null_remove_pages;
582 * @brief Zero a physical page. It is not assumed that the page is mapped,
583 * so a temporary (or direct) mapping may need to be used.
585 * @param _pg physical page
587 METHOD void zero_page {
594 * @brief Zero a portion of a physical page, starting at a given offset and
595 * for a given size (multiples of 512 bytes for 4k pages).
597 * @param _pg physical page
598 * @param _off byte offset from start of page
599 * @param _size size of area to zero
601 METHOD void zero_page_area {
610 * @brief Called from the idle loop to zero pages. XXX I think locking
611 * constraints might be different here compared to zero_page.
613 * @param _pg physical page
615 METHOD void zero_page_idle {
622 * @brief Extract mincore(2) information from a mapping. This routine is
623 * optional and is an optimisation: the mincore code will call is_modified
624 * and ts_referenced if no result is returned.
626 * @param _pmap physical map
627 * @param _addr page virtual address
629 * @retval 0 no result
630 * @retval non-zero mincore(2) flag values
636 } DEFAULT mmu_null_mincore;
640 * @brief Perform any operations required to allow a physical map to be used
641 * before its address space is accessed.
643 * @param _td thread associated with physical map
645 METHOD void activate {
651 * @brief Perform any operations required to deactivate a physical map,
652 * for instance as it is context-switched out.
654 * @param _td thread associated with physical map
656 METHOD void deactivate {
659 } DEFAULT mmu_null_deactivate;
662 * @brief Return a hint for the best virtual address to map a tentative
663 * virtual address range in a given VM object. The default is to just
664 * return the given tentative start address.
666 * @param _obj VM backing object
667 * @param _offset	starting offset within the VM object
668 * @param _addr initial guess at virtual address
669 * @param _size size of virtual address range
671 METHOD void align_superpage {
674 vm_ooffset_t _offset;
677 } DEFAULT mmu_null_align_superpage;
683 * INTERNAL INTERFACES
687 * @brief Bootstrap the VM system. At the completion of this routine, the
688 * kernel will be running in its own address space with full control over
691 * @param _start start of reserved memory (obsolete ???)
692 * @param _end end of reserved memory (obsolete ???)
693 * XXX I think the intent of these was to allow
694 * the memory used by kernel text+data+bss and
695 * loader variables/load-time kld's to be carved out
696 * of available physical mem.
699 METHOD void bootstrap {
706 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
707 * for alternate CPUs on SMP systems.
709 * @param _ap Set to 1 if the CPU being set up is an AP
712 METHOD void cpu_bootstrap {
719 * @brief Create a kernel mapping for a given physical address range.
720 * Called by bus code on behalf of device drivers. The mapping does not
721 * have to be a virtual address: it can be a direct-mapped physical address
722 * if that is supported by the MMU.
724 * @param _pa start physical address
725 * @param _size size in bytes of mapping
727 * @retval addr address of mapping.
729 METHOD void * mapdev {
737 * @brief Remove the mapping created by mapdev. Called when a driver
740 * @param _va Mapping address returned from mapdev
741 * @param _size size in bytes of mapping
743 METHOD void unmapdev {
751 * @brief Reverse-map a kernel virtual address
753 * @param _va kernel virtual address to reverse-map
755 * @retval pa physical address corresponding to mapping
757 METHOD vm_offset_t kextract {
764 * @brief Map a wired page into kernel virtual address space
766 * @param _va mapping virtual address
767 * @param _pa mapping physical address
777 * @brief Determine if the given physical address range has been direct-mapped.
779 * @param _pa physical address start
780 * @param _size physical address range size
782 * @retval bool TRUE if the range is direct-mapped.
784 METHOD boolean_t dev_direct_mapped {
792 * @brief Evaluate if a physical page has an executable mapping
794 * @param _pg physical page
796 * @retval bool		TRUE if an executable mapping exists for the given page.
798 METHOD boolean_t page_executable {
805 * @brief Create temporary memory mapping for use by dumpsys().
807 * @param _md The memory chunk in which the mapping lies.
808 * @param _ofs The offset within the chunk of the mapping.
809 * @param _sz The requested size of the mapping.
811 * @retval vm_offset_t The virtual address of the mapping.
813 * The sz argument is modified to reflect the actual size of the
816 METHOD vm_offset_t dumpsys_map {
825 * @brief Remove temporary dumpsys() mapping.
827 * @param _md The memory chunk in which the mapping lies.
828 * @param _ofs The offset within the chunk of the mapping.
829 * @param _va The virtual address of the mapping.
831 METHOD void dumpsys_unmap {
840 * @brief Scan/iterate memory chunks.
842 * @param _prev The previously returned chunk or NULL.
844 * @retval The next (or first when _prev is NULL) chunk.
846 METHOD struct pmap_md * scan_md {
848 struct pmap_md *_prev;
849 } DEFAULT mmu_null_scan_md;