/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */
/*
 * Kernel and user threads run within one common virtual address space.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff : user process
 * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff : unused
 * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : mmio region
 * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : direct map
 * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : KVA
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#ifdef DEBUG
#define	debugf(fmt, args...)	printf(fmt, ##args)
#else
#define	debugf(fmt, args...)
#endif

#define	PRI0ptrX	"016lx"
/**************************************************************************/
/**************************************************************************/
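
/*
 * Page tables form a four-level trie indexed, from top to bottom, by
 * PG_ROOT_IDX(va), PDIR_L1_IDX(va), PDIR_IDX(va) and PTBL_IDX(va):
 * pm_root (pte_t ****) -> L1 directory -> page directory -> ptbl -> pte_t.
 */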
unsigned int kernel_pdirs;
static uma_zone_t ptbl_root_zone;
static pte_t ****kernel_ptbl_root;
/*
 * Base of the pmap_mapdev() region. On 32-bit it immediately follows the
 * userspace address range. On 64-bit it's far above, at (1 << 63), and
 * ranges up to the DMAP, giving 62 bits of PA allowed. This is far larger
 * than the widest Book-E address bus; the e6500 has a 40-bit PA space.
 * This allows us to map akin to the DMAP, with addresses identical to the
 * PA, offset by the base.
 */
#define	VM_MAPDEV_BASE		0x8000000000000000
#define	VM_MAPDEV_PA_MAX	0x4000000000000000 /* Don't encroach on DMAP */
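
/*
 * For example, a device register at PA 0x8_0000_0000 would be mapped at
 * VA VM_MAPDEV_BASE + 0x8_0000_0000.
 */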
static void tid_flush(tlbtid_t tid);
static unsigned long ilog2(unsigned long);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

#define	PMAP_ROOT_SIZE	(sizeof(pte_t****) * PG_ROOT_NENTRIES)
static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va,
    bool nosleep, bool *is_new);
static void ptbl_hold(pmap_t, pte_t *);
static int ptbl_unhold(pmap_t, vm_offset_t);

static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static pte_t *pte_find_next(pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);

/**************************************************************************/
/* Page table related */
/**************************************************************************/
/* Allocate a page, to be used in a page table. */
static vm_offset_t
mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
{
	vm_page_t m;
	int req;

	req = VM_ALLOC_WIRED | VM_ALLOC_ZERO;
	while ((m = vm_page_alloc_noobj(req)) == NULL) {
		if (nosleep)
			return (0);
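		/*
		 * Drop the locks so the page daemon can make progress,
		 * sleep until a page is free, then retake the locks and
		 * retry the allocation.
		 */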
		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		vm_wait(NULL);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
	}
	m->pindex = idx;

	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
/* Initialize pool of kva ptbl buffers. */
/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(pmap_t pmap, vm_offset_t va)
{
	pte_t ***pdir_l1;
	pte_t **pdir;
	pte_t *ptbl;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)];
	if (pdir_l1 == NULL)
		return (NULL);
	pdir = pdir_l1[PDIR_L1_IDX(va)];
	if (pdir == NULL)
		return (NULL);
	ptbl = pdir[PDIR_IDX(va)];

	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}
/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
	vm_offset_t va;
	pte_t ****pm_root;
	pte_t *pte;
	unsigned long i, j, k, l;

	KASSERT((pmap != NULL), ("pte_find_next: invalid pmap"));

	va = *pva;
	i = PG_ROOT_IDX(va);
	j = PDIR_L1_IDX(va);
	k = PDIR_IDX(va);
	l = PTBL_IDX(va);
	pm_root = pmap->pm_root;

	/* Truncate the VA for later. */
	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
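	/*
	 * Walk the trie from the starting indices; whenever an upper level
	 * advances, the lower-level indices restart at zero.
	 */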
	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
		if (pm_root[i] == 0)
			continue;
		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
			if (pm_root[i][j] == 0)
				continue;
			for (; k < PDIR_NENTRIES; k++, l = 0) {
				if (pm_root[i][j][k] == NULL)
					continue;
				for (; l < PTBL_NENTRIES; l++) {
					pte = &pm_root[i][j][k][l];
					if (!PTE_ISVALID(pte))
						continue;
					*pva = va + PG_ROOT_SIZE * i +
					    PDIR_L1_SIZE * j +
					    PDIR_SIZE * k +
					    PAGE_SIZE * l;
					return (pte);
				}
			}
		}
	}
	return (NULL);
}
static int
unhold_free_page(pmap_t pmap, vm_page_t m)
{

	if (vm_page_unwire_noq(m)) {
		vm_page_free_zero(m);
		return (1);
	}

	return (0);
}
static vm_offset_t
get_pgtbl_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
    bool nosleep, bool hold_parent, bool *isnew)
{
	vm_offset_t page;
	vm_page_t m;

	page = ptr_tbl[index];
	KASSERT(page != 0 || pmap != kernel_pmap,
	    ("NULL page table page found in kernel pmap!"));
	if (page == 0) {
		page = mmu_booke_alloc_page(pmap, index, nosleep);
		if (page == 0)
			return (0);

		if (ptr_tbl[index] == 0) {
			*isnew = true;
			ptr_tbl[index] = page;
			if (hold_parent) {
				m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)ptr_tbl));
				m->ref_count++;
			}
			return (page);
		}
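		/*
		 * Lost the race: another thread installed a page table page
		 * at this index while we slept in the allocator. Release
		 * our page and use the winner's.
		 */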
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
		page = ptr_tbl[index];
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}
	*isnew = false;

	return (page);
}
/* Allocate page table. */
static pte_t *
ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
	unsigned int pg_root_idx = PG_ROOT_IDX(va);
	unsigned int pdir_l1_idx = PDIR_L1_IDX(va);
	unsigned int pdir_idx = PDIR_IDX(va);
	vm_offset_t pdir_l1, pdir, ptbl;

	/* When holding a parent, no need to hold the root index pages. */
	pdir_l1 = get_pgtbl_page(pmap, (vm_offset_t *)pmap->pm_root,
	    pg_root_idx, nosleep, false, is_new);
	if (pdir_l1 == 0)
		return (NULL);
	pdir = get_pgtbl_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
	    nosleep, !*is_new, is_new);
	if (pdir == 0)
		return (NULL);
	ptbl = get_pgtbl_page(pmap, (vm_offset_t *)pdir, pdir_idx,
	    nosleep, !*is_new, is_new);
	if (ptbl == 0)
		return (NULL);

	return ((pte_t *)ptbl);
}
/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
 * when removing a pte entry from a ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(pmap_t pmap, vm_offset_t va)
{
	pte_t ***pdir_l1;
	pte_t **pdir;
	pte_t *ptbl;
	vm_page_t m;
	u_int pg_root_idx, pdir_l1_idx, pdir_idx;

	pg_root_idx = PG_ROOT_IDX(va);
	pdir_l1_idx = PDIR_L1_IDX(va);
	pdir_idx = PDIR_IDX(va);

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	pdir_l1 = pmap->pm_root[pg_root_idx];
	pdir = pdir_l1[pdir_l1_idx];
	ptbl = pdir[pdir_idx];

	/* decrement hold count */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)ptbl));

	if (!unhold_free_page(pmap, m))
		return (0);
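	/*
	 * The ptbl page was freed; prune the now-empty upper levels,
	 * stopping at the first level that is still held.
	 */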
	pdir[pdir_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pdir));

	if (!unhold_free_page(pmap, m))
		return (1);

	pdir_l1[pdir_l1_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pdir_l1));

	if (!unhold_free_page(pmap, m))
		return (1);

	pmap->pm_root[pg_root_idx] = NULL;

	return (1);
}
/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into a ptbl.
 */
static void
ptbl_hold(pmap_t pmap, pte_t *ptbl)
{
	vm_page_t m;

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)ptbl));
	m->ref_count++;
}
/*
 * Clean pte entry, try to free page table pages if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
	vm_page_t m;
	pte_t *pte;

	pte = pte_find(pmap, va);
	KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
	    __func__, (uintmax_t)va, pmap));

	if (!PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Handle modified pages. */
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		/* Referenced pages. */
		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		/* Remove pv_entry from pv_list. */
		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}
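	/*
	 * Flush the stale TLB entry and clear the pte under the tlbivax
	 * mutex, so no CPU can observe a half-updated mapping.
	 */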
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(pmap, va));
	}
	return (0);
}
/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte, pte_tmp;
	bool is_new;

	/* Get the page directory pointer. */
	ptbl = ptbl_alloc(pmap, va, nosleep, &is_new);
	if (ptbl == NULL) {
		KASSERT(nosleep, ("nosleep and NULL ptbl"));
		return (ENOMEM);
	}
	if (is_new) {
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is valid mapping for requested va,
		 * if there is, remove it.
		 */
		pte = &ptbl[ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(pmap, ptbl);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}
/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));

	return (pa);
}
/* Allocate pte entries to manage (addr & mask) to (addr & mask) + size */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
{
	pte_t *pte;
	vm_size_t kva_size;
	int kernel_pdirs, kernel_pgtbls, pdir_l1s;
	vm_offset_t va, l1_va, pdir_va, ptbl_va;
	int i, j, k;

	kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_root = kernel_ptbl_root;
	pdir_l1s = howmany(kva_size, PG_ROOT_SIZE);
	kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE);
	kernel_pgtbls = howmany(kva_size, PDIR_SIZE);

	/* Initialize kernel pdir */
	l1_va = (vm_offset_t)kernel_ptbl_root +
	    round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	pdir_va = l1_va + pdir_l1s * PAGE_SIZE;
	ptbl_va = pdir_va + kernel_pdirs * PAGE_SIZE;

	printf("ptbl_root_va: %#lx\n", (vm_offset_t)kernel_ptbl_root);
	printf("l1_va: %#lx (%d entries)\n", l1_va, pdir_l1s);
	printf("pdir_va: %#lx (%d entries)\n", pdir_va, kernel_pdirs);
	printf("ptbl_va: %#lx (%d entries)\n", ptbl_va, kernel_pgtbls);
	va = VM_MIN_KERNEL_ADDRESS;
	for (i = PG_ROOT_IDX(va); i < PG_ROOT_IDX(va) + pdir_l1s;
	    i++, l1_va += PAGE_SIZE) {
		kernel_pmap->pm_root[i] = (pte_t ***)l1_va;
		for (j = 0;
		    j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
		    j++, pdir_va += PAGE_SIZE) {
			kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va;
			for (k = 0;
			    k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
			    k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE)
				kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va;
		}
	}
	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}
static vm_offset_t
mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
{
	vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

	kernel_ptbl_root = (pte_t ****)data_end;
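
	/*
	 * Reserve, in order: the root index array, then one page per L1
	 * directory, page directory and page table needed to cover KVA.
	 * kernel_pte_alloc() carves these up in the same order.
	 */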
	data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	data_end += howmany(kva_size, PG_ROOT_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_L1_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE;

	return (data_end);
}
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static int
mmu_booke_pinit(pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES);

	return (1);
}
/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	/*
	 * Verify that all page directories are gone.
	 * Protects against reference count leakage.
	 */
	for (int i = 0; i < PG_ROOT_NENTRIES; i++)
		KASSERT(pmap->pm_root[i] == 0,
		    ("Index %d on root page %p is non-zero!\n", i, pmap->pm_root));

	uma_zfree(ptbl_root_zone, pmap->pm_root);
}
static void
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	int sync_sz, valid;

	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
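		/* Sync at most to the end of the current page. */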
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			pa += (va & PAGE_MASK);
			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
}
/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
}
/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(vm_page_t m)
{
	vm_offset_t off, va;

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
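	/*
	 * dcbz zeroes a whole cache line at a time without first reading
	 * it from memory, so step through the page a cache line per
	 * iteration.
	 */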
	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
}
/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
	dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
}
static void
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	vm_page_t pa, pb;
	vm_offset_t a_pg_offset, b_pg_offset;
	caddr_t a_cp, b_cp;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		pa = ma[a_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		pb = mb[b_offset >> PAGE_SHIFT];
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
		    a_pg_offset);
		b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
		    b_pg_offset);
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}
static vm_offset_t
mmu_booke_quick_enter_page(vm_page_t m)
{

	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

static void
mmu_booke_quick_remove_page(vm_offset_t addr)
{
}
/**************************************************************************/
/**************************************************************************/
/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned long
ilog2(unsigned long num)
{
	long lz;

	__asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
	return (63 - lz);
}
/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't
	 * broadcast, so use it for PID invalidation.
	 */
	mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
785 __asm __volatile("isync; .long 0x7c200024; isync; msync");
787 __asm __volatile("wrtee %0" :: "r"(msr));