/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */
/*
 * Kernel and user threads run within one common virtual address space.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff : user process
 * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff : unused
 * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : mmio region
 * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : direct map
 * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : KVA
 */
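/*
 * Editor's sketch (not original code): the direct map above is a linear
 * window onto physical memory, so PA <-> VA conversion is pure arithmetic.
 * The PHYS_TO_DMAP()/DMAP_TO_PHYS() macros used throughout this file
 * assume exactly that layout.
 */
#if 0	/* illustrative only */
static void
example_dmap_roundtrip(vm_paddr_t pa)
{
	vm_offset_t va;

	va = PHYS_TO_DMAP(pa);		/* lands in the 0xc000_... region */
	KASSERT(DMAP_TO_PHYS(va) == pa, ("dmap roundtrip"));
}
#endif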
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#ifdef  DEBUG
#define	debugf(fmt, args...) printf(fmt, ##args)
#else
#define	debugf(fmt, args...)
#endif

#define	PRI0ptrX	"016lx"
/**************************************************************************/
/* PMAP */
/**************************************************************************/

unsigned int kernel_pdirs;
static uma_zone_t ptbl_root_zone;
static pte_t ****kernel_ptbl_root;
/*
 * Base of the pmap_mapdev() region. On 32-bit it immediately follows the
 * userspace address range. On 64-bit it's far above, at (1 << 63), and
 * ranges up to the DMAP, giving 62 bits of PA allowed. This is far larger
 * than the widest Book-E address bus; the e6500 has a 40-bit PA space.
 * This allows us to map akin to the DMAP, with addresses identical to the
 * PA, offset by the base.
 */
#define	VM_MAPDEV_BASE		0x8000000000000000
#define	VM_MAPDEV_PA_MAX	0x4000000000000000 /* Don't encroach on DMAP */
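/*
 * Editor's note: per the comment above, pmap_mapdev() addresses mirror the
 * PA above VM_MAPDEV_BASE; e.g. a (hypothetical) device at PA 0x2000 would
 * appear at VA 0x8000000000002000, and only PAs below VM_MAPDEV_PA_MAX fit
 * before the region would run into the DMAP.
 */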
static void tid_flush(tlbtid_t tid);
static unsigned long ilog2(unsigned long);
/**************************************************************************/
/* Page table management */
/**************************************************************************/

#define	PMAP_ROOT_SIZE	(sizeof(pte_t ***) * PG_ROOT_NENTRIES)
static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va,
    bool nosleep, bool *is_new);
static void ptbl_hold(pmap_t, pte_t *);
static int ptbl_unhold(pmap_t, vm_offset_t);

static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static pte_t *pte_find_next(pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
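/*
 * Editor's sketch of the four-level tree the routines below manage
 * (index macros and level sizes come from pte.h):
 *
 *   pm_root[PG_ROOT_IDX(va)]	-> pte_t ***	L1 directory page
 *          [PDIR_L1_IDX(va)]	-> pte_t **	page directory page
 *          [PDIR_IDX(va)]	-> pte_t *	page table page
 *          [PTBL_IDX(va)]	-> pte_t	the PTE itself
 */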
/**************************************************************************/
/* Page table related */
/**************************************************************************/
/* Allocate a page, to be used in a page table. */
static vm_offset_t
mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
{
	vm_page_t m;
	int req;

	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
	while ((m = vm_page_alloc(NULL, idx, req)) == NULL) {
		if (nosleep)
			return (0);
		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		vm_wait(NULL);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
	}
	if (!(m->flags & PG_ZERO))
		/* Zero whole ptbl. */
		mmu_booke_zero_page(m);

	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
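/*
 * Editor's note: the DMAP address returned above means page table pages
 * are always reached through the direct map, never through transient
 * mappings; ptbl_unhold() and friends rely on that when converting back
 * with DMAP_TO_PHYS().
 */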
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
}
/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(pmap_t pmap, vm_offset_t va)
{
	pte_t ***pdir_l1;
	pte_t **pdir;
	pte_t *ptbl;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)];
	if (pdir_l1 == NULL)
		return (NULL);
	pdir = pdir_l1[PDIR_L1_IDX(va)];
	if (pdir == NULL)
		return (NULL);
	ptbl = pdir[PDIR_IDX(va)];

	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}
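/*
 * Editor's sketch (not original code): typical pte_find() use, with the
 * pmap lock held around the lookup as the other routines in this file do.
 */
#if 0	/* illustrative only */
static bool
example_is_mapped(pmap_t pmap, vm_offset_t va)
{
	pte_t *pte;
	bool valid;

	PMAP_LOCK(pmap);
	pte = pte_find(pmap, va);
	valid = (pte != NULL && PTE_ISVALID(pte));
	PMAP_UNLOCK(pmap);
	return (valid);
}
#endif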
/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
	vm_offset_t va;
	pte_t ****pm_root;
	pte_t *pte;
	unsigned long i, j, k, l;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	va = *pva;
	i = PG_ROOT_IDX(va);
	j = PDIR_L1_IDX(va);
	k = PDIR_IDX(va);
	l = PTBL_IDX(va);
	pm_root = pmap->pm_root;

	/* truncate the VA for later. */
	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
		if (pm_root[i] == NULL)
			continue;
		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
			if (pm_root[i][j] == 0)
				continue;
			for (; k < PDIR_NENTRIES; k++, l = 0) {
				if (pm_root[i][j][k] == NULL)
					continue;
				for (; l < PTBL_NENTRIES; l++) {
					pte = &pm_root[i][j][k][l];
					if (!PTE_ISVALID(pte))
						continue;
					*pva = va + PG_ROOT_SIZE * i +
					    PDIR_L1_SIZE * j +
					    PDIR_SIZE * k + PAGE_SIZE * l;
					return (pte);
				}
			}
		}
	}
	return (NULL);
}
static bool
unhold_free_page(pmap_t pmap, vm_page_t m)
{

	if (vm_page_unwire_noq(m)) {
		vm_page_free_zero(m);
		return (true);
	}

	return (false);
}
static vm_offset_t
get_pgtbl_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
    bool nosleep, bool hold_parent, bool *isnew)
{
	vm_offset_t page;
	vm_page_t m;

	page = ptr_tbl[index];
	KASSERT(page != 0 || pmap != kernel_pmap,
	    ("NULL page table page found in kernel pmap!"));
	if (page == 0) {
		page = mmu_booke_alloc_page(pmap, index, nosleep);
		if (ptr_tbl[index] == 0) {
			*isnew = true;
			ptr_tbl[index] = page;
			if (hold_parent) {
				m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)ptr_tbl));
				m->ref_count++;
			}
			return (page);
		}
		/* Lost the race; free ours and use the winner's page. */
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
		page = ptr_tbl[index];
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}
	*isnew = false;

	return (page);
}
/* Allocate page table. */
static pte_t *
ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
	unsigned int pg_root_idx = PG_ROOT_IDX(va);
	unsigned int pdir_l1_idx = PDIR_L1_IDX(va);
	unsigned int pdir_idx = PDIR_IDX(va);
	vm_offset_t pdir_l1, pdir, ptbl;

	/* When holding a parent, no need to hold the root index pages. */
	pdir_l1 = get_pgtbl_page(pmap, (vm_offset_t *)pmap->pm_root,
	    pg_root_idx, nosleep, false, is_new);
	if (pdir_l1 == 0)
		return (NULL);
	pdir = get_pgtbl_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
	    nosleep, !*is_new, is_new);
	if (pdir == 0)
		return (NULL);
	ptbl = get_pgtbl_page(pmap, (vm_offset_t *)pdir, pdir_idx,
	    nosleep, !*is_new, is_new);

	return ((pte_t *)ptbl);
}
/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
 * when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(pmap_t pmap, vm_offset_t va)
{
	pte_t *ptbl;
	pte_t **pdir;
	pte_t ***pdir_l1;
	vm_page_t m;
	u_int pg_root_idx, pdir_l1_idx, pdir_idx;

	pg_root_idx = PG_ROOT_IDX(va);
	pdir_l1_idx = PDIR_L1_IDX(va);
	pdir_idx = PDIR_IDX(va);

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	pdir_l1 = pmap->pm_root[pg_root_idx];
	pdir = pdir_l1[pdir_l1_idx];
	ptbl = pdir[pdir_idx];

	/* decrement hold count */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	if (!unhold_free_page(pmap, m))
		return (0);

	pdir[pdir_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));
	if (!unhold_free_page(pmap, m))
		return (1);

	pdir_l1[pdir_l1_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));
	if (!unhold_free_page(pmap, m))
		return (1);
	pmap->pm_root[pg_root_idx] = NULL;

	return (1);
}
/*
 * Increment hold count for ptbl pages. This routine is used when new pte
 * entry is being inserted into ptbl.
 */
static void
ptbl_hold(pmap_t pmap, pte_t *ptbl)
{
	vm_page_t m;

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	m->ref_count++;
}
/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
	vm_page_t m;
	pte_t *pte;

	pte = pte_find(pmap, va);
	KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
	    __func__, (uintmax_t)va, pmap));

	if (!PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Handle modified pages. */
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		/* Referenced pages. */
		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		/* Remove pv_entry from pv_list. */
		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(pmap, va));
	}
	return (0);
}
/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte, pte_tmp;
	bool is_new;

	/* Get the page directory pointer. */
	ptbl = ptbl_alloc(pmap, va, nosleep, &is_new);
	if (ptbl == NULL) {
		KASSERT(nosleep, ("nosleep and NULL ptbl"));
		return (ENOMEM);
	}
	if (is_new) {
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is valid mapping for requested va, if there
		 * is, remove it.
		 */
		pte = &ptbl[ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(pmap, ptbl);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}
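/*
 * Editor's sketch (not original code): a minimal wired, cache-coherent
 * 4KB mapping via pte_enter(). The flag combination mirrors the one
 * kernel_pte_alloc() uses below; treat it as an assumption, not a
 * recommendation.
 */
#if 0	/* illustrative only */
static int
example_enter_wired(pmap_t pmap, vm_page_t m, vm_offset_t va)
{
	return (pte_enter(pmap, m, va,
	    PTE_SR | PTE_SW | PTE_M | PTE_WIRED | PTE_PS_4KB, FALSE));
}
#endif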
/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}
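/*
 * Editor's sketch (not original code): pte_vatopa() assumes the caller
 * holds the pmap lock, as the other callers in this file do.
 */
#if 0	/* illustrative only */
static vm_paddr_t
example_va_to_pa(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(pmap, va);	/* 0 if no valid mapping */
	PMAP_UNLOCK(pmap);
	return (pa);
}
#endif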
/* allocate pte entries to manage (addr & mask) to (addr & mask) + size */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
{
	pte_t *pte;
	vm_size_t kva_size;
	int kernel_pdirs, kernel_pgtbls, pdir_l1s;
	vm_offset_t va, l1_va, pdir_va, ptbl_va;
	int i, j, k;

	kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_root = kernel_ptbl_root;
	pdir_l1s = howmany(kva_size, PG_ROOT_SIZE);
	kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE);
	kernel_pgtbls = howmany(kva_size, PDIR_SIZE);

	/* Initialize kernel pdir */
	l1_va = (vm_offset_t)kernel_ptbl_root +
	    round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	pdir_va = l1_va + pdir_l1s * PAGE_SIZE;
	ptbl_va = pdir_va + kernel_pdirs * PAGE_SIZE;
	if (bootverbose) {
		printf("ptbl_root_va: %#lx\n", (vm_offset_t)kernel_ptbl_root);
		printf("l1_va: %#lx (%d entries)\n", l1_va, pdir_l1s);
		printf("pdir_va: %#lx (%d entries)\n", pdir_va, kernel_pdirs);
		printf("ptbl_va: %#lx (%d entries)\n", ptbl_va, kernel_pgtbls);
	}

	va = VM_MIN_KERNEL_ADDRESS;
	for (i = PG_ROOT_IDX(va); i < PG_ROOT_IDX(va) + pdir_l1s;
	    i++, l1_va += PAGE_SIZE) {
		kernel_pmap->pm_root[i] = (pte_t ***)l1_va;
		for (j = 0;
		    j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
		    j++, pdir_va += PAGE_SIZE) {
			kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va;
			for (k = 0;
			    k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
			    k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE)
				kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va;
		}
	}
	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}
static vm_offset_t
mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
{
	vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

	kernel_ptbl_root = (pte_t ****)data_end;

	data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	data_end += howmany(kva_size, PG_ROOT_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_L1_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE;

	return (data_end);
}
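/*
 * Editor's note: symbolically, the reservation above is the rounded root
 * table plus howmany(KVA, PG_ROOT_SIZE) L1 pages, howmany(KVA,
 * PDIR_L1_SIZE) pdir pages and howmany(KVA, PDIR_SIZE) ptbl pages;
 * kernel_pte_alloc() later carves up this same region in the same order
 * (l1_va, pdir_va, ptbl_va).
 */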
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_root, sizeof(pte_t ***) * PG_ROOT_NENTRIES);
}
/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
#ifdef INVARIANTS
	/*
	 * Verify that all page directories are gone.
	 * Protects against reference count leakage.
	 */
	for (int i = 0; i < PG_ROOT_NENTRIES; i++)
		KASSERT(pmap->pm_root[i] == 0,
		    ("Index %d on root page %p is non-zero!\n", i, pmap->pm_root));
#endif
	uma_zfree(ptbl_root_zone, pmap->pm_root);
}
static void
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	int sync_sz, valid;

	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			pa += (va & PAGE_MASK);
			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
}
/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
}
/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(vm_page_t m)
{
	vm_offset_t off, va;

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
}
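/*
 * Editor's note: dcbz zeroes an entire cache line without first reading
 * memory, which is why the loop above steps by cacheline_size; this
 * presumes the direct-map page is cacheable, since dcbz faults on
 * cache-inhibited mappings.
 */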
/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
	dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
}
static inline void
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;
	vm_page_t pa, pb;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		pa = ma[a_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		pb = mb[b_offset >> PAGE_SHIFT];
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
		    a_pg_offset);
		b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
		    b_pg_offset);
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}
static vm_offset_t
mmu_booke_quick_enter_page(vm_page_t m)
{
	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

static void
mmu_booke_quick_remove_page(vm_offset_t addr)
{
}
/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned long
ilog2(unsigned long num)
{
	long lz;

	__asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
	return (63 - lz);
}
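/*
 * Editor's note: cntlzd counts the leading zeros of the 64-bit operand,
 * so 63 - lz is floor(log2(num)), e.g. ilog2(4096) == 12; num must be
 * nonzero for the result to be meaningful.
 */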
/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't
	 * broadcast, so use it for PID invalidation.
	 */
	mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
	__asm __volatile("isync; .long 0x7c200024; isync; msync");

	__asm __volatile("wrtee %0" :: "r"(msr));
}
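/*
 * Editor's note: the ".long 0x7c200024" above encodes the PID form of
 * tlbilx as a raw opcode, presumably so the file still assembles with
 * toolchains that lack the e500mc mnemonics.
 */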