/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff : user process
 * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff : unused
 * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : mmio region
 * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : direct map
 * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : KVA
 */
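
/*
 * Illustrative sketch (not part of the original code): classifying a
 * virtual address against the layout above.  The region bounds come
 * straight from the table; the helper name is hypothetical.
 */
#if 0	/* example only */
static __inline const char *
example_va_region(vm_offset_t va)
{

	if (va <= 0x3fffffffffffffffUL)
		return ("user process");
	if (va <= 0x7fffffffffffffffUL)
		return ("unused");
	if (va <= 0xbfffffffffffffffUL)
		return ("mmio region");
	if (va <= 0xdfffffffffffffffUL)
		return ("direct map");
	return ("KVA");
}
#endif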

#include <sys/cdefs.h>

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/vmmeter.h>

#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define PRI0ptrX	"016lx"

/**************************************************************************/
/**************************************************************************/

unsigned int kernel_pdirs;
static uma_zone_t ptbl_root_zone;
static pte_t ****kernel_ptbl_root;

/*
 * Base of the pmap_mapdev() region. On 32-bit it immediately follows the
 * userspace address range. On 64-bit it's far above, at (1 << 63), and
 * ranges up to the DMAP, giving 62 bits of PA allowed. This is far larger
 * than the widest Book-E address bus; the e6500 has a 40-bit PA space.
 * This allows us to map akin to the DMAP, with addresses identical to the
 * PA, offset by the base.
 */
#define VM_MAPDEV_BASE		0x8000000000000000
#define VM_MAPDEV_PA_MAX	0x4000000000000000 /* Don't encroach on DMAP */
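
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * code): with the identity-style layout described above, a device PA
 * below VM_MAPDEV_PA_MAX would map at VM_MAPDEV_BASE + pa, so the VA is
 * recoverable from the PA alone.
 */
#if 0	/* example only */
static __inline vm_offset_t
example_mapdev_va(vm_paddr_t pa)
{

	KASSERT(pa < VM_MAPDEV_PA_MAX, ("%s: PA would encroach on the DMAP",
	    __func__));
	return ((vm_offset_t)(VM_MAPDEV_BASE + pa));
}
#endif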

static void tid_flush(tlbtid_t tid);
static unsigned long ilog2(unsigned long);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

#define PMAP_ROOT_SIZE	(sizeof(pte_t****) * PG_ROOT_NENTRIES)
static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va,
    bool nosleep, bool *is_new);
static void ptbl_hold(pmap_t, pte_t *);
static int ptbl_unhold(pmap_t, vm_offset_t);

static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static pte_t *pte_find_next(pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);

/**************************************************************************/
/* Page table related */
/**************************************************************************/

/* Allocate a page, to be used in a page table. */
static vm_offset_t
mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
{
	vm_page_t m;
	int req;

	req = VM_ALLOC_WIRED | VM_ALLOC_ZERO;
	while ((m = vm_page_alloc_noobj(req)) == NULL) {
		if (nosleep)
			return (0);

		/* Drop the locks while waiting for free pages. */
		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		vm_wait(NULL);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
	}

	/* Page table pages are accessed through the direct map. */
	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

/* Initialize pool of kva ptbl buffers. */
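
/*
 * The pool initialization body is elided in this excerpt; a minimal
 * sketch of what it could look like, assuming a UMA zone sized by
 * PMAP_ROOT_SIZE (the zone name and flags here are assumptions):
 */
#if 0	/* illustrative sketch */
static void
ptbl_init(void)
{

	ptbl_root_zone = uma_zcreate("ptbl roots", PMAP_ROOT_SIZE, NULL, NULL,
	    NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
}
#endif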

/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(pmap_t pmap, vm_offset_t va)
{
	pte_t ***pdir_l1;
	pte_t **pdir;
	pte_t *ptbl;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)];
	if (pdir_l1 == NULL)
		return (NULL);
	pdir = pdir_l1[PDIR_L1_IDX(va)];
	if (pdir == NULL)
		return (NULL);
	ptbl = pdir[PDIR_IDX(va)];

	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}
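
/*
 * Descriptive note (added for this excerpt): the walk above mirrors how
 * this pmap decomposes a VA, using the index macros from pte.h:
 *
 *	pm_root[PG_ROOT_IDX(va)]	-> pte_t ***	(L1 directory page)
 *	  [PDIR_L1_IDX(va)]		-> pte_t **	(page directory page)
 *	  [PDIR_IDX(va)]		-> pte_t *	(page table page)
 *	  [PTBL_IDX(va)]		-> pte_t	(the PTE itself)
 */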

/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
	vm_offset_t va;
	pte_t ****pm_root;
	pte_t *pte;
	unsigned long i, j, k, l;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	va = *pva;
	i = PG_ROOT_IDX(va);
	j = PDIR_L1_IDX(va);
	k = PDIR_IDX(va);
	l = PTBL_IDX(va);
	pm_root = pmap->pm_root;

	/* truncate the VA for later. */
	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
		if (pm_root[i] == 0)
			continue;
		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
			if (pm_root[i][j] == 0)
				continue;
			for (; k < PDIR_NENTRIES; k++, l = 0) {
				if (pm_root[i][j][k] == NULL)
					continue;
				for (; l < PTBL_NENTRIES; l++) {
					pte = &pm_root[i][j][k][l];
					if (!PTE_ISVALID(pte))
						continue;
					*pva = va + PG_ROOT_SIZE * i +
					    PDIR_L1_SIZE * j +
					    PDIR_SIZE * k +
					    PAGE_SIZE * l;
					return (pte);
				}
			}
		}
	}
	return (NULL);
}

static bool
unhold_free_page(pmap_t pmap, vm_page_t m)
{

	if (vm_page_unwire_noq(m)) {
		vm_page_free_zero(m);
		return (true);
	}

	return (false);
}

static vm_offset_t
get_pgtbl_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
    bool nosleep, bool hold_parent, bool *isnew)
{
	vm_offset_t page;
	vm_page_t m;

	page = ptr_tbl[index];
	KASSERT(page != 0 || pmap != kernel_pmap,
	    ("NULL page table page found in kernel pmap!"));
	if (page == 0) {
		page = mmu_booke_alloc_page(pmap, index, nosleep);
		if (ptr_tbl[index] == 0) {
			*isnew = true;
			ptr_tbl[index] = page;
			if (hold_parent) {
				m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)ptr_tbl));
				m->ref_count++;
			}
			return (page);
		}
		/* Lost an allocation race; free the duplicate page. */
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
		page = ptr_tbl[index];
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}

	*isnew = false;
	return (page);
}

/* Allocate a page table. */
static pte_t *
ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
	unsigned int pg_root_idx = PG_ROOT_IDX(va);
	unsigned int pdir_l1_idx = PDIR_L1_IDX(va);
	unsigned int pdir_idx = PDIR_IDX(va);
	vm_offset_t pdir_l1, pdir, ptbl;

	/* When holding a parent, no need to hold the root index pages. */
	pdir_l1 = get_pgtbl_page(pmap, (vm_offset_t *)pmap->pm_root,
	    pg_root_idx, nosleep, false, is_new);
	if (pdir_l1 == 0)
		return (NULL);
	pdir = get_pgtbl_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
	    nosleep, !*is_new, is_new);
	if (pdir == 0)
		return (NULL);
	ptbl = get_pgtbl_page(pmap, (vm_offset_t *)pdir, pdir_idx,
	    nosleep, !*is_new, is_new);

	return ((pte_t *)ptbl);
}
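
/*
 * Descriptive note (inferred from the calls above, not original text):
 * each get_pgtbl_page() call passes hold_parent = !*is_new, so a child
 * level takes an extra reference on its parent page only when the parent
 * already existed; a freshly created parent is covered by the initial
 * wire count from mmu_booke_alloc_page().
 */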

/*
 * Decrement the hold count of the ptbl pages and attempt to free them.
 * Called when removing a pte entry from a ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(pmap_t pmap, vm_offset_t va)
{
	pte_t *ptbl;
	pte_t **pdir;
	pte_t ***pdir_l1;
	vm_page_t m;
	u_int pg_root_idx, pdir_l1_idx, pdir_idx;

	pg_root_idx = PG_ROOT_IDX(va);
	pdir_l1_idx = PDIR_L1_IDX(va);
	pdir_idx = PDIR_IDX(va);

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	pdir_l1 = pmap->pm_root[pg_root_idx];
	pdir = pdir_l1[pdir_l1_idx];
	ptbl = pdir[pdir_idx];

	/* decrement hold count */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	if (!unhold_free_page(pmap, m))
		return (0);

	/* The ptbl page was freed; unlink and unhold the upper levels too. */
	pdir[pdir_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));
	if (!unhold_free_page(pmap, m))
		return (1);
	pdir_l1[pdir_l1_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));
	if (!unhold_free_page(pmap, m))
		return (1);
	pmap->pm_root[pg_root_idx] = NULL;

	return (1);
}

/*
 * Increment the hold count for the ptbl pages. This routine is used when
 * a new pte entry is being inserted into the ptbl.
 */
static void
ptbl_hold(pmap_t pmap, pte_t *ptbl)
{
	vm_page_t m;

	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	m->ref_count++;
}

/*
 * Clean the pte entry and try to free the page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
	vm_page_t m;
	pte_t *pte;

	pte = pte_find(pmap, va);
	KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
	    __func__, (uintmax_t)va, pmap));

	if (!PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Handle modified pages. */
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		/* Referenced pages. */
		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		/* Remove pv_entry from pv_list. */
		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(pmap, va));
	}
	return (0);
}

/*
 * Insert a PTE for a given page and virtual address.
 */
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte, pte_tmp;
	bool is_new;

	/* Get the page directory pointer. */
	ptbl = ptbl_alloc(pmap, va, nosleep, &is_new);
	if (ptbl == NULL) {
		KASSERT(nosleep, ("nosleep and NULL ptbl"));
		return (ENOMEM);
	}
	if (is_new) {
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is valid mapping for requested va, if there
		 * is, remove it.
		 */
		pte = &ptbl[ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(pmap, ptbl);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}
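
/*
 * Example usage (illustrative): translating a kernel virtual address,
 * where 0 is returned when no valid mapping exists:
 *
 *	vm_paddr_t pa = pte_vatopa(kernel_pmap, va);
 */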

/* Allocate pte entries to manage (addr & mask) to (addr & mask) + size. */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
{
	pte_t *pte;
	vm_size_t kva_size;
	int kernel_pdirs, kernel_pgtbls, pdir_l1s;
	vm_offset_t va, l1_va, pdir_va, ptbl_va;
	int i, j, k;

	kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_root = kernel_ptbl_root;
	pdir_l1s = howmany(kva_size, PG_ROOT_SIZE);
	kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE);
	kernel_pgtbls = howmany(kva_size, PDIR_SIZE);

	/* Initialize kernel pdir */
	l1_va = (vm_offset_t)kernel_ptbl_root +
	    round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	pdir_va = l1_va + pdir_l1s * PAGE_SIZE;
	ptbl_va = pdir_va + kernel_pdirs * PAGE_SIZE;

	printf("ptbl_root_va: %#lx\n", (vm_offset_t)kernel_ptbl_root);
	printf("l1_va: %#lx (%d entries)\n", l1_va, pdir_l1s);
	printf("pdir_va: %#lx (%d entries)\n", pdir_va, kernel_pdirs);
	printf("ptbl_va: %#lx (%d entries)\n", ptbl_va, kernel_pgtbls);

	/* Wire up the directory hierarchy for the whole KVA range. */
	va = VM_MIN_KERNEL_ADDRESS;
	for (i = PG_ROOT_IDX(va); i < PG_ROOT_IDX(va) + pdir_l1s;
	    i++, l1_va += PAGE_SIZE) {
		kernel_pmap->pm_root[i] = (pte_t ***)l1_va;
		for (j = 0;
		    j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
		    j++, pdir_va += PAGE_SIZE) {
			kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va;
			for (k = 0;
			    k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
			    k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE)
				kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va;
		}
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}

static vm_offset_t
mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
{
	vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

	kernel_ptbl_root = (pte_t ****)data_end;

	data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	data_end += howmany(kva_size, PG_ROOT_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_L1_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE;

	return (data_end);
}
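
/*
 * Worked example (illustrative; the level sizes below are assumptions,
 * not taken from pte.h): if PDIR_SIZE were 2 MB, PDIR_L1_SIZE 1 GB and
 * PG_ROOT_SIZE 512 GB, then a 1 GB KVA window would reserve one rounded
 * root table, one L1 page, one pdir page and 512 leaf page-table pages
 * immediately after the kernel image.
 */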

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static int
mmu_booke_pinit(pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&kernel_pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES);

	return (1);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	/*
	 * Verify that all page directories are gone.
	 * Protects against reference count leakage.
	 */
	for (int i = 0; i < PG_ROOT_NENTRIES; i++)
		KASSERT(pmap->pm_root[i] == 0,
		    ("Index %d on root page %p is non-zero!\n", i, pmap->pm_root));

	uma_zfree(ptbl_root_zone, pmap->pm_root);
}

static void
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	int sync_sz, valid;

	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			pa += (va & PAGE_MASK);
			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(vm_page_t m)
{
	vm_offset_t off, va;

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
}
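
/*
 * Note (added for this excerpt): dcbz zeroes one full cache line per
 * iteration, so the loop above relies on the direct map being cacheable
 * and on cacheline_size evenly dividing PAGE_SIZE.
 */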

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
	dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
}

static inline void
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	caddr_t a_cp, b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	vm_page_t pa, pb;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		pa = ma[a_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		pb = mb[b_offset >> PAGE_SHIFT];
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
		    a_pg_offset);
		b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
		    b_pg_offset);
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static vm_offset_t
mmu_booke_quick_enter_page(vm_page_t m)
{

	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

static void
mmu_booke_quick_remove_page(vm_offset_t addr)
{
}

/**************************************************************************/
/**************************************************************************/

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned long
ilog2(unsigned long num)
{
	unsigned long lz;

	__asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
	return (63 - lz);
}
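
/*
 * Example (illustrative): cntlzd counts the leading zeros of the 64-bit
 * operand, so ilog2(4096) == 12, and non-powers of two round down,
 * e.g. ilog2(6000) == 12.
 */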

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't
	 * broadcast, so use it for PID invalidation.
	 */
	mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
	__asm __volatile("isync; .long 0x7c200024; isync; msync");

	__asm __volatile("wrtee %0" :: "r"(msr));
}