/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 *
 * Kernel and user threads run within one common virtual address space.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0x7fff_ffff : user process
 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xffff_efff : KVA
 */
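
/*
 * Illustrative note (not part of the code below): mappings are kept in a
 * two-level structure, where a 32-bit VA first selects a slot in the
 * per-pmap page directory and then a slot in the page table it points to,
 * roughly:
 *
 *	pte_t *ptbl = pmap->pm_pdir[PDIR_IDX(va)];
 *	pte_t *pte  = &ptbl[PTBL_IDX(va)];
 *
 * PDIR_IDX()/PTBL_IDX() and the table geometry come from the machine
 * headers included below; pte_find() later in this file is the actual
 * lookup helper.
 */
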
#include <sys/cdefs.h>

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/vmmeter.h>

#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#define PRI0ptrX	"08x"

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

static vm_offset_t kernel_ptbl_root;
static unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/**************************************************************************/
/**************************************************************************/

#define VM_MAPDEV_BASE	((vm_offset_t)VM_MAXUSER_ADDRESS + PAGE_SIZE)
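/*
 * With the 32-bit layout described at the top of this file, this should
 * place the pmap_mapdev() window one page above the top of user VA
 * (i.e. around 0x8000_0000, assuming VM_MAXUSER_ADDRESS ends just below it).
 */
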
static void tid_flush(tlbtid_t tid);
static unsigned long ilog2(unsigned long);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

#define PMAP_ROOT_SIZE	(sizeof(pte_t**) * PDIR_NENTRIES)

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(pmap_t, unsigned int, bool);
static void ptbl_free(pmap_t, unsigned int);
static void ptbl_hold(pmap_t, unsigned int);
static int ptbl_unhold(pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS	(128 * 16)
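
/*
 * Sizing note (illustrative): ptbl_init() below reserves PTBL_BUFS *
 * PTBL_PAGES * PAGE_SIZE bytes of KVA starting at ptbl_buf_pool_vabase.
 * With 4 KB pages and a two-page ptbl that would come to 2048 * 2 * 4 KB =
 * 16 MB, but the exact figure depends on the PTBL_PAGES and PAGE_SIZE
 * values from the machine headers.
 */
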
/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

/**************************************************************************/
/* Page table related */
/**************************************************************************/

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva =
		    ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the one that maps the
 * given ptbl and return it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, bool nosleep)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i, j;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
			if (nosleep) {
				ptbl_free_pmap_ptbl(pmap, ptbl);
				for (j = 0; j < i; j++)
					vm_page_free(mtbl[j]);
				return (NULL);
			}
			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			vm_wait(NULL);
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		m->pindex = pidx;
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter((vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);

	pmap->pm_pdir[pdir_idx] = NULL;

	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		mmu_booke_kremove(va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing a pte entry from a ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->ref_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * ref_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->ref_count == 0) {
		ptbl_free(pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->ref_count++;
	}
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//		su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		/*
		 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
		 * used. This is needed by the NCSW support code for fast
		 * VA<->PA translation.
		 */
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}

	mtx_lock_spin(&tlbivax_mutex);

	tlb0_flush_entry(va);
	*pte = 0;

	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    bool nosleep)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte, pte_tmp;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(pmap, pdir_idx, nosleep);
		if (ptbl == NULL) {
			KASSERT(nosleep, ("nosleep and NULL ptbl"));
			return (ENOMEM);
		}
		pmap->pm_pdir[pdir_idx] = ptbl;
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */

	mtx_lock_spin(&tlbivax_mutex);

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
	pte_t **pdir;
	pte_t *pte;
	vm_offset_t va;
	unsigned int i, j;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	va = *pva;
	i = PDIR_IDX(va);
	j = PTBL_IDX(va);
	pdir = pmap->pm_pdir;
	for (; i < PDIR_NENTRIES; i++, j = 0) {
		if (pdir[i] == NULL)
			continue;
		for (; j < PTBL_NENTRIES; j++) {
			pte = &pdir[i][j];
			if (!PTE_ISVALID(pte))
				continue;
			*pva = PDIR_SIZE * i + PAGE_SIZE * j;
			return (pte);
		}
	}
	return (NULL);
}

/* Set up kernel page tables. */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
{
	pte_t *pte;
	vm_offset_t va;
	vm_offset_t pdir_start;
	int i;
	int kptbl_min;

	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
	kernel_pmap->pm_pdir = (pte_t **)kernel_ptbl_root;

	pdir_start = kernel_ptbl_root + PDIR_NENTRIES * sizeof(pte_t);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++) {
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(pdir_start + (i * PAGE_SIZE * PTBL_PAGES));
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}

static vm_offset_t
mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
{
	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
	    (uintptr_t)ptbl_bufs, data_end);

	data_end = round_page(data_end);

	kernel_ptbl_root = data_end;
	data_end += PDIR_NENTRIES * sizeof(pte_t*);

	/* Allocate PTE tables for kernel KVA. */
	kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    PDIR_SIZE);
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;

	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at %#jx end = %#jx\n",
	    (uintmax_t)kernel_ptbl_root, (uintmax_t)data_end);

	return (data_end);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static int
mmu_booke_pinit(pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_pdir = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);

	return (1);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	uma_zfree(ptbl_root_zone, pmap->pm_pdir);
}

static void
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	vm_page_t m;
	vm_offset_t addr;
	pmap_t pmap;
	int active, valid;
	int sync_sz;

	rw_wlock(&pvh_global_lock);
	pmap = PCPU_GET(curpmap);
	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			if (!active) {
				/*
				 * Create a mapping in the active pmap.
				 *
				 * XXX: We use the zero page here, because
				 * it isn't likely to be in use.
				 * If we ever decide to support
				 * security.bsd.map_at_zero on Book-E, change
				 * this to some other address that isn't
				 * normally mappable.
				 */
				addr = 0;
				m = PHYS_TO_VM_PAGE(pa);
				PMAP_LOCK(pmap);
				pte_enter(pmap, m, addr,
				    PTE_SR | PTE_VALID, false);
				__syncicache((void *)(addr + (va & PAGE_MASK)),
				    sync_sz);
				pte_remove(pmap, addr, PTBL_UNHOLD);
				PMAP_UNLOCK(pmap);
			} else
				__syncicache((void *)va, sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
	rw_wunlock(&pvh_global_lock);
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */
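
	/*
	 * A sketch of the assertion the XXX above suggests (illustrative,
	 * not enabled here): the requested range must not spill into the
	 * next page.
	 *
	 *	KASSERT(off >= 0 && size >= 0 && off + size <= PAGE_SIZE,
	 *	    ("mmu_booke_zero_page_area: range crosses a page"));
	 */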

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
	mmu_booke_kremove(va);

	mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(vm_page_t m)
{
	vm_offset_t off, va;

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(va, VM_PAGE_TO_PHYS(m));

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	mmu_booke_kremove(va);

	mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(dva, VM_PAGE_TO_PHYS(dm));

	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);

	mmu_booke_kremove(dva);
	mmu_booke_kremove(sva);
	mtx_unlock(&copy_page_mutex);
}

static void
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	char *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&copy_page_mutex);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		mmu_booke_kenter(copy_page_src_va,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)copy_page_src_va + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		mmu_booke_kenter(copy_page_dst_va,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)copy_page_dst_va + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		mmu_booke_kremove(copy_page_dst_va);
		mmu_booke_kremove(copy_page_src_va);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&copy_page_mutex);
}

static vm_offset_t
mmu_booke_quick_enter_page(vm_page_t m)
{
	vm_paddr_t paddr;
	vm_offset_t qaddr;
	uint32_t flags;
	pte_t *pte;

	paddr = VM_PAGE_TO_PHYS(m);

	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
	flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;

	critical_enter();
	qaddr = PCPU_GET(qmap_addr);

	pte = pte_find(kernel_pmap, qaddr);

	KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));

	/*
	 * XXX: tlbivax is broadcast to other cores, but qaddr should
	 * not be present in other TLBs. Is there a better instruction
	 * sequence to use? Or just forget it & use mmu_booke_kenter()...
	 */
	__asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
	__asm __volatile("isync; msync");

	*pte = PTE_RPN_FROM_PA(paddr) | flags;

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0)
		__syncicache((void *)qaddr, PAGE_SIZE);

	return (qaddr);
}

static void
mmu_booke_quick_remove_page(vm_offset_t addr)
{
	pte_t *pte;

	pte = pte_find(kernel_pmap, addr);

	KASSERT(PCPU_GET(qmap_addr) == addr,
	    ("mmu_booke_quick_remove_page: invalid address"));
	KASSERT(*pte != 0,
	    ("mmu_booke_quick_remove_page: PTE not in use"));

	*pte = 0;
	critical_exit();
}

/**************************************************************************/
/**************************************************************************/

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned long
ilog2(unsigned long num)
{
	unsigned long lz;

	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
	return (31 - lz);
}
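
/*
 * For instance (illustrative): ilog2(1) == 0, ilog2(4096) == 12, and
 * ilog2(4097) == 12, since cntlzw counts the leading zeros of the 32-bit
 * argument and 31 - clz(num) is the index of its most significant set bit.
 */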

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;
	uint32_t mas0, mas1, mas2;
	int entry, way;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't broadcast,
	 * so use it for PID invalidation.
	 */
	switch ((mfpvr() >> 16) & 0xffff) {
	case FSL_E500mc:
	case FSL_E5500:
	case FSL_E6500:
		mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
		/* tlbilxpid */
		__asm __volatile("isync; .long 0x7c200024; isync; msync");
		__asm __volatile("wrtee %0" :: "r"(msr));
		return;
	}

	for (way = 0; way < TLB0_WAYS; way++)
		for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
			mtspr(SPR_MAS0, mas0);

			mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
			mtspr(SPR_MAS2, mas2);

			__asm __volatile("isync; tlbre");

			mas1 = mfspr(SPR_MAS1);

			if (!(mas1 & MAS1_VALID))
				continue;
			if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
				continue;
			mas1 &= ~MAS1_VALID;
			mtspr(SPR_MAS1, mas1);
			__asm __volatile("isync; tlbwe; isync; msync");
		}
	__asm __volatile("wrtee %0" :: "r"(msr));
}