2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Some hw specific parts of this pmap were derived or influenced
29 * by NetBSD's ibm4xx pmap module. More generic code is shared with
30 * a few other pmap modules from the FreeBSD tree.
36 * Kernel and user threads run within one common virtual address space
 * 32-bit virtual address space layout:
 * -------------------------------------
42 * 0x0000_0000 - 0x7fff_ffff : user process
43 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
44 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
45 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
46 * 0xc100_0000 - 0xffff_ffff : KVA
47 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
48 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
49 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
50 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
 * 64-bit (powerpc64) virtual address space layout:
 * -------------------------------------------------
55 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
56 * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
57 * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
58 * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
59 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
60 * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
61 * endkernel - msgbufp-1 : flat device tree
62 * msgbufp - kernel_pdir-1 : message buffer
63 * kernel_pdir - kernel_pp2d-1 : kernel page directory
64 * kernel_pp2d - . : kernel pointers to page directory
65 * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
66 * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
67 * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
68 * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
69 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
70 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
71 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
72 * 0xf000_0000_0000_0000 - +Maxmem : physmem map
73 * - 0xffff_ffff_ffff_ffff : device direct map
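 *
 * With the direct map above in place, physical memory below Maxmem can be
 * reached through PHYS_TO_DMAP()/DMAP_TO_PHYS() without creating per-page
 * mappings; only device memory and user mappings still go through regular
 * PTEs.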
76 #include <sys/cdefs.h>
77 __FBSDID("$FreeBSD$");
80 #include "opt_kstack_pages.h"
82 #include <sys/param.h>
84 #include <sys/malloc.h>
88 #include <sys/queue.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/kerneldump.h>
92 #include <sys/linker.h>
93 #include <sys/msgbuf.h>
95 #include <sys/mutex.h>
96 #include <sys/rwlock.h>
97 #include <sys/sched.h>
99 #include <sys/vmmeter.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_extern.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_map.h>
109 #include <vm/vm_pager.h>
110 #include <vm/vm_phys.h>
111 #include <vm/vm_pagequeue.h>
114 #include <machine/_inttypes.h>
115 #include <machine/cpu.h>
116 #include <machine/pcb.h>
117 #include <machine/platform.h>
119 #include <machine/tlb.h>
120 #include <machine/spr.h>
121 #include <machine/md_var.h>
122 #include <machine/mmuvar.h>
123 #include <machine/pmap.h>
124 #include <machine/pte.h>
130 #define SPARSE_MAPDEV
#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#ifdef __powerpc64__
#define PRI0ptrX "016lx"
#else
#define PRI0ptrX "08x"
#endif
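
/*
 * PRI0ptrX is the printf conversion used in debugf() format strings to show
 * a zero-padded, pointer-width hex value, e.g.:
 *	debugf("kstack0 at 0x%"PRI0ptrX"\n", kstack0);
 * prints 16 hex digits on powerpc64 and 8 on 32-bit parts.
 */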
143 #define TODO panic("%s: not implemented", __func__);
145 extern unsigned char _etext[];
146 extern unsigned char _end[];
148 extern uint32_t *bootinfo;
151 vm_offset_t kernstart;
154 /* Message buffer and tables. */
155 static vm_offset_t data_start;
156 static vm_size_t data_end;
158 /* Phys/avail memory regions. */
159 static struct mem_region *availmem_regions;
160 static int availmem_regions_sz;
161 static struct mem_region *physmem_regions;
162 static int physmem_regions_sz;
164 #ifndef __powerpc64__
165 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
166 static vm_offset_t zero_page_va;
167 static struct mtx zero_page_mutex;
169 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
170 static vm_offset_t copy_page_src_va;
171 static vm_offset_t copy_page_dst_va;
172 static struct mtx copy_page_mutex;
175 static struct mtx tlbivax_mutex;
177 /**************************************************************************/
179 /**************************************************************************/
181 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
182 vm_prot_t, u_int flags, int8_t psind);
184 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
185 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
187 unsigned int kernel_pdirs;
189 static uma_zone_t ptbl_root_zone;
192 * If user pmap is processed with mmu_booke_remove and the resident count
193 * drops to 0, there are no more pages to remove, so we need not continue.
195 #define PMAP_REMOVE_DONE(pmap) \
196 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
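
/*
 * For example, mmu_booke_remove() tests PMAP_REMOVE_DONE() up front so a
 * user pmap whose resident count has already dropped to zero is skipped
 * without walking any of its page tables.
 */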
198 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
199 extern int elf32_nxstack;
202 /**************************************************************************/
203 /* TLB and TID handling */
204 /**************************************************************************/
206 /* Translation ID busy table */
207 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
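
/*
 * Entry 0 (TID_KERNEL) of every per-CPU row is claimed by kernel_pmap at
 * bootstrap time; user pmaps are handed TIDs for the remaining slots by
 * tid_alloc().
 */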
210 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
211 * core revisions and should be read from h/w registers during early config.
213 uint32_t tlb0_entries;
215 uint32_t tlb0_entries_per_way;
216 uint32_t tlb1_entries;
218 #define TLB0_ENTRIES (tlb0_entries)
219 #define TLB0_WAYS (tlb0_ways)
220 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
222 #define TLB1_ENTRIES (tlb1_entries)
224 static tlbtid_t tid_alloc(struct pmap *);
#ifdef __powerpc64__
static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
#else
static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
#endif
234 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
235 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
236 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
237 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t, int);
239 static __inline uint32_t tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma);
241 static vm_size_t tsize2size(unsigned int);
242 static unsigned int size2tsize(vm_size_t);
243 static unsigned long ilog2(unsigned long);
245 static void set_mas4_defaults(void);
247 static inline void tlb0_flush_entry(vm_offset_t);
248 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
250 /**************************************************************************/
251 /* Page table management */
252 /**************************************************************************/
254 static struct rwlock_padalign pvh_global_lock;
256 /* Data for the pv entry allocation mechanism */
257 static uma_zone_t pvzone;
258 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
260 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
262 #ifndef PMAP_SHPGPERPROC
263 #define PMAP_SHPGPERPROC 200
266 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
267 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
268 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
269 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
270 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
272 static pv_entry_t pv_alloc(void);
273 static void pv_free(pv_entry_t);
274 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
275 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
277 static void booke_pmap_init_qpages(void);
279 static inline void tlb_miss_lock(void);
280 static inline void tlb_miss_unlock(void);
283 extern tlb_entry_t __boot_tlb1[];
284 void pmap_bootstrap_ap(volatile uint32_t *);
288 * Kernel MMU interface
290 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
291 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
292 vm_size_t, vm_offset_t);
293 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
294 static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
295 vm_offset_t, vm_page_t *, vm_offset_t, int);
296 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
297 vm_prot_t, u_int flags, int8_t psind);
298 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
299 vm_page_t, vm_prot_t);
300 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
302 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
303 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
305 static void mmu_booke_init(mmu_t);
306 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
307 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
308 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
309 static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
310 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
312 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
314 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
315 vm_object_t, vm_pindex_t, vm_size_t);
316 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
317 static void mmu_booke_page_init(mmu_t, vm_page_t);
318 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
319 static void mmu_booke_pinit(mmu_t, pmap_t);
320 static void mmu_booke_pinit0(mmu_t, pmap_t);
321 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
323 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
324 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
325 static void mmu_booke_release(mmu_t, pmap_t);
326 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
327 static void mmu_booke_remove_all(mmu_t, vm_page_t);
328 static void mmu_booke_remove_write(mmu_t, vm_page_t);
329 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
330 static void mmu_booke_zero_page(mmu_t, vm_page_t);
331 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
332 static void mmu_booke_activate(mmu_t, struct thread *);
333 static void mmu_booke_deactivate(mmu_t, struct thread *);
334 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
335 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
336 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
337 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
338 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
339 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
340 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
341 static void mmu_booke_kremove(mmu_t, vm_offset_t);
342 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
343 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
345 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
347 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
349 static void mmu_booke_scan_init(mmu_t);
350 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
351 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
352 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
353 vm_size_t sz, vm_memattr_t mode);
354 static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
355 volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
356 static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
357 int *is_user, vm_offset_t *decoded_addr);
358 static void mmu_booke_page_array_startup(mmu_t , long);
361 static mmu_method_t mmu_booke_methods[] = {
362 /* pmap dispatcher interface */
363 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
364 MMUMETHOD(mmu_copy, mmu_booke_copy),
365 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
366 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
367 MMUMETHOD(mmu_enter, mmu_booke_enter),
368 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
369 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
370 MMUMETHOD(mmu_extract, mmu_booke_extract),
371 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
372 MMUMETHOD(mmu_init, mmu_booke_init),
373 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
374 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
375 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
376 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
377 MMUMETHOD(mmu_map, mmu_booke_map),
378 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
379 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
380 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
381 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
382 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
383 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
384 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
385 MMUMETHOD(mmu_protect, mmu_booke_protect),
386 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
387 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
388 MMUMETHOD(mmu_release, mmu_booke_release),
389 MMUMETHOD(mmu_remove, mmu_booke_remove),
390 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
391 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
392 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
393 MMUMETHOD(mmu_unwire, mmu_booke_unwire),
394 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
395 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
396 MMUMETHOD(mmu_activate, mmu_booke_activate),
397 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
398 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
399 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
400 MMUMETHOD(mmu_page_array_startup, mmu_booke_page_array_startup),
402 /* Internal interfaces */
403 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
404 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
405 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
406 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
407 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
408 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
409 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
410 MMUMETHOD(mmu_kremove, mmu_booke_kremove),
411 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
412 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
413 MMUMETHOD(mmu_map_user_ptr, mmu_booke_map_user_ptr),
414 MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
416 /* dumpsys() support */
417 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
418 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
419 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
424 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
432 static vm_offset_t tlb1_map_base = VM_MAPDEV_BASE;
434 static __inline uint32_t
435 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
440 if (ma != VM_MEMATTR_DEFAULT) {
442 case VM_MEMATTR_UNCACHEABLE:
443 return (MAS2_I | MAS2_G);
444 case VM_MEMATTR_WRITE_COMBINING:
445 case VM_MEMATTR_WRITE_BACK:
446 case VM_MEMATTR_PREFETCHABLE:
448 case VM_MEMATTR_WRITE_THROUGH:
449 return (MAS2_W | MAS2_M);
450 case VM_MEMATTR_CACHEABLE:
 * Assume the page is cache inhibited and access is guarded unless
 * it's in one of our physical memory regions.
459 attrib = _TLB_ENTRY_IO;
460 for (i = 0; i < physmem_regions_sz; i++) {
461 if ((pa >= physmem_regions[i].mr_start) &&
462 (pa < (physmem_regions[i].mr_start +
463 physmem_regions[i].mr_size))) {
464 attrib = _TLB_ENTRY_MEM;
481 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
484 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
485 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke.tlb_lock);
487 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
488 ("tlb_miss_lock: tried to lock self"));
490 tlb_lock(pc->pc_booke.tlb_lock);
492 CTR1(KTR_PMAP, "%s: locked", __func__);
499 tlb_miss_unlock(void)
507 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
509 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
510 __func__, pc->pc_cpuid);
512 tlb_unlock(pc->pc_booke.tlb_lock);
514 CTR1(KTR_PMAP, "%s: unlocked", __func__);
520 /* Return number of entries in TLB0. */
522 tlb0_get_tlbconf(void)
526 tlb0_cfg = mfspr(SPR_TLB0CFG);
527 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
528 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
529 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
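
/*
 * For example, an e500v2-class core typically reports a 512-entry, 4-way
 * set-associative TLB0 here, giving 128 entries per way.
 */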
532 /* Return number of entries in TLB1. */
534 tlb1_get_tlbconf(void)
538 tlb1_cfg = mfspr(SPR_TLB1CFG);
539 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
542 /**************************************************************************/
543 /* Page table related */
544 /**************************************************************************/
546 /* Allocate pv_entry structure. */
553 if (pv_entry_count > pv_entry_high_water)
554 pagedaemon_wakeup(0); /* XXX powerpc NUMA */
555 pv = uma_zalloc(pvzone, M_NOWAIT);
560 /* Free pv_entry structure. */
562 pv_free(pv_entry_t pve)
566 uma_zfree(pvzone, pve);
570 /* Allocate and initialize pv_entry structure. */
572 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
576 //int su = (pmap == kernel_pmap);
577 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
578 // (u_int32_t)pmap, va, (u_int32_t)m);
582 panic("pv_insert: no pv entries!");
588 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
589 rw_assert(&pvh_global_lock, RA_WLOCKED);
591 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
593 //debugf("pv_insert: e\n");
596 /* Destroy pv entry. */
598 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
602 //int su = (pmap == kernel_pmap);
603 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
605 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
606 rw_assert(&pvh_global_lock, RA_WLOCKED);
609 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
610 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
611 /* remove from pv_list */
612 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
613 if (TAILQ_EMPTY(&m->md.pv_list))
614 vm_page_aflag_clear(m, PGA_WRITEABLE);
616 /* free pv entry struct */
622 //debugf("pv_remove: e\n");
625 /**************************************************************************/
627 /**************************************************************************/
630 * This is called during booke_init, before the system is really initialized.
633 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
635 vm_paddr_t phys_kernelend;
636 struct mem_region *mp, *mp1;
639 vm_paddr_t physsz, hwphyssz;
640 u_int phys_avail_count;
641 vm_size_t kstack0_sz;
642 vm_offset_t kernel_pdir, kstack0;
643 vm_paddr_t kstack0_phys;
645 vm_offset_t kernel_ptbl_root;
647 debugf("mmu_booke_bootstrap: entered\n");
649 /* Set interesting system properties */
655 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
659 /* Initialize invalidation mutex */
660 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
662 /* Read TLB0 size and associativity. */
666 * Align kernel start and end address (kernel image).
667 * Note that kernel end does not necessarily relate to kernsize.
668 * kernsize is the size of the kernel that is actually mapped.
670 data_start = round_page(kernelend);
671 data_end = data_start;
673 /* Allocate the dynamic per-cpu area. */
674 dpcpu = (void *)data_end;
675 data_end += DPCPU_SIZE;
677 /* Allocate space for the message buffer. */
678 msgbufp = (struct msgbuf *)data_end;
679 data_end += msgbufsize;
680 debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
681 (uintptr_t)msgbufp, data_end);
683 data_end = round_page(data_end);
686 kernel_ptbl_root = data_end;
687 data_end += PP2D_NENTRIES * sizeof(pte_t**);
689 /* Allocate space for ptbl_bufs. */
690 ptbl_bufs = (struct ptbl_buf *)data_end;
691 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
692 debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
693 (uintptr_t)ptbl_bufs, data_end);
695 data_end = round_page(data_end);
696 kernel_ptbl_root = data_end;
697 data_end += PDIR_NENTRIES * sizeof(pte_t*);
700 /* Allocate PTE tables for kernel KVA. */
701 kernel_pdir = data_end;
702 kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
705 kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
706 data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
708 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
709 debugf(" kernel ptbls: %d\n", kernel_ptbls);
710 debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
711 kernel_pdir, data_end);
713 /* Retrieve phys/avail mem regions */
714 mem_regions(&physmem_regions, &physmem_regions_sz,
715 &availmem_regions, &availmem_regions_sz);
717 if (PHYS_AVAIL_ENTRIES < availmem_regions_sz)
718 panic("mmu_booke_bootstrap: phys_avail too small");
720 data_end = round_page(data_end);
721 vm_page_array = (vm_page_t)data_end;
723 * Get a rough idea (upper bound) on the size of the page array. The
724 * vm_page_array will not handle any more pages than we have in the
725 * avail_regions array, and most likely much less.
728 for (mp = availmem_regions; mp->mr_size; mp++) {
731 sz = (round_page(sz) / (PAGE_SIZE + sizeof(struct vm_page)));
732 data_end += round_page(sz * sizeof(struct vm_page));
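
/*
 * Rough arithmetic behind the estimate above: each PAGE_SIZE bytes of RAM
 * also needs room for one struct vm_page, so a region of sz bytes yields
 * about sz / (PAGE_SIZE + sizeof(struct vm_page)) pages; with 4 KB pages
 * and a struct vm_page on the order of 100 bytes (illustrative), roughly
 * 2-3% of memory goes to the array.
 */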
734 /* Pre-round up to 1MB. This wastes some space, but saves TLB entries */
735 data_end = roundup2(data_end, 1 << 20);
737 debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
738 debugf(" kernstart: %#zx\n", kernstart);
739 debugf(" kernsize: %#zx\n", kernsize);
741 if (data_end - kernstart > kernsize) {
742 kernsize += tlb1_mapin_region(kernstart + kernsize,
743 kernload + kernsize, (data_end - kernstart) - kernsize,
746 data_end = kernstart + kernsize;
747 debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
750 * Clear the structures - note we can only do it safely after the
751 * possible additional TLB1 translations are in place (above) so that
 * the whole range up to the currently calculated 'data_end' is covered.
754 dpcpu_init(dpcpu, 0);
756 memset((void *)kernel_pdir, 0,
757 kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
758 kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
761 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
764 /*******************************************************/
765 /* Set the start and end of kva. */
766 /*******************************************************/
767 virtual_avail = round_page(data_end);
768 virtual_end = VM_MAX_KERNEL_ADDRESS;
770 #ifndef __powerpc64__
771 /* Allocate KVA space for page zero/copy operations. */
772 zero_page_va = virtual_avail;
773 virtual_avail += PAGE_SIZE;
774 copy_page_src_va = virtual_avail;
775 virtual_avail += PAGE_SIZE;
776 copy_page_dst_va = virtual_avail;
777 virtual_avail += PAGE_SIZE;
778 debugf("zero_page_va = 0x%"PRI0ptrX"\n", zero_page_va);
779 debugf("copy_page_src_va = 0x%"PRI0ptrX"\n", copy_page_src_va);
780 debugf("copy_page_dst_va = 0x%"PRI0ptrX"\n", copy_page_dst_va);
782 /* Initialize page zero/copy mutexes. */
783 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
786 /* Allocate KVA space for ptbl bufs. */
787 ptbl_buf_pool_vabase = virtual_avail;
788 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
789 debugf("ptbl_buf_pool_vabase = 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
790 ptbl_buf_pool_vabase, virtual_avail);
793 /* Calculate corresponding physical addresses for the kernel region. */
794 phys_kernelend = kernload + kernsize;
795 debugf("kernel image and allocated data:\n");
796 debugf(" kernload = 0x%09jx\n", (uintmax_t)kernload);
797 debugf(" kernstart = 0x%"PRI0ptrX"\n", kernstart);
798 debugf(" kernsize = 0x%"PRI0ptrX"\n", kernsize);
801 * Remove kernel physical address range from avail regions list. Page
802 * align all regions. Non-page aligned memory isn't very interesting
803 * to us. Also, sort the entries for ascending addresses.
807 cnt = availmem_regions_sz;
808 debugf("processing avail regions:\n");
809 for (mp = availmem_regions; mp->mr_size; mp++) {
811 e = mp->mr_start + mp->mr_size;
812 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
813 /* Check whether this region holds all of the kernel. */
814 if (s < kernload && e > phys_kernelend) {
815 availmem_regions[cnt].mr_start = phys_kernelend;
816 availmem_regions[cnt++].mr_size = e - phys_kernelend;
/* Look whether this region starts within the kernel. */
820 if (s >= kernload && s < phys_kernelend) {
821 if (e <= phys_kernelend)
825 /* Now look whether this region ends within the kernel. */
826 if (e > kernload && e <= phys_kernelend) {
831 /* Now page align the start and size of the region. */
837 debugf("%09jx-%09jx = %jx\n",
838 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
840 /* Check whether some memory is left here. */
844 (cnt - (mp - availmem_regions)) * sizeof(*mp));
850 /* Do an insertion sort. */
851 for (mp1 = availmem_regions; mp1 < mp; mp1++)
852 if (s < mp1->mr_start)
855 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
863 availmem_regions_sz = cnt;
865 /*******************************************************/
866 /* Steal physical memory for kernel stack from the end */
867 /* of the first avail region */
868 /*******************************************************/
869 kstack0_sz = kstack_pages * PAGE_SIZE;
870 kstack0_phys = availmem_regions[0].mr_start +
871 availmem_regions[0].mr_size;
872 kstack0_phys -= kstack0_sz;
873 availmem_regions[0].mr_size -= kstack0_sz;
875 /*******************************************************/
876 /* Fill in phys_avail table, based on availmem_regions */
877 /*******************************************************/
878 phys_avail_count = 0;
881 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
883 debugf("fill in phys_avail:\n");
884 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
886 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
887 (uintmax_t)availmem_regions[i].mr_start,
888 (uintmax_t)availmem_regions[i].mr_start +
889 availmem_regions[i].mr_size,
890 (uintmax_t)availmem_regions[i].mr_size);
893 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
894 debugf(" hw.physmem adjust\n");
895 if (physsz < hwphyssz) {
896 phys_avail[j] = availmem_regions[i].mr_start;
898 availmem_regions[i].mr_start +
902 dump_avail[j] = phys_avail[j];
903 dump_avail[j + 1] = phys_avail[j + 1];
908 phys_avail[j] = availmem_regions[i].mr_start;
909 phys_avail[j + 1] = availmem_regions[i].mr_start +
910 availmem_regions[i].mr_size;
912 physsz += availmem_regions[i].mr_size;
913 dump_avail[j] = phys_avail[j];
914 dump_avail[j + 1] = phys_avail[j + 1];
916 physmem = btoc(physsz);
918 /* Calculate the last available physical address. */
919 for (i = 0; phys_avail[i + 2] != 0; i += 2)
921 Maxmem = powerpc_btop(phys_avail[i + 1]);
923 debugf("Maxmem = 0x%08lx\n", Maxmem);
924 debugf("phys_avail_count = %d\n", phys_avail_count);
925 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
926 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
930 * Map the physical memory contiguously in TLB1.
931 * Round so it fits into a single mapping.
933 tlb1_mapin_region(DMAP_BASE_ADDRESS, 0,
934 phys_avail[i + 1], _TLB_ENTRY_MEM);
937 /*******************************************************/
938 /* Initialize (statically allocated) kernel pmap. */
939 /*******************************************************/
940 PMAP_LOCK_INIT(kernel_pmap);
942 kernel_pmap->pm_pp2d = (pte_t ***)kernel_ptbl_root;
944 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
945 kernel_pmap->pm_pdir = (pte_t **)kernel_ptbl_root;
948 debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
949 kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
950 for (i = 0; i < MAXCPU; i++) {
951 kernel_pmap->pm_tid[i] = TID_KERNEL;
953 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
954 tidbusy[i][TID_KERNEL] = kernel_pmap;
957 /* Mark kernel_pmap active on all CPUs */
958 CPU_FILL(&kernel_pmap->pm_active);
961 * Initialize the global pv list lock.
963 rw_init(&pvh_global_lock, "pmap pv global");
965 /*******************************************************/
967 /*******************************************************/
969 /* Enter kstack0 into kernel map, provide guard page */
970 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
971 thread0.td_kstack = kstack0;
972 thread0.td_kstack_pages = kstack_pages;
974 debugf("kstack_sz = 0x%08jx\n", (uintmax_t)kstack0_sz);
975 debugf("kstack0_phys at 0x%09jx - 0x%09jx\n",
976 (uintmax_t)kstack0_phys, (uintmax_t)kstack0_phys + kstack0_sz);
977 debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
978 kstack0, kstack0 + kstack0_sz);
980 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
981 for (i = 0; i < kstack_pages; i++) {
982 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
983 kstack0 += PAGE_SIZE;
984 kstack0_phys += PAGE_SIZE;
987 pmap_bootstrapped = 1;
989 debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
990 debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);
992 debugf("mmu_booke_bootstrap: exit\n");
1002 /* Prepare TLB1 image for AP processors */
1004 for (i = 0; i < TLB1_ENTRIES; i++) {
1005 tlb1_read_entry(&tmp, i);
1007 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
1008 memcpy(e++, &tmp, sizeof(tmp));
1013 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
1018 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
1019 * have the snapshot of its contents in the s/w __boot_tlb1[] table
1020 * created by tlb1_ap_prep(), so use these values directly to
1021 * (re)program AP's TLB1 hardware.
1023 * Start at index 1 because index 0 has the kernel map.
1025 for (i = 1; i < TLB1_ENTRIES; i++) {
1026 if (__boot_tlb1[i].mas1 & MAS1_VALID)
1027 tlb1_write_entry(&__boot_tlb1[i], i);
1030 set_mas4_defaults();
1035 booke_pmap_init_qpages(void)
1042 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
1043 if (pc->pc_qmap_addr == 0)
1044 panic("pmap_init_qpages: unable to allocate KVA");
1048 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
1051 * Get the physical page address for the given pmap/virtual address.
1054 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1059 pa = pte_vatopa(mmu, pmap, va);
1066 * Extract the physical page address associated with the given
1067 * kernel virtual address.
1070 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
1076 #ifdef __powerpc64__
1077 if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS)
1078 return (DMAP_TO_PHYS(va));
1081 if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
1082 p = pte_vatopa(mmu, kernel_pmap, va);
1085 /* Check TLB1 mappings */
1086 for (i = 0; i < TLB1_ENTRIES; i++) {
1087 tlb1_read_entry(&e, i);
1088 if (!(e.mas1 & MAS1_VALID))
1090 if (va >= e.virt && va < e.virt + e.size)
1091 return (e.phys + (va - e.virt));
1099 * Initialize the pmap module.
1100 * Called by vm_init, to initialize any structures that the pmap
1101 * system needs to map virtual memory.
1104 mmu_booke_init(mmu_t mmu)
1106 int shpgperproc = PMAP_SHPGPERPROC;
1109 * Initialize the address space (zone) for the pv entries. Set a
1110 * high water mark so that the system can recover from excessive
1111 * numbers of pv entries.
1113 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1114 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1116 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1117 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
1119 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1120 pv_entry_high_water = 9 * (pv_entry_max / 10);
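
/*
 * Illustrative sizing: with the default shpgperproc of 200 and, say,
 * maxproc = 10000 and one million physical pages, pv_entry_max works out
 * to 200 * 10000 + 1000000 = 3 million entries, with the high-water mark
 * at 90% of that (2.7 million).
 */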
1122 uma_zone_reserve_kva(pvzone, pv_entry_max);
1124 /* Pre-fill pvzone with initial number of pv entries. */
1125 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
1127 /* Create a UMA zone for page table roots. */
1128 ptbl_root_zone = uma_zcreate("pmap root", PMAP_ROOT_SIZE,
1129 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, UMA_ZONE_VM);
1131 /* Initialize ptbl allocation. */
1136 * Map a list of wired pages into kernel virtual address space. This is
1137 * intended for temporary mappings which do not need page modification or
1138 * references recorded. Existing mappings in the region are overwritten.
1141 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1146 while (count-- > 0) {
1147 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1154 * Remove page mappings from kernel virtual address space. Intended for
1155 * temporary mappings entered by mmu_booke_qenter.
1158 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
1163 while (count-- > 0) {
1164 mmu_booke_kremove(mmu, va);
1170 * Map a wired page into kernel virtual address space.
1173 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1176 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1180 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1185 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1186 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
1188 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
1189 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
1190 flags |= PTE_PS_4KB;
1192 pte = pte_find(mmu, kernel_pmap, va);
1193 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
1195 mtx_lock_spin(&tlbivax_mutex);
1198 if (PTE_ISVALID(pte)) {
1200 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
1202 /* Flush entry from TLB0 */
1203 tlb0_flush_entry(va);
1206 *pte = PTE_RPN_FROM_PA(pa) | flags;
1208 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1209 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1210 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1212 /* Flush the real memory from the instruction cache. */
1213 if ((flags & (PTE_I | PTE_G)) == 0)
1214 __syncicache((void *)va, PAGE_SIZE);
1217 mtx_unlock_spin(&tlbivax_mutex);
1221 * Remove a page from kernel page table.
1224 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1228 CTR2(KTR_PMAP,"%s: s (va = 0x%"PRI0ptrX")\n", __func__, va);
1230 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1231 (va <= VM_MAX_KERNEL_ADDRESS)),
1232 ("mmu_booke_kremove: invalid va"));
1234 pte = pte_find(mmu, kernel_pmap, va);
1236 if (!PTE_ISVALID(pte)) {
1238 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1243 mtx_lock_spin(&tlbivax_mutex);
1246 /* Invalidate entry in TLB0, update PTE. */
1247 tlb0_flush_entry(va);
1251 mtx_unlock_spin(&tlbivax_mutex);
1255 * Provide a kernel pointer corresponding to a given userland pointer.
1256 * The returned pointer is valid until the next time this function is
1257 * called in this thread. This is used internally in copyin/copyout.
1260 mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
1261 void **kaddr, size_t ulen, size_t *klen)
1264 if (trunc_page((uintptr_t)uaddr + ulen) > VM_MAXUSER_ADDRESS)
1267 *kaddr = (void *)(uintptr_t)uaddr;
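
/*
 * Since user and kernel share a single address space on Book-E (all user
 * VAs lie below VM_MAXUSER_ADDRESS), a bounds-checked user pointer can be
 * returned verbatim as the kernel pointer; no temporary mapping is needed
 * for copyin()/copyout().
 */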
1275 * Figure out where a given kernel pointer (usually in a fault) points
1276 * to from the VM's perspective, potentially remapping into userland's
1280 mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
1281 vm_offset_t *decoded_addr)
1284 if (trunc_page(addr) <= VM_MAXUSER_ADDRESS)
1289 *decoded_addr = addr;
1294 * Initialize pmap associated with process 0.
1297 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1300 PMAP_LOCK_INIT(pmap);
1301 mmu_booke_pinit(mmu, pmap);
1302 PCPU_SET(curpmap, pmap);
1306 * Insert the given physical page at the specified virtual address in the
1307 * target physical map with the protection requested. If specified the page
1308 * will be wired down.
1311 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1312 vm_prot_t prot, u_int flags, int8_t psind)
1316 rw_wlock(&pvh_global_lock);
1318 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
1320 rw_wunlock(&pvh_global_lock);
1325 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1326 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
1331 int error, su, sync;
1333 pa = VM_PAGE_TO_PHYS(m);
1334 su = (pmap == kernel_pmap);
1337 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1338 // "pa=0x%08x prot=0x%08x flags=%#x)\n",
1339 // (u_int32_t)pmap, su, pmap->pm_tid,
1340 // (u_int32_t)m, va, pa, prot, flags);
1343 KASSERT(((va >= virtual_avail) &&
1344 (va <= VM_MAX_KERNEL_ADDRESS)),
1345 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1347 KASSERT((va <= VM_MAXUSER_ADDRESS),
1348 ("mmu_booke_enter_locked: user pmap, non user va"));
1350 if ((m->oflags & VPO_UNMANAGED) == 0) {
1351 if ((pmap_flags & PMAP_ENTER_QUICK_LOCKED) == 0)
1352 VM_PAGE_OBJECT_BUSY_ASSERT(m);
1354 VM_OBJECT_ASSERT_LOCKED(m->object);
1357 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 * If there is an existing mapping, and the physical address has not
 * changed, it must be a protection or wiring change.
1363 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1364 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1367 * Before actually updating pte->flags we calculate and
1368 * prepare its new value in a helper var.
1371 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1373 /* Wiring change, just update stats. */
1374 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
1375 if (!PTE_ISWIRED(pte)) {
1377 pmap->pm_stats.wired_count++;
1380 if (PTE_ISWIRED(pte)) {
1381 flags &= ~PTE_WIRED;
1382 pmap->pm_stats.wired_count--;
1386 if (prot & VM_PROT_WRITE) {
1387 /* Add write permissions. */
1392 if ((flags & PTE_MANAGED) != 0)
1393 vm_page_aflag_set(m, PGA_WRITEABLE);
1395 /* Handle modified pages, sense modify status. */
1398 * The PTE_MODIFIED flag could be set by underlying
1399 * TLB misses since we last read it (above), possibly
1400 * other CPUs could update it so we check in the PTE
1401 * directly rather than rely on that saved local flags
1404 if (PTE_ISMODIFIED(pte))
1408 if (prot & VM_PROT_EXECUTE) {
1414 * Check existing flags for execute permissions: if we
1415 * are turning execute permissions on, icache should
1418 if ((*pte & (PTE_UX | PTE_SX)) == 0)
1422 flags &= ~PTE_REFERENCED;
1425 * The new flags value is all calculated -- only now actually
1428 mtx_lock_spin(&tlbivax_mutex);
1431 tlb0_flush_entry(va);
1432 *pte &= ~PTE_FLAGS_MASK;
1436 mtx_unlock_spin(&tlbivax_mutex);
1440 * If there is an existing mapping, but it's for a different
1441 * physical address, pte_enter() will delete the old mapping.
1443 //if ((pte != NULL) && PTE_ISVALID(pte))
1444 // debugf("mmu_booke_enter_locked: replace\n");
1446 // debugf("mmu_booke_enter_locked: new\n");
1448 /* Now set up the flags and install the new mapping. */
1449 flags = (PTE_SR | PTE_VALID);
1455 if (prot & VM_PROT_WRITE) {
1460 if ((m->oflags & VPO_UNMANAGED) == 0)
1461 vm_page_aflag_set(m, PGA_WRITEABLE);
1464 if (prot & VM_PROT_EXECUTE) {
/* If it's wired, update stats. */
1471 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
1474 error = pte_enter(mmu, pmap, m, va, flags,
1475 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
1477 return (KERN_RESOURCE_SHORTAGE);
if ((flags & PTE_WIRED) != 0)
1480 pmap->pm_stats.wired_count++;
1482 /* Flush the real memory from the instruction cache. */
1483 if (prot & VM_PROT_EXECUTE)
1487 if (sync && (su || pmap == PCPU_GET(curpmap))) {
1488 __syncicache((void *)va, PAGE_SIZE);
1492 return (KERN_SUCCESS);
1496 * Maps a sequence of resident pages belonging to the same object.
1497 * The sequence begins with the given page m_start. This page is
1498 * mapped at the given virtual address start. Each subsequent page is
1499 * mapped at a virtual address that is offset from start by the same
1500 * amount as the page is offset from m_start within the object. The
1501 * last page in the sequence is the page with the largest offset from
1502 * m_start that can be mapped at a virtual address less than the given
1503 * virtual address end. Not every virtual page between start and end
1504 * is mapped; only those for which a resident page exists with the
1505 * corresponding offset from m_start are mapped.
1508 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1509 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1512 vm_pindex_t diff, psize;
1514 VM_OBJECT_ASSERT_LOCKED(m_start->object);
1516 psize = atop(end - start);
1518 rw_wlock(&pvh_global_lock);
1520 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1521 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
1522 prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1523 PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
1524 m = TAILQ_NEXT(m, listq);
1527 rw_wunlock(&pvh_global_lock);
1531 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1535 rw_wlock(&pvh_global_lock);
1537 mmu_booke_enter_locked(mmu, pmap, va, m,
1538 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
1539 PMAP_ENTER_QUICK_LOCKED, 0);
1541 rw_wunlock(&pvh_global_lock);
1545 * Remove the given range of addresses from the specified map.
1547 * It is assumed that the start and end are properly rounded to the page size.
1550 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1555 int su = (pmap == kernel_pmap);
1557 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1558 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1561 KASSERT(((va >= virtual_avail) &&
1562 (va <= VM_MAX_KERNEL_ADDRESS)),
1563 ("mmu_booke_remove: kernel pmap, non kernel va"));
1565 KASSERT((va <= VM_MAXUSER_ADDRESS),
1566 ("mmu_booke_remove: user pmap, non user va"));
1569 if (PMAP_REMOVE_DONE(pmap)) {
1570 //debugf("mmu_booke_remove: e (empty)\n");
1574 hold_flag = PTBL_HOLD_FLAG(pmap);
1575 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1577 rw_wlock(&pvh_global_lock);
1579 for (; va < endva; va += PAGE_SIZE) {
1580 pte = pte_find(mmu, pmap, va);
1581 if ((pte != NULL) && PTE_ISVALID(pte))
1582 pte_remove(mmu, pmap, va, hold_flag);
1585 rw_wunlock(&pvh_global_lock);
1587 //debugf("mmu_booke_remove: e\n");
1591 * Remove physical page from all pmaps in which it resides.
1594 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1599 rw_wlock(&pvh_global_lock);
1600 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1601 pvn = TAILQ_NEXT(pv, pv_link);
1603 PMAP_LOCK(pv->pv_pmap);
1604 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1605 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1606 PMAP_UNLOCK(pv->pv_pmap);
1608 vm_page_aflag_clear(m, PGA_WRITEABLE);
1609 rw_wunlock(&pvh_global_lock);
1613 * Map a range of physical addresses into kernel virtual address space.
1616 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1617 vm_paddr_t pa_end, int prot)
1619 vm_offset_t sva = *virt;
1620 vm_offset_t va = sva;
1622 #ifdef __powerpc64__
1623 /* XXX: Handle memory not starting at 0x0. */
1624 if (pa_end < ctob(Maxmem))
1625 return (PHYS_TO_DMAP(pa_start));
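
/*
 * On powerpc64 physical memory up to Maxmem is already covered by the
 * direct map established in mmu_booke_bootstrap(), so a range that fits
 * below it is translated with PHYS_TO_DMAP() instead of being entered
 * page by page below.
 */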
1628 while (pa_start < pa_end) {
1629 mmu_booke_kenter(mmu, va, pa_start);
1631 pa_start += PAGE_SIZE;
 * The pmap must be activated before its address space can be accessed in any
1643 mmu_booke_activate(mmu_t mmu, struct thread *td)
1648 pmap = &td->td_proc->p_vmspace->vm_pmap;
1650 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX")",
1651 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1653 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1657 cpuid = PCPU_GET(cpuid);
1658 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
1659 PCPU_SET(curpmap, pmap);
1661 if (pmap->pm_tid[cpuid] == TID_NONE)
1664 /* Load PID0 register with pmap tid value. */
1665 mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
1666 __asm __volatile("isync");
1668 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
1672 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1673 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1677 * Deactivate the specified process's address space.
1680 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1684 pmap = &td->td_proc->p_vmspace->vm_pmap;
1686 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%"PRI0ptrX,
1687 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1689 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
1691 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
1692 PCPU_SET(curpmap, NULL);
1696 * Copy the range specified by src_addr/len
1697 * from the source map to the range dst_addr/len
1698 * in the destination map.
1700 * This routine is only advisory and need not do anything.
1703 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
1704 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1710 * Set the physical protection on the specified range of this map as requested.
1713 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1720 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1721 mmu_booke_remove(mmu, pmap, sva, eva);
1725 if (prot & VM_PROT_WRITE)
1729 for (va = sva; va < eva; va += PAGE_SIZE) {
1730 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1731 if (PTE_ISVALID(pte)) {
1732 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1734 mtx_lock_spin(&tlbivax_mutex);
1737 /* Handle modified pages. */
1738 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
1741 tlb0_flush_entry(va);
1742 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1745 mtx_unlock_spin(&tlbivax_mutex);
1753 * Clear the write and modified bits in each of the given page's mappings.
1756 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1761 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1762 ("mmu_booke_remove_write: page %p is not managed", m));
1763 vm_page_assert_busied(m);
1765 if (!pmap_page_is_write_mapped(m))
1767 rw_wlock(&pvh_global_lock);
1768 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1769 PMAP_LOCK(pv->pv_pmap);
1770 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1771 if (PTE_ISVALID(pte)) {
1772 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1774 mtx_lock_spin(&tlbivax_mutex);
1777 /* Handle modified pages. */
1778 if (PTE_ISMODIFIED(pte))
1781 /* Flush mapping from TLB0. */
1782 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1785 mtx_unlock_spin(&tlbivax_mutex);
1788 PMAP_UNLOCK(pv->pv_pmap);
1790 vm_page_aflag_clear(m, PGA_WRITEABLE);
1791 rw_wunlock(&pvh_global_lock);
1795 * Atomically extract and hold the physical page with the given
1796 * pmap and virtual address pair if that mapping permits the given
1800 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
1809 pte = pte_find(mmu, pmap, va);
1810 if ((pte != NULL) && PTE_ISVALID(pte)) {
1811 if (pmap == kernel_pmap)
1816 if ((*pte & pte_wbit) != 0 || (prot & VM_PROT_WRITE) == 0) {
1817 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1818 if (!vm_page_wire_mapped(m))
1827 * Initialize a vm_page's machine-dependent fields.
1830 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
1833 m->md.pv_tracked = 0;
1834 TAILQ_INIT(&m->md.pv_list);
 * Return whether or not the specified physical page was modified
 * in any physical map.
1842 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
1848 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1849 ("mmu_booke_is_modified: page %p is not managed", m));
1853 * If the page is not busied then this check is racy.
1855 if (!pmap_page_is_write_mapped(m))
1858 rw_wlock(&pvh_global_lock);
1859 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1860 PMAP_LOCK(pv->pv_pmap);
1861 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
1863 if (PTE_ISMODIFIED(pte))
1866 PMAP_UNLOCK(pv->pv_pmap);
1870 rw_wunlock(&pvh_global_lock);
1875 * Return whether or not the specified virtual address is eligible
1879 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
1886 * Return whether or not the specified physical page was referenced
 * in any physical map.
1890 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
1896 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1897 ("mmu_booke_is_referenced: page %p is not managed", m));
1899 rw_wlock(&pvh_global_lock);
1900 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1901 PMAP_LOCK(pv->pv_pmap);
1902 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
1904 if (PTE_ISREFERENCED(pte))
1907 PMAP_UNLOCK(pv->pv_pmap);
1911 rw_wunlock(&pvh_global_lock);
1916 * Clear the modify bits on the specified physical page.
1919 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
1924 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1925 ("mmu_booke_clear_modify: page %p is not managed", m));
1926 vm_page_assert_busied(m);
1928 if (!pmap_page_is_write_mapped(m))
1931 rw_wlock(&pvh_global_lock);
1932 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1933 PMAP_LOCK(pv->pv_pmap);
1934 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
1936 mtx_lock_spin(&tlbivax_mutex);
1939 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
1940 tlb0_flush_entry(pv->pv_va);
1941 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
1946 mtx_unlock_spin(&tlbivax_mutex);
1948 PMAP_UNLOCK(pv->pv_pmap);
1950 rw_wunlock(&pvh_global_lock);
1954 * Return a count of reference bits for a page, clearing those bits.
1955 * It is not necessary for every reference bit to be cleared, but it
1956 * is necessary that 0 only be returned when there are truly no
1957 * reference bits set.
1959 * As an optimization, update the page's dirty field if a modified bit is
1960 * found while counting reference bits. This opportunistic update can be
1961 * performed at low cost and can eliminate the need for some future calls
1962 * to pmap_is_modified(). However, since this function stops after
1963 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
1964 * dirty pages. Those dirty pages will only be detected by a future call
1965 * to pmap_is_modified().
1968 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
1974 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1975 ("mmu_booke_ts_referenced: page %p is not managed", m));
1977 rw_wlock(&pvh_global_lock);
1978 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1979 PMAP_LOCK(pv->pv_pmap);
1980 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
1982 if (PTE_ISMODIFIED(pte))
1984 if (PTE_ISREFERENCED(pte)) {
1985 mtx_lock_spin(&tlbivax_mutex);
1988 tlb0_flush_entry(pv->pv_va);
1989 *pte &= ~PTE_REFERENCED;
1992 mtx_unlock_spin(&tlbivax_mutex);
1994 if (++count >= PMAP_TS_REFERENCED_MAX) {
1995 PMAP_UNLOCK(pv->pv_pmap);
2000 PMAP_UNLOCK(pv->pv_pmap);
2002 rw_wunlock(&pvh_global_lock);
2007 * Clear the wired attribute from the mappings for the specified range of
2008 * addresses in the given pmap. Every valid mapping within that range must
2009 * have the wired attribute set. In contrast, invalid mappings cannot have
2010 * the wired attribute set, so they are ignored.
2012 * The wired attribute of the page table entry is not a hardware feature, so
2013 * there is no need to invalidate any TLB entries.
2016 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2022 for (va = sva; va < eva; va += PAGE_SIZE) {
2023 if ((pte = pte_find(mmu, pmap, va)) != NULL &&
2025 if (!PTE_ISWIRED(pte))
2026 panic("mmu_booke_unwire: pte %p isn't wired",
2029 pmap->pm_stats.wired_count--;
2037 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2038 * page. This count may be changed upwards or downwards in the future; it is
2039 * only necessary that true be returned for a small subset of pmaps for proper
2043 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2049 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2050 ("mmu_booke_page_exists_quick: page %p is not managed", m));
2053 rw_wlock(&pvh_global_lock);
2054 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2055 if (pv->pv_pmap == pmap) {
2062 rw_wunlock(&pvh_global_lock);
2067 * Return the number of managed mappings to the given physical page that are
2071 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2077 if ((m->oflags & VPO_UNMANAGED) != 0)
2079 rw_wlock(&pvh_global_lock);
2080 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2081 PMAP_LOCK(pv->pv_pmap);
2082 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2083 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2085 PMAP_UNLOCK(pv->pv_pmap);
2087 rw_wunlock(&pvh_global_lock);
2092 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2098 * This currently does not work for entries that
2099 * overlap TLB1 entries.
2101 for (i = 0; i < TLB1_ENTRIES; i ++) {
2102 if (tlb1_iomapped(i, pa, size, &va) == 0)
2110 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
2116 /* Minidumps are based on virtual memory addresses. */
2118 *va = (void *)(vm_offset_t)pa;
2122 /* Raw physical memory dumps don't have a virtual address. */
2123 /* We always map a 256MB page at 256M. */
2124 gran = 256 * 1024 * 1024;
2125 ppa = rounddown2(pa, gran);
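
/*
 * Worked example (illustrative): for pa = 0x2345_0000 the 256 MB granule
 * gives ppa = rounddown2(pa, 0x1000_0000) = 0x2000_0000, leaving an
 * offset ofs = pa - ppa = 0x0345_0000 into the mapped window.
 */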
2128 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
2130 if (sz > (gran - ofs))
2131 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
2136 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
2144 /* Minidumps are based on virtual memory addresses. */
2145 /* Nothing to do... */
2149 for (i = 0; i < TLB1_ENTRIES; i++) {
2150 tlb1_read_entry(&e, i);
2151 if (!(e.mas1 & MAS1_VALID))
2155 /* Raw physical memory dumps don't have a virtual address. */
2160 tlb1_write_entry(&e, i);
2162 gran = 256 * 1024 * 1024;
2163 ppa = rounddown2(pa, gran);
2165 if (sz > (gran - ofs)) {
2170 tlb1_write_entry(&e, i);
2174 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
2177 mmu_booke_scan_init(mmu_t mmu)
2184 /* Initialize phys. segments for dumpsys(). */
2185 memset(&dump_map, 0, sizeof(dump_map));
2186 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
2187 &availmem_regions_sz);
2188 for (i = 0; i < physmem_regions_sz; i++) {
2189 dump_map[i].pa_start = physmem_regions[i].mr_start;
2190 dump_map[i].pa_size = physmem_regions[i].mr_size;
2195 /* Virtual segments for minidumps: */
2196 memset(&dump_map, 0, sizeof(dump_map));
2198 /* 1st: kernel .data and .bss. */
2199 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
2200 dump_map[0].pa_size =
2201 round_page((uintptr_t)_end) - dump_map[0].pa_start;
2203 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2204 dump_map[1].pa_start = data_start;
2205 dump_map[1].pa_size = data_end - data_start;
2207 /* 3rd: kernel VM. */
2208 va = dump_map[1].pa_start + dump_map[1].pa_size;
2209 /* Find start of next chunk (from va). */
2210 while (va < virtual_end) {
2211 /* Don't dump the buffer cache. */
2212 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
2213 va = kmi.buffer_eva;
2216 pte = pte_find(mmu, kernel_pmap, va);
2217 if (pte != NULL && PTE_ISVALID(pte))
2221 if (va < virtual_end) {
2222 dump_map[2].pa_start = va;
2224 /* Find last page in chunk. */
2225 while (va < virtual_end) {
2226 /* Don't run into the buffer cache. */
2227 if (va == kmi.buffer_sva)
2229 pte = pte_find(mmu, kernel_pmap, va);
2230 if (pte == NULL || !PTE_ISVALID(pte))
2234 dump_map[2].pa_size = va - dump_map[2].pa_start;
2239 * Map a set of physical memory pages into the kernel virtual address space.
2240 * Return a pointer to where it is mapped. This routine is intended to be used
2241 * for mapping device memory, NOT real memory.
2244 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2247 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
2251 tlb1_find_pa(vm_paddr_t pa, tlb_entry_t *e)
2255 for (i = 0; i < TLB1_ENTRIES; i++) {
2256 tlb1_read_entry(e, i);
2257 if ((e->mas1 & MAS1_VALID) == 0)
2264 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2268 #ifndef __powerpc64__
2277 * Check if this is premapped in TLB1.
2282 wimge = tlb_calc_wimg(pa, ma);
2283 for (i = 0; i < TLB1_ENTRIES; i++) {
2284 tlb1_read_entry(&e, i);
2285 if (!(e.mas1 & MAS1_VALID))
2287 if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
2289 if (tmppa >= e.phys && tmppa < e.phys + e.size) {
2290 va = e.virt + (pa - e.phys);
2291 tmppa = e.phys + e.size;
2292 sz -= MIN(sz, e.size);
2293 while (sz > 0 && (i = tlb1_find_pa(tmppa, &e)) != -1) {
2294 if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
2296 sz -= MIN(sz, e.size);
2297 tmppa = e.phys + e.size;
2301 return ((void *)va);
2305 size = roundup(size, PAGE_SIZE);
2307 #ifdef __powerpc64__
2308 KASSERT(pa < VM_MAPDEV_PA_MAX,
2309 ("Unsupported physical address! %lx", pa));
2310 va = VM_MAPDEV_BASE + pa;
2313 * The device mapping area is between VM_MAXUSER_ADDRESS and
2314 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing.
2316 #ifdef SPARSE_MAPDEV
2318 * With a sparse mapdev, align to the largest starting region. This
2319 * could feasibly be optimized for a 'best-fit' alignment, but that
2320 * calculation could be very costly.
2321 * Align to the smaller of:
2322 * - first set bit in overlap of (pa & size mask)
2323 * - largest size envelope
2325 * It's possible the device mapping may start at a PA that's not larger
2326 * than the size mask, so we need to offset into the region to maximize
2327 * the TLB entry range and minimize the number of TLB entries used.
2330 tmpva = tlb1_map_base;
2331 sz = ffsl((~((1 << flsl(size-1)) - 1)) & pa);
2332 sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
2333 va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
2334 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
2335 va = atomic_fetchadd_int(&tlb1_map_base, size);
2339 if (tlb1_mapin_region(va, pa, size, tlb_calc_wimg(pa, ma)) != size)
2342 return ((void *)va);
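/*
 * Worked example of the SPARSE_MAPDEV alignment above (hypothetical
 * numbers): for pa = 0x80000500 and size = 0x1000 the first expression
 * yields ffsl(pa & ~0xfff) = 32, which the second clamps to
 * flsl(size) - 1 = 12.  The resulting va is tlb1_map_base rounded up to
 * 1 << 12 = 4KB, OR-ed with pa's low bits (0x500), so a single naturally
 * aligned 4KB TLB1 entry covers the requested registers.
 */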
2346 * 'Unmap' a range mapped by mmu_booke_mapdev().
2349 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2351 #ifdef SUPPORTS_SHRINKING_TLB1
2352 vm_offset_t base, offset;
2355 * Unmap only if this is inside kernel virtual space.
2357 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2358 base = trunc_page(va);
2359 offset = va & PAGE_MASK;
2360 size = roundup(offset + size, PAGE_SIZE);
2361 kva_free(base, size);
2367 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2368 * specified pmap. This eliminates the blast of soft faults on process startup
2369 * and immediately after an mmap.
2372 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2373 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2376 VM_OBJECT_ASSERT_WLOCKED(object);
2377 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2378 ("mmu_booke_object_init_pt: non-device object"));
2382 * Perform the pmap work for mincore.
2385 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2389 /* XXX: this should be implemented at some point */
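/*
 * Change the memory attribute (WIMGE) bits of an existing kernel mapping.
 * If the range is covered by wired TLB1 entries, each covering entry is
 * rewritten in place with the new attributes; otherwise the kernel PTEs
 * for the range have their MAS2 bits updated and the stale TLB0 entries
 * are flushed under the tlbivax mutex.
 */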
2394 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
2402 addr = trunc_page(addr);
2404 /* Only allow changes to mapped kernel addresses. This includes:
2406 * - DMAP (powerpc64)
2409 if (addr <= VM_MAXUSER_ADDRESS ||
2410 #ifdef __powerpc64__
2411 (addr >= tlb1_map_base && addr < DMAP_BASE_ADDRESS) ||
2412 (addr > DMAP_MAX_ADDRESS && addr < VM_MIN_KERNEL_ADDRESS) ||
2414 (addr >= tlb1_map_base && addr < VM_MIN_KERNEL_ADDRESS) ||
2416 (addr > VM_MAX_KERNEL_ADDRESS))
2419 /* Check TLB1 mappings */
2420 for (i = 0; i < TLB1_ENTRIES; i++) {
2421 tlb1_read_entry(&e, i);
2422 if (!(e.mas1 & MAS1_VALID))
2424 if (addr >= e.virt && addr < e.virt + e.size)
2427 if (i < TLB1_ENTRIES) {
2428 /* Only allow full mappings to be modified for now. */
2429 /* Validate the range. */
2430 for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
2431 tlb1_read_entry(&e, j);
2432 if (va != e.virt || (sz - (va - addr) < e.size))
2435 for (va = addr; va < addr + sz; va += e.size, i++) {
2436 tlb1_read_entry(&e, i);
2437 e.mas2 &= ~MAS2_WIMGE_MASK;
2438 e.mas2 |= tlb_calc_wimg(e.phys, mode);
2441 * Write it out to the TLB. Should really re-sync with other
2444 tlb1_write_entry(&e, i);
2449 /* Not in TLB1, try through pmap */
2450 /* First validate the range. */
2451 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
2452 pte = pte_find(mmu, kernel_pmap, va);
2453 if (pte == NULL || !PTE_ISVALID(pte))
2457 mtx_lock_spin(&tlbivax_mutex);
2459 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
2460 pte = pte_find(mmu, kernel_pmap, va);
2461 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
2462 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
2463 tlb0_flush_entry(va);
2466 mtx_unlock_spin(&tlbivax_mutex);
2472 mmu_booke_page_array_startup(mmu_t mmu, long pages)
2474 vm_page_array_size = pages;
2477 /**************************************************************************/
2479 /**************************************************************************/
2482 * Allocate a TID. If necessary, steal one from someone else.
2483 * The new TID is flushed from the TLB before returning.
2486 tid_alloc(pmap_t pmap)
2491 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2493 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2495 thiscpu = PCPU_GET(cpuid);
2497 tid = PCPU_GET(booke.tid_next);
2500 PCPU_SET(booke.tid_next, tid + 1);
2502 /* If we are stealing a TID, clear the previous owner pmap's reference to it. */
2503 if (tidbusy[thiscpu][tid] != NULL) {
2505 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2507 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2509 /* Flush all entries from TLB0 matching this TID. */
2513 tidbusy[thiscpu][tid] = pmap;
2514 pmap->pm_tid[thiscpu] = tid;
2515 __asm __volatile("msync; isync");
2517 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2518 PCPU_GET(booke.tid_next));
2523 /**************************************************************************/
2525 /**************************************************************************/
2527 /* Convert TLB0 va and way number to tlb0[] table index. */
2528 static inline unsigned int
2529 tlb0_tableidx(vm_offset_t va, unsigned int way)
2533 idx = (way * TLB0_ENTRIES_PER_WAY);
2534 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
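/*
 * Worked example (assuming a core with 128 TLB0 entries per way and the
 * set index taken from EPN bits 12..18): for va = 0xc0012000 and way = 2
 * the set index is (va >> 12) & 0x7f = 0x12 = 18, so idx = 2 * 128 + 18 = 274.
 */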
2539 * Invalidate TLB0 entry.
2542 tlb0_flush_entry(vm_offset_t va)
2545 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2547 mtx_assert(&tlbivax_mutex, MA_OWNED);
2549 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2550 __asm __volatile("isync; msync");
2551 __asm __volatile("tlbsync; msync");
2553 CTR1(KTR_PMAP, "%s: e", __func__);
2557 /**************************************************************************/
2559 /**************************************************************************/
2562 * TLB1 mapping notes:
2564 * TLB1[0] Kernel text and data.
2565 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
2566 * windows, other device mappings.
2570 * Read an entry from given TLB1 slot.
2573 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
2578 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
2581 __asm __volatile("wrteei 0");
2583 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
2584 mtspr(SPR_MAS0, mas0);
2585 __asm __volatile("isync; tlbre");
2587 entry->mas1 = mfspr(SPR_MAS1);
2588 entry->mas2 = mfspr(SPR_MAS2);
2589 entry->mas3 = mfspr(SPR_MAS3);
2591 switch ((mfpvr() >> 16) & 0xFFFF) {
2596 entry->mas7 = mfspr(SPR_MAS7);
2602 __asm __volatile("wrtee %0" :: "r"(msr));
2604 entry->virt = entry->mas2 & MAS2_EPN_MASK;
2605 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
2606 (entry->mas3 & MAS3_RPN);
2608 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
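/*
 * The physical address above is reassembled from MAS7:MAS3 because MAS7
 * carries the RPN bits above bit 31 on cores with more than 32 bits of
 * physical address space (e.g. 36-bit e500 parts); the PVR switch above
 * gates the MAS7 read, since not all cores implement that register.
 */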
2611 struct tlbwrite_args {
2617 tlb1_find_free(void)
2622 for (i = 0; i < TLB1_ENTRIES; i++) {
2623 tlb1_read_entry(&e, i);
2624 if ((e.mas1 & MAS1_VALID) == 0)
2631 tlb1_write_entry_int(void *arg)
2633 struct tlbwrite_args *args = arg;
2638 idx = tlb1_find_free();
2640 panic("No free TLB1 entries!\n");
2643 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2645 mtspr(SPR_MAS0, mas0);
2646 mtspr(SPR_MAS1, args->e->mas1);
2647 mtspr(SPR_MAS2, args->e->mas2);
2648 mtspr(SPR_MAS3, args->e->mas3);
2649 switch ((mfpvr() >> 16) & 0xFFFF) {
2656 mtspr(SPR_MAS7, args->e->mas7);
2662 __asm __volatile("isync; tlbwe; isync; msync");
2667 tlb1_write_entry_sync(void *arg)
2669 /* Empty synchronization point for smp_rendezvous(). */
2673 * Write given entry to TLB1 hardware.
2676 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
2678 struct tlbwrite_args args;
2684 if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
2686 smp_rendezvous(tlb1_write_entry_sync,
2687 tlb1_write_entry_int,
2688 tlb1_write_entry_sync, &args);
2695 __asm __volatile("wrteei 0");
2696 tlb1_write_entry_int(&args);
2697 __asm __volatile("wrtee %0" :: "r"(msr));
2702 * Convert TLB TSIZE value to mapped region size.
2705 tsize2size(unsigned int tsize)
2710 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2713 return ((1 << (2 * tsize)) * 1024);
2717 * Convert region size (must be power of 4) to TLB TSIZE value.
2720 size2tsize(vm_size_t size)
2723 return (ilog2(size) / 2 - 5);
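/*
 * Worked examples of the two conversions above: TSIZE 1 <-> 4KB
 * (2^(2*1 + 10)), TSIZE 7 <-> 16MB (2^24) and TSIZE 9 <-> 256MB (2^28);
 * going the other way, size2tsize(256MB) = ilog2(2^28) / 2 - 5 = 9.
 */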
2727 * Register a permanent kernel mapping in TLB1.
2729 * Entries are not supposed to be invalidated; an existing entry with the
2730 * same va/pa/size only has its flags updated, otherwise a free slot is
located when the new entry is written out.
2733 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
2740 /* First try to update an existing entry. */
2741 for (index = 0; index < TLB1_ENTRIES; index++) {
2742 tlb1_read_entry(&e, index);
2743 /* Check if we're just updating the flags, and update them. */
2744 if (e.phys == pa && e.virt == va && e.size == size) {
2745 e.mas2 = (va & MAS2_EPN_MASK) | flags;
2746 tlb1_write_entry(&e, index);
2751 /* Convert size to TSIZE */
2752 tsize = size2tsize(size);
2754 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2755 /* XXX TS is hard coded to 0 for now as we only use single address space */
2756 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2761 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2762 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2763 e.mas2 = (va & MAS2_EPN_MASK) | flags;
2765 /* Set supervisor RWX permission bits */
2766 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2767 e.mas7 = (pa >> 32) & MAS7_RPN;
2769 tlb1_write_entry(&e, -1);
2775 * Map a contiguous region of RAM into TLB1.
2778 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size, int wimge)
2781 vm_size_t mapped, sz, ssize;
2788 sz = 1UL << (ilog2(size) & ~1);
2789 /* Shrink sz until the physical address is sz-aligned. */
2793 } while (pa % sz != 0);
2795 /* Then shrink it further until the virtual address is sz-aligned too. */
2799 } while (va % sz != 0);
2801 #ifdef __powerpc64__
2803 * Clamp TLB1 entries to 4G.
2805 * While the e6500 supports up to 1TB mappings, the e5500
2806 * only supports up to 4G mappings. (0b1011)
2808 * If any e6500 machines capable of supporting a very
2809 * large amount of memory appear in the future, we can
2812 * For now, though, since we have plenty of space in TLB1,
2813 * always avoid creating entries larger than 4GB.
2815 sz = MIN(sz, 1UL << 32);
2818 printf("Wiring VA=%p to PA=%jx (size=%lx)\n",
2819 (void *)va, (uintmax_t)pa, (long)sz);
2820 if (tlb1_set_entry(va, pa, sz,
2821 _TLB_ENTRY_SHARED | wimge) < 0)
2828 mapped = (va - base);
2830 printf("mapped size 0x%"PRIxPTR" (wasted space 0x%"PRIxPTR")\n",
2831 mapped, mapped - ssize);
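/*
 * Worked example (hypothetical addresses, and assuming the elided loop
 * bodies shrink sz by a factor of four, as the power-of-4 TSIZE encoding
 * requires): wiring 72MB at pa 0x08000000 / va 0xc0000000 first picks
 * sz = 64MB (the largest power of 4 not exceeding 72MB, and aligned to
 * both addresses); the remaining 8MB is then covered by two 4MB entries,
 * so the region consumes three TLB1 slots with no wasted space.
 */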
2837 * TLB1 initialization routine, to be called after the very first
2838 * assembler level setup done in locore.S.
2844 uint32_t mas0, mas1, mas3, mas7;
2849 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
2850 mtspr(SPR_MAS0, mas0);
2851 __asm __volatile("isync; tlbre");
2853 mas1 = mfspr(SPR_MAS1);
2854 mas2 = mfspr(SPR_MAS2);
2855 mas3 = mfspr(SPR_MAS3);
2856 mas7 = mfspr(SPR_MAS7);
2858 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
2861 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2862 kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
2863 kernstart = trunc_page(mas2);
2865 /* Setup TLB miss defaults */
2866 set_mas4_defaults();
2870 * pmap_early_io_unmap() should be used in close conjunction with
2871 * pmap_early_io_map(), as in the following snippet:
2873 * x = pmap_early_io_map(...);
2874 * <do something with x>
2875 * pmap_early_io_unmap(x, size);
2877 * with no other early I/O allocations made in between.
2880 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
2886 size = roundup(size, PAGE_SIZE);
2888 for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
2889 tlb1_read_entry(&e, i);
2890 if (!(e.mas1 & MAS1_VALID))
2892 if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
2894 e.mas1 &= ~MAS1_VALID;
2895 tlb1_write_entry(&e, i);
2898 if (tlb1_map_base == va + isize)
2899 tlb1_map_base -= isize;
2903 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
2910 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
2912 for (i = 0; i < TLB1_ENTRIES; i++) {
2913 tlb1_read_entry(&e, i);
2914 if (!(e.mas1 & MAS1_VALID))
2916 if (pa >= e.phys && (pa + size) <=
2918 return (e.virt + (pa - e.phys));
2921 pa_base = rounddown(pa, PAGE_SIZE);
2922 size = roundup(size + (pa - pa_base), PAGE_SIZE);
2923 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
2924 va = tlb1_map_base + (pa - pa_base);
2927 sz = 1 << (ilog2(size) & ~1);
2928 tlb1_set_entry(tlb1_map_base, pa_base, sz,
2929 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
2932 tlb1_map_base += sz;
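/*
 * Illustrative early-boot use of the map/unmap pair above (hypothetical
 * UART address), following the snippet in the comment before
 * pmap_early_io_unmap().  No other early I/O mappings may be created
 * between the two calls, since unmapping can only give the KVA back while
 * tlb1_map_base still points just past it.
 */
#if 0	/* illustrative sketch only */
vm_offset_t uart_va;

uart_va = pmap_early_io_map(0xffe04000ul, PAGE_SIZE);
/* ... program the hypothetical UART through uart_va ... */
pmap_early_io_unmap(uart_va, PAGE_SIZE);
#endif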
2939 pmap_track_page(pmap_t pmap, vm_offset_t va)
2943 struct pv_entry *pve;
2945 va = trunc_page(va);
2946 pa = pmap_kextract(va);
2947 page = PHYS_TO_VM_PAGE(pa);
2949 rw_wlock(&pvh_global_lock);
2952 TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
2953 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
2957 page->md.pv_tracked = true;
2958 pv_insert(pmap, va, page);
2961 rw_wunlock(&pvh_global_lock);
2966 * Set up MAS4 defaults.
2967 * These values are loaded into MAS0-MAS2 on a TLB miss.
2970 set_mas4_defaults(void)
2974 /* Defaults: TLB0, PID0, TSIZED=4K */
2975 mas4 = MAS4_TLBSELD0;
2976 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
2980 mtspr(SPR_MAS4, mas4);
2981 __asm __volatile("isync");
2986 * Return 0 if the physical IO range is encompassed by one of the
2987 * TLB1 entries, otherwise return an error code.
2990 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
2993 vm_paddr_t pa_start;
2995 unsigned int entry_tsize;
2996 vm_size_t entry_size;
2999 *va = (vm_offset_t)NULL;
3001 tlb1_read_entry(&e, i);
3002 /* Skip invalid entries */
3003 if (!(e.mas1 & MAS1_VALID))
3007 * The entry must be cache-inhibited, guarded, and r/w
3008 * so it can function as an i/o page
3010 prot = e.mas2 & (MAS2_I | MAS2_G);
3011 if (prot != (MAS2_I | MAS2_G))
3014 prot = e.mas3 & (MAS3_SR | MAS3_SW);
3015 if (prot != (MAS3_SR | MAS3_SW))
3018 /* The address should be within the entry range. */
3019 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3020 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3022 entry_size = tsize2size(entry_tsize);
3023 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
3024 (e.mas3 & MAS3_RPN);
3025 pa_end = pa_start + entry_size;
3027 if ((pa < pa_start) || ((pa + size) > pa_end))
3030 /* Return virtual address of this mapping. */
3031 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
3036 /* Print out contents of the MAS registers for each TLB0 entry */
3038 #ifdef __powerpc64__
3039 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
3041 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3052 if (mas1 & MAS1_VALID)
3057 if (mas1 & MAS1_IPROT)
3062 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3063 tid = MAS1_GETTID(mas1);
3065 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3068 size = tsize2size(tsize);
3070 printf("%3d: (%s) [AS=%d] "
3071 "sz = 0x%jx tsz = %d tid = %d mas1 = 0x%08x "
3072 "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3073 i, desc, as, (uintmax_t)size, tsize, tid, mas1, mas2, mas3, mas7);
3076 DB_SHOW_COMMAND(tlb0, tlb0_print_tlbentries)
3078 uint32_t mas0, mas1, mas3, mas7;
3079 #ifdef __powerpc64__
3084 int entryidx, way, idx;
3086 printf("TLB0 entries:\n");
3087 for (way = 0; way < TLB0_WAYS; way ++)
3088 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
3090 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
3091 mtspr(SPR_MAS0, mas0);
3093 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
3094 mtspr(SPR_MAS2, mas2);
3096 __asm __volatile("isync; tlbre");
3098 mas1 = mfspr(SPR_MAS1);
3099 mas2 = mfspr(SPR_MAS2);
3100 mas3 = mfspr(SPR_MAS3);
3101 mas7 = mfspr(SPR_MAS7);
3103 idx = tlb0_tableidx(mas2, way);
3104 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3109 * Print out contents of the MAS registers for each TLB1 entry
3111 DB_SHOW_COMMAND(tlb1, tlb1_print_tlbentries)
3113 uint32_t mas0, mas1, mas3, mas7;
3114 #ifdef __powerpc64__
3121 printf("TLB1 entries:\n");
3122 for (i = 0; i < TLB1_ENTRIES; i++) {
3124 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3125 mtspr(SPR_MAS0, mas0);
3127 __asm __volatile("isync; tlbre");
3129 mas1 = mfspr(SPR_MAS1);
3130 mas2 = mfspr(SPR_MAS2);
3131 mas3 = mfspr(SPR_MAS3);
3132 mas7 = mfspr(SPR_MAS7);
3134 tlb_print_entry(i, mas1, mas2, mas3, mas7);
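/*
 * Both commands above are reached from the ddb prompt as "show tlb0" and
 * "show tlb1"; each walks the corresponding TLB in hardware and prints one
 * line per entry via tlb_print_entry().
 */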