2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * Some hw-specific parts of this pmap were derived from or influenced
27 * by NetBSD's ibm4xx pmap module. More generic code is shared with
28 * a few other pmap modules from the FreeBSD tree.
34 * Kernel and user threads run within one common virtual address space
37 * Virtual address space layout:
38 * -----------------------------
39 * 0x0000_0000 - 0xafff_ffff : user process
40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
43 * 0xc100_0000 - 0xfeef_ffff : KVA
44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region
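 *
 * A worked example of the table above (addresses are illustrative uses of
 * the documented ranges, not values taken from a live system): a
 * pmap_mapdev()-returned address such as 0xb010_0000 falls in the device
 * mapping window, while a post-bootstrap KVA allocation such as
 * 0xc250_0000 lands in the "actual free KVA space" range that begins at
 * 0xc200_9000.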
51 #include <sys/cdefs.h>
52 __FBSDID("$FreeBSD$");
54 #include "opt_kstack_pages.h"
56 #include <sys/param.h>
58 #include <sys/malloc.h>
62 #include <sys/queue.h>
63 #include <sys/systm.h>
64 #include <sys/kernel.h>
65 #include <sys/kerneldump.h>
66 #include <sys/linker.h>
67 #include <sys/msgbuf.h>
69 #include <sys/mutex.h>
70 #include <sys/rwlock.h>
71 #include <sys/sched.h>
73 #include <sys/vmmeter.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_pageout.h>
79 #include <vm/vm_extern.h>
80 #include <vm/vm_object.h>
81 #include <vm/vm_param.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_pager.h>
86 #include <machine/cpu.h>
87 #include <machine/pcb.h>
88 #include <machine/platform.h>
90 #include <machine/tlb.h>
91 #include <machine/spr.h>
92 #include <machine/md_var.h>
93 #include <machine/mmuvar.h>
94 #include <machine/pmap.h>
95 #include <machine/pte.h>
99 #ifdef DEBUG
100 #define debugf(fmt, args...) printf(fmt, ##args)
101 #else
102 #define debugf(fmt, args...)
103 #endif
105 #define TODO panic("%s: not implemented", __func__);
107 extern unsigned char _etext[];
108 extern unsigned char _end[];
110 extern uint32_t *bootinfo;
113 extern uint32_t bp_ntlb1s;
117 vm_offset_t kernstart;
120 /* Message buffer and tables. */
121 static vm_offset_t data_start;
122 static vm_size_t data_end;
124 /* Phys/avail memory regions. */
125 static struct mem_region *availmem_regions;
126 static int availmem_regions_sz;
127 static struct mem_region *physmem_regions;
128 static int physmem_regions_sz;
130 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
131 static vm_offset_t zero_page_va;
132 static struct mtx zero_page_mutex;
134 static struct mtx tlbivax_mutex;
137 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
138 * by the idle thread only, no lock required.
140 static vm_offset_t zero_page_idle_va;
142 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
143 static vm_offset_t copy_page_src_va;
144 static vm_offset_t copy_page_dst_va;
145 static struct mtx copy_page_mutex;
147 /**************************************************************************/
149 /**************************************************************************/
151 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
152 vm_prot_t, u_int flags, int8_t psind);
154 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
155 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
158 * If a user pmap is being processed by mmu_booke_remove and its resident count
159 * drops to 0, there are no more pages to remove, so we need not continue.
161 #define PMAP_REMOVE_DONE(pmap) \
162 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
164 extern void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
165 extern int elf32_nxstack;
167 /**************************************************************************/
168 /* TLB and TID handling */
169 /**************************************************************************/
171 /* Translation ID busy table */
172 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
175 * TLB0 capabilities (number of entries, ways, etc.). These can vary between e500
176 * core revisions and should be read from h/w registers during early config.
178 uint32_t tlb0_entries;
180 uint32_t tlb0_entries_per_way;
182 #define TLB0_ENTRIES (tlb0_entries)
183 #define TLB0_WAYS (tlb0_ways)
184 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
186 #define TLB1_ENTRIES 16
188 /* In-ram copy of the TLB1 */
189 static tlb_entry_t tlb1[TLB1_ENTRIES];
191 /* Next free entry in the TLB1 */
192 static unsigned int tlb1_idx;
193 static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;
195 static tlbtid_t tid_alloc(struct pmap *);
197 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
199 static int tlb1_set_entry(vm_offset_t, vm_paddr_t, vm_size_t, uint32_t);
200 static void tlb1_write_entry(unsigned int);
201 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
202 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
204 static vm_size_t tsize2size(unsigned int);
205 static unsigned int size2tsize(vm_size_t);
206 static unsigned int ilog2(unsigned int);
208 static void set_mas4_defaults(void);
210 static inline void tlb0_flush_entry(vm_offset_t);
211 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
213 /**************************************************************************/
214 /* Page table management */
215 /**************************************************************************/
217 static struct rwlock_padalign pvh_global_lock;
219 /* Data for the pv entry allocation mechanism */
220 static uma_zone_t pvzone;
221 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
223 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
225 #ifndef PMAP_SHPGPERPROC
226 #define PMAP_SHPGPERPROC 200
227 #endif
229 static void ptbl_init(void);
230 static struct ptbl_buf *ptbl_buf_alloc(void);
231 static void ptbl_buf_free(struct ptbl_buf *);
232 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
234 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
235 static void ptbl_free(mmu_t, pmap_t, unsigned int);
236 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
237 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
239 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
240 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
241 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
242 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
244 static pv_entry_t pv_alloc(void);
245 static void pv_free(pv_entry_t);
246 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
247 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
249 static void booke_pmap_init_qpages(void);
251 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
252 #define PTBL_BUFS (128 * 16)
254 struct ptbl_buf {
255 TAILQ_ENTRY(ptbl_buf) link; /* list link */
256 vm_offset_t kva; /* va of mapping */
257 };
259 /* ptbl free list and a lock used for access synchronization. */
260 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
261 static struct mtx ptbl_buf_freelist_lock;
263 /* Base address of kva space allocated for ptbl bufs. */
264 static vm_offset_t ptbl_buf_pool_vabase;
266 /* Pointer to ptbl_buf structures. */
267 static struct ptbl_buf *ptbl_bufs;
270 void pmap_bootstrap_ap(volatile uint32_t *);
274 * Kernel MMU interface
276 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
277 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
278 vm_size_t, vm_offset_t);
279 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
280 static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
281 vm_offset_t, vm_page_t *, vm_offset_t, int);
282 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
283 vm_prot_t, u_int flags, int8_t psind);
284 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
285 vm_page_t, vm_prot_t);
286 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
288 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
289 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
291 static void mmu_booke_init(mmu_t);
292 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
293 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
294 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
295 static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
296 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
298 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
300 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
301 vm_object_t, vm_pindex_t, vm_size_t);
302 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
303 static void mmu_booke_page_init(mmu_t, vm_page_t);
304 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
305 static void mmu_booke_pinit(mmu_t, pmap_t);
306 static void mmu_booke_pinit0(mmu_t, pmap_t);
307 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
309 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
310 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
311 static void mmu_booke_release(mmu_t, pmap_t);
312 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
313 static void mmu_booke_remove_all(mmu_t, vm_page_t);
314 static void mmu_booke_remove_write(mmu_t, vm_page_t);
315 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
316 static void mmu_booke_zero_page(mmu_t, vm_page_t);
317 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
318 static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
319 static void mmu_booke_activate(mmu_t, struct thread *);
320 static void mmu_booke_deactivate(mmu_t, struct thread *);
321 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
322 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
323 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
324 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
325 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
326 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
327 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
328 static void mmu_booke_kremove(mmu_t, vm_offset_t);
329 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
330 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
332 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
334 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
336 static void mmu_booke_scan_init(mmu_t);
337 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
338 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
340 static mmu_method_t mmu_booke_methods[] = {
341 /* pmap dispatcher interface */
342 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
343 MMUMETHOD(mmu_copy, mmu_booke_copy),
344 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
345 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
346 MMUMETHOD(mmu_enter, mmu_booke_enter),
347 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
348 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
349 MMUMETHOD(mmu_extract, mmu_booke_extract),
350 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
351 MMUMETHOD(mmu_init, mmu_booke_init),
352 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
353 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
354 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
355 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
356 MMUMETHOD(mmu_map, mmu_booke_map),
357 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
358 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
359 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
360 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
361 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
362 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
363 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
364 MMUMETHOD(mmu_protect, mmu_booke_protect),
365 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
366 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
367 MMUMETHOD(mmu_release, mmu_booke_release),
368 MMUMETHOD(mmu_remove, mmu_booke_remove),
369 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
370 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
371 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
372 MMUMETHOD(mmu_unwire, mmu_booke_unwire),
373 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
374 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
375 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
376 MMUMETHOD(mmu_activate, mmu_booke_activate),
377 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
378 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
379 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
381 /* Internal interfaces */
382 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
383 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
384 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
385 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
386 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
387 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
388 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
389 /* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
390 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
392 /* dumpsys() support */
393 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
394 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
395 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
400 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
402 static __inline uint32_t
403 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
408 if (ma != VM_MEMATTR_DEFAULT) {
409 switch (ma) {
410 case VM_MEMATTR_UNCACHEABLE:
411 return (PTE_I | PTE_G);
412 case VM_MEMATTR_WRITE_COMBINING:
413 case VM_MEMATTR_WRITE_BACK:
414 case VM_MEMATTR_PREFETCHABLE:
415 return (PTE_I);
416 case VM_MEMATTR_WRITE_THROUGH:
417 return (PTE_W | PTE_M);
418 }
419 }
422 * Assume the page is cache-inhibited and access is guarded unless
423 * it's in our available memory array.
425 attrib = _TLB_ENTRY_IO;
426 for (i = 0; i < physmem_regions_sz; i++) {
427 if ((pa >= physmem_regions[i].mr_start) &&
428 (pa < (physmem_regions[i].mr_start +
429 physmem_regions[i].mr_size))) {
430 attrib = _TLB_ENTRY_MEM;
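/*
 * Illustrative (non-compiled) use of tlb_calc_wimg(); the pa values below
 * are hypothetical and only demonstrate the two outcomes computed above.
 */
#if 0
	/* pa outside every physmem_regions[] entry: treated as device memory. */
	uint32_t io_wimg = tlb_calc_wimg(0xfef00000, VM_MEMATTR_DEFAULT);
	/* io_wimg == _TLB_ENTRY_IO (cache-inhibited, guarded) */

	/* pa inside a physmem region: treated as regular cacheable memory. */
	uint32_t mem_wimg = tlb_calc_wimg(0x00100000, VM_MEMATTR_DEFAULT);
	/* mem_wimg == _TLB_ENTRY_MEM */
#endif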
447 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
450 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
451 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
453 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
454 ("tlb_miss_lock: tried to lock self"));
456 tlb_lock(pc->pc_booke_tlb_lock);
458 CTR1(KTR_PMAP, "%s: locked", __func__);
465 tlb_miss_unlock(void)
473 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
475 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
476 __func__, pc->pc_cpuid);
478 tlb_unlock(pc->pc_booke_tlb_lock);
480 CTR1(KTR_PMAP, "%s: unlocked", __func__);
486 /* Return number of entries in TLB0. */
488 tlb0_get_tlbconf(void)
492 tlb0_cfg = mfspr(SPR_TLB0CFG);
493 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
494 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
495 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
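/*
 * Worked example of the calculation above: if the h/w reports values
 * typical of an e500v2 core (512 TLB0 entries, 4 ways), then
 * tlb0_entries_per_way = 512 / 4 = 128. The real numbers must always be
 * read from SPR_TLB0CFG as done here, not assumed.
 */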
498 /* Initialize pool of kva ptbl buffers. */
504 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
505 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
506 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
507 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
509 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
510 TAILQ_INIT(&ptbl_buf_freelist);
512 for (i = 0; i < PTBL_BUFS; i++) {
513 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
514 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
518 /* Get a ptbl_buf from the freelist. */
519 static struct ptbl_buf *
522 struct ptbl_buf *buf;
524 mtx_lock(&ptbl_buf_freelist_lock);
525 buf = TAILQ_FIRST(&ptbl_buf_freelist);
527 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
528 mtx_unlock(&ptbl_buf_freelist_lock);
530 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
535 /* Return ptbl buf to the free pool. */
537 ptbl_buf_free(struct ptbl_buf *buf)
540 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
542 mtx_lock(&ptbl_buf_freelist_lock);
543 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
544 mtx_unlock(&ptbl_buf_freelist_lock);
548 * Find the ptbl buf that maps the given ptbl on the pmap's list and free it.
551 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
553 struct ptbl_buf *pbuf;
555 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
557 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
559 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
560 if (pbuf->kva == (vm_offset_t)ptbl) {
561 /* Remove from pmap ptbl buf list. */
562 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
564 /* Free corresponding ptbl buf. */
570 /* Allocate page table. */
572 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
574 vm_page_t mtbl[PTBL_PAGES];
576 struct ptbl_buf *pbuf;
581 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
582 (pmap == kernel_pmap), pdir_idx);
584 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
585 ("ptbl_alloc: invalid pdir_idx"));
586 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
587 ("pte_alloc: valid ptbl entry exists!"));
589 pbuf = ptbl_buf_alloc();
591 panic("pte_alloc: couldn't alloc kernel virtual memory");
593 ptbl = (pte_t *)pbuf->kva;
595 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
597 /* Allocate ptbl pages; this will sleep! */
598 for (i = 0; i < PTBL_PAGES; i++) {
599 pidx = (PTBL_PAGES * pdir_idx) + i;
600 while ((m = vm_page_alloc(NULL, pidx,
601 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
603 rw_wunlock(&pvh_global_lock);
605 ptbl_free_pmap_ptbl(pmap, ptbl);
606 for (j = 0; j < i; j++)
607 vm_page_free(mtbl[j]);
608 atomic_subtract_int(&vm_cnt.v_wire_count, i);
612 rw_wlock(&pvh_global_lock);
618 /* Map allocated pages into kernel_pmap. */
619 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
621 /* Zero whole ptbl. */
622 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
624 /* Add pbuf to the pmap ptbl bufs list. */
625 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
630 /* Free ptbl pages and invalidate pdir entry. */
632 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
640 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
641 (pmap == kernel_pmap), pdir_idx);
643 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
644 ("ptbl_free: invalid pdir_idx"));
646 ptbl = pmap->pm_pdir[pdir_idx];
648 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
650 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
653 * Invalidate the pdir entry as soon as possible, so that other CPUs
654 * don't attempt to look up the page tables we are releasing.
656 mtx_lock_spin(&tlbivax_mutex);
659 pmap->pm_pdir[pdir_idx] = NULL;
662 mtx_unlock_spin(&tlbivax_mutex);
664 for (i = 0; i < PTBL_PAGES; i++) {
665 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
666 pa = pte_vatopa(mmu, kernel_pmap, va);
667 m = PHYS_TO_VM_PAGE(pa);
668 vm_page_free_zero(m);
669 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
670 mmu_booke_kremove(mmu, va);
673 ptbl_free_pmap_ptbl(pmap, ptbl);
677 * Decrement ptbl pages hold count and attempt to free ptbl pages.
678 * Called when removing pte entry from ptbl.
680 * Return 1 if ptbl pages were freed.
683 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
690 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
691 (pmap == kernel_pmap), pdir_idx);
693 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
694 ("ptbl_unhold: invalid pdir_idx"));
695 KASSERT((pmap != kernel_pmap),
696 ("ptbl_unhold: unholding kernel ptbl!"));
698 ptbl = pmap->pm_pdir[pdir_idx];
700 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
701 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
702 ("ptbl_unhold: non kva ptbl"));
704 /* decrement hold count */
705 for (i = 0; i < PTBL_PAGES; i++) {
706 pa = pte_vatopa(mmu, kernel_pmap,
707 (vm_offset_t)ptbl + (i * PAGE_SIZE));
708 m = PHYS_TO_VM_PAGE(pa);
713 * Free ptbl pages if there are no pte entries in this ptbl.
714 * wire_count has the same value for all ptbl pages, so check the last
715 * page.
717 if (m->wire_count == 0) {
718 ptbl_free(mmu, pmap, pdir_idx);
720 //debugf("ptbl_unhold: e (freed ptbl)\n");
728 * Increment hold count for ptbl pages. This routine is used when a new pte
729 * entry is being inserted into the ptbl.
732 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
739 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
742 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
743 ("ptbl_hold: invalid pdir_idx"));
744 KASSERT((pmap != kernel_pmap),
745 ("ptbl_hold: holding kernel ptbl!"));
747 ptbl = pmap->pm_pdir[pdir_idx];
749 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
751 for (i = 0; i < PTBL_PAGES; i++) {
752 pa = pte_vatopa(mmu, kernel_pmap,
753 (vm_offset_t)ptbl + (i * PAGE_SIZE));
754 m = PHYS_TO_VM_PAGE(pa);
759 /* Allocate pv_entry structure. */
766 if (pv_entry_count > pv_entry_high_water)
768 pv = uma_zalloc(pvzone, M_NOWAIT);
773 /* Free pv_entry structure. */
775 pv_free(pv_entry_t pve)
779 uma_zfree(pvzone, pve);
783 /* Allocate and initialize pv_entry structure. */
785 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
789 //int su = (pmap == kernel_pmap);
790 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
791 // (u_int32_t)pmap, va, (u_int32_t)m);
795 panic("pv_insert: no pv entries!");
801 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
802 rw_assert(&pvh_global_lock, RA_WLOCKED);
804 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
806 //debugf("pv_insert: e\n");
809 /* Destroy pv entry. */
811 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
815 //int su = (pmap == kernel_pmap);
816 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
818 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
819 rw_assert(&pvh_global_lock, RA_WLOCKED);
822 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
823 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
824 /* remove from pv_list */
825 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
826 if (TAILQ_EMPTY(&m->md.pv_list))
827 vm_page_aflag_clear(m, PGA_WRITEABLE);
829 /* free pv entry struct */
835 //debugf("pv_remove: e\n");
839 * Clear the pte entry and, if requested, try to free the page table page.
841 * Return 1 if ptbl pages were freed, otherwise return 0.
844 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
846 unsigned int pdir_idx = PDIR_IDX(va);
847 unsigned int ptbl_idx = PTBL_IDX(va);
852 //int su = (pmap == kernel_pmap);
853 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
854 // su, (u_int32_t)pmap, va, flags);
856 ptbl = pmap->pm_pdir[pdir_idx];
857 KASSERT(ptbl, ("pte_remove: null ptbl"));
859 pte = &ptbl[ptbl_idx];
861 if (pte == NULL || !PTE_ISVALID(pte))
864 if (PTE_ISWIRED(pte))
865 pmap->pm_stats.wired_count--;
867 /* Handle managed entry. */
868 if (PTE_ISMANAGED(pte)) {
869 /* Get vm_page_t for mapped pte. */
870 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
872 if (PTE_ISMODIFIED(pte))
875 if (PTE_ISREFERENCED(pte))
876 vm_page_aflag_set(m, PGA_REFERENCED);
878 pv_remove(pmap, va, m);
881 mtx_lock_spin(&tlbivax_mutex);
884 tlb0_flush_entry(va);
889 mtx_unlock_spin(&tlbivax_mutex);
891 pmap->pm_stats.resident_count--;
893 if (flags & PTBL_UNHOLD) {
894 //debugf("pte_remove: e (unhold)\n");
895 return (ptbl_unhold(mmu, pmap, pdir_idx));
898 //debugf("pte_remove: e\n");
903 * Insert PTE for a given page and virtual address.
906 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
909 unsigned int pdir_idx = PDIR_IDX(va);
910 unsigned int ptbl_idx = PTBL_IDX(va);
913 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
914 pmap == kernel_pmap, pmap, va);
916 /* Get the page table pointer. */
917 ptbl = pmap->pm_pdir[pdir_idx];
920 /* Allocate page table pages. */
921 ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
923 KASSERT(nosleep, ("nosleep and NULL ptbl"));
928 * Check if there is a valid mapping for the requested va;
929 * if there is, remove it.
931 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
932 if (PTE_ISVALID(pte)) {
933 pte_remove(mmu, pmap, va, PTBL_HOLD);
936 * pte slot is unused; increment the hold count for the ptbl pages.
939 if (pmap != kernel_pmap)
940 ptbl_hold(mmu, pmap, pdir_idx);
945 * Insert pv_entry into pv_list for mapped page if part of managed
948 if ((m->oflags & VPO_UNMANAGED) == 0) {
949 flags |= PTE_MANAGED;
951 /* Create and insert pv entry. */
952 pv_insert(pmap, va, m);
955 pmap->pm_stats.resident_count++;
957 mtx_lock_spin(&tlbivax_mutex);
960 tlb0_flush_entry(va);
961 if (pmap->pm_pdir[pdir_idx] == NULL) {
963 * If we just allocated a new page table, hook it in
966 pmap->pm_pdir[pdir_idx] = ptbl;
968 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
969 pte->rpn = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
970 pte->flags |= (PTE_VALID | flags);
973 mtx_unlock_spin(&tlbivax_mutex);
977 /* Return the pa for the given pmap/va. */
979 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
984 pte = pte_find(mmu, pmap, va);
985 if ((pte != NULL) && PTE_ISVALID(pte))
986 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
990 /* Get a pointer to a PTE in a page table. */
992 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
994 unsigned int pdir_idx = PDIR_IDX(va);
995 unsigned int ptbl_idx = PTBL_IDX(va);
997 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
999 if (pmap->pm_pdir[pdir_idx])
1000 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
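/*
 * Worked example of the two-level lookup above, assuming the usual Book-E
 * geometry of 4 KB pages with each pdir entry covering 4 MB (PDIR_SIZE):
 * for a hypothetical va = 0xc2345000, PDIR_IDX(va) = va >> 22 = 0x308 and
 * PTBL_IDX(va) = (va >> 12) & 0x3ff = 0x345, so the pte returned is
 * &pm_pdir[0x308][0x345].
 */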
1005 /**************************************************************************/
1007 /**************************************************************************/
1010 * This is called during booke_init, before the system is really initialized.
1013 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
1015 vm_offset_t phys_kernelend;
1016 struct mem_region *mp, *mp1;
1019 u_int phys_avail_count;
1020 vm_size_t physsz, hwphyssz, kstack0_sz;
1021 vm_offset_t kernel_pdir, kstack0, va;
1022 vm_paddr_t kstack0_phys;
1026 debugf("mmu_booke_bootstrap: entered\n");
1028 /* Set interesting system properties */
1032 /* Initialize invalidation mutex */
1033 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
1035 /* Read TLB0 size and associativity. */
1039 * Align kernel start and end address (kernel image).
1040 * Note that kernel end does not necessarily relate to kernsize.
1041 * kernsize is the size of the kernel that is actually mapped.
1043 kernstart = trunc_page(start);
1044 data_start = round_page(kernelend);
1045 data_end = data_start;
1048 * Addresses of preloaded modules (like file systems) use
1049 * physical addresses. Make sure we relocate those into
1050 * virtual addresses.
1052 preload_addr_relocate = kernstart - kernload;
1054 /* Allocate the dynamic per-cpu area. */
1055 dpcpu = (void *)data_end;
1056 data_end += DPCPU_SIZE;
1058 /* Allocate space for the message buffer. */
1059 msgbufp = (struct msgbuf *)data_end;
1060 data_end += msgbufsize;
1061 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
1064 data_end = round_page(data_end);
1066 /* Allocate space for ptbl_bufs. */
1067 ptbl_bufs = (struct ptbl_buf *)data_end;
1068 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1069 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
1072 data_end = round_page(data_end);
1074 /* Allocate PTE tables for kernel KVA. */
1075 kernel_pdir = data_end;
1076 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
1077 PDIR_SIZE - 1) / PDIR_SIZE;
1078 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1079 debugf(" kernel ptbls: %d\n", kernel_ptbls);
1080 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);
1082 debugf(" data_end: 0x%08x\n", data_end);
1083 if (data_end - kernstart > kernsize) {
1084 kernsize += tlb1_mapin_region(kernstart + kernsize,
1085 kernload + kernsize, (data_end - kernstart) - kernsize);
1087 data_end = kernstart + kernsize;
1088 debugf(" updated data_end: 0x%08x\n", data_end);
1091 * Clear the structures - note we can only do it safely after the
1092 * possible additional TLB1 translations are in place (above) so that
1093 * the whole range up to the currently calculated 'data_end' is covered.
1095 dpcpu_init(dpcpu, 0);
1096 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1097 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1099 /*******************************************************/
1100 /* Set the start and end of kva. */
1101 /*******************************************************/
1102 virtual_avail = round_page(data_end);
1103 virtual_end = VM_MAX_KERNEL_ADDRESS;
1105 /* Allocate KVA space for page zero/copy operations. */
1106 zero_page_va = virtual_avail;
1107 virtual_avail += PAGE_SIZE;
1108 zero_page_idle_va = virtual_avail;
1109 virtual_avail += PAGE_SIZE;
1110 copy_page_src_va = virtual_avail;
1111 virtual_avail += PAGE_SIZE;
1112 copy_page_dst_va = virtual_avail;
1113 virtual_avail += PAGE_SIZE;
1114 debugf("zero_page_va = 0x%08x\n", zero_page_va);
1115 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
1116 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1117 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1119 /* Initialize page zero/copy mutexes. */
1120 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1121 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1123 /* Allocate KVA space for ptbl bufs. */
1124 ptbl_buf_pool_vabase = virtual_avail;
1125 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1126 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1127 ptbl_buf_pool_vabase, virtual_avail);
1129 /* Calculate corresponding physical addresses for the kernel region. */
1130 phys_kernelend = kernload + kernsize;
1131 debugf("kernel image and allocated data:\n");
1132 debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
1133 debugf(" kernstart = 0x%08x\n", kernstart);
1134 debugf(" kernsize = 0x%08x\n", kernsize);
1136 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1137 panic("mmu_booke_bootstrap: phys_avail too small");
1140 * Remove the kernel physical address range from the avail regions list.
1141 * Page align all regions: non-page-aligned memory isn't very interesting
1142 * to us. Also, sort the entries in ascending address order.
1145 /* Retrieve phys/avail mem regions */
1146 mem_regions(&physmem_regions, &physmem_regions_sz,
1147 &availmem_regions, &availmem_regions_sz);
1149 cnt = availmem_regions_sz;
1150 debugf("processing avail regions:\n");
1151 for (mp = availmem_regions; mp->mr_size; mp++) {
1153 e = mp->mr_start + mp->mr_size;
1154 debugf(" %08x-%08x -> ", s, e);
1155 /* Check whether this region holds all of the kernel. */
1156 if (s < kernload && e > phys_kernelend) {
1157 availmem_regions[cnt].mr_start = phys_kernelend;
1158 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1161 /* Look whether this region starts within the kernel. */
1162 if (s >= kernload && s < phys_kernelend) {
1163 if (e <= phys_kernelend)
1167 /* Now look whether this region ends within the kernel. */
1168 if (e > kernload && e <= phys_kernelend) {
1173 /* Now page align the start and size of the region. */
1179 debugf("%08x-%08x = %x\n", s, e, sz);
1181 /* Check whether some memory is left here. */
1185 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1191 /* Do an insertion sort. */
1192 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1193 if (s < mp1->mr_start)
1196 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1204 availmem_regions_sz = cnt;
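/*
 * To make the carve-out above concrete (all numbers hypothetical): given a
 * single avail region 0x0000_0000 - 0x8000_0000 and a kernel occupying
 * kernload = 0x0100_0000 through phys_kernelend = 0x0180_0000, the "holds
 * all of the kernel" case trims the region to end at 0x0100_0000 and
 * appends a new region starting at 0x0180_0000; both pieces are then page
 * aligned and insertion sorted as above.
 */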
1206 /*******************************************************/
1207 /* Steal physical memory for kernel stack from the end */
1208 /* of the first avail region */
1209 /*******************************************************/
1210 kstack0_sz = kstack_pages * PAGE_SIZE;
1211 kstack0_phys = availmem_regions[0].mr_start +
1212 availmem_regions[0].mr_size;
1213 kstack0_phys -= kstack0_sz;
1214 availmem_regions[0].mr_size -= kstack0_sz;
1216 /*******************************************************/
1217 /* Fill in phys_avail table, based on availmem_regions */
1218 /*******************************************************/
1219 phys_avail_count = 0;
1222 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1224 debugf("fill in phys_avail:\n");
1225 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1227 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
1228 availmem_regions[i].mr_start,
1229 availmem_regions[i].mr_start +
1230 availmem_regions[i].mr_size,
1231 availmem_regions[i].mr_size);
1233 if (hwphyssz != 0 &&
1234 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1235 debugf(" hw.physmem adjust\n");
1236 if (physsz < hwphyssz) {
1237 phys_avail[j] = availmem_regions[i].mr_start;
1239 availmem_regions[i].mr_start +
1247 phys_avail[j] = availmem_regions[i].mr_start;
1248 phys_avail[j + 1] = availmem_regions[i].mr_start +
1249 availmem_regions[i].mr_size;
1251 physsz += availmem_regions[i].mr_size;
1253 physmem = btoc(physsz);
1255 /* Calculate the last available physical address. */
1256 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1258 Maxmem = powerpc_btop(phys_avail[i + 1]);
1260 debugf("Maxmem = 0x%08lx\n", Maxmem);
1261 debugf("phys_avail_count = %d\n", phys_avail_count);
1262 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
1265 /*******************************************************/
1266 /* Initialize (statically allocated) kernel pmap. */
1267 /*******************************************************/
1268 PMAP_LOCK_INIT(kernel_pmap);
1269 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1271 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
1272 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
1273 debugf("kernel pdir range: 0x%08x - 0x%08x\n",
1274 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
1276 /* Initialize kernel pdir */
1277 for (i = 0; i < kernel_ptbls; i++)
1278 kernel_pmap->pm_pdir[kptbl_min + i] =
1279 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
1281 for (i = 0; i < MAXCPU; i++) {
1282 kernel_pmap->pm_tid[i] = TID_KERNEL;
1284 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1285 tidbusy[i][TID_KERNEL] = kernel_pmap;
1289 * Fill in PTEs covering kernel code and data. They are not required
1290 * for address translation, as this area is covered by static TLB1
1291 * entries, but for pte_vatopa() to work correctly with kernel area
1292 * addresses.
1294 for (va = kernstart; va < data_end; va += PAGE_SIZE) {
1295 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1296 pte->rpn = kernload + (va - kernstart);
1297 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1300 /* Mark kernel_pmap active on all CPUs */
1301 CPU_FILL(&kernel_pmap->pm_active);
1304 * Initialize the global pv list lock.
1306 rw_init(&pvh_global_lock, "pmap pv global");
1308 /*******************************************************/
1310 /*******************************************************/
1312 /* Enter kstack0 into kernel map, provide guard page */
1313 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1314 thread0.td_kstack = kstack0;
1315 thread0.td_kstack_pages = kstack_pages;
1317 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
1318 debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
1319 kstack0_phys, kstack0_phys + kstack0_sz);
1320 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
1322 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
1323 for (i = 0; i < kstack_pages; i++) {
1324 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
1325 kstack0 += PAGE_SIZE;
1326 kstack0_phys += PAGE_SIZE;
1329 pmap_bootstrapped = 1;
1331 debugf("virtual_avail = %08x\n", virtual_avail);
1332 debugf("virtual_end = %08x\n", virtual_end);
1334 debugf("mmu_booke_bootstrap: exit\n");
1339 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
1344 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
1345 * have the snapshot of its contents in the s/w tlb1[] table, so use
1346 * these values directly to (re)program the AP's TLB1 hardware.
1348 for (i = bp_ntlb1s; i < tlb1_idx; i++) {
1349 /* Skip invalid entries */
1350 if (!(tlb1[i].mas1 & MAS1_VALID))
1353 tlb1_write_entry(i);
1356 set_mas4_defaults();
1361 booke_pmap_init_qpages(void)
1368 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
1369 if (pc->pc_qmap_addr == 0)
1370 panic("pmap_init_qpages: unable to allocate KVA");
1374 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
1377 * Get the physical page address for the given pmap/virtual address.
1380 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1385 pa = pte_vatopa(mmu, pmap, va);
1392 * Extract the physical page address associated with the given
1393 * kernel virtual address.
1396 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
1400 /* Check TLB1 mappings */
1401 for (i = 0; i < tlb1_idx; i++) {
1402 if (!(tlb1[i].mas1 & MAS1_VALID))
1404 if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size)
1405 return (tlb1[i].phys + (va - tlb1[i].virt));
1408 return (pte_vatopa(mmu, kernel_pmap, va));
1412 * Initialize the pmap module.
1413 * Called by vm_init, to initialize any structures that the pmap
1414 * system needs to map virtual memory.
1417 mmu_booke_init(mmu_t mmu)
1419 int shpgperproc = PMAP_SHPGPERPROC;
1422 * Initialize the address space (zone) for the pv entries. Set a
1423 * high water mark so that the system can recover from excessive
1424 * numbers of pv entries.
1426 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1427 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1429 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1430 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
1432 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1433 pv_entry_high_water = 9 * (pv_entry_max / 10);
1435 uma_zone_reserve_kva(pvzone, pv_entry_max);
1437 /* Pre-fill pvzone with initial number of pv entries. */
1438 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
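/*
 * Sizing example for the limits computed above (figures hypothetical):
 * with shpgperproc = 200, maxproc = 1000 and vm_cnt.v_page_count =
 * 250000, pv_entry_max = 200 * 1000 + 250000 = 450000 and
 * pv_entry_high_water = 9 * (450000 / 10) = 405000.
 */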
1440 /* Initialize ptbl allocation. */
1445 * Map a list of wired pages into kernel virtual address space. This is
1446 * intended for temporary mappings which do not need page modification or
1447 * references recorded. Existing mappings in the region are overwritten.
1450 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1455 while (count-- > 0) {
1456 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1463 * Remove page mappings from kernel virtual address space. Intended for
1464 * temporary mappings entered by mmu_booke_qenter.
1467 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
1472 while (count-- > 0) {
1473 mmu_booke_kremove(mmu, va);
1479 * Map a wired page into kernel virtual address space.
1482 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1485 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1489 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1491 unsigned int pdir_idx = PDIR_IDX(va);
1492 unsigned int ptbl_idx = PTBL_IDX(va);
1496 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1497 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
1499 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
1500 flags |= tlb_calc_wimg(pa, ma);
1502 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1504 mtx_lock_spin(&tlbivax_mutex);
1507 if (PTE_ISVALID(pte)) {
1509 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
1511 /* Flush entry from TLB0 */
1512 tlb0_flush_entry(va);
1515 pte->rpn = PTE_RPN_FROM_PA(pa);
1518 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1519 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1520 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1522 /* Flush the real memory from the instruction cache. */
1523 if ((flags & (PTE_I | PTE_G)) == 0) {
1524 __syncicache((void *)va, PAGE_SIZE);
1528 mtx_unlock_spin(&tlbivax_mutex);
1532 * Remove a page from the kernel page table.
1535 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1537 unsigned int pdir_idx = PDIR_IDX(va);
1538 unsigned int ptbl_idx = PTBL_IDX(va);
1541 // CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));
1543 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1544 (va <= VM_MAX_KERNEL_ADDRESS)),
1545 ("mmu_booke_kremove: invalid va"));
1547 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1549 if (!PTE_ISVALID(pte)) {
1551 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1556 mtx_lock_spin(&tlbivax_mutex);
1559 /* Invalidate entry in TLB0, update PTE. */
1560 tlb0_flush_entry(va);
1565 mtx_unlock_spin(&tlbivax_mutex);
1569 * Initialize pmap associated with process 0.
1572 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1575 PMAP_LOCK_INIT(pmap);
1576 mmu_booke_pinit(mmu, pmap);
1577 PCPU_SET(curpmap, pmap);
1581 * Initialize a preallocated and zeroed pmap structure,
1582 * such as one in a vmspace structure.
1585 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1589 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
1590 curthread->td_proc->p_pid, curthread->td_proc->p_comm);
1592 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
1594 for (i = 0; i < MAXCPU; i++)
1595 pmap->pm_tid[i] = TID_NONE;
1596 CPU_ZERO(&pmap->pm_active);
1597 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1598 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1599 TAILQ_INIT(&pmap->pm_ptbl_list);
1603 * Release any resources held by the given physical map.
1604 * Called when a pmap initialized by mmu_booke_pinit is being released.
1605 * Should only be called if the map contains no valid mappings.
1608 mmu_booke_release(mmu_t mmu, pmap_t pmap)
1611 KASSERT(pmap->pm_stats.resident_count == 0,
1612 ("pmap_release: pmap resident count %ld != 0",
1613 pmap->pm_stats.resident_count));
1617 * Insert the given physical page at the specified virtual address in the
1618 * target physical map with the protection requested. If specified, the
1619 * page will be wired down.
1622 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1623 vm_prot_t prot, u_int flags, int8_t psind)
1627 rw_wlock(&pvh_global_lock);
1629 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
1630 rw_wunlock(&pvh_global_lock);
1636 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1637 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
1642 int error, su, sync;
1644 pa = VM_PAGE_TO_PHYS(m);
1645 su = (pmap == kernel_pmap);
1648 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1649 // "pa=0x%08x prot=0x%08x flags=%#x)\n",
1650 // (u_int32_t)pmap, su, pmap->pm_tid,
1651 // (u_int32_t)m, va, pa, prot, flags);
1654 KASSERT(((va >= virtual_avail) &&
1655 (va <= VM_MAX_KERNEL_ADDRESS)),
1656 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1658 KASSERT((va <= VM_MAXUSER_ADDRESS),
1659 ("mmu_booke_enter_locked: user pmap, non user va"));
1661 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1662 VM_OBJECT_ASSERT_LOCKED(m->object);
1664 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1667 * If there is an existing mapping, and the physical address has not
1668 * changed, this must be a protection or wiring change.
1670 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1671 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1674 * Before actually updating pte->flags we calculate and
1675 * prepare its new value in a helper var.
1678 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1680 /* Wiring change, just update stats. */
1681 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
1682 if (!PTE_ISWIRED(pte)) {
1684 pmap->pm_stats.wired_count++;
1687 if (PTE_ISWIRED(pte)) {
1688 flags &= ~PTE_WIRED;
1689 pmap->pm_stats.wired_count--;
1693 if (prot & VM_PROT_WRITE) {
1694 /* Add write permissions. */
1699 if ((flags & PTE_MANAGED) != 0)
1700 vm_page_aflag_set(m, PGA_WRITEABLE);
1702 /* Handle modified pages, sense modify status. */
1705 * The PTE_MODIFIED flag could have been set by underlying
1706 * TLB misses since we last read it (above), and other
1707 * CPUs could have updated it, so we check the PTE
1708 * directly rather than relying on the saved local flags
1709 * copy.
1711 if (PTE_ISMODIFIED(pte))
1715 if (prot & VM_PROT_EXECUTE) {
1721 * Check existing flags for execute permissions: if we
1722 * are turning execute permissions on, the icache should
1723 * be flushed.
1725 if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
1729 flags &= ~PTE_REFERENCED;
1732 * The new flags value is all calculated -- only now actually
1735 mtx_lock_spin(&tlbivax_mutex);
1738 tlb0_flush_entry(va);
1742 mtx_unlock_spin(&tlbivax_mutex);
1746 * If there is an existing mapping, but it's for a different
1747 * physical address, pte_enter() will delete the old mapping.
1749 //if ((pte != NULL) && PTE_ISVALID(pte))
1750 // debugf("mmu_booke_enter_locked: replace\n");
1752 // debugf("mmu_booke_enter_locked: new\n");
1754 /* Now set up the flags and install the new mapping. */
1755 flags = (PTE_SR | PTE_VALID);
1761 if (prot & VM_PROT_WRITE) {
1766 if ((m->oflags & VPO_UNMANAGED) == 0)
1767 vm_page_aflag_set(m, PGA_WRITEABLE);
1770 if (prot & VM_PROT_EXECUTE) {
1776 /* If it's wired, update stats. */
1777 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
1780 error = pte_enter(mmu, pmap, m, va, flags,
1781 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
1783 return (KERN_RESOURCE_SHORTAGE);
1785 if ((flags & PTE_WIRED) != 0)
1786 pmap->pm_stats.wired_count++;
1788 /* Flush the real memory from the instruction cache. */
1789 if (prot & VM_PROT_EXECUTE)
1793 if (sync && (su || pmap == PCPU_GET(curpmap))) {
1794 __syncicache((void *)va, PAGE_SIZE);
1798 return (KERN_SUCCESS);
1802 * Maps a sequence of resident pages belonging to the same object.
1803 * The sequence begins with the given page m_start. This page is
1804 * mapped at the given virtual address start. Each subsequent page is
1805 * mapped at a virtual address that is offset from start by the same
1806 * amount as the page is offset from m_start within the object. The
1807 * last page in the sequence is the page with the largest offset from
1808 * m_start that can be mapped at a virtual address less than the given
1809 * virtual address end. Not every virtual page between start and end
1810 * is mapped; only those for which a resident page exists with the
1811 * corresponding offset from m_start are mapped.
1814 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1815 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1818 vm_pindex_t diff, psize;
1820 VM_OBJECT_ASSERT_LOCKED(m_start->object);
1822 psize = atop(end - start);
1824 rw_wlock(&pvh_global_lock);
1826 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1827 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
1828 prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1829 PMAP_ENTER_NOSLEEP, 0);
1830 m = TAILQ_NEXT(m, listq);
1832 rw_wunlock(&pvh_global_lock);
1837 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1841 rw_wlock(&pvh_global_lock);
1843 mmu_booke_enter_locked(mmu, pmap, va, m,
1844 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
1846 rw_wunlock(&pvh_global_lock);
1851 * Remove the given range of addresses from the specified map.
1853 * It is assumed that the start and end are properly rounded to the page size.
1856 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1861 int su = (pmap == kernel_pmap);
1863 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1864 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1867 KASSERT(((va >= virtual_avail) &&
1868 (va <= VM_MAX_KERNEL_ADDRESS)),
1869 ("mmu_booke_remove: kernel pmap, non kernel va"));
1871 KASSERT((va <= VM_MAXUSER_ADDRESS),
1872 ("mmu_booke_remove: user pmap, non user va"));
1875 if (PMAP_REMOVE_DONE(pmap)) {
1876 //debugf("mmu_booke_remove: e (empty)\n");
1880 hold_flag = PTBL_HOLD_FLAG(pmap);
1881 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1883 rw_wlock(&pvh_global_lock);
1885 for (; va < endva; va += PAGE_SIZE) {
1886 pte = pte_find(mmu, pmap, va);
1887 if ((pte != NULL) && PTE_ISVALID(pte))
1888 pte_remove(mmu, pmap, va, hold_flag);
1891 rw_wunlock(&pvh_global_lock);
1893 //debugf("mmu_booke_remove: e\n");
1897 * Remove physical page from all pmaps in which it resides.
1900 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1905 rw_wlock(&pvh_global_lock);
1906 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1907 pvn = TAILQ_NEXT(pv, pv_link);
1909 PMAP_LOCK(pv->pv_pmap);
1910 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1911 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1912 PMAP_UNLOCK(pv->pv_pmap);
1914 vm_page_aflag_clear(m, PGA_WRITEABLE);
1915 rw_wunlock(&pvh_global_lock);
1919 * Map a range of physical addresses into kernel virtual address space.
1922 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1923 vm_paddr_t pa_end, int prot)
1925 vm_offset_t sva = *virt;
1926 vm_offset_t va = sva;
1928 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1929 // sva, pa_start, pa_end);
1931 while (pa_start < pa_end) {
1932 mmu_booke_kenter(mmu, va, pa_start);
1934 pa_start += PAGE_SIZE;
1938 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1943 * The pmap must be activated before its address space can be accessed in
1944 * any way.
1947 mmu_booke_activate(mmu_t mmu, struct thread *td)
1952 pmap = &td->td_proc->p_vmspace->vm_pmap;
1954 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1955 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1957 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1961 cpuid = PCPU_GET(cpuid);
1962 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
1963 PCPU_SET(curpmap, pmap);
1965 if (pmap->pm_tid[cpuid] == TID_NONE)
1968 /* Load PID0 register with pmap tid value. */
1969 mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
1970 __asm __volatile("isync");
1972 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
1976 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1977 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1981 * Deactivate the specified process's address space.
1984 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1988 pmap = &td->td_proc->p_vmspace->vm_pmap;
1990 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1991 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1993 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
1995 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
1996 PCPU_SET(curpmap, NULL);
2000 * Copy the range specified by src_addr/len
2001 * from the source map to the range dst_addr/len
2002 * in the destination map.
2004 * This routine is only advisory and need not do anything.
2007 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2008 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2014 * Set the physical protection on the specified range of this map as requested.
2017 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2024 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2025 mmu_booke_remove(mmu, pmap, sva, eva);
2029 if (prot & VM_PROT_WRITE)
2033 for (va = sva; va < eva; va += PAGE_SIZE) {
2034 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2035 if (PTE_ISVALID(pte)) {
2036 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2038 mtx_lock_spin(&tlbivax_mutex);
2041 /* Handle modified pages. */
2042 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
2045 tlb0_flush_entry(va);
2046 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2049 mtx_unlock_spin(&tlbivax_mutex);
2057 * Clear the write and modified bits in each of the given page's mappings.
2060 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2065 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2066 ("mmu_booke_remove_write: page %p is not managed", m));
2069 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2070 * set by another thread while the object is locked. Thus,
2071 * if PGA_WRITEABLE is clear, no page table entries need updating.
2073 VM_OBJECT_ASSERT_WLOCKED(m->object);
2074 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2076 rw_wlock(&pvh_global_lock);
2077 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2078 PMAP_LOCK(pv->pv_pmap);
2079 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2080 if (PTE_ISVALID(pte)) {
2081 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2083 mtx_lock_spin(&tlbivax_mutex);
2086 /* Handle modified pages. */
2087 if (PTE_ISMODIFIED(pte))
2090 /* Flush mapping from TLB0. */
2091 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2094 mtx_unlock_spin(&tlbivax_mutex);
2097 PMAP_UNLOCK(pv->pv_pmap);
2099 vm_page_aflag_clear(m, PGA_WRITEABLE);
2100 rw_wunlock(&pvh_global_lock);
2104 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2113 va = trunc_page(va);
2114 sz = round_page(sz);
2116 rw_wlock(&pvh_global_lock);
2117 pmap = PCPU_GET(curpmap);
2118 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2121 pte = pte_find(mmu, pm, va);
2122 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2128 /* Create a mapping in the active pmap. */
2130 m = PHYS_TO_VM_PAGE(pa);
2132 pte_enter(mmu, pmap, m, addr,
2133 PTE_SR | PTE_VALID | PTE_UR, FALSE);
2134 __syncicache((void *)addr, PAGE_SIZE);
2135 pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2138 __syncicache((void *)va, PAGE_SIZE);
2143 rw_wunlock(&pvh_global_lock);
2147 * Atomically extract and hold the physical page with the given
2148 * pmap and virtual address pair if that mapping permits the given
2149 * protection.
2152 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2164 pte = pte_find(mmu, pmap, va);
2165 if ((pte != NULL) && PTE_ISVALID(pte)) {
2166 if (pmap == kernel_pmap)
2171 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2172 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2174 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2185 * Initialize a vm_page's machine-dependent fields.
2188 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2191 TAILQ_INIT(&m->md.pv_list);
2195 * mmu_booke_zero_page_area zeros the specified hardware page by
2196 * mapping it into virtual memory and using bzero to clear its contents.
2199 * off and size must reside within a single page.
2202 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2206 /* XXX KASSERT off and size are within a single page? */
2208 mtx_lock(&zero_page_mutex);
2211 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2212 bzero((caddr_t)va + off, size);
2213 mmu_booke_kremove(mmu, va);
2215 mtx_unlock(&zero_page_mutex);
2219 * mmu_booke_zero_page zeros the specified hardware page.
2222 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2225 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2229 * mmu_booke_copy_page copies the specified (machine independent) page by
2230 * mapping the page into virtual memory and using memcpy to copy the page,
2231 * one machine dependent page at a time.
2234 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2236 vm_offset_t sva, dva;
2238 sva = copy_page_src_va;
2239 dva = copy_page_dst_va;
2241 mtx_lock(&copy_page_mutex);
2242 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2243 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2244 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2245 mmu_booke_kremove(mmu, dva);
2246 mmu_booke_kremove(mmu, sva);
2247 mtx_unlock(&copy_page_mutex);
2251 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
2252 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
2255 vm_offset_t a_pg_offset, b_pg_offset;
2258 mtx_lock(&copy_page_mutex);
2259 while (xfersize > 0) {
2260 a_pg_offset = a_offset & PAGE_MASK;
2261 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2262 mmu_booke_kenter(mmu, copy_page_src_va,
2263 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
2264 a_cp = (char *)copy_page_src_va + a_pg_offset;
2265 b_pg_offset = b_offset & PAGE_MASK;
2266 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2267 mmu_booke_kenter(mmu, copy_page_dst_va,
2268 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
2269 b_cp = (char *)copy_page_dst_va + b_pg_offset;
2270 bcopy(a_cp, b_cp, cnt);
2271 mmu_booke_kremove(mmu, copy_page_dst_va);
2272 mmu_booke_kremove(mmu, copy_page_src_va);
2277 mtx_unlock(&copy_page_mutex);
2281 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2282 * into virtual memory and using bzero to clear its contents. This is intended
2283 * to be called from the vm_pagezero process only and outside of Giant. No
2287 mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2291 va = zero_page_idle_va;
2292 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2293 bzero((caddr_t)va, PAGE_SIZE);
2294 mmu_booke_kremove(mmu, va);
2298 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
2305 paddr = VM_PAGE_TO_PHYS(m);
2307 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2308 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m));
2311 qaddr = PCPU_GET(qmap_addr);
2313 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(qaddr)][PTBL_IDX(qaddr)]);
2315 KASSERT(pte->flags == 0, ("mmu_booke_quick_enter_page: PTE busy"));
2318 * XXX: tlbivax is broadcast to other cores, but qaddr should
2319 * not be present in other TLBs. Is there a better instruction
2320 * sequence to use? Or just forget it & use mmu_booke_kenter()...
2322 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
2323 __asm __volatile("isync; msync");
2325 pte->rpn = paddr & ~PTE_PA_MASK;
2328 /* Flush the real memory from the instruction cache. */
2329 if ((flags & (PTE_I | PTE_G)) == 0)
2330 __syncicache((void *)qaddr, PAGE_SIZE);
2336 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
2340 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(addr)][PTBL_IDX(addr)]);
2342 KASSERT(PCPU_GET(qmap_addr) == addr,
2343 ("mmu_booke_quick_remove_page: invalid address"));
2344 KASSERT(pte->flags != 0,
2345 ("mmu_booke_quick_remove_page: PTE not in use"));
2353 * Return whether or not the specified physical page was modified
2354 * in any of the physical maps.
2357 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2363 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2364 ("mmu_booke_is_modified: page %p is not managed", m));
2368 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2369 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
2370 * is clear, no PTEs can be modified.
2372 VM_OBJECT_ASSERT_WLOCKED(m->object);
2373 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2375 rw_wlock(&pvh_global_lock);
2376 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2377 PMAP_LOCK(pv->pv_pmap);
2378 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2380 if (PTE_ISMODIFIED(pte))
2383 PMAP_UNLOCK(pv->pv_pmap);
2387 rw_wunlock(&pvh_global_lock);
2392 * Return whether or not the specified virtual address is eligible
2396 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2403 * Return whether or not the specified physical page was referenced
2404 * in any of the physical maps.
2407 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
2413 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2414 ("mmu_booke_is_referenced: page %p is not managed", m));
2416 rw_wlock(&pvh_global_lock);
2417 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2418 PMAP_LOCK(pv->pv_pmap);
2419 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2421 if (PTE_ISREFERENCED(pte))
2424 PMAP_UNLOCK(pv->pv_pmap);
2428 rw_wunlock(&pvh_global_lock);
2433 * Clear the modify bits on the specified physical page.
2436 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2441 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2442 ("mmu_booke_clear_modify: page %p is not managed", m));
2443 VM_OBJECT_ASSERT_WLOCKED(m->object);
2444 KASSERT(!vm_page_xbusied(m),
2445 ("mmu_booke_clear_modify: page %p is exclusive busied", m));
2448 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
2449 * If the object containing the page is locked and the page is not
2450 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2452 if ((m->aflags & PGA_WRITEABLE) == 0)
2454 rw_wlock(&pvh_global_lock);
2455 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2456 PMAP_LOCK(pv->pv_pmap);
2457 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2459 mtx_lock_spin(&tlbivax_mutex);
2462 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2463 tlb0_flush_entry(pv->pv_va);
2464 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2469 mtx_unlock_spin(&tlbivax_mutex);
2471 PMAP_UNLOCK(pv->pv_pmap);
2473 rw_wunlock(&pvh_global_lock);
2477 * Return a count of reference bits for a page, clearing those bits.
2478 * It is not necessary for every reference bit to be cleared, but it
2479 * is necessary that 0 only be returned when there are truly no
2480 * reference bits set.
2482 * XXX: The exact number of bits to check and clear is a matter that
2483 * should be tested and standardized at some point in the future for
2484 * optimal aging of shared pages.
2487 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2493 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2494 ("mmu_booke_ts_referenced: page %p is not managed", m));
2496 rw_wlock(&pvh_global_lock);
2497 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2498 PMAP_LOCK(pv->pv_pmap);
2499 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2501 if (PTE_ISREFERENCED(pte)) {
2502 mtx_lock_spin(&tlbivax_mutex);
2505 tlb0_flush_entry(pv->pv_va);
2506 pte->flags &= ~PTE_REFERENCED;
2509 mtx_unlock_spin(&tlbivax_mutex);
2512 PMAP_UNLOCK(pv->pv_pmap);
2517 PMAP_UNLOCK(pv->pv_pmap);
2519 rw_wunlock(&pvh_global_lock);
2524 * Clear the wired attribute from the mappings for the specified range of
2525 * addresses in the given pmap. Every valid mapping within that range must
2526 * have the wired attribute set. In contrast, invalid mappings cannot have
2527 * the wired attribute set, so they are ignored.
2529 * The wired attribute of the page table entry is not a hardware feature, so
2530 * there is no need to invalidate any TLB entries.
2533 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2539 for (va = sva; va < eva; va += PAGE_SIZE) {
2540 if ((pte = pte_find(mmu, pmap, va)) != NULL &&
2542 if (!PTE_ISWIRED(pte))
2543 panic("mmu_booke_unwire: pte %p isn't wired",
2545 pte->flags &= ~PTE_WIRED;
2546 pmap->pm_stats.wired_count--;
2554 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2555 * page. This count may be changed upwards or downwards in the future; it is
2556 * only necessary that true be returned for a small subset of pmaps for proper
2560 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2566 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2567 ("mmu_booke_page_exists_quick: page %p is not managed", m));
2570 rw_wlock(&pvh_global_lock);
2571 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2572 if (pv->pv_pmap == pmap) {
2579 rw_wunlock(&pvh_global_lock);
2584 * Return the number of managed mappings to the given physical page that are
2588 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2594 if ((m->oflags & VPO_UNMANAGED) != 0)
2596 rw_wlock(&pvh_global_lock);
2597 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2598 PMAP_LOCK(pv->pv_pmap);
2599 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2600 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2602 PMAP_UNLOCK(pv->pv_pmap);
2604 rw_wunlock(&pvh_global_lock);
2609 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2615 * This currently does not work for entries that
2616 * overlap TLB1 entries.
2618 for (i = 0; i < tlb1_idx; i++) {
2619 if (tlb1_iomapped(i, pa, size, &va) == 0)
2627 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
2633 /* Minidumps are based on virtual memory addresses. */
2635 *va = (void *)(vm_offset_t)pa;
2639 /* Raw physical memory dumps don't have a virtual address. */
2640 /* We always map a 256MB page at 256M. */
2641 gran = 256 * 1024 * 1024;
2642 ppa = pa & ~(gran - 1);
2645 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
2647 if (sz > (gran - ofs))
2648 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
2653 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
2659 /* Minidumps are based on virtual memory addresses. */
2660 /* Nothing to do... */
2664 /* Raw physical memory dumps don't have a virtual address. */
2666 tlb1[tlb1_idx].mas1 = 0;
2667 tlb1[tlb1_idx].mas2 = 0;
2668 tlb1[tlb1_idx].mas3 = 0;
2669 tlb1_write_entry(tlb1_idx);
2671 gran = 256 * 1024 * 1024;
2672 ppa = pa & ~(gran - 1);
2674 if (sz > (gran - ofs)) {
2676 tlb1[tlb1_idx].mas1 = 0;
2677 tlb1[tlb1_idx].mas2 = 0;
2678 tlb1[tlb1_idx].mas3 = 0;
2679 tlb1_write_entry(tlb1_idx);
2683 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
2686 mmu_booke_scan_init(mmu_t mmu)
2693 /* Initialize phys. segments for dumpsys(). */
2694 memset(&dump_map, 0, sizeof(dump_map));
2695 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
2696 &availmem_regions_sz);
2697 for (i = 0; i < physmem_regions_sz; i++) {
2698 dump_map[i].pa_start = physmem_regions[i].mr_start;
2699 dump_map[i].pa_size = physmem_regions[i].mr_size;
2704 /* Virtual segments for minidumps: */
2705 memset(&dump_map, 0, sizeof(dump_map));
2707 /* 1st: kernel .data and .bss. */
2708 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
2709 dump_map[0].pa_size =
2710 round_page((uintptr_t)_end) - dump_map[0].pa_start;
2712 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2713 dump_map[1].pa_start = data_start;
2714 dump_map[1].pa_size = data_end - data_start;
2716 /* 3rd: kernel VM. */
2717 va = dump_map[1].pa_start + dump_map[1].pa_size;
2718 /* Find start of next chunk (from va). */
2719 while (va < virtual_end) {
2720 /* Don't dump the buffer cache. */
2721 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
2722 va = kmi.buffer_eva;
2725 pte = pte_find(mmu, kernel_pmap, va);
2726 if (pte != NULL && PTE_ISVALID(pte))
2730 if (va < virtual_end) {
2731 dump_map[2].pa_start = va;
2733 /* Find last page in chunk. */
2734 while (va < virtual_end) {
2735 /* Don't run into the buffer cache. */
2736 if (va == kmi.buffer_sva)
2738 pte = pte_find(mmu, kernel_pmap, va);
2739 if (pte == NULL || !PTE_ISVALID(pte))
2743 dump_map[2].pa_size = va - dump_map[2].pa_start;
2748 * Map a set of physical memory pages into the kernel virtual address space.
2749 * Return a pointer to where it is mapped. This routine is intended to be used
2750 * for mapping device memory, NOT real memory.
2753 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2756 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
2760 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2768 * Check if this is premapped in TLB1. Note: this should probably also
2769 * check whether a sequence of TLB1 entries exists that matches the
2770 * requirement, but for now only the easy case is checked.
2772 if (ma == VM_MEMATTR_DEFAULT) {
2773 for (i = 0; i < tlb1_idx; i++) {
2774 if (!(tlb1[i].mas1 & MAS1_VALID))
2776 if (pa >= tlb1[i].phys &&
2777 (pa + size) <= (tlb1[i].phys + tlb1[i].size))
2778 return (void *)(tlb1[i].virt +
2779 (vm_offset_t)(pa - tlb1[i].phys));
2783 size = roundup(size, PAGE_SIZE);
2786 * We leave a hole for device direct mapping between the maximum user
2787 * address (0x80000000) and the minimum KVA address (0xc0000000). If
2788 * devices are in there, just map them 1:1. If not, map them to the
2789 * device mapping area above VM_MAX_KERNEL_ADDRESS. These mapped
2790 * addresses should be pulled from an allocator, but since we do not
2791 * ever free TLB1 entries, it is safe just to increment a counter.
2792 * Note that there isn't a lot of address space here (128 MB) and it
2793 * is not at all difficult to imagine running out, since that is a 4:1
2794 * compression from the 0xc0000000 - 0xf0000000 address space that gets
2797 if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
2798 (pa + size - 1) < VM_MIN_KERNEL_ADDRESS)
2801 va = atomic_fetchadd_int(&tlb1_map_base, size);
2805 sz = 1 << (ilog2(size) & ~1);
2807 printf("Wiring VA=%x to PA=%llx (size=%x), "
2808 "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2809 tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma));
2819 * 'Unmap' a range mapped by mmu_booke_mapdev().
2822 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2824 #ifdef SUPPORTS_SHRINKING_TLB1
2825 vm_offset_t base, offset;
2828 * Unmap only if this is inside kernel virtual space.
2830 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2831 base = trunc_page(va);
2832 offset = va & PAGE_MASK;
2833 size = roundup(offset + size, PAGE_SIZE);
2834 kva_free(base, size);
2840 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2841 * specified pmap. This eliminates the blast of soft faults on process startup
2842 * and immediately after an mmap.
2845 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2846 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2849 VM_OBJECT_ASSERT_WLOCKED(object);
2850 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2851 ("mmu_booke_object_init_pt: non-device object"));
2855 * Perform the pmap work for mincore.
2858 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2859 vm_paddr_t *locked_pa)
2862 /* XXX: this should be implemented at some point */
2866 /**************************************************************************/
2868 /**************************************************************************/
2871 * Allocate a TID. If necessary, steal one from someone else.
2872 * The new TID is flushed from the TLB before returning.
2875 tid_alloc(pmap_t pmap)
2880 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2882 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2884 thiscpu = PCPU_GET(cpuid);
2886 tid = PCPU_GET(tid_next);
2889 PCPU_SET(tid_next, tid + 1);
2891 /* If we are stealing a TID, clear the relevant pmap's field. */
2892 if (tidbusy[thiscpu][tid] != NULL) {
2894 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2896 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2898 /* Flush all entries from TLB0 matching this TID. */
2899 tid_flush(tid, tlb0_ways, tlb0_entries_per_way);
2902 tidbusy[thiscpu][tid] = pmap;
2903 pmap->pm_tid[thiscpu] = tid;
2904 __asm __volatile("msync; isync");
2906 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2907 PCPU_GET(tid_next));
2912 /**************************************************************************/
2914 /**************************************************************************/
2917 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2927 if (mas1 & MAS1_VALID)
2932 if (mas1 & MAS1_IPROT)
2937 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2938 tid = MAS1_GETTID(mas1);
2940 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2943 size = tsize2size(tsize);
2945 debugf("%3d: (%s) [AS=%d] "
2946 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2947 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2948 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2951 /* Convert TLB0 va and way number to tlb0[] table index. */
2952 static inline unsigned int
2953 tlb0_tableidx(vm_offset_t va, unsigned int way)
2957 idx = (way * TLB0_ENTRIES_PER_WAY);
2958 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2963 * Invalidate TLB0 entry.
2966 tlb0_flush_entry(vm_offset_t va)
2969 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2971 mtx_assert(&tlbivax_mutex, MA_OWNED);
2973 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2974 __asm __volatile("isync; msync");
2975 __asm __volatile("tlbsync; msync");
2977 CTR1(KTR_PMAP, "%s: e", __func__);
2980 /* Print out contents of the MAS registers for each TLB0 entry */
2982 tlb0_print_tlbentries(void)
2984 uint32_t mas0, mas1, mas2, mas3, mas7;
2985 int entryidx, way, idx;
2987 debugf("TLB0 entries:\n");
2988 for (way = 0; way < TLB0_WAYS; way++)
2989 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2991 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2992 mtspr(SPR_MAS0, mas0);
2993 __asm __volatile("isync");
2995 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2996 mtspr(SPR_MAS2, mas2);
2998 __asm __volatile("isync; tlbre");
3000 mas1 = mfspr(SPR_MAS1);
3001 mas2 = mfspr(SPR_MAS2);
3002 mas3 = mfspr(SPR_MAS3);
3003 mas7 = mfspr(SPR_MAS7);
3005 idx = tlb0_tableidx(mas2, way);
3006 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3010 /**************************************************************************/
3012 /**************************************************************************/
3015 * TLB1 mapping notes:
3017 * TLB1[0] Kernel text and data.
3018 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
3019 * windows, other devices mappings.
3023 * Write given entry to TLB1 hardware.
3024 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
3027 tlb1_write_entry(unsigned int idx)
3031 //debugf("tlb1_write_entry: s\n");
3034 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
3035 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
3037 mtspr(SPR_MAS0, mas0);
3038 __asm __volatile("isync");
3039 mtspr(SPR_MAS1, tlb1[idx].mas1);
3040 __asm __volatile("isync");
3041 mtspr(SPR_MAS2, tlb1[idx].mas2);
3042 __asm __volatile("isync");
3043 mtspr(SPR_MAS3, tlb1[idx].mas3);
3044 __asm __volatile("isync");
3045 mtspr(SPR_MAS7, tlb1[idx].mas7);
3046 __asm __volatile("isync; tlbwe; isync; msync");
3048 //debugf("tlb1_write_entry: e\n");
3052 * Return the largest uint value log such that 2^log <= num.
3055 ilog2(unsigned int num)
3059 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
3064 * Convert TLB TSIZE value to mapped region size.
3067 tsize2size(unsigned int tsize)
3072 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
3075 return ((1 << (2 * tsize)) * 1024);
3079 * Convert region size (must be power of 4) to TLB TSIZE value.
3082 size2tsize(vm_size_t size)
3085 return (ilog2(size) / 2 - 5);
3089 * Register permanent kernel mapping in TLB1.
3091 * Entries are created starting from index 0 (current free entry is
3092 * kept in tlb1_idx) and are not supposed to be invalidated.
3095 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
3101 index = atomic_fetchadd_int(&tlb1_idx, 1);
3102 if (index >= TLB1_ENTRIES) {
3103 printf("tlb1_set_entry: TLB1 full!\n");
3107 /* Convert size to TSIZE */
3108 tsize = size2tsize(size);
3110 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
3111 /* XXX TS is hard coded to 0 for now as we only use a single address space */
3112 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
3115 * Atomicity is preserved by the atomic increment above since nothing
3116 * is ever removed from tlb1.
3119 tlb1[index].phys = pa;
3120 tlb1[index].virt = va;
3121 tlb1[index].size = size;
3122 tlb1[index].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
3123 tlb1[index].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
3124 tlb1[index].mas2 = (va & MAS2_EPN_MASK) | flags;
3126 /* Set supervisor RWX permission bits */
3127 tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
3128 tlb1[index].mas7 = (pa >> 32) & MAS7_RPN;
3130 tlb1_write_entry(index);
3133 * XXX in general TLB1 updates should be propagated between CPUs, since
3134 * the current design assumes the same TLB1 set-up on all
3141 * Map a contiguous RAM region into TLB1, using at most
3142 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
3144 * If necessary, round up the last entry size and return the total size
3145 * used by all allocated entries.
3148 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
3150 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
3151 vm_size_t mapped, pgsz, base, mask;
3154 /* Round up to the next 1M */
3155 size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);
3160 pgsz = 64 * 1024 * 1024;
3161 while (mapped < size) {
3162 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
3163 while (pgsz > (size - mapped))
3169 /* We under-map. Correct for this. */
3170 if (mapped < size) {
3171 while (pgs[idx - 1] == pgsz) {
3175 /* XXX We may increase beyond our starting point. */
3184 /* Align address to the boundary */
3186 va = (va + mask) & ~mask;
3187 pa = (pa + mask) & ~mask;
3190 for (idx = 0; idx < nents; idx++) {
3192 debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
3193 tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
3198 mapped = (va - base);
3199 printf("mapped size 0x%08x (wasted space 0x%08x)\n",
3200 mapped, mapped - size);
3205 * TLB1 initialization routine, to be called after the very first
3206 * assembler level setup done in locore.S.
3211 uint32_t mas0, mas1, mas2, mas3, mas7;
3215 if (bootinfo != NULL && bootinfo[0] != 1) {
3216 tlb1_idx = *((uint16_t *)(bootinfo + 8));
3220 /* The first entry/entries are used to map the kernel. */
3221 for (i = 0; i < tlb1_idx; i++) {
3222 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3223 mtspr(SPR_MAS0, mas0);
3224 __asm __volatile("isync; tlbre");
3226 mas1 = mfspr(SPR_MAS1);
3227 if ((mas1 & MAS1_VALID) == 0)
3230 mas2 = mfspr(SPR_MAS2);
3231 mas3 = mfspr(SPR_MAS3);
3232 mas7 = mfspr(SPR_MAS7);
3234 tlb1[i].mas1 = mas1;
3235 tlb1[i].mas2 = mas2;
3236 tlb1[i].mas3 = mas3;
3237 tlb1[i].mas7 = mas7;
3238 tlb1[i].virt = mas2 & MAS2_EPN_MASK;
3239 tlb1[i].phys = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
3243 kernload = tlb1[i].phys;
3245 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3246 tlb1[i].size = (tsz > 0) ? tsize2size(tsz) : 0;
3247 kernsize += tlb1[i].size;
3251 bp_ntlb1s = tlb1_idx;
3254 /* Purge the remaining entries */
3255 for (i = tlb1_idx; i < TLB1_ENTRIES; i++)
3256 tlb1_write_entry(i);
3258 /* Set up TLB miss defaults */
3259 set_mas4_defaults();
3263 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
3269 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
3271 for (i = 0; i < tlb1_idx; i++) {
3272 if (!(tlb1[i].mas1 & MAS1_VALID))
3274 if (pa >= tlb1[i].phys && (pa + size) <=
3275 (tlb1[i].phys + tlb1[i].size))
3276 return (tlb1[i].virt + (pa - tlb1[i].phys));
3279 pa_base = trunc_page(pa);
3280 size = roundup(size + (pa - pa_base), PAGE_SIZE);
3281 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
3282 va = tlb1_map_base + (pa - pa_base);
3285 sz = 1 << (ilog2(size) & ~1);
3286 tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO);
3289 tlb1_map_base += sz;
3293 bp_ntlb1s = tlb1_idx;
3300 * Set up MAS4 defaults.
3301 * These values are loaded into MAS0-2 on a TLB miss.
3304 set_mas4_defaults(void)
3308 /* Defaults: TLB0, PID0, TSIZED=4K */
3309 mas4 = MAS4_TLBSELD0;
3310 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3314 mtspr(SPR_MAS4, mas4);
3315 __asm __volatile("isync");
3319 * Print out contents of the MAS registers for each TLB1 entry
3322 tlb1_print_tlbentries(void)
3324 uint32_t mas0, mas1, mas2, mas3, mas7;
3327 debugf("TLB1 entries:\n");
3328 for (i = 0; i < TLB1_ENTRIES; i++) {
3330 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3331 mtspr(SPR_MAS0, mas0);
3333 __asm __volatile("isync; tlbre");
3335 mas1 = mfspr(SPR_MAS1);
3336 mas2 = mfspr(SPR_MAS2);
3337 mas3 = mfspr(SPR_MAS3);
3338 mas7 = mfspr(SPR_MAS7);
3340 tlb_print_entry(i, mas1, mas2, mas3, mas7);
3345 * Print out contents of the in-ram tlb1 table.
3348 tlb1_print_entries(void)
3352 debugf("tlb1[] table entries:\n");
3353 for (i = 0; i < TLB1_ENTRIES; i++)
3354 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3,
3359 * Return 0 if the physical IO range is encompassed by one of
3360 * the TLB1 entries, otherwise return a related error code.
3363 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3366 vm_paddr_t pa_start;
3368 unsigned int entry_tsize;
3369 vm_size_t entry_size;
3371 *va = (vm_offset_t)NULL;
3373 /* Skip invalid entries */
3374 if (!(tlb1[i].mas1 & MAS1_VALID))
3378 * The entry must be cache-inhibited, guarded, and r/w
3379 * so it can function as an I/O page.
3381 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3382 if (prot != (MAS2_I | MAS2_G))
3385 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3386 if (prot != (MAS3_SR | MAS3_SW))
3389 /* The address should be within the entry range. */
3390 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3391 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3393 entry_size = tsize2size(entry_tsize);
3394 pa_start = (((vm_paddr_t)tlb1[i].mas7 & MAS7_RPN) << 32) |
3395 (tlb1[i].mas3 & MAS3_RPN);
3396 pa_end = pa_start + entry_size;
3398 if ((pa < pa_start) || ((pa + size) > pa_end))
3401 /* Return virtual address of this mapping. */
3402 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);