/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */
/*
 * Kernel and user threads run within one common virtual address space.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */
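/*
 * Illustrative note (not part of the original layout comment): a VA is
 * resolved in two software levels -- PDIR_IDX(va) selects a page table
 * pointer in pm_pdir[], PTBL_IDX(va) selects the PTE within that table,
 * i.e. pte = pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]; see pte_find() below.
 */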
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);
extern unsigned char _etext[];
extern unsigned char _end[];

extern uint32_t *bootinfo;

extern uint32_t bp_ntlb1s;

vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;
/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;
/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, no lock required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;
/**************************************************************************/
/* PMAP */
/**************************************************************************/

static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int flags, int8_t psind);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
extern void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way);
extern int elf32_nxstack;
/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
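/*
 * tidbusy[cpu][tid] records which pmap currently owns the given TID on a
 * CPU; slot TID_KERNEL is seeded with kernel_pmap in mmu_booke_bootstrap().
 */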
/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;
uint32_t tlb1_entries;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES		(tlb1_entries)
#define TLB1_MAXENTRIES		64
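/*
 * Note: TLB1_MAXENTRIES only bounds the in-ram shadow below; the real
 * entry count is read from TLB1CFG at runtime (e500 cores, for example,
 * implement 16 TLB1 entries).
 */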
/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_MAXENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;
static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_paddr_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
/**************************************************************************/
/* Page table management */
/**************************************************************************/

static struct rwlock_padalign pvh_global_lock;

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif
static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

static void booke_pmap_init_qpages(void);
/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;
void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
    vm_offset_t, vm_page_t *, vm_offset_t, int);
static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int flags, int8_t psind);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
    int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void mmu_booke_page_init(mmu_t, vm_page_t);
static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void mmu_booke_pinit(mmu_t, pmap_t);
static void mmu_booke_pinit0(mmu_t, pmap_t);
static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
    void **);
static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
    void *);
static void mmu_booke_scan_init(mmu_t);
static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_unwire,		mmu_booke_unwire),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
	MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
	MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	mmu_booke_mapdev_attr),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kenter_attr,	mmu_booke_kenter_attr),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_init,	mmu_booke_scan_init),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
static __inline uint32_t
tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
	uint32_t attrib;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_M);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	attrib = _TLB_ENTRY_IO;
	for (i = 0; i < physmem_regions_sz; i++) {
		if ((pa >= physmem_regions[i].mr_start) &&
		    (pa < (physmem_regions[i].mr_start +
		     physmem_regions[i].mr_size))) {
			attrib = _TLB_ENTRY_MEM;
			break;
		}
	}

	return (attrib);
}
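/*
 * Example: a device BAR that falls outside every physmem region resolves
 * to _TLB_ENTRY_IO (cache-inhibited, guarded) above, while ordinary RAM
 * resolves to _TLB_ENTRY_MEM and stays cacheable.
 */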
static void
tlb_miss_lock(void)
{
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid,
			    pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
}

static void
tlb_miss_unlock(void)
{
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
}
/* Return number of entries in TLB0. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
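/*
 * E.g. an e500v2 core reports a 512-entry, 4-way set associative TLB0,
 * so tlb0_entries_per_way computes to 128 (numbers vary per revision).
 */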
/* Return number of entries in TLB1. */
static __inline void
tlb1_get_tlbconf(void)
{
	uint32_t tlb1_cfg;

	tlb1_cfg = mfspr(SPR_TLB1CFG);
	tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
}
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva =
		    ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}
/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}
/* Return ptbl buf to free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}
/*
 * Search the pmap's list of allocated ptbl bufs and free the buf that
 * backs the given ptbl.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}
/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i, j;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			if (nosleep) {
				ptbl_free_pmap_ptbl(pmap, ptbl);
				for (j = 0; j < i; j++)
					vm_page_free(mtbl[j]);
				atomic_subtract_int(&vm_cnt.v_wire_count, i);
				return (NULL);
			}
			VM_WAIT;
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}
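/*
 * Note: the ptbl returned above is backed by PTBL_PAGES wired pages that
 * are qenter()-ed contiguously at the buf's kva, so a ptbl can be indexed
 * as a flat pte_t array from here on.
 */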
/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}
/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the
	 * last page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}
/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}
/* Allocate pv_entry structure. */
static pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}
/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}
/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}
/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//	su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}
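/*
 * Callers pass PTBL_UNHOLD (via PTBL_HOLD_FLAG()) for user pmaps so that
 * the ptbl reference taken in pte_enter() is dropped here; kernel ptbls
 * are never unheld.
 */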
/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
		if (ptbl == NULL) {
			KASSERT(nosleep, ("nosleep and NULL ptbl"));
			return (ENOMEM);
		}
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}
/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}
/**************************************************************************/
/* PMAP related */
/**************************************************************************/
/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Set interesting system properties */
	hw_direct_map = 0;
	elf32_nxstack = 1;

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();
	/*
	 * Align kernel start and end address (kernel image).
	 * Note that kernel end does not necessarily relate to kernsize.
	 * kernsize is the size of the kernel that is actually mapped.
	 */
	kernstart = trunc_page(start);
	data_start = round_page(kernelend);
	data_end = data_start;

	/*
	 * Addresses of preloaded modules (like file systems) use
	 * physical addresses. Make sure we relocate those into
	 * virtual addresses.
	 */
	preload_addr_relocate = kernstart - kernload;
	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);
	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > kernsize) {
		kernsize += tlb1_mapin_region(kernstart + kernsize,
		    kernload + kernsize, (data_end - kernstart) - kernsize);
	}
	data_end = kernstart + kernsize;
	debugf(" updated data_end: 0x%08x\n", data_end);
	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * all range up to the currently calculated 'data_end' is covered.
	 */
	dpcpu_init(dpcpu, 0);
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);
	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload    = 0x%09llx\n", (uint64_t)kernload);
	debugf(" kernstart   = 0x%08x\n", kernstart);
	debugf(" kernsize    = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");
	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions. Non-page aligned memory isn't very interesting
	 * to us. Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;
	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = kstack_pages * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;
	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);
	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);
	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][TID_KERNEL] = kernel_pmap;
	}
	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = kernstart; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - kernstart);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");
	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = kstack_pages;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < kstack_pages; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	pmap_bootstrapped = 1;

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end   = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}
void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = bp_ntlb1s; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}
/* Allocate per-CPU KVA for the quick-map slots once CPUs are enumerated. */
static void
booke_pmap_init_qpages(void)
{
	struct pcpu *pc;
	int i;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
		if (pc->pc_qmap_addr == 0)
			panic("pmap_init_qpages: unable to allocate KVA");
	}
}

SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}
/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{
	int i;

	/* Check TLB1 mappings */
	for (i = 0; i < tlb1_idx; i++) {
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;
		if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size)
			return (tlb1[i].phys + (va - tlb1[i].virt));
	}

	return (pte_vatopa(mmu, kernel_pmap, va));
}
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_reserve_kva(pvzone, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}
/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}
/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

	mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

static void
mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
	flags |= tlb_calc_wimg(pa, ma);

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = PTE_RPN_FROM_PA(pa);
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}
/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *pte;

//	CTR2(KTR_PMAP, ("%s: s (va = 0x%08x)\n", __func__, va));

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)),
	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	if (!PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: invalid pte", __func__);

		return;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	/* Invalidate entry in TLB0, update PTE. */
	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}
/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}
/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
}
/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
static int
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int flags, int8_t psind)
{
	int error;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
	return (error);
}
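/*
 * Guts of mmu_booke_enter(): expects the pv global lock and the pmap lock
 * to be held, and either updates the flags of an existing mapping in place
 * or installs a new PTE via pte_enter().
 */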
static int
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
{
	pte_t *pte;
	vm_paddr_t pa;
	uint32_t flags;
	int error, su, sync;

	pa = VM_PAGE_TO_PHYS(m);
	su = (pmap == kernel_pmap);
	sync = 0;

	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
	//		"pa=0x%08x prot=0x%08x flags=%#x)\n",
	//		(u_int32_t)pmap, su, pmap->pm_tid,
	//		(u_int32_t)m, va, pa, prot, flags);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_enter_locked: user pmap, non user va"));
	}
	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, must be protection or wiring change.
	 */
	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

		/*
		 * Before actually updating pte->flags we calculate and
		 * prepare its new value in a helper var.
		 */
		flags = pte->flags;
		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

		/* Wiring change, just update stats. */
		if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
			if (!PTE_ISWIRED(pte)) {
				flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}

		if (prot & VM_PROT_WRITE) {
			/* Add write permissions. */
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((flags & PTE_MANAGED) != 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		} else {
			/* Handle modified pages, sense modify status. */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, icache should
			 * be flushed.
			 */
			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);
		tlb_miss_lock();

		tlb0_flush_entry(va);
		pte->flags = flags;

		tlb_miss_unlock();
		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((m->oflags & VPO_UNMANAGED) == 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired update stats. */
		if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
			flags |= PTE_WIRED;

		error = pte_enter(mmu, pmap, m, va, flags,
		    (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
		if (error != 0)
			return (KERN_RESOURCE_SHORTAGE);

		if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
			pmap->pm_stats.wired_count++;

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}

	return (KERN_SUCCESS);
}
/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	m = m_start;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE),
		    PMAP_ENTER_NOSLEEP, 0);
		m = TAILQ_NEXT(m, listq);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}
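/*
 * mmu_booke_enter_quick() is the "fast path" variant: read/execute only,
 * never sleeps, and never creates a wired mapping.
 */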
static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
	    0);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to the page size.
 */
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
	pte_t *pte;
	uint8_t hold_flag;

	int su = (pmap == kernel_pmap);

	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_remove: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_remove: user pmap, non user va"));
	}

	if (PMAP_REMOVE_DONE(pmap)) {
		//debugf("mmu_booke_remove: e (empty)\n");
		return;
	}

	hold_flag = PTBL_HOLD_FLAG(pmap);
	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	for (; va < endva; va += PAGE_SIZE) {
		pte = pte_find(mmu, pmap, va);
		if ((pte != NULL) && PTE_ISVALID(pte))
			pte_remove(mmu, pmap, va, hold_flag);
	}
	PMAP_UNLOCK(pmap);
	rw_wunlock(&pvh_global_lock);

	//debugf("mmu_booke_remove: e\n");
}
/*
 * Remove physical page from all pmaps in which it resides.
 */
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv, pvn;
	uint8_t hold_flag;

	rw_wlock(&pvh_global_lock);
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
		pvn = TAILQ_NEXT(pv, pv_link);

		PMAP_LOCK(pv->pv_pmap);
		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}
/*
 * Map a range of physical addresses into kernel virtual address space.
 */
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t sva = *virt;
	vm_offset_t va = sva;

	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
	//		sva, pa_start, pa_end);

	while (pa_start < pa_end) {
		mmu_booke_kenter(mmu, va, pa_start);
		va += PAGE_SIZE;
		pa_start += PAGE_SIZE;
	}

	*virt = va;

	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
	return (sva);
}
/*
 * The pmap must be activated before its address space can be accessed in any
 * way.
 */
static void
mmu_booke_activate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;
	u_int cpuid;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));

	sched_pin();

	cpuid = PCPU_GET(cpuid);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
	PCPU_SET(curpmap, pmap);

	if (pmap->pm_tid[cpuid] == TID_NONE)
		tid_alloc(pmap);

	/* Load PID0 register with pmap tid value. */
	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
	__asm __volatile("isync");

	mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);

	sched_unpin();

	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
}
/*
 * Deactivate the specified process's address space.
 */
static void
mmu_booke_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);

	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
	PCPU_SET(curpmap, NULL);
}
/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
static void
mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
{

}
/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	vm_offset_t va;
	vm_page_t m;
	pte_t *pte;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		mmu_booke_remove(mmu, pmap, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE)
		return;

	PMAP_LOCK(pmap);
	for (va = sva; va < eva; va += PAGE_SIZE) {
		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
					vm_page_dirty(m);

				tlb0_flush_entry(va);
				pte->flags &= ~(PTE_UW | PTE_SW |
				    PTE_MODIFIED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
	}
	PMAP_UNLOCK(pmap);
}
/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
static void
mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked. Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte))
					vm_page_dirty(m);

				/* Flush mapping from TLB0. */
				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~(PTE_UW | PTE_SW |
				    PTE_MODIFIED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}
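/*
 * Flush the instruction cache for the given pmap/va range; if the pmap is
 * not the active one, each valid page is temporarily entered into the
 * current pmap (at va 0 in this implementation) so it can be reached for
 * the flush.
 */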
static void
mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	pmap_t pmap;
	vm_page_t m;
	vm_offset_t addr;
	vm_paddr_t pa = 0;
	int active, valid;

	va = trunc_page(va);
	sz = round_page(sz);

	rw_wlock(&pvh_global_lock);
	pmap = PCPU_GET(curpmap);
	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(mmu, pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		if (valid) {
			if (!active) {
				/* Create a mapping in the active pmap. */
				addr = 0;
				m = PHYS_TO_VM_PAGE(pa);
				PMAP_LOCK(pmap);
				pte_enter(mmu, pmap, m, addr,
				    PTE_SR | PTE_VALID | PTE_UR, FALSE);
				__syncicache((void *)addr, PAGE_SIZE);
				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
				PMAP_UNLOCK(pmap);
			} else
				__syncicache((void *)va, PAGE_SIZE);
		}
		va += PAGE_SIZE;
		sz -= PAGE_SIZE;
	}
	rw_wunlock(&pvh_global_lock);
}
/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
static vm_page_t
mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
    vm_prot_t prot)
{
	pte_t *pte;
	vm_page_t m;
	uint32_t pte_wbit;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte)) {
		if (pmap == kernel_pmap)
			pte_wbit = PTE_SW;
		else
			pte_wbit = PTE_UW;

		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
				goto retry;
			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
			vm_page_hold(m);
		}
	}

	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}
/*
 * Initialize a vm_page's machine-dependent fields.
 */
static void
mmu_booke_page_init(mmu_t mmu, vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
}
/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
	mmu_booke_kremove(mmu, va);

	mtx_unlock(&zero_page_mutex);
}
/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}
/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
	mmu_booke_kremove(mmu, dva);
	mmu_booke_kremove(mmu, sva);
	mtx_unlock(&copy_page_mutex);
}
static inline void
mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
        void *a_cp, *b_cp;
        vm_offset_t a_pg_offset, b_pg_offset;
        int cnt;

        mtx_lock(&copy_page_mutex);
        while (xfersize > 0) {
                a_pg_offset = a_offset & PAGE_MASK;
                cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
                mmu_booke_kenter(mmu, copy_page_src_va,
                    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
                a_cp = (char *)copy_page_src_va + a_pg_offset;
                b_pg_offset = b_offset & PAGE_MASK;
                cnt = min(cnt, PAGE_SIZE - b_pg_offset);
                mmu_booke_kenter(mmu, copy_page_dst_va,
                    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
                b_cp = (char *)copy_page_dst_va + b_pg_offset;
                bcopy(a_cp, b_cp, cnt);
                mmu_booke_kremove(mmu, copy_page_dst_va);
                mmu_booke_kremove(mmu, copy_page_src_va);
                a_offset += cnt;
                b_offset += cnt;
                xfersize -= cnt;
        }
        mtx_unlock(&copy_page_mutex);
}

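/*
 * Worked example (illustrative, not from the original source): with
 * a_offset = 0xF80, b_offset = 0 and xfersize = 0x200, the first pass
 * copies cnt = min(0x200, PAGE_SIZE - 0xF80) = 0x80 bytes from the tail
 * of ma[0] to the start of mb[0]; the second pass copies the remaining
 * 0x180 bytes from the start of ma[1]. Both windows are re-entered on
 * every iteration because source and destination may cross their page
 * boundaries at different points.
 */
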
/*
 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
 * into virtual memory and using bzero to clear its contents. This is intended
 * to be called from the vm_pagezero process only and outside of Giant. No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
        vm_offset_t va;

        va = zero_page_idle_va;
        mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
        bzero((caddr_t)va, PAGE_SIZE);
        mmu_booke_kremove(mmu, va);
}

static vm_offset_t
mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
{
        vm_paddr_t paddr;
        vm_offset_t qaddr;
        uint32_t flags;
        pte_t *pte;

        paddr = VM_PAGE_TO_PHYS(m);

        flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
        flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m));

        critical_enter();
        qaddr = PCPU_GET(qmap_addr);

        pte = &(kernel_pmap->pm_pdir[PDIR_IDX(qaddr)][PTBL_IDX(qaddr)]);

        KASSERT(pte->flags == 0, ("mmu_booke_quick_enter_page: PTE busy"));

        /*
         * XXX: tlbivax is broadcast to other cores, but qaddr should
         * not be present in other TLBs.  Is there a better instruction
         * sequence to use? Or just forget it & use mmu_booke_kenter()...
         */
        __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
        __asm __volatile("isync; msync");

        pte->rpn = paddr & ~PTE_PA_MASK;
        pte->flags = flags;

        /* Flush the real memory from the instruction cache. */
        if ((flags & (PTE_I | PTE_G)) == 0)
                __syncicache((void *)qaddr, PAGE_SIZE);

        return (qaddr);
}

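/*
 * Usage sketch (hypothetical, for illustration only): the quick
 * enter/remove pair gives a per-CPU, non-sleeping window onto an
 * arbitrary page:
 *
 *      vm_offset_t qva;
 *
 *      qva = pmap_quick_enter_page(m);
 *      bcopy(src, (void *)qva, PAGE_SIZE);
 *      pmap_quick_remove_page(qva);
 *
 * critical_enter() above pins the thread to the CPU whose qmap_addr slot
 * is borrowed, so the caller must not sleep between the two calls.
 */
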
static void
mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
        pte_t *pte;

        pte = &(kernel_pmap->pm_pdir[PDIR_IDX(addr)][PTBL_IDX(addr)]);

        KASSERT(PCPU_GET(qmap_addr) == addr,
            ("mmu_booke_quick_remove_page: invalid address"));
        KASSERT(pte->flags != 0,
            ("mmu_booke_quick_remove_page: PTE not in use"));

        pte->flags = 0;
        pte->rpn = 0;
        critical_exit();
}

/*
 * Return whether or not the specified physical page was modified
 * in any of physical maps.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;
        boolean_t rv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_is_modified: page %p is not managed", m));
        rv = FALSE;

        /*
         * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
         * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
         * is clear, no PTEs can be modified.
         */
        VM_OBJECT_ASSERT_WLOCKED(m->object);
        if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
                return (rv);
        rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
                    PTE_ISVALID(pte)) {
                        if (PTE_ISMODIFIED(pte))
                                rv = TRUE;
                }
                PMAP_UNLOCK(pv->pv_pmap);
                if (rv)
                        break;
        }
        rw_wunlock(&pvh_global_lock);
        return (rv);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

        return (FALSE);
}

/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
static boolean_t
mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;
        boolean_t rv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_is_referenced: page %p is not managed", m));
        rv = FALSE;
        rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
                    PTE_ISVALID(pte)) {
                        if (PTE_ISREFERENCED(pte))
                                rv = TRUE;
                }
                PMAP_UNLOCK(pv->pv_pmap);
                if (rv)
                        break;
        }
        rw_wunlock(&pvh_global_lock);
        return (rv);
}

/*
 * Clear the modify bits on the specified physical page.
 */
static void
mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_clear_modify: page %p is not managed", m));
        VM_OBJECT_ASSERT_WLOCKED(m->object);
        KASSERT(!vm_page_xbusied(m),
            ("mmu_booke_clear_modify: page %p is exclusive busied", m));

        /*
         * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
         * If the object containing the page is locked and the page is not
         * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
         */
        if ((m->aflags & PGA_WRITEABLE) == 0)
                return;
        rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
                    PTE_ISVALID(pte)) {
                        mtx_lock_spin(&tlbivax_mutex);
                        tlb_miss_lock();

                        if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
                                tlb0_flush_entry(pv->pv_va);
                                pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
                                    PTE_REFERENCED);
                        }

                        tlb_miss_unlock();
                        mtx_unlock_spin(&tlbivax_mutex);
                }
                PMAP_UNLOCK(pv->pv_pmap);
        }
        rw_wunlock(&pvh_global_lock);
}

/*
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
static int
mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;
        int count;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_ts_referenced: page %p is not managed", m));
        count = 0;
        rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
                    PTE_ISVALID(pte)) {
                        if (PTE_ISREFERENCED(pte)) {
                                mtx_lock_spin(&tlbivax_mutex);
                                tlb_miss_lock();

                                tlb0_flush_entry(pv->pv_va);
                                pte->flags &= ~PTE_REFERENCED;

                                tlb_miss_unlock();
                                mtx_unlock_spin(&tlbivax_mutex);

                                if (++count > 4) {
                                        PMAP_UNLOCK(pv->pv_pmap);
                                        break;
                                }
                        }
                }
                PMAP_UNLOCK(pv->pv_pmap);
        }
        rw_wunlock(&pvh_global_lock);
        return (count);
}

/*
 * Clear the wired attribute from the mappings for the specified range of
 * addresses in the given pmap.  Every valid mapping within that range must
 * have the wired attribute set.  In contrast, invalid mappings cannot have
 * the wired attribute set, so they are ignored.
 *
 * The wired attribute of the page table entry is not a hardware feature, so
 * there is no need to invalidate any TLB entries.
 */
static void
mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
        vm_offset_t va;
        pte_t *pte;

        PMAP_LOCK(pmap);
        for (va = sva; va < eva; va += PAGE_SIZE) {
                if ((pte = pte_find(mmu, pmap, va)) != NULL &&
                    PTE_ISVALID(pte)) {
                        if (!PTE_ISWIRED(pte))
                                panic("mmu_booke_unwire: pte %p isn't wired",
                                    pte);
                        pte->flags &= ~PTE_WIRED;
                        pmap->pm_stats.wired_count--;
                }
        }
        PMAP_UNLOCK(pmap);
}

/*
 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
 * page.  This count may be changed upwards or downwards in the future; it is
 * only necessary that true be returned for a small subset of pmaps for proper
 * page aging.
 */
static boolean_t
mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
        pv_entry_t pv;
        int loops;
        boolean_t rv;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_page_exists_quick: page %p is not managed", m));
        loops = 0;
        rv = FALSE;
        rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                if (pv->pv_pmap == pmap) {
                        rv = TRUE;
                        break;
                }
                if (++loops >= 16)
                        break;
        }
        rw_wunlock(&pvh_global_lock);
        return (rv);
}

/*
 * Return the number of managed mappings to the given physical page that are
 * wired.
 */
static int
mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
        pv_entry_t pv;
        pte_t *pte;
        int count = 0;

        if ((m->oflags & VPO_UNMANAGED) != 0)
                return (count);
        rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
                        if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
                                count++;
                PMAP_UNLOCK(pv->pv_pmap);
        }
        rw_wunlock(&pvh_global_lock);
        return (count);
}

static int
mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t va;
        int i;

        /*
         * This currently does not work for entries that
         * overlap TLB1 entries.
         */
        for (i = 0; i < tlb1_idx; i++) {
                if (tlb1_iomapped(i, pa, size, &va) == 0)
                        return (0);
        }

        return (EFAULT);
}

void
mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
        vm_paddr_t ppa;
        vm_offset_t ofs;
        vm_size_t gran;

        /* Minidumps are based on virtual memory addresses. */
        if (do_minidump) {
                *va = (void *)(vm_offset_t)pa;
                return;
        }

        /* Raw physical memory dumps don't have a virtual address. */
        /* We always map a 256MB page at 256M. */
        gran = 256 * 1024 * 1024;
        ppa = pa & ~(gran - 1);
        ofs = pa - ppa;
        *va = (void *)gran;
        tlb1_set_entry((vm_offset_t)*va, ppa, gran, _TLB_ENTRY_IO);

        if (sz > (gran - ofs))
                tlb1_set_entry((vm_offset_t)*va + gran, ppa + gran, gran,
                    _TLB_ENTRY_IO);
}

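/*
 * Worked example (illustrative, not from the original source): for
 * pa = 0x1234_5678 with gran = 256MB, ppa = pa & ~(gran - 1) = 0x1000_0000
 * and ofs = 0x0234_5678. A dump chunk extending past ppa + 256MB (i.e.
 * sz > gran - ofs) needs the second, adjacent 256MB TLB1 entry as well.
 */
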
void
mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
{
        vm_paddr_t ppa;
        vm_offset_t ofs;
        vm_size_t gran;

        /* Minidumps are based on virtual memory addresses. */
        /* Nothing to do... */
        if (do_minidump)
                return;

        /* Raw physical memory dumps don't have a virtual address. */
        tlb1_idx--;
        tlb1[tlb1_idx].mas1 = 0;
        tlb1[tlb1_idx].mas2 = 0;
        tlb1[tlb1_idx].mas3 = 0;
        tlb1_write_entry(tlb1_idx);

        gran = 256 * 1024 * 1024;
        ppa = pa & ~(gran - 1);
        ofs = pa - ppa;
        if (sz > (gran - ofs)) {
                tlb1_idx--;
                tlb1[tlb1_idx].mas1 = 0;
                tlb1[tlb1_idx].mas2 = 0;
                tlb1[tlb1_idx].mas3 = 0;
                tlb1_write_entry(tlb1_idx);
        }
}

extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];

void
mmu_booke_scan_init(mmu_t mmu)
{
        vm_offset_t va;
        pte_t *pte;
        int i;

        if (!do_minidump) {
                /* Initialize phys. segments for dumpsys(). */
                memset(&dump_map, 0, sizeof(dump_map));
                mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
                    &availmem_regions_sz);
                for (i = 0; i < physmem_regions_sz; i++) {
                        dump_map[i].pa_start = physmem_regions[i].mr_start;
                        dump_map[i].pa_size = physmem_regions[i].mr_size;
                }
                return;
        }

        /* Virtual segments for minidumps: */
        memset(&dump_map, 0, sizeof(dump_map));

        /* 1st: kernel .data and .bss. */
        dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
        dump_map[0].pa_size =
            round_page((uintptr_t)_end) - dump_map[0].pa_start;

        /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
        dump_map[1].pa_start = data_start;
        dump_map[1].pa_size = data_end - data_start;

        /* 3rd: kernel VM. */
        va = dump_map[1].pa_start + dump_map[1].pa_size;
        /* Find start of next chunk (from va). */
        while (va < virtual_end) {
                /* Don't dump the buffer cache. */
                if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
                        va = kmi.buffer_eva;
                        continue;
                }
                pte = pte_find(mmu, kernel_pmap, va);
                if (pte != NULL && PTE_ISVALID(pte))
                        break;
                va += PAGE_SIZE;
        }
        if (va < virtual_end) {
                dump_map[2].pa_start = va;
                va += PAGE_SIZE;
                /* Find last page in chunk. */
                while (va < virtual_end) {
                        /* Don't run into the buffer cache. */
                        if (va == kmi.buffer_sva)
                                break;
                        pte = pte_find(mmu, kernel_pmap, va);
                        if (pte == NULL || !PTE_ISVALID(pte))
                                break;
                        va += PAGE_SIZE;
                }
                dump_map[2].pa_size = va - dump_map[2].pa_start;
        }
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped. This routine is intended to be used
 * for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

        return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

static void *
mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
        void *res;
        uintptr_t va;
        vm_size_t sz;
        int i;

        /*
         * Check if this is premapped in TLB1. Note: this should probably also
         * check whether a sequence of TLB1 entries exist that match the
         * requirement, but now only checks the easy case.
         */
        if (ma == VM_MEMATTR_DEFAULT) {
                for (i = 0; i < tlb1_idx; i++) {
                        if (!(tlb1[i].mas1 & MAS1_VALID))
                                continue;
                        if (pa >= tlb1[i].phys &&
                            (pa + size) <= (tlb1[i].phys + tlb1[i].size))
                                return (void *)(tlb1[i].virt +
                                    (vm_offset_t)(pa - tlb1[i].phys));
                }
        }

        size = roundup(size, PAGE_SIZE);

        /*
         * We leave a hole for device direct mapping between the maximum user
         * address (0x8000000) and the minimum KVA address (0xc0000000). If
         * devices are in there, just map them 1:1. If not, map them to the
         * device mapping area above VM_MAX_KERNEL_ADDRESS. These mapped
         * addresses should be pulled from an allocator, but since we do not
         * ever free TLB1 entries, it is safe just to increment a counter.
         * Note that there isn't a lot of address space here (128 MB) and it
         * is not at all difficult to imagine running out, since that is a 4:1
         * compression from the 0xc0000000 - 0xf0000000 address space that gets
         * mapped there.
         */
        if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
            (pa + size - 1) < VM_MIN_KERNEL_ADDRESS)
                va = pa;
        else
                va = atomic_fetchadd_int(&tlb1_map_base, size);
        res = (void *)va;

        do {
                sz = 1 << (ilog2(size) & ~1);
                if (va % sz != 0) {
                        do {
                                sz >>= 2;
                        } while (va % sz != 0);
                }
                if (bootverbose)
                        printf("Wiring VA=%x to PA=%llx (size=%x), "
                            "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
                tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma));
                size -= sz;
                pa += sz;
                va += sz;
        } while (size > 0);

        return (res);
}

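/*
 * Worked example (illustrative, not from the original source): mapping
 * 5 MB at a 4MB-aligned VA first tries sz = 1 << (ilog2(5MB) & ~1) = 4MB;
 * the VA is 4MB-aligned, so a 4MB entry is written and the loop repeats
 * with the remaining 1 MB, which goes in as a single 1MB entry. The
 * "& ~1" keeps sz a power of 4, since that is what the MAS1 TSIZE field
 * can express on these cores.
 */
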
/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
#ifdef SUPPORTS_SHRINKING_TLB1
        vm_offset_t base, offset;

        /*
         * Unmap only if this is inside kernel virtual space.
         */
        if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
                base = trunc_page(va);
                offset = va & PAGE_MASK;
                size = roundup(offset + size, PAGE_SIZE);
                kva_free(base, size);
        }
#endif
}

/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap. This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{

        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
            ("mmu_booke_object_init_pt: non-device object"));
}

/*
 * Perform the pmap work for mincore.
 */
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_paddr_t *locked_pa)
{

        /* XXX: this should be implemented at some point */
        return (0);
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Allocate a TID. If necessary, steal one from someone else.
 * The new TID is flushed from the TLB before returning.
 */
static tlbtid_t
tid_alloc(pmap_t pmap)
{
        tlbtid_t tid;
        int thiscpu;

        KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));

        CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);

        thiscpu = PCPU_GET(cpuid);

        tid = PCPU_GET(tid_next);
        if (tid > TID_MAX)
                tid = TID_MIN;
        PCPU_SET(tid_next, tid + 1);

        /* If we are stealing TID then clear the relevant pmap's field */
        if (tidbusy[thiscpu][tid] != NULL) {

                CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);

                tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;

                /* Flush all entries from TLB0 matching this TID. */
                tid_flush(tid, tlb0_ways, tlb0_entries_per_way);
        }

        tidbusy[thiscpu][tid] = pmap;
        pmap->pm_tid[thiscpu] = tid;
        __asm __volatile("msync; isync");

        CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
            PCPU_GET(tid_next));

        return (tid);
}

/**************************************************************************/
/* TLB0 handling */
/**************************************************************************/

static void
tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
    uint32_t mas7)
{
        int as;
        char desc[3];
        tlbtid_t tid;
        vm_size_t size;
        unsigned int tsize;

        desc[2] = '\0';
        if (mas1 & MAS1_VALID)
                desc[0] = 'V';
        else
                desc[0] = ' ';

        if (mas1 & MAS1_IPROT)
                desc[1] = 'P';
        else
                desc[1] = ' ';

        as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
        tid = MAS1_GETTID(mas1);

        tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 0;
        if (tsize)
                size = tsize2size(tsize);

        debugf("%3d: (%s) [AS=%d] "
            "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
            "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
            i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
}

/* Convert TLB0 va and way number to tlb0[] table index. */
static inline unsigned int
tlb0_tableidx(vm_offset_t va, unsigned int way)
{
        unsigned int idx;

        idx = (way * TLB0_ENTRIES_PER_WAY);
        idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
        return (idx);
}

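/*
 * Worked example (illustrative, not from the original source): on a core
 * with 128 entries per way (e.g. e500v2's 512-entry, 4-way TLB0), the set
 * is selected by VA bits just above the page offset. For va = 0x0003_5000
 * and way = 2 the entry index within the way is (va >> PAGE_SHIFT) & 0x7f
 * = 0x35, so the function returns 2 * 128 + 0x35 = 309.
 */
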
/*
 * Invalidate TLB0 entry.
 */
static inline void
tlb0_flush_entry(vm_offset_t va)
{

        CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);

        mtx_assert(&tlbivax_mutex, MA_OWNED);

        __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
        __asm __volatile("isync; msync");
        __asm __volatile("tlbsync; msync");

        CTR1(KTR_PMAP, "%s: e", __func__);
}

/* Print out contents of the MAS registers for each TLB0 entry */
void
tlb0_print_tlbentries(void)
{
        uint32_t mas0, mas1, mas2, mas3, mas7;
        int entryidx, way, idx;

        debugf("TLB0 entries:\n");
        for (way = 0; way < TLB0_WAYS; way++)
                for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {

                        mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
                        mtspr(SPR_MAS0, mas0);
                        __asm __volatile("isync");

                        mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
                        mtspr(SPR_MAS2, mas2);

                        __asm __volatile("isync; tlbre");

                        mas1 = mfspr(SPR_MAS1);
                        mas2 = mfspr(SPR_MAS2);
                        mas3 = mfspr(SPR_MAS3);
                        mas7 = mfspr(SPR_MAS7);

                        idx = tlb0_tableidx(mas2, way);
                        tlb_print_entry(idx, mas1, mas2, mas3, mas7);
                }
}

/**************************************************************************/
/* TLB1 handling */
/**************************************************************************/

/*
 * TLB1 mapping notes:
 *
 * TLB1[0]    Kernel text and data.
 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
 *            windows, other device mappings.
 */

/*
 * Write given entry to TLB1 hardware.
 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
 */
static void
tlb1_write_entry(unsigned int idx)
{
        uint32_t mas0;

        //debugf("tlb1_write_entry: s\n");

        /* Select entry */
        mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
        //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);

        mtspr(SPR_MAS0, mas0);
        __asm __volatile("isync");
        mtspr(SPR_MAS1, tlb1[idx].mas1);
        __asm __volatile("isync");
        mtspr(SPR_MAS2, tlb1[idx].mas2);
        __asm __volatile("isync");
        mtspr(SPR_MAS3, tlb1[idx].mas3);
        __asm __volatile("isync");
        switch ((mfpvr() >> 16) & 0xFFFF) {
        case FSL_E500mc:
        case FSL_E5500:
                mtspr(SPR_MAS8, 0);
                __asm __volatile("isync");
                break;
        default:
                mtspr(SPR_MAS7, tlb1[idx].mas7);
                __asm __volatile("isync");
                break;
        }

        __asm __volatile("tlbwe; isync; msync");

        //debugf("tlb1_write_entry: e\n");
}

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned int
ilog2(unsigned int num)
{
        int lz;

        __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
        return (31 - lz);
}

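/*
 * Worked example (illustrative, not from the original source): cntlzw
 * counts leading zeros of a 32-bit word, so for num = 0x0001_0000 it
 * yields lz = 15 and ilog2() returns 31 - 15 = 16. For inputs that are
 * not powers of two the result rounds down, e.g. ilog2(0x0001_8000) is
 * also 16.
 */
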
/*
 * Convert TLB TSIZE value to mapped region size.
 */
static vm_size_t
tsize2size(unsigned int tsize)
{

        /*
         * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
         */

        return ((1 << (2 * tsize)) * 1024);
}

/*
 * Convert region size (must be power of 4) to TLB TSIZE value.
 */
static unsigned int
size2tsize(vm_size_t size)
{

        return (ilog2(size) / 2 - 5);
}

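/*
 * Worked example (illustrative, not from the original source): TSIZE
 * counts in powers of 4 starting at 1 KB, so tsize2size(7) = 4^7 * 1KB
 * = 16 MB, and in the other direction size2tsize(16 MB) = ilog2(16M)/2
 * - 5 = 24/2 - 5 = 7. The two conversions round-trip for any power-of-4
 * size from 4 KB (TSIZE 1) upward.
 */
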
/*
 * Register permanent kernel mapping in TLB1.
 *
 * Entries are created starting from index 0 (current free entry is
 * kept in tlb1_idx) and are not supposed to be invalidated.
 */
static int
tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
    uint32_t flags)
{
        uint32_t ts, tid;
        int tsize, index;

        index = atomic_fetchadd_int(&tlb1_idx, 1);
        if (index >= TLB1_ENTRIES) {
                printf("tlb1_set_entry: TLB1 full!\n");
                return (-1);
        }

        /* Convert size to TSIZE */
        tsize = size2tsize(size);

        tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
        /* XXX TS is hard coded to 0 for now as we only use single address space */
        ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;

        /*
         * Atomicity is preserved by the atomic increment above since nothing
         * is ever removed from tlb1.
         */

        tlb1[index].phys = pa;
        tlb1[index].virt = va;
        tlb1[index].size = size;
        tlb1[index].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
        tlb1[index].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
        tlb1[index].mas2 = (va & MAS2_EPN_MASK) | flags;

        /* Set supervisor RWX permission bits */
        tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
        tlb1[index].mas7 = (pa >> 32) & MAS7_RPN;

        tlb1_write_entry(index);

        /*
         * XXX in general TLB1 updates should be propagated between CPUs,
         * since current design assumes to have the same TLB1 set-up on all
         * cores.
         */

        return (0);
}

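/*
 * Worked example (illustrative, not from the original source): wiring
 * 16 MB at va 0xc000_0000 -> pa 0 produces roughly:
 *
 *      mas1 = MAS1_VALID | MAS1_IPROT | (7 << MAS1_TSIZE_SHIFT)  (TID=0, TS=0)
 *      mas2 = 0xc0000000 | flags                                  (EPN + WIMGE)
 *      mas3 = 0x00000000 | MAS3_SR | MAS3_SW | MAS3_SX            (RPN + perms)
 *      mas7 = 0                                                   (PA bits 32-35)
 *
 * MAS1_IPROT protects the entry from broadcast invalidations, which is
 * why these entries are never expected to be removed.
 */
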
/*
 * Map in contiguous RAM region into the TLB1 using maximum of
 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
 *
 * If necessary round up last entry size and return total size
 * used by all allocated entries.
 */
vm_size_t
tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
        vm_size_t mapped, pgsz, base, mask;
        int idx, nents;

        /* Round up to the next 1M */
        size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);

        mapped = 0;
        idx = 0;
        base = va;
        pgsz = 64*1024*1024;
        while (mapped < size) {
                while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
                        while (pgsz > (size - mapped))
                                pgsz >>= 2;
                        pgs[idx++] = pgsz;
                        mapped += pgsz;
                }

                /* We under-map. Correct for this. */
                if (mapped < size) {
                        while (pgs[idx - 1] == pgsz) {
                                idx--;
                                mapped -= pgsz;
                        }
                        /* XXX We may increase beyond our starting point. */
                        pgsz <<= 2;
                        pgs[idx++] = pgsz;
                        mapped += pgsz;
                }
        }

        nents = idx;
        mask = pgs[0] - 1;
        /* Align address to the boundary */
        if (va & mask) {
                va = (va + mask) & ~mask;
                pa = (pa + mask) & ~mask;
        }

        for (idx = 0; idx < nents; idx++) {
                pgsz = pgs[idx];
                debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
                tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
                pa += pgsz;
                va += pgsz;
        }

        mapped = (va - base);
        printf("mapped size 0x%08x (wasted space 0x%08x)\n",
            mapped, mapped - size);
        return (mapped);
}

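/*
 * Worked example (illustrative, not from the original source): for a
 * 96 MB region the inner loop picks pgs[] = { 64MB, 16MB, 16MB } (each
 * page a power of 4, never larger than what remains), i.e. three TLB1
 * entries with no wasted space. A 48 MB region ends up as
 * { 16MB, 16MB, 16MB }; the under-map correction only kicks in when the
 * entry budget runs out before the region is covered.
 */
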
/*
 * TLB1 initialization routine, to be called after the very first
 * assembler level setup done in locore.S.
 */
void
tlb1_init()
{
        uint32_t mas0, mas1, mas2, mas3, mas7;
        uint32_t tsz;
        int i;

        tlb1_idx = 1;

        /* Read back the initial kernel entry set up in locore.S. */
        mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
        mtspr(SPR_MAS0, mas0);
        __asm __volatile("isync; tlbre");

        mas1 = mfspr(SPR_MAS1);
        mas2 = mfspr(SPR_MAS2);
        mas3 = mfspr(SPR_MAS3);
        mas7 = mfspr(SPR_MAS7);

        tlb1[0].mas1 = mas1;
        tlb1[0].mas2 = mfspr(SPR_MAS2);
        tlb1[0].mas3 = mas3;
        tlb1[0].mas7 = mas7;
        tlb1[0].virt = mas2 & MAS2_EPN_MASK;
        tlb1[0].phys = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
            (mas3 & MAS3_RPN);

        kernload = tlb1[0].phys;

        tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        tlb1[0].size = (tsz > 0) ? tsize2size(tsz) : 0;
        kernsize += tlb1[0].size;

#ifdef SMP
        bp_ntlb1s = tlb1_idx;
#endif

        /* Purge the remaining entries */
        for (i = tlb1_idx; i < TLB1_ENTRIES; i++)
                tlb1_write_entry(i);

        /* Setup TLB miss defaults */
        set_mas4_defaults();
}

vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
        vm_paddr_t pa_base;
        vm_offset_t va, sz;
        int i;

        KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));

        for (i = 0; i < tlb1_idx; i++) {
                if (!(tlb1[i].mas1 & MAS1_VALID))
                        continue;
                if (pa >= tlb1[i].phys && (pa + size) <=
                    (tlb1[i].phys + tlb1[i].size))
                        return (tlb1[i].virt + (pa - tlb1[i].phys));
        }

        pa_base = trunc_page(pa);
        size = roundup(size + (pa - pa_base), PAGE_SIZE);
        tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
        va = tlb1_map_base + (pa - pa_base);

        do {
                sz = 1 << (ilog2(size) & ~1);
                tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO);
                size -= sz;
                pa_base += sz;
                tlb1_map_base += sz;
        } while (size > 0);

#ifdef SMP
        bp_ntlb1s = tlb1_idx;
#endif

        return (va);
}

/*
 * Setup MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
        uint32_t mas4;

        /* Defaults: TLB0, PID0, TSIZED=4K */
        mas4 = MAS4_TLBSELD0;
        mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
#ifdef SMP
        mas4 |= MAS4_MD;
#endif
        mtspr(SPR_MAS4, mas4);
        __asm __volatile("isync");
}

/*
 * Print out contents of the MAS registers for each TLB1 entry
 */
void
tlb1_print_tlbentries(void)
{
        uint32_t mas0, mas1, mas2, mas3, mas7;
        int i;

        debugf("TLB1 entries:\n");
        for (i = 0; i < TLB1_ENTRIES; i++) {

                mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
                mtspr(SPR_MAS0, mas0);

                __asm __volatile("isync; tlbre");

                mas1 = mfspr(SPR_MAS1);
                mas2 = mfspr(SPR_MAS2);
                mas3 = mfspr(SPR_MAS3);
                mas7 = mfspr(SPR_MAS7);

                tlb_print_entry(i, mas1, mas2, mas3, mas7);
        }
}

/*
 * Print out contents of the in-ram tlb1 table.
 */
void
tlb1_print_entries(void)
{
        int i;

        debugf("tlb1[] table entries:\n");
        for (i = 0; i < TLB1_ENTRIES; i++)
                tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3,
                    tlb1[i].mas7);
}

/*
 * Return 0 if the physical IO range is encompassed by one of the
 * TLB1 entries, otherwise return related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
        uint32_t prot;
        vm_paddr_t pa_start;
        vm_paddr_t pa_end;
        unsigned int entry_tsize;
        vm_size_t entry_size;

        *va = (vm_offset_t)NULL;

        /* Skip invalid entries */
        if (!(tlb1[i].mas1 & MAS1_VALID))
                return (EINVAL);

        /*
         * The entry must be cache-inhibited, guarded, and r/w
         * so it can function as an i/o page
         */
        prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
        if (prot != (MAS2_I | MAS2_G))
                return (EPERM);

        prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
        if (prot != (MAS3_SR | MAS3_SW))
                return (EPERM);

        /* The address should be within the entry range. */
        entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

        entry_size = tsize2size(entry_tsize);
        pa_start = (((vm_paddr_t)tlb1[i].mas7 & MAS7_RPN) << 32) |
            (tlb1[i].mas3 & MAS3_RPN);
        pa_end = pa_start + entry_size;

        if ((pa < pa_start) || ((pa + size) > pa_end))
                return (ERANGE);

        /* Return virtual address of this mapping. */
        *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
        return (0);
}