2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * Some hw specific parts of this pmap were derived or influenced
27 * by NetBSD's ibm4xx pmap module. More generic code is shared with
28 * a few other pmap modules from the FreeBSD tree.
34 * Kernel and user threads run within one common virtual address space
38 * Virtual address space layout:
39 * -----------------------------
40 * 0x0000_0000 - 0x7fff_ffff : user process
41 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
42 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
43 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
44 * 0xc100_0000 - 0xffff_ffff : KVA
45 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
46 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
47 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
48 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
51 * Virtual address space layout:
52 * -----------------------------
53 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
54 * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
55 * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
56 * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
57 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
58 * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
59 * endkernel - msgbufp-1 : flat device tree
60 * msgbufp - ptbl_bufs-1 : message buffer
61 * ptbl_bufs - kernel_pdir-1 : kernel page tables
62 * kernel_pdir - kernel_pp2d-1 : kernel page directory
63 * kernel_pp2d - . : kernel pointers to page directory
64 * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
65 * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
66 * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
67 * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
68 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
69 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
70 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
71 * 0xf000_0000_0000_0000 - +Maxmem : physmem map
72 * - 0xffff_ffff_ffff_ffff : device direct map
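 *
 * Illustration (a sketch only, using the PP2D_IDX/PDIR_IDX/PTBL_IDX macros
 * that appear throughout this file): on 64-bit a virtual address reaches
 * its PTE through three software levels,
 *
 *	pdir = pmap->pm_pp2d[PP2D_IDX(va)];	// page of pdir pointers
 *	ptbl = pdir[PDIR_IDX(va)];		// page table
 *	pte  = &ptbl[PTBL_IDX(va)];		// final 4KB PTE
 *
 * which is the same walk pte_find() performs below.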
75 #include <sys/cdefs.h>
76 __FBSDID("$FreeBSD$");
78 #include "opt_kstack_pages.h"
80 #include <sys/param.h>
82 #include <sys/malloc.h>
86 #include <sys/queue.h>
87 #include <sys/systm.h>
88 #include <sys/kernel.h>
89 #include <sys/kerneldump.h>
90 #include <sys/linker.h>
91 #include <sys/msgbuf.h>
93 #include <sys/mutex.h>
94 #include <sys/rwlock.h>
95 #include <sys/sched.h>
97 #include <sys/vmmeter.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_kern.h>
102 #include <vm/vm_pageout.h>
103 #include <vm/vm_extern.h>
104 #include <vm/vm_object.h>
105 #include <vm/vm_param.h>
106 #include <vm/vm_map.h>
107 #include <vm/vm_pager.h>
110 #include <machine/_inttypes.h>
111 #include <machine/cpu.h>
112 #include <machine/pcb.h>
113 #include <machine/platform.h>
115 #include <machine/tlb.h>
116 #include <machine/spr.h>
117 #include <machine/md_var.h>
118 #include <machine/mmuvar.h>
119 #include <machine/pmap.h>
120 #include <machine/pte.h>
124 #define SPARSE_MAPDEV
126 #define debugf(fmt, args...) printf(fmt, ##args)
128 #define debugf(fmt, args...)
132 #define PRI0ptrX "016lx"
134 #define PRI0ptrX "08x"
137 #define TODO panic("%s: not implemented", __func__);
139 extern unsigned char _etext[];
140 extern unsigned char _end[];
142 extern uint32_t *bootinfo;
145 vm_offset_t kernstart;
148 /* Message buffer and tables. */
149 static vm_offset_t data_start;
150 static vm_size_t data_end;
152 /* Phys/avail memory regions. */
153 static struct mem_region *availmem_regions;
154 static int availmem_regions_sz;
155 static struct mem_region *physmem_regions;
156 static int physmem_regions_sz;
158 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
159 static vm_offset_t zero_page_va;
160 static struct mtx zero_page_mutex;
162 static struct mtx tlbivax_mutex;
164 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
165 static vm_offset_t copy_page_src_va;
166 static vm_offset_t copy_page_dst_va;
167 static struct mtx copy_page_mutex;
169 /**************************************************************************/
171 /**************************************************************************/
173 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
174 vm_prot_t, u_int flags, int8_t psind);
176 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
177 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
179 unsigned int kernel_pdirs;
183 * If user pmap is processed with mmu_booke_remove and the resident count
184 * drops to 0, there are no more pages to remove, so we need not continue.
186 #define PMAP_REMOVE_DONE(pmap) \
187 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
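/*
 * Usage sketch (an illustration only, not the verbatim body of
 * mmu_booke_remove()): a loop tearing down a user VA range can stop as soon
 * as the pmap has no resident pages left:
 *
 *	for (va = sva; va < eva; va += PAGE_SIZE) {
 *		pte = pte_find(mmu, pmap, va);
 *		if (pte != NULL && PTE_ISVALID(pte))
 *			pte_remove(mmu, pmap, va, PTBL_UNHOLD);
 *		if (PMAP_REMOVE_DONE(pmap))
 *			break;
 *	}
 */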
189 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
190 extern int elf32_nxstack;
193 /**************************************************************************/
194 /* TLB and TID handling */
195 /**************************************************************************/
197 /* Translation ID busy table */
198 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
201 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
202 * core revisions and should be read from h/w registers during early config.
204 uint32_t tlb0_entries;
206 uint32_t tlb0_entries_per_way;
207 uint32_t tlb1_entries;
209 #define TLB0_ENTRIES (tlb0_entries)
210 #define TLB0_WAYS (tlb0_ways)
211 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
213 #define TLB1_ENTRIES (tlb1_entries)
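/*
 * Worked example (typical e500v2 values; the real geometry is read from
 * TLB0CFG/TLB1CFG in tlb0_get_tlbconf()/tlb1_get_tlbconf() below): a
 * 512-entry, 4-way set-associative TLB0 gives TLB0_ENTRIES_PER_WAY =
 * 512 / 4 = 128, while TLB1 is a 16-entry fully-associative array.
 */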
215 static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
217 static tlbtid_t tid_alloc(struct pmap *);
218 static void tid_flush(tlbtid_t tid);
221 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
223 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
226 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
227 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
228 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
229 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
231 static vm_size_t tsize2size(unsigned int);
232 static unsigned int size2tsize(vm_size_t);
233 static unsigned int ilog2(unsigned int);
235 static void set_mas4_defaults(void);
237 static inline void tlb0_flush_entry(vm_offset_t);
238 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
240 /**************************************************************************/
241 /* Page table management */
242 /**************************************************************************/
244 static struct rwlock_padalign pvh_global_lock;
246 /* Data for the pv entry allocation mechanism */
247 static uma_zone_t pvzone;
248 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
250 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
252 #ifndef PMAP_SHPGPERPROC
253 #define PMAP_SHPGPERPROC 200
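/*
 * Sizing note (this mirrors the computation done in mmu_booke_init() below
 * and is shown here only for reference): unless overridden by the
 * vm.pmap.pv_entries tunable,
 *
 *	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
 *	pv_entry_high_water = 9 * (pv_entry_max / 10);
 *
 * where shpgperproc starts at PMAP_SHPGPERPROC and may itself be changed
 * via the vm.pmap.shpgperproc tunable.
 */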
256 static void ptbl_init(void);
257 static struct ptbl_buf *ptbl_buf_alloc(void);
258 static void ptbl_buf_free(struct ptbl_buf *);
259 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
262 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
263 unsigned int, boolean_t);
264 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
265 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
266 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
268 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
269 static void ptbl_free(mmu_t, pmap_t, unsigned int);
270 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
271 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
274 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
275 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
276 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
277 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
278 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
280 static pv_entry_t pv_alloc(void);
281 static void pv_free(pv_entry_t);
282 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
283 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
285 static void booke_pmap_init_qpages(void);
287 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
289 #define PTBL_BUFS (16UL * 16 * 16)
291 #define PTBL_BUFS (128 * 16)
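/*
 * KVA footprint (a derivation from the constants above, not new code): each
 * buf fronts one ptbl of PTBL_PAGES pages, so the pool consumes
 * PTBL_BUFS * PTBL_PAGES * PAGE_SIZE bytes of KVA, the amount reserved at
 * ptbl_buf_pool_vabase in mmu_booke_bootstrap().
 */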
295 TAILQ_ENTRY(ptbl_buf) link; /* list link */
296 vm_offset_t kva; /* va of mapping */
299 /* ptbl free list and a lock used for access synchronization. */
300 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
301 static struct mtx ptbl_buf_freelist_lock;
303 /* Base address of kva space allocated for ptbl bufs. */
304 static vm_offset_t ptbl_buf_pool_vabase;
306 /* Pointer to ptbl_buf structures. */
307 static struct ptbl_buf *ptbl_bufs;
310 extern tlb_entry_t __boot_tlb1[];
311 void pmap_bootstrap_ap(volatile uint32_t *);
315 * Kernel MMU interface
317 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
318 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
319 vm_size_t, vm_offset_t);
320 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
321 static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
322 vm_offset_t, vm_page_t *, vm_offset_t, int);
323 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
324 vm_prot_t, u_int flags, int8_t psind);
325 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
326 vm_page_t, vm_prot_t);
327 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
329 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
330 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
332 static void mmu_booke_init(mmu_t);
333 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
334 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
335 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
336 static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
337 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
339 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
341 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
342 vm_object_t, vm_pindex_t, vm_size_t);
343 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
344 static void mmu_booke_page_init(mmu_t, vm_page_t);
345 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
346 static void mmu_booke_pinit(mmu_t, pmap_t);
347 static void mmu_booke_pinit0(mmu_t, pmap_t);
348 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
350 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
351 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
352 static void mmu_booke_release(mmu_t, pmap_t);
353 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
354 static void mmu_booke_remove_all(mmu_t, vm_page_t);
355 static void mmu_booke_remove_write(mmu_t, vm_page_t);
356 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
357 static void mmu_booke_zero_page(mmu_t, vm_page_t);
358 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
359 static void mmu_booke_activate(mmu_t, struct thread *);
360 static void mmu_booke_deactivate(mmu_t, struct thread *);
361 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
362 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
363 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
364 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
365 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
366 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
367 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
368 static void mmu_booke_kremove(mmu_t, vm_offset_t);
369 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
370 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
372 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
374 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
376 static void mmu_booke_scan_init(mmu_t);
377 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
378 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
379 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
380 vm_size_t sz, vm_memattr_t mode);
382 static mmu_method_t mmu_booke_methods[] = {
383 /* pmap dispatcher interface */
384 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
385 MMUMETHOD(mmu_copy, mmu_booke_copy),
386 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
387 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
388 MMUMETHOD(mmu_enter, mmu_booke_enter),
389 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
390 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
391 MMUMETHOD(mmu_extract, mmu_booke_extract),
392 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
393 MMUMETHOD(mmu_init, mmu_booke_init),
394 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
395 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
396 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
397 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
398 MMUMETHOD(mmu_map, mmu_booke_map),
399 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
400 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
401 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
402 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
403 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
404 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
405 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
406 MMUMETHOD(mmu_protect, mmu_booke_protect),
407 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
408 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
409 MMUMETHOD(mmu_release, mmu_booke_release),
410 MMUMETHOD(mmu_remove, mmu_booke_remove),
411 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
412 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
413 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
414 MMUMETHOD(mmu_unwire, mmu_booke_unwire),
415 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
416 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
417 MMUMETHOD(mmu_activate, mmu_booke_activate),
418 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
419 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
420 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
422 /* Internal interfaces */
423 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
424 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
425 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
426 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
427 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
428 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
429 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
430 MMUMETHOD(mmu_kremove, mmu_booke_kremove),
431 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
432 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
434 /* dumpsys() support */
435 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
436 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
437 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
442 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
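/*
 * Dispatch note (illustrative): the MI pmap entry points are thin KOBJ
 * wrappers, so a call such as pmap_enter(pmap, va, m, prot, flags, psind)
 * reaches mmu_booke_enter() through the mmu_enter method registered in the
 * table above.
 */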
444 static __inline uint32_t
445 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
450 if (ma != VM_MEMATTR_DEFAULT) {
452 case VM_MEMATTR_UNCACHEABLE:
453 return (MAS2_I | MAS2_G);
454 case VM_MEMATTR_WRITE_COMBINING:
455 case VM_MEMATTR_WRITE_BACK:
456 case VM_MEMATTR_PREFETCHABLE:
458 case VM_MEMATTR_WRITE_THROUGH:
459 return (MAS2_W | MAS2_M);
460 case VM_MEMATTR_CACHEABLE:
466 * Assume the page is cache inhibited and access is guarded unless
467 * it's in our available memory array.
469 attrib = _TLB_ENTRY_IO;
470 for (i = 0; i < physmem_regions_sz; i++) {
471 if ((pa >= physmem_regions[i].mr_start) &&
472 (pa < (physmem_regions[i].mr_start +
473 physmem_regions[i].mr_size))) {
474 attrib = _TLB_ENTRY_MEM;
491 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
494 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
495 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
497 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
498 ("tlb_miss_lock: tried to lock self"));
500 tlb_lock(pc->pc_booke_tlb_lock);
502 CTR1(KTR_PMAP, "%s: locked", __func__);
509 tlb_miss_unlock(void)
517 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
519 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
520 __func__, pc->pc_cpuid);
522 tlb_unlock(pc->pc_booke_tlb_lock);
524 CTR1(KTR_PMAP, "%s: unlocked", __func__);
530 /* Read TLB0 geometry (number of entries and ways) from TLB0CFG. */
532 tlb0_get_tlbconf(void)
536 tlb0_cfg = mfspr(SPR_TLB0CFG);
537 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
538 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
539 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
542 /* Read the number of TLB1 entries from TLB1CFG. */
544 tlb1_get_tlbconf(void)
548 tlb1_cfg = mfspr(SPR_TLB1CFG);
549 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
552 /**************************************************************************/
553 /* Page table related */
554 /**************************************************************************/
557 /* Initialize pool of kva ptbl buffers. */
563 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
564 TAILQ_INIT(&ptbl_buf_freelist);
566 for (i = 0; i < PTBL_BUFS; i++) {
567 ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
568 i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
569 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
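/*
 * Slot sizing note (restating the stride used in the loop above): each
 * free-list entry is carved out at MAX(PTBL_PAGES, PDIR_PAGES) pages so
 * that, on 64-bit, the same buf pool can back either a page table or a
 * page directory (see pdir_alloc()).
 */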
573 /* Get a ptbl_buf from the freelist. */
574 static struct ptbl_buf *
577 struct ptbl_buf *buf;
579 mtx_lock(&ptbl_buf_freelist_lock);
580 buf = TAILQ_FIRST(&ptbl_buf_freelist);
582 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
583 mtx_unlock(&ptbl_buf_freelist_lock);
588 /* Return a ptbl buf to the free pool. */
590 ptbl_buf_free(struct ptbl_buf *buf)
592 mtx_lock(&ptbl_buf_freelist_lock);
593 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
594 mtx_unlock(&ptbl_buf_freelist_lock);
598 * Search the pmap's list of allocated ptbl bufs for the one backing the given ptbl
601 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
603 struct ptbl_buf *pbuf;
605 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
606 if (pbuf->kva == (vm_offset_t) ptbl) {
607 /* Remove from pmap ptbl buf list. */
608 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
610 /* Free corresponding ptbl buf. */
618 /* Get a pointer to a PTE in a page table. */
619 static __inline pte_t *
620 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
625 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
627 pdir = pmap->pm_pp2d[PP2D_IDX(va)];
630 ptbl = pdir[PDIR_IDX(va)];
631 return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
635 * Search the pmap's list of allocated pdir bufs for the one backing the given pdir
638 ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
640 struct ptbl_buf *pbuf;
642 TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
643 if (pbuf->kva == (vm_offset_t) pdir) {
644 /* Remove from pmap ptbl buf list. */
645 TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
647 /* Free corresponding pdir buf. */
654 /* Free pdir pages and invalidate the pp2d entry. */
656 pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
664 pdir = pmap->pm_pp2d[pp2d_idx];
666 KASSERT((pdir != NULL), ("pdir_free: null pdir"));
668 pmap->pm_pp2d[pp2d_idx] = NULL;
670 for (i = 0; i < PDIR_PAGES; i++) {
671 va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
672 pa = pte_vatopa(mmu, kernel_pmap, va);
673 m = PHYS_TO_VM_PAGE(pa);
674 vm_page_free_zero(m);
675 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
679 ptbl_free_pmap_pdir(mmu, pmap, pdir);
683 * Decrement pdir pages hold count and attempt to free pdir pages. Called
684 * when removing directory entry from pdir.
686 * Return 1 if pdir pages were freed.
689 pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
696 KASSERT((pmap != kernel_pmap),
697 ("pdir_unhold: unholding kernel pdir!"));
699 pdir = pmap->pm_pp2d[pp2d_idx];
701 KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
702 ("pdir_unhold: non kva pdir"));
704 /* decrement hold count */
705 for (i = 0; i < PDIR_PAGES; i++) {
706 pa = pte_vatopa(mmu, kernel_pmap,
707 (vm_offset_t) pdir + (i * PAGE_SIZE));
708 m = PHYS_TO_VM_PAGE(pa);
713 * Free pdir pages if there are no dir entries in this pdir.
714 * wire_count has the same value for all pdir pages, so check the
717 if (m->wire_count == 0) {
718 pdir_free(mmu, pmap, pp2d_idx);
725 * Increment hold count for pdir pages. This routine is used when a new ptbl
726 * entry is being inserted into the pdir.
729 pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
735 KASSERT((pmap != kernel_pmap),
736 ("pdir_hold: holding kernel pdir!"));
738 KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
740 for (i = 0; i < PDIR_PAGES; i++) {
741 pa = pte_vatopa(mmu, kernel_pmap,
742 (vm_offset_t) pdir + (i * PAGE_SIZE));
743 m = PHYS_TO_VM_PAGE(pa);
748 /* Allocate page table. */
750 ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
753 vm_page_t mtbl [PTBL_PAGES];
755 struct ptbl_buf *pbuf;
761 KASSERT((pdir[pdir_idx] == NULL),
762 ("%s: valid ptbl entry exists!", __func__));
764 pbuf = ptbl_buf_alloc();
766 panic("%s: couldn't alloc kernel virtual memory", __func__);
768 ptbl = (pte_t *) pbuf->kva;
770 for (i = 0; i < PTBL_PAGES; i++) {
771 pidx = (PTBL_PAGES * pdir_idx) + i;
772 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
773 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
775 rw_wunlock(&pvh_global_lock);
777 ptbl_free_pmap_ptbl(pmap, ptbl);
778 for (j = 0; j < i; j++)
779 vm_page_free(mtbl[j]);
780 atomic_subtract_int(&vm_cnt.v_wire_count, i);
784 rw_wlock(&pvh_global_lock);
790 /* Map the allocated pages into kernel_pmap. */
791 mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
792 /* Zero whole ptbl. */
793 bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
795 /* Add pbuf to the pmap ptbl bufs list. */
796 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
801 /* Free ptbl pages and invalidate pdir entry. */
803 ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
811 ptbl = pdir[pdir_idx];
813 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
815 pdir[pdir_idx] = NULL;
817 for (i = 0; i < PTBL_PAGES; i++) {
818 va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
819 pa = pte_vatopa(mmu, kernel_pmap, va);
820 m = PHYS_TO_VM_PAGE(pa);
821 vm_page_free_zero(m);
822 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
826 ptbl_free_pmap_ptbl(pmap, ptbl);
830 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
831 * when removing pte entry from ptbl.
833 * Return 1 if ptbl pages were freed.
836 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
846 pp2d_idx = PP2D_IDX(va);
847 pdir_idx = PDIR_IDX(va);
849 KASSERT((pmap != kernel_pmap),
850 ("ptbl_unhold: unholding kernel ptbl!"));
852 pdir = pmap->pm_pp2d[pp2d_idx];
853 ptbl = pdir[pdir_idx];
855 KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
856 ("ptbl_unhold: non kva ptbl"));
858 /* decrement hold count */
859 for (i = 0; i < PTBL_PAGES; i++) {
860 pa = pte_vatopa(mmu, kernel_pmap,
861 (vm_offset_t) ptbl + (i * PAGE_SIZE));
862 m = PHYS_TO_VM_PAGE(pa);
867 * Free ptbl pages if there are no pte entries in this ptbl.
868 * wire_count has the same value for all ptbl pages, so check the
871 if (m->wire_count == 0) {
872 /* A pair of indirect entries might point to this ptbl page */
874 tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
875 TLB_SIZE_1M, MAS6_SIND);
876 tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
877 TLB_SIZE_1M, MAS6_SIND);
879 ptbl_free(mmu, pmap, pdir, pdir_idx);
880 pdir_unhold(mmu, pmap, pp2d_idx);
887 * Increment hold count for ptbl pages. This routine is used when a new pte
888 * entry is being inserted into the ptbl.
891 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
898 KASSERT((pmap != kernel_pmap),
899 ("ptbl_hold: holding kernel ptbl!"));
901 ptbl = pdir[pdir_idx];
903 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
905 for (i = 0; i < PTBL_PAGES; i++) {
906 pa = pte_vatopa(mmu, kernel_pmap,
907 (vm_offset_t) ptbl + (i * PAGE_SIZE));
908 m = PHYS_TO_VM_PAGE(pa);
914 /* Initialize pool of kva ptbl buffers. */
920 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
921 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
922 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
923 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
925 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
926 TAILQ_INIT(&ptbl_buf_freelist);
928 for (i = 0; i < PTBL_BUFS; i++) {
930 ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
931 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
935 /* Get a ptbl_buf from the freelist. */
936 static struct ptbl_buf *
939 struct ptbl_buf *buf;
941 mtx_lock(&ptbl_buf_freelist_lock);
942 buf = TAILQ_FIRST(&ptbl_buf_freelist);
944 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
945 mtx_unlock(&ptbl_buf_freelist_lock);
947 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
952 /* Return a ptbl buf to the free pool. */
954 ptbl_buf_free(struct ptbl_buf *buf)
957 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
959 mtx_lock(&ptbl_buf_freelist_lock);
960 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
961 mtx_unlock(&ptbl_buf_freelist_lock);
965 * Search the pmap's list of allocated ptbl bufs for the one backing the given ptbl
968 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
970 struct ptbl_buf *pbuf;
972 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
974 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
976 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
977 if (pbuf->kva == (vm_offset_t)ptbl) {
978 /* Remove from pmap ptbl buf list. */
979 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
981 /* Free corresponding ptbl buf. */
987 /* Allocate page table. */
989 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
991 vm_page_t mtbl[PTBL_PAGES];
993 struct ptbl_buf *pbuf;
998 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
999 (pmap == kernel_pmap), pdir_idx);
1001 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1002 ("ptbl_alloc: invalid pdir_idx"));
1003 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
1004 ("pte_alloc: valid ptbl entry exists!"));
1006 pbuf = ptbl_buf_alloc();
1008 panic("pte_alloc: couldn't alloc kernel virtual memory");
1010 ptbl = (pte_t *)pbuf->kva;
1012 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
1014 for (i = 0; i < PTBL_PAGES; i++) {
1015 pidx = (PTBL_PAGES * pdir_idx) + i;
1016 while ((m = vm_page_alloc(NULL, pidx,
1017 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
1019 rw_wunlock(&pvh_global_lock);
1021 ptbl_free_pmap_ptbl(pmap, ptbl);
1022 for (j = 0; j < i; j++)
1023 vm_page_free(mtbl[j]);
1024 atomic_subtract_int(&vm_cnt.v_wire_count, i);
1028 rw_wlock(&pvh_global_lock);
1034 /* Map allocated pages into kernel_pmap. */
1035 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
1037 /* Zero whole ptbl. */
1038 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
1040 /* Add pbuf to the pmap ptbl bufs list. */
1041 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
1046 /* Free ptbl pages and invalidate pdir entry. */
1048 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1056 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1057 (pmap == kernel_pmap), pdir_idx);
1059 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1060 ("ptbl_free: invalid pdir_idx"));
1062 ptbl = pmap->pm_pdir[pdir_idx];
1064 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
1066 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
1069 * Invalidate the pdir entry as soon as possible, so that other CPUs
1070 * don't attempt to look up the page tables we are releasing.
1072 mtx_lock_spin(&tlbivax_mutex);
1075 pmap->pm_pdir[pdir_idx] = NULL;
1078 mtx_unlock_spin(&tlbivax_mutex);
1080 for (i = 0; i < PTBL_PAGES; i++) {
1081 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
1082 pa = pte_vatopa(mmu, kernel_pmap, va);
1083 m = PHYS_TO_VM_PAGE(pa);
1084 vm_page_free_zero(m);
1085 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1086 mmu_booke_kremove(mmu, va);
1089 ptbl_free_pmap_ptbl(pmap, ptbl);
1093 * Decrement ptbl pages hold count and attempt to free ptbl pages.
1094 * Called when removing pte entry from ptbl.
1096 * Return 1 if ptbl pages were freed.
1099 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1106 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1107 (pmap == kernel_pmap), pdir_idx);
1109 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1110 ("ptbl_unhold: invalid pdir_idx"));
1111 KASSERT((pmap != kernel_pmap),
1112 ("ptbl_unhold: unholding kernel ptbl!"));
1114 ptbl = pmap->pm_pdir[pdir_idx];
1116 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
1117 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
1118 ("ptbl_unhold: non kva ptbl"));
1120 /* decrement hold count */
1121 for (i = 0; i < PTBL_PAGES; i++) {
1122 pa = pte_vatopa(mmu, kernel_pmap,
1123 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1124 m = PHYS_TO_VM_PAGE(pa);
1129 * Free ptbl pages if there are no pte entries in this ptbl.
1130 * wire_count has the same value for all ptbl pages, so check the last
1133 if (m->wire_count == 0) {
1134 ptbl_free(mmu, pmap, pdir_idx);
1136 //debugf("ptbl_unhold: e (freed ptbl)\n");
1144 * Increment hold count for ptbl pages. This routine is used when a new pte
1145 * entry is being inserted into the ptbl.
1148 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1155 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
1158 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1159 ("ptbl_hold: invalid pdir_idx"));
1160 KASSERT((pmap != kernel_pmap),
1161 ("ptbl_hold: holding kernel ptbl!"));
1163 ptbl = pmap->pm_pdir[pdir_idx];
1165 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
1167 for (i = 0; i < PTBL_PAGES; i++) {
1168 pa = pte_vatopa(mmu, kernel_pmap,
1169 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1170 m = PHYS_TO_VM_PAGE(pa);
1176 /* Allocate pv_entry structure. */
1183 if (pv_entry_count > pv_entry_high_water)
1184 pagedaemon_wakeup();
1185 pv = uma_zalloc(pvzone, M_NOWAIT);
1190 /* Free pv_entry structure. */
1191 static __inline void
1192 pv_free(pv_entry_t pve)
1196 uma_zfree(pvzone, pve);
1200 /* Allocate and initialize pv_entry structure. */
1202 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
1206 //int su = (pmap == kernel_pmap);
1207 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
1208 // (u_int32_t)pmap, va, (u_int32_t)m);
1212 panic("pv_insert: no pv entries!");
1214 pve->pv_pmap = pmap;
1217 /* add to pv_list */
1218 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1219 rw_assert(&pvh_global_lock, RA_WLOCKED);
1221 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
1223 //debugf("pv_insert: e\n");
1226 /* Destroy pv entry. */
1228 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
1232 //int su = (pmap == kernel_pmap);
1233 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
1235 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1236 rw_assert(&pvh_global_lock, RA_WLOCKED);
1239 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
1240 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
1241 /* remove from pv_list */
1242 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
1243 if (TAILQ_EMPTY(&m->md.pv_list))
1244 vm_page_aflag_clear(m, PGA_WRITEABLE);
1246 /* free pv entry struct */
1252 //debugf("pv_remove: e\n");
1255 #ifdef __powerpc64__
1257 * Clean pte entry, try to free page table page if requested.
1259 * Return 1 if ptbl pages were freed, otherwise return 0.
1262 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
1267 pte = pte_find(mmu, pmap, va);
1268 KASSERT(pte != NULL, ("%s: NULL pte", __func__));
1270 if (!PTE_ISVALID(pte))
1273 /* Get vm_page_t for mapped pte. */
1274 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1276 if (PTE_ISWIRED(pte))
1277 pmap->pm_stats.wired_count--;
1279 /* Handle managed entry. */
1280 if (PTE_ISMANAGED(pte)) {
1282 /* Handle modified pages. */
1283 if (PTE_ISMODIFIED(pte))
1286 /* Referenced pages. */
1287 if (PTE_ISREFERENCED(pte))
1288 vm_page_aflag_set(m, PGA_REFERENCED);
1290 /* Remove pv_entry from pv_list. */
1291 pv_remove(pmap, va, m);
1293 mtx_lock_spin(&tlbivax_mutex);
1296 tlb0_flush_entry(va);
1300 mtx_unlock_spin(&tlbivax_mutex);
1302 pmap->pm_stats.resident_count--;
1304 if (flags & PTBL_UNHOLD) {
1305 return (ptbl_unhold(mmu, pmap, va));
1311 * allocate a page of pointers to page directories, do not preallocate the
1315 pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
1317 vm_page_t mtbl [PDIR_PAGES];
1319 struct ptbl_buf *pbuf;
1325 pbuf = ptbl_buf_alloc();
1328 panic("%s: couldn't alloc kernel virtual memory", __func__);
1330 /* Allocate pdir pages; this will sleep! */
1331 for (i = 0; i < PDIR_PAGES; i++) {
1332 pidx = (PDIR_PAGES * pp2d_idx) + i;
1333 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
1334 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
1342 /* Map the allocated pages into kernel_pmap. */
1343 pdir = (pte_t **) pbuf->kva;
1344 pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
1346 /* Zero whole pdir. */
1347 bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
1349 /* Add pdir to the pmap pdir bufs list. */
1350 TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
1356 * Insert PTE for a given page and virtual address.
1359 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1362 unsigned int pp2d_idx = PP2D_IDX(va);
1363 unsigned int pdir_idx = PDIR_IDX(va);
1364 unsigned int ptbl_idx = PTBL_IDX(va);
1368 /* Get the page directory pointer. */
1369 pdir = pmap->pm_pp2d[pp2d_idx];
1371 pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
1373 /* Get the page table pointer. */
1374 ptbl = pdir[pdir_idx];
1377 /* Allocate page table pages. */
1378 ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
1380 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1385 * Check if there is valid mapping for requested va, if there
1388 pte = &pdir[pdir_idx][ptbl_idx];
1389 if (PTE_ISVALID(pte)) {
1390 pte_remove(mmu, pmap, va, PTBL_HOLD);
1393 * pte is not used, increment hold count for ptbl
1396 if (pmap != kernel_pmap)
1397 ptbl_hold(mmu, pmap, pdir, pdir_idx);
1401 if (pdir[pdir_idx] == NULL) {
1402 if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
1403 pdir_hold(mmu, pmap, pdir);
1404 pdir[pdir_idx] = ptbl;
1406 if (pmap->pm_pp2d[pp2d_idx] == NULL)
1407 pmap->pm_pp2d[pp2d_idx] = pdir;
1410 * Insert pv_entry into pv_list for mapped page if part of managed
1413 if ((m->oflags & VPO_UNMANAGED) == 0) {
1414 flags |= PTE_MANAGED;
1416 /* Create and insert pv entry. */
1417 pv_insert(pmap, va, m);
1420 mtx_lock_spin(&tlbivax_mutex);
1423 tlb0_flush_entry(va);
1424 pmap->pm_stats.resident_count++;
1425 pte = &pdir[pdir_idx][ptbl_idx];
1426 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1427 *pte |= (PTE_VALID | flags);
1430 mtx_unlock_spin(&tlbivax_mutex);
1435 /* Return the pa for the given pmap/va. */
1437 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1442 pte = pte_find(mmu, pmap, va);
1443 if ((pte != NULL) && PTE_ISVALID(pte))
1444 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1449 /* Set up the kernel page tables. */
1451 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1458 /* Initialize kernel pdir */
1459 for (i = 0; i < kernel_pdirs; i++) {
1460 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
1461 (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
1462 for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
1463 j < PDIR_NENTRIES; j++) {
1464 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
1465 (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
1466 (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
1471 * Fill in PTEs covering kernel code and data. They are not required
1472 * for address translation, as this area is covered by static TLB1
1473 * entries, but for pte_vatopa() to work correctly with kernel area
1476 for (va = addr; va < data_end; va += PAGE_SIZE) {
1477 pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
1478 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1479 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1480 PTE_VALID | PTE_PS_4KB;
1485 * Clean pte entry, try to free page table page if requested.
1487 * Return 1 if ptbl pages were freed, otherwise return 0.
1490 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
1492 unsigned int pdir_idx = PDIR_IDX(va);
1493 unsigned int ptbl_idx = PTBL_IDX(va);
1498 //int su = (pmap == kernel_pmap);
1499 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
1500 // su, (u_int32_t)pmap, va, flags);
1502 ptbl = pmap->pm_pdir[pdir_idx];
1503 KASSERT(ptbl, ("pte_remove: null ptbl"));
1505 pte = &ptbl[ptbl_idx];
1507 if (pte == NULL || !PTE_ISVALID(pte))
1510 if (PTE_ISWIRED(pte))
1511 pmap->pm_stats.wired_count--;
1513 /* Get vm_page_t for mapped pte. */
1514 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1516 /* Handle managed entry. */
1517 if (PTE_ISMANAGED(pte)) {
1519 if (PTE_ISMODIFIED(pte))
1522 if (PTE_ISREFERENCED(pte))
1523 vm_page_aflag_set(m, PGA_REFERENCED);
1525 pv_remove(pmap, va, m);
1526 } else if (m->md.pv_tracked) {
1528 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
1529 * used. This is needed by the NCSW support code for fast
1530 * VA<->PA translation.
1532 pv_remove(pmap, va, m);
1533 if (TAILQ_EMPTY(&m->md.pv_list))
1534 m->md.pv_tracked = false;
1537 mtx_lock_spin(&tlbivax_mutex);
1540 tlb0_flush_entry(va);
1544 mtx_unlock_spin(&tlbivax_mutex);
1546 pmap->pm_stats.resident_count--;
1548 if (flags & PTBL_UNHOLD) {
1549 //debugf("pte_remove: e (unhold)\n");
1550 return (ptbl_unhold(mmu, pmap, pdir_idx));
1553 //debugf("pte_remove: e\n");
1558 * Insert PTE for a given page and virtual address.
1561 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1564 unsigned int pdir_idx = PDIR_IDX(va);
1565 unsigned int ptbl_idx = PTBL_IDX(va);
1568 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
1569 pmap == kernel_pmap, pmap, va);
1571 /* Get the page table pointer. */
1572 ptbl = pmap->pm_pdir[pdir_idx];
1575 /* Allocate page table pages. */
1576 ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
1578 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1583 * Check if there is valid mapping for requested
1584 * va, if there is, remove it.
1586 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
1587 if (PTE_ISVALID(pte)) {
1588 pte_remove(mmu, pmap, va, PTBL_HOLD);
1591 * pte is not used, increment hold count
1594 if (pmap != kernel_pmap)
1595 ptbl_hold(mmu, pmap, pdir_idx);
1600 * Insert pv_entry into pv_list for mapped page if part of managed
1603 if ((m->oflags & VPO_UNMANAGED) == 0) {
1604 flags |= PTE_MANAGED;
1606 /* Create and insert pv entry. */
1607 pv_insert(pmap, va, m);
1610 pmap->pm_stats.resident_count++;
1612 mtx_lock_spin(&tlbivax_mutex);
1615 tlb0_flush_entry(va);
1616 if (pmap->pm_pdir[pdir_idx] == NULL) {
1618 * If we just allocated a new page table, hook it in
1621 pmap->pm_pdir[pdir_idx] = ptbl;
1623 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
1624 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1625 *pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
1628 mtx_unlock_spin(&tlbivax_mutex);
1632 /* Return the pa for the given pmap/va. */
1634 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1639 pte = pte_find(mmu, pmap, va);
1640 if ((pte != NULL) && PTE_ISVALID(pte))
1641 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1645 /* Get a pointer to a PTE in a page table. */
1647 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1649 unsigned int pdir_idx = PDIR_IDX(va);
1650 unsigned int ptbl_idx = PTBL_IDX(va);
1652 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
1654 if (pmap->pm_pdir[pdir_idx])
1655 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
1660 /* Set up kernel page tables. */
1662 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1668 /* Initialize kernel pdir */
1669 for (i = 0; i < kernel_ptbls; i++)
1670 kernel_pmap->pm_pdir[kptbl_min + i] =
1671 (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
1674 * Fill in PTEs covering kernel code and data. They are not required
1675 * for address translation, as this area is covered by static TLB1
1676 * entries, but for pte_vatopa() to work correctly with kernel area
1679 for (va = addr; va < data_end; va += PAGE_SIZE) {
1680 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1681 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1682 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1683 PTE_VALID | PTE_PS_4KB;
1688 /**************************************************************************/
1690 /**************************************************************************/
1693 * This is called during booke_init, before the system is really initialized.
1696 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
1698 vm_paddr_t phys_kernelend;
1699 struct mem_region *mp, *mp1;
1701 vm_paddr_t s, e, sz;
1702 vm_paddr_t physsz, hwphyssz;
1703 u_int phys_avail_count;
1704 vm_size_t kstack0_sz;
1705 vm_offset_t kernel_pdir, kstack0;
1706 vm_paddr_t kstack0_phys;
1709 debugf("mmu_booke_bootstrap: entered\n");
1711 /* Set interesting system properties */
1713 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
1717 /* Initialize invalidation mutex */
1718 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
1720 /* Read TLB0 size and associativity. */
1724 * Align kernel start and end address (kernel image).
1725 * Note that kernel end does not necessarily relate to kernsize.
1726 * kernsize is the size of the kernel that is actually mapped.
1728 kernstart = trunc_page(start);
1729 data_start = round_page(kernelend);
1730 data_end = data_start;
1733 * Addresses of preloaded modules (like file systems) use
1734 * physical addresses. Make sure we relocate those into
1735 * virtual addresses.
1737 preload_addr_relocate = kernstart - kernload;
1739 /* Allocate the dynamic per-cpu area. */
1740 dpcpu = (void *)data_end;
1741 data_end += DPCPU_SIZE;
1743 /* Allocate space for the message buffer. */
1744 msgbufp = (struct msgbuf *)data_end;
1745 data_end += msgbufsize;
1746 debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1747 (uintptr_t)msgbufp, data_end);
1749 data_end = round_page(data_end);
1751 /* Allocate space for ptbl_bufs. */
1752 ptbl_bufs = (struct ptbl_buf *)data_end;
1753 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1754 debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1755 (uintptr_t)ptbl_bufs, data_end);
1757 data_end = round_page(data_end);
1759 /* Allocate PTE tables for kernel KVA. */
1760 kernel_pdir = data_end;
1761 kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
1763 #ifdef __powerpc64__
1764 kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
1765 data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
1767 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1768 debugf(" kernel ptbls: %d\n", kernel_ptbls);
1769 debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1770 kernel_pdir, data_end);
1772 debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
1773 if (data_end - kernstart > kernsize) {
1774 kernsize += tlb1_mapin_region(kernstart + kernsize,
1775 kernload + kernsize, (data_end - kernstart) - kernsize);
1777 data_end = kernstart + kernsize;
1778 debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
1781 * Clear the structures - note we can only do it safely after the
1782 * possible additional TLB1 translations are in place (above) so that
1783 * the whole range up to the currently calculated 'data_end' is covered.
1785 dpcpu_init(dpcpu, 0);
1786 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1787 #ifdef __powerpc64__
1788 memset((void *)kernel_pdir, 0,
1789 kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
1790 kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1792 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1795 /*******************************************************/
1796 /* Set the start and end of kva. */
1797 /*******************************************************/
1798 virtual_avail = round_page(data_end);
1799 virtual_end = VM_MAX_KERNEL_ADDRESS;
1801 /* Allocate KVA space for page zero/copy operations. */
1802 zero_page_va = virtual_avail;
1803 virtual_avail += PAGE_SIZE;
1804 copy_page_src_va = virtual_avail;
1805 virtual_avail += PAGE_SIZE;
1806 copy_page_dst_va = virtual_avail;
1807 virtual_avail += PAGE_SIZE;
1808 debugf("zero_page_va = 0x%08x\n", zero_page_va);
1809 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1810 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1812 /* Initialize page zero/copy mutexes. */
1813 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1814 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1816 /* Allocate KVA space for ptbl bufs. */
1817 ptbl_buf_pool_vabase = virtual_avail;
1818 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1819 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1820 ptbl_buf_pool_vabase, virtual_avail);
1822 /* Calculate corresponding physical addresses for the kernel region. */
1823 phys_kernelend = kernload + kernsize;
1824 debugf("kernel image and allocated data:\n");
1825 debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
1826 debugf(" kernstart = 0x%08x\n", kernstart);
1827 debugf(" kernsize = 0x%08x\n", kernsize);
1829 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1830 panic("mmu_booke_bootstrap: phys_avail too small");
1833 * Remove kernel physical address range from avail regions list. Page
1834 * align all regions. Non-page aligned memory isn't very interesting
1835 * to us. Also, sort the entries for ascending addresses.
1838 /* Retrieve phys/avail mem regions */
1839 mem_regions(&physmem_regions, &physmem_regions_sz,
1840 &availmem_regions, &availmem_regions_sz);
1842 cnt = availmem_regions_sz;
1843 debugf("processing avail regions:\n");
1844 for (mp = availmem_regions; mp->mr_size; mp++) {
1846 e = mp->mr_start + mp->mr_size;
1847 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
1848 /* Check whether this region holds all of the kernel. */
1849 if (s < kernload && e > phys_kernelend) {
1850 availmem_regions[cnt].mr_start = phys_kernelend;
1851 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1854 /* Look whether this region starts within the kernel. */
1855 if (s >= kernload && s < phys_kernelend) {
1856 if (e <= phys_kernelend)
1860 /* Now look whether this region ends within the kernel. */
1861 if (e > kernload && e <= phys_kernelend) {
1866 /* Now page align the start and size of the region. */
1872 debugf("%09jx-%09jx = %jx\n",
1873 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
1875 /* Check whether some memory is left here. */
1879 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1885 /* Do an insertion sort. */
1886 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1887 if (s < mp1->mr_start)
1890 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1898 availmem_regions_sz = cnt;
1900 /*******************************************************/
1901 /* Steal physical memory for kernel stack from the end */
1902 /* of the first avail region */
1903 /*******************************************************/
1904 kstack0_sz = kstack_pages * PAGE_SIZE;
1905 kstack0_phys = availmem_regions[0].mr_start +
1906 availmem_regions[0].mr_size;
1907 kstack0_phys -= kstack0_sz;
1908 availmem_regions[0].mr_size -= kstack0_sz;
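	/*
	 * Worked example (assumed typical values, not taken from this code):
	 * with kstack_pages = 4 and 4KB pages, kstack0_sz is 16KB, so the
	 * last 16KB of the first avail region are carved off here and later
	 * mapped at kstack0 behind KSTACK_GUARD_PAGES of unmapped KVA.
	 */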
1910 /*******************************************************/
1911 /* Fill in phys_avail table, based on availmem_regions */
1912 /*******************************************************/
1913 phys_avail_count = 0;
1916 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1918 debugf("fill in phys_avail:\n");
1919 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1921 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
1922 (uintmax_t)availmem_regions[i].mr_start,
1923 (uintmax_t)availmem_regions[i].mr_start +
1924 availmem_regions[i].mr_size,
1925 (uintmax_t)availmem_regions[i].mr_size);
1927 if (hwphyssz != 0 &&
1928 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1929 debugf(" hw.physmem adjust\n");
1930 if (physsz < hwphyssz) {
1931 phys_avail[j] = availmem_regions[i].mr_start;
1933 availmem_regions[i].mr_start +
1941 phys_avail[j] = availmem_regions[i].mr_start;
1942 phys_avail[j + 1] = availmem_regions[i].mr_start +
1943 availmem_regions[i].mr_size;
1945 physsz += availmem_regions[i].mr_size;
1947 physmem = btoc(physsz);
1949 /* Calculate the last available physical address. */
1950 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1952 Maxmem = powerpc_btop(phys_avail[i + 1]);
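	/*
	 * Example (illustrative numbers only): if the last usable physical
	 * address is 0x8000_0000 (2GB), powerpc_btop() yields
	 * Maxmem = 0x8000_0000 >> PAGE_SHIFT = 0x80000 4KB pages.
	 */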
1954 debugf("Maxmem = 0x%08lx\n", Maxmem);
1955 debugf("phys_avail_count = %d\n", phys_avail_count);
1956 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
1957 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
1959 /*******************************************************/
1960 /* Initialize (statically allocated) kernel pmap. */
1961 /*******************************************************/
1962 PMAP_LOCK_INIT(kernel_pmap);
1963 #ifndef __powerpc64__
1964 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1967 debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
1968 kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
1969 for (i = 0; i < MAXCPU; i++) {
1970 kernel_pmap->pm_tid[i] = TID_KERNEL;
1972 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1973 tidbusy[i][TID_KERNEL] = kernel_pmap;
1976 /* Mark kernel_pmap active on all CPUs */
1977 CPU_FILL(&kernel_pmap->pm_active);
1980 * Initialize the global pv list lock.
1982 rw_init(&pvh_global_lock, "pmap pv global");
1984 /*******************************************************/
1986 /*******************************************************/
1988 /* Enter kstack0 into kernel map, provide guard page */
1989 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1990 thread0.td_kstack = kstack0;
1991 thread0.td_kstack_pages = kstack_pages;
1993 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
1994 debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
1995 kstack0_phys, kstack0_phys + kstack0_sz);
1996 debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
1997 kstack0, kstack0 + kstack0_sz);
1999 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
2000 for (i = 0; i < kstack_pages; i++) {
2001 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
2002 kstack0 += PAGE_SIZE;
2003 kstack0_phys += PAGE_SIZE;
2006 pmap_bootstrapped = 1;
2008 debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
2009 debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);
2011 debugf("mmu_booke_bootstrap: exit\n");
2018 tlb_entry_t *e, tmp;
2021 /* Prepare TLB1 image for AP processors */
2023 for (i = 0; i < TLB1_ENTRIES; i++) {
2024 tlb1_read_entry(&tmp, i);
2026 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
2027 memcpy(e++, &tmp, sizeof(tmp));
2032 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
2037 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
2038 * have the snapshot of its contents in the s/w __boot_tlb1[] table
2039 * created by tlb1_ap_prep(), so use these values directly to
2040 * (re)program AP's TLB1 hardware.
2042 * Start at index 1 because index 0 has the kernel map.
2044 for (i = 1; i < TLB1_ENTRIES; i++) {
2045 if (__boot_tlb1[i].mas1 & MAS1_VALID)
2046 tlb1_write_entry(&__boot_tlb1[i], i);
2049 set_mas4_defaults();
2054 booke_pmap_init_qpages(void)
2061 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
2062 if (pc->pc_qmap_addr == 0)
2063 panic("pmap_init_qpages: unable to allocate KVA");
2067 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
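/*
 * Usage note (a cross-reference, not new behavior): the per-CPU pc_qmap_addr
 * page allocated above is the KVA slot that mmu_booke_quick_enter_page() and
 * mmu_booke_quick_remove_page() remap to give each CPU a cheap temporary
 * mapping of a single page.
 */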
2070 * Get the physical page address for the given pmap/virtual address.
2073 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
2078 pa = pte_vatopa(mmu, pmap, va);
2085 * Extract the physical page address associated with the given
2086 * kernel virtual address.
2089 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
2094 /* Check TLB1 mappings */
2095 for (i = 0; i < TLB1_ENTRIES; i++) {
2096 tlb1_read_entry(&e, i);
2097 if (!(e.mas1 & MAS1_VALID))
2099 if (va >= e.virt && va < e.virt + e.size)
2100 return (e.phys + (va - e.virt));
2103 return (pte_vatopa(mmu, kernel_pmap, va));
2107 * Initialize the pmap module.
2108 * Called by vm_init, to initialize any structures that the pmap
2109 * system needs to map virtual memory.
2112 mmu_booke_init(mmu_t mmu)
2114 int shpgperproc = PMAP_SHPGPERPROC;
2117 * Initialize the address space (zone) for the pv entries. Set a
2118 * high water mark so that the system can recover from excessive
2119 * numbers of pv entries.
2121 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
2122 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
2124 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
2125 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
2127 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
2128 pv_entry_high_water = 9 * (pv_entry_max / 10);
2130 uma_zone_reserve_kva(pvzone, pv_entry_max);
2132 /* Pre-fill pvzone with initial number of pv entries. */
2133 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
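	/*
	 * Worked numbers (assumed example values): with the default
	 * shpgperproc = 200, an assumed maxproc = 1000 and ~262144 managed
	 * pages (1GB of 4KB pages), pv_entry_max is about 462144 and
	 * pv_entry_high_water about 415926; once pv_entry_count crosses that
	 * mark, pv_alloc() wakes the page daemon.
	 */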
2135 /* Initialize ptbl allocation. */
2140 * Map a list of wired pages into kernel virtual address space. This is
2141 * intended for temporary mappings which do not need page modification or
2142 * references recorded. Existing mappings in the region are overwritten.
2145 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
2150 while (count-- > 0) {
2151 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2158 * Remove page mappings from kernel virtual address space. Intended for
2159 * temporary mappings entered by mmu_booke_qenter.
2162 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
2167 while (count-- > 0) {
2168 mmu_booke_kremove(mmu, va);
2174 * Map a wired page into kernel virtual address space.
2177 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
2180 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
2184 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2189 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2190 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
2192 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2193 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
2194 flags |= PTE_PS_4KB;
2196 pte = pte_find(mmu, kernel_pmap, va);
2197 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
2199 mtx_lock_spin(&tlbivax_mutex);
2202 if (PTE_ISVALID(pte)) {
2204 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
2206 /* Flush entry from TLB0 */
2207 tlb0_flush_entry(va);
2210 *pte = PTE_RPN_FROM_PA(pa) | flags;
2212 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
2213 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
2214 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
2216 /* Flush the real memory from the instruction cache. */
2217 if ((flags & (PTE_I | PTE_G)) == 0)
2218 __syncicache((void *)va, PAGE_SIZE);
2221 mtx_unlock_spin(&tlbivax_mutex);
2225 * Remove a page from kernel page table.
2228 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
2232 CTR2(KTR_PMAP,"%s: s (va = 0x%08x)\n", __func__, va);
2234 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2235 (va <= VM_MAX_KERNEL_ADDRESS)),
2236 ("mmu_booke_kremove: invalid va"));
2238 pte = pte_find(mmu, kernel_pmap, va);
2240 if (!PTE_ISVALID(pte)) {
2242 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
2247 mtx_lock_spin(&tlbivax_mutex);
2250 /* Invalidate entry in TLB0, update PTE. */
2251 tlb0_flush_entry(va);
2255 mtx_unlock_spin(&tlbivax_mutex);
2259 * Initialize pmap associated with process 0.
2262 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
2265 PMAP_LOCK_INIT(pmap);
2266 mmu_booke_pinit(mmu, pmap);
2267 PCPU_SET(curpmap, pmap);
2271 * Initialize a preallocated and zeroed pmap structure,
2272 * such as one in a vmspace structure.
2275 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
2279 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
2280 curthread->td_proc->p_pid, curthread->td_proc->p_comm);
2282 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
2284 for (i = 0; i < MAXCPU; i++)
2285 pmap->pm_tid[i] = TID_NONE;
2286 CPU_ZERO(&kernel_pmap->pm_active);
2287 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2288 #ifdef __powerpc64__
2289 bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
2290 TAILQ_INIT(&pmap->pm_pdir_list);
2292 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
2294 TAILQ_INIT(&pmap->pm_ptbl_list);
2298 * Release any resources held by the given physical map.
2299 * Called when a pmap initialized by mmu_booke_pinit is being released.
2300 * Should only be called if the map contains no valid mappings.
2303 mmu_booke_release(mmu_t mmu, pmap_t pmap)
2306 KASSERT(pmap->pm_stats.resident_count == 0,
2307 ("pmap_release: pmap resident count %ld != 0",
2308 pmap->pm_stats.resident_count));
2312 * Insert the given physical page at the specified virtual address in the
2313 * target physical map with the protection requested. If specified, the page
2314 * will be wired down.
2317 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2318 vm_prot_t prot, u_int flags, int8_t psind)
2322 rw_wlock(&pvh_global_lock);
2324 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
2326 rw_wunlock(&pvh_global_lock);
2331 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2332 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
2337 int error, su, sync;
2339 pa = VM_PAGE_TO_PHYS(m);
2340 su = (pmap == kernel_pmap);
2343 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
2344 // "pa=0x%08x prot=0x%08x flags=%#x)\n",
2345 // (u_int32_t)pmap, su, pmap->pm_tid,
2346 // (u_int32_t)m, va, pa, prot, flags);
2349 KASSERT(((va >= virtual_avail) &&
2350 (va <= VM_MAX_KERNEL_ADDRESS)),
2351 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
2353 KASSERT((va <= VM_MAXUSER_ADDRESS),
2354 ("mmu_booke_enter_locked: user pmap, non user va"));
2356 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2357 VM_OBJECT_ASSERT_LOCKED(m->object);
2359 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2362 * If there is an existing mapping, and the physical address has not
2363 * changed, this must be a protection or wiring change.
2365 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
2366 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
2369 * Before actually updating pte->flags we calculate and
2370 * prepare its new value in a helper var.
2373 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
2375 /* Wiring change, just update stats. */
2376 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
2377 if (!PTE_ISWIRED(pte)) {
2379 pmap->pm_stats.wired_count++;
2382 if (PTE_ISWIRED(pte)) {
2383 flags &= ~PTE_WIRED;
2384 pmap->pm_stats.wired_count--;
2388 if (prot & VM_PROT_WRITE) {
2389 /* Add write permissions. */
2394 if ((flags & PTE_MANAGED) != 0)
2395 vm_page_aflag_set(m, PGA_WRITEABLE);
2397 /* Handle modified pages, sense modify status. */
2400 * The PTE_MODIFIED flag could have been set by underlying
2401 * TLB misses since we last read it (above), and other
2402 * CPUs could have updated it as well, so check the PTE
2403 * directly rather than relying on the saved local flags.
2406 if (PTE_ISMODIFIED(pte))
2410 if (prot & VM_PROT_EXECUTE) {
2416 * Check existing flags for execute permissions: if we
2417 * are turning execute permissions on, the icache should be flushed.
2420 if ((*pte & (PTE_UX | PTE_SX)) == 0)
2424 flags &= ~PTE_REFERENCED;
2427 * The new flags value is all calculated -- only now actually update the PTE.
2430 mtx_lock_spin(&tlbivax_mutex);
2433 tlb0_flush_entry(va);
2434 *pte &= ~PTE_FLAGS_MASK;
2438 mtx_unlock_spin(&tlbivax_mutex);
2442 * If there is an existing mapping, but it's for a different
2443 * physical address, pte_enter() will delete the old mapping.
2445 //if ((pte != NULL) && PTE_ISVALID(pte))
2446 // debugf("mmu_booke_enter_locked: replace\n");
2448 // debugf("mmu_booke_enter_locked: new\n");
2450 /* Now set up the flags and install the new mapping. */
2451 flags = (PTE_SR | PTE_VALID);
2457 if (prot & VM_PROT_WRITE) {
2462 if ((m->oflags & VPO_UNMANAGED) == 0)
2463 vm_page_aflag_set(m, PGA_WRITEABLE);
2466 if (prot & VM_PROT_EXECUTE) {
2472 /* If it's wired, update stats. */
2473 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2476 error = pte_enter(mmu, pmap, m, va, flags,
2477 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
2479 return (KERN_RESOURCE_SHORTAGE);
2481 if ((flags & PMAP_ENTER_WIRED) != 0)
2482 pmap->pm_stats.wired_count++;
2484 /* Flush the real memory from the instruction cache. */
2485 if (prot & VM_PROT_EXECUTE)
2489 if (sync && (su || pmap == PCPU_GET(curpmap))) {
2490 __syncicache((void *)va, PAGE_SIZE);
2494 return (KERN_SUCCESS);
2498 * Maps a sequence of resident pages belonging to the same object.
2499 * The sequence begins with the given page m_start. This page is
2500 * mapped at the given virtual address start. Each subsequent page is
2501 * mapped at a virtual address that is offset from start by the same
2502 * amount as the page is offset from m_start within the object. The
2503 * last page in the sequence is the page with the largest offset from
2504 * m_start that can be mapped at a virtual address less than the given
2505 * virtual address end. Not every virtual page between start and end
2506 * is mapped; only those for which a resident page exists with the
2507 * corresponding offset from m_start are mapped.
2510 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
2511 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
2514 vm_pindex_t diff, psize;
2516 VM_OBJECT_ASSERT_LOCKED(m_start->object);
2518 psize = atop(end - start);
2520 rw_wlock(&pvh_global_lock);
2522 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2523 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
2524 prot & (VM_PROT_READ | VM_PROT_EXECUTE),
2525 PMAP_ENTER_NOSLEEP, 0);
2526 m = TAILQ_NEXT(m, listq);
2528 rw_wunlock(&pvh_global_lock);
2533 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2537 rw_wlock(&pvh_global_lock);
2539 mmu_booke_enter_locked(mmu, pmap, va, m,
2540 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
2542 rw_wunlock(&pvh_global_lock);
2547 * Remove the given range of addresses from the specified map.
2549 * It is assumed that the start and end are properly rounded to the page size.
2552 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
2557 int su = (pmap == kernel_pmap);
2559 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
2560 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
2563 KASSERT(((va >= virtual_avail) &&
2564 (va <= VM_MAX_KERNEL_ADDRESS)),
2565 ("mmu_booke_remove: kernel pmap, non kernel va"));
2567 KASSERT((va <= VM_MAXUSER_ADDRESS),
2568 ("mmu_booke_remove: user pmap, non user va"));
2571 if (PMAP_REMOVE_DONE(pmap)) {
2572 //debugf("mmu_booke_remove: e (empty)\n");
2576 hold_flag = PTBL_HOLD_FLAG(pmap);
2577 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
2579 rw_wlock(&pvh_global_lock);
2581 for (; va < endva; va += PAGE_SIZE) {
2582 pte = pte_find(mmu, pmap, va);
2583 if ((pte != NULL) && PTE_ISVALID(pte))
2584 pte_remove(mmu, pmap, va, hold_flag);
2587 rw_wunlock(&pvh_global_lock);
2589 //debugf("mmu_booke_remove: e\n");
2593 * Remove physical page from all pmaps in which it resides.
2596 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
2601 rw_wlock(&pvh_global_lock);
2602 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
2603 pvn = TAILQ_NEXT(pv, pv_link);
2605 PMAP_LOCK(pv->pv_pmap);
2606 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
2607 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
2608 PMAP_UNLOCK(pv->pv_pmap);
2610 vm_page_aflag_clear(m, PGA_WRITEABLE);
2611 rw_wunlock(&pvh_global_lock);
2615 * Map a range of physical addresses into kernel virtual address space.
2618 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2619 vm_paddr_t pa_end, int prot)
2621 vm_offset_t sva = *virt;
2622 vm_offset_t va = sva;
2624 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
2625 // sva, pa_start, pa_end);
2627 while (pa_start < pa_end) {
2628 mmu_booke_kenter(mmu, va, pa_start);
2630 pa_start += PAGE_SIZE;
2634 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
2639 * The pmap must be activated before its address space can be accessed in any way.
2643 mmu_booke_activate(mmu_t mmu, struct thread *td)
2648 pmap = &td->td_proc->p_vmspace->vm_pmap;
2650 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
2651 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2653 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
2657 cpuid = PCPU_GET(cpuid);
2658 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2659 PCPU_SET(curpmap, pmap);
2661 if (pmap->pm_tid[cpuid] == TID_NONE)
2664 /* Load PID0 register with pmap tid value. */
2665 mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
2666 __asm __volatile("isync");
2668 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
2672 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
2673 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
2677 * Deactivate the specified process's address space.
2680 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
2684 pmap = &td->td_proc->p_vmspace->vm_pmap;
2686 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
2687 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2689 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
2691 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
2692 PCPU_SET(curpmap, NULL);
2696 * Copy the range specified by src_addr/len
2697 * from the source map to the range dst_addr/len
2698 * in the destination map.
2700 * This routine is only advisory and need not do anything.
2703 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2704 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2710 * Set the physical protection on the specified range of this map as requested.
2713 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2720 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2721 mmu_booke_remove(mmu, pmap, sva, eva);
2725 if (prot & VM_PROT_WRITE)
2729 for (va = sva; va < eva; va += PAGE_SIZE) {
2730 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2731 if (PTE_ISVALID(pte)) {
2732 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2734 mtx_lock_spin(&tlbivax_mutex);
2737 /* Handle modified pages. */
2738 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
2741 tlb0_flush_entry(va);
2742 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2745 mtx_unlock_spin(&tlbivax_mutex);
2753 * Clear the write and modified bits in each of the given page's mappings.
2756 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2761 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2762 ("mmu_booke_remove_write: page %p is not managed", m));
2765 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2766 * set by another thread while the object is locked. Thus,
2767 * if PGA_WRITEABLE is clear, no page table entries need updating.
2769 VM_OBJECT_ASSERT_WLOCKED(m->object);
2770 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2772 rw_wlock(&pvh_global_lock);
2773 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2774 PMAP_LOCK(pv->pv_pmap);
2775 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2776 if (PTE_ISVALID(pte)) {
2777 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2779 mtx_lock_spin(&tlbivax_mutex);
2782 /* Handle modified pages. */
2783 if (PTE_ISMODIFIED(pte))
2786 /* Flush mapping from TLB0. */
2787 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2790 mtx_unlock_spin(&tlbivax_mutex);
2793 PMAP_UNLOCK(pv->pv_pmap);
2795 vm_page_aflag_clear(m, PGA_WRITEABLE);
2796 rw_wunlock(&pvh_global_lock);
2800 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2809 va = trunc_page(va);
2810 sz = round_page(sz);
2812 rw_wlock(&pvh_global_lock);
2813 pmap = PCPU_GET(curpmap);
2814 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2817 pte = pte_find(mmu, pm, va);
2818 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2824 /* Create a mapping in the active pmap. */
2826 m = PHYS_TO_VM_PAGE(pa);
2828 pte_enter(mmu, pmap, m, addr,
2829 PTE_SR | PTE_VALID | PTE_UR, FALSE);
2830 __syncicache((void *)addr, PAGE_SIZE);
2831 pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2834 __syncicache((void *)va, PAGE_SIZE);
2839 rw_wunlock(&pvh_global_lock);
2843 * Atomically extract and hold the physical page with the given
2844 * pmap and virtual address pair if that mapping permits the given
2848 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2860 pte = pte_find(mmu, pmap, va);
2861 if ((pte != NULL) && PTE_ISVALID(pte)) {
2862 if (pmap == kernel_pmap)
2867 if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2868 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2870 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2881 * Initialize a vm_page's machine-dependent fields.
2884 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2887 TAILQ_INIT(&m->md.pv_list);
2891 * mmu_booke_zero_page_area zeros the specified hardware page by
2892 * mapping it into virtual memory and using bzero to clear its contents.
2895 * off and size must reside within a single page.
2898 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2902 /* XXX KASSERT off and size are within a single page? */
2904 mtx_lock(&zero_page_mutex);
2907 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2908 bzero((caddr_t)va + off, size);
2909 mmu_booke_kremove(mmu, va);
2911 mtx_unlock(&zero_page_mutex);
2915 * mmu_booke_zero_page zeros the specified hardware page.
2918 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2920 vm_offset_t off, va;
2922 mtx_lock(&zero_page_mutex);
2925 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
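/*
 * dcbz zeroes an entire cache line per iteration, so stepping through the
 * page by cacheline_size clears it without first reading it from memory.
 */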
2926 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
2927 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
2928 mmu_booke_kremove(mmu, va);
2930 mtx_unlock(&zero_page_mutex);
2934 * mmu_booke_copy_page copies the specified (machine independent) page by
2935 * mapping the page into virtual memory and using memcpy to copy the page,
2936 * one machine dependent page at a time.
2939 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2941 vm_offset_t sva, dva;
2943 sva = copy_page_src_va;
2944 dva = copy_page_dst_va;
2946 mtx_lock(&copy_page_mutex);
2947 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2948 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2949 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2950 mmu_booke_kremove(mmu, dva);
2951 mmu_booke_kremove(mmu, sva);
2952 mtx_unlock(&copy_page_mutex);
2956 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
2957 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
2960 vm_offset_t a_pg_offset, b_pg_offset;
2963 mtx_lock(&copy_page_mutex);
2964 while (xfersize > 0) {
2965 a_pg_offset = a_offset & PAGE_MASK;
2966 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2967 mmu_booke_kenter(mmu, copy_page_src_va,
2968 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
2969 a_cp = (char *)copy_page_src_va + a_pg_offset;
2970 b_pg_offset = b_offset & PAGE_MASK;
2971 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2972 mmu_booke_kenter(mmu, copy_page_dst_va,
2973 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
2974 b_cp = (char *)copy_page_dst_va + b_pg_offset;
2975 bcopy(a_cp, b_cp, cnt);
2976 mmu_booke_kremove(mmu, copy_page_dst_va);
2977 mmu_booke_kremove(mmu, copy_page_src_va);
2982 mtx_unlock(&copy_page_mutex);
2986 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
2993 paddr = VM_PAGE_TO_PHYS(m);
2995 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2996 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
2997 flags |= PTE_PS_4KB;
3000 qaddr = PCPU_GET(qmap_addr);
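/* Use this CPU's private quick-map VA, reserved at boot by booke_pmap_init_qpages(). */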
3002 pte = pte_find(mmu, kernel_pmap, qaddr);
3004 KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
3007 * XXX: tlbivax is broadcast to other cores, but qaddr should
3008 * not be present in other TLBs. Is there a better instruction
3009 * sequence to use? Or just forget it & use mmu_booke_kenter()...
3011 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
3012 __asm __volatile("isync; msync");
3014 *pte = PTE_RPN_FROM_PA(paddr) | flags;
3016 /* Flush the real memory from the instruction cache. */
3017 if ((flags & (PTE_I | PTE_G)) == 0)
3018 __syncicache((void *)qaddr, PAGE_SIZE);
3024 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
3028 pte = pte_find(mmu, kernel_pmap, addr);
3030 KASSERT(PCPU_GET(qmap_addr) == addr,
3031 ("mmu_booke_quick_remove_page: invalid address"));
3033 ("mmu_booke_quick_remove_page: PTE not in use"));
3040 * Return whether or not the specified physical page was modified
3041 * in any of the physical maps.
3044 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
3050 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3051 ("mmu_booke_is_modified: page %p is not managed", m));
3055 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3056 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
3057 * is clear, no PTEs can be modified.
3059 VM_OBJECT_ASSERT_WLOCKED(m->object);
3060 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3062 rw_wlock(&pvh_global_lock);
3063 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3064 PMAP_LOCK(pv->pv_pmap);
3065 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3067 if (PTE_ISMODIFIED(pte))
3070 PMAP_UNLOCK(pv->pv_pmap);
3074 rw_wunlock(&pvh_global_lock);
3079 * Return whether or not the specified virtual address is eligible for prefault.
3083 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
3090 * Return whether or not the specified physical page was referenced
3091 * in any physical maps.
3094 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
3100 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3101 ("mmu_booke_is_referenced: page %p is not managed", m));
3103 rw_wlock(&pvh_global_lock);
3104 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3105 PMAP_LOCK(pv->pv_pmap);
3106 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3108 if (PTE_ISREFERENCED(pte))
3111 PMAP_UNLOCK(pv->pv_pmap);
3115 rw_wunlock(&pvh_global_lock);
3120 * Clear the modify bits on the specified physical page.
3123 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
3128 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3129 ("mmu_booke_clear_modify: page %p is not managed", m));
3130 VM_OBJECT_ASSERT_WLOCKED(m->object);
3131 KASSERT(!vm_page_xbusied(m),
3132 ("mmu_booke_clear_modify: page %p is exclusive busied", m));
3135 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
3136 * If the object containing the page is locked and the page is not
3137 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3139 if ((m->aflags & PGA_WRITEABLE) == 0)
3141 rw_wlock(&pvh_global_lock);
3142 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3143 PMAP_LOCK(pv->pv_pmap);
3144 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3146 mtx_lock_spin(&tlbivax_mutex);
3149 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
3150 tlb0_flush_entry(pv->pv_va);
3151 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
3156 mtx_unlock_spin(&tlbivax_mutex);
3158 PMAP_UNLOCK(pv->pv_pmap);
3160 rw_wunlock(&pvh_global_lock);
3164 * Return a count of reference bits for a page, clearing those bits.
3165 * It is not necessary for every reference bit to be cleared, but it
3166 * is necessary that 0 only be returned when there are truly no
3167 * reference bits set.
3169 * As an optimization, update the page's dirty field if a modified bit is
3170 * found while counting reference bits. This opportunistic update can be
3171 * performed at low cost and can eliminate the need for some future calls
3172 * to pmap_is_modified(). However, since this function stops after
3173 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3174 * dirty pages. Those dirty pages will only be detected by a future call
3175 * to pmap_is_modified().
3178 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
3184 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3185 ("mmu_booke_ts_referenced: page %p is not managed", m));
3187 rw_wlock(&pvh_global_lock);
3188 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3189 PMAP_LOCK(pv->pv_pmap);
3190 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3192 if (PTE_ISMODIFIED(pte))
3194 if (PTE_ISREFERENCED(pte)) {
3195 mtx_lock_spin(&tlbivax_mutex);
3198 tlb0_flush_entry(pv->pv_va);
3199 *pte &= ~PTE_REFERENCED;
3202 mtx_unlock_spin(&tlbivax_mutex);
3204 if (++count >= PMAP_TS_REFERENCED_MAX) {
3205 PMAP_UNLOCK(pv->pv_pmap);
3210 PMAP_UNLOCK(pv->pv_pmap);
3212 rw_wunlock(&pvh_global_lock);
3217 * Clear the wired attribute from the mappings for the specified range of
3218 * addresses in the given pmap. Every valid mapping within that range must
3219 * have the wired attribute set. In contrast, invalid mappings cannot have
3220 * the wired attribute set, so they are ignored.
3222 * The wired attribute of the page table entry is not a hardware feature, so
3223 * there is no need to invalidate any TLB entries.
3226 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3232 for (va = sva; va < eva; va += PAGE_SIZE) {
3233 if ((pte = pte_find(mmu, pmap, va)) != NULL &&
3235 if (!PTE_ISWIRED(pte))
3236 panic("mmu_booke_unwire: pte %p isn't wired",
3239 pmap->pm_stats.wired_count--;
3247 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
3248 * page. This count may be changed upwards or downwards in the future; it is
3249 * only necessary that true be returned for a small subset of pmaps for proper page aging.
3253 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
3259 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3260 ("mmu_booke_page_exists_quick: page %p is not managed", m));
3263 rw_wlock(&pvh_global_lock);
3264 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3265 if (pv->pv_pmap == pmap) {
3272 rw_wunlock(&pvh_global_lock);
3277 * Return the number of managed mappings to the given physical page that are wired.
3281 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
3287 if ((m->oflags & VPO_UNMANAGED) != 0)
3289 rw_wlock(&pvh_global_lock);
3290 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3291 PMAP_LOCK(pv->pv_pmap);
3292 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
3293 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
3295 PMAP_UNLOCK(pv->pv_pmap);
3297 rw_wunlock(&pvh_global_lock);
3302 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3308 * This currently does not work for entries that
3309 * overlap TLB1 entries.
3311 for (i = 0; i < TLB1_ENTRIES; i ++) {
3312 if (tlb1_iomapped(i, pa, size, &va) == 0)
3320 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
3326 /* Minidumps are based on virtual memory addresses. */
3328 *va = (void *)(vm_offset_t)pa;
3332 /* Raw physical memory dumps don't have a virtual address. */
3333 /* We always map a 256MB page at 256M. */
3334 gran = 256 * 1024 * 1024;
3335 ppa = rounddown2(pa, gran);
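/*
 * With gran = 256MB, e.g. pa = 0x1234_5678 yields ppa = 0x1000_0000;
 * ofs (pa's offset within that 256MB page) decides below whether a
 * second entry is needed for requests that cross the 256MB boundary.
 */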
3338 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
3340 if (sz > (gran - ofs))
3341 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
3346 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
3354 /* Minidumps are based on virtual memory addresses. */
3355 /* Nothing to do... */
3359 for (i = 0; i < TLB1_ENTRIES; i++) {
3360 tlb1_read_entry(&e, i);
3361 if (!(e.mas1 & MAS1_VALID))
3365 /* Raw physical memory dumps don't have a virtual address. */
3370 tlb1_write_entry(&e, i);
3372 gran = 256 * 1024 * 1024;
3373 ppa = rounddown2(pa, gran);
3375 if (sz > (gran - ofs)) {
3380 tlb1_write_entry(&e, i);
3384 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3387 mmu_booke_scan_init(mmu_t mmu)
3394 /* Initialize phys. segments for dumpsys(). */
3395 memset(&dump_map, 0, sizeof(dump_map));
3396 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
3397 &availmem_regions_sz);
3398 for (i = 0; i < physmem_regions_sz; i++) {
3399 dump_map[i].pa_start = physmem_regions[i].mr_start;
3400 dump_map[i].pa_size = physmem_regions[i].mr_size;
3405 /* Virtual segments for minidumps: */
3406 memset(&dump_map, 0, sizeof(dump_map));
3408 /* 1st: kernel .data and .bss. */
3409 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3410 dump_map[0].pa_size =
3411 round_page((uintptr_t)_end) - dump_map[0].pa_start;
3413 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3414 dump_map[1].pa_start = data_start;
3415 dump_map[1].pa_size = data_end - data_start;
3417 /* 3rd: kernel VM. */
3418 va = dump_map[1].pa_start + dump_map[1].pa_size;
3419 /* Find start of next chunk (from va). */
3420 while (va < virtual_end) {
3421 /* Don't dump the buffer cache. */
3422 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3423 va = kmi.buffer_eva;
3426 pte = pte_find(mmu, kernel_pmap, va);
3427 if (pte != NULL && PTE_ISVALID(pte))
3431 if (va < virtual_end) {
3432 dump_map[2].pa_start = va;
3434 /* Find last page in chunk. */
3435 while (va < virtual_end) {
3436 /* Don't run into the buffer cache. */
3437 if (va == kmi.buffer_sva)
3439 pte = pte_find(mmu, kernel_pmap, va);
3440 if (pte == NULL || !PTE_ISVALID(pte))
3444 dump_map[2].pa_size = va - dump_map[2].pa_start;
3449 * Map a set of physical memory pages into the kernel virtual address space.
3450 * Return a pointer to where it is mapped. This routine is intended to be used
3451 * for mapping device memory, NOT real memory.
3454 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3457 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
3461 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3465 uintptr_t va, tmpva;
3470 * Check if this is premapped in TLB1. Note: this should probably also
3471 * check whether a sequence of TLB1 entries exists that matches the
3472 * requirement; for now only the easy case is checked.
3474 if (ma == VM_MEMATTR_DEFAULT) {
3475 for (i = 0; i < TLB1_ENTRIES; i++) {
3476 tlb1_read_entry(&e, i);
3477 if (!(e.mas1 & MAS1_VALID))
3480 (pa + size) <= (e.phys + e.size))
3481 return (void *)(e.virt +
3482 (vm_offset_t)(pa - e.phys));
3486 size = roundup(size, PAGE_SIZE);
3489 * The device mapping area is between VM_MAXUSER_ADDRESS and
3490 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing.
3492 #ifdef SPARSE_MAPDEV
3494 * With a sparse mapdev, align to the largest starting region. This
3495 * could feasibly be optimized for a 'best-fit' alignment, but that
3496 * calculation could be very costly.
3499 tmpva = tlb1_map_base;
3500 va = roundup(tlb1_map_base, 1 << flsl(size));
3501 #ifdef __powerpc64__
3502 } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
3504 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
3507 #ifdef __powerpc64__
3508 va = atomic_fetchadd_long(&tlb1_map_base, size);
3510 va = atomic_fetchadd_int(&tlb1_map_base, size);
3516 sz = 1 << (ilog2(size) & ~1);
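/*
 * Clearing the low bit of the exponent picks the largest power-of-4
 * chunk that fits in size, matching the power-of-4 granularity of
 * TLB1 TSIZE values.
 */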
3520 } while (va % sz != 0);
3523 printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
3524 va, (uintmax_t)pa, sz);
3525 tlb1_set_entry(va, pa, sz,
3526 _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma));
3536 * 'Unmap' a range mapped by mmu_booke_mapdev().
3539 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
3541 #ifdef SUPPORTS_SHRINKING_TLB1
3542 vm_offset_t base, offset;
3545 * Unmap only if this is inside kernel virtual space.
3547 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
3548 base = trunc_page(va);
3549 offset = va & PAGE_MASK;
3550 size = roundup(offset + size, PAGE_SIZE);
3551 kva_free(base, size);
3557 * mmu_booke_object_init_pt preloads the ptes for a given object into the
3558 * specified pmap. This eliminates the blast of soft faults on process startup
3559 * and immediately after an mmap.
3562 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3563 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3566 VM_OBJECT_ASSERT_WLOCKED(object);
3567 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3568 ("mmu_booke_object_init_pt: non-device object"));
3572 * Perform the pmap work for mincore.
3575 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3576 vm_paddr_t *locked_pa)
3579 /* XXX: this should be implemented at some point */
3584 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
3592 /* Check TLB1 mappings */
3593 for (i = 0; i < TLB1_ENTRIES; i++) {
3594 tlb1_read_entry(&e, i);
3595 if (!(e.mas1 & MAS1_VALID))
3597 if (addr >= e.virt && addr < e.virt + e.size)
3600 if (i < TLB1_ENTRIES) {
3601 /* Only allow full mappings to be modified for now. */
3602 /* Validate the range. */
3603 for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
3604 tlb1_read_entry(&e, j);
3605 if (va != e.virt || (sz - (va - addr) < e.size))
3608 for (va = addr; va < addr + sz; va += e.size, i++) {
3609 tlb1_read_entry(&e, i);
3610 e.mas2 &= ~MAS2_WIMGE_MASK;
3611 e.mas2 |= tlb_calc_wimg(e.phys, mode);
3614 * Write it out to the TLB. Should really re-sync with other cores.
3617 tlb1_write_entry(&e, i);
3622 /* Not in TLB1, try through pmap */
3623 /* First validate the range. */
3624 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3625 pte = pte_find(mmu, kernel_pmap, va);
3626 if (pte == NULL || !PTE_ISVALID(pte))
3630 mtx_lock_spin(&tlbivax_mutex);
3632 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3633 pte = pte_find(mmu, kernel_pmap, va);
3634 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
3635 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
3636 tlb0_flush_entry(va);
3639 mtx_unlock_spin(&tlbivax_mutex);
3644 /**************************************************************************/
3646 /**************************************************************************/
3649 * Allocate a TID. If necessary, steal one from someone else.
3650 * The new TID is flushed from the TLB before returning.
3653 tid_alloc(pmap_t pmap)
3658 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
3660 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
3662 thiscpu = PCPU_GET(cpuid);
3664 tid = PCPU_GET(tid_next);
3667 PCPU_SET(tid_next, tid + 1);
3669 /* If we are stealing a TID, clear the relevant pmap's field. */
3670 if (tidbusy[thiscpu][tid] != NULL) {
3672 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
3674 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
3676 /* Flush all entries from TLB0 matching this TID. */
3680 tidbusy[thiscpu][tid] = pmap;
3681 pmap->pm_tid[thiscpu] = tid;
3682 __asm __volatile("msync; isync");
3684 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
3685 PCPU_GET(tid_next));
3690 /**************************************************************************/
3692 /**************************************************************************/
3695 #ifdef __powerpc64__
3696 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
3698 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3709 if (mas1 & MAS1_VALID)
3714 if (mas1 & MAS1_IPROT)
3719 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3720 tid = MAS1_GETTID(mas1);
3722 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3725 size = tsize2size(tsize);
3727 debugf("%3d: (%s) [AS=%d] "
3728 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
3729 "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3730 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
3733 /* Convert TLB0 va and way number to tlb0[] table index. */
3734 static inline unsigned int
3735 tlb0_tableidx(vm_offset_t va, unsigned int way)
3739 idx = (way * TLB0_ENTRIES_PER_WAY);
3740 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
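/*
 * The tlb0[] table is laid out way-major: TLB0_ENTRIES_PER_WAY entries
 * per way, plus the set index taken from the EPN bits of va.
 */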
3745 * Invalidate TLB0 entry.
3748 tlb0_flush_entry(vm_offset_t va)
3751 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
3753 mtx_assert(&tlbivax_mutex, MA_OWNED);
3755 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
3756 __asm __volatile("isync; msync");
3757 __asm __volatile("tlbsync; msync");
3759 CTR1(KTR_PMAP, "%s: e", __func__);
3762 /* Print out contents of the MAS registers for each TLB0 entry */
3764 tlb0_print_tlbentries(void)
3766 uint32_t mas0, mas1, mas3, mas7;
3767 #ifdef __powerpc64__
3772 int entryidx, way, idx;
3774 debugf("TLB0 entries:\n");
3775 for (way = 0; way < TLB0_WAYS; way ++)
3776 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
3778 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
3779 mtspr(SPR_MAS0, mas0);
3780 __asm __volatile("isync");
3782 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
3783 mtspr(SPR_MAS2, mas2);
3785 __asm __volatile("isync; tlbre");
3787 mas1 = mfspr(SPR_MAS1);
3788 mas2 = mfspr(SPR_MAS2);
3789 mas3 = mfspr(SPR_MAS3);
3790 mas7 = mfspr(SPR_MAS7);
3792 idx = tlb0_tableidx(mas2, way);
3793 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3797 /**************************************************************************/
3799 /**************************************************************************/
3802 * TLB1 mapping notes:
3804 * TLB1[0] Kernel text and data.
3805 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
3806 * windows, other device mappings.
3810 * Read an entry from given TLB1 slot.
3813 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
3818 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
3821 __asm __volatile("wrteei 0");
3823 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
3824 mtspr(SPR_MAS0, mas0);
3825 __asm __volatile("isync; tlbre");
3827 entry->mas1 = mfspr(SPR_MAS1);
3828 entry->mas2 = mfspr(SPR_MAS2);
3829 entry->mas3 = mfspr(SPR_MAS3);
3831 switch ((mfpvr() >> 16) & 0xFFFF) {
3836 entry->mas7 = mfspr(SPR_MAS7);
3844 entry->virt = entry->mas2 & MAS2_EPN_MASK;
3845 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
3846 (entry->mas3 & MAS3_RPN);
3848 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
3852 * Write given entry to TLB1 hardware.
3855 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
3860 //debugf("tlb1_write_entry: s\n");
3863 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
3864 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
3867 __asm __volatile("wrteei 0");
3869 mtspr(SPR_MAS0, mas0);
3870 __asm __volatile("isync");
3871 mtspr(SPR_MAS1, e->mas1);
3872 __asm __volatile("isync");
3873 mtspr(SPR_MAS2, e->mas2);
3874 __asm __volatile("isync");
3875 mtspr(SPR_MAS3, e->mas3);
3876 __asm __volatile("isync");
3877 switch ((mfpvr() >> 16) & 0xFFFF) {
3882 __asm __volatile("isync");
3885 mtspr(SPR_MAS7, e->mas7);
3886 __asm __volatile("isync");
3892 __asm __volatile("tlbwe; isync; msync");
3895 //debugf("tlb1_write_entry: e\n");
3899 * Return the largest uint value log such that 2^log <= num.
3902 ilog2(unsigned int num)
3906 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
3911 * Convert TLB TSIZE value to mapped region size.
3914 tsize2size(unsigned int tsize)
3919 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
3922 return ((1 << (2 * tsize)) * 1024);
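/* E.g. tsize 1 -> 4KB, 5 -> 1MB, 7 -> 16MB, 9 -> 256MB. */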
3926 * Convert region size (must be power of 4) to TLB TSIZE value.
3929 size2tsize(vm_size_t size)
3932 return (ilog2(size) / 2 - 5);
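/* Inverse of tsize2size(): e.g. 4KB -> 1, 1MB -> 5, 16MB -> 7, 256MB -> 9. */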
3936 * Register permanent kernel mapping in TLB1.
3938 * Entries are created in the first free slot found by scanning TLB1
3939 * from index 0 and are not supposed to be invalidated.
3942 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
3949 for (index = 0; index < TLB1_ENTRIES; index++) {
3950 tlb1_read_entry(&e, index);
3951 if ((e.mas1 & MAS1_VALID) == 0)
3953 /* Check if we're just updating the flags, and update them. */
3954 if (e.phys == pa && e.virt == va && e.size == size) {
3955 e.mas2 = (va & MAS2_EPN_MASK) | flags;
3956 tlb1_write_entry(&e, index);
3960 if (index >= TLB1_ENTRIES) {
3961 printf("tlb1_set_entry: TLB1 full!\n");
3965 /* Convert size to TSIZE */
3966 tsize = size2tsize(size);
3968 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
3969 /* XXX TS is hard coded to 0 for now as we only use a single address space. */
3970 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
3975 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
3976 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
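/*
 * MAS1_IPROT protects the entry from invalidate-all operations, so these
 * permanent kernel mappings are not dropped by TLB invalidations.
 */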
3977 e.mas2 = (va & MAS2_EPN_MASK) | flags;
3979 /* Set supervisor RWX permission bits */
3980 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
3981 e.mas7 = (pa >> 32) & MAS7_RPN;
3983 tlb1_write_entry(&e, index);
3986 * XXX in general TLB1 updates should be propagated between CPUs,
3987 * since the current design assumes the same TLB1 set-up on all CPUs.
3994 * Map a contiguous RAM region into TLB1 using at most
3995 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
3997 * If necessary, round up the last entry size and return the total size
3998 * used by all allocated entries.
4001 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
4003 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
4004 vm_size_t mapped, pgsz, base, mask;
4007 /* Round up to the next 1M */
4008 size = roundup2(size, 1 << 20);
4013 pgsz = 64*1024*1024;
4014 while (mapped < size) {
4015 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
4016 while (pgsz > (size - mapped))
4022 /* We under-map. Correct for this. */
4023 if (mapped < size) {
4024 while (pgs[idx - 1] == pgsz) {
4028 /* XXX We may increase beyond our starting point. */
4037 /* Align address to the boundary */
4039 va = (va + mask) & ~mask;
4040 pa = (pa + mask) & ~mask;
4043 for (idx = 0; idx < nents; idx++) {
4045 debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
4046 tlb1_set_entry(va, pa, pgsz,
4047 _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
4052 mapped = (va - base);
4053 printf("mapped size 0x%"PRI0ptrX" (wasted space 0x%"PRIxPTR")\n",
4054 mapped, mapped - size);
4059 * TLB1 initialization routine, to be called after the very first
4060 * assembler level setup done in locore.S.
4065 uint32_t mas0, mas1, mas2, mas3, mas7;
4070 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
4071 mtspr(SPR_MAS0, mas0);
4072 __asm __volatile("isync; tlbre");
4074 mas1 = mfspr(SPR_MAS1);
4075 mas2 = mfspr(SPR_MAS2);
4076 mas3 = mfspr(SPR_MAS3);
4077 mas7 = mfspr(SPR_MAS7);
4079 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
4082 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4083 kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
4085 /* Setup TLB miss defaults */
4086 set_mas4_defaults();
4090 * pmap_early_io_unmap() should be used shortly after the corresponding
4091 * pmap_early_io_map(), as in the following snippet:
4093 * x = pmap_early_io_map(...);
4094 * <do something with x>
4095 * pmap_early_io_unmap(x, size);
4097 * with no further allocations made in between.
4100 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
4106 size = roundup(size, PAGE_SIZE);
4108 for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
4109 tlb1_read_entry(&e, i);
4110 if (!(e.mas1 & MAS1_VALID))
4112 if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
4114 e.mas1 &= ~MAS1_VALID;
4115 tlb1_write_entry(&e, i);
4118 if (tlb1_map_base == va + isize)
4119 tlb1_map_base -= isize;
4123 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
4130 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
4132 for (i = 0; i < TLB1_ENTRIES; i++) {
4133 tlb1_read_entry(&e, i);
4134 if (!(e.mas1 & MAS1_VALID))
4136 if (pa >= e.phys && (pa + size) <=
4138 return (e.virt + (pa - e.phys));
4141 pa_base = rounddown(pa, PAGE_SIZE);
4142 size = roundup(size + (pa - pa_base), PAGE_SIZE);
4143 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
4144 va = tlb1_map_base + (pa - pa_base);
4147 sz = 1 << (ilog2(size) & ~1);
4148 tlb1_set_entry(tlb1_map_base, pa_base, sz,
4149 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
4152 tlb1_map_base += sz;
4159 pmap_track_page(pmap_t pmap, vm_offset_t va)
4163 struct pv_entry *pve;
4165 va = trunc_page(va);
4166 pa = pmap_kextract(va);
4168 rw_wlock(&pvh_global_lock);
4170 page = PHYS_TO_VM_PAGE(pa);
4172 TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
4173 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
4177 page->md.pv_tracked = true;
4178 pv_insert(pmap, va, page);
4181 rw_wunlock(&pvh_global_lock);
4186 * Setup MAS4 defaults.
4187 * These values are loaded to MAS0-2 on a TLB miss.
4190 set_mas4_defaults(void)
4194 /* Defaults: TLB0, PID0, TSIZED=4K */
4195 mas4 = MAS4_TLBSELD0;
4196 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
4200 mtspr(SPR_MAS4, mas4);
4201 __asm __volatile("isync");
4205 * Print out contents of the MAS registers for each TLB1 entry
4208 tlb1_print_tlbentries(void)
4210 uint32_t mas0, mas1, mas3, mas7;
4211 #ifdef __powerpc64__
4218 debugf("TLB1 entries:\n");
4219 for (i = 0; i < TLB1_ENTRIES; i++) {
4221 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
4222 mtspr(SPR_MAS0, mas0);
4224 __asm __volatile("isync; tlbre");
4226 mas1 = mfspr(SPR_MAS1);
4227 mas2 = mfspr(SPR_MAS2);
4228 mas3 = mfspr(SPR_MAS3);
4229 mas7 = mfspr(SPR_MAS7);
4231 tlb_print_entry(i, mas1, mas2, mas3, mas7);
4236 * Return 0 if the physical IO range is encompassed by one of the
4237 * TLB1 entries, otherwise return a related error code.
4240 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
4243 vm_paddr_t pa_start;
4245 unsigned int entry_tsize;
4246 vm_size_t entry_size;
4249 *va = (vm_offset_t)NULL;
4251 tlb1_read_entry(&e, i);
4252 /* Skip invalid entries */
4253 if (!(e.mas1 & MAS1_VALID))
4257 * The entry must be cache-inhibited, guarded, and r/w
4258 * so it can function as an i/o page
4260 prot = e.mas2 & (MAS2_I | MAS2_G);
4261 if (prot != (MAS2_I | MAS2_G))
4264 prot = e.mas3 & (MAS3_SR | MAS3_SW);
4265 if (prot != (MAS3_SR | MAS3_SW))
4268 /* The address should be within the entry range. */
4269 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4270 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
4272 entry_size = tsize2size(entry_tsize);
4273 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
4274 (e.mas3 & MAS3_RPN);
4275 pa_end = pa_start + entry_size;
4277 if ((pa < pa_start) || ((pa + size) > pa_end))
4280 /* Return virtual address of this mapping. */
4281 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
4286 * Invalidate all TLB0 entries which match the given TID. Note this is
4287 * dedicated for cases when invalidations should NOT be propagated to other
4291 tid_flush(tlbtid_t tid)
4294 uint32_t mas0, mas1, mas2;
4298 /* Don't evict kernel translations */
4299 if (tid == TID_KERNEL)
4303 __asm __volatile("wrteei 0");
4305 for (way = 0; way < TLB0_WAYS; way++)
4306 for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
4308 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
4309 mtspr(SPR_MAS0, mas0);
4310 __asm __volatile("isync");
4312 mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
4313 mtspr(SPR_MAS2, mas2);
4315 __asm __volatile("isync; tlbre");
4317 mas1 = mfspr(SPR_MAS1);
4319 if (!(mas1 & MAS1_VALID))
4321 if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
4323 mas1 &= ~MAS1_VALID;
4324 mtspr(SPR_MAS1, mas1);
4325 __asm __volatile("isync; tlbwe; isync; msync");