2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Some hw specific parts of this pmap were derived or influenced
29 * by NetBSD's ibm4xx pmap module. More generic code is shared with
30 * a few other pmap modules from the FreeBSD tree.
36 * Kernel and user threads run within one common virtual address space
40 * Virtual address space layout:
41 * -----------------------------
42 * 0x0000_0000 - 0x7fff_ffff : user process
43 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
44 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
45 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
46 * 0xc100_0000 - 0xffff_ffff : KVA
47 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
48 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
49 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
50 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
53 * Virtual address space layout:
54 * -----------------------------
55 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
56 * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
57 * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
58 * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
59 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
60 * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
61 * endkernel - msgbufp-1 : flat device tree
62 * msgbufp - ptbl_bufs-1 : message buffer
63 * ptbl_bufs - kernel_pdir-1 : kernel page tables
64 * kernel_pdir - kernel_pp2d-1 : kernel page directory
65 * kernel_pp2d - . : kernel pointers to page directory
66 * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
67 * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
68 * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
69 * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
70 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
71 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
72 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
73 * 0xf000_0000_0000_0000 - +Maxmem : physmem map
74 * - 0xffff_ffff_ffff_ffff : device direct map
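 *
 * (Reading the 32-bit map above: the user region spans 2 GB, the
 * pmap_mapdev() window 1 GB, and the kernel-reserved window at
 * 0xc000_0000 is 16 MB.  Within KVA, the page zero/copy reservation is
 * 0x4000 bytes (four 4 KB pages), the ptbl buf reservation is 16 MB, and
 * the guard page + kstack0 range is 0x5000 bytes, consistent with one
 * guard page plus a four-page kernel stack.)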
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
80 #include "opt_kstack_pages.h"
82 #include <sys/param.h>
84 #include <sys/malloc.h>
88 #include <sys/queue.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/kerneldump.h>
92 #include <sys/linker.h>
93 #include <sys/msgbuf.h>
95 #include <sys/mutex.h>
96 #include <sys/rwlock.h>
97 #include <sys/sched.h>
99 #include <sys/vmmeter.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_extern.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_map.h>
109 #include <vm/vm_pager.h>
112 #include <machine/_inttypes.h>
113 #include <machine/cpu.h>
114 #include <machine/pcb.h>
115 #include <machine/platform.h>
117 #include <machine/tlb.h>
118 #include <machine/spr.h>
119 #include <machine/md_var.h>
120 #include <machine/mmuvar.h>
121 #include <machine/pmap.h>
122 #include <machine/pte.h>
126 #define SPARSE_MAPDEV
128 #define debugf(fmt, args...) printf(fmt, ##args)
130 #define debugf(fmt, args...)
134 #define PRI0ptrX "016lx"
136 #define PRI0ptrX "08x"
139 #define TODO panic("%s: not implemented", __func__);
141 extern unsigned char _etext[];
142 extern unsigned char _end[];
144 extern uint32_t *bootinfo;
147 vm_offset_t kernstart;
150 /* Message buffer and tables. */
151 static vm_offset_t data_start;
152 static vm_size_t data_end;
154 /* Phys/avail memory regions. */
155 static struct mem_region *availmem_regions;
156 static int availmem_regions_sz;
157 static struct mem_region *physmem_regions;
158 static int physmem_regions_sz;
160 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
161 static vm_offset_t zero_page_va;
162 static struct mtx zero_page_mutex;
164 static struct mtx tlbivax_mutex;
166 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
167 static vm_offset_t copy_page_src_va;
168 static vm_offset_t copy_page_dst_va;
169 static struct mtx copy_page_mutex;
171 /**************************************************************************/
173 /**************************************************************************/
175 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
176 vm_prot_t, u_int flags, int8_t psind);
178 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
179 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
181 unsigned int kernel_pdirs;
185 * If user pmap is processed with mmu_booke_remove and the resident count
186 * drops to 0, there are no more pages to remove, so we need not continue.
188 #define PMAP_REMOVE_DONE(pmap) \
189 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
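/*
 * Illustrative sketch only (assumed local names, not part of this file):
 * a removal loop over [sva, eva) would typically consult PMAP_REMOVE_DONE
 * after each pte_remove() so it can stop early once a user pmap has no
 * resident pages left.
 */
#if 0
	for (va = sva; va < eva; va += PAGE_SIZE) {
		pte = pte_find(mmu, pmap, va);
		if (pte != NULL && PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, hold_flag);
			if (PMAP_REMOVE_DONE(pmap))
				break;
		}
	}
#endif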
191 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
192 extern int elf32_nxstack;
195 /**************************************************************************/
196 /* TLB and TID handling */
197 /**************************************************************************/
199 /* Translation ID busy table */
200 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
203 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
204 * core revisions and should be read from h/w registers during early config.
206 uint32_t tlb0_entries;
208 uint32_t tlb0_entries_per_way;
209 uint32_t tlb1_entries;
211 #define TLB0_ENTRIES (tlb0_entries)
212 #define TLB0_WAYS (tlb0_ways)
213 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
215 #define TLB1_ENTRIES (tlb1_entries)
217 static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
219 static tlbtid_t tid_alloc(struct pmap *);
220 static void tid_flush(tlbtid_t tid);
223 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
225 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
228 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
229 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
230 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
231 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
233 static vm_size_t tsize2size(unsigned int);
234 static unsigned int size2tsize(vm_size_t);
235 static unsigned int ilog2(unsigned int);
237 static void set_mas4_defaults(void);
239 static inline void tlb0_flush_entry(vm_offset_t);
240 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
242 /**************************************************************************/
243 /* Page table management */
244 /**************************************************************************/
246 static struct rwlock_padalign pvh_global_lock;
248 /* Data for the pv entry allocation mechanism */
249 static uma_zone_t pvzone;
250 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
252 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
254 #ifndef PMAP_SHPGPERPROC
255 #define PMAP_SHPGPERPROC 200
258 static void ptbl_init(void);
259 static struct ptbl_buf *ptbl_buf_alloc(void);
260 static void ptbl_buf_free(struct ptbl_buf *);
261 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
264 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
265 unsigned int, boolean_t);
266 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
267 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
268 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
270 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
271 static void ptbl_free(mmu_t, pmap_t, unsigned int);
272 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
273 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
276 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
277 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
278 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
279 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
280 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
282 static pv_entry_t pv_alloc(void);
283 static void pv_free(pv_entry_t);
284 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
285 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
287 static void booke_pmap_init_qpages(void);
289 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
291 #define PTBL_BUFS (16UL * 16 * 16)
293 #define PTBL_BUFS (128 * 16)
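/*
 * With the values above this works out to 16 * 16 * 16 = 4096 bufs in the
 * 64-bit configuration and 128 * 16 = 2048 bufs in the 32-bit one.
 */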
297 TAILQ_ENTRY(ptbl_buf) link; /* list link */
298 vm_offset_t kva; /* va of mapping */
301 /* ptbl free list and a lock used for access synchronization. */
302 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
303 static struct mtx ptbl_buf_freelist_lock;
305 /* Base address of kva space allocated for ptbl bufs. */
306 static vm_offset_t ptbl_buf_pool_vabase;
308 /* Pointer to ptbl_buf structures. */
309 static struct ptbl_buf *ptbl_bufs;
312 extern tlb_entry_t __boot_tlb1[];
313 void pmap_bootstrap_ap(volatile uint32_t *);
317 * Kernel MMU interface
319 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
320 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
321 vm_size_t, vm_offset_t);
322 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
323 static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
324 vm_offset_t, vm_page_t *, vm_offset_t, int);
325 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
326 vm_prot_t, u_int flags, int8_t psind);
327 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
328 vm_page_t, vm_prot_t);
329 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
331 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
332 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
334 static void mmu_booke_init(mmu_t);
335 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
336 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
337 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
338 static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
339 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
341 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
343 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
344 vm_object_t, vm_pindex_t, vm_size_t);
345 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
346 static void mmu_booke_page_init(mmu_t, vm_page_t);
347 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
348 static void mmu_booke_pinit(mmu_t, pmap_t);
349 static void mmu_booke_pinit0(mmu_t, pmap_t);
350 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
352 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
353 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
354 static void mmu_booke_release(mmu_t, pmap_t);
355 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
356 static void mmu_booke_remove_all(mmu_t, vm_page_t);
357 static void mmu_booke_remove_write(mmu_t, vm_page_t);
358 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
359 static void mmu_booke_zero_page(mmu_t, vm_page_t);
360 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
361 static void mmu_booke_activate(mmu_t, struct thread *);
362 static void mmu_booke_deactivate(mmu_t, struct thread *);
363 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
364 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
365 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
366 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
367 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
368 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
369 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
370 static void mmu_booke_kremove(mmu_t, vm_offset_t);
371 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
372 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
374 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
376 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
378 static void mmu_booke_scan_init(mmu_t);
379 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
380 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
381 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
382 vm_size_t sz, vm_memattr_t mode);
384 static mmu_method_t mmu_booke_methods[] = {
385 /* pmap dispatcher interface */
386 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
387 MMUMETHOD(mmu_copy, mmu_booke_copy),
388 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
389 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
390 MMUMETHOD(mmu_enter, mmu_booke_enter),
391 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
392 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
393 MMUMETHOD(mmu_extract, mmu_booke_extract),
394 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
395 MMUMETHOD(mmu_init, mmu_booke_init),
396 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
397 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
398 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
399 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
400 MMUMETHOD(mmu_map, mmu_booke_map),
401 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
402 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
403 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
404 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
405 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
406 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
407 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
408 MMUMETHOD(mmu_protect, mmu_booke_protect),
409 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
410 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
411 MMUMETHOD(mmu_release, mmu_booke_release),
412 MMUMETHOD(mmu_remove, mmu_booke_remove),
413 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
414 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
415 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
416 MMUMETHOD(mmu_unwire, mmu_booke_unwire),
417 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
418 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
419 MMUMETHOD(mmu_activate, mmu_booke_activate),
420 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
421 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
422 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
424 /* Internal interfaces */
425 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
426 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
427 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
428 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
429 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
430 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
431 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
432 MMUMETHOD(mmu_kremove, mmu_booke_kremove),
433 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
434 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
436 /* dumpsys() support */
437 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
438 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
439 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
444 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
446 static __inline uint32_t
447 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
452 if (ma != VM_MEMATTR_DEFAULT) {
454 case VM_MEMATTR_UNCACHEABLE:
455 return (MAS2_I | MAS2_G);
456 case VM_MEMATTR_WRITE_COMBINING:
457 case VM_MEMATTR_WRITE_BACK:
458 case VM_MEMATTR_PREFETCHABLE:
460 case VM_MEMATTR_WRITE_THROUGH:
461 return (MAS2_W | MAS2_M);
462 case VM_MEMATTR_CACHEABLE:
468 * Assume the page is cache inhibited and access is guarded unless
469 * it's in our available memory array.
471 attrib = _TLB_ENTRY_IO;
472 for (i = 0; i < physmem_regions_sz; i++) {
473 if ((pa >= physmem_regions[i].mr_start) &&
474 (pa < (physmem_regions[i].mr_start +
475 physmem_regions[i].mr_size))) {
476 attrib = _TLB_ENTRY_MEM;
493 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
496 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
497 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
499 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
500 ("tlb_miss_lock: tried to lock self"));
502 tlb_lock(pc->pc_booke_tlb_lock);
504 CTR1(KTR_PMAP, "%s: locked", __func__);
511 tlb_miss_unlock(void)
519 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
521 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
522 __func__, pc->pc_cpuid);
524 tlb_unlock(pc->pc_booke_tlb_lock);
526 CTR1(KTR_PMAP, "%s: unlocked", __func__);
532 /* Return number of entries in TLB0. */
534 tlb0_get_tlbconf(void)
538 tlb0_cfg = mfspr(SPR_TLB0CFG);
539 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
540 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
541 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
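	/*
	 * For example, an e500v2-class core reports a 512-entry, 4-way
	 * set-associative TLB0, giving 128 entries per way.
	 */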
544 /* Return number of entries in TLB1. */
546 tlb1_get_tlbconf(void)
550 tlb1_cfg = mfspr(SPR_TLB1CFG);
551 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
554 /**************************************************************************/
555 /* Page table related */
556 /**************************************************************************/
559 /* Initialize pool of kva ptbl buffers. */
565 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
566 TAILQ_INIT(&ptbl_buf_freelist);
568 for (i = 0; i < PTBL_BUFS; i++) {
569 ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
570 i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
571 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
575 /* Get a ptbl_buf from the freelist. */
576 static struct ptbl_buf *
579 struct ptbl_buf *buf;
581 mtx_lock(&ptbl_buf_freelist_lock);
582 buf = TAILQ_FIRST(&ptbl_buf_freelist);
584 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
585 mtx_unlock(&ptbl_buf_freelist_lock);
590 /* Return ptbl buf to free pool. */
592 ptbl_buf_free(struct ptbl_buf *buf)
594 mtx_lock(&ptbl_buf_freelist_lock);
595 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
596 mtx_unlock(&ptbl_buf_freelist_lock);
600 * Search the pmap's list of allocated ptbl bufs for the given ptbl and free the matching buf.
603 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
605 struct ptbl_buf *pbuf;
607 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
608 if (pbuf->kva == (vm_offset_t) ptbl) {
609 /* Remove from pmap ptbl buf list. */
610 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
612 /* Free corresponding ptbl buf. */
620 /* Get a pointer to a PTE in a page table. */
621 static __inline pte_t *
622 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
627 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
629 pdir = pmap->pm_pp2d[PP2D_IDX(va)];
632 ptbl = pdir[PDIR_IDX(va)];
633 return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
637 * Search the pmap's list of allocated pdir bufs for the given pdir and free the matching buf.
640 ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
642 struct ptbl_buf *pbuf;
644 TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
645 if (pbuf->kva == (vm_offset_t) pdir) {
646 /* Remove from pmap ptbl buf list. */
647 TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
649 /* Free corresponding pdir buf. */
656 /* Free pdir pages and invalidate pdir entry. */
658 pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
666 pdir = pmap->pm_pp2d[pp2d_idx];
668 KASSERT((pdir != NULL), ("pdir_free: null pdir"));
670 pmap->pm_pp2d[pp2d_idx] = NULL;
672 for (i = 0; i < PDIR_PAGES; i++) {
673 va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
674 pa = pte_vatopa(mmu, kernel_pmap, va);
675 m = PHYS_TO_VM_PAGE(pa);
676 vm_page_free_zero(m);
677 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
681 ptbl_free_pmap_pdir(mmu, pmap, pdir);
685 * Decrement pdir pages hold count and attempt to free pdir pages. Called
686 * when removing directory entry from pdir.
688 * Return 1 if pdir pages were freed.
691 pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
698 KASSERT((pmap != kernel_pmap),
699 ("pdir_unhold: unholding kernel pdir!"));
701 pdir = pmap->pm_pp2d[pp2d_idx];
703 KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
704 ("pdir_unhold: non kva pdir"));
706 /* decrement hold count */
707 for (i = 0; i < PDIR_PAGES; i++) {
708 pa = pte_vatopa(mmu, kernel_pmap,
709 (vm_offset_t) pdir + (i * PAGE_SIZE));
710 m = PHYS_TO_VM_PAGE(pa);
715 * Free pdir pages if there are no dir entries in this pdir.
716 * wire_count has the same value for all pdir pages, so check the
719 if (m->wire_count == 0) {
720 pdir_free(mmu, pmap, pp2d_idx);
727 * Increment hold count for pdir pages. This routine is used when a new ptbl
728 * entry is being inserted into the pdir.
731 pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
737 KASSERT((pmap != kernel_pmap),
738 ("pdir_hold: holding kernel pdir!"));
740 KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
742 for (i = 0; i < PDIR_PAGES; i++) {
743 pa = pte_vatopa(mmu, kernel_pmap,
744 (vm_offset_t) pdir + (i * PAGE_SIZE));
745 m = PHYS_TO_VM_PAGE(pa);
750 /* Allocate page table. */
752 ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
755 vm_page_t mtbl [PTBL_PAGES];
757 struct ptbl_buf *pbuf;
763 KASSERT((pdir[pdir_idx] == NULL),
764 ("%s: valid ptbl entry exists!", __func__));
766 pbuf = ptbl_buf_alloc();
768 panic("%s: couldn't alloc kernel virtual memory", __func__);
770 ptbl = (pte_t *) pbuf->kva;
772 for (i = 0; i < PTBL_PAGES; i++) {
773 pidx = (PTBL_PAGES * pdir_idx) + i;
774 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
775 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
777 rw_wunlock(&pvh_global_lock);
779 ptbl_free_pmap_ptbl(pmap, ptbl);
780 for (j = 0; j < i; j++)
781 vm_page_free(mtbl[j]);
782 atomic_subtract_int(&vm_cnt.v_wire_count, i);
786 rw_wlock(&pvh_global_lock);
792 /* Map allocated pages into kernel_pmap. */
793 mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
794 /* Zero whole ptbl. */
795 bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
797 /* Add pbuf to the pmap ptbl bufs list. */
798 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
803 /* Free ptbl pages and invalidate pdir entry. */
805 ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
813 ptbl = pdir[pdir_idx];
815 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
817 pdir[pdir_idx] = NULL;
819 for (i = 0; i < PTBL_PAGES; i++) {
820 va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
821 pa = pte_vatopa(mmu, kernel_pmap, va);
822 m = PHYS_TO_VM_PAGE(pa);
823 vm_page_free_zero(m);
824 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
828 ptbl_free_pmap_ptbl(pmap, ptbl);
832 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
833 * when removing pte entry from ptbl.
835 * Return 1 if ptbl pages were freed.
838 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
848 pp2d_idx = PP2D_IDX(va);
849 pdir_idx = PDIR_IDX(va);
851 KASSERT((pmap != kernel_pmap),
852 ("ptbl_unhold: unholding kernel ptbl!"));
854 pdir = pmap->pm_pp2d[pp2d_idx];
855 ptbl = pdir[pdir_idx];
857 KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
858 ("ptbl_unhold: non kva ptbl"));
860 /* decrement hold count */
861 for (i = 0; i < PTBL_PAGES; i++) {
862 pa = pte_vatopa(mmu, kernel_pmap,
863 (vm_offset_t) ptbl + (i * PAGE_SIZE));
864 m = PHYS_TO_VM_PAGE(pa);
869 * Free ptbl pages if there are no pte entries in this ptbl.
870 * wire_count has the same value for all ptbl pages, so check the
873 if (m->wire_count == 0) {
874 /* A pair of indirect entries might point to this ptbl page */
876 tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
877 TLB_SIZE_1M, MAS6_SIND);
878 tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
879 TLB_SIZE_1M, MAS6_SIND);
881 ptbl_free(mmu, pmap, pdir, pdir_idx);
882 pdir_unhold(mmu, pmap, pp2d_idx);
889 * Increment hold count for ptbl pages. This routine is used when a new pte
890 * entry is being inserted into the ptbl.
893 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
900 KASSERT((pmap != kernel_pmap),
901 ("ptbl_hold: holding kernel ptbl!"));
903 ptbl = pdir[pdir_idx];
905 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
907 for (i = 0; i < PTBL_PAGES; i++) {
908 pa = pte_vatopa(mmu, kernel_pmap,
909 (vm_offset_t) ptbl + (i * PAGE_SIZE));
910 m = PHYS_TO_VM_PAGE(pa);
916 /* Initialize pool of kva ptbl buffers. */
922 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
923 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
924 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
925 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
927 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
928 TAILQ_INIT(&ptbl_buf_freelist);
930 for (i = 0; i < PTBL_BUFS; i++) {
932 ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
933 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
937 /* Get a ptbl_buf from the freelist. */
938 static struct ptbl_buf *
941 struct ptbl_buf *buf;
943 mtx_lock(&ptbl_buf_freelist_lock);
944 buf = TAILQ_FIRST(&ptbl_buf_freelist);
946 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
947 mtx_unlock(&ptbl_buf_freelist_lock);
949 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
954 /* Return ptbl buf to free pool. */
956 ptbl_buf_free(struct ptbl_buf *buf)
959 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
961 mtx_lock(&ptbl_buf_freelist_lock);
962 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
963 mtx_unlock(&ptbl_buf_freelist_lock);
967 * Search the pmap's list of allocated ptbl bufs for the given ptbl and free the matching buf.
970 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
972 struct ptbl_buf *pbuf;
974 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
976 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
978 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
979 if (pbuf->kva == (vm_offset_t)ptbl) {
980 /* Remove from pmap ptbl buf list. */
981 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
983 /* Free corresponding ptbl buf. */
989 /* Allocate page table. */
991 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
993 vm_page_t mtbl[PTBL_PAGES];
995 struct ptbl_buf *pbuf;
1000 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1001 (pmap == kernel_pmap), pdir_idx);
1003 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1004 ("ptbl_alloc: invalid pdir_idx"));
1005 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
1006 ("pte_alloc: valid ptbl entry exists!"));
1008 pbuf = ptbl_buf_alloc();
1010 panic("pte_alloc: couldn't alloc kernel virtual memory");
1012 ptbl = (pte_t *)pbuf->kva;
1014 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
1016 for (i = 0; i < PTBL_PAGES; i++) {
1017 pidx = (PTBL_PAGES * pdir_idx) + i;
1018 while ((m = vm_page_alloc(NULL, pidx,
1019 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
1021 rw_wunlock(&pvh_global_lock);
1023 ptbl_free_pmap_ptbl(pmap, ptbl);
1024 for (j = 0; j < i; j++)
1025 vm_page_free(mtbl[j]);
1026 atomic_subtract_int(&vm_cnt.v_wire_count, i);
1030 rw_wlock(&pvh_global_lock);
1036 /* Map allocated pages into kernel_pmap. */
1037 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
1039 /* Zero whole ptbl. */
1040 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
1042 /* Add pbuf to the pmap ptbl bufs list. */
1043 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
1048 /* Free ptbl pages and invalidate pdir entry. */
1050 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1058 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1059 (pmap == kernel_pmap), pdir_idx);
1061 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1062 ("ptbl_free: invalid pdir_idx"));
1064 ptbl = pmap->pm_pdir[pdir_idx];
1066 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
1068 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
1071 * Invalidate the pdir entry as soon as possible, so that other CPUs
1072 * don't attempt to look up the page tables we are releasing.
1074 mtx_lock_spin(&tlbivax_mutex);
1077 pmap->pm_pdir[pdir_idx] = NULL;
1080 mtx_unlock_spin(&tlbivax_mutex);
1082 for (i = 0; i < PTBL_PAGES; i++) {
1083 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
1084 pa = pte_vatopa(mmu, kernel_pmap, va);
1085 m = PHYS_TO_VM_PAGE(pa);
1086 vm_page_free_zero(m);
1087 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1088 mmu_booke_kremove(mmu, va);
1091 ptbl_free_pmap_ptbl(pmap, ptbl);
1095 * Decrement ptbl pages hold count and attempt to free ptbl pages.
1096 * Called when removing pte entry from ptbl.
1098 * Return 1 if ptbl pages were freed.
1101 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1108 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1109 (pmap == kernel_pmap), pdir_idx);
1111 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1112 ("ptbl_unhold: invalid pdir_idx"));
1113 KASSERT((pmap != kernel_pmap),
1114 ("ptbl_unhold: unholding kernel ptbl!"));
1116 ptbl = pmap->pm_pdir[pdir_idx];
1118 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
1119 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
1120 ("ptbl_unhold: non kva ptbl"));
1122 /* decrement hold count */
1123 for (i = 0; i < PTBL_PAGES; i++) {
1124 pa = pte_vatopa(mmu, kernel_pmap,
1125 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1126 m = PHYS_TO_VM_PAGE(pa);
1131 * Free ptbl pages if there are no pte entries in this ptbl.
1132 * wire_count has the same value for all ptbl pages, so check the last
1135 if (m->wire_count == 0) {
1136 ptbl_free(mmu, pmap, pdir_idx);
1138 //debugf("ptbl_unhold: e (freed ptbl)\n");
1146 * Increment hold count for ptbl pages. This routine is used when a new pte
1147 * entry is being inserted into the ptbl.
1150 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1157 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
1160 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1161 ("ptbl_hold: invalid pdir_idx"));
1162 KASSERT((pmap != kernel_pmap),
1163 ("ptbl_hold: holding kernel ptbl!"));
1165 ptbl = pmap->pm_pdir[pdir_idx];
1167 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
1169 for (i = 0; i < PTBL_PAGES; i++) {
1170 pa = pte_vatopa(mmu, kernel_pmap,
1171 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1172 m = PHYS_TO_VM_PAGE(pa);
1178 /* Allocate pv_entry structure. */
1185 if (pv_entry_count > pv_entry_high_water)
1186 pagedaemon_wakeup();
1187 pv = uma_zalloc(pvzone, M_NOWAIT);
1192 /* Free pv_entry structure. */
1193 static __inline void
1194 pv_free(pv_entry_t pve)
1198 uma_zfree(pvzone, pve);
1202 /* Allocate and initialize pv_entry structure. */
1204 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
1208 //int su = (pmap == kernel_pmap);
1209 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
1210 // (u_int32_t)pmap, va, (u_int32_t)m);
1214 panic("pv_insert: no pv entries!");
1216 pve->pv_pmap = pmap;
1219 /* add to pv_list */
1220 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1221 rw_assert(&pvh_global_lock, RA_WLOCKED);
1223 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
1225 //debugf("pv_insert: e\n");
1228 /* Destroy pv entry. */
1230 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
1234 //int su = (pmap == kernel_pmap);
1235 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
1237 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1238 rw_assert(&pvh_global_lock, RA_WLOCKED);
1241 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
1242 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
1243 /* remove from pv_list */
1244 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
1245 if (TAILQ_EMPTY(&m->md.pv_list))
1246 vm_page_aflag_clear(m, PGA_WRITEABLE);
1248 /* free pv entry struct */
1254 //debugf("pv_remove: e\n");
1257 #ifdef __powerpc64__
1259 * Clean pte entry, try to free page table page if requested.
1261 * Return 1 if ptbl pages were freed, otherwise return 0.
1264 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
1269 pte = pte_find(mmu, pmap, va);
1270 KASSERT(pte != NULL, ("%s: NULL pte", __func__));
1272 if (!PTE_ISVALID(pte))
1275 /* Get vm_page_t for mapped pte. */
1276 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1278 if (PTE_ISWIRED(pte))
1279 pmap->pm_stats.wired_count--;
1281 /* Handle managed entry. */
1282 if (PTE_ISMANAGED(pte)) {
1284 /* Handle modified pages. */
1285 if (PTE_ISMODIFIED(pte))
1288 /* Referenced pages. */
1289 if (PTE_ISREFERENCED(pte))
1290 vm_page_aflag_set(m, PGA_REFERENCED);
1292 /* Remove pv_entry from pv_list. */
1293 pv_remove(pmap, va, m);
1294 } else if (m->md.pv_tracked) {
1295 pv_remove(pmap, va, m);
1296 if (TAILQ_EMPTY(&m->md.pv_list))
1297 m->md.pv_tracked = false;
1299 mtx_lock_spin(&tlbivax_mutex);
1302 tlb0_flush_entry(va);
1306 mtx_unlock_spin(&tlbivax_mutex);
1308 pmap->pm_stats.resident_count--;
1310 if (flags & PTBL_UNHOLD) {
1311 return (ptbl_unhold(mmu, pmap, va));
1317 * allocate a page of pointers to page directories, do not preallocate the
1321 pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
1323 vm_page_t mtbl [PDIR_PAGES];
1325 struct ptbl_buf *pbuf;
1331 pbuf = ptbl_buf_alloc();
1334 panic("%s: couldn't alloc kernel virtual memory", __func__);
1336 /* Allocate pdir pages, this will sleep! */
1337 for (i = 0; i < PDIR_PAGES; i++) {
1338 pidx = (PDIR_PAGES * pp2d_idx) + i;
1339 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
1340 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
1348 /* Map allocated pages into kernel_pmap. */
1349 pdir = (pte_t **) pbuf->kva;
1350 pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
1352 /* Zero whole pdir. */
1353 bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
1355 /* Add pdir to the pmap pdir bufs list. */
1356 TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
1362 * Insert PTE for a given page and virtual address.
1365 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1368 unsigned int pp2d_idx = PP2D_IDX(va);
1369 unsigned int pdir_idx = PDIR_IDX(va);
1370 unsigned int ptbl_idx = PTBL_IDX(va);
1374 /* Get the page directory pointer. */
1375 pdir = pmap->pm_pp2d[pp2d_idx];
1377 pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
1379 /* Get the page table pointer. */
1380 ptbl = pdir[pdir_idx];
1383 /* Allocate page table pages. */
1384 ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
1386 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1391 * Check if there is a valid mapping for the requested va; if there
1394 pte = &pdir[pdir_idx][ptbl_idx];
1395 if (PTE_ISVALID(pte)) {
1396 pte_remove(mmu, pmap, va, PTBL_HOLD);
1399 * pte is not used, increment hold count for ptbl
1402 if (pmap != kernel_pmap)
1403 ptbl_hold(mmu, pmap, pdir, pdir_idx);
1407 if (pdir[pdir_idx] == NULL) {
1408 if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
1409 pdir_hold(mmu, pmap, pdir);
1410 pdir[pdir_idx] = ptbl;
1412 if (pmap->pm_pp2d[pp2d_idx] == NULL)
1413 pmap->pm_pp2d[pp2d_idx] = pdir;
1416 * Insert pv_entry into pv_list for mapped page if part of managed
1419 if ((m->oflags & VPO_UNMANAGED) == 0) {
1420 flags |= PTE_MANAGED;
1422 /* Create and insert pv entry. */
1423 pv_insert(pmap, va, m);
1426 mtx_lock_spin(&tlbivax_mutex);
1429 tlb0_flush_entry(va);
1430 pmap->pm_stats.resident_count++;
1431 pte = &pdir[pdir_idx][ptbl_idx];
1432 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1433 *pte |= (PTE_VALID | flags);
1436 mtx_unlock_spin(&tlbivax_mutex);
1441 /* Return the pa for the given pmap/va. */
1443 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1448 pte = pte_find(mmu, pmap, va);
1449 if ((pte != NULL) && PTE_ISVALID(pte))
1450 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1455 /* allocate pte entries to manage (addr & mask) to (addr & mask) + size */
1457 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1464 /* Initialize kernel pdir */
1465 for (i = 0; i < kernel_pdirs; i++) {
1466 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
1467 (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
1468 for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
1469 j < PDIR_NENTRIES; j++) {
1470 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
1471 (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
1472 (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
1477 * Fill in PTEs covering kernel code and data. They are not required
1478 * for address translation, as this area is covered by static TLB1
1479 * entries, but for pte_vatopa() to work correctly with kernel area
1482 for (va = addr; va < data_end; va += PAGE_SIZE) {
1483 pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
1484 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1485 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1486 PTE_VALID | PTE_PS_4KB;
1491 * Clean pte entry, try to free page table page if requested.
1493 * Return 1 if ptbl pages were freed, otherwise return 0.
1496 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
1498 unsigned int pdir_idx = PDIR_IDX(va);
1499 unsigned int ptbl_idx = PTBL_IDX(va);
1504 //int su = (pmap == kernel_pmap);
1505 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
1506 // su, (u_int32_t)pmap, va, flags);
1508 ptbl = pmap->pm_pdir[pdir_idx];
1509 KASSERT(ptbl, ("pte_remove: null ptbl"));
1511 pte = &ptbl[ptbl_idx];
1513 if (pte == NULL || !PTE_ISVALID(pte))
1516 if (PTE_ISWIRED(pte))
1517 pmap->pm_stats.wired_count--;
1519 /* Get vm_page_t for mapped pte. */
1520 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1522 /* Handle managed entry. */
1523 if (PTE_ISMANAGED(pte)) {
1525 if (PTE_ISMODIFIED(pte))
1528 if (PTE_ISREFERENCED(pte))
1529 vm_page_aflag_set(m, PGA_REFERENCED);
1531 pv_remove(pmap, va, m);
1532 } else if (m->md.pv_tracked) {
1534 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
1535 * used. This is needed by the NCSW support code for fast
1536 * VA<->PA translation.
1538 pv_remove(pmap, va, m);
1539 if (TAILQ_EMPTY(&m->md.pv_list))
1540 m->md.pv_tracked = false;
1543 mtx_lock_spin(&tlbivax_mutex);
1546 tlb0_flush_entry(va);
1550 mtx_unlock_spin(&tlbivax_mutex);
1552 pmap->pm_stats.resident_count--;
1554 if (flags & PTBL_UNHOLD) {
1555 //debugf("pte_remove: e (unhold)\n");
1556 return (ptbl_unhold(mmu, pmap, pdir_idx));
1559 //debugf("pte_remove: e\n");
1564 * Insert PTE for a given page and virtual address.
1567 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1570 unsigned int pdir_idx = PDIR_IDX(va);
1571 unsigned int ptbl_idx = PTBL_IDX(va);
1574 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
1575 pmap == kernel_pmap, pmap, va);
1577 /* Get the page table pointer. */
1578 ptbl = pmap->pm_pdir[pdir_idx];
1581 /* Allocate page table pages. */
1582 ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
1584 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1589 * Check if there is a valid mapping for the requested
1590 * va; if there is, remove it.
1592 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
1593 if (PTE_ISVALID(pte)) {
1594 pte_remove(mmu, pmap, va, PTBL_HOLD);
1597 * pte is not used, increment hold count
1600 if (pmap != kernel_pmap)
1601 ptbl_hold(mmu, pmap, pdir_idx);
1606 * Insert pv_entry into pv_list for mapped page if part of managed
1609 if ((m->oflags & VPO_UNMANAGED) == 0) {
1610 flags |= PTE_MANAGED;
1612 /* Create and insert pv entry. */
1613 pv_insert(pmap, va, m);
1616 pmap->pm_stats.resident_count++;
1618 mtx_lock_spin(&tlbivax_mutex);
1621 tlb0_flush_entry(va);
1622 if (pmap->pm_pdir[pdir_idx] == NULL) {
1624 * If we just allocated a new page table, hook it in
1627 pmap->pm_pdir[pdir_idx] = ptbl;
1629 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
1630 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1631 *pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
1634 mtx_unlock_spin(&tlbivax_mutex);
1638 /* Return the pa for the given pmap/va. */
1640 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1645 pte = pte_find(mmu, pmap, va);
1646 if ((pte != NULL) && PTE_ISVALID(pte))
1647 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1651 /* Get a pointer to a PTE in a page table. */
1653 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1655 unsigned int pdir_idx = PDIR_IDX(va);
1656 unsigned int ptbl_idx = PTBL_IDX(va);
1658 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
1660 if (pmap->pm_pdir[pdir_idx])
1661 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
1666 /* Set up kernel page tables. */
1668 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1674 /* Initialize kernel pdir */
1675 for (i = 0; i < kernel_ptbls; i++)
1676 kernel_pmap->pm_pdir[kptbl_min + i] =
1677 (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
1680 * Fill in PTEs covering kernel code and data. They are not required
1681 * for address translation, as this area is covered by static TLB1
1682 * entries, but for pte_vatopa() to work correctly with kernel area
1685 for (va = addr; va < data_end; va += PAGE_SIZE) {
1686 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1687 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1688 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1689 PTE_VALID | PTE_PS_4KB;
1694 /**************************************************************************/
1696 /**************************************************************************/
1699 * This is called during booke_init, before the system is really initialized.
1702 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
1704 vm_paddr_t phys_kernelend;
1705 struct mem_region *mp, *mp1;
1707 vm_paddr_t s, e, sz;
1708 vm_paddr_t physsz, hwphyssz;
1709 u_int phys_avail_count;
1710 vm_size_t kstack0_sz;
1711 vm_offset_t kernel_pdir, kstack0;
1712 vm_paddr_t kstack0_phys;
1715 debugf("mmu_booke_bootstrap: entered\n");
1717 /* Set interesting system properties */
1719 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
1723 /* Initialize invalidation mutex */
1724 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
1726 /* Read TLB0 size and associativity. */
1730 * Align kernel start and end address (kernel image).
1731 * Note that kernel end does not necessarily relate to kernsize.
1732 * kernsize is the size of the kernel that is actually mapped.
1734 kernstart = trunc_page(start);
1735 data_start = round_page(kernelend);
1736 data_end = data_start;
1739 * Addresses of preloaded modules (like file systems) use
1740 * physical addresses. Make sure we relocate those into
1741 * virtual addresses.
1743 preload_addr_relocate = kernstart - kernload;
1745 /* Allocate the dynamic per-cpu area. */
1746 dpcpu = (void *)data_end;
1747 data_end += DPCPU_SIZE;
1749 /* Allocate space for the message buffer. */
1750 msgbufp = (struct msgbuf *)data_end;
1751 data_end += msgbufsize;
1752 debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1753 (uintptr_t)msgbufp, data_end);
1755 data_end = round_page(data_end);
1757 /* Allocate space for ptbl_bufs. */
1758 ptbl_bufs = (struct ptbl_buf *)data_end;
1759 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1760 debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1761 (uintptr_t)ptbl_bufs, data_end);
1763 data_end = round_page(data_end);
1765 /* Allocate PTE tables for kernel KVA. */
1766 kernel_pdir = data_end;
1767 kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
1769 #ifdef __powerpc64__
1770 kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
1771 data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
1773 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1774 debugf(" kernel ptbls: %d\n", kernel_ptbls);
1775 debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1776 kernel_pdir, data_end);
1778 debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
1779 if (data_end - kernstart > kernsize) {
1780 kernsize += tlb1_mapin_region(kernstart + kernsize,
1781 kernload + kernsize, (data_end - kernstart) - kernsize);
1783 data_end = kernstart + kernsize;
1784 debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
1787 * Clear the structures - note we can only do it safely after the
1788 * possible additional TLB1 translations are in place (above) so that
1789 * the whole range up to the currently calculated 'data_end' is covered.
1791 dpcpu_init(dpcpu, 0);
1792 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1793 #ifdef __powerpc64__
1794 memset((void *)kernel_pdir, 0,
1795 kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
1796 kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1798 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1801 /*******************************************************/
1802 /* Set the start and end of kva. */
1803 /*******************************************************/
1804 virtual_avail = round_page(data_end);
1805 virtual_end = VM_MAX_KERNEL_ADDRESS;
1807 /* Allocate KVA space for page zero/copy operations. */
1808 zero_page_va = virtual_avail;
1809 virtual_avail += PAGE_SIZE;
1810 copy_page_src_va = virtual_avail;
1811 virtual_avail += PAGE_SIZE;
1812 copy_page_dst_va = virtual_avail;
1813 virtual_avail += PAGE_SIZE;
1814 debugf("zero_page_va = 0x%08x\n", zero_page_va);
1815 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1816 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1818 /* Initialize page zero/copy mutexes. */
1819 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1820 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1822 /* Allocate KVA space for ptbl bufs. */
1823 ptbl_buf_pool_vabase = virtual_avail;
1824 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1825 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1826 ptbl_buf_pool_vabase, virtual_avail);
1828 /* Calculate corresponding physical addresses for the kernel region. */
1829 phys_kernelend = kernload + kernsize;
1830 debugf("kernel image and allocated data:\n");
1831 debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
1832 debugf(" kernstart = 0x%08x\n", kernstart);
1833 debugf(" kernsize = 0x%08x\n", kernsize);
1835 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1836 panic("mmu_booke_bootstrap: phys_avail too small");
1839 * Remove kernel physical address range from avail regions list. Page
1840 * align all regions. Non-page aligned memory isn't very interesting
1841 * to us. Also, sort the entries for ascending addresses.
1844 /* Retrieve phys/avail mem regions */
1845 mem_regions(&physmem_regions, &physmem_regions_sz,
1846 &availmem_regions, &availmem_regions_sz);
1848 cnt = availmem_regions_sz;
1849 debugf("processing avail regions:\n");
1850 for (mp = availmem_regions; mp->mr_size; mp++) {
1852 e = mp->mr_start + mp->mr_size;
1853 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
1854 /* Check whether this region holds all of the kernel. */
1855 if (s < kernload && e > phys_kernelend) {
1856 availmem_regions[cnt].mr_start = phys_kernelend;
1857 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1860 /* Look whether this region starts within the kernel. */
1861 if (s >= kernload && s < phys_kernelend) {
1862 if (e <= phys_kernelend)
1866 /* Now look whether this region ends within the kernel. */
1867 if (e > kernload && e <= phys_kernelend) {
1872 /* Now page align the start and size of the region. */
1878 debugf("%09jx-%09jx = %jx\n",
1879 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
1881 /* Check whether some memory is left here. */
1885 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1891 /* Do an insertion sort. */
1892 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1893 if (s < mp1->mr_start)
1896 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1904 availmem_regions_sz = cnt;
1906 /*******************************************************/
1907 /* Steal physical memory for kernel stack from the end */
1908 /* of the first avail region */
1909 /*******************************************************/
1910 kstack0_sz = kstack_pages * PAGE_SIZE;
1911 kstack0_phys = availmem_regions[0].mr_start +
1912 availmem_regions[0].mr_size;
1913 kstack0_phys -= kstack0_sz;
1914 availmem_regions[0].mr_size -= kstack0_sz;
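	/*
	 * For example, with kstack_pages = 4 (a typical default) and 4 KB
	 * pages, kstack0_sz is 16 KB, so the first avail region shrinks by
	 * 16 KB and kstack0_phys points at the start of the carved-out stack.
	 */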
1916 /*******************************************************/
1917 /* Fill in phys_avail table, based on availmem_regions */
1918 /*******************************************************/
1919 phys_avail_count = 0;
1922 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1924 debugf("fill in phys_avail:\n");
1925 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1927 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
1928 (uintmax_t)availmem_regions[i].mr_start,
1929 (uintmax_t)availmem_regions[i].mr_start +
1930 availmem_regions[i].mr_size,
1931 (uintmax_t)availmem_regions[i].mr_size);
1933 if (hwphyssz != 0 &&
1934 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1935 debugf(" hw.physmem adjust\n");
1936 if (physsz < hwphyssz) {
1937 phys_avail[j] = availmem_regions[i].mr_start;
1939 availmem_regions[i].mr_start +
1947 phys_avail[j] = availmem_regions[i].mr_start;
1948 phys_avail[j + 1] = availmem_regions[i].mr_start +
1949 availmem_regions[i].mr_size;
1951 physsz += availmem_regions[i].mr_size;
1953 physmem = btoc(physsz);
1955 /* Calculate the last available physical address. */
1956 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1958 Maxmem = powerpc_btop(phys_avail[i + 1]);
1960 debugf("Maxmem = 0x%08lx\n", Maxmem);
1961 debugf("phys_avail_count = %d\n", phys_avail_count);
1962 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
1963 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
1965 /*******************************************************/
1966 /* Initialize (statically allocated) kernel pmap. */
1967 /*******************************************************/
1968 PMAP_LOCK_INIT(kernel_pmap);
1969 #ifndef __powerpc64__
1970 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1973 debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
1974 kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
1975 for (i = 0; i < MAXCPU; i++) {
1976 kernel_pmap->pm_tid[i] = TID_KERNEL;
1978 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1979 tidbusy[i][TID_KERNEL] = kernel_pmap;
1982 /* Mark kernel_pmap active on all CPUs */
1983 CPU_FILL(&kernel_pmap->pm_active);
1986 * Initialize the global pv list lock.
1988 rw_init(&pvh_global_lock, "pmap pv global");
1990 /*******************************************************/
1992 /*******************************************************/
1994 /* Enter kstack0 into kernel map, provide guard page */
1995 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1996 thread0.td_kstack = kstack0;
1997 thread0.td_kstack_pages = kstack_pages;
1999 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
2000 debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
2001 kstack0_phys, kstack0_phys + kstack0_sz);
2002 debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
2003 kstack0, kstack0 + kstack0_sz);
2005 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
2006 for (i = 0; i < kstack_pages; i++) {
2007 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
2008 kstack0 += PAGE_SIZE;
2009 kstack0_phys += PAGE_SIZE;
2012 pmap_bootstrapped = 1;
2014 debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
2015 debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);
2017 debugf("mmu_booke_bootstrap: exit\n");
2024 tlb_entry_t *e, tmp;
2027 /* Prepare TLB1 image for AP processors */
2029 for (i = 0; i < TLB1_ENTRIES; i++) {
2030 tlb1_read_entry(&tmp, i);
2032 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
2033 memcpy(e++, &tmp, sizeof(tmp));
2038 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
2043 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
2044 * have the snapshot of its contents in the s/w __boot_tlb1[] table
2045 * created by tlb1_ap_prep(), so use these values directly to
2046 * (re)program AP's TLB1 hardware.
2048 * Start at index 1 because index 0 has the kernel map.
2050 for (i = 1; i < TLB1_ENTRIES; i++) {
2051 if (__boot_tlb1[i].mas1 & MAS1_VALID)
2052 tlb1_write_entry(&__boot_tlb1[i], i);
2055 set_mas4_defaults();
2060 booke_pmap_init_qpages(void)
2067 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
2068 if (pc->pc_qmap_addr == 0)
2069 panic("pmap_init_qpages: unable to allocate KVA");
2073 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
2076 * Get the physical page address for the given pmap/virtual address.
2079 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
2084 pa = pte_vatopa(mmu, pmap, va);
2091 * Extract the physical page address associated with the given
2092 * kernel virtual address.
2095 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
2101 if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
2102 p = pte_vatopa(mmu, kernel_pmap, va);
2105 /* Check TLB1 mappings */
2106 for (i = 0; i < TLB1_ENTRIES; i++) {
2107 tlb1_read_entry(&e, i);
2108 if (!(e.mas1 & MAS1_VALID))
2110 if (va >= e.virt && va < e.virt + e.size)
2111 return (e.phys + (va - e.virt));
2119 * Initialize the pmap module.
2120 * Called by vm_init, to initialize any structures that the pmap
2121 * system needs to map virtual memory.
2124 mmu_booke_init(mmu_t mmu)
2126 int shpgperproc = PMAP_SHPGPERPROC;
2129 * Initialize the address space (zone) for the pv entries. Set a
2130 * high water mark so that the system can recover from excessive
2131 * numbers of pv entries.
2133 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
2134 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
2136 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
2137 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
2139 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
2140 pv_entry_high_water = 9 * (pv_entry_max / 10);
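	/*
	 * Worked example with hypothetical values: shpgperproc = 200,
	 * maxproc = 1000 and v_page_count = 500000 give
	 * pv_entry_max = 200 * 1000 + 500000 = 700000 and
	 * pv_entry_high_water = 630000.
	 */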
2142 uma_zone_reserve_kva(pvzone, pv_entry_max);
2144 /* Pre-fill pvzone with initial number of pv entries. */
2145 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
2147 /* Initialize ptbl allocation. */
2152 * Map a list of wired pages into kernel virtual address space. This is
2153 * intended for temporary mappings which do not need page modification or
2154 * references recorded. Existing mappings in the region are overwritten.
2157 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
2162 while (count-- > 0) {
2163 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2170 * Remove page mappings from kernel virtual address space. Intended for
2171 * temporary mappings entered by mmu_booke_qenter.
2174 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
2179 while (count-- > 0) {
2180 mmu_booke_kremove(mmu, va);
2186 * Map a wired page into kernel virtual address space.
2189 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
2192 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
2196 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2201 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2202 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
2204 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2205 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
2206 flags |= PTE_PS_4KB;
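
	/*
	 * The resulting kernel mapping is valid, wired and supervisor
	 * read/write/execute; the WIMG cache attributes come from
	 * tlb_calc_wimg() for the requested memattr, and PTE_PS_4KB selects
	 * a 4 KB page size.
	 */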
2208 pte = pte_find(mmu, kernel_pmap, va);
2209 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
2211 mtx_lock_spin(&tlbivax_mutex);
2214 if (PTE_ISVALID(pte)) {
2216 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
2218 /* Flush entry from TLB0 */
2219 tlb0_flush_entry(va);
2222 *pte = PTE_RPN_FROM_PA(pa) | flags;
2224 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
2225 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
2226 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
2228 /* Flush the real memory from the instruction cache. */
2229 if ((flags & (PTE_I | PTE_G)) == 0)
2230 __syncicache((void *)va, PAGE_SIZE);
2233 mtx_unlock_spin(&tlbivax_mutex);
2237 * Remove a page from kernel page table.
2240 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
2244 CTR2(KTR_PMAP, "%s: s (va = 0x%08x)\n", __func__, va);
2246 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2247 (va <= VM_MAX_KERNEL_ADDRESS)),
2248 ("mmu_booke_kremove: invalid va"));
2250 pte = pte_find(mmu, kernel_pmap, va);
2252 if (!PTE_ISVALID(pte)) {
2254 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
2259 mtx_lock_spin(&tlbivax_mutex);
2262 /* Invalidate entry in TLB0, update PTE. */
2263 tlb0_flush_entry(va);
2267 mtx_unlock_spin(&tlbivax_mutex);
2271 * Initialize pmap associated with process 0.
2274 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
2277 PMAP_LOCK_INIT(pmap);
2278 mmu_booke_pinit(mmu, pmap);
2279 PCPU_SET(curpmap, pmap);
2283 * Initialize a preallocated and zeroed pmap structure,
2284 * such as one in a vmspace structure.
2287 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
2291 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
2292 curthread->td_proc->p_pid, curthread->td_proc->p_comm);
2294 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
2296 for (i = 0; i < MAXCPU; i++)
2297 pmap->pm_tid[i] = TID_NONE;
2298 CPU_ZERO(&pmap->pm_active);
2299 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2300 #ifdef __powerpc64__
2301 bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
2302 TAILQ_INIT(&pmap->pm_pdir_list);
2304 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
2306 TAILQ_INIT(&pmap->pm_ptbl_list);
2310 * Release any resources held by the given physical map.
2311 * Called when a pmap initialized by mmu_booke_pinit is being released.
2312 * Should only be called if the map contains no valid mappings.
2315 mmu_booke_release(mmu_t mmu, pmap_t pmap)
2318 KASSERT(pmap->pm_stats.resident_count == 0,
2319 ("pmap_release: pmap resident count %ld != 0",
2320 pmap->pm_stats.resident_count));
2324 * Insert the given physical page at the specified virtual address in the
2325 * target physical map with the protection requested. If specified the page
2326 * will be wired down.
2329 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2330 vm_prot_t prot, u_int flags, int8_t psind)
2334 rw_wlock(&pvh_global_lock);
2336 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
2338 rw_wunlock(&pvh_global_lock);
2343 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2344 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
2349 int error, su, sync;
2351 pa = VM_PAGE_TO_PHYS(m);
2352 su = (pmap == kernel_pmap);
2355 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
2356 // "pa=0x%08x prot=0x%08x flags=%#x)\n",
2357 // (u_int32_t)pmap, su, pmap->pm_tid,
2358 // (u_int32_t)m, va, pa, prot, flags);
2361 KASSERT(((va >= virtual_avail) &&
2362 (va <= VM_MAX_KERNEL_ADDRESS)),
2363 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
2365 KASSERT((va <= VM_MAXUSER_ADDRESS),
2366 ("mmu_booke_enter_locked: user pmap, non user va"));
2368 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2369 VM_OBJECT_ASSERT_LOCKED(m->object);
2371 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2374 * If there is an existing mapping, and the physical address has not
2375 * changed, this must be a protection or wiring change.
2377 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
2378 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
2381 * Before actually updating pte->flags we calculate and
2382 * prepare its new value in a helper var.
2385 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
2387 /* Wiring change, just update stats. */
2388 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
2389 if (!PTE_ISWIRED(pte)) {
2391 pmap->pm_stats.wired_count++;
2394 if (PTE_ISWIRED(pte)) {
2395 flags &= ~PTE_WIRED;
2396 pmap->pm_stats.wired_count--;
2400 if (prot & VM_PROT_WRITE) {
2401 /* Add write permissions. */
2406 if ((flags & PTE_MANAGED) != 0)
2407 vm_page_aflag_set(m, PGA_WRITEABLE);
2409 /* Handle modified pages, sense modify status. */
2412 * The PTE_MODIFIED flag could have been set by a TLB miss
2413 * since we last read it (above), and other CPUs could have
2414 * updated it in the meantime, so check the PTE directly
2415 * rather than relying on the saved local flags copy.
2418 if (PTE_ISMODIFIED(pte))
2422 if (prot & VM_PROT_EXECUTE) {
2428 * Check existing flags for execute permissions: if we
2429 * are turning execute permissions on, the icache should be flushed.
2432 if ((*pte & (PTE_UX | PTE_SX)) == 0)
2436 flags &= ~PTE_REFERENCED;
2439 * The new flags value is all calculated -- only now actually update the PTE.
2442 mtx_lock_spin(&tlbivax_mutex);
2445 tlb0_flush_entry(va);
2446 *pte &= ~PTE_FLAGS_MASK;
2450 mtx_unlock_spin(&tlbivax_mutex);
2454 * If there is an existing mapping, but it's for a different
2455 * physical address, pte_enter() will delete the old mapping.
2457 //if ((pte != NULL) && PTE_ISVALID(pte))
2458 // debugf("mmu_booke_enter_locked: replace\n");
2460 // debugf("mmu_booke_enter_locked: new\n");
2462 /* Now set up the flags and install the new mapping. */
2463 flags = (PTE_SR | PTE_VALID);
2469 if (prot & VM_PROT_WRITE) {
2474 if ((m->oflags & VPO_UNMANAGED) == 0)
2475 vm_page_aflag_set(m, PGA_WRITEABLE);
2478 if (prot & VM_PROT_EXECUTE) {
2484 /* If it's wired, update stats. */
2485 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2488 error = pte_enter(mmu, pmap, m, va, flags,
2489 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
2491 return (KERN_RESOURCE_SHORTAGE);
2493 if ((flags & PTE_WIRED) != 0)
2494 pmap->pm_stats.wired_count++;
2496 /* Flush the real memory from the instruction cache. */
2497 if (prot & VM_PROT_EXECUTE)
2501 if (sync && (su || pmap == PCPU_GET(curpmap))) {
2502 __syncicache((void *)va, PAGE_SIZE);
2506 return (KERN_SUCCESS);
2510 * Maps a sequence of resident pages belonging to the same object.
2511 * The sequence begins with the given page m_start. This page is
2512 * mapped at the given virtual address start. Each subsequent page is
2513 * mapped at a virtual address that is offset from start by the same
2514 * amount as the page is offset from m_start within the object. The
2515 * last page in the sequence is the page with the largest offset from
2516 * m_start that can be mapped at a virtual address less than the given
2517 * virtual address end. Not every virtual page between start and end
2518 * is mapped; only those for which a resident page exists with the
2519 * corresponding offset from m_start are mapped.
2522 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
2523 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
2526 vm_pindex_t diff, psize;
2528 VM_OBJECT_ASSERT_LOCKED(m_start->object);
2530 psize = atop(end - start);
2532 rw_wlock(&pvh_global_lock);
2534 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2535 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
2536 prot & (VM_PROT_READ | VM_PROT_EXECUTE),
2537 PMAP_ENTER_NOSLEEP, 0);
2538 m = TAILQ_NEXT(m, listq);
2540 rw_wunlock(&pvh_global_lock);
2545 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2549 rw_wlock(&pvh_global_lock);
2551 mmu_booke_enter_locked(mmu, pmap, va, m,
2552 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
2554 rw_wunlock(&pvh_global_lock);
2559 * Remove the given range of addresses from the specified map.
2561 * It is assumed that the start and end are properly rounded to the page size.
2564 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
2569 int su = (pmap == kernel_pmap);
2571 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
2572 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
2575 KASSERT(((va >= virtual_avail) &&
2576 (va <= VM_MAX_KERNEL_ADDRESS)),
2577 ("mmu_booke_remove: kernel pmap, non kernel va"));
2579 KASSERT((va <= VM_MAXUSER_ADDRESS),
2580 ("mmu_booke_remove: user pmap, non user va"));
2583 if (PMAP_REMOVE_DONE(pmap)) {
2584 //debugf("mmu_booke_remove: e (empty)\n");
2588 hold_flag = PTBL_HOLD_FLAG(pmap);
2589 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
2591 rw_wlock(&pvh_global_lock);
2593 for (; va < endva; va += PAGE_SIZE) {
2594 pte = pte_find(mmu, pmap, va);
2595 if ((pte != NULL) && PTE_ISVALID(pte))
2596 pte_remove(mmu, pmap, va, hold_flag);
2599 rw_wunlock(&pvh_global_lock);
2601 //debugf("mmu_booke_remove: e\n");
2605 * Remove physical page from all pmaps in which it resides.
2608 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
2613 rw_wlock(&pvh_global_lock);
2614 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
2615 pvn = TAILQ_NEXT(pv, pv_link);
2617 PMAP_LOCK(pv->pv_pmap);
2618 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
2619 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
2620 PMAP_UNLOCK(pv->pv_pmap);
2622 vm_page_aflag_clear(m, PGA_WRITEABLE);
2623 rw_wunlock(&pvh_global_lock);
2627 * Map a range of physical addresses into kernel virtual address space.
2630 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2631 vm_paddr_t pa_end, int prot)
2633 vm_offset_t sva = *virt;
2634 vm_offset_t va = sva;
2636 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
2637 // sva, pa_start, pa_end);
2639 while (pa_start < pa_end) {
2640 mmu_booke_kenter(mmu, va, pa_start);
2642 pa_start += PAGE_SIZE;
2646 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
2651 * The pmap must be activated before its address space can be accessed in any way.
2655 mmu_booke_activate(mmu_t mmu, struct thread *td)
2660 pmap = &td->td_proc->p_vmspace->vm_pmap;
2662 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
2663 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2665 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
2669 cpuid = PCPU_GET(cpuid);
2670 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2671 PCPU_SET(curpmap, pmap);
2673 if (pmap->pm_tid[cpuid] == TID_NONE)
2676 /* Load PID0 register with pmap tid value. */
2677 mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
2678 __asm __volatile("isync");
2680 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
2684 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
2685 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
2689 * Deactivate the specified process's address space.
2692 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
2696 pmap = &td->td_proc->p_vmspace->vm_pmap;
2698 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
2699 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2701 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
2703 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
2704 PCPU_SET(curpmap, NULL);
2708 * Copy the range specified by src_addr/len
2709 * from the source map to the range dst_addr/len
2710 * in the destination map.
2712 * This routine is only advisory and need not do anything.
2715 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2716 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2722 * Set the physical protection on the specified range of this map as requested.
2725 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2732 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2733 mmu_booke_remove(mmu, pmap, sva, eva);
2737 if (prot & VM_PROT_WRITE)
2741 for (va = sva; va < eva; va += PAGE_SIZE) {
2742 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2743 if (PTE_ISVALID(pte)) {
2744 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2746 mtx_lock_spin(&tlbivax_mutex);
2749 /* Handle modified pages. */
2750 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
2753 tlb0_flush_entry(va);
2754 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2757 mtx_unlock_spin(&tlbivax_mutex);
2765 * Clear the write and modified bits in each of the given page's mappings.
2768 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2773 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2774 ("mmu_booke_remove_write: page %p is not managed", m));
2777 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2778 * set by another thread while the object is locked. Thus,
2779 * if PGA_WRITEABLE is clear, no page table entries need updating.
2781 VM_OBJECT_ASSERT_WLOCKED(m->object);
2782 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2784 rw_wlock(&pvh_global_lock);
2785 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2786 PMAP_LOCK(pv->pv_pmap);
2787 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2788 if (PTE_ISVALID(pte)) {
2789 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2791 mtx_lock_spin(&tlbivax_mutex);
2794 /* Handle modified pages. */
2795 if (PTE_ISMODIFIED(pte))
2798 /* Flush mapping from TLB0. */
2799 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2802 mtx_unlock_spin(&tlbivax_mutex);
2805 PMAP_UNLOCK(pv->pv_pmap);
2807 vm_page_aflag_clear(m, PGA_WRITEABLE);
2808 rw_wunlock(&pvh_global_lock);
2812 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2821 va = trunc_page(va);
2822 sz = round_page(sz);
2824 rw_wlock(&pvh_global_lock);
2825 pmap = PCPU_GET(curpmap);
2826 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2829 pte = pte_find(mmu, pm, va);
2830 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
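/*
 * __syncicache() works on a virtual address in the currently active
 * address space.  If the target pmap is neither the kernel pmap nor the
 * pmap running on this CPU, the page is entered temporarily into the
 * active pmap below, synced, and removed again.
 */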
2836 /* Create a mapping in the active pmap. */
2838 m = PHYS_TO_VM_PAGE(pa);
2840 pte_enter(mmu, pmap, m, addr,
2841 PTE_SR | PTE_VALID | PTE_UR, FALSE);
2842 __syncicache((void *)addr, PAGE_SIZE);
2843 pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2846 __syncicache((void *)va, PAGE_SIZE);
2851 rw_wunlock(&pvh_global_lock);
2855 * Atomically extract and hold the physical page with the given
2856 * pmap and virtual address pair if that mapping permits the given
2860 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2872 pte = pte_find(mmu, pmap, va);
2873 if ((pte != NULL) && PTE_ISVALID(pte)) {
2874 if (pmap == kernel_pmap)
2879 if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2880 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2882 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2893 * Initialize a vm_page's machine-dependent fields.
2896 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2899 m->md.pv_tracked = 0;
2900 TAILQ_INIT(&m->md.pv_list);
2904 * mmu_booke_zero_page_area zeros the specified hardware page by
2905 * mapping it into virtual memory and using bzero to clear its contents.
2908 * off and size must reside within a single page.
2911 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2915 /* XXX KASSERT off and size are within a single page? */
2917 mtx_lock(&zero_page_mutex);
2920 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2921 bzero((caddr_t)va + off, size);
2922 mmu_booke_kremove(mmu, va);
2924 mtx_unlock(&zero_page_mutex);
2928 * mmu_booke_zero_page zeros the specified hardware page.
2931 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2933 vm_offset_t off, va;
2935 mtx_lock(&zero_page_mutex);
2938 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2939 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
2940 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
2941 mmu_booke_kremove(mmu, va);
2943 mtx_unlock(&zero_page_mutex);
2947 * mmu_booke_copy_page copies the specified (machine independent) page by
2948 * mapping the page into virtual memory and using memcpy to copy the page,
2949 * one machine dependent page at a time.
2952 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2954 vm_offset_t sva, dva;
2956 sva = copy_page_src_va;
2957 dva = copy_page_dst_va;
2959 mtx_lock(&copy_page_mutex);
2960 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2961 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2962 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2963 mmu_booke_kremove(mmu, dva);
2964 mmu_booke_kremove(mmu, sva);
2965 mtx_unlock(&copy_page_mutex);
2969 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
2970 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
2973 vm_offset_t a_pg_offset, b_pg_offset;
2976 mtx_lock(&copy_page_mutex);
2977 while (xfersize > 0) {
2978 a_pg_offset = a_offset & PAGE_MASK;
2979 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2980 mmu_booke_kenter(mmu, copy_page_src_va,
2981 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
2982 a_cp = (char *)copy_page_src_va + a_pg_offset;
2983 b_pg_offset = b_offset & PAGE_MASK;
2984 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2985 mmu_booke_kenter(mmu, copy_page_dst_va,
2986 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
2987 b_cp = (char *)copy_page_dst_va + b_pg_offset;
2988 bcopy(a_cp, b_cp, cnt);
2989 mmu_booke_kremove(mmu, copy_page_dst_va);
2990 mmu_booke_kremove(mmu, copy_page_src_va);
2995 mtx_unlock(&copy_page_mutex);
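/*
 * Map the given page at this CPU's private quick-map address.  The per-CPU
 * qmap_addr page is reserved at SYSINIT time (see booke_pmap_init_qpages()
 * above), so only the local TLB entry needs to be invalidated before the
 * slot is reused.
 */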
2999 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
3006 paddr = VM_PAGE_TO_PHYS(m);
3008 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
3009 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
3010 flags |= PTE_PS_4KB;
3013 qaddr = PCPU_GET(qmap_addr);
3015 pte = pte_find(mmu, kernel_pmap, qaddr);
3017 KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
3020 * XXX: tlbivax is broadcast to other cores, but qaddr should
3021 * not be present in other TLBs. Is there a better instruction
3022 * sequence to use? Or just forget it & use mmu_booke_kenter()...
3024 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
3025 __asm __volatile("isync; msync");
3027 *pte = PTE_RPN_FROM_PA(paddr) | flags;
3029 /* Flush the real memory from the instruction cache. */
3030 if ((flags & (PTE_I | PTE_G)) == 0)
3031 __syncicache((void *)qaddr, PAGE_SIZE);
3037 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
3041 pte = pte_find(mmu, kernel_pmap, addr);
3043 KASSERT(PCPU_GET(qmap_addr) == addr,
3044 ("mmu_booke_quick_remove_page: invalid address"));
3046 ("mmu_booke_quick_remove_page: PTE not in use"));
3053 * Return whether or not the specified physical page was modified
3054 * in any of the physical maps.
3057 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
3063 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3064 ("mmu_booke_is_modified: page %p is not managed", m));
3068 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3069 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
3070 * is clear, no PTEs can be modified.
3072 VM_OBJECT_ASSERT_WLOCKED(m->object);
3073 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3075 rw_wlock(&pvh_global_lock);
3076 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3077 PMAP_LOCK(pv->pv_pmap);
3078 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3080 if (PTE_ISMODIFIED(pte))
3083 PMAP_UNLOCK(pv->pv_pmap);
3087 rw_wunlock(&pvh_global_lock);
3092 * Return whether or not the specified virtual address is eligible to be prefaulted.
3096 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
3103 * Return whether or not the specified physical page was referenced
3104 * in any physical maps.
3107 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
3113 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3114 ("mmu_booke_is_referenced: page %p is not managed", m));
3116 rw_wlock(&pvh_global_lock);
3117 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3118 PMAP_LOCK(pv->pv_pmap);
3119 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3121 if (PTE_ISREFERENCED(pte))
3124 PMAP_UNLOCK(pv->pv_pmap);
3128 rw_wunlock(&pvh_global_lock);
3133 * Clear the modify bits on the specified physical page.
3136 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
3141 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3142 ("mmu_booke_clear_modify: page %p is not managed", m));
3143 VM_OBJECT_ASSERT_WLOCKED(m->object);
3144 KASSERT(!vm_page_xbusied(m),
3145 ("mmu_booke_clear_modify: page %p is exclusive busied", m));
3148 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
3149 * If the object containing the page is locked and the page is not
3150 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3152 if ((m->aflags & PGA_WRITEABLE) == 0)
3154 rw_wlock(&pvh_global_lock);
3155 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3156 PMAP_LOCK(pv->pv_pmap);
3157 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3159 mtx_lock_spin(&tlbivax_mutex);
3162 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
3163 tlb0_flush_entry(pv->pv_va);
3164 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
3169 mtx_unlock_spin(&tlbivax_mutex);
3171 PMAP_UNLOCK(pv->pv_pmap);
3173 rw_wunlock(&pvh_global_lock);
3177 * Return a count of reference bits for a page, clearing those bits.
3178 * It is not necessary for every reference bit to be cleared, but it
3179 * is necessary that 0 only be returned when there are truly no
3180 * reference bits set.
3182 * As an optimization, update the page's dirty field if a modified bit is
3183 * found while counting reference bits. This opportunistic update can be
3184 * performed at low cost and can eliminate the need for some future calls
3185 * to pmap_is_modified(). However, since this function stops after
3186 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3187 * dirty pages. Those dirty pages will only be detected by a future call
3188 * to pmap_is_modified().
3191 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
3197 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3198 ("mmu_booke_ts_referenced: page %p is not managed", m));
3200 rw_wlock(&pvh_global_lock);
3201 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3202 PMAP_LOCK(pv->pv_pmap);
3203 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3205 if (PTE_ISMODIFIED(pte))
3207 if (PTE_ISREFERENCED(pte)) {
3208 mtx_lock_spin(&tlbivax_mutex);
3211 tlb0_flush_entry(pv->pv_va);
3212 *pte &= ~PTE_REFERENCED;
3215 mtx_unlock_spin(&tlbivax_mutex);
3217 if (++count >= PMAP_TS_REFERENCED_MAX) {
3218 PMAP_UNLOCK(pv->pv_pmap);
3223 PMAP_UNLOCK(pv->pv_pmap);
3225 rw_wunlock(&pvh_global_lock);
3230 * Clear the wired attribute from the mappings for the specified range of
3231 * addresses in the given pmap. Every valid mapping within that range must
3232 * have the wired attribute set. In contrast, invalid mappings cannot have
3233 * the wired attribute set, so they are ignored.
3235 * The wired attribute of the page table entry is not a hardware feature, so
3236 * there is no need to invalidate any TLB entries.
3239 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3245 for (va = sva; va < eva; va += PAGE_SIZE) {
3246 if ((pte = pte_find(mmu, pmap, va)) != NULL &&
3248 if (!PTE_ISWIRED(pte))
3249 panic("mmu_booke_unwire: pte %p isn't wired",
3252 pmap->pm_stats.wired_count--;
3260 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
3261 * page. This count may be changed upwards or downwards in the future; it is
3262 * only necessary that true be returned for a small subset of pmaps for proper page aging.
3266 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
3272 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3273 ("mmu_booke_page_exists_quick: page %p is not managed", m));
3276 rw_wlock(&pvh_global_lock);
3277 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3278 if (pv->pv_pmap == pmap) {
3285 rw_wunlock(&pvh_global_lock);
3290 * Return the number of managed mappings to the given physical page that are wired.
3294 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
3300 if ((m->oflags & VPO_UNMANAGED) != 0)
3302 rw_wlock(&pvh_global_lock);
3303 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3304 PMAP_LOCK(pv->pv_pmap);
3305 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
3306 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
3308 PMAP_UNLOCK(pv->pv_pmap);
3310 rw_wunlock(&pvh_global_lock);
3315 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3321 * This currently does not work for entries that
3322 * overlap TLB1 entries.
3324 for (i = 0; i < TLB1_ENTRIES; i ++) {
3325 if (tlb1_iomapped(i, pa, size, &va) == 0)
3333 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
3339 /* Minidumps are based on virtual memory addresses. */
3341 *va = (void *)(vm_offset_t)pa;
3345 /* Raw physical memory dumps don't have a virtual address. */
3346 /* We always map a 256MB page at 256M. */
3347 gran = 256 * 1024 * 1024;
3348 ppa = rounddown2(pa, gran);
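/*
 * Align the window down to a 256MB boundary; if the requested range spills
 * past the end of that window (sz > gran - ofs), a second, adjacent 256MB
 * entry is wired below.
 */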
3351 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
3353 if (sz > (gran - ofs))
3354 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
3359 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
3367 /* Minidumps are based on virtual memory addresses. */
3368 /* Nothing to do... */
3372 for (i = 0; i < TLB1_ENTRIES; i++) {
3373 tlb1_read_entry(&e, i);
3374 if (!(e.mas1 & MAS1_VALID))
3378 /* Raw physical memory dumps don't have a virtual address. */
3383 tlb1_write_entry(&e, i);
3385 gran = 256 * 1024 * 1024;
3386 ppa = rounddown2(pa, gran);
3388 if (sz > (gran - ofs)) {
3393 tlb1_write_entry(&e, i);
3397 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3400 mmu_booke_scan_init(mmu_t mmu)
3407 /* Initialize phys. segments for dumpsys(). */
3408 memset(&dump_map, 0, sizeof(dump_map));
3409 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
3410 &availmem_regions_sz);
3411 for (i = 0; i < physmem_regions_sz; i++) {
3412 dump_map[i].pa_start = physmem_regions[i].mr_start;
3413 dump_map[i].pa_size = physmem_regions[i].mr_size;
3418 /* Virtual segments for minidumps: */
3419 memset(&dump_map, 0, sizeof(dump_map));
3421 /* 1st: kernel .data and .bss. */
3422 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3423 dump_map[0].pa_size =
3424 round_page((uintptr_t)_end) - dump_map[0].pa_start;
3426 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3427 dump_map[1].pa_start = data_start;
3428 dump_map[1].pa_size = data_end - data_start;
3430 /* 3rd: kernel VM. */
3431 va = dump_map[1].pa_start + dump_map[1].pa_size;
3432 /* Find start of next chunk (from va). */
3433 while (va < virtual_end) {
3434 /* Don't dump the buffer cache. */
3435 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3436 va = kmi.buffer_eva;
3439 pte = pte_find(mmu, kernel_pmap, va);
3440 if (pte != NULL && PTE_ISVALID(pte))
3444 if (va < virtual_end) {
3445 dump_map[2].pa_start = va;
3447 /* Find last page in chunk. */
3448 while (va < virtual_end) {
3449 /* Don't run into the buffer cache. */
3450 if (va == kmi.buffer_sva)
3452 pte = pte_find(mmu, kernel_pmap, va);
3453 if (pte == NULL || !PTE_ISVALID(pte))
3457 dump_map[2].pa_size = va - dump_map[2].pa_start;
3462 * Map a set of physical memory pages into the kernel virtual address space.
3463 * Return a pointer to where it is mapped. This routine is intended to be used
3464 * for mapping device memory, NOT real memory.
3467 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3470 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
3474 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3478 uintptr_t va, tmpva;
3483 * Check if this is premapped in TLB1. Note: this should probably also
3484 * check whether a sequence of TLB1 entries exists that matches the
3485 * requirement, but for now only the easy case is checked.
3487 for (i = 0; i < TLB1_ENTRIES; i++) {
3488 tlb1_read_entry(&e, i);
3489 if (!(e.mas1 & MAS1_VALID))
3492 (pa + size) <= (e.phys + e.size) &&
3493 (ma == VM_MEMATTR_DEFAULT ||
3494 tlb_calc_wimg(pa, ma) ==
3495 (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
3496 return (void *)(e.virt +
3497 (vm_offset_t)(pa - e.phys));
3500 size = roundup(size, PAGE_SIZE);
3503 * The device mapping area is between VM_MAXUSER_ADDRESS and
3504 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing.
3506 #ifdef SPARSE_MAPDEV
3508 * With a sparse mapdev, align to the largest starting region. This
3509 * could feasibly be optimized for a 'best-fit' alignment, but that
3510 * calculation could be very costly.
3511 * Align to the smaller of:
3512 * - first set bit in overlap of (pa & size mask)
3513 * - largest size envelope
3515 * It's possible the device mapping may start at a PA that's not larger
3516 * than the size mask, so we need to offset in to maximize the TLB entry
3517 * range and minimize the number of used TLB entries.
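 * In practice this means the low bits of the chosen va are made congruent
 * with pa modulo the selected alignment (the 'va = roundup(...) | ...'
 * computation below), so that one large TLB1 entry can cover the mapping.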
3520 tmpva = tlb1_map_base;
3521 sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
3522 sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
3523 va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
3524 #ifdef __powerpc64__
3525 } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
3527 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
3530 #ifdef __powerpc64__
3531 va = atomic_fetchadd_long(&tlb1_map_base, size);
3533 va = atomic_fetchadd_int(&tlb1_map_base, size);
3539 sz = 1 << (ilog2(size) & ~1);
3540 /* Align size to PA */
3544 } while (pa % sz != 0);
3546 /* Now align from there to VA */
3550 } while (va % sz != 0);
3553 printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
3554 va, (uintmax_t)pa, sz);
3555 if (tlb1_set_entry(va, pa, sz,
3556 _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0)
3567 * 'Unmap' a range mapped by mmu_booke_mapdev().
3570 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
3572 #ifdef SUPPORTS_SHRINKING_TLB1
3573 vm_offset_t base, offset;
3576 * Unmap only if this is inside kernel virtual space.
3578 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
3579 base = trunc_page(va);
3580 offset = va & PAGE_MASK;
3581 size = roundup(offset + size, PAGE_SIZE);
3582 kva_free(base, size);
3588 * mmu_booke_object_init_pt preloads the ptes for a given object into the
3589 * specified pmap. This eliminates the blast of soft faults on process startup
3590 * and immediately after an mmap.
3593 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3594 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3597 VM_OBJECT_ASSERT_WLOCKED(object);
3598 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3599 ("mmu_booke_object_init_pt: non-device object"));
3603 * Perform the pmap work for mincore.
3606 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3607 vm_paddr_t *locked_pa)
3610 /* XXX: this should be implemented at some point */
3615 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
3623 /* Check TLB1 mappings */
3624 for (i = 0; i < TLB1_ENTRIES; i++) {
3625 tlb1_read_entry(&e, i);
3626 if (!(e.mas1 & MAS1_VALID))
3628 if (addr >= e.virt && addr < e.virt + e.size)
3631 if (i < TLB1_ENTRIES) {
3632 /* Only allow full mappings to be modified for now. */
3633 /* Validate the range. */
3634 for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
3635 tlb1_read_entry(&e, j);
3636 if (va != e.virt || (sz - (va - addr) < e.size))
3639 for (va = addr; va < addr + sz; va += e.size, i++) {
3640 tlb1_read_entry(&e, i);
3641 e.mas2 &= ~MAS2_WIMGE_MASK;
3642 e.mas2 |= tlb_calc_wimg(e.phys, mode);
3645 * Write it out to the TLB. Should really re-sync with other cores.
3648 tlb1_write_entry(&e, i);
3653 /* Not in TLB1, try through pmap */
3654 /* First validate the range. */
3655 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3656 pte = pte_find(mmu, kernel_pmap, va);
3657 if (pte == NULL || !PTE_ISVALID(pte))
3661 mtx_lock_spin(&tlbivax_mutex);
3663 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3664 pte = pte_find(mmu, kernel_pmap, va);
3665 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
3666 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
3667 tlb0_flush_entry(va);
3670 mtx_unlock_spin(&tlbivax_mutex);
3675 /**************************************************************************/
3677 /**************************************************************************/
3680 * Allocate a TID. If necessary, steal one from someone else.
3681 * The new TID is flushed from the TLB before returning.
3684 tid_alloc(pmap_t pmap)
3689 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
3691 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
3693 thiscpu = PCPU_GET(cpuid);
3695 tid = PCPU_GET(tid_next);
3698 PCPU_SET(tid_next, tid + 1);
3700 /* If we are stealing a TID, clear the relevant pmap's field */
3701 if (tidbusy[thiscpu][tid] != NULL) {
3703 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
3705 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
3707 /* Flush all entries from TLB0 matching this TID. */
3711 tidbusy[thiscpu][tid] = pmap;
3712 pmap->pm_tid[thiscpu] = tid;
3713 __asm __volatile("msync; isync");
3715 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
3716 PCPU_GET(tid_next));
3721 /**************************************************************************/
3723 /**************************************************************************/
3726 #ifdef __powerpc64__
3727 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
3729 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3740 if (mas1 & MAS1_VALID)
3745 if (mas1 & MAS1_IPROT)
3750 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3751 tid = MAS1_GETTID(mas1);
3753 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3756 size = tsize2size(tsize);
3758 debugf("%3d: (%s) [AS=%d] "
3759 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
3760 "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3761 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
3764 /* Convert TLB0 va and way number to tlb0[] table index. */
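/*
 * Entries of way 'w' occupy slots [w * TLB0_ENTRIES_PER_WAY,
 * (w + 1) * TLB0_ENTRIES_PER_WAY); the set index comes from the EPN bits of
 * 'va' selected by MAS2_TLB0_ENTRY_IDX_MASK.  For example, with a
 * hypothetical 128 sets per way, way 2 and set index 5 give
 * idx = 2 * 128 + 5 = 261.
 */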
3765 static inline unsigned int
3766 tlb0_tableidx(vm_offset_t va, unsigned int way)
3770 idx = (way * TLB0_ENTRIES_PER_WAY);
3771 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
3776 * Invalidate TLB0 entry.
3779 tlb0_flush_entry(vm_offset_t va)
3782 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
3784 mtx_assert(&tlbivax_mutex, MA_OWNED);
3786 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
3787 __asm __volatile("isync; msync");
3788 __asm __volatile("tlbsync; msync");
3790 CTR1(KTR_PMAP, "%s: e", __func__);
3793 /* Print out contents of the MAS registers for each TLB0 entry */
3795 tlb0_print_tlbentries(void)
3797 uint32_t mas0, mas1, mas3, mas7;
3798 #ifdef __powerpc64__
3803 int entryidx, way, idx;
3805 debugf("TLB0 entries:\n");
3806 for (way = 0; way < TLB0_WAYS; way ++)
3807 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
3809 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
3810 mtspr(SPR_MAS0, mas0);
3811 __asm __volatile("isync");
3813 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
3814 mtspr(SPR_MAS2, mas2);
3816 __asm __volatile("isync; tlbre");
3818 mas1 = mfspr(SPR_MAS1);
3819 mas2 = mfspr(SPR_MAS2);
3820 mas3 = mfspr(SPR_MAS3);
3821 mas7 = mfspr(SPR_MAS7);
3823 idx = tlb0_tableidx(mas2, way);
3824 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3828 /**************************************************************************/
3830 /**************************************************************************/
3833 * TLB1 mapping notes:
3835 * TLB1[0] Kernel text and data.
3836 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
3837 * windows, other device mappings.
3841 * Read an entry from given TLB1 slot.
3844 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
3849 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
3852 __asm __volatile("wrteei 0");
3854 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
3855 mtspr(SPR_MAS0, mas0);
3856 __asm __volatile("isync; tlbre");
3858 entry->mas1 = mfspr(SPR_MAS1);
3859 entry->mas2 = mfspr(SPR_MAS2);
3860 entry->mas3 = mfspr(SPR_MAS3);
3862 switch ((mfpvr() >> 16) & 0xFFFF) {
3867 entry->mas7 = mfspr(SPR_MAS7);
3875 entry->virt = entry->mas2 & MAS2_EPN_MASK;
3876 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
3877 (entry->mas3 & MAS3_RPN);
3879 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
3882 struct tlbwrite_args {
3888 tlb1_write_entry_int(void *arg)
3890 struct tlbwrite_args *args = arg;
3894 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx);
3896 mtspr(SPR_MAS0, mas0);
3897 __asm __volatile("isync");
3898 mtspr(SPR_MAS1, args->e->mas1);
3899 __asm __volatile("isync");
3900 mtspr(SPR_MAS2, args->e->mas2);
3901 __asm __volatile("isync");
3902 mtspr(SPR_MAS3, args->e->mas3);
3903 __asm __volatile("isync");
3904 switch ((mfpvr() >> 16) & 0xFFFF) {
3909 __asm __volatile("isync");
3912 mtspr(SPR_MAS7, args->e->mas7);
3913 __asm __volatile("isync");
3919 __asm __volatile("tlbwe; isync; msync");
3924 tlb1_write_entry_sync(void *arg)
3926 /* Empty synchronization point for smp_rendezvous(). */
3930 * Write given entry to TLB1 hardware.
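 *
 * Shared (kernel-wide) entries must be visible on every CPU, so once SMP is
 * up the write is performed via smp_rendezvous(); otherwise it is done
 * locally with external interrupts disabled ("wrteei 0").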
3933 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
3935 struct tlbwrite_args args;
3941 if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
3943 smp_rendezvous(tlb1_write_entry_sync,
3944 tlb1_write_entry_int,
3945 tlb1_write_entry_sync, &args);
3952 __asm __volatile("wrteei 0");
3953 tlb1_write_entry_int(&args);
3959 * Return the largest uint value log such that 2^log <= num.
3962 ilog2(unsigned int num)
3966 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
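/*
 * cntlzw counts the leading zero bits of 'num', so the largest log with
 * 2^log <= num is 31 - lz; e.g. num = 0x100000 (1 MB) gives lz = 11 and
 * log = 20.
 */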
3971 * Convert TLB TSIZE value to mapped region size.
3974 tsize2size(unsigned int tsize)
3979 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
3982 return ((1 << (2 * tsize)) * 1024);
3986 * Convert region size (must be power of 4) to TLB TSIZE value.
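 *
 * Worked examples: TSIZE = 1 -> 4 KB, TSIZE = 5 -> 1 MB, TSIZE = 7 -> 16 MB,
 * TSIZE = 9 -> 256 MB; conversely, for a 256 MB region,
 * ilog2(size) / 2 - 5 = 28 / 2 - 5 = 9.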
3989 size2tsize(vm_size_t size)
3992 return (ilog2(size) / 2 - 5);
3996 * Register permanent kernel mapping in TLB1.
3998 * Entries are created starting from the lowest free index and are not
3999 * supposed to be invalidated.
4002 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
4009 for (index = 0; index < TLB1_ENTRIES; index++) {
4010 tlb1_read_entry(&e, index);
4011 if ((e.mas1 & MAS1_VALID) == 0)
4013 /* Check if we're just updating the flags, and update them. */
4014 if (e.phys == pa && e.virt == va && e.size == size) {
4015 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4016 tlb1_write_entry(&e, index);
4020 if (index >= TLB1_ENTRIES) {
4021 printf("tlb1_set_entry: TLB1 full!\n");
4025 /* Convert size to TSIZE */
4026 tsize = size2tsize(size);
4028 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
4029 /* XXX TS is hard coded to 0 for now as we only use a single address space */
4030 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
4035 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
4036 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
4037 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4039 /* Set supervisor RWX permission bits */
4040 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
4041 e.mas7 = (pa >> 32) & MAS7_RPN;
4043 tlb1_write_entry(&e, index);
4046 * XXX in general TLB1 updates should be propagated between CPUs,
4047 * since current design assumes to have the same TLB1 set-up on all
4054 * Map a contiguous RAM region into TLB1, using at most
4055 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
4057 * If necessary, round up the last entry size and return the total size
4058 * used by all allocated entries.
4061 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
4063 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
4064 vm_size_t mapped, pgsz, base, mask;
4067 /* Round up to the next 1M */
4068 size = roundup2(size, 1 << 20);
4073 pgsz = 64*1024*1024;
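/*
 * Cover the region greedily with power-of-4 sized pages, starting at 64MB
 * and shrinking to fit the remainder.  If the entry budget
 * (KERNEL_REGION_MAX_TLB_ENTRIES) runs out first, trailing entries are
 * replaced by one larger page, possibly over-mapping (see below).
 */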
4074 while (mapped < size) {
4075 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
4076 while (pgsz > (size - mapped))
4082 /* We under-map. Correct for this. */
4083 if (mapped < size) {
4084 while (pgs[idx - 1] == pgsz) {
4088 /* XXX We may increase beyond our starting point. */
4097 /* Align address to the boundary */
4099 va = (va + mask) & ~mask;
4100 pa = (pa + mask) & ~mask;
4103 for (idx = 0; idx < nents; idx++) {
4105 debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
4106 tlb1_set_entry(va, pa, pgsz,
4107 _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
4112 mapped = (va - base);
4113 printf("mapped size 0x%"PRI0ptrX" (wasted space 0x%"PRIxPTR")\n",
4114 mapped, mapped - size);
4119 * TLB1 initialization routine, to be called after the very first
4120 * assembler level setup done in locore.S.
4125 uint32_t mas0, mas1, mas2, mas3, mas7;
4130 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
4131 mtspr(SPR_MAS0, mas0);
4132 __asm __volatile("isync; tlbre");
4134 mas1 = mfspr(SPR_MAS1);
4135 mas2 = mfspr(SPR_MAS2);
4136 mas3 = mfspr(SPR_MAS3);
4137 mas7 = mfspr(SPR_MAS7);
4139 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
4142 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4143 kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
4145 /* Setup TLB miss defaults */
4146 set_mas4_defaults();
4150 * pmap_early_io_unmap() should be used shortly after a matching
4151 * pmap_early_io_map(), as in the following snippet:
4153 * x = pmap_early_io_map(...);
4154 * <do something with x>
4155 * pmap_early_io_unmap(x, size);
4157 * with no other early I/O mappings made in between.
4160 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
4166 size = roundup(size, PAGE_SIZE);
4168 for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
4169 tlb1_read_entry(&e, i);
4170 if (!(e.mas1 & MAS1_VALID))
4172 if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
4174 e.mas1 &= ~MAS1_VALID;
4175 tlb1_write_entry(&e, i);
4178 if (tlb1_map_base == va + isize)
4179 tlb1_map_base -= isize;
4183 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
4190 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
4192 for (i = 0; i < TLB1_ENTRIES; i++) {
4193 tlb1_read_entry(&e, i);
4194 if (!(e.mas1 & MAS1_VALID))
4196 if (pa >= e.phys && (pa + size) <=
4198 return (e.virt + (pa - e.phys));
4201 pa_base = rounddown(pa, PAGE_SIZE);
4202 size = roundup(size + (pa - pa_base), PAGE_SIZE);
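/*
 * TLB1 entry sizes must be powers of 4 (see size2tsize()), so the alignment
 * and entry-size exponents below are forced even.
 */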
4203 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
4204 va = tlb1_map_base + (pa - pa_base);
4207 sz = 1 << (ilog2(size) & ~1);
4208 tlb1_set_entry(tlb1_map_base, pa_base, sz,
4209 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
4212 tlb1_map_base += sz;
4219 pmap_track_page(pmap_t pmap, vm_offset_t va)
4223 struct pv_entry *pve;
4225 va = trunc_page(va);
4226 pa = pmap_kextract(va);
4227 page = PHYS_TO_VM_PAGE(pa);
4229 rw_wlock(&pvh_global_lock);
4232 TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
4233 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
4237 page->md.pv_tracked = true;
4238 pv_insert(pmap, va, page);
4241 rw_wunlock(&pvh_global_lock);
4246 * Set up MAS4 defaults.
4247 * These values are loaded into MAS0-2 on a TLB miss.
4250 set_mas4_defaults(void)
4254 /* Defaults: TLB0, PID0, TSIZED=4K */
4255 mas4 = MAS4_TLBSELD0;
4256 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
4260 mtspr(SPR_MAS4, mas4);
4261 __asm __volatile("isync");
4265 * Print out contents of the MAS registers for each TLB1 entry
4268 tlb1_print_tlbentries(void)
4270 uint32_t mas0, mas1, mas3, mas7;
4271 #ifdef __powerpc64__
4278 debugf("TLB1 entries:\n");
4279 for (i = 0; i < TLB1_ENTRIES; i++) {
4281 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
4282 mtspr(SPR_MAS0, mas0);
4284 __asm __volatile("isync; tlbre");
4286 mas1 = mfspr(SPR_MAS1);
4287 mas2 = mfspr(SPR_MAS2);
4288 mas3 = mfspr(SPR_MAS3);
4289 mas7 = mfspr(SPR_MAS7);
4291 tlb_print_entry(i, mas1, mas2, mas3, mas7);
4296 * Return 0 if the physical IO range is encompassed by one of the
4297 * TLB1 entries, otherwise return the related error code.
4300 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
4303 vm_paddr_t pa_start;
4305 unsigned int entry_tsize;
4306 vm_size_t entry_size;
4309 *va = (vm_offset_t)NULL;
4311 tlb1_read_entry(&e, i);
4312 /* Skip invalid entries */
4313 if (!(e.mas1 & MAS1_VALID))
4317 * The entry must be cache-inhibited, guarded, and r/w
4318 * so it can function as an i/o page
4320 prot = e.mas2 & (MAS2_I | MAS2_G);
4321 if (prot != (MAS2_I | MAS2_G))
4324 prot = e.mas3 & (MAS3_SR | MAS3_SW);
4325 if (prot != (MAS3_SR | MAS3_SW))
4328 /* The address should be within the entry range. */
4329 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4330 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
4332 entry_size = tsize2size(entry_tsize);
4333 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
4334 (e.mas3 & MAS3_RPN);
4335 pa_end = pa_start + entry_size;
4337 if ((pa < pa_start) || ((pa + size) > pa_end))
4340 /* Return virtual address of this mapping. */
4341 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
4346 * Invalidate all TLB0 entries which match the given TID. Note this is
4347 * dedicated for cases when invalidations should NOT be propagated to other CPUs.
4351 tid_flush(tlbtid_t tid)
4354 uint32_t mas0, mas1, mas2;
4358 /* Don't evict kernel translations */
4359 if (tid == TID_KERNEL)
4363 __asm __volatile("wrteei 0");
4365 for (way = 0; way < TLB0_WAYS; way++)
4366 for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
4368 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
4369 mtspr(SPR_MAS0, mas0);
4370 __asm __volatile("isync");
4372 mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
4373 mtspr(SPR_MAS2, mas2);
4375 __asm __volatile("isync; tlbre");
4377 mas1 = mfspr(SPR_MAS1);
4379 if (!(mas1 & MAS1_VALID))
4381 if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
4383 mas1 &= ~MAS1_VALID;
4384 mtspr(SPR_MAS1, mas1);
4385 __asm __volatile("isync; tlbwe; isync; msync");