2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Some hw specific parts of this pmap were derived or influenced
29 * by NetBSD's ibm4xx pmap module. More generic code is shared with
30 * a few other pmap modules from the FreeBSD tree.
36 * Kernel and user threads run within one common virtual address space
40 * Virtual address space layout:
41 * -----------------------------
42 * 0x0000_0000 - 0x7fff_ffff : user process
43 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
44 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
45 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
46 * 0xc100_0000 - 0xffff_ffff : KVA
47 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
48 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
49 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
50 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
53 * Virtual address space layout:
54 * -----------------------------
55 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
56 * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
57 * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
58 * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
59 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
60 * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
61 * endkernel - msgbufp-1 : flat device tree
62 * msgbufp - ptbl_bufs-1 : message buffer
63 * ptbl_bufs - kernel_pdir-1 : kernel page tables
64 * kernel_pdir - kernel_pp2d-1 : kernel page directory
65 * kernel_pp2d - . : kernel pointers to page directory
66 * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
67 * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
68 * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
69 * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
70 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
71 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
72 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
73 * 0xf000_0000_0000_0000 - +Maxmem : physmem map
74 * - 0xffff_ffff_ffff_ffff : device direct map
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
80 #include "opt_kstack_pages.h"
82 #include <sys/param.h>
84 #include <sys/malloc.h>
88 #include <sys/queue.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/kerneldump.h>
92 #include <sys/linker.h>
93 #include <sys/msgbuf.h>
95 #include <sys/mutex.h>
96 #include <sys/rwlock.h>
97 #include <sys/sched.h>
99 #include <sys/vmmeter.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_extern.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_map.h>
109 #include <vm/vm_pager.h>
112 #include <machine/_inttypes.h>
113 #include <machine/cpu.h>
114 #include <machine/pcb.h>
115 #include <machine/platform.h>
117 #include <machine/tlb.h>
118 #include <machine/spr.h>
119 #include <machine/md_var.h>
120 #include <machine/mmuvar.h>
121 #include <machine/pmap.h>
122 #include <machine/pte.h>
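/* With SPARSE_MAPDEV, each pmap_mapdev() mapping is given its own size-aligned KVA chunk (see mmu_booke_mapdev_attr()) rather than being packed linearly. */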
126 #define SPARSE_MAPDEV
128 #define debugf(fmt, args...) printf(fmt, ##args)
130 #define debugf(fmt, args...)
134 #define PRI0ptrX "016lx"
136 #define PRI0ptrX "08x"
139 #define TODO panic("%s: not implemented", __func__);
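/* Linker-provided symbols: end of kernel text and end of the loaded kernel image. */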
141 extern unsigned char _etext[];
142 extern unsigned char _end[];
144 extern uint32_t *bootinfo;
147 vm_offset_t kernstart;
150 /* Message buffer and tables. */
151 static vm_offset_t data_start;
152 static vm_size_t data_end;
154 /* Phys/avail memory regions. */
155 static struct mem_region *availmem_regions;
156 static int availmem_regions_sz;
157 static struct mem_region *physmem_regions;
158 static int physmem_regions_sz;
160 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
161 static vm_offset_t zero_page_va;
162 static struct mtx zero_page_mutex;
164 static struct mtx tlbivax_mutex;
166 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
167 static vm_offset_t copy_page_src_va;
168 static vm_offset_t copy_page_dst_va;
169 static struct mtx copy_page_mutex;
171 /**************************************************************************/
173 /**************************************************************************/
175 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
176 vm_prot_t, u_int flags, int8_t psind);
178 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
179 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
181 unsigned int kernel_pdirs;
185 * If user pmap is processed with mmu_booke_remove and the resident count
186 * drops to 0, there are no more pages to remove, so we need not continue.
188 #define PMAP_REMOVE_DONE(pmap) \
189 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
191 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
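/* Controls whether 32-bit ELF processes get a non-executable stack. */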
192 extern int elf32_nxstack;
195 /**************************************************************************/
196 /* TLB and TID handling */
197 /**************************************************************************/
199 /* Translation ID busy table */
200 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
203 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
204 * core revisions and should be read from h/w registers during early config.
206 uint32_t tlb0_entries;
208 uint32_t tlb0_entries_per_way;
209 uint32_t tlb1_entries;
211 #define TLB0_ENTRIES (tlb0_entries)
212 #define TLB0_WAYS (tlb0_ways)
213 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
215 #define TLB1_ENTRIES (tlb1_entries)
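/* Base of the KVA range handed out for TLB1-backed device mappings; starts just above user space and grows upward. */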
217 static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
219 static tlbtid_t tid_alloc(struct pmap *);
220 static void tid_flush(tlbtid_t tid);
223 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
225 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
228 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
229 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
230 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
231 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
233 static vm_size_t tsize2size(unsigned int);
234 static unsigned int size2tsize(vm_size_t);
235 static unsigned int ilog2(unsigned int);
237 static void set_mas4_defaults(void);
239 static inline void tlb0_flush_entry(vm_offset_t);
240 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
242 /**************************************************************************/
243 /* Page table management */
244 /**************************************************************************/
246 static struct rwlock_padalign pvh_global_lock;
248 /* Data for the pv entry allocation mechanism */
249 static uma_zone_t pvzone;
250 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
252 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
254 #ifndef PMAP_SHPGPERPROC
255 #define PMAP_SHPGPERPROC 200
258 static void ptbl_init(void);
259 static struct ptbl_buf *ptbl_buf_alloc(void);
260 static void ptbl_buf_free(struct ptbl_buf *);
261 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
264 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
265 unsigned int, boolean_t);
266 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
267 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
268 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
270 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
271 static void ptbl_free(mmu_t, pmap_t, unsigned int);
272 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
273 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
276 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
277 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
278 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
279 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
280 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
282 static pv_entry_t pv_alloc(void);
283 static void pv_free(pv_entry_t);
284 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
285 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
287 static void booke_pmap_init_qpages(void);
289 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
291 #define PTBL_BUFS (16UL * 16 * 16)
293 #define PTBL_BUFS (128 * 16)
297 TAILQ_ENTRY(ptbl_buf) link; /* list link */
298 vm_offset_t kva; /* va of mapping */
301 /* ptbl free list and a lock used for access synchronization. */
302 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
303 static struct mtx ptbl_buf_freelist_lock;
305 /* Base address of kva space allocated fot ptbl bufs. */
306 static vm_offset_t ptbl_buf_pool_vabase;
308 /* Pointer to ptbl_buf structures. */
309 static struct ptbl_buf *ptbl_bufs;
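/* Snapshot of the BSP's TLB1 contents, replayed into each AP's TLB1 by pmap_bootstrap_ap(). */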
312 extern tlb_entry_t __boot_tlb1[];
313 void pmap_bootstrap_ap(volatile uint32_t *);
317 * Kernel MMU interface
319 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
320 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
321 vm_size_t, vm_offset_t);
322 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
323 static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
324 vm_offset_t, vm_page_t *, vm_offset_t, int);
325 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
326 vm_prot_t, u_int flags, int8_t psind);
327 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
328 vm_page_t, vm_prot_t);
329 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
331 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
332 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
334 static void mmu_booke_init(mmu_t);
335 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
336 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
337 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
338 static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
339 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
341 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
343 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
344 vm_object_t, vm_pindex_t, vm_size_t);
345 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
346 static void mmu_booke_page_init(mmu_t, vm_page_t);
347 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
348 static void mmu_booke_pinit(mmu_t, pmap_t);
349 static void mmu_booke_pinit0(mmu_t, pmap_t);
350 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
352 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
353 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
354 static void mmu_booke_release(mmu_t, pmap_t);
355 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
356 static void mmu_booke_remove_all(mmu_t, vm_page_t);
357 static void mmu_booke_remove_write(mmu_t, vm_page_t);
358 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
359 static void mmu_booke_zero_page(mmu_t, vm_page_t);
360 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
361 static void mmu_booke_activate(mmu_t, struct thread *);
362 static void mmu_booke_deactivate(mmu_t, struct thread *);
363 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
364 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
365 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
366 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
367 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
368 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
369 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
370 static void mmu_booke_kremove(mmu_t, vm_offset_t);
371 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
372 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
374 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
376 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
378 static void mmu_booke_scan_init(mmu_t);
379 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
380 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
381 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
382 vm_size_t sz, vm_memattr_t mode);
384 static mmu_method_t mmu_booke_methods[] = {
385 /* pmap dispatcher interface */
386 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
387 MMUMETHOD(mmu_copy, mmu_booke_copy),
388 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
389 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
390 MMUMETHOD(mmu_enter, mmu_booke_enter),
391 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
392 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
393 MMUMETHOD(mmu_extract, mmu_booke_extract),
394 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
395 MMUMETHOD(mmu_init, mmu_booke_init),
396 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
397 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
398 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
399 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
400 MMUMETHOD(mmu_map, mmu_booke_map),
401 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
402 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
403 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
404 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
405 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
406 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
407 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
408 MMUMETHOD(mmu_protect, mmu_booke_protect),
409 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
410 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
411 MMUMETHOD(mmu_release, mmu_booke_release),
412 MMUMETHOD(mmu_remove, mmu_booke_remove),
413 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
414 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
415 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
416 MMUMETHOD(mmu_unwire, mmu_booke_unwire),
417 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
418 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
419 MMUMETHOD(mmu_activate, mmu_booke_activate),
420 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
421 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
422 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
424 /* Internal interfaces */
425 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
426 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
427 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
428 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
429 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
430 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
431 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
432 MMUMETHOD(mmu_kremove, mmu_booke_kremove),
433 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
434 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
436 /* dumpsys() support */
437 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
438 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
439 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
444 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
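/* Translate a vm_memattr_t into the MAS2 WIMGE bits to use for a mapping of physical address pa. */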
446 static __inline uint32_t
447 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
452 if (ma != VM_MEMATTR_DEFAULT) {
454 case VM_MEMATTR_UNCACHEABLE:
455 return (MAS2_I | MAS2_G);
456 case VM_MEMATTR_WRITE_COMBINING:
457 case VM_MEMATTR_WRITE_BACK:
458 case VM_MEMATTR_PREFETCHABLE:
460 case VM_MEMATTR_WRITE_THROUGH:
461 return (MAS2_W | MAS2_M);
462 case VM_MEMATTR_CACHEABLE:
468 * Assume the page is cache inhibited and access is guarded unless
469 * it's in our available memory array.
471 attrib = _TLB_ENTRY_IO;
472 for (i = 0; i < physmem_regions_sz; i++) {
473 if ((pa >= physmem_regions[i].mr_start) &&
474 (pa < (physmem_regions[i].mr_start +
475 physmem_regions[i].mr_size))) {
476 attrib = _TLB_ENTRY_MEM;
493 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
496 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
497 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
499 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
500 ("tlb_miss_lock: tried to lock self"));
502 tlb_lock(pc->pc_booke_tlb_lock);
504 CTR1(KTR_PMAP, "%s: locked", __func__);
511 tlb_miss_unlock(void)
519 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
521 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
522 __func__, pc->pc_cpuid);
524 tlb_unlock(pc->pc_booke_tlb_lock);
526 CTR1(KTR_PMAP, "%s: unlocked", __func__);
532 /* Return number of entries in TLB0. */
534 tlb0_get_tlbconf(void)
538 tlb0_cfg = mfspr(SPR_TLB0CFG);
539 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
540 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
541 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
544 /* Return number of entries in TLB1. */
546 tlb1_get_tlbconf(void)
550 tlb1_cfg = mfspr(SPR_TLB1CFG);
551 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
554 /**************************************************************************/
555 /* Page table related */
556 /**************************************************************************/
559 /* Initialize pool of kva ptbl buffers. */
565 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
566 TAILQ_INIT(&ptbl_buf_freelist);
568 for (i = 0; i < PTBL_BUFS; i++) {
569 ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
570 i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
571 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
575 /* Get a ptbl_buf from the freelist. */
576 static struct ptbl_buf *
579 struct ptbl_buf *buf;
581 mtx_lock(&ptbl_buf_freelist_lock);
582 buf = TAILQ_FIRST(&ptbl_buf_freelist);
584 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
585 mtx_unlock(&ptbl_buf_freelist_lock);
590 /* Return a ptbl buf to the free pool. */
592 ptbl_buf_free(struct ptbl_buf *buf)
594 mtx_lock(&ptbl_buf_freelist_lock);
595 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
596 mtx_unlock(&ptbl_buf_freelist_lock);
600 * Search the pmap's list of allocated ptbl bufs for the buf backing the given ptbl and free it.
603 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
605 struct ptbl_buf *pbuf;
607 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
608 if (pbuf->kva == (vm_offset_t) ptbl) {
609 /* Remove from pmap ptbl buf list. */
610 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
612 /* Free corresponding ptbl buf. */
620 /* Get a pointer to a PTE in a page table. */
621 static __inline pte_t *
622 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
627 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
629 pdir = pmap->pm_pp2d[PP2D_IDX(va)];
632 ptbl = pdir[PDIR_IDX(va)];
633 return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
637 * Search the pmap's list of allocated pdir bufs for the buf backing the given pdir and free it.
640 ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
642 struct ptbl_buf *pbuf;
644 TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
645 if (pbuf->kva == (vm_offset_t) pdir) {
646 /* Remove from pmap ptbl buf list. */
647 TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
649 /* Free corresponding pdir buf. */
656 /* Free pdir pages and invalidate pdir entry. */
658 pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
666 pdir = pmap->pm_pp2d[pp2d_idx];
668 KASSERT((pdir != NULL), ("pdir_free: null pdir"));
670 pmap->pm_pp2d[pp2d_idx] = NULL;
672 for (i = 0; i < PDIR_PAGES; i++) {
673 va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
674 pa = pte_vatopa(mmu, kernel_pmap, va);
675 m = PHYS_TO_VM_PAGE(pa);
676 vm_page_free_zero(m);
677 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
681 ptbl_free_pmap_pdir(mmu, pmap, pdir);
685 * Decrement pdir pages hold count and attempt to free pdir pages. Called
686 * when removing directory entry from pdir.
688 * Return 1 if pdir pages were freed.
691 pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
698 KASSERT((pmap != kernel_pmap),
699 ("pdir_unhold: unholding kernel pdir!"));
701 pdir = pmap->pm_pp2d[pp2d_idx];
703 KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
704 ("pdir_unhold: non kva pdir"));
706 /* decrement hold count */
707 for (i = 0; i < PDIR_PAGES; i++) {
708 pa = pte_vatopa(mmu, kernel_pmap,
709 (vm_offset_t) pdir + (i * PAGE_SIZE));
710 m = PHYS_TO_VM_PAGE(pa);
715 * Free pdir pages if there are no dir entries in this pdir.
716 * wire_count has the same value for all pdir pages, so check the last page.
719 if (m->wire_count == 0) {
720 pdir_free(mmu, pmap, pp2d_idx);
727 * Increment hold count for pdir pages. This routine is used when a new ptbl
728 * entry is being inserted into the pdir.
731 pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
737 KASSERT((pmap != kernel_pmap),
738 ("pdir_hold: holding kernel pdir!"));
740 KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
742 for (i = 0; i < PDIR_PAGES; i++) {
743 pa = pte_vatopa(mmu, kernel_pmap,
744 (vm_offset_t) pdir + (i * PAGE_SIZE));
745 m = PHYS_TO_VM_PAGE(pa);
750 /* Allocate page table. */
752 ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
755 vm_page_t mtbl [PTBL_PAGES];
757 struct ptbl_buf *pbuf;
763 KASSERT((pdir[pdir_idx] == NULL),
764 ("%s: valid ptbl entry exists!", __func__));
766 pbuf = ptbl_buf_alloc();
768 panic("%s: couldn't alloc kernel virtual memory", __func__);
770 ptbl = (pte_t *) pbuf->kva;
772 for (i = 0; i < PTBL_PAGES; i++) {
773 pidx = (PTBL_PAGES * pdir_idx) + i;
774 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
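/* Allocate and wire the pages backing the ptbl; on failure either undo and bail out (nosleep) or wait for memory and retry. */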
775 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
777 rw_wunlock(&pvh_global_lock);
779 ptbl_free_pmap_ptbl(pmap, ptbl);
780 for (j = 0; j < i; j++)
781 vm_page_free(mtbl[j]);
782 atomic_subtract_int(&vm_cnt.v_wire_count, i);
786 rw_wlock(&pvh_global_lock);
792 /* Map allocated pages into kernel_pmap. */
793 mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
794 /* Zero whole ptbl. */
795 bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
797 /* Add pbuf to the pmap ptbl bufs list. */
798 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
803 /* Free ptbl pages and invalidate pdir entry. */
805 ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
813 ptbl = pdir[pdir_idx];
815 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
817 pdir[pdir_idx] = NULL;
819 for (i = 0; i < PTBL_PAGES; i++) {
820 va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
821 pa = pte_vatopa(mmu, kernel_pmap, va);
822 m = PHYS_TO_VM_PAGE(pa);
823 vm_page_free_zero(m);
824 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
828 ptbl_free_pmap_ptbl(pmap, ptbl);
832 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
833 * when removing pte entry from ptbl.
835 * Return 1 if ptbl pages were freed.
838 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
848 pp2d_idx = PP2D_IDX(va);
849 pdir_idx = PDIR_IDX(va);
851 KASSERT((pmap != kernel_pmap),
852 ("ptbl_unhold: unholding kernel ptbl!"));
854 pdir = pmap->pm_pp2d[pp2d_idx];
855 ptbl = pdir[pdir_idx];
857 KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
858 ("ptbl_unhold: non kva ptbl"));
860 /* decrement hold count */
861 for (i = 0; i < PTBL_PAGES; i++) {
862 pa = pte_vatopa(mmu, kernel_pmap,
863 (vm_offset_t) ptbl + (i * PAGE_SIZE));
864 m = PHYS_TO_VM_PAGE(pa);
869 * Free ptbl pages if there are no pte entries in this ptbl.
870 * wire_count has the same value for all ptbl pages, so check the last page.
873 if (m->wire_count == 0) {
874 /* A pair of indirect entries might point to this ptbl page */
876 tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
877 TLB_SIZE_1M, MAS6_SIND);
878 tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
879 TLB_SIZE_1M, MAS6_SIND);
881 ptbl_free(mmu, pmap, pdir, pdir_idx);
882 pdir_unhold(mmu, pmap, pp2d_idx);
889 * Increment hold count for ptbl pages. This routine is used when a new pte
890 * entry is being inserted into the ptbl.
893 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
900 KASSERT((pmap != kernel_pmap),
901 ("ptbl_hold: holding kernel ptbl!"));
903 ptbl = pdir[pdir_idx];
905 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
907 for (i = 0; i < PTBL_PAGES; i++) {
908 pa = pte_vatopa(mmu, kernel_pmap,
909 (vm_offset_t) ptbl + (i * PAGE_SIZE));
910 m = PHYS_TO_VM_PAGE(pa);
916 /* Initialize pool of kva ptbl buffers. */
922 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
923 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
924 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
925 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
927 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
928 TAILQ_INIT(&ptbl_buf_freelist);
930 for (i = 0; i < PTBL_BUFS; i++) {
932 ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
933 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
937 /* Get a ptbl_buf from the freelist. */
938 static struct ptbl_buf *
941 struct ptbl_buf *buf;
943 mtx_lock(&ptbl_buf_freelist_lock);
944 buf = TAILQ_FIRST(&ptbl_buf_freelist);
946 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
947 mtx_unlock(&ptbl_buf_freelist_lock);
949 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
954 /* Return a ptbl buf to the free pool. */
956 ptbl_buf_free(struct ptbl_buf *buf)
959 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
961 mtx_lock(&ptbl_buf_freelist_lock);
962 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
963 mtx_unlock(&ptbl_buf_freelist_lock);
967 * Search the pmap's list of allocated ptbl bufs for the buf backing the given ptbl and free it.
970 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
972 struct ptbl_buf *pbuf;
974 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
976 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
978 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
979 if (pbuf->kva == (vm_offset_t)ptbl) {
980 /* Remove from pmap ptbl buf list. */
981 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
983 /* Free corresponding ptbl buf. */
989 /* Allocate page table. */
991 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
993 vm_page_t mtbl[PTBL_PAGES];
995 struct ptbl_buf *pbuf;
1000 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1001 (pmap == kernel_pmap), pdir_idx);
1003 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1004 ("ptbl_alloc: invalid pdir_idx"));
1005 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
1006 ("pte_alloc: valid ptbl entry exists!"));
1008 pbuf = ptbl_buf_alloc();
1010 panic("pte_alloc: couldn't alloc kernel virtual memory");
1012 ptbl = (pte_t *)pbuf->kva;
1014 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
1016 for (i = 0; i < PTBL_PAGES; i++) {
1017 pidx = (PTBL_PAGES * pdir_idx) + i;
1018 while ((m = vm_page_alloc(NULL, pidx,
1019 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
1021 rw_wunlock(&pvh_global_lock);
1023 ptbl_free_pmap_ptbl(pmap, ptbl);
1024 for (j = 0; j < i; j++)
1025 vm_page_free(mtbl[j]);
1026 atomic_subtract_int(&vm_cnt.v_wire_count, i);
1030 rw_wlock(&pvh_global_lock);
1036 /* Map allocated pages into kernel_pmap. */
1037 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
1039 /* Zero whole ptbl. */
1040 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
1042 /* Add pbuf to the pmap ptbl bufs list. */
1043 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
1048 /* Free ptbl pages and invalidate pdir entry. */
1050 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1058 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1059 (pmap == kernel_pmap), pdir_idx);
1061 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1062 ("ptbl_free: invalid pdir_idx"));
1064 ptbl = pmap->pm_pdir[pdir_idx];
1066 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
1068 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
1071 * Invalidate the pdir entry as soon as possible, so that other CPUs
1072 * don't attempt to look up the page tables we are releasing.
1074 mtx_lock_spin(&tlbivax_mutex);
1077 pmap->pm_pdir[pdir_idx] = NULL;
1080 mtx_unlock_spin(&tlbivax_mutex);
1082 for (i = 0; i < PTBL_PAGES; i++) {
1083 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
1084 pa = pte_vatopa(mmu, kernel_pmap, va);
1085 m = PHYS_TO_VM_PAGE(pa);
1086 vm_page_free_zero(m);
1087 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1088 mmu_booke_kremove(mmu, va);
1091 ptbl_free_pmap_ptbl(pmap, ptbl);
1095 * Decrement ptbl pages hold count and attempt to free ptbl pages.
1096 * Called when removing pte entry from ptbl.
1098 * Return 1 if ptbl pages were freed.
1101 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1108 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1109 (pmap == kernel_pmap), pdir_idx);
1111 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1112 ("ptbl_unhold: invalid pdir_idx"));
1113 KASSERT((pmap != kernel_pmap),
1114 ("ptbl_unhold: unholding kernel ptbl!"));
1116 ptbl = pmap->pm_pdir[pdir_idx];
1118 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
1119 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
1120 ("ptbl_unhold: non kva ptbl"));
1122 /* decrement hold count */
1123 for (i = 0; i < PTBL_PAGES; i++) {
1124 pa = pte_vatopa(mmu, kernel_pmap,
1125 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1126 m = PHYS_TO_VM_PAGE(pa);
1131 * Free ptbl pages if there are no pte entries in this ptbl.
1132 * wire_count has the same value for all ptbl pages, so check the last page.
1135 if (m->wire_count == 0) {
1136 ptbl_free(mmu, pmap, pdir_idx);
1138 //debugf("ptbl_unhold: e (freed ptbl)\n");
1146 * Increment hold count for ptbl pages. This routine is used when a new pte
1147 * entry is being inserted into the ptbl.
1150 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1157 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
1160 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1161 ("ptbl_hold: invalid pdir_idx"));
1162 KASSERT((pmap != kernel_pmap),
1163 ("ptbl_hold: holding kernel ptbl!"));
1165 ptbl = pmap->pm_pdir[pdir_idx];
1167 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
1169 for (i = 0; i < PTBL_PAGES; i++) {
1170 pa = pte_vatopa(mmu, kernel_pmap,
1171 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1172 m = PHYS_TO_VM_PAGE(pa);
1178 /* Allocate pv_entry structure. */
1185 if (pv_entry_count > pv_entry_high_water)
1186 pagedaemon_wakeup();
1187 pv = uma_zalloc(pvzone, M_NOWAIT);
1192 /* Free pv_entry structure. */
1193 static __inline void
1194 pv_free(pv_entry_t pve)
1198 uma_zfree(pvzone, pve);
1202 /* Allocate and initialize pv_entry structure. */
1204 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
1208 //int su = (pmap == kernel_pmap);
1209 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
1210 // (u_int32_t)pmap, va, (u_int32_t)m);
1214 panic("pv_insert: no pv entries!");
1216 pve->pv_pmap = pmap;
1219 /* add to pv_list */
1220 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1221 rw_assert(&pvh_global_lock, RA_WLOCKED);
1223 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
1225 //debugf("pv_insert: e\n");
1228 /* Destroy pv entry. */
1230 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
1234 //int su = (pmap == kernel_pmap);
1235 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
1237 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1238 rw_assert(&pvh_global_lock, RA_WLOCKED);
1241 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
1242 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
1243 /* remove from pv_list */
1244 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
1245 if (TAILQ_EMPTY(&m->md.pv_list))
1246 vm_page_aflag_clear(m, PGA_WRITEABLE);
1248 /* free pv entry struct */
1254 //debugf("pv_remove: e\n");
1257 #ifdef __powerpc64__
1259 * Clean pte entry, try to free page table page if requested.
1261 * Return 1 if ptbl pages were freed, otherwise return 0.
1264 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
1269 pte = pte_find(mmu, pmap, va);
1270 KASSERT(pte != NULL, ("%s: NULL pte", __func__));
1272 if (!PTE_ISVALID(pte))
1275 /* Get vm_page_t for mapped pte. */
1276 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1278 if (PTE_ISWIRED(pte))
1279 pmap->pm_stats.wired_count--;
1281 /* Handle managed entry. */
1282 if (PTE_ISMANAGED(pte)) {
1284 /* Handle modified pages. */
1285 if (PTE_ISMODIFIED(pte))
1288 /* Referenced pages. */
1289 if (PTE_ISREFERENCED(pte))
1290 vm_page_aflag_set(m, PGA_REFERENCED);
1292 /* Remove pv_entry from pv_list. */
1293 pv_remove(pmap, va, m);
1295 mtx_lock_spin(&tlbivax_mutex);
1298 tlb0_flush_entry(va);
1302 mtx_unlock_spin(&tlbivax_mutex);
1304 pmap->pm_stats.resident_count--;
1306 if (flags & PTBL_UNHOLD) {
1307 return (ptbl_unhold(mmu, pmap, va));
1313 * Allocate a page of pointers to page directories; do not preallocate the page tables.
1317 pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
1319 vm_page_t mtbl [PDIR_PAGES];
1321 struct ptbl_buf *pbuf;
1327 pbuf = ptbl_buf_alloc();
1330 panic("%s: couldn't alloc kernel virtual memory", __func__);
1332 /* Allocate pdir pages, this will sleep! */
1333 for (i = 0; i < PDIR_PAGES; i++) {
1334 pidx = (PDIR_PAGES * pp2d_idx) + i;
1335 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
1336 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
1344 /* Map allocated pages into kernel_pmap. */
1345 pdir = (pte_t **) pbuf->kva;
1346 pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
1348 /* Zero whole pdir. */
1349 bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
1351 /* Add pdir to the pmap pdir bufs list. */
1352 TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
1358 * Insert PTE for a given page and virtual address.
1361 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1364 unsigned int pp2d_idx = PP2D_IDX(va);
1365 unsigned int pdir_idx = PDIR_IDX(va);
1366 unsigned int ptbl_idx = PTBL_IDX(va);
1370 /* Get the page directory pointer. */
1371 pdir = pmap->pm_pp2d[pp2d_idx];
1373 pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
1375 /* Get the page table pointer. */
1376 ptbl = pdir[pdir_idx];
1379 /* Allocate page table pages. */
1380 ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
1382 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1387 * Check if there is a valid mapping for the requested va; if there is, remove it.
1390 pte = &pdir[pdir_idx][ptbl_idx];
1391 if (PTE_ISVALID(pte)) {
1392 pte_remove(mmu, pmap, va, PTBL_HOLD);
1395 * pte is not used; increment the hold count for the ptbl pages.
1398 if (pmap != kernel_pmap)
1399 ptbl_hold(mmu, pmap, pdir, pdir_idx);
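/* Hook any freshly allocated ptbl/pdir into the directory hierarchy. */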
1403 if (pdir[pdir_idx] == NULL) {
1404 if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
1405 pdir_hold(mmu, pmap, pdir);
1406 pdir[pdir_idx] = ptbl;
1408 if (pmap->pm_pp2d[pp2d_idx] == NULL)
1409 pmap->pm_pp2d[pp2d_idx] = pdir;
1412 * Insert pv_entry into pv_list for the mapped page if it is part of managed memory.
1415 if ((m->oflags & VPO_UNMANAGED) == 0) {
1416 flags |= PTE_MANAGED;
1418 /* Create and insert pv entry. */
1419 pv_insert(pmap, va, m);
1422 mtx_lock_spin(&tlbivax_mutex);
1425 tlb0_flush_entry(va);
1426 pmap->pm_stats.resident_count++;
1427 pte = &pdir[pdir_idx][ptbl_idx];
1428 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1429 *pte |= (PTE_VALID | flags);
1432 mtx_unlock_spin(&tlbivax_mutex);
1437 /* Return the pa for the given pmap/va. */
1439 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1444 pte = pte_find(mmu, pmap, va);
1445 if ((pte != NULL) && PTE_ISVALID(pte))
1446 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1451 /* allocate pte entries to manage (addr & mask) to (addr & mask) + size */
1453 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1460 /* Initialize kernel pdir */
1461 for (i = 0; i < kernel_pdirs; i++) {
1462 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
1463 (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
1464 for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
1465 j < PDIR_NENTRIES; j++) {
1466 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
1467 (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
1468 (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
1473 * Fill in PTEs covering kernel code and data. They are not required
1474 * for address translation, as this area is covered by static TLB1
1475 * entries, but for pte_vatopa() to work correctly with kernel area
1478 for (va = addr; va < data_end; va += PAGE_SIZE) {
1479 pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
1480 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1481 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1482 PTE_VALID | PTE_PS_4KB;
1487 * Clean pte entry, try to free page table page if requested.
1489 * Return 1 if ptbl pages were freed, otherwise return 0.
1492 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
1494 unsigned int pdir_idx = PDIR_IDX(va);
1495 unsigned int ptbl_idx = PTBL_IDX(va);
1500 //int su = (pmap == kernel_pmap);
1501 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
1502 // su, (u_int32_t)pmap, va, flags);
1504 ptbl = pmap->pm_pdir[pdir_idx];
1505 KASSERT(ptbl, ("pte_remove: null ptbl"));
1507 pte = &ptbl[ptbl_idx];
1509 if (pte == NULL || !PTE_ISVALID(pte))
1512 if (PTE_ISWIRED(pte))
1513 pmap->pm_stats.wired_count--;
1515 /* Get vm_page_t for mapped pte. */
1516 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1518 /* Handle managed entry. */
1519 if (PTE_ISMANAGED(pte)) {
1521 if (PTE_ISMODIFIED(pte))
1524 if (PTE_ISREFERENCED(pte))
1525 vm_page_aflag_set(m, PGA_REFERENCED);
1527 pv_remove(pmap, va, m);
1528 } else if (m->md.pv_tracked) {
1530 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
1531 * used. This is needed by the NCSW support code for fast
1532 * VA<->PA translation.
1534 pv_remove(pmap, va, m);
1535 if (TAILQ_EMPTY(&m->md.pv_list))
1536 m->md.pv_tracked = false;
1539 mtx_lock_spin(&tlbivax_mutex);
1542 tlb0_flush_entry(va);
1546 mtx_unlock_spin(&tlbivax_mutex);
1548 pmap->pm_stats.resident_count--;
1550 if (flags & PTBL_UNHOLD) {
1551 //debugf("pte_remove: e (unhold)\n");
1552 return (ptbl_unhold(mmu, pmap, pdir_idx));
1555 //debugf("pte_remove: e\n");
1560 * Insert PTE for a given page and virtual address.
1563 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1566 unsigned int pdir_idx = PDIR_IDX(va);
1567 unsigned int ptbl_idx = PTBL_IDX(va);
1570 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
1571 pmap == kernel_pmap, pmap, va);
1573 /* Get the page table pointer. */
1574 ptbl = pmap->pm_pdir[pdir_idx];
1577 /* Allocate page table pages. */
1578 ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
1580 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1585 * Check if there is a valid mapping for the requested
1586 * va; if there is, remove it.
1588 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
1589 if (PTE_ISVALID(pte)) {
1590 pte_remove(mmu, pmap, va, PTBL_HOLD);
1593 * pte is not used; increment the hold count for the ptbl pages.
1596 if (pmap != kernel_pmap)
1597 ptbl_hold(mmu, pmap, pdir_idx);
1602 * Insert pv_entry into pv_list for the mapped page if it is part of managed memory.
1605 if ((m->oflags & VPO_UNMANAGED) == 0) {
1606 flags |= PTE_MANAGED;
1608 /* Create and insert pv entry. */
1609 pv_insert(pmap, va, m);
1612 pmap->pm_stats.resident_count++;
1614 mtx_lock_spin(&tlbivax_mutex);
1617 tlb0_flush_entry(va);
1618 if (pmap->pm_pdir[pdir_idx] == NULL) {
1620 * If we just allocated a new page table, hook it in the pdir.
1623 pmap->pm_pdir[pdir_idx] = ptbl;
1625 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
1626 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1627 *pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
1630 mtx_unlock_spin(&tlbivax_mutex);
1634 /* Return the pa for the given pmap/va. */
1636 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1641 pte = pte_find(mmu, pmap, va);
1642 if ((pte != NULL) && PTE_ISVALID(pte))
1643 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1647 /* Get a pointer to a PTE in a page table. */
1649 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1651 unsigned int pdir_idx = PDIR_IDX(va);
1652 unsigned int ptbl_idx = PTBL_IDX(va);
1654 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
1656 if (pmap->pm_pdir[pdir_idx])
1657 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
1662 /* Set up kernel page tables. */
1664 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1670 /* Initialize kernel pdir */
1671 for (i = 0; i < kernel_ptbls; i++)
1672 kernel_pmap->pm_pdir[kptbl_min + i] =
1673 (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
1676 * Fill in PTEs covering kernel code and data. They are not required
1677 * for address translation, as this area is covered by static TLB1
1678 * entries, but for pte_vatopa() to work correctly with kernel area
1681 for (va = addr; va < data_end; va += PAGE_SIZE) {
1682 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1683 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1684 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1685 PTE_VALID | PTE_PS_4KB;
1690 /**************************************************************************/
1692 /**************************************************************************/
1695 * This is called during booke_init, before the system is really initialized.
1698 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
1700 vm_paddr_t phys_kernelend;
1701 struct mem_region *mp, *mp1;
1703 vm_paddr_t s, e, sz;
1704 vm_paddr_t physsz, hwphyssz;
1705 u_int phys_avail_count;
1706 vm_size_t kstack0_sz;
1707 vm_offset_t kernel_pdir, kstack0;
1708 vm_paddr_t kstack0_phys;
1711 debugf("mmu_booke_bootstrap: entered\n");
1713 /* Set interesting system properties */
1715 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
1719 /* Initialize invalidation mutex */
1720 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
1722 /* Read TLB0 size and associativity. */
1726 * Align kernel start and end address (kernel image).
1727 * Note that kernel end does not necessarily relate to kernsize.
1728 * kernsize is the size of the kernel that is actually mapped.
1730 kernstart = trunc_page(start);
1731 data_start = round_page(kernelend);
1732 data_end = data_start;
1735 * Addresses of preloaded modules (like file systems) use
1736 * physical addresses. Make sure we relocate those into
1737 * virtual addresses.
1739 preload_addr_relocate = kernstart - kernload;
1741 /* Allocate the dynamic per-cpu area. */
1742 dpcpu = (void *)data_end;
1743 data_end += DPCPU_SIZE;
1745 /* Allocate space for the message buffer. */
1746 msgbufp = (struct msgbuf *)data_end;
1747 data_end += msgbufsize;
1748 debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1749 (uintptr_t)msgbufp, data_end);
1751 data_end = round_page(data_end);
1753 /* Allocate space for ptbl_bufs. */
1754 ptbl_bufs = (struct ptbl_buf *)data_end;
1755 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1756 debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1757 (uintptr_t)ptbl_bufs, data_end);
1759 data_end = round_page(data_end);
1761 /* Allocate PTE tables for kernel KVA. */
1762 kernel_pdir = data_end;
1763 kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
1765 #ifdef __powerpc64__
1766 kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
1767 data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
1769 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1770 debugf(" kernel ptbls: %d\n", kernel_ptbls);
1771 debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1772 kernel_pdir, data_end);
1774 debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
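/* If the bootstrap allocations above spilled past the initially mapped kernel image, extend the TLB1 kernel mapping to cover them. */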
1775 if (data_end - kernstart > kernsize) {
1776 kernsize += tlb1_mapin_region(kernstart + kernsize,
1777 kernload + kernsize, (data_end - kernstart) - kernsize);
1779 data_end = kernstart + kernsize;
1780 debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
1783 * Clear the structures - note we can only do it safely after the
1784 * possible additional TLB1 translations are in place (above) so that
1785 * the whole range up to the currently calculated 'data_end' is covered.
1787 dpcpu_init(dpcpu, 0);
1788 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1789 #ifdef __powerpc64__
1790 memset((void *)kernel_pdir, 0,
1791 kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
1792 kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1794 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1797 /*******************************************************/
1798 /* Set the start and end of kva. */
1799 /*******************************************************/
1800 virtual_avail = round_page(data_end);
1801 virtual_end = VM_MAX_KERNEL_ADDRESS;
1803 /* Allocate KVA space for page zero/copy operations. */
1804 zero_page_va = virtual_avail;
1805 virtual_avail += PAGE_SIZE;
1806 copy_page_src_va = virtual_avail;
1807 virtual_avail += PAGE_SIZE;
1808 copy_page_dst_va = virtual_avail;
1809 virtual_avail += PAGE_SIZE;
1810 debugf("zero_page_va = 0x%08x\n", zero_page_va);
1811 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1812 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1814 /* Initialize page zero/copy mutexes. */
1815 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1816 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1818 /* Allocate KVA space for ptbl bufs. */
1819 ptbl_buf_pool_vabase = virtual_avail;
1820 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1821 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1822 ptbl_buf_pool_vabase, virtual_avail);
1824 /* Calculate corresponding physical addresses for the kernel region. */
1825 phys_kernelend = kernload + kernsize;
1826 debugf("kernel image and allocated data:\n");
1827 debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
1828 debugf(" kernstart = 0x%08x\n", kernstart);
1829 debugf(" kernsize = 0x%08x\n", kernsize);
1831 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1832 panic("mmu_booke_bootstrap: phys_avail too small");
1835 * Remove kernel physical address range from avail regions list. Page
1836 * align all regions. Non-page aligned memory isn't very interesting
1837 * to us. Also, sort the entries for ascending addresses.
1840 /* Retrieve phys/avail mem regions */
1841 mem_regions(&physmem_regions, &physmem_regions_sz,
1842 &availmem_regions, &availmem_regions_sz);
1844 cnt = availmem_regions_sz;
1845 debugf("processing avail regions:\n");
1846 for (mp = availmem_regions; mp->mr_size; mp++) {
1848 e = mp->mr_start + mp->mr_size;
1849 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
1850 /* Check whether this region holds all of the kernel. */
1851 if (s < kernload && e > phys_kernelend) {
1852 availmem_regions[cnt].mr_start = phys_kernelend;
1853 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1856 /* Look whether this region starts within the kernel. */
1857 if (s >= kernload && s < phys_kernelend) {
1858 if (e <= phys_kernelend)
1862 /* Now look whether this region ends within the kernel. */
1863 if (e > kernload && e <= phys_kernelend) {
1868 /* Now page align the start and size of the region. */
1874 debugf("%09jx-%09jx = %jx\n",
1875 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
1877 /* Check whether some memory is left here. */
1881 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1887 /* Do an insertion sort. */
1888 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1889 if (s < mp1->mr_start)
1892 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1900 availmem_regions_sz = cnt;
1902 /*******************************************************/
1903 /* Steal physical memory for kernel stack from the end */
1904 /* of the first avail region */
1905 /*******************************************************/
1906 kstack0_sz = kstack_pages * PAGE_SIZE;
1907 kstack0_phys = availmem_regions[0].mr_start +
1908 availmem_regions[0].mr_size;
1909 kstack0_phys -= kstack0_sz;
1910 availmem_regions[0].mr_size -= kstack0_sz;
1912 /*******************************************************/
1913 /* Fill in phys_avail table, based on availmem_regions */
1914 /*******************************************************/
1915 phys_avail_count = 0;
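/* hw.physmem, if set, caps the amount of physical memory made available. */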
1918 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1920 debugf("fill in phys_avail:\n");
1921 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1923 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
1924 (uintmax_t)availmem_regions[i].mr_start,
1925 (uintmax_t)availmem_regions[i].mr_start +
1926 availmem_regions[i].mr_size,
1927 (uintmax_t)availmem_regions[i].mr_size);
1929 if (hwphyssz != 0 &&
1930 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1931 debugf(" hw.physmem adjust\n");
1932 if (physsz < hwphyssz) {
1933 phys_avail[j] = availmem_regions[i].mr_start;
1935 availmem_regions[i].mr_start +
1943 phys_avail[j] = availmem_regions[i].mr_start;
1944 phys_avail[j + 1] = availmem_regions[i].mr_start +
1945 availmem_regions[i].mr_size;
1947 physsz += availmem_regions[i].mr_size;
1949 physmem = btoc(physsz);
1951 /* Calculate the last available physical address. */
1952 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1954 Maxmem = powerpc_btop(phys_avail[i + 1]);
1956 debugf("Maxmem = 0x%08lx\n", Maxmem);
1957 debugf("phys_avail_count = %d\n", phys_avail_count);
1958 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
1959 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
1961 /*******************************************************/
1962 /* Initialize (statically allocated) kernel pmap. */
1963 /*******************************************************/
1964 PMAP_LOCK_INIT(kernel_pmap);
1965 #ifndef __powerpc64__
1966 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1969 debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
1970 kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
1971 for (i = 0; i < MAXCPU; i++) {
1972 kernel_pmap->pm_tid[i] = TID_KERNEL;
1974 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1975 tidbusy[i][TID_KERNEL] = kernel_pmap;
1978 /* Mark kernel_pmap active on all CPUs */
1979 CPU_FILL(&kernel_pmap->pm_active);
1982 * Initialize the global pv list lock.
1984 rw_init(&pvh_global_lock, "pmap pv global");
1986 /*******************************************************/
1988 /*******************************************************/
1990 /* Enter kstack0 into kernel map, provide guard page */
1991 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1992 thread0.td_kstack = kstack0;
1993 thread0.td_kstack_pages = kstack_pages;
1995 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
1996 debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
1997 kstack0_phys, kstack0_phys + kstack0_sz);
1998 debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
1999 kstack0, kstack0 + kstack0_sz);
2001 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
2002 for (i = 0; i < kstack_pages; i++) {
2003 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
2004 kstack0 += PAGE_SIZE;
2005 kstack0_phys += PAGE_SIZE;
2008 pmap_bootstrapped = 1;
2010 debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
2011 debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);
2013 debugf("mmu_booke_bootstrap: exit\n");
2020 tlb_entry_t *e, tmp;
2023 /* Prepare TLB1 image for AP processors */
2025 for (i = 0; i < TLB1_ENTRIES; i++) {
2026 tlb1_read_entry(&tmp, i);
2028 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
2029 memcpy(e++, &tmp, sizeof(tmp));
2034 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
2039 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
2040 * have the snapshot of its contents in the s/w __boot_tlb1[] table
2041 * created by tlb1_ap_prep(), so use these values directly to
2042 * (re)program AP's TLB1 hardware.
2044 * Start at index 1 because index 0 has the kernel map.
2046 for (i = 1; i < TLB1_ENTRIES; i++) {
2047 if (__boot_tlb1[i].mas1 & MAS1_VALID)
2048 tlb1_write_entry(&__boot_tlb1[i], i);
2051 set_mas4_defaults();
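/* Reserve one page of KVA per CPU for mmu_booke_quick_enter_page(). */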
2056 booke_pmap_init_qpages(void)
2063 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
2064 if (pc->pc_qmap_addr == 0)
2065 panic("pmap_init_qpages: unable to allocate KVA");
2069 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
2072 * Get the physical page address for the given pmap/virtual address.
2075 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
2080 pa = pte_vatopa(mmu, pmap, va);
2087 * Extract the physical page address associated with the given
2088 * kernel virtual address.
2091 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
2097 p = pte_vatopa(mmu, kernel_pmap, va);
2100 /* Check TLB1 mappings */
2101 for (i = 0; i < TLB1_ENTRIES; i++) {
2102 tlb1_read_entry(&e, i);
2103 if (!(e.mas1 & MAS1_VALID))
2105 if (va >= e.virt && va < e.virt + e.size)
2106 return (e.phys + (va - e.virt));
2114 * Initialize the pmap module.
2115 * Called by vm_init, to initialize any structures that the pmap
2116 * system needs to map virtual memory.
2119 mmu_booke_init(mmu_t mmu)
2121 int shpgperproc = PMAP_SHPGPERPROC;
2124 * Initialize the address space (zone) for the pv entries. Set a
2125 * high water mark so that the system can recover from excessive
2126 * numbers of pv entries.
2128 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
2129 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
2131 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
2132 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
2134 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
2135 pv_entry_high_water = 9 * (pv_entry_max / 10);
2137 uma_zone_reserve_kva(pvzone, pv_entry_max);
2139 /* Pre-fill pvzone with initial number of pv entries. */
2140 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
2142 /* Initialize ptbl allocation. */
2147 * Map a list of wired pages into kernel virtual address space. This is
2148 * intended for temporary mappings which do not need page modification or
2149 * references recorded. Existing mappings in the region are overwritten.
2152 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
2157 while (count-- > 0) {
2158 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2165 * Remove page mappings from kernel virtual address space. Intended for
2166 * temporary mappings entered by mmu_booke_qenter.
2169 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
2174 while (count-- > 0) {
2175 mmu_booke_kremove(mmu, va);
2181 * Map a wired page into kernel virtual address space.
2184 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
2187 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
2191 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2196 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2197 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
2199 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2200 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
2201 flags |= PTE_PS_4KB;
2203 pte = pte_find(mmu, kernel_pmap, va);
2204 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
2206 mtx_lock_spin(&tlbivax_mutex);
2209 if (PTE_ISVALID(pte)) {
2211 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
2213 /* Flush entry from TLB0 */
2214 tlb0_flush_entry(va);
2217 *pte = PTE_RPN_FROM_PA(pa) | flags;
2219 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
2220 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
2221 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
2223 /* Flush the real memory from the instruction cache. */
2224 if ((flags & (PTE_I | PTE_G)) == 0)
2225 __syncicache((void *)va, PAGE_SIZE);
2228 mtx_unlock_spin(&tlbivax_mutex);
2232 * Remove a page from kernel page table.
2235 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
2239 CTR2(KTR_PMAP,"%s: s (va = 0x%08x)\n", __func__, va);
2241 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2242 (va <= VM_MAX_KERNEL_ADDRESS)),
2243 ("mmu_booke_kremove: invalid va"));
2245 pte = pte_find(mmu, kernel_pmap, va);
2247 if (!PTE_ISVALID(pte)) {
2249 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
2254 mtx_lock_spin(&tlbivax_mutex);
2257 /* Invalidate entry in TLB0, update PTE. */
2258 tlb0_flush_entry(va);
2262 mtx_unlock_spin(&tlbivax_mutex);
2266 * Initialize pmap associated with process 0.
2269 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
2272 PMAP_LOCK_INIT(pmap);
2273 mmu_booke_pinit(mmu, pmap);
2274 PCPU_SET(curpmap, pmap);
2278 * Initialize a preallocated and zeroed pmap structure,
2279 * such as one in a vmspace structure.
2282 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
2286 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
2287 curthread->td_proc->p_pid, curthread->td_proc->p_comm);
2289 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
2291 for (i = 0; i < MAXCPU; i++)
2292 pmap->pm_tid[i] = TID_NONE;
2293 CPU_ZERO(&pmap->pm_active);
2294 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2295 #ifdef __powerpc64__
2296 bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
2297 TAILQ_INIT(&pmap->pm_pdir_list);
2299 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
2301 TAILQ_INIT(&pmap->pm_ptbl_list);
2305 * Release any resources held by the given physical map.
2306 * Called when a pmap initialized by mmu_booke_pinit is being released.
2307 * Should only be called if the map contains no valid mappings.
2310 mmu_booke_release(mmu_t mmu, pmap_t pmap)
2313 KASSERT(pmap->pm_stats.resident_count == 0,
2314 ("pmap_release: pmap resident count %ld != 0",
2315 pmap->pm_stats.resident_count));
2319 * Insert the given physical page at the specified virtual address in the
2320 * target physical map with the protection requested. If specified, the page
2321 * will be wired down.
2324 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2325 vm_prot_t prot, u_int flags, int8_t psind)
2329 rw_wlock(&pvh_global_lock);
2331 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
2333 rw_wunlock(&pvh_global_lock);
2338 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2339 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
2344 int error, su, sync;
2346 pa = VM_PAGE_TO_PHYS(m);
2347 su = (pmap == kernel_pmap);
2350 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
2351 // "pa=0x%08x prot=0x%08x flags=%#x)\n",
2352 // (u_int32_t)pmap, su, pmap->pm_tid,
2353 // (u_int32_t)m, va, pa, prot, flags);
2356 KASSERT(((va >= virtual_avail) &&
2357 (va <= VM_MAX_KERNEL_ADDRESS)),
2358 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
2360 KASSERT((va <= VM_MAXUSER_ADDRESS),
2361 ("mmu_booke_enter_locked: user pmap, non user va"));
2363 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2364 VM_OBJECT_ASSERT_LOCKED(m->object);
2366 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2369 * If there is an existing mapping, and the physical address has not
2370 * changed, this must be a protection or wiring change.
2372 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
2373 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
2376 * Before actually updating pte->flags we calculate and
2377 * prepare its new value in a helper var.
2380 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
2382 /* Wiring change, just update stats. */
2383 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
2384 if (!PTE_ISWIRED(pte)) {
2386 pmap->pm_stats.wired_count++;
2389 if (PTE_ISWIRED(pte)) {
2390 flags &= ~PTE_WIRED;
2391 pmap->pm_stats.wired_count--;
2395 if (prot & VM_PROT_WRITE) {
2396 /* Add write permissions. */
2401 if ((flags & PTE_MANAGED) != 0)
2402 vm_page_aflag_set(m, PGA_WRITEABLE);
2404 /* Handle modified pages, sense modify status. */
2407 * The PTE_MODIFIED flag could have been set by a TLB miss
2408 * since we last read it (above), and other CPUs may update
2409 * it concurrently, so check the PTE directly rather than
2410 * relying on the saved local flags value.
2413 if (PTE_ISMODIFIED(pte))
2417 if (prot & VM_PROT_EXECUTE) {
2423 * Check existing flags for execute permissions: if we
2424 * are turning execute permissions on, icache should
2427 if ((*pte & (PTE_UX | PTE_SX)) == 0)
2431 flags &= ~PTE_REFERENCED;
2434 * The new flags value is all calculated -- only now actually
2437 mtx_lock_spin(&tlbivax_mutex);
2440 tlb0_flush_entry(va);
2441 *pte &= ~PTE_FLAGS_MASK;
2445 mtx_unlock_spin(&tlbivax_mutex);
2449 * If there is an existing mapping, but it's for a different
2450 * physical address, pte_enter() will delete the old mapping.
2452 //if ((pte != NULL) && PTE_ISVALID(pte))
2453 // debugf("mmu_booke_enter_locked: replace\n");
2455 // debugf("mmu_booke_enter_locked: new\n");
2457 /* Now set up the flags and install the new mapping. */
2458 flags = (PTE_SR | PTE_VALID);
2464 if (prot & VM_PROT_WRITE) {
2469 if ((m->oflags & VPO_UNMANAGED) == 0)
2470 vm_page_aflag_set(m, PGA_WRITEABLE);
2473 if (prot & VM_PROT_EXECUTE) {
2479 /* If it's wired, update stats. */
2480 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2483 error = pte_enter(mmu, pmap, m, va, flags,
2484 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
2486 return (KERN_RESOURCE_SHORTAGE);
2488 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2489 pmap->pm_stats.wired_count++;
2491 /* Flush the real memory from the instruction cache. */
2492 if (prot & VM_PROT_EXECUTE)
2496 if (sync && (su || pmap == PCPU_GET(curpmap))) {
2497 __syncicache((void *)va, PAGE_SIZE);
2501 return (KERN_SUCCESS);
2505 * Maps a sequence of resident pages belonging to the same object.
2506 * The sequence begins with the given page m_start. This page is
2507 * mapped at the given virtual address start. Each subsequent page is
2508 * mapped at a virtual address that is offset from start by the same
2509 * amount as the page is offset from m_start within the object. The
2510 * last page in the sequence is the page with the largest offset from
2511 * m_start that can be mapped at a virtual address less than the given
2512 * virtual address end. Not every virtual page between start and end
2513 * is mapped; only those for which a resident page exists with the
2514 * corresponding offset from m_start are mapped.
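 *
 * Illustrative example (values assumed): if m_start has pindex 10 and is
 * mapped at 'start', then a resident page with pindex 13 (diff = 3) is
 * mapped at start + ptoa(3); pages are only entered while
 * start + ptoa(diff) remains below 'end'.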
2517 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
2518 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
2521 vm_pindex_t diff, psize;
2523 VM_OBJECT_ASSERT_LOCKED(m_start->object);
2525 psize = atop(end - start);
2527 rw_wlock(&pvh_global_lock);
2529 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2530 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
2531 prot & (VM_PROT_READ | VM_PROT_EXECUTE),
2532 PMAP_ENTER_NOSLEEP, 0);
2533 m = TAILQ_NEXT(m, listq);
2535 rw_wunlock(&pvh_global_lock);
2540 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2544 rw_wlock(&pvh_global_lock);
2546 mmu_booke_enter_locked(mmu, pmap, va, m,
2547 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
2549 rw_wunlock(&pvh_global_lock);
2554 * Remove the given range of addresses from the specified map.
2556 * It is assumed that the start and end are properly rounded to the page size.
2559 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
2564 int su = (pmap == kernel_pmap);
2566 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
2567 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
2570 KASSERT(((va >= virtual_avail) &&
2571 (va <= VM_MAX_KERNEL_ADDRESS)),
2572 ("mmu_booke_remove: kernel pmap, non kernel va"));
2574 KASSERT((va <= VM_MAXUSER_ADDRESS),
2575 ("mmu_booke_remove: user pmap, non user va"));
2578 if (PMAP_REMOVE_DONE(pmap)) {
2579 //debugf("mmu_booke_remove: e (empty)\n");
2583 hold_flag = PTBL_HOLD_FLAG(pmap);
2584 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
2586 rw_wlock(&pvh_global_lock);
2588 for (; va < endva; va += PAGE_SIZE) {
2589 pte = pte_find(mmu, pmap, va);
2590 if ((pte != NULL) && PTE_ISVALID(pte))
2591 pte_remove(mmu, pmap, va, hold_flag);
2594 rw_wunlock(&pvh_global_lock);
2596 //debugf("mmu_booke_remove: e\n");
2600 * Remove physical page from all pmaps in which it resides.
2603 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
2608 rw_wlock(&pvh_global_lock);
2609 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
2610 pvn = TAILQ_NEXT(pv, pv_link);
2612 PMAP_LOCK(pv->pv_pmap);
2613 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
2614 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
2615 PMAP_UNLOCK(pv->pv_pmap);
2617 vm_page_aflag_clear(m, PGA_WRITEABLE);
2618 rw_wunlock(&pvh_global_lock);
2622 * Map a range of physical addresses into kernel virtual address space.
2625 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2626 vm_paddr_t pa_end, int prot)
2628 vm_offset_t sva = *virt;
2629 vm_offset_t va = sva;
2631 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
2632 // sva, pa_start, pa_end);
2634 while (pa_start < pa_end) {
2635 mmu_booke_kenter(mmu, va, pa_start);
2637 pa_start += PAGE_SIZE;
2641 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
2646 * The pmap must be activated before its address space can be accessed in any way.
2650 mmu_booke_activate(mmu_t mmu, struct thread *td)
2655 pmap = &td->td_proc->p_vmspace->vm_pmap;
2657 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
2658 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2660 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
2664 cpuid = PCPU_GET(cpuid);
2665 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2666 PCPU_SET(curpmap, pmap);
2668 if (pmap->pm_tid[cpuid] == TID_NONE)
2671 /* Load PID0 register with pmap tid value. */
2672 mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
2673 __asm __volatile("isync");
2675 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
2679 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
2680 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
2684 * Deactivate the specified process's address space.
2687 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
2691 pmap = &td->td_proc->p_vmspace->vm_pmap;
2693 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
2694 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2696 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
2698 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
2699 PCPU_SET(curpmap, NULL);
2703 * Copy the range specified by src_addr/len
2704 * from the source map to the range dst_addr/len
2705 * in the destination map.
2707 * This routine is only advisory and need not do anything.
2710 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2711 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2717 * Set the physical protection on the specified range of this map as requested.
2720 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2727 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2728 mmu_booke_remove(mmu, pmap, sva, eva);
2732 if (prot & VM_PROT_WRITE)
2736 for (va = sva; va < eva; va += PAGE_SIZE) {
2737 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2738 if (PTE_ISVALID(pte)) {
2739 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2741 mtx_lock_spin(&tlbivax_mutex);
2744 /* Handle modified pages. */
2745 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
2748 tlb0_flush_entry(va);
2749 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2752 mtx_unlock_spin(&tlbivax_mutex);
2760 * Clear the write and modified bits in each of the given page's mappings.
2763 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2768 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2769 ("mmu_booke_remove_write: page %p is not managed", m));
2772 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2773 * set by another thread while the object is locked. Thus,
2774 * if PGA_WRITEABLE is clear, no page table entries need updating.
2776 VM_OBJECT_ASSERT_WLOCKED(m->object);
2777 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2779 rw_wlock(&pvh_global_lock);
2780 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2781 PMAP_LOCK(pv->pv_pmap);
2782 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2783 if (PTE_ISVALID(pte)) {
2784 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2786 mtx_lock_spin(&tlbivax_mutex);
2789 /* Handle modified pages. */
2790 if (PTE_ISMODIFIED(pte))
2793 /* Flush mapping from TLB0. */
2794 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2797 mtx_unlock_spin(&tlbivax_mutex);
2800 PMAP_UNLOCK(pv->pv_pmap);
2802 vm_page_aflag_clear(m, PGA_WRITEABLE);
2803 rw_wunlock(&pvh_global_lock);
2807 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2816 va = trunc_page(va);
2817 sz = round_page(sz);
2819 rw_wlock(&pvh_global_lock);
2820 pmap = PCPU_GET(curpmap);
2821 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2824 pte = pte_find(mmu, pm, va);
2825 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2831 /* Create a mapping in the active pmap. */
2833 m = PHYS_TO_VM_PAGE(pa);
2835 pte_enter(mmu, pmap, m, addr,
2836 PTE_SR | PTE_VALID | PTE_UR, FALSE);
2837 __syncicache((void *)addr, PAGE_SIZE);
2838 pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2841 __syncicache((void *)va, PAGE_SIZE);
2846 rw_wunlock(&pvh_global_lock);
2850 * Atomically extract and hold the physical page with the given
2851 * pmap and virtual address pair if that mapping permits the given
2855 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2867 pte = pte_find(mmu, pmap, va);
2868 if ((pte != NULL) && PTE_ISVALID(pte)) {
2869 if (pmap == kernel_pmap)
2874 if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2875 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2877 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2888 * Initialize a vm_page's machine-dependent fields.
2891 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2894 m->md.pv_tracked = 0;
2895 TAILQ_INIT(&m->md.pv_list);
2899 * mmu_booke_zero_page_area zeros the specified hardware page by
2900 * mapping it into virtual memory and using bzero to clear its contents.
2903 * off and size must reside within a single page.
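 *
 * For example, mmu_booke_zero_page_area(mmu, m, 256, 512) clears bytes
 * 256 .. 767 of the page backing 'm'.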
2906 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2910 /* XXX KASSERT off and size are within a single page? */
2912 mtx_lock(&zero_page_mutex);
2915 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2916 bzero((caddr_t)va + off, size);
2917 mmu_booke_kremove(mmu, va);
2919 mtx_unlock(&zero_page_mutex);
2923 * mmu_booke_zero_page zeros the specified hardware page.
2926 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2928 vm_offset_t off, va;
2930 mtx_lock(&zero_page_mutex);
2933 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2934 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
2935 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
2936 mmu_booke_kremove(mmu, va);
2938 mtx_unlock(&zero_page_mutex);
2942 * mmu_booke_copy_page copies the specified (machine independent) page by
2943 * mapping the page into virtual memory and using memcpy to copy the page,
2944 * one machine-dependent page at a time.
2947 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2949 vm_offset_t sva, dva;
2951 sva = copy_page_src_va;
2952 dva = copy_page_dst_va;
2954 mtx_lock(©_page_mutex);
2955 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2956 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2957 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2958 mmu_booke_kremove(mmu, dva);
2959 mmu_booke_kremove(mmu, sva);
2960 mtx_unlock(©_page_mutex);
2964 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
2965 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
2968 vm_offset_t a_pg_offset, b_pg_offset;
2971 mtx_lock(©_page_mutex);
2972 while (xfersize > 0) {
2973 a_pg_offset = a_offset & PAGE_MASK;
2974 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2975 mmu_booke_kenter(mmu, copy_page_src_va,
2976 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
2977 a_cp = (char *)copy_page_src_va + a_pg_offset;
2978 b_pg_offset = b_offset & PAGE_MASK;
2979 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2980 mmu_booke_kenter(mmu, copy_page_dst_va,
2981 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
2982 b_cp = (char *)copy_page_dst_va + b_pg_offset;
2983 bcopy(a_cp, b_cp, cnt);
2984 mmu_booke_kremove(mmu, copy_page_dst_va);
2985 mmu_booke_kremove(mmu, copy_page_src_va);
2990 mtx_unlock(©_page_mutex);
2994 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
3001 paddr = VM_PAGE_TO_PHYS(m);
3003 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
3004 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
3005 flags |= PTE_PS_4KB;
3008 qaddr = PCPU_GET(qmap_addr);
3010 pte = pte_find(mmu, kernel_pmap, qaddr);
3012 KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
3015 * XXX: tlbivax is broadcast to other cores, but qaddr should
3016 * not be present in other TLBs. Is there a better instruction
3017 * sequence to use? Or just forget it & use mmu_booke_kenter()...
3019 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
3020 __asm __volatile("isync; msync");
3022 *pte = PTE_RPN_FROM_PA(paddr) | flags;
3024 /* Flush the real memory from the instruction cache. */
3025 if ((flags & (PTE_I | PTE_G)) == 0)
3026 __syncicache((void *)qaddr, PAGE_SIZE);
3032 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
3036 pte = pte_find(mmu, kernel_pmap, addr);
3038 KASSERT(PCPU_GET(qmap_addr) == addr,
3039 ("mmu_booke_quick_remove_page: invalid address"));
3041 ("mmu_booke_quick_remove_page: PTE not in use"));
3048 * Return whether or not the specified physical page was modified
3049 * in any physical map.
3052 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
3058 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3059 ("mmu_booke_is_modified: page %p is not managed", m));
3063 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3064 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
3065 * is clear, no PTEs can be modified.
3067 VM_OBJECT_ASSERT_WLOCKED(m->object);
3068 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3070 rw_wlock(&pvh_global_lock);
3071 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3072 PMAP_LOCK(pv->pv_pmap);
3073 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3075 if (PTE_ISMODIFIED(pte))
3078 PMAP_UNLOCK(pv->pv_pmap);
3082 rw_wunlock(&pvh_global_lock);
3087 * Return whether or not the specified virtual address is eligible
3091 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
3098 * Return whether or not the specified physical page was referenced
3099 * in any physical map.
3102 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
3108 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3109 ("mmu_booke_is_referenced: page %p is not managed", m));
3111 rw_wlock(&pvh_global_lock);
3112 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3113 PMAP_LOCK(pv->pv_pmap);
3114 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3116 if (PTE_ISREFERENCED(pte))
3119 PMAP_UNLOCK(pv->pv_pmap);
3123 rw_wunlock(&pvh_global_lock);
3128 * Clear the modify bits on the specified physical page.
3131 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
3136 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3137 ("mmu_booke_clear_modify: page %p is not managed", m));
3138 VM_OBJECT_ASSERT_WLOCKED(m->object);
3139 KASSERT(!vm_page_xbusied(m),
3140 ("mmu_booke_clear_modify: page %p is exclusive busied", m));
3143 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
3144 * If the object containing the page is locked and the page is not
3145 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3147 if ((m->aflags & PGA_WRITEABLE) == 0)
3149 rw_wlock(&pvh_global_lock);
3150 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3151 PMAP_LOCK(pv->pv_pmap);
3152 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3154 mtx_lock_spin(&tlbivax_mutex);
3157 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
3158 tlb0_flush_entry(pv->pv_va);
3159 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
3164 mtx_unlock_spin(&tlbivax_mutex);
3166 PMAP_UNLOCK(pv->pv_pmap);
3168 rw_wunlock(&pvh_global_lock);
3172 * Return a count of reference bits for a page, clearing those bits.
3173 * It is not necessary for every reference bit to be cleared, but it
3174 * is necessary that 0 only be returned when there are truly no
3175 * reference bits set.
3177 * As an optimization, update the page's dirty field if a modified bit is
3178 * found while counting reference bits. This opportunistic update can be
3179 * performed at low cost and can eliminate the need for some future calls
3180 * to pmap_is_modified(). However, since this function stops after
3181 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3182 * dirty pages. Those dirty pages will only be detected by a future call
3183 * to pmap_is_modified().
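 *
 * For example (illustrative): if a page is mapped by three pmaps and two of
 * those mappings have PTE_REFERENCED set, this returns 2 and clears the
 * reference bit in both of those PTEs.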
3186 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
3192 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3193 ("mmu_booke_ts_referenced: page %p is not managed", m));
3195 rw_wlock(&pvh_global_lock);
3196 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3197 PMAP_LOCK(pv->pv_pmap);
3198 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3200 if (PTE_ISMODIFIED(pte))
3202 if (PTE_ISREFERENCED(pte)) {
3203 mtx_lock_spin(&tlbivax_mutex);
3206 tlb0_flush_entry(pv->pv_va);
3207 *pte &= ~PTE_REFERENCED;
3210 mtx_unlock_spin(&tlbivax_mutex);
3212 if (++count >= PMAP_TS_REFERENCED_MAX) {
3213 PMAP_UNLOCK(pv->pv_pmap);
3218 PMAP_UNLOCK(pv->pv_pmap);
3220 rw_wunlock(&pvh_global_lock);
3225 * Clear the wired attribute from the mappings for the specified range of
3226 * addresses in the given pmap. Every valid mapping within that range must
3227 * have the wired attribute set. In contrast, invalid mappings cannot have
3228 * the wired attribute set, so they are ignored.
3230 * The wired attribute of the page table entry is not a hardware feature, so
3231 * there is no need to invalidate any TLB entries.
3234 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3240 for (va = sva; va < eva; va += PAGE_SIZE) {
3241 if ((pte = pte_find(mmu, pmap, va)) != NULL &&
3243 if (!PTE_ISWIRED(pte))
3244 panic("mmu_booke_unwire: pte %p isn't wired",
3247 pmap->pm_stats.wired_count--;
3255 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
3256 * page. This count may be changed upwards or downwards in the future; it is
3257 * only necessary that true be returned for a small subset of pmaps for proper page aging.
3261 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
3267 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3268 ("mmu_booke_page_exists_quick: page %p is not managed", m));
3271 rw_wlock(&pvh_global_lock);
3272 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3273 if (pv->pv_pmap == pmap) {
3280 rw_wunlock(&pvh_global_lock);
3285 * Return the number of managed mappings to the given physical page that are wired.
3289 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
3295 if ((m->oflags & VPO_UNMANAGED) != 0)
3297 rw_wlock(&pvh_global_lock);
3298 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3299 PMAP_LOCK(pv->pv_pmap);
3300 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
3301 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
3303 PMAP_UNLOCK(pv->pv_pmap);
3305 rw_wunlock(&pvh_global_lock);
3310 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3316 * This currently does not work for entries that
3317 * overlap TLB1 entries.
3319 for (i = 0; i < TLB1_ENTRIES; i ++) {
3320 if (tlb1_iomapped(i, pa, size, &va) == 0)
3328 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
3334 /* Minidumps are based on virtual memory addresses. */
3336 *va = (void *)(vm_offset_t)pa;
3340 /* Raw physical memory dumps don't have a virtual address. */
3341 /* We always map a 256MB page at 256M. */
3342 gran = 256 * 1024 * 1024;
3343 ppa = rounddown2(pa, gran);
3346 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
3348 if (sz > (gran - ofs))
3349 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
3354 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
3362 /* Minidumps are based on virtual memory addresses. */
3363 /* Nothing to do... */
3367 for (i = 0; i < TLB1_ENTRIES; i++) {
3368 tlb1_read_entry(&e, i);
3369 if (!(e.mas1 & MAS1_VALID))
3373 /* Raw physical memory dumps don't have a virtual address. */
3378 tlb1_write_entry(&e, i);
3380 gran = 256 * 1024 * 1024;
3381 ppa = rounddown2(pa, gran);
3383 if (sz > (gran - ofs)) {
3388 tlb1_write_entry(&e, i);
3392 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3395 mmu_booke_scan_init(mmu_t mmu)
3402 /* Initialize phys. segments for dumpsys(). */
3403 memset(&dump_map, 0, sizeof(dump_map));
3404 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
3405 &availmem_regions_sz);
3406 for (i = 0; i < physmem_regions_sz; i++) {
3407 dump_map[i].pa_start = physmem_regions[i].mr_start;
3408 dump_map[i].pa_size = physmem_regions[i].mr_size;
3413 /* Virtual segments for minidumps: */
3414 memset(&dump_map, 0, sizeof(dump_map));
3416 /* 1st: kernel .data and .bss. */
3417 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3418 dump_map[0].pa_size =
3419 round_page((uintptr_t)_end) - dump_map[0].pa_start;
3421 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3422 dump_map[1].pa_start = data_start;
3423 dump_map[1].pa_size = data_end - data_start;
3425 /* 3rd: kernel VM. */
3426 va = dump_map[1].pa_start + dump_map[1].pa_size;
3427 /* Find start of next chunk (from va). */
3428 while (va < virtual_end) {
3429 /* Don't dump the buffer cache. */
3430 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3431 va = kmi.buffer_eva;
3434 pte = pte_find(mmu, kernel_pmap, va);
3435 if (pte != NULL && PTE_ISVALID(pte))
3439 if (va < virtual_end) {
3440 dump_map[2].pa_start = va;
3442 /* Find last page in chunk. */
3443 while (va < virtual_end) {
3444 /* Don't run into the buffer cache. */
3445 if (va == kmi.buffer_sva)
3447 pte = pte_find(mmu, kernel_pmap, va);
3448 if (pte == NULL || !PTE_ISVALID(pte))
3452 dump_map[2].pa_size = va - dump_map[2].pa_start;
3457 * Map a set of physical memory pages into the kernel virtual address space.
3458 * Return a pointer to where it is mapped. This routine is intended to be used
3459 * for mapping device memory, NOT real memory.
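 *
 * Callers normally reach this through the machine-independent pmap_mapdev(9)
 * interface.  A typical (illustrative) sequence for a device register window:
 *
 *	regs = pmap_mapdev(regs_pa, regs_size);
 *	<access the device registers through regs>
 *	pmap_unmapdev((vm_offset_t)regs, regs_size);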
3462 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3465 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
3469 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3473 uintptr_t va, tmpva;
3478 * Check if this is premapped in TLB1. Note: this should probably also
3479 * check whether a sequence of TLB1 entries exists that matches the
3480 * requirement, but for now only the easy case is checked.
3482 for (i = 0; i < TLB1_ENTRIES; i++) {
3483 tlb1_read_entry(&e, i);
3484 if (!(e.mas1 & MAS1_VALID))
3487 (pa + size) <= (e.phys + e.size) &&
3488 (ma == VM_MEMATTR_DEFAULT ||
3489 tlb_calc_wimg(pa, ma) ==
3490 (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
3491 return (void *)(e.virt +
3492 (vm_offset_t)(pa - e.phys));
3495 size = roundup(size, PAGE_SIZE);
3498 * The device mapping area is between VM_MAXUSER_ADDRESS and
3499 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing.
3501 #ifdef SPARSE_MAPDEV
3503 * With a sparse mapdev, align to the largest starting region. This
3504 * could feasibly be optimized for a 'best-fit' alignment, but that
3505 * calculation could be very costly.
3506 * Align to the smaller of:
3507 * - first set bit in overlap of (pa & size mask)
3508 * - largest size envelope
3510 * It's possible the device mapping may start at a PA that's not larger
3511 * than the size mask, so we need to offset in to maximize the TLB entry
3512 * range and minimize the number of used TLB entries.
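 *
 * Worked example (illustrative values): for a 64KB request the size mask is
 * 0xffff.  If pa = 0xf1230000 (64KB aligned), pa & 0xffff == 0, so va is
 * simply rounded up to the next 64KB boundary and a single 64KB entry can
 * cover the mapping.  If pa = 0xf1001000 instead, pa & 0xffff == 0x1000 and
 * the chosen va mirrors those low bits, keeping va congruent to pa so the
 * wiring loop below can still use the largest page sizes the PA allows.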
3515 tmpva = tlb1_map_base;
3516 sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
3517 sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
3518 va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
3519 #ifdef __powerpc64__
3520 } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
3522 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
3525 #ifdef __powerpc64__
3526 va = atomic_fetchadd_long(&tlb1_map_base, size);
3528 va = atomic_fetchadd_int(&tlb1_map_base, size);
3534 sz = 1 << (ilog2(size) & ~1);
3535 /* Align size to PA */
3539 } while (pa % sz != 0);
3541 /* Now align from there to VA */
3545 } while (va % sz != 0);
3548 printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
3549 va, (uintmax_t)pa, sz);
3550 if (tlb1_set_entry(va, pa, sz,
3551 _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0)
3562 * 'Unmap' a range mapped by mmu_booke_mapdev().
3565 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
3567 #ifdef SUPPORTS_SHRINKING_TLB1
3568 vm_offset_t base, offset;
3571 * Unmap only if this is inside kernel virtual space.
3573 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
3574 base = trunc_page(va);
3575 offset = va & PAGE_MASK;
3576 size = roundup(offset + size, PAGE_SIZE);
3577 kva_free(base, size);
3583 * mmu_booke_object_init_pt preloads the ptes for a given object into the
3584 * specified pmap. This eliminates the blast of soft faults on process startup
3585 * and immediately after an mmap.
3588 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3589 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3592 VM_OBJECT_ASSERT_WLOCKED(object);
3593 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3594 ("mmu_booke_object_init_pt: non-device object"));
3598 * Perform the pmap work for mincore.
3601 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3602 vm_paddr_t *locked_pa)
3605 /* XXX: this should be implemented at some point */
3610 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
3618 /* Check TLB1 mappings */
3619 for (i = 0; i < TLB1_ENTRIES; i++) {
3620 tlb1_read_entry(&e, i);
3621 if (!(e.mas1 & MAS1_VALID))
3623 if (addr >= e.virt && addr < e.virt + e.size)
3626 if (i < TLB1_ENTRIES) {
3627 /* Only allow full mappings to be modified for now. */
3628 /* Validate the range. */
3629 for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
3630 tlb1_read_entry(&e, j);
3631 if (va != e.virt || (sz - (va - addr) < e.size))
3634 for (va = addr; va < addr + sz; va += e.size, i++) {
3635 tlb1_read_entry(&e, i);
3636 e.mas2 &= ~MAS2_WIMGE_MASK;
3637 e.mas2 |= tlb_calc_wimg(e.phys, mode);
3640 * Write it out to the TLB. Should really re-sync with other
3643 tlb1_write_entry(&e, i);
3648 /* Not in TLB1, try through pmap */
3649 /* First validate the range. */
3650 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3651 pte = pte_find(mmu, kernel_pmap, va);
3652 if (pte == NULL || !PTE_ISVALID(pte))
3656 mtx_lock_spin(&tlbivax_mutex);
3658 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3659 pte = pte_find(mmu, kernel_pmap, va);
3660 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
3661 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
3662 tlb0_flush_entry(va);
3665 mtx_unlock_spin(&tlbivax_mutex);
3670 /**************************************************************************/
3672 /**************************************************************************/
3675 * Allocate a TID. If necessary, steal one from someone else.
3676 * The new TID is flushed from the TLB before returning.
3679 tid_alloc(pmap_t pmap)
3684 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
3686 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
3688 thiscpu = PCPU_GET(cpuid);
3690 tid = PCPU_GET(tid_next);
3693 PCPU_SET(tid_next, tid + 1);
3695 /* If we are stealing a TID, clear the relevant pmap's field. */
3696 if (tidbusy[thiscpu][tid] != NULL) {
3698 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
3700 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
3702 /* Flush all entries from TLB0 matching this TID. */
3706 tidbusy[thiscpu][tid] = pmap;
3707 pmap->pm_tid[thiscpu] = tid;
3708 __asm __volatile("msync; isync");
3710 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
3711 PCPU_GET(tid_next));
3716 /**************************************************************************/
3718 /**************************************************************************/
3721 #ifdef __powerpc64__
3722 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
3724 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3735 if (mas1 & MAS1_VALID)
3740 if (mas1 & MAS1_IPROT)
3745 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3746 tid = MAS1_GETTID(mas1);
3748 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3751 size = tsize2size(tsize);
3753 debugf("%3d: (%s) [AS=%d] "
3754 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
3755 "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3756 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
3759 /* Convert TLB0 va and way number to tlb0[] table index. */
3760 static inline unsigned int
3761 tlb0_tableidx(vm_offset_t va, unsigned int way)
3765 idx = (way * TLB0_ENTRIES_PER_WAY);
3766 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
3771 * Invalidate TLB0 entry.
3774 tlb0_flush_entry(vm_offset_t va)
3777 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
3779 mtx_assert(&tlbivax_mutex, MA_OWNED);
3781 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
3782 __asm __volatile("isync; msync");
3783 __asm __volatile("tlbsync; msync");
3785 CTR1(KTR_PMAP, "%s: e", __func__);
3788 /* Print out contents of the MAS registers for each TLB0 entry */
3790 tlb0_print_tlbentries(void)
3792 uint32_t mas0, mas1, mas3, mas7;
3793 #ifdef __powerpc64__
3798 int entryidx, way, idx;
3800 debugf("TLB0 entries:\n");
3801 for (way = 0; way < TLB0_WAYS; way ++)
3802 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
3804 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
3805 mtspr(SPR_MAS0, mas0);
3806 __asm __volatile("isync");
3808 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
3809 mtspr(SPR_MAS2, mas2);
3811 __asm __volatile("isync; tlbre");
3813 mas1 = mfspr(SPR_MAS1);
3814 mas2 = mfspr(SPR_MAS2);
3815 mas3 = mfspr(SPR_MAS3);
3816 mas7 = mfspr(SPR_MAS7);
3818 idx = tlb0_tableidx(mas2, way);
3819 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3823 /**************************************************************************/
3825 /**************************************************************************/
3828 * TLB1 mapping notes:
3830 * TLB1[0] Kernel text and data.
3831 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
3832 * windows, other device mappings.
3836 * Read an entry from the given TLB1 slot.
3839 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
3844 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
3847 __asm __volatile("wrteei 0");
3849 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
3850 mtspr(SPR_MAS0, mas0);
3851 __asm __volatile("isync; tlbre");
3853 entry->mas1 = mfspr(SPR_MAS1);
3854 entry->mas2 = mfspr(SPR_MAS2);
3855 entry->mas3 = mfspr(SPR_MAS3);
3857 switch ((mfpvr() >> 16) & 0xFFFF) {
3862 entry->mas7 = mfspr(SPR_MAS7);
3870 entry->virt = entry->mas2 & MAS2_EPN_MASK;
3871 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
3872 (entry->mas3 & MAS3_RPN);
3874 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
3877 struct tlbwrite_args {
3883 tlb1_write_entry_int(void *arg)
3885 struct tlbwrite_args *args = arg;
3889 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx);
3891 mtspr(SPR_MAS0, mas0);
3892 __asm __volatile("isync");
3893 mtspr(SPR_MAS1, args->e->mas1);
3894 __asm __volatile("isync");
3895 mtspr(SPR_MAS2, args->e->mas2);
3896 __asm __volatile("isync");
3897 mtspr(SPR_MAS3, args->e->mas3);
3898 __asm __volatile("isync");
3899 switch ((mfpvr() >> 16) & 0xFFFF) {
3904 __asm __volatile("isync");
3907 mtspr(SPR_MAS7, args->e->mas7);
3908 __asm __volatile("isync");
3914 __asm __volatile("tlbwe; isync; msync");
3919 tlb1_write_entry_sync(void *arg)
3921 /* Empty synchronization point for smp_rendezvous(). */
3925 * Write the given entry to the TLB1 hardware.
3928 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
3930 struct tlbwrite_args args;
3936 if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
3938 smp_rendezvous(tlb1_write_entry_sync,
3939 tlb1_write_entry_int,
3940 tlb1_write_entry_sync, &args);
3947 __asm __volatile("wrteei 0");
3948 tlb1_write_entry_int(&args);
3954 * Return the largest uint value log such that 2^log <= num.
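 *
 * For example, ilog2(0x10000) == 16 and ilog2(0x1ffff) == 16, since in both
 * cases 2^16 <= num < 2^17.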
3957 ilog2(unsigned int num)
3961 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
3966 * Convert TLB TSIZE value to mapped region size.
3969 tsize2size(unsigned int tsize)
3974 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
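 *
 * For example: tsize 1 -> 4KB, 2 -> 16KB, 3 -> 64KB, 5 -> 1MB, 7 -> 16MB,
 * 9 -> 256MB.  size2tsize() below is the exact inverse for these values.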
3977 return ((1 << (2 * tsize)) * 1024);
3981 * Convert region size (must be power of 4) to TLB TSIZE value.
3984 size2tsize(vm_size_t size)
3987 return (ilog2(size) / 2 - 5);
3991 * Register permanent kernel mapping in TLB1.
3993 * Entries are created starting from index 0 (the first free slot is
3994 * used) and are not supposed to be invalidated.
3997 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
4004 for (index = 0; index < TLB1_ENTRIES; index++) {
4005 tlb1_read_entry(&e, index);
4006 if ((e.mas1 & MAS1_VALID) == 0)
4008 /* Check if we're just updating the flags, and update them. */
4009 if (e.phys == pa && e.virt == va && e.size == size) {
4010 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4011 tlb1_write_entry(&e, index);
4015 if (index >= TLB1_ENTRIES) {
4016 printf("tlb1_set_entry: TLB1 full!\n");
4020 /* Convert size to TSIZE */
4021 tsize = size2tsize(size);
4023 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
4024 /* XXX TS is hard coded to 0 for now as we only use single address space */
4025 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
4030 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
4031 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
4032 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4034 /* Set supervisor RWX permission bits */
4035 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
4036 e.mas7 = (pa >> 32) & MAS7_RPN;
4038 tlb1_write_entry(&e, index);
4041 * XXX In general, TLB1 updates should be propagated between CPUs,
4042 * since the current design assumes the same TLB1 set-up on all CPUs.
4049 * Map a contiguous RAM region into TLB1, using at most
4050 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
4052 * If necessary, round up the last entry size and return the total size
4053 * used by all allocated entries.
4056 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
4058 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
4059 vm_size_t mapped, pgsz, base, mask;
4062 /* Round up to the next 1M */
4063 size = roundup2(size, 1 << 20);
4068 pgsz = 64*1024*1024;
4069 while (mapped < size) {
4070 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
4071 while (pgsz > (size - mapped))
4077 /* We under-map. Correct for this. */
4078 if (mapped < size) {
4079 while (pgs[idx - 1] == pgsz) {
4083 /* XXX We may increase beyond our starting point. */
4092 /* Align address to the boundary */
4094 va = (va + mask) & ~mask;
4095 pa = (pa + mask) & ~mask;
4098 for (idx = 0; idx < nents; idx++) {
4100 debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
4101 tlb1_set_entry(va, pa, pgsz,
4102 _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
4107 mapped = (va - base);
4108 printf("mapped size 0x%"PRI0ptrX" (wasted space 0x%"PRIxPTR")\n",
4109 mapped, mapped - size);
4114 * TLB1 initialization routine, to be called after the very first
4115 * assembler level setup done in locore.S.
4120 uint32_t mas0, mas1, mas2, mas3, mas7;
4125 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
4126 mtspr(SPR_MAS0, mas0);
4127 __asm __volatile("isync; tlbre");
4129 mas1 = mfspr(SPR_MAS1);
4130 mas2 = mfspr(SPR_MAS2);
4131 mas3 = mfspr(SPR_MAS3);
4132 mas7 = mfspr(SPR_MAS7);
4134 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
4137 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4138 kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
4140 /* Setup TLB miss defaults */
4141 set_mas4_defaults();
4145 * pmap_early_io_unmap() should be used soon after the corresponding
4146 * pmap_early_io_map(), as in the following snippet:
4148 * x = pmap_early_io_map(...);
4149 * <do something with x>
4150 * pmap_early_io_unmap(x, size);
4152 * No other allocations should occur in between.
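 *
 * A concrete (hypothetical) instance of the pattern, e.g. probing an early
 * console whose registers live at physical address uart_pa:
 *
 *	vm_offset_t va;
 *
 *	va = pmap_early_io_map(uart_pa, PAGE_SIZE);
 *	<poke at the device through va>
 *	pmap_early_io_unmap(va, PAGE_SIZE);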
4155 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
4161 size = roundup(size, PAGE_SIZE);
4163 for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
4164 tlb1_read_entry(&e, i);
4165 if (!(e.mas1 & MAS1_VALID))
4167 if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
4169 e.mas1 &= ~MAS1_VALID;
4170 tlb1_write_entry(&e, i);
4173 if (tlb1_map_base == va + isize)
4174 tlb1_map_base -= isize;
4178 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
4185 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
4187 for (i = 0; i < TLB1_ENTRIES; i++) {
4188 tlb1_read_entry(&e, i);
4189 if (!(e.mas1 & MAS1_VALID))
4191 if (pa >= e.phys && (pa + size) <=
4193 return (e.virt + (pa - e.phys));
4196 pa_base = rounddown(pa, PAGE_SIZE);
4197 size = roundup(size + (pa - pa_base), PAGE_SIZE);
4198 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
4199 va = tlb1_map_base + (pa - pa_base);
4202 sz = 1 << (ilog2(size) & ~1);
4203 tlb1_set_entry(tlb1_map_base, pa_base, sz,
4204 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
4207 tlb1_map_base += sz;
4214 pmap_track_page(pmap_t pmap, vm_offset_t va)
4218 struct pv_entry *pve;
4220 va = trunc_page(va);
4221 pa = pmap_kextract(va);
4223 rw_wlock(&pvh_global_lock);
4225 page = PHYS_TO_VM_PAGE(pa);
4227 TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
4228 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
4232 page->md.pv_tracked = true;
4233 pv_insert(pmap, va, page);
4236 rw_wunlock(&pvh_global_lock);
4241 * Setup MAS4 defaults.
4242 * These values are loaded to MAS0-2 on a TLB miss.
4245 set_mas4_defaults(void)
4249 /* Defaults: TLB0, PID0, TSIZED=4K */
4250 mas4 = MAS4_TLBSELD0;
4251 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
4255 mtspr(SPR_MAS4, mas4);
4256 __asm __volatile("isync");
4260 * Print out contents of the MAS registers for each TLB1 entry
4263 tlb1_print_tlbentries(void)
4265 uint32_t mas0, mas1, mas3, mas7;
4266 #ifdef __powerpc64__
4273 debugf("TLB1 entries:\n");
4274 for (i = 0; i < TLB1_ENTRIES; i++) {
4276 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
4277 mtspr(SPR_MAS0, mas0);
4279 __asm __volatile("isync; tlbre");
4281 mas1 = mfspr(SPR_MAS1);
4282 mas2 = mfspr(SPR_MAS2);
4283 mas3 = mfspr(SPR_MAS3);
4284 mas7 = mfspr(SPR_MAS7);
4286 tlb_print_entry(i, mas1, mas2, mas3, mas7);
4291 * Return 0 if the physical IO range is encompassed by one of the
4292 * TLB1 entries, otherwise return a related error code.
4295 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
4298 vm_paddr_t pa_start;
4300 unsigned int entry_tsize;
4301 vm_size_t entry_size;
4304 *va = (vm_offset_t)NULL;
4306 tlb1_read_entry(&e, i);
4307 /* Skip invalid entries */
4308 if (!(e.mas1 & MAS1_VALID))
4312 * The entry must be cache-inhibited, guarded, and r/w
4313 * so it can function as an i/o page
4315 prot = e.mas2 & (MAS2_I | MAS2_G);
4316 if (prot != (MAS2_I | MAS2_G))
4319 prot = e.mas3 & (MAS3_SR | MAS3_SW);
4320 if (prot != (MAS3_SR | MAS3_SW))
4323 /* The address should be within the entry range. */
4324 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4325 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
4327 entry_size = tsize2size(entry_tsize);
4328 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
4329 (e.mas3 & MAS3_RPN);
4330 pa_end = pa_start + entry_size;
4332 if ((pa < pa_start) || ((pa + size) > pa_end))
4335 /* Return virtual address of this mapping. */
4336 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
4341 * Invalidate all TLB0 entries which match the given TID. Note this is
4342 * intended for cases when invalidations should NOT be propagated to other CPUs.
4346 tid_flush(tlbtid_t tid)
4349 uint32_t mas0, mas1, mas2;
4353 /* Don't evict kernel translations */
4354 if (tid == TID_KERNEL)
4358 __asm __volatile("wrteei 0");
4360 for (way = 0; way < TLB0_WAYS; way++)
4361 for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
4363 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
4364 mtspr(SPR_MAS0, mas0);
4365 __asm __volatile("isync");
4367 mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
4368 mtspr(SPR_MAS2, mas2);
4370 __asm __volatile("isync; tlbre");
4372 mas1 = mfspr(SPR_MAS1);
4374 if (!(mas1 & MAS1_VALID))
4376 if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
4378 mas1 &= ~MAS1_VALID;
4379 mtspr(SPR_MAS1, mas1);
4380 __asm __volatile("isync; tlbwe; isync; msync");