2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Some hw specific parts of this pmap were derived from or influenced
29 * by NetBSD's ibm4xx pmap module. More generic code is shared with
30 * a few other pmap modules from the FreeBSD tree.
36 * Kernel and user threads run within one common virtual address space
40 * Virtual address space layout:
41 * -----------------------------
42 * 0x0000_0000 - 0x7fff_ffff : user process
43 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
44 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
45 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
46 * 0xc100_0000 - 0xffff_ffff : KVA
47 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
48 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
49 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
50 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
53 * Virtual address space layout:
54 * -----------------------------
55 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
56 * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
57 * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
58 * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
59 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
60 * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
61 * endkernel - msgbufp-1 : flat device tree
62 * msgbufp - ptbl_bufs-1 : message buffer
63 * ptbl_bufs - kernel_pdir-1 : kernel page tables
64 * kernel_pdir - kernel_pp2d-1 : kernel page directory
65 * kernel_pp2d - . : kernel pointers to page directory
66 * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
67 * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
68 * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
69 * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
70 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
71 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
72 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
73 * 0xf000_0000_0000_0000 - +Maxmem : physmem map
74 * - 0xffff_ffff_ffff_ffff : device direct map
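 *
 * Illustrative sketch (a summary, not part of the original notes): on 64-bit
 * Book-E a virtual address is resolved through three levels, exactly as
 * pte_find() does later in this file:
 *
 *	pdir = pmap->pm_pp2d[PP2D_IDX(va)];	root page of pdir pointers
 *	ptbl = pdir[PDIR_IDX(va)];		leaf page table
 *	pte  = &ptbl[PTBL_IDX(va)];		PTE for 'va'
 *
 * The 32-bit kernel uses a two-level pm_pdir/ptbl scheme instead; the index
 * widths follow from the PP2D/PDIR/PTBL geometry constants used throughout
 * this file.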
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
80 #include "opt_kstack_pages.h"
82 #include <sys/param.h>
84 #include <sys/malloc.h>
88 #include <sys/queue.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/kerneldump.h>
92 #include <sys/linker.h>
93 #include <sys/msgbuf.h>
95 #include <sys/mutex.h>
96 #include <sys/rwlock.h>
97 #include <sys/sched.h>
99 #include <sys/vmmeter.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_extern.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_map.h>
109 #include <vm/vm_pager.h>
112 #include <machine/_inttypes.h>
113 #include <machine/cpu.h>
114 #include <machine/pcb.h>
115 #include <machine/platform.h>
117 #include <machine/tlb.h>
118 #include <machine/spr.h>
119 #include <machine/md_var.h>
120 #include <machine/mmuvar.h>
121 #include <machine/pmap.h>
122 #include <machine/pte.h>
126 #define SPARSE_MAPDEV
128 #define debugf(fmt, args...) printf(fmt, ##args)
130 #define debugf(fmt, args...)
134 #define PRI0ptrX "016lx"
136 #define PRI0ptrX "08x"
139 #define TODO panic("%s: not implemented", __func__);
141 extern unsigned char _etext[];
142 extern unsigned char _end[];
144 extern uint32_t *bootinfo;
147 vm_offset_t kernstart;
150 /* Message buffer and tables. */
151 static vm_offset_t data_start;
152 static vm_size_t data_end;
154 /* Phys/avail memory regions. */
155 static struct mem_region *availmem_regions;
156 static int availmem_regions_sz;
157 static struct mem_region *physmem_regions;
158 static int physmem_regions_sz;
160 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
161 static vm_offset_t zero_page_va;
162 static struct mtx zero_page_mutex;
164 static struct mtx tlbivax_mutex;
166 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
167 static vm_offset_t copy_page_src_va;
168 static vm_offset_t copy_page_dst_va;
169 static struct mtx copy_page_mutex;
171 /**************************************************************************/
173 /**************************************************************************/
175 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
176 vm_prot_t, u_int flags, int8_t psind);
178 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
179 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
181 unsigned int kernel_pdirs;
185 * If user pmap is processed with mmu_booke_remove and the resident count
186 * drops to 0, there are no more pages to remove, so we need not continue.
188 #define PMAP_REMOVE_DONE(pmap) \
189 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
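/*
 * Sketch of typical use (illustrative only; 'hold_flag' stands for whatever
 * flags the caller computed and is not a name defined here): a removal loop
 * can bail out early once the user pmap has no resident pages left:
 *
 *	for (va = sva; va < eva; va += PAGE_SIZE) {
 *		if (PMAP_REMOVE_DONE(pmap))
 *			break;
 *		pte_remove(mmu, pmap, va, hold_flag);
 *	}
 */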
191 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
192 extern int elf32_nxstack;
195 /**************************************************************************/
196 /* TLB and TID handling */
197 /**************************************************************************/
199 /* Translation ID busy table */
200 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
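/*
 * Summary note: tidbusy[cpu][tid] records which pmap currently owns
 * translation ID 'tid' on CPU 'cpu'.  tid_alloc() hands TIDs out per CPU and
 * tid_flush() is used when a TID is recycled; the TID_KERNEL entry is
 * reserved for kernel_pmap (set up in mmu_booke_bootstrap()).
 */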
203 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
204 * core revisions and should be read from h/w registers during early config.
206 uint32_t tlb0_entries;
208 uint32_t tlb0_entries_per_way;
209 uint32_t tlb1_entries;
211 #define TLB0_ENTRIES (tlb0_entries)
212 #define TLB0_WAYS (tlb0_ways)
213 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
215 #define TLB1_ENTRIES (tlb1_entries)
217 static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
219 static tlbtid_t tid_alloc(struct pmap *);
220 static void tid_flush(tlbtid_t tid);
223 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
225 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
228 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
229 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
230 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
231 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
233 static vm_size_t tsize2size(unsigned int);
234 static unsigned int size2tsize(vm_size_t);
235 static unsigned int ilog2(unsigned int);
237 static void set_mas4_defaults(void);
239 static inline void tlb0_flush_entry(vm_offset_t);
240 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
242 /**************************************************************************/
243 /* Page table management */
244 /**************************************************************************/
246 static struct rwlock_padalign pvh_global_lock;
248 /* Data for the pv entry allocation mechanism */
249 static uma_zone_t pvzone;
250 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
252 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
254 #ifndef PMAP_SHPGPERPROC
255 #define PMAP_SHPGPERPROC 200
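/*
 * PMAP_SHPGPERPROC is the assumed number of shared pages per process used to
 * size the pv entry zone: mmu_booke_init() computes
 * pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count, and both values
 * can be overridden via the vm.pmap.shpgperproc and vm.pmap.pv_entries
 * tunables.
 */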
258 static void ptbl_init(void);
259 static struct ptbl_buf *ptbl_buf_alloc(void);
260 static void ptbl_buf_free(struct ptbl_buf *);
261 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
264 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
265 unsigned int, boolean_t);
266 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
267 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
268 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
270 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
271 static void ptbl_free(mmu_t, pmap_t, unsigned int);
272 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
273 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
276 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
277 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
278 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
279 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
280 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
282 static pv_entry_t pv_alloc(void);
283 static void pv_free(pv_entry_t);
284 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
285 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
287 static void booke_pmap_init_qpages(void);
289 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
291 #define PTBL_BUFS (16UL * 16 * 16)
293 #define PTBL_BUFS (128 * 16)
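/*
 * Note: the KVA window reserved at ptbl_buf_pool_vabase for these buffers is
 * sized from PTBL_BUFS (see mmu_booke_bootstrap()), so changing PTBL_BUFS
 * changes the amount of KVA set aside for page table buffers.
 */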
297 TAILQ_ENTRY(ptbl_buf) link; /* list link */
298 vm_offset_t kva; /* va of mapping */
301 /* ptbl free list and a lock used for access synchronization. */
302 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
303 static struct mtx ptbl_buf_freelist_lock;
305 /* Base address of kva space allocated for ptbl bufs. */
306 static vm_offset_t ptbl_buf_pool_vabase;
308 /* Pointer to ptbl_buf structures. */
309 static struct ptbl_buf *ptbl_bufs;
312 extern tlb_entry_t __boot_tlb1[];
313 void pmap_bootstrap_ap(volatile uint32_t *);
317 * Kernel MMU interface
319 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
320 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
321 vm_size_t, vm_offset_t);
322 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
323 static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
324 vm_offset_t, vm_page_t *, vm_offset_t, int);
325 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
326 vm_prot_t, u_int flags, int8_t psind);
327 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
328 vm_page_t, vm_prot_t);
329 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
331 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
332 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
334 static void mmu_booke_init(mmu_t);
335 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
336 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
337 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
338 static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
339 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
341 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
343 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
344 vm_object_t, vm_pindex_t, vm_size_t);
345 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
346 static void mmu_booke_page_init(mmu_t, vm_page_t);
347 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
348 static void mmu_booke_pinit(mmu_t, pmap_t);
349 static void mmu_booke_pinit0(mmu_t, pmap_t);
350 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
352 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
353 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
354 static void mmu_booke_release(mmu_t, pmap_t);
355 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
356 static void mmu_booke_remove_all(mmu_t, vm_page_t);
357 static void mmu_booke_remove_write(mmu_t, vm_page_t);
358 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
359 static void mmu_booke_zero_page(mmu_t, vm_page_t);
360 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
361 static void mmu_booke_activate(mmu_t, struct thread *);
362 static void mmu_booke_deactivate(mmu_t, struct thread *);
363 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
364 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
365 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
366 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
367 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
368 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
369 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
370 static void mmu_booke_kremove(mmu_t, vm_offset_t);
371 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
372 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
374 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
376 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
378 static void mmu_booke_scan_init(mmu_t);
379 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
380 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
381 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
382 vm_size_t sz, vm_memattr_t mode);
383 static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
384 volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
385 static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
386 int *is_user, vm_offset_t *decoded_addr);
389 static mmu_method_t mmu_booke_methods[] = {
390 /* pmap dispatcher interface */
391 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
392 MMUMETHOD(mmu_copy, mmu_booke_copy),
393 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
394 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
395 MMUMETHOD(mmu_enter, mmu_booke_enter),
396 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
397 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
398 MMUMETHOD(mmu_extract, mmu_booke_extract),
399 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
400 MMUMETHOD(mmu_init, mmu_booke_init),
401 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
402 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
403 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
404 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
405 MMUMETHOD(mmu_map, mmu_booke_map),
406 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
407 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
408 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
409 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
410 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
411 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
412 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
413 MMUMETHOD(mmu_protect, mmu_booke_protect),
414 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
415 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
416 MMUMETHOD(mmu_release, mmu_booke_release),
417 MMUMETHOD(mmu_remove, mmu_booke_remove),
418 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
419 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
420 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
421 MMUMETHOD(mmu_unwire, mmu_booke_unwire),
422 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
423 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
424 MMUMETHOD(mmu_activate, mmu_booke_activate),
425 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
426 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
427 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
429 /* Internal interfaces */
430 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
431 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
432 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
433 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
434 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
435 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
436 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
437 MMUMETHOD(mmu_kremove, mmu_booke_kremove),
438 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
439 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
440 MMUMETHOD(mmu_map_user_ptr, mmu_booke_map_user_ptr),
441 MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
443 /* dumpsys() support */
444 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
445 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
446 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
451 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
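/*
 * Roughly: MMU_DEF() registers the method table above under the
 * MMU_TYPE_BOOKE name with FreeBSD's kobj-based pmap dispatcher; once this
 * implementation is installed, the generic mmu_* entry points resolve to the
 * mmu_booke_* functions in this file.
 */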
453 static __inline uint32_t
454 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
459 if (ma != VM_MEMATTR_DEFAULT) {
461 case VM_MEMATTR_UNCACHEABLE:
462 return (MAS2_I | MAS2_G);
463 case VM_MEMATTR_WRITE_COMBINING:
464 case VM_MEMATTR_WRITE_BACK:
465 case VM_MEMATTR_PREFETCHABLE:
467 case VM_MEMATTR_WRITE_THROUGH:
468 return (MAS2_W | MAS2_M);
469 case VM_MEMATTR_CACHEABLE:
475 * Assume the page is cache inhibited and access is guarded unless
476 * it's in our available memory array.
478 attrib = _TLB_ENTRY_IO;
479 for (i = 0; i < physmem_regions_sz; i++) {
480 if ((pa >= physmem_regions[i].mr_start) &&
481 (pa < (physmem_regions[i].mr_start +
482 physmem_regions[i].mr_size))) {
483 attrib = _TLB_ENTRY_MEM;
500 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
503 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
504 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke.tlb_lock);
506 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
507 ("tlb_miss_lock: tried to lock self"));
509 tlb_lock(pc->pc_booke.tlb_lock);
511 CTR1(KTR_PMAP, "%s: locked", __func__);
518 tlb_miss_unlock(void)
526 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
528 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
529 __func__, pc->pc_cpuid);
531 tlb_unlock(pc->pc_booke.tlb_lock);
533 CTR1(KTR_PMAP, "%s: unlocked", __func__);
539 /* Return number of entries in TLB0. */
541 tlb0_get_tlbconf(void)
545 tlb0_cfg = mfspr(SPR_TLB0CFG);
546 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
547 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
548 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
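/*
 * Example (illustrative): an e500v2-class core reports a 512-entry,
 * 4-way set-associative TLB0 here, i.e. 128 entries per way.
 */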
551 /* Return number of entries in TLB1. */
553 tlb1_get_tlbconf(void)
557 tlb1_cfg = mfspr(SPR_TLB1CFG);
558 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
561 /**************************************************************************/
562 /* Page table related */
563 /**************************************************************************/
566 /* Initialize pool of kva ptbl buffers. */
572 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
573 TAILQ_INIT(&ptbl_buf_freelist);
575 for (i = 0; i < PTBL_BUFS; i++) {
576 ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
577 i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
578 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
582 /* Get a ptbl_buf from the freelist. */
583 static struct ptbl_buf *
586 struct ptbl_buf *buf;
588 mtx_lock(&ptbl_buf_freelist_lock);
589 buf = TAILQ_FIRST(&ptbl_buf_freelist);
591 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
592 mtx_unlock(&ptbl_buf_freelist_lock);
597 /* Return ptbl buf to free pool. */
599 ptbl_buf_free(struct ptbl_buf *buf)
601 mtx_lock(&ptbl_buf_freelist_lock);
602 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
603 mtx_unlock(&ptbl_buf_freelist_lock);
607 * Search the pmap's ptbl buf list for the buf backing the given ptbl and free that buf.
610 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
612 struct ptbl_buf *pbuf;
614 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
615 if (pbuf->kva == (vm_offset_t) ptbl) {
616 /* Remove from pmap ptbl buf list. */
617 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
619 /* Free corresponding ptbl buf. */
627 /* Get a pointer to a PTE in a page table. */
628 static __inline pte_t *
629 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
634 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
636 pdir = pmap->pm_pp2d[PP2D_IDX(va)];
639 ptbl = pdir[PDIR_IDX(va)];
640 return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
644 * Search the pmap's pdir buf list for the buf backing the given pdir and free that buf.
647 ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
649 struct ptbl_buf *pbuf;
651 TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
652 if (pbuf->kva == (vm_offset_t) pdir) {
653 /* Remove from pmap ptbl buf list. */
654 TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
656 /* Free corresponding pdir buf. */
663 /* Free pdir pages and invalidate pdir entry. */
665 pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
673 pdir = pmap->pm_pp2d[pp2d_idx];
675 KASSERT((pdir != NULL), ("pdir_free: null pdir"));
677 pmap->pm_pp2d[pp2d_idx] = NULL;
679 for (i = 0; i < PDIR_PAGES; i++) {
680 va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
681 pa = pte_vatopa(mmu, kernel_pmap, va);
682 m = PHYS_TO_VM_PAGE(pa);
683 vm_page_free_zero(m);
688 ptbl_free_pmap_pdir(mmu, pmap, pdir);
692 * Decrement pdir pages hold count and attempt to free pdir pages. Called
693 * when a directory entry is removed from the pdir.
695 * Return 1 if pdir pages were freed.
698 pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
705 KASSERT((pmap != kernel_pmap),
706 ("pdir_unhold: unholding kernel pdir!"));
708 pdir = pmap->pm_pp2d[pp2d_idx];
710 KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
711 ("pdir_unhold: non kva pdir"));
713 /* decrement hold count */
714 for (i = 0; i < PDIR_PAGES; i++) {
715 pa = pte_vatopa(mmu, kernel_pmap,
716 (vm_offset_t) pdir + (i * PAGE_SIZE));
717 m = PHYS_TO_VM_PAGE(pa);
722 * Free pdir pages if there are no dir entries in this pdir.
723 * wire_count has the same value for all ptbl pages, so check the
726 if (m->wire_count == 0) {
727 pdir_free(mmu, pmap, pp2d_idx);
734 * Increment hold count for pdir pages. This routine is used when a new ptbl
735 * entry is being inserted into the pdir.
738 pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
744 KASSERT((pmap != kernel_pmap),
745 ("pdir_hold: holding kernel pdir!"));
747 KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
749 for (i = 0; i < PDIR_PAGES; i++) {
750 pa = pte_vatopa(mmu, kernel_pmap,
751 (vm_offset_t) pdir + (i * PAGE_SIZE));
752 m = PHYS_TO_VM_PAGE(pa);
757 /* Allocate page table. */
759 ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
762 vm_page_t mtbl [PTBL_PAGES];
764 struct ptbl_buf *pbuf;
770 KASSERT((pdir[pdir_idx] == NULL),
771 ("%s: valid ptbl entry exists!", __func__));
773 pbuf = ptbl_buf_alloc();
775 panic("%s: couldn't alloc kernel virtual memory", __func__);
777 ptbl = (pte_t *) pbuf->kva;
779 for (i = 0; i < PTBL_PAGES; i++) {
780 pidx = (PTBL_PAGES * pdir_idx) + i;
781 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
782 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
784 rw_wunlock(&pvh_global_lock);
786 ptbl_free_pmap_ptbl(pmap, ptbl);
787 for (j = 0; j < i; j++)
788 vm_page_free(mtbl[j]);
793 rw_wlock(&pvh_global_lock);
799 /* Map allocated pages into kernel_pmap. */
800 mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
801 /* Zero whole ptbl. */
802 bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
804 /* Add pbuf to the pmap ptbl bufs list. */
805 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
810 /* Free ptbl pages and invalidate pdir entry. */
812 ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
820 ptbl = pdir[pdir_idx];
822 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
824 pdir[pdir_idx] = NULL;
826 for (i = 0; i < PTBL_PAGES; i++) {
827 va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
828 pa = pte_vatopa(mmu, kernel_pmap, va);
829 m = PHYS_TO_VM_PAGE(pa);
830 vm_page_free_zero(m);
835 ptbl_free_pmap_ptbl(pmap, ptbl);
839 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
840 * when a pte entry is removed from the ptbl.
842 * Return 1 if ptbl pages were freed.
845 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
855 pp2d_idx = PP2D_IDX(va);
856 pdir_idx = PDIR_IDX(va);
858 KASSERT((pmap != kernel_pmap),
859 ("ptbl_unhold: unholding kernel ptbl!"));
861 pdir = pmap->pm_pp2d[pp2d_idx];
862 ptbl = pdir[pdir_idx];
864 KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
865 ("ptbl_unhold: non kva ptbl"));
867 /* decrement hold count */
868 for (i = 0; i < PTBL_PAGES; i++) {
869 pa = pte_vatopa(mmu, kernel_pmap,
870 (vm_offset_t) ptbl + (i * PAGE_SIZE));
871 m = PHYS_TO_VM_PAGE(pa);
876 * Free ptbl pages if there are no pte entries in this ptbl.
877 * wire_count has the same value for all ptbl pages, so check the
880 if (m->wire_count == 0) {
881 /* A pair of indirect entries might point to this ptbl page */
883 tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
884 TLB_SIZE_1M, MAS6_SIND);
885 tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
886 TLB_SIZE_1M, MAS6_SIND);
888 ptbl_free(mmu, pmap, pdir, pdir_idx);
889 pdir_unhold(mmu, pmap, pp2d_idx);
896 * Increment hold count for ptbl pages. This routine is used when a new pte
897 * entry is being inserted into the ptbl.
900 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
907 KASSERT((pmap != kernel_pmap),
908 ("ptbl_hold: holding kernel ptbl!"));
910 ptbl = pdir[pdir_idx];
912 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
914 for (i = 0; i < PTBL_PAGES; i++) {
915 pa = pte_vatopa(mmu, kernel_pmap,
916 (vm_offset_t) ptbl + (i * PAGE_SIZE));
917 m = PHYS_TO_VM_PAGE(pa);
923 /* Initialize pool of kva ptbl buffers. */
929 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
930 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
931 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
932 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
934 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
935 TAILQ_INIT(&ptbl_buf_freelist);
937 for (i = 0; i < PTBL_BUFS; i++) {
939 ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
940 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
944 /* Get a ptbl_buf from the freelist. */
945 static struct ptbl_buf *
948 struct ptbl_buf *buf;
950 mtx_lock(&ptbl_buf_freelist_lock);
951 buf = TAILQ_FIRST(&ptbl_buf_freelist);
953 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
954 mtx_unlock(&ptbl_buf_freelist_lock);
956 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
961 /* Return ptbl buf to free pool. */
963 ptbl_buf_free(struct ptbl_buf *buf)
966 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
968 mtx_lock(&ptbl_buf_freelist_lock);
969 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
970 mtx_unlock(&ptbl_buf_freelist_lock);
974 * Search the pmap's ptbl buf list for the buf backing the given ptbl and free that buf.
977 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
979 struct ptbl_buf *pbuf;
981 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
983 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
985 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
986 if (pbuf->kva == (vm_offset_t)ptbl) {
987 /* Remove from pmap ptbl buf list. */
988 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
990 /* Free corresponding ptbl buf. */
996 /* Allocate page table. */
998 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
1000 vm_page_t mtbl[PTBL_PAGES];
1002 struct ptbl_buf *pbuf;
1007 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1008 (pmap == kernel_pmap), pdir_idx);
1010 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1011 ("ptbl_alloc: invalid pdir_idx"));
1012 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
1013 ("pte_alloc: valid ptbl entry exists!"));
1015 pbuf = ptbl_buf_alloc();
1017 panic("pte_alloc: couldn't alloc kernel virtual memory");
1019 ptbl = (pte_t *)pbuf->kva;
1021 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
1023 for (i = 0; i < PTBL_PAGES; i++) {
1024 pidx = (PTBL_PAGES * pdir_idx) + i;
1025 while ((m = vm_page_alloc(NULL, pidx,
1026 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
1028 rw_wunlock(&pvh_global_lock);
1030 ptbl_free_pmap_ptbl(pmap, ptbl);
1031 for (j = 0; j < i; j++)
1032 vm_page_free(mtbl[j]);
1037 rw_wlock(&pvh_global_lock);
1043 /* Map allocated pages into kernel_pmap. */
1044 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
1046 /* Zero whole ptbl. */
1047 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
1049 /* Add pbuf to the pmap ptbl bufs list. */
1050 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
1055 /* Free ptbl pages and invalidate pdir entry. */
1057 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1065 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1066 (pmap == kernel_pmap), pdir_idx);
1068 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1069 ("ptbl_free: invalid pdir_idx"));
1071 ptbl = pmap->pm_pdir[pdir_idx];
1073 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
1075 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
1078 * Invalidate the pdir entry as soon as possible, so that other CPUs
1079 * don't attempt to look up the page tables we are releasing.
1081 mtx_lock_spin(&tlbivax_mutex);
1084 pmap->pm_pdir[pdir_idx] = NULL;
1087 mtx_unlock_spin(&tlbivax_mutex);
1089 for (i = 0; i < PTBL_PAGES; i++) {
1090 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
1091 pa = pte_vatopa(mmu, kernel_pmap, va);
1092 m = PHYS_TO_VM_PAGE(pa);
1093 vm_page_free_zero(m);
1095 mmu_booke_kremove(mmu, va);
1098 ptbl_free_pmap_ptbl(pmap, ptbl);
1102 * Decrement ptbl pages hold count and attempt to free ptbl pages.
1103 * Called when a pte entry is removed from the ptbl.
1105 * Return 1 if ptbl pages were freed.
1108 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1115 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1116 (pmap == kernel_pmap), pdir_idx);
1118 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1119 ("ptbl_unhold: invalid pdir_idx"));
1120 KASSERT((pmap != kernel_pmap),
1121 ("ptbl_unhold: unholding kernel ptbl!"));
1123 ptbl = pmap->pm_pdir[pdir_idx];
1125 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
1126 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
1127 ("ptbl_unhold: non kva ptbl"));
1129 /* decrement hold count */
1130 for (i = 0; i < PTBL_PAGES; i++) {
1131 pa = pte_vatopa(mmu, kernel_pmap,
1132 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1133 m = PHYS_TO_VM_PAGE(pa);
1138 * Free ptbl pages if there are no pte entries in this ptbl.
1139 * wire_count has the same value for all ptbl pages, so check the last
1142 if (m->wire_count == 0) {
1143 ptbl_free(mmu, pmap, pdir_idx);
1145 //debugf("ptbl_unhold: e (freed ptbl)\n");
1153 * Increment hold count for ptbl pages. This routine is used when a new pte
1154 * entry is being inserted into the ptbl.
1157 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1164 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
1167 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1168 ("ptbl_hold: invalid pdir_idx"));
1169 KASSERT((pmap != kernel_pmap),
1170 ("ptbl_hold: holding kernel ptbl!"));
1172 ptbl = pmap->pm_pdir[pdir_idx];
1174 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
1176 for (i = 0; i < PTBL_PAGES; i++) {
1177 pa = pte_vatopa(mmu, kernel_pmap,
1178 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1179 m = PHYS_TO_VM_PAGE(pa);
1185 /* Allocate pv_entry structure. */
1192 if (pv_entry_count > pv_entry_high_water)
1193 pagedaemon_wakeup(0); /* XXX powerpc NUMA */
1194 pv = uma_zalloc(pvzone, M_NOWAIT);
1199 /* Free pv_entry structure. */
1200 static __inline void
1201 pv_free(pv_entry_t pve)
1205 uma_zfree(pvzone, pve);
1209 /* Allocate and initialize pv_entry structure. */
1211 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
1215 //int su = (pmap == kernel_pmap);
1216 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
1217 // (u_int32_t)pmap, va, (u_int32_t)m);
1221 panic("pv_insert: no pv entries!");
1223 pve->pv_pmap = pmap;
1226 /* add to pv_list */
1227 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1228 rw_assert(&pvh_global_lock, RA_WLOCKED);
1230 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
1232 //debugf("pv_insert: e\n");
1235 /* Destroy pv entry. */
1237 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
1241 //int su = (pmap == kernel_pmap);
1242 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
1244 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1245 rw_assert(&pvh_global_lock, RA_WLOCKED);
1248 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
1249 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
1250 /* remove from pv_list */
1251 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
1252 if (TAILQ_EMPTY(&m->md.pv_list))
1253 vm_page_aflag_clear(m, PGA_WRITEABLE);
1255 /* free pv entry struct */
1261 //debugf("pv_remove: e\n");
1264 #ifdef __powerpc64__
1266 * Clean pte entry, try to free page table page if requested.
1268 * Return 1 if ptbl pages were freed, otherwise return 0.
1271 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
1276 pte = pte_find(mmu, pmap, va);
1277 KASSERT(pte != NULL, ("%s: NULL pte", __func__));
1279 if (!PTE_ISVALID(pte))
1282 /* Get vm_page_t for mapped pte. */
1283 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1285 if (PTE_ISWIRED(pte))
1286 pmap->pm_stats.wired_count--;
1288 /* Handle managed entry. */
1289 if (PTE_ISMANAGED(pte)) {
1291 /* Handle modified pages. */
1292 if (PTE_ISMODIFIED(pte))
1295 /* Referenced pages. */
1296 if (PTE_ISREFERENCED(pte))
1297 vm_page_aflag_set(m, PGA_REFERENCED);
1299 /* Remove pv_entry from pv_list. */
1300 pv_remove(pmap, va, m);
1301 } else if (m->md.pv_tracked) {
1302 pv_remove(pmap, va, m);
1303 if (TAILQ_EMPTY(&m->md.pv_list))
1304 m->md.pv_tracked = false;
1306 mtx_lock_spin(&tlbivax_mutex);
1309 tlb0_flush_entry(va);
1313 mtx_unlock_spin(&tlbivax_mutex);
1315 pmap->pm_stats.resident_count--;
1317 if (flags & PTBL_UNHOLD) {
1318 return (ptbl_unhold(mmu, pmap, va));
1324 * allocate a page of pointers to page directories; do not preallocate the page tables
1328 pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
1330 vm_page_t mtbl [PDIR_PAGES];
1332 struct ptbl_buf *pbuf;
1338 pbuf = ptbl_buf_alloc();
1341 panic("%s: couldn't alloc kernel virtual memory", __func__);
1343 /* Allocate pdir pages, this will sleep! */
1344 for (i = 0; i < PDIR_PAGES; i++) {
1345 pidx = (PDIR_PAGES * pp2d_idx) + i;
1346 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
1347 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
1355 /* Map allocated pages into kernel_pmap. */
1356 pdir = (pte_t **) pbuf->kva;
1357 pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
1359 /* Zero whole pdir. */
1360 bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
1362 /* Add pdir to the pmap pdir bufs list. */
1363 TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
1369 * Insert PTE for a given page and virtual address.
1372 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1375 unsigned int pp2d_idx = PP2D_IDX(va);
1376 unsigned int pdir_idx = PDIR_IDX(va);
1377 unsigned int ptbl_idx = PTBL_IDX(va);
1381 /* Get the page directory pointer. */
1382 pdir = pmap->pm_pp2d[pp2d_idx];
1384 pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
1386 /* Get the page table pointer. */
1387 ptbl = pdir[pdir_idx];
1390 /* Allocate page table pages. */
1391 ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
1393 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1398 * Check if there is a valid mapping for the requested va; if there is, remove it.
1401 pte = &pdir[pdir_idx][ptbl_idx];
1402 if (PTE_ISVALID(pte)) {
1403 pte_remove(mmu, pmap, va, PTBL_HOLD);
1406 * pte is not used, increment hold count for ptbl pages.
1409 if (pmap != kernel_pmap)
1410 ptbl_hold(mmu, pmap, pdir, pdir_idx);
1414 if (pdir[pdir_idx] == NULL) {
1415 if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
1416 pdir_hold(mmu, pmap, pdir);
1417 pdir[pdir_idx] = ptbl;
1419 if (pmap->pm_pp2d[pp2d_idx] == NULL)
1420 pmap->pm_pp2d[pp2d_idx] = pdir;
1423 * Insert pv_entry into pv_list for mapped page if part of managed memory.
1426 if ((m->oflags & VPO_UNMANAGED) == 0) {
1427 flags |= PTE_MANAGED;
1429 /* Create and insert pv entry. */
1430 pv_insert(pmap, va, m);
1433 mtx_lock_spin(&tlbivax_mutex);
1436 tlb0_flush_entry(va);
1437 pmap->pm_stats.resident_count++;
1438 pte = &pdir[pdir_idx][ptbl_idx];
1439 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1440 *pte |= (PTE_VALID | flags);
1443 mtx_unlock_spin(&tlbivax_mutex);
1448 /* Return the pa for the given pmap/va. */
1450 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1455 pte = pte_find(mmu, pmap, va);
1456 if ((pte != NULL) && PTE_ISVALID(pte))
1457 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1462 /* allocate pte entries to manage (addr & mask) to (addr & mask) + size */
1464 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1471 /* Initialize kernel pdir */
1472 for (i = 0; i < kernel_pdirs; i++) {
1473 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
1474 (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
1475 for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
1476 j < PDIR_NENTRIES; j++) {
1477 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
1478 (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
1479 (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
1484 * Fill in PTEs covering kernel code and data. They are not required
1485 * for address translation, as this area is covered by static TLB1
1486 * entries, but pte_vatopa() needs them to resolve kernel-area addresses.
1489 for (va = addr; va < data_end; va += PAGE_SIZE) {
1490 pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
1491 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1492 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1493 PTE_VALID | PTE_PS_4KB;
1498 * Clean pte entry, try to free page table page if requested.
1500 * Return 1 if ptbl pages were freed, otherwise return 0.
1503 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
1505 unsigned int pdir_idx = PDIR_IDX(va);
1506 unsigned int ptbl_idx = PTBL_IDX(va);
1511 //int su = (pmap == kernel_pmap);
1512 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
1513 // su, (u_int32_t)pmap, va, flags);
1515 ptbl = pmap->pm_pdir[pdir_idx];
1516 KASSERT(ptbl, ("pte_remove: null ptbl"));
1518 pte = &ptbl[ptbl_idx];
1520 if (pte == NULL || !PTE_ISVALID(pte))
1523 if (PTE_ISWIRED(pte))
1524 pmap->pm_stats.wired_count--;
1526 /* Get vm_page_t for mapped pte. */
1527 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1529 /* Handle managed entry. */
1530 if (PTE_ISMANAGED(pte)) {
1532 if (PTE_ISMODIFIED(pte))
1535 if (PTE_ISREFERENCED(pte))
1536 vm_page_aflag_set(m, PGA_REFERENCED);
1538 pv_remove(pmap, va, m);
1539 } else if (m->md.pv_tracked) {
1541 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
1542 * used. This is needed by the NCSW support code for fast
1543 * VA<->PA translation.
1545 pv_remove(pmap, va, m);
1546 if (TAILQ_EMPTY(&m->md.pv_list))
1547 m->md.pv_tracked = false;
1550 mtx_lock_spin(&tlbivax_mutex);
1553 tlb0_flush_entry(va);
1557 mtx_unlock_spin(&tlbivax_mutex);
1559 pmap->pm_stats.resident_count--;
1561 if (flags & PTBL_UNHOLD) {
1562 //debugf("pte_remove: e (unhold)\n");
1563 return (ptbl_unhold(mmu, pmap, pdir_idx));
1566 //debugf("pte_remove: e\n");
1571 * Insert PTE for a given page and virtual address.
1574 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1577 unsigned int pdir_idx = PDIR_IDX(va);
1578 unsigned int ptbl_idx = PTBL_IDX(va);
1581 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
1582 pmap == kernel_pmap, pmap, va);
1584 /* Get the page table pointer. */
1585 ptbl = pmap->pm_pdir[pdir_idx];
1588 /* Allocate page table pages. */
1589 ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
1591 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1596 * Check if there is a valid mapping for the requested
1597 * va; if there is, remove it.
1599 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
1600 if (PTE_ISVALID(pte)) {
1601 pte_remove(mmu, pmap, va, PTBL_HOLD);
1604 * pte is not used, increment hold count for ptbl pages.
1607 if (pmap != kernel_pmap)
1608 ptbl_hold(mmu, pmap, pdir_idx);
1613 * Insert pv_entry into pv_list for mapped page if part of managed memory.
1616 if ((m->oflags & VPO_UNMANAGED) == 0) {
1617 flags |= PTE_MANAGED;
1619 /* Create and insert pv entry. */
1620 pv_insert(pmap, va, m);
1623 pmap->pm_stats.resident_count++;
1625 mtx_lock_spin(&tlbivax_mutex);
1628 tlb0_flush_entry(va);
1629 if (pmap->pm_pdir[pdir_idx] == NULL) {
1631 * If we just allocated a new page table, hook it into the pdir.
1634 pmap->pm_pdir[pdir_idx] = ptbl;
1636 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
1637 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1638 *pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
1641 mtx_unlock_spin(&tlbivax_mutex);
1645 /* Return the pa for the given pmap/va. */
1647 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1652 pte = pte_find(mmu, pmap, va);
1653 if ((pte != NULL) && PTE_ISVALID(pte))
1654 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1658 /* Get a pointer to a PTE in a page table. */
1660 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1662 unsigned int pdir_idx = PDIR_IDX(va);
1663 unsigned int ptbl_idx = PTBL_IDX(va);
1665 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
1667 if (pmap->pm_pdir[pdir_idx])
1668 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
1673 /* Set up kernel page tables. */
1675 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1681 /* Initialize kernel pdir */
1682 for (i = 0; i < kernel_ptbls; i++)
1683 kernel_pmap->pm_pdir[kptbl_min + i] =
1684 (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
1687 * Fill in PTEs covering kernel code and data. They are not required
1688 * for address translation, as this area is covered by static TLB1
1689 * entries, but pte_vatopa() needs them to resolve kernel-area addresses.
1692 for (va = addr; va < data_end; va += PAGE_SIZE) {
1693 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1694 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1695 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1696 PTE_VALID | PTE_PS_4KB;
1701 /**************************************************************************/
1703 /**************************************************************************/
1706 * This is called during booke_init, before the system is really initialized.
1709 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
1711 vm_paddr_t phys_kernelend;
1712 struct mem_region *mp, *mp1;
1714 vm_paddr_t s, e, sz;
1715 vm_paddr_t physsz, hwphyssz;
1716 u_int phys_avail_count;
1717 vm_size_t kstack0_sz;
1718 vm_offset_t kernel_pdir, kstack0;
1719 vm_paddr_t kstack0_phys;
1722 debugf("mmu_booke_bootstrap: entered\n");
1724 /* Set interesting system properties */
1726 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
1730 /* Initialize invalidation mutex */
1731 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
1733 /* Read TLB0 size and associativity. */
1737 * Align kernel start and end address (kernel image).
1738 * Note that kernel end does not necessarily relate to kernsize.
1739 * kernsize is the size of the kernel that is actually mapped.
1741 kernstart = trunc_page(start);
1742 data_start = round_page(kernelend);
1743 data_end = data_start;
1746 * Addresses of preloaded modules (like file systems) use
1747 * physical addresses. Make sure we relocate those into
1748 * virtual addresses.
1750 preload_addr_relocate = kernstart - kernload;
1752 /* Allocate the dynamic per-cpu area. */
1753 dpcpu = (void *)data_end;
1754 data_end += DPCPU_SIZE;
1756 /* Allocate space for the message buffer. */
1757 msgbufp = (struct msgbuf *)data_end;
1758 data_end += msgbufsize;
1759 debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1760 (uintptr_t)msgbufp, data_end);
1762 data_end = round_page(data_end);
1764 /* Allocate space for ptbl_bufs. */
1765 ptbl_bufs = (struct ptbl_buf *)data_end;
1766 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1767 debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1768 (uintptr_t)ptbl_bufs, data_end);
1770 data_end = round_page(data_end);
1772 /* Allocate PTE tables for kernel KVA. */
1773 kernel_pdir = data_end;
1774 kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
1776 #ifdef __powerpc64__
1777 kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
1778 data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
1780 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1781 debugf(" kernel ptbls: %d\n", kernel_ptbls);
1782 debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1783 kernel_pdir, data_end);
1785 debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
1786 if (data_end - kernstart > kernsize) {
1787 kernsize += tlb1_mapin_region(kernstart + kernsize,
1788 kernload + kernsize, (data_end - kernstart) - kernsize);
1790 data_end = kernstart + kernsize;
1791 debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
1794 * Clear the structures - note we can only do it safely after the
1795 * possible additional TLB1 translations are in place (above) so that
1796 * the entire range up to the currently calculated 'data_end' is covered.
1798 dpcpu_init(dpcpu, 0);
1799 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1800 #ifdef __powerpc64__
1801 memset((void *)kernel_pdir, 0,
1802 kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
1803 kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1805 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1808 /*******************************************************/
1809 /* Set the start and end of kva. */
1810 /*******************************************************/
1811 virtual_avail = round_page(data_end);
1812 virtual_end = VM_MAX_KERNEL_ADDRESS;
1814 /* Allocate KVA space for page zero/copy operations. */
1815 zero_page_va = virtual_avail;
1816 virtual_avail += PAGE_SIZE;
1817 copy_page_src_va = virtual_avail;
1818 virtual_avail += PAGE_SIZE;
1819 copy_page_dst_va = virtual_avail;
1820 virtual_avail += PAGE_SIZE;
1821 debugf("zero_page_va = 0x%08x\n", zero_page_va);
1822 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1823 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1825 /* Initialize page zero/copy mutexes. */
1826 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1827 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1829 /* Allocate KVA space for ptbl bufs. */
1830 ptbl_buf_pool_vabase = virtual_avail;
1831 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1832 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1833 ptbl_buf_pool_vabase, virtual_avail);
1835 /* Calculate corresponding physical addresses for the kernel region. */
1836 phys_kernelend = kernload + kernsize;
1837 debugf("kernel image and allocated data:\n");
1838 debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
1839 debugf(" kernstart = 0x%08x\n", kernstart);
1840 debugf(" kernsize = 0x%08x\n", kernsize);
1842 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1843 panic("mmu_booke_bootstrap: phys_avail too small");
1846 * Remove kernel physical address range from avail regions list. Page
1847 * align all regions. Non-page aligned memory isn't very interesting
1848 * to us. Also, sort the entries for ascending addresses.
1851 /* Retrieve phys/avail mem regions */
1852 mem_regions(&physmem_regions, &physmem_regions_sz,
1853 &availmem_regions, &availmem_regions_sz);
1855 cnt = availmem_regions_sz;
1856 debugf("processing avail regions:\n");
1857 for (mp = availmem_regions; mp->mr_size; mp++) {
1859 e = mp->mr_start + mp->mr_size;
1860 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
1861 /* Check whether this region holds all of the kernel. */
1862 if (s < kernload && e > phys_kernelend) {
1863 availmem_regions[cnt].mr_start = phys_kernelend;
1864 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1867 /* Look whether this region starts within the kernel. */
1868 if (s >= kernload && s < phys_kernelend) {
1869 if (e <= phys_kernelend)
1873 /* Now look whether this region ends within the kernel. */
1874 if (e > kernload && e <= phys_kernelend) {
1879 /* Now page align the start and size of the region. */
1885 debugf("%09jx-%09jx = %jx\n",
1886 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
1888 /* Check whether some memory is left here. */
1892 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1898 /* Do an insertion sort. */
1899 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1900 if (s < mp1->mr_start)
1903 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1911 availmem_regions_sz = cnt;
1913 /*******************************************************/
1914 /* Steal physical memory for kernel stack from the end */
1915 /* of the first avail region */
1916 /*******************************************************/
1917 kstack0_sz = kstack_pages * PAGE_SIZE;
1918 kstack0_phys = availmem_regions[0].mr_start +
1919 availmem_regions[0].mr_size;
1920 kstack0_phys -= kstack0_sz;
1921 availmem_regions[0].mr_size -= kstack0_sz;
1923 /*******************************************************/
1924 /* Fill in phys_avail table, based on availmem_regions */
1925 /*******************************************************/
1926 phys_avail_count = 0;
1929 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1931 debugf("fill in phys_avail:\n");
1932 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1934 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
1935 (uintmax_t)availmem_regions[i].mr_start,
1936 (uintmax_t)availmem_regions[i].mr_start +
1937 availmem_regions[i].mr_size,
1938 (uintmax_t)availmem_regions[i].mr_size);
1940 if (hwphyssz != 0 &&
1941 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1942 debugf(" hw.physmem adjust\n");
1943 if (physsz < hwphyssz) {
1944 phys_avail[j] = availmem_regions[i].mr_start;
1946 availmem_regions[i].mr_start +
1954 phys_avail[j] = availmem_regions[i].mr_start;
1955 phys_avail[j + 1] = availmem_regions[i].mr_start +
1956 availmem_regions[i].mr_size;
1958 physsz += availmem_regions[i].mr_size;
1960 physmem = btoc(physsz);
1962 /* Calculate the last available physical address. */
1963 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1965 Maxmem = powerpc_btop(phys_avail[i + 1]);
1967 debugf("Maxmem = 0x%08lx\n", Maxmem);
1968 debugf("phys_avail_count = %d\n", phys_avail_count);
1969 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
1970 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
1972 /*******************************************************/
1973 /* Initialize (statically allocated) kernel pmap. */
1974 /*******************************************************/
1975 PMAP_LOCK_INIT(kernel_pmap);
1976 #ifndef __powerpc64__
1977 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1980 debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
1981 kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
1982 for (i = 0; i < MAXCPU; i++) {
1983 kernel_pmap->pm_tid[i] = TID_KERNEL;
1985 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1986 tidbusy[i][TID_KERNEL] = kernel_pmap;
1989 /* Mark kernel_pmap active on all CPUs */
1990 CPU_FILL(&kernel_pmap->pm_active);
1993 * Initialize the global pv list lock.
1995 rw_init(&pvh_global_lock, "pmap pv global");
1997 /*******************************************************/
1999 /*******************************************************/
2001 /* Enter kstack0 into kernel map, provide guard page */
2002 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2003 thread0.td_kstack = kstack0;
2004 thread0.td_kstack_pages = kstack_pages;
2006 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
2007 debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
2008 kstack0_phys, kstack0_phys + kstack0_sz);
2009 debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
2010 kstack0, kstack0 + kstack0_sz);
2012 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
2013 for (i = 0; i < kstack_pages; i++) {
2014 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
2015 kstack0 += PAGE_SIZE;
2016 kstack0_phys += PAGE_SIZE;
2019 pmap_bootstrapped = 1;
2021 debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
2022 debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);
2024 debugf("mmu_booke_bootstrap: exit\n");
2031 tlb_entry_t *e, tmp;
2034 /* Prepare TLB1 image for AP processors */
2036 for (i = 0; i < TLB1_ENTRIES; i++) {
2037 tlb1_read_entry(&tmp, i);
2039 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
2040 memcpy(e++, &tmp, sizeof(tmp));
2045 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
2050 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
2051 * have the snapshot of its contents in the s/w __boot_tlb1[] table
2052 * created by tlb1_ap_prep(), so use these values directly to
2053 * (re)program AP's TLB1 hardware.
2055 * Start at index 1 because index 0 has the kernel map.
2057 for (i = 1; i < TLB1_ENTRIES; i++) {
2058 if (__boot_tlb1[i].mas1 & MAS1_VALID)
2059 tlb1_write_entry(&__boot_tlb1[i], i);
2062 set_mas4_defaults();
2067 booke_pmap_init_qpages(void)
2074 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
2075 if (pc->pc_qmap_addr == 0)
2076 panic("pmap_init_qpages: unable to allocate KVA");
2080 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
2083 * Get the physical page address for the given pmap/virtual address.
2086 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
2091 pa = pte_vatopa(mmu, pmap, va);
2098 * Extract the physical page address associated with the given
2099 * kernel virtual address.
2102 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
2108 if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
2109 p = pte_vatopa(mmu, kernel_pmap, va);
2112 /* Check TLB1 mappings */
2113 for (i = 0; i < TLB1_ENTRIES; i++) {
2114 tlb1_read_entry(&e, i);
2115 if (!(e.mas1 & MAS1_VALID))
2117 if (va >= e.virt && va < e.virt + e.size)
2118 return (e.phys + (va - e.virt));
2126 * Initialize the pmap module.
2127 * Called by vm_init, to initialize any structures that the pmap
2128 * system needs to map virtual memory.
2131 mmu_booke_init(mmu_t mmu)
2133 int shpgperproc = PMAP_SHPGPERPROC;
2136 * Initialize the address space (zone) for the pv entries. Set a
2137 * high water mark so that the system can recover from excessive
2138 * numbers of pv entries.
2140 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
2141 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
2143 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
2144 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
2146 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
2147 pv_entry_high_water = 9 * (pv_entry_max / 10);
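/*
 * The high-water mark is 90% of pv_entry_max; pv_alloc() wakes the
 * page daemon once pv_entry_count exceeds it.
 */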
2149 uma_zone_reserve_kva(pvzone, pv_entry_max);
2151 /* Pre-fill pvzone with initial number of pv entries. */
2152 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
2154 /* Initialize ptbl allocation. */
2159 * Map a list of wired pages into kernel virtual address space. This is
2160 * intended for temporary mappings which do not need page modification or
2161 * references recorded. Existing mappings in the region are overwritten.
2164 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
2169 while (count-- > 0) {
2170 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2177 * Remove page mappings from kernel virtual address space. Intended for
2178 * temporary mappings entered by mmu_booke_qenter.
2181 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
2186 while (count-- > 0) {
2187 mmu_booke_kremove(mmu, va);
2193 * Map a wired page into kernel virtual address space.
2196 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
2199 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
2203 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2208 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2209 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
2211 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2212 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
2213 flags |= PTE_PS_4KB;
2215 pte = pte_find(mmu, kernel_pmap, va);
2216 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
2218 mtx_lock_spin(&tlbivax_mutex);
2221 if (PTE_ISVALID(pte)) {
2223 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
2225 /* Flush entry from TLB0 */
2226 tlb0_flush_entry(va);
2229 *pte = PTE_RPN_FROM_PA(pa) | flags;
2231 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
2232 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
2233 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
2235 /* Flush the real memory from the instruction cache. */
2236 if ((flags & (PTE_I | PTE_G)) == 0)
2237 __syncicache((void *)va, PAGE_SIZE);
2240 mtx_unlock_spin(&tlbivax_mutex);
2244 * Remove a page from kernel page table.
2247 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
2251 CTR2(KTR_PMAP,"%s: s (va = 0x%08x)\n", __func__, va);
2253 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2254 (va <= VM_MAX_KERNEL_ADDRESS)),
2255 ("mmu_booke_kremove: invalid va"));
2257 pte = pte_find(mmu, kernel_pmap, va);
2259 if (!PTE_ISVALID(pte)) {
2261 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
2266 mtx_lock_spin(&tlbivax_mutex);
2269 /* Invalidate entry in TLB0, update PTE. */
2270 tlb0_flush_entry(va);
2274 mtx_unlock_spin(&tlbivax_mutex);
2278 * Provide a kernel pointer corresponding to a given userland pointer.
2279 * The returned pointer is valid until the next time this function is
2280 * called in this thread. This is used internally in copyin/copyout.
2283 mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
2284 void **kaddr, size_t ulen, size_t *klen)
2287 if ((uintptr_t)uaddr + ulen > VM_MAXUSER_ADDRESS + PAGE_SIZE)
2290 *kaddr = (void *)(uintptr_t)uaddr;
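/*
 * Hypothetical caller sketch (added; assumes the conventional 0-on-success
 * return value, which is not visible in this excerpt):
 *
 *	void *kp;
 *	size_t klen;
 *
 *	if (mmu_booke_map_user_ptr(mmu, pm, uaddr, &kp, len, &klen) != 0)
 *		return (EFAULT);
 *	bcopy(kp, kbuf, MIN(len, klen));
 */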
2298 * Figure out where a given kernel pointer (usually in a fault) points
2299 * to from the VM's perspective, potentially remapping into userland's
2303 mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
2304 vm_offset_t *decoded_addr)
2307 if (addr < VM_MAXUSER_ADDRESS)
2312 *decoded_addr = addr;
2317 * Initialize pmap associated with process 0.
2320 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
2323 PMAP_LOCK_INIT(pmap);
2324 mmu_booke_pinit(mmu, pmap);
2325 PCPU_SET(curpmap, pmap);
2329 * Initialize a preallocated and zeroed pmap structure,
2330 * such as one in a vmspace structure.
2333 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
2337 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
2338 curthread->td_proc->p_pid, curthread->td_proc->p_comm);
2340 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
2342 for (i = 0; i < MAXCPU; i++)
2343 pmap->pm_tid[i] = TID_NONE;
2344 CPU_ZERO(&pmap->pm_active);
2345 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2346 #ifdef __powerpc64__
2347 bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
2348 TAILQ_INIT(&pmap->pm_pdir_list);
2350 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
2352 TAILQ_INIT(&pmap->pm_ptbl_list);
2356 * Release any resources held by the given physical map.
2357 * Called when a pmap initialized by mmu_booke_pinit is being released.
2358 * Should only be called if the map contains no valid mappings.
2361 mmu_booke_release(mmu_t mmu, pmap_t pmap)
2364 KASSERT(pmap->pm_stats.resident_count == 0,
2365 ("pmap_release: pmap resident count %ld != 0",
2366 pmap->pm_stats.resident_count));
2370 * Insert the given physical page at the specified virtual address in the
2371 * target physical map with the protection requested. If specified the page
2372 * will be wired down.
2375 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2376 vm_prot_t prot, u_int flags, int8_t psind)
2380 rw_wlock(&pvh_global_lock);
2382 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
2384 rw_wunlock(&pvh_global_lock);
2389 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2390 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
2395 int error, su, sync;
2397 pa = VM_PAGE_TO_PHYS(m);
2398 su = (pmap == kernel_pmap);
2401 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
2402 // "pa=0x%08x prot=0x%08x flags=%#x)\n",
2403 // (u_int32_t)pmap, su, pmap->pm_tid,
2404 // (u_int32_t)m, va, pa, prot, flags);
2407 KASSERT(((va >= virtual_avail) &&
2408 (va <= VM_MAX_KERNEL_ADDRESS)),
2409 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
2411 KASSERT((va <= VM_MAXUSER_ADDRESS),
2412 ("mmu_booke_enter_locked: user pmap, non user va"));
2414 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2415 VM_OBJECT_ASSERT_LOCKED(m->object);
2417 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2420 * If there is an existing mapping, and the physical address has not
2421 * changed, it must be a protection or wiring change.
2423 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
2424 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
2427 * Before actually updating pte->flags we calculate and
2428 * prepare its new value in a helper var.
2431 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
2433 /* Wiring change, just update stats. */
2434 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
2435 if (!PTE_ISWIRED(pte)) {
2437 pmap->pm_stats.wired_count++;
2440 if (PTE_ISWIRED(pte)) {
2441 flags &= ~PTE_WIRED;
2442 pmap->pm_stats.wired_count--;
2446 if (prot & VM_PROT_WRITE) {
2447 /* Add write permissions. */
2452 if ((flags & PTE_MANAGED) != 0)
2453 vm_page_aflag_set(m, PGA_WRITEABLE);
2455 /* Handle modified pages, sense modify status. */
2458 * The PTE_MODIFIED flag could have been set by the TLB miss
2459 * handler since we last read it (above), and other CPUs may
2460 * have updated it as well, so check the PTE directly rather
2461 * than relying on the locally saved flags value.
2464 if (PTE_ISMODIFIED(pte))
2468 if (prot & VM_PROT_EXECUTE) {
2474 * Check existing flags for execute permissions: if we
2475 * are turning execute permissions on, icache should
2478 if ((*pte & (PTE_UX | PTE_SX)) == 0)
2482 flags &= ~PTE_REFERENCED;
2485 * The new flags value is all calculated -- only now actually
2488 mtx_lock_spin(&tlbivax_mutex);
2491 tlb0_flush_entry(va);
2492 *pte &= ~PTE_FLAGS_MASK;
2496 mtx_unlock_spin(&tlbivax_mutex);
2500 * If there is an existing mapping, but it's for a different
2501 * physical address, pte_enter() will delete the old mapping.
2503 //if ((pte != NULL) && PTE_ISVALID(pte))
2504 // debugf("mmu_booke_enter_locked: replace\n");
2506 // debugf("mmu_booke_enter_locked: new\n");
2508 /* Now set up the flags and install the new mapping. */
2509 flags = (PTE_SR | PTE_VALID);
2515 if (prot & VM_PROT_WRITE) {
2520 if ((m->oflags & VPO_UNMANAGED) == 0)
2521 vm_page_aflag_set(m, PGA_WRITEABLE);
2524 if (prot & VM_PROT_EXECUTE) {
2530 /* If it's wired, update stats. */
2531 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2534 error = pte_enter(mmu, pmap, m, va, flags,
2535 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
2537 return (KERN_RESOURCE_SHORTAGE);
2539 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2540 pmap->pm_stats.wired_count++;
2542 /* Flush the real memory from the instruction cache. */
2543 if (prot & VM_PROT_EXECUTE)
2547 if (sync && (su || pmap == PCPU_GET(curpmap))) {
2548 __syncicache((void *)va, PAGE_SIZE);
2552 return (KERN_SUCCESS);
2556 * Maps a sequence of resident pages belonging to the same object.
2557 * The sequence begins with the given page m_start. This page is
2558 * mapped at the given virtual address start. Each subsequent page is
2559 * mapped at a virtual address that is offset from start by the same
2560 * amount as the page is offset from m_start within the object. The
2561 * last page in the sequence is the page with the largest offset from
2562 * m_start that can be mapped at a virtual address less than the given
2563 * virtual address end. Not every virtual page between start and end
2564 * is mapped; only those for which a resident page exists with the
2565 * corresponding offset from m_start are mapped.
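/*
 * Illustrative example (added): with start = 0x10000000, end = 0x10004000
 * and m_start at pindex 8, resident pages at pindexes 8, 9 and 11 are
 * mapped at 0x10000000, 0x10001000 and 0x10003000 respectively (4KB
 * pages assumed); a non-resident pindex 10 is simply skipped.
 */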
2568 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
2569 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
2572 vm_pindex_t diff, psize;
2574 VM_OBJECT_ASSERT_LOCKED(m_start->object);
2576 psize = atop(end - start);
2578 rw_wlock(&pvh_global_lock);
2580 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2581 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
2582 prot & (VM_PROT_READ | VM_PROT_EXECUTE),
2583 PMAP_ENTER_NOSLEEP, 0);
2584 m = TAILQ_NEXT(m, listq);
2586 rw_wunlock(&pvh_global_lock);
2591 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2595 rw_wlock(&pvh_global_lock);
2597 mmu_booke_enter_locked(mmu, pmap, va, m,
2598 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
2600 rw_wunlock(&pvh_global_lock);
2605 * Remove the given range of addresses from the specified map.
2607 * It is assumed that the start and end are properly rounded to the page size.
2610 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
2615 int su = (pmap == kernel_pmap);
2617 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
2618 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
2621 KASSERT(((va >= virtual_avail) &&
2622 (va <= VM_MAX_KERNEL_ADDRESS)),
2623 ("mmu_booke_remove: kernel pmap, non kernel va"));
2625 KASSERT((va <= VM_MAXUSER_ADDRESS),
2626 ("mmu_booke_remove: user pmap, non user va"));
2629 if (PMAP_REMOVE_DONE(pmap)) {
2630 //debugf("mmu_booke_remove: e (empty)\n");
2634 hold_flag = PTBL_HOLD_FLAG(pmap);
2635 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
2637 rw_wlock(&pvh_global_lock);
2639 for (; va < endva; va += PAGE_SIZE) {
2640 pte = pte_find(mmu, pmap, va);
2641 if ((pte != NULL) && PTE_ISVALID(pte))
2642 pte_remove(mmu, pmap, va, hold_flag);
2645 rw_wunlock(&pvh_global_lock);
2647 //debugf("mmu_booke_remove: e\n");
2651 * Remove physical page from all pmaps in which it resides.
2654 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
2659 rw_wlock(&pvh_global_lock);
2660 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
2661 pvn = TAILQ_NEXT(pv, pv_link);
2663 PMAP_LOCK(pv->pv_pmap);
2664 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
2665 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
2666 PMAP_UNLOCK(pv->pv_pmap);
2668 vm_page_aflag_clear(m, PGA_WRITEABLE);
2669 rw_wunlock(&pvh_global_lock);
2673 * Map a range of physical addresses into kernel virtual address space.
2676 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2677 vm_paddr_t pa_end, int prot)
2679 vm_offset_t sva = *virt;
2680 vm_offset_t va = sva;
2682 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
2683 // sva, pa_start, pa_end);
2685 while (pa_start < pa_end) {
2686 mmu_booke_kenter(mmu, va, pa_start);
2688 pa_start += PAGE_SIZE;
2692 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
2697 * The pmap must be activated before its address space can be accessed in any
2701 mmu_booke_activate(mmu_t mmu, struct thread *td)
2706 pmap = &td->td_proc->p_vmspace->vm_pmap;
2708 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
2709 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2711 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
2715 cpuid = PCPU_GET(cpuid);
2716 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2717 PCPU_SET(curpmap, pmap);
2719 if (pmap->pm_tid[cpuid] == TID_NONE)
2722 /* Load PID0 register with pmap tid value. */
2723 mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
2724 __asm __volatile("isync");
2726 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
2730 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
2731 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
2735 * Deactivate the specified process's address space.
2738 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
2742 pmap = &td->td_proc->p_vmspace->vm_pmap;
2744 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
2745 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2747 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
2749 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
2750 PCPU_SET(curpmap, NULL);
2754 * Copy the range specified by src_addr/len
2755 * from the source map to the range dst_addr/len
2756 * in the destination map.
2758 * This routine is only advisory and need not do anything.
2761 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2762 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2768 * Set the physical protection on the specified range of this map as requested.
2771 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2778 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2779 mmu_booke_remove(mmu, pmap, sva, eva);
2783 if (prot & VM_PROT_WRITE)
2787 for (va = sva; va < eva; va += PAGE_SIZE) {
2788 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2789 if (PTE_ISVALID(pte)) {
2790 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2792 mtx_lock_spin(&tlbivax_mutex);
2795 /* Handle modified pages. */
2796 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
2799 tlb0_flush_entry(va);
2800 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2803 mtx_unlock_spin(&tlbivax_mutex);
2811 * Clear the write and modified bits in each of the given page's mappings.
2814 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2819 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2820 ("mmu_booke_remove_write: page %p is not managed", m));
2823 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2824 * set by another thread while the object is locked. Thus,
2825 * if PGA_WRITEABLE is clear, no page table entries need updating.
2827 VM_OBJECT_ASSERT_WLOCKED(m->object);
2828 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2830 rw_wlock(&pvh_global_lock);
2831 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2832 PMAP_LOCK(pv->pv_pmap);
2833 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2834 if (PTE_ISVALID(pte)) {
2835 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2837 mtx_lock_spin(&tlbivax_mutex);
2840 /* Handle modified pages. */
2841 if (PTE_ISMODIFIED(pte))
2844 /* Flush mapping from TLB0. */
2845 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2848 mtx_unlock_spin(&tlbivax_mutex);
2851 PMAP_UNLOCK(pv->pv_pmap);
2853 vm_page_aflag_clear(m, PGA_WRITEABLE);
2854 rw_wunlock(&pvh_global_lock);
2858 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2867 va = trunc_page(va);
2868 sz = round_page(sz);
2870 rw_wlock(&pvh_global_lock);
2871 pmap = PCPU_GET(curpmap);
2872 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2875 pte = pte_find(mmu, pm, va);
2876 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2882 /* Create a mapping in the active pmap. */
2884 m = PHYS_TO_VM_PAGE(pa);
2886 pte_enter(mmu, pmap, m, addr,
2887 PTE_SR | PTE_VALID | PTE_UR, FALSE);
2888 __syncicache((void *)addr, PAGE_SIZE);
2889 pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2892 __syncicache((void *)va, PAGE_SIZE);
2897 rw_wunlock(&pvh_global_lock);
2901 * Atomically extract and hold the physical page with the given
2902 * pmap and virtual address pair if that mapping permits the given
2906 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2918 pte = pte_find(mmu, pmap, va);
2919 if ((pte != NULL) && PTE_ISVALID(pte)) {
2920 if (pmap == kernel_pmap)
2925 if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2926 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2928 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2939 * Initialize a vm_page's machine-dependent fields.
2942 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2945 m->md.pv_tracked = 0;
2946 TAILQ_INIT(&m->md.pv_list);
2950 * mmu_booke_zero_page_area zeros the specified hardware page by
2951 * mapping it into virtual memory and using bzero to clear
2954 * off and size must reside within a single page.
2957 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2961 /* XXX KASSERT off and size are within a single page? */
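	/*
	 * A sketch of the assertion suggested by the XXX above (added, not
	 * in the original source):
	 *
	 *	KASSERT(off + size <= PAGE_SIZE,
	 *	    ("%s: off/size crosses a page boundary", __func__));
	 */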
2963 mtx_lock(&zero_page_mutex);
2966 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2967 bzero((caddr_t)va + off, size);
2968 mmu_booke_kremove(mmu, va);
2970 mtx_unlock(&zero_page_mutex);
2974 * mmu_booke_zero_page zeros the specified hardware page.
2977 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2979 vm_offset_t off, va;
2981 mtx_lock(&zero_page_mutex);
2984 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
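	/*
	 * Added note: dcbz (Data Cache Block set to Zero) clears a whole
	 * cache block at a time, so stepping by cacheline_size below zeroes
	 * the page without first fetching its old contents from memory.
	 */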
2985 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
2986 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
2987 mmu_booke_kremove(mmu, va);
2989 mtx_unlock(&zero_page_mutex);
2993 * mmu_booke_copy_page copies the specified (machine independent) page by
2994 * mapping the page into virtual memory and using memcpy to copy the page,
2995 * one machine dependent page at a time.
2998 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
3000 vm_offset_t sva, dva;
3002 sva = copy_page_src_va;
3003 dva = copy_page_dst_va;
3005 mtx_lock(©_page_mutex);
3006 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
3007 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
3008 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
3009 mmu_booke_kremove(mmu, dva);
3010 mmu_booke_kremove(mmu, sva);
3011 mtx_unlock(©_page_mutex);
3015 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
3016 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
3019 vm_offset_t a_pg_offset, b_pg_offset;
3022 mtx_lock(©_page_mutex);
3023 while (xfersize > 0) {
3024 a_pg_offset = a_offset & PAGE_MASK;
3025 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
3026 mmu_booke_kenter(mmu, copy_page_src_va,
3027 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
3028 a_cp = (char *)copy_page_src_va + a_pg_offset;
3029 b_pg_offset = b_offset & PAGE_MASK;
3030 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
3031 mmu_booke_kenter(mmu, copy_page_dst_va,
3032 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
3033 b_cp = (char *)copy_page_dst_va + b_pg_offset;
3034 bcopy(a_cp, b_cp, cnt);
3035 mmu_booke_kremove(mmu, copy_page_dst_va);
3036 mmu_booke_kremove(mmu, copy_page_src_va);
3041 mtx_unlock(©_page_mutex);
3045 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
3052 paddr = VM_PAGE_TO_PHYS(m);
3054 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
3055 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
3056 flags |= PTE_PS_4KB;
3059 qaddr = PCPU_GET(qmap_addr);
3061 pte = pte_find(mmu, kernel_pmap, qaddr);
3063 KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
3066 * XXX: tlbivax is broadcast to other cores, but qaddr should
3067 * not be present in other TLBs. Is there a better instruction
3068 * sequence to use? Or just forget it & use mmu_booke_kenter()...
3070 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
3071 __asm __volatile("isync; msync");
3073 *pte = PTE_RPN_FROM_PA(paddr) | flags;
3075 /* Flush the real memory from the instruction cache. */
3076 if ((flags & (PTE_I | PTE_G)) == 0)
3077 __syncicache((void *)qaddr, PAGE_SIZE);
3083 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
3087 pte = pte_find(mmu, kernel_pmap, addr);
3089 KASSERT(PCPU_GET(qmap_addr) == addr,
3090 ("mmu_booke_quick_remove_page: invalid address"));
3092 ("mmu_booke_quick_remove_page: PTE not in use"));
3099 * Return whether or not the specified physical page was modified
3100 * in any of physical maps.
3103 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
3109 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3110 ("mmu_booke_is_modified: page %p is not managed", m));
3114 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3115 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
3116 * is clear, no PTEs can be modified.
3118 VM_OBJECT_ASSERT_WLOCKED(m->object);
3119 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3121 rw_wlock(&pvh_global_lock);
3122 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3123 PMAP_LOCK(pv->pv_pmap);
3124 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3126 if (PTE_ISMODIFIED(pte))
3129 PMAP_UNLOCK(pv->pv_pmap);
3133 rw_wunlock(&pvh_global_lock);
3138 * Return whether or not the specified virtual address is eligible
3142 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
3149 * Return whether or not the specified physical page was referenced
3150 * in any physical maps.
3153 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
3159 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3160 ("mmu_booke_is_referenced: page %p is not managed", m));
3162 rw_wlock(&pvh_global_lock);
3163 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3164 PMAP_LOCK(pv->pv_pmap);
3165 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3167 if (PTE_ISREFERENCED(pte))
3170 PMAP_UNLOCK(pv->pv_pmap);
3174 rw_wunlock(&pvh_global_lock);
3179 * Clear the modify bits on the specified physical page.
3182 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
3187 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3188 ("mmu_booke_clear_modify: page %p is not managed", m));
3189 VM_OBJECT_ASSERT_WLOCKED(m->object);
3190 KASSERT(!vm_page_xbusied(m),
3191 ("mmu_booke_clear_modify: page %p is exclusive busied", m));
3194 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
3195 * If the object containing the page is locked and the page is not
3196 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3198 if ((m->aflags & PGA_WRITEABLE) == 0)
3200 rw_wlock(&pvh_global_lock);
3201 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3202 PMAP_LOCK(pv->pv_pmap);
3203 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3205 mtx_lock_spin(&tlbivax_mutex);
3208 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
3209 tlb0_flush_entry(pv->pv_va);
3210 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
3215 mtx_unlock_spin(&tlbivax_mutex);
3217 PMAP_UNLOCK(pv->pv_pmap);
3219 rw_wunlock(&pvh_global_lock);
3223 * Return a count of reference bits for a page, clearing those bits.
3224 * It is not necessary for every reference bit to be cleared, but it
3225 * is necessary that 0 only be returned when there are truly no
3226 * reference bits set.
3228 * As an optimization, update the page's dirty field if a modified bit is
3229 * found while counting reference bits. This opportunistic update can be
3230 * performed at low cost and can eliminate the need for some future calls
3231 * to pmap_is_modified(). However, since this function stops after
3232 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3233 * dirty pages. Those dirty pages will only be detected by a future call
3234 * to pmap_is_modified().
3237 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
3243 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3244 ("mmu_booke_ts_referenced: page %p is not managed", m));
3246 rw_wlock(&pvh_global_lock);
3247 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3248 PMAP_LOCK(pv->pv_pmap);
3249 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3251 if (PTE_ISMODIFIED(pte))
3253 if (PTE_ISREFERENCED(pte)) {
3254 mtx_lock_spin(&tlbivax_mutex);
3257 tlb0_flush_entry(pv->pv_va);
3258 *pte &= ~PTE_REFERENCED;
3261 mtx_unlock_spin(&tlbivax_mutex);
3263 if (++count >= PMAP_TS_REFERENCED_MAX) {
3264 PMAP_UNLOCK(pv->pv_pmap);
3269 PMAP_UNLOCK(pv->pv_pmap);
3271 rw_wunlock(&pvh_global_lock);
3276 * Clear the wired attribute from the mappings for the specified range of
3277 * addresses in the given pmap. Every valid mapping within that range must
3278 * have the wired attribute set. In contrast, invalid mappings cannot have
3279 * the wired attribute set, so they are ignored.
3281 * The wired attribute of the page table entry is not a hardware feature, so
3282 * there is no need to invalidate any TLB entries.
3285 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3291 for (va = sva; va < eva; va += PAGE_SIZE) {
3292 if ((pte = pte_find(mmu, pmap, va)) != NULL &&
3294 if (!PTE_ISWIRED(pte))
3295 panic("mmu_booke_unwire: pte %p isn't wired",
3298 pmap->pm_stats.wired_count--;
3306 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
3307 * page. This count may be changed upwards or downwards in the future; it is
3308 * only necessary that true be returned for a small subset of pmaps for proper
3312 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
3318 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3319 ("mmu_booke_page_exists_quick: page %p is not managed", m));
3322 rw_wlock(&pvh_global_lock);
3323 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3324 if (pv->pv_pmap == pmap) {
3331 rw_wunlock(&pvh_global_lock);
3336 * Return the number of managed mappings to the given physical page that are
3340 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
3346 if ((m->oflags & VPO_UNMANAGED) != 0)
3348 rw_wlock(&pvh_global_lock);
3349 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3350 PMAP_LOCK(pv->pv_pmap);
3351 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
3352 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
3354 PMAP_UNLOCK(pv->pv_pmap);
3356 rw_wunlock(&pvh_global_lock);
3361 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3367 * This currently does not work for entries that
3368 * overlap TLB1 entries.
3370 for (i = 0; i < TLB1_ENTRIES; i ++) {
3371 if (tlb1_iomapped(i, pa, size, &va) == 0)
3379 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
3385 /* Minidumps are based on virtual memory addresses. */
3387 *va = (void *)(vm_offset_t)pa;
3391 /* Raw physical memory dumps don't have a virtual address. */
3392 /* We always map a 256MB page at 256M. */
3393 gran = 256 * 1024 * 1024;
3394 ppa = rounddown2(pa, gran);
3397 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
3399 if (sz > (gran - ofs))
3400 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
3405 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
3413 /* Minidumps are based on virtual memory addresses. */
3414 /* Nothing to do... */
3418 for (i = 0; i < TLB1_ENTRIES; i++) {
3419 tlb1_read_entry(&e, i);
3420 if (!(e.mas1 & MAS1_VALID))
3424 /* Raw physical memory dumps don't have a virtual address. */
3429 tlb1_write_entry(&e, i);
3431 gran = 256 * 1024 * 1024;
3432 ppa = rounddown2(pa, gran);
3434 if (sz > (gran - ofs)) {
3439 tlb1_write_entry(&e, i);
3443 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3446 mmu_booke_scan_init(mmu_t mmu)
3453 /* Initialize phys. segments for dumpsys(). */
3454 memset(&dump_map, 0, sizeof(dump_map));
3455 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
3456 &availmem_regions_sz);
3457 for (i = 0; i < physmem_regions_sz; i++) {
3458 dump_map[i].pa_start = physmem_regions[i].mr_start;
3459 dump_map[i].pa_size = physmem_regions[i].mr_size;
3464 /* Virtual segments for minidumps: */
3465 memset(&dump_map, 0, sizeof(dump_map));
3467 /* 1st: kernel .data and .bss. */
3468 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3469 dump_map[0].pa_size =
3470 round_page((uintptr_t)_end) - dump_map[0].pa_start;
3472 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3473 dump_map[1].pa_start = data_start;
3474 dump_map[1].pa_size = data_end - data_start;
3476 /* 3rd: kernel VM. */
3477 va = dump_map[1].pa_start + dump_map[1].pa_size;
3478 /* Find start of next chunk (from va). */
3479 while (va < virtual_end) {
3480 /* Don't dump the buffer cache. */
3481 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3482 va = kmi.buffer_eva;
3485 pte = pte_find(mmu, kernel_pmap, va);
3486 if (pte != NULL && PTE_ISVALID(pte))
3490 if (va < virtual_end) {
3491 dump_map[2].pa_start = va;
3493 /* Find last page in chunk. */
3494 while (va < virtual_end) {
3495 /* Don't run into the buffer cache. */
3496 if (va == kmi.buffer_sva)
3498 pte = pte_find(mmu, kernel_pmap, va);
3499 if (pte == NULL || !PTE_ISVALID(pte))
3503 dump_map[2].pa_size = va - dump_map[2].pa_start;
3508 * Map a set of physical memory pages into the kernel virtual address space.
3509 * Return a pointer to where it is mapped. This routine is intended to be used
3510 * for mapping device memory, NOT real memory.
3513 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3516 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
3520 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3524 uintptr_t va, tmpva;
3529 * Check if this is premapped in TLB1. Note: this should probably also
3530 * check whether a sequence of TLB1 entries exists that matches the
3531 * requirement, but for now only the easy case is checked.
3533 for (i = 0; i < TLB1_ENTRIES; i++) {
3534 tlb1_read_entry(&e, i);
3535 if (!(e.mas1 & MAS1_VALID))
3538 (pa + size) <= (e.phys + e.size) &&
3539 (ma == VM_MEMATTR_DEFAULT ||
3540 tlb_calc_wimg(pa, ma) ==
3541 (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
3542 return (void *)(e.virt +
3543 (vm_offset_t)(pa - e.phys));
3546 size = roundup(size, PAGE_SIZE);
3549 * The device mapping area is between VM_MAXUSER_ADDRESS and
3550 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing.
3552 #ifdef SPARSE_MAPDEV
3554 * With a sparse mapdev, align to the largest starting region. This
3555 * could feasibly be optimized for a 'best-fit' alignment, but that
3556 * calculation could be very costly.
3557 * Align to the smaller of:
3558 * - first set bit in overlap of (pa & size mask)
3559 * - largest size envelope
3561 * It's possible the device mapping starts at a PA whose offset within
3562 * the size mask is nonzero, so we offset the VA by the same amount to
3563 * maximize the TLB entry range and minimize the number of TLB entries used.
3566 tmpva = tlb1_map_base;
3567 sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
3568 sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
3569 va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
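/*
 * Added note: the low-order bits of 'va' are copied from 'pa' (within the
 * chosen alignment) because a large TLB1 entry requires the virtual and
 * physical addresses to share the same offset within the naturally aligned
 * entry, which lets the range be covered by fewer, larger entries.
 */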
3570 #ifdef __powerpc64__
3571 } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
3573 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
3576 #ifdef __powerpc64__
3577 va = atomic_fetchadd_long(&tlb1_map_base, size);
3579 va = atomic_fetchadd_int(&tlb1_map_base, size);
3585 sz = 1 << (ilog2(size) & ~1);
3586 /* Align size to PA */
3590 } while (pa % sz != 0);
3592 /* Now align from there to VA */
3596 } while (va % sz != 0);
3599 printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
3600 va, (uintmax_t)pa, sz);
3601 if (tlb1_set_entry(va, pa, sz,
3602 _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0)
3613 * 'Unmap' a range mapped by mmu_booke_mapdev().
3616 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
3618 #ifdef SUPPORTS_SHRINKING_TLB1
3619 vm_offset_t base, offset;
3622 * Unmap only if this is inside kernel virtual space.
3624 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
3625 base = trunc_page(va);
3626 offset = va & PAGE_MASK;
3627 size = roundup(offset + size, PAGE_SIZE);
3628 kva_free(base, size);
3634 * mmu_booke_object_init_pt preloads the ptes for a given object into the
3635 * specified pmap. This eliminates the blast of soft faults on process startup
3636 * and immediately after an mmap.
3639 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3640 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3643 VM_OBJECT_ASSERT_WLOCKED(object);
3644 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3645 ("mmu_booke_object_init_pt: non-device object"));
3649 * Perform the pmap work for mincore.
3652 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3653 vm_paddr_t *locked_pa)
3656 /* XXX: this should be implemented at some point */
3661 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
3669 /* Check TLB1 mappings */
3670 for (i = 0; i < TLB1_ENTRIES; i++) {
3671 tlb1_read_entry(&e, i);
3672 if (!(e.mas1 & MAS1_VALID))
3674 if (addr >= e.virt && addr < e.virt + e.size)
3677 if (i < TLB1_ENTRIES) {
3678 /* Only allow full mappings to be modified for now. */
3679 /* Validate the range. */
3680 for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
3681 tlb1_read_entry(&e, j);
3682 if (va != e.virt || (sz - (va - addr) < e.size))
3685 for (va = addr; va < addr + sz; va += e.size, i++) {
3686 tlb1_read_entry(&e, i);
3687 e.mas2 &= ~MAS2_WIMGE_MASK;
3688 e.mas2 |= tlb_calc_wimg(e.phys, mode);
3691 * Write it out to the TLB. Should really re-sync with other
3694 tlb1_write_entry(&e, i);
3699 /* Not in TLB1, try through pmap */
3700 /* First validate the range. */
3701 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3702 pte = pte_find(mmu, kernel_pmap, va);
3703 if (pte == NULL || !PTE_ISVALID(pte))
3707 mtx_lock_spin(&tlbivax_mutex);
3709 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3710 pte = pte_find(mmu, kernel_pmap, va);
3711 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
3712 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
3713 tlb0_flush_entry(va);
3716 mtx_unlock_spin(&tlbivax_mutex);
3721 /**************************************************************************/
3723 /**************************************************************************/
3726 * Allocate a TID. If necessary, steal one from someone else.
3727 * The new TID is flushed from the TLB before returning.
3730 tid_alloc(pmap_t pmap)
3735 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
3737 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
3739 thiscpu = PCPU_GET(cpuid);
3741 tid = PCPU_GET(booke.tid_next);
3744 PCPU_SET(booke.tid_next, tid + 1);
3746 /* If we are stealing a TID, clear the relevant pmap's field. */
3747 if (tidbusy[thiscpu][tid] != NULL) {
3749 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
3751 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
3753 /* Flush all entries from TLB0 matching this TID. */
3757 tidbusy[thiscpu][tid] = pmap;
3758 pmap->pm_tid[thiscpu] = tid;
3759 __asm __volatile("msync; isync");
3761 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
3762 PCPU_GET(booke.tid_next));
3767 /**************************************************************************/
3769 /**************************************************************************/
3772 #ifdef __powerpc64__
3773 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
3775 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3786 if (mas1 & MAS1_VALID)
3791 if (mas1 & MAS1_IPROT)
3796 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3797 tid = MAS1_GETTID(mas1);
3799 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3802 size = tsize2size(tsize);
3804 debugf("%3d: (%s) [AS=%d] "
3805 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
3806 "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3807 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
3810 /* Convert TLB0 va and way number to tlb0[] table index. */
3811 static inline unsigned int
3812 tlb0_tableidx(vm_offset_t va, unsigned int way)
3816 idx = (way * TLB0_ENTRIES_PER_WAY);
3817 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
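/*
 * Added example (entry counts are illustrative only): on a part with 128
 * TLB0 entries per way, a VA whose entry-index field decodes to 5 yields
 * idx = way * 128 + 5, e.g. idx = 261 for way 2.
 */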
3822 * Invalidate TLB0 entry.
3825 tlb0_flush_entry(vm_offset_t va)
3828 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
3830 mtx_assert(&tlbivax_mutex, MA_OWNED);
3832 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
3833 __asm __volatile("isync; msync");
3834 __asm __volatile("tlbsync; msync");
3836 CTR1(KTR_PMAP, "%s: e", __func__);
3839 /* Print out contents of the MAS registers for each TLB0 entry */
3841 tlb0_print_tlbentries(void)
3843 uint32_t mas0, mas1, mas3, mas7;
3844 #ifdef __powerpc64__
3849 int entryidx, way, idx;
3851 debugf("TLB0 entries:\n");
3852 for (way = 0; way < TLB0_WAYS; way ++)
3853 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
3855 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
3856 mtspr(SPR_MAS0, mas0);
3857 __asm __volatile("isync");
3859 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
3860 mtspr(SPR_MAS2, mas2);
3862 __asm __volatile("isync; tlbre");
3864 mas1 = mfspr(SPR_MAS1);
3865 mas2 = mfspr(SPR_MAS2);
3866 mas3 = mfspr(SPR_MAS3);
3867 mas7 = mfspr(SPR_MAS7);
3869 idx = tlb0_tableidx(mas2, way);
3870 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3874 /**************************************************************************/
3876 /**************************************************************************/
3879 * TLB1 mapping notes:
3881 * TLB1[0] Kernel text and data.
3882 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
3883 * windows, other devices mappings.
3887 * Read an entry from given TLB1 slot.
3890 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
3895 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
3898 __asm __volatile("wrteei 0");
3900 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
3901 mtspr(SPR_MAS0, mas0);
3902 __asm __volatile("isync; tlbre");
3904 entry->mas1 = mfspr(SPR_MAS1);
3905 entry->mas2 = mfspr(SPR_MAS2);
3906 entry->mas3 = mfspr(SPR_MAS3);
3908 switch ((mfpvr() >> 16) & 0xFFFF) {
3913 entry->mas7 = mfspr(SPR_MAS7);
3921 entry->virt = entry->mas2 & MAS2_EPN_MASK;
3922 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
3923 (entry->mas3 & MAS3_RPN);
3925 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
3928 struct tlbwrite_args {
3934 tlb1_write_entry_int(void *arg)
3936 struct tlbwrite_args *args = arg;
3940 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx);
3942 mtspr(SPR_MAS0, mas0);
3943 __asm __volatile("isync");
3944 mtspr(SPR_MAS1, args->e->mas1);
3945 __asm __volatile("isync");
3946 mtspr(SPR_MAS2, args->e->mas2);
3947 __asm __volatile("isync");
3948 mtspr(SPR_MAS3, args->e->mas3);
3949 __asm __volatile("isync");
3950 switch ((mfpvr() >> 16) & 0xFFFF) {
3955 __asm __volatile("isync");
3958 mtspr(SPR_MAS7, args->e->mas7);
3959 __asm __volatile("isync");
3965 __asm __volatile("tlbwe; isync; msync");
3970 tlb1_write_entry_sync(void *arg)
3972 /* Empty synchronization point for smp_rendezvous(). */
3976 * Write given entry to TLB1 hardware.
3979 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
3981 struct tlbwrite_args args;
3987 if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
3989 smp_rendezvous(tlb1_write_entry_sync,
3990 tlb1_write_entry_int,
3991 tlb1_write_entry_sync, &args);
3998 __asm __volatile("wrteei 0");
3999 tlb1_write_entry_int(&args);
4005 * Return the largest uint value log such that 2^log <= num.
4008 ilog2(unsigned int num)
4012 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
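/*
 * Added note: cntlzw counts the leading zero bits of num, and for a
 * nonzero 32-bit value the largest log with 2^log <= num is 31 - lz;
 * e.g. num = 0x10000 has 15 leading zeros, so ilog2() yields 16.
 */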
4017 * Convert TLB TSIZE value to mapped region size.
4020 tsize2size(unsigned int tsize)
4025 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
4028 return ((1 << (2 * tsize)) * 1024);
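/*
 * Added example: tsize = 1 gives 4KB, tsize = 5 gives 1MB and tsize = 9
 * gives 256MB, matching size = 4^tsize KB.
 */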
4032 * Convert region size (must be power of 4) to TLB TSIZE value.
4035 size2tsize(vm_size_t size)
4038 return (ilog2(size) / 2 - 5);
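/*
 * Added example: this is the inverse of tsize2size(); a 256MB region has
 * ilog2(size) = 28, so size2tsize() returns 28 / 2 - 5 = 9.
 */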
4042 * Register permanent kernel mapping in TLB1.
4044 * Entries are allocated by scanning TLB1 for the first free (invalid)
4045 * slot; such permanent entries are not supposed to be invalidated.
4048 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
4055 for (index = 0; index < TLB1_ENTRIES; index++) {
4056 tlb1_read_entry(&e, index);
4057 if ((e.mas1 & MAS1_VALID) == 0)
4059 /* Check if we're just updating the flags, and update them. */
4060 if (e.phys == pa && e.virt == va && e.size == size) {
4061 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4062 tlb1_write_entry(&e, index);
4066 if (index >= TLB1_ENTRIES) {
4067 printf("tlb1_set_entry: TLB1 full!\n");
4071 /* Convert size to TSIZE */
4072 tsize = size2tsize(size);
4074 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
4075 /* XXX TS is hard coded to 0 for now as we only use single address space */
4076 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
4081 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
4082 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
4083 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4085 /* Set supervisor RWX permission bits */
4086 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
4087 e.mas7 = (pa >> 32) & MAS7_RPN;
4089 tlb1_write_entry(&e, index);
4092 * XXX in general TLB1 updates should be propagated between CPUs,
4093 * since the current design assumes the same TLB1 set-up on all
4100 * Map in contiguous RAM region into the TLB1 using maximum of
4101 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
4103 * If necessary, round up the last entry's size and return the total size
4104 * used by all allocated entries.
4107 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
4109 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
4110 vm_size_t mapped, pgsz, base, mask;
4113 /* Round up to the next 1M */
4114 size = roundup2(size, 1 << 20);
4119 pgsz = 64*1024*1024;
4120 while (mapped < size) {
4121 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
4122 while (pgsz > (size - mapped))
4128 /* We under-map. Correct for this. */
4129 if (mapped < size) {
4130 while (pgs[idx - 1] == pgsz) {
4134 /* XXX We may increase beyond our starting point. */
4143 /* Align address to the boundary */
4145 va = (va + mask) & ~mask;
4146 pa = (pa + mask) & ~mask;
4149 for (idx = 0; idx < nents; idx++) {
4151 debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
4152 tlb1_set_entry(va, pa, pgsz,
4153 _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
4158 mapped = (va - base);
4159 printf("mapped size 0x%"PRI0ptrX" (wasted space 0x%"PRIxPTR")\n",
4160 mapped, mapped - size);
4165 * TLB1 initialization routine, to be called after the very first
4166 * assembler level setup done in locore.S.
4171 uint32_t mas0, mas1, mas2, mas3, mas7;
4176 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
4177 mtspr(SPR_MAS0, mas0);
4178 __asm __volatile("isync; tlbre");
4180 mas1 = mfspr(SPR_MAS1);
4181 mas2 = mfspr(SPR_MAS2);
4182 mas3 = mfspr(SPR_MAS3);
4183 mas7 = mfspr(SPR_MAS7);
4185 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
4188 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4189 kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
4191 /* Setup TLB miss defaults */
4192 set_mas4_defaults();
4196 * pmap_early_io_unmap() should be used in close conjunction with
4197 * pmap_early_io_map(), as in the following snippet:
4199 * x = pmap_early_io_map(...);
4200 * <do something with x>
4201 * pmap_early_io_unmap(x, size);
4203 * with no further early I/O allocations made in between.
4206 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
4212 size = roundup(size, PAGE_SIZE);
4214 for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
4215 tlb1_read_entry(&e, i);
4216 if (!(e.mas1 & MAS1_VALID))
4218 if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
4220 e.mas1 &= ~MAS1_VALID;
4221 tlb1_write_entry(&e, i);
4224 if (tlb1_map_base == va + isize)
4225 tlb1_map_base -= isize;
4229 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
4236 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
4238 for (i = 0; i < TLB1_ENTRIES; i++) {
4239 tlb1_read_entry(&e, i);
4240 if (!(e.mas1 & MAS1_VALID))
4242 if (pa >= e.phys && (pa + size) <=
4244 return (e.virt + (pa - e.phys));
4247 pa_base = rounddown(pa, PAGE_SIZE);
4248 size = roundup(size + (pa - pa_base), PAGE_SIZE);
4249 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
4250 va = tlb1_map_base + (pa - pa_base);
4253 sz = 1 << (ilog2(size) & ~1);
4254 tlb1_set_entry(tlb1_map_base, pa_base, sz,
4255 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
4258 tlb1_map_base += sz;
4265 pmap_track_page(pmap_t pmap, vm_offset_t va)
4269 struct pv_entry *pve;
4271 va = trunc_page(va);
4272 pa = pmap_kextract(va);
4273 page = PHYS_TO_VM_PAGE(pa);
4275 rw_wlock(&pvh_global_lock);
4278 TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
4279 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
4283 page->md.pv_tracked = true;
4284 pv_insert(pmap, va, page);
4287 rw_wunlock(&pvh_global_lock);
4292 * Setup MAS4 defaults.
4293 * These values are loaded to MAS0-2 on a TLB miss.
4296 set_mas4_defaults(void)
4300 /* Defaults: TLB0, PID0, TSIZED=4K */
4301 mas4 = MAS4_TLBSELD0;
4302 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
4306 mtspr(SPR_MAS4, mas4);
4307 __asm __volatile("isync");
4311 * Print out contents of the MAS registers for each TLB1 entry
4314 tlb1_print_tlbentries(void)
4316 uint32_t mas0, mas1, mas3, mas7;
4317 #ifdef __powerpc64__
4324 debugf("TLB1 entries:\n");
4325 for (i = 0; i < TLB1_ENTRIES; i++) {
4327 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
4328 mtspr(SPR_MAS0, mas0);
4330 __asm __volatile("isync; tlbre");
4332 mas1 = mfspr(SPR_MAS1);
4333 mas2 = mfspr(SPR_MAS2);
4334 mas3 = mfspr(SPR_MAS3);
4335 mas7 = mfspr(SPR_MAS7);
4337 tlb_print_entry(i, mas1, mas2, mas3, mas7);
4342 * Return 0 if the physical IO range is encompassed by one of the
4343 * TLB1 entries, otherwise return the related error code.
4346 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
4349 vm_paddr_t pa_start;
4351 unsigned int entry_tsize;
4352 vm_size_t entry_size;
4355 *va = (vm_offset_t)NULL;
4357 tlb1_read_entry(&e, i);
4358 /* Skip invalid entries */
4359 if (!(e.mas1 & MAS1_VALID))
4363 * The entry must be cache-inhibited, guarded, and r/w
4364 * so it can function as an i/o page
4366 prot = e.mas2 & (MAS2_I | MAS2_G);
4367 if (prot != (MAS2_I | MAS2_G))
4370 prot = e.mas3 & (MAS3_SR | MAS3_SW);
4371 if (prot != (MAS3_SR | MAS3_SW))
4374 /* The address should be within the entry range. */
4375 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4376 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
4378 entry_size = tsize2size(entry_tsize);
4379 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
4380 (e.mas3 & MAS3_RPN);
4381 pa_end = pa_start + entry_size;
4383 if ((pa < pa_start) || ((pa + size) > pa_end))
4386 /* Return virtual address of this mapping. */
4387 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
4392 * Invalidate all TLB0 entries which match the given TID. Note this is
4393 * intended for cases where invalidations should NOT be propagated to other
4397 tid_flush(tlbtid_t tid)
4400 uint32_t mas0, mas1, mas2;
4404 /* Don't evict kernel translations */
4405 if (tid == TID_KERNEL)
4409 __asm __volatile("wrteei 0");
4411 for (way = 0; way < TLB0_WAYS; way++)
4412 for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
4414 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
4415 mtspr(SPR_MAS0, mas0);
4416 __asm __volatile("isync");
4418 mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
4419 mtspr(SPR_MAS2, mas2);
4421 __asm __volatile("isync; tlbre");
4423 mas1 = mfspr(SPR_MAS1);
4425 if (!(mas1 & MAS1_VALID))
4427 if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
4429 mas1 &= ~MAS1_VALID;
4430 mtspr(SPR_MAS1, mas1);
4431 __asm __volatile("isync; tlbwe; isync; msync");