2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
24 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Some hw specific parts of this pmap were derived or influenced
29 * by NetBSD's ibm4xx pmap module. More generic code is shared with
30 * a few other pmap modules from the FreeBSD tree.
36 * Kernel and user threads run within one common virtual address space
40 * Virtual address space layout:
41 * -----------------------------
42 * 0x0000_0000 - 0x7fff_ffff : user process
43 * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
44 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved
45 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
46 * 0xc100_0000 - 0xffff_ffff : KVA
47 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
48 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
49 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0
50 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space
53 * Virtual address space layout:
54 * -----------------------------
55 * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
56 * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
57 * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
58 * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
59 * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
60 * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
61 * endkernel - msgbufp-1 : flat device tree
62 * msgbufp - ptbl_bufs-1 : message buffer
63 * ptbl_bufs - kernel_pdir-1 : kernel page tables
64 * kernel_pdir - kernel_pp2d-1 : kernel page directory
65 * kernel_pp2d - . : kernel pointers to page directory
66 * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
67 * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
68 * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
69 * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
70 * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
71 * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
72 * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
73 * 0xf000_0000_0000_0000 - +Maxmem : physmem map
74 * - 0xffff_ffff_ffff_ffff : device direct map
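 *
 * Example (illustrative): with the physmem map based at
 * 0xf000_0000_0000_0000, physical address 0x0000_0000_2000_0000 is
 * reachable at kernel VA 0xf000_0000_2000_0000, i.e. direct-map va = base + pa.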
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
80 #include "opt_kstack_pages.h"
82 #include <sys/param.h>
84 #include <sys/malloc.h>
88 #include <sys/queue.h>
89 #include <sys/systm.h>
90 #include <sys/kernel.h>
91 #include <sys/kerneldump.h>
92 #include <sys/linker.h>
93 #include <sys/msgbuf.h>
95 #include <sys/mutex.h>
96 #include <sys/rwlock.h>
97 #include <sys/sched.h>
99 #include <sys/vmmeter.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_extern.h>
106 #include <vm/vm_object.h>
107 #include <vm/vm_param.h>
108 #include <vm/vm_map.h>
109 #include <vm/vm_pager.h>
110 #include <vm/vm_phys.h>
111 #include <vm/vm_pagequeue.h>
114 #include <machine/_inttypes.h>
115 #include <machine/cpu.h>
116 #include <machine/pcb.h>
117 #include <machine/platform.h>
119 #include <machine/tlb.h>
120 #include <machine/spr.h>
121 #include <machine/md_var.h>
122 #include <machine/mmuvar.h>
123 #include <machine/pmap.h>
124 #include <machine/pte.h>
128 #define SPARSE_MAPDEV
130 #define debugf(fmt, args...) printf(fmt, ##args)
132 #define debugf(fmt, args...)
136 #define PRI0ptrX "016lx"
138 #define PRI0ptrX "08x"
141 #define TODO panic("%s: not implemented", __func__);
143 extern unsigned char _etext[];
144 extern unsigned char _end[];
146 extern uint32_t *bootinfo;
149 vm_offset_t kernstart;
152 /* Message buffer and tables. */
153 static vm_offset_t data_start;
154 static vm_size_t data_end;
156 /* Phys/avail memory regions. */
157 static struct mem_region *availmem_regions;
158 static int availmem_regions_sz;
159 static struct mem_region *physmem_regions;
160 static int physmem_regions_sz;
162 /* Reserved KVA space and mutex for mmu_booke_zero_page. */
163 static vm_offset_t zero_page_va;
164 static struct mtx zero_page_mutex;
166 static struct mtx tlbivax_mutex;
168 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
169 static vm_offset_t copy_page_src_va;
170 static vm_offset_t copy_page_dst_va;
171 static struct mtx copy_page_mutex;
173 /**************************************************************************/
175 /**************************************************************************/
177 static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
178 vm_prot_t, u_int flags, int8_t psind);
180 unsigned int kptbl_min; /* Index of the first kernel ptbl. */
181 unsigned int kernel_ptbls; /* Number of KVA ptbls. */
183 unsigned int kernel_pdirs;
187 * If a user pmap is processed with mmu_booke_remove and its resident count
188 * drops to 0, there are no more pages to remove, so we need not continue.
190 #define PMAP_REMOVE_DONE(pmap) \
191 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
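/*
 * Illustrative sketch only, not the actual mmu_booke_remove() body: a
 * range-removal loop can stop scanning as soon as PMAP_REMOVE_DONE()
 * reports that a user pmap has no resident pages left.
 */
#if 0	/* example only */
	for (va = sva; va < eva; va += PAGE_SIZE) {
		if (pte_find(mmu, pmap, va) != NULL)
			pte_remove(mmu, pmap, va, PTBL_UNHOLD);
		if (PMAP_REMOVE_DONE(pmap))
			break;
	}
#endif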
193 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
194 extern int elf32_nxstack;
197 /**************************************************************************/
198 /* TLB and TID handling */
199 /**************************************************************************/
201 /* Translation ID busy table */
202 static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
205 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
206 * core revisions and should be read from h/w registers during early config.
208 uint32_t tlb0_entries;
210 uint32_t tlb0_entries_per_way;
211 uint32_t tlb1_entries;
213 #define TLB0_ENTRIES (tlb0_entries)
214 #define TLB0_WAYS (tlb0_ways)
215 #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
217 #define TLB1_ENTRIES (tlb1_entries)
219 static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
221 static tlbtid_t tid_alloc(struct pmap *);
222 static void tid_flush(tlbtid_t tid);
225 static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
227 static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
230 static void tlb1_read_entry(tlb_entry_t *, unsigned int);
231 static void tlb1_write_entry(tlb_entry_t *, unsigned int);
232 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
233 static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);
235 static vm_size_t tsize2size(unsigned int);
236 static unsigned int size2tsize(vm_size_t);
237 static unsigned int ilog2(unsigned int);
239 static void set_mas4_defaults(void);
241 static inline void tlb0_flush_entry(vm_offset_t);
242 static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
244 /**************************************************************************/
245 /* Page table management */
246 /**************************************************************************/
248 static struct rwlock_padalign pvh_global_lock;
250 /* Data for the pv entry allocation mechanism */
251 static uma_zone_t pvzone;
252 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
254 #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
256 #ifndef PMAP_SHPGPERPROC
257 #define PMAP_SHPGPERPROC 200
260 static void ptbl_init(void);
261 static struct ptbl_buf *ptbl_buf_alloc(void);
262 static void ptbl_buf_free(struct ptbl_buf *);
263 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
266 static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
267 unsigned int, boolean_t);
268 static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
269 static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
270 static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
272 static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
273 static void ptbl_free(mmu_t, pmap_t, unsigned int);
274 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
275 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
278 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
279 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
280 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
281 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
282 static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
284 static pv_entry_t pv_alloc(void);
285 static void pv_free(pv_entry_t);
286 static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
287 static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
289 static void booke_pmap_init_qpages(void);
291 /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
293 #define PTBL_BUFS (16UL * 16 * 16)
295 #define PTBL_BUFS (128 * 16)
299 TAILQ_ENTRY(ptbl_buf) link; /* list link */
300 vm_offset_t kva; /* va of mapping */
303 /* ptbl free list and a lock used for access synchronization. */
304 static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
305 static struct mtx ptbl_buf_freelist_lock;
307 /* Base address of kva space allocated for ptbl bufs. */
308 static vm_offset_t ptbl_buf_pool_vabase;
310 /* Pointer to ptbl_buf structures. */
311 static struct ptbl_buf *ptbl_bufs;
314 extern tlb_entry_t __boot_tlb1[];
315 void pmap_bootstrap_ap(volatile uint32_t *);
319 * Kernel MMU interface
321 static void mmu_booke_clear_modify(mmu_t, vm_page_t);
322 static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
323 vm_size_t, vm_offset_t);
324 static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
325 static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
326 vm_offset_t, vm_page_t *, vm_offset_t, int);
327 static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
328 vm_prot_t, u_int flags, int8_t psind);
329 static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
330 vm_page_t, vm_prot_t);
331 static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
333 static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
334 static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
336 static void mmu_booke_init(mmu_t);
337 static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
338 static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
339 static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
340 static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
341 static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
343 static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
345 static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
346 vm_object_t, vm_pindex_t, vm_size_t);
347 static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
348 static void mmu_booke_page_init(mmu_t, vm_page_t);
349 static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
350 static void mmu_booke_pinit(mmu_t, pmap_t);
351 static void mmu_booke_pinit0(mmu_t, pmap_t);
352 static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
354 static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
355 static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
356 static void mmu_booke_release(mmu_t, pmap_t);
357 static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
358 static void mmu_booke_remove_all(mmu_t, vm_page_t);
359 static void mmu_booke_remove_write(mmu_t, vm_page_t);
360 static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
361 static void mmu_booke_zero_page(mmu_t, vm_page_t);
362 static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
363 static void mmu_booke_activate(mmu_t, struct thread *);
364 static void mmu_booke_deactivate(mmu_t, struct thread *);
365 static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
366 static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
367 static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
368 static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
369 static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
370 static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
371 static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
372 static void mmu_booke_kremove(mmu_t, vm_offset_t);
373 static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
374 static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
376 static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
378 static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
380 static void mmu_booke_scan_init(mmu_t);
381 static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
382 static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
383 static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
384 vm_size_t sz, vm_memattr_t mode);
385 static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
386 volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
387 static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
388 int *is_user, vm_offset_t *decoded_addr);
391 static mmu_method_t mmu_booke_methods[] = {
392 /* pmap dispatcher interface */
393 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
394 MMUMETHOD(mmu_copy, mmu_booke_copy),
395 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
396 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
397 MMUMETHOD(mmu_enter, mmu_booke_enter),
398 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
399 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
400 MMUMETHOD(mmu_extract, mmu_booke_extract),
401 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
402 MMUMETHOD(mmu_init, mmu_booke_init),
403 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
404 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
405 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
406 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
407 MMUMETHOD(mmu_map, mmu_booke_map),
408 MMUMETHOD(mmu_mincore, mmu_booke_mincore),
409 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
410 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
411 MMUMETHOD(mmu_page_init, mmu_booke_page_init),
412 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
413 MMUMETHOD(mmu_pinit, mmu_booke_pinit),
414 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
415 MMUMETHOD(mmu_protect, mmu_booke_protect),
416 MMUMETHOD(mmu_qenter, mmu_booke_qenter),
417 MMUMETHOD(mmu_qremove, mmu_booke_qremove),
418 MMUMETHOD(mmu_release, mmu_booke_release),
419 MMUMETHOD(mmu_remove, mmu_booke_remove),
420 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
421 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
422 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
423 MMUMETHOD(mmu_unwire, mmu_booke_unwire),
424 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
425 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
426 MMUMETHOD(mmu_activate, mmu_booke_activate),
427 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
428 MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
429 MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
431 /* Internal interfaces */
432 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
433 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
434 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
435 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
436 MMUMETHOD(mmu_kenter, mmu_booke_kenter),
437 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
438 MMUMETHOD(mmu_kextract, mmu_booke_kextract),
439 MMUMETHOD(mmu_kremove, mmu_booke_kremove),
440 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
441 MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
442 MMUMETHOD(mmu_map_user_ptr, mmu_booke_map_user_ptr),
443 MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
445 /* dumpsys() support */
446 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
447 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
448 MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
453 MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
455 static __inline uint32_t
456 tlb_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
461 if (ma != VM_MEMATTR_DEFAULT) {
463 case VM_MEMATTR_UNCACHEABLE:
464 return (MAS2_I | MAS2_G);
465 case VM_MEMATTR_WRITE_COMBINING:
466 case VM_MEMATTR_WRITE_BACK:
467 case VM_MEMATTR_PREFETCHABLE:
469 case VM_MEMATTR_WRITE_THROUGH:
470 return (MAS2_W | MAS2_M);
471 case VM_MEMATTR_CACHEABLE:
477 * Assume the page is cache inhibited and access is guarded unless
478 * it's in our available memory array.
480 attrib = _TLB_ENTRY_IO;
481 for (i = 0; i < physmem_regions_sz; i++) {
482 if ((pa >= physmem_regions[i].mr_start) &&
483 (pa < (physmem_regions[i].mr_start +
484 physmem_regions[i].mr_size))) {
485 attrib = _TLB_ENTRY_MEM;
502 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
505 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
506 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke.tlb_lock);
508 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
509 ("tlb_miss_lock: tried to lock self"));
511 tlb_lock(pc->pc_booke.tlb_lock);
513 CTR1(KTR_PMAP, "%s: locked", __func__);
520 tlb_miss_unlock(void)
528 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
530 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
531 __func__, pc->pc_cpuid);
533 tlb_unlock(pc->pc_booke.tlb_lock);
535 CTR1(KTR_PMAP, "%s: unlocked", __func__);
541 /* Read TLB0 geometry (number of entries, ways) from the TLB0CFG register. */
543 tlb0_get_tlbconf(void)
547 tlb0_cfg = mfspr(SPR_TLB0CFG);
548 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
549 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
550 tlb0_entries_per_way = tlb0_entries / tlb0_ways;
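/*
 * Worked example (illustrative, not tied to any particular core revision):
 * a 512-entry, 4-way set-associative TLB0 decodes to tlb0_entries = 512,
 * tlb0_ways = 4 and tlb0_entries_per_way = 512 / 4 = 128.
 */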
553 /* Read the number of TLB1 entries from the TLB1CFG register. */
555 tlb1_get_tlbconf(void)
559 tlb1_cfg = mfspr(SPR_TLB1CFG);
560 tlb1_entries = tlb1_cfg & TLBCFG_NENTRY_MASK;
563 /**************************************************************************/
564 /* Page table related */
565 /**************************************************************************/
568 /* Initialize pool of kva ptbl buffers. */
574 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
575 TAILQ_INIT(&ptbl_buf_freelist);
577 for (i = 0; i < PTBL_BUFS; i++) {
578 ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
579 i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
580 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
584 /* Get a ptbl_buf from the freelist. */
585 static struct ptbl_buf *
588 struct ptbl_buf *buf;
590 mtx_lock(&ptbl_buf_freelist_lock);
591 buf = TAILQ_FIRST(&ptbl_buf_freelist);
593 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
594 mtx_unlock(&ptbl_buf_freelist_lock);
599 /* Return a ptbl buf to the free pool. */
601 ptbl_buf_free(struct ptbl_buf *buf)
603 mtx_lock(&ptbl_buf_freelist_lock);
604 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
605 mtx_unlock(&ptbl_buf_freelist_lock);
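/*
 * Illustrative only: the usual pairing of the two helpers above.  A buf
 * taken from the freelist supplies the KVA window (buf->kva) that a newly
 * allocated page table is mapped into; it returns to the freelist when
 * that page table is torn down.
 */
#if 0	/* example only */
	struct ptbl_buf *pbuf;

	if ((pbuf = ptbl_buf_alloc()) == NULL)
		panic("no free ptbl bufs");
	/* ... map freshly allocated ptbl pages at pbuf->kva ... */
	ptbl_buf_free(pbuf);
#endif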
609 * Search the pmap's list of ptbl bufs for the buf backing the given ptbl and return it to the free pool.
612 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
614 struct ptbl_buf *pbuf;
616 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
617 if (pbuf->kva == (vm_offset_t) ptbl) {
618 /* Remove from pmap ptbl buf list. */
619 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
621 /* Free corresponding ptbl buf. */
629 /* Get a pointer to a PTE in a page table. */
630 static __inline pte_t *
631 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
636 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
638 pdir = pmap->pm_pp2d[PP2D_IDX(va)];
641 ptbl = pdir[PDIR_IDX(va)];
642 return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
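/*
 * Illustrative sketch only (not part of the pmap): resolving a VA to a
 * physical address by walking the same three-level tree pte_find() uses,
 * assuming the PP2D_IDX/PDIR_IDX/PTBL_IDX macros and PTE accessors from
 * pte.h.  Compare pte_vatopa() below.
 */
#if 0	/* example only */
static vm_paddr_t
example_pp2d_walk(pmap_t pmap, vm_offset_t va)
{
	pte_t **pdir;
	pte_t *ptbl, *pte;

	pdir = pmap->pm_pp2d[PP2D_IDX(va)];	/* level 1: page of pdir pointers */
	ptbl = (pdir != NULL) ? pdir[PDIR_IDX(va)] : NULL;	/* level 2 */
	pte = (ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL;	/* level 3 */
	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);
	return (PTE_PA(pte) | (va & PTE_PA_MASK));
}
#endif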
646 * Search the pmap's list of pdir bufs for the buf backing the given pdir and return it to the free pool.
649 ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
651 struct ptbl_buf *pbuf;
653 TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
654 if (pbuf->kva == (vm_offset_t) pdir) {
655 /* Remove from pmap ptbl buf list. */
656 TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
658 /* Free corresponding pdir buf. */
665 /* Free pdir pages and invalidate pdir entry. */
667 pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
675 pdir = pmap->pm_pp2d[pp2d_idx];
677 KASSERT((pdir != NULL), ("pdir_free: null pdir"));
679 pmap->pm_pp2d[pp2d_idx] = NULL;
681 for (i = 0; i < PDIR_PAGES; i++) {
682 va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
683 pa = pte_vatopa(mmu, kernel_pmap, va);
684 m = PHYS_TO_VM_PAGE(pa);
685 vm_page_free_zero(m);
690 ptbl_free_pmap_pdir(mmu, pmap, pdir);
694 * Decrement pdir pages hold count and attempt to free pdir pages. Called
695 * when removing directory entry from pdir.
697 * Return 1 if pdir pages were freed.
700 pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
707 KASSERT((pmap != kernel_pmap),
708 ("pdir_unhold: unholding kernel pdir!"));
710 pdir = pmap->pm_pp2d[pp2d_idx];
712 KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
713 ("pdir_unhold: non kva pdir"));
715 /* decrement hold count */
716 for (i = 0; i < PDIR_PAGES; i++) {
717 pa = pte_vatopa(mmu, kernel_pmap,
718 (vm_offset_t) pdir + (i * PAGE_SIZE));
719 m = PHYS_TO_VM_PAGE(pa);
724 * Free pdir pages if there are no dir entries in this pdir.
725 * wire_count has the same value for all pdir pages, so check the last page.
728 if (m->wire_count == 0) {
729 pdir_free(mmu, pmap, pp2d_idx);
736 * Increment hold count for pdir pages. This routine is used when a new ptbl
737 * entry is being inserted into the pdir.
740 pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
746 KASSERT((pmap != kernel_pmap),
747 ("pdir_hold: holding kernel pdir!"));
749 KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
751 for (i = 0; i < PDIR_PAGES; i++) {
752 pa = pte_vatopa(mmu, kernel_pmap,
753 (vm_offset_t) pdir + (i * PAGE_SIZE));
754 m = PHYS_TO_VM_PAGE(pa);
759 /* Allocate page table. */
761 ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
764 vm_page_t mtbl [PTBL_PAGES];
766 struct ptbl_buf *pbuf;
772 KASSERT((pdir[pdir_idx] == NULL),
773 ("%s: valid ptbl entry exists!", __func__));
775 pbuf = ptbl_buf_alloc();
777 panic("%s: couldn't alloc kernel virtual memory", __func__);
779 ptbl = (pte_t *) pbuf->kva;
781 for (i = 0; i < PTBL_PAGES; i++) {
782 pidx = (PTBL_PAGES * pdir_idx) + i;
783 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
784 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
786 rw_wunlock(&pvh_global_lock);
788 ptbl_free_pmap_ptbl(pmap, ptbl);
789 for (j = 0; j < i; j++)
790 vm_page_free(mtbl[j]);
795 rw_wlock(&pvh_global_lock);
801 /* Mapin allocated pages into kernel_pmap. */
802 mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
803 /* Zero whole ptbl. */
804 bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
806 /* Add pbuf to the pmap ptbl bufs list. */
807 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
812 /* Free ptbl pages and invalidate pdir entry. */
814 ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
822 ptbl = pdir[pdir_idx];
824 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
826 pdir[pdir_idx] = NULL;
828 for (i = 0; i < PTBL_PAGES; i++) {
829 va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
830 pa = pte_vatopa(mmu, kernel_pmap, va);
831 m = PHYS_TO_VM_PAGE(pa);
832 vm_page_free_zero(m);
837 ptbl_free_pmap_ptbl(pmap, ptbl);
841 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
842 * when removing pte entry from ptbl.
844 * Return 1 if ptbl pages were freed.
847 ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
857 pp2d_idx = PP2D_IDX(va);
858 pdir_idx = PDIR_IDX(va);
860 KASSERT((pmap != kernel_pmap),
861 ("ptbl_unhold: unholding kernel ptbl!"));
863 pdir = pmap->pm_pp2d[pp2d_idx];
864 ptbl = pdir[pdir_idx];
866 KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
867 ("ptbl_unhold: non kva ptbl"));
869 /* decrement hold count */
870 for (i = 0; i < PTBL_PAGES; i++) {
871 pa = pte_vatopa(mmu, kernel_pmap,
872 (vm_offset_t) ptbl + (i * PAGE_SIZE));
873 m = PHYS_TO_VM_PAGE(pa);
878 * Free ptbl pages if there are no pte entries in this ptbl.
879 * wire_count has the same value for all ptbl pages, so check the last page.
882 if (m->wire_count == 0) {
883 /* A pair of indirect entries might point to this ptbl page */
885 tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
886 TLB_SIZE_1M, MAS6_SIND);
887 tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
888 TLB_SIZE_1M, MAS6_SIND);
890 ptbl_free(mmu, pmap, pdir, pdir_idx);
891 pdir_unhold(mmu, pmap, pp2d_idx);
898 * Increment hold count for ptbl pages. This routine is used when a new pte
899 * entry is being inserted into the ptbl.
902 ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
909 KASSERT((pmap != kernel_pmap),
910 ("ptbl_hold: holding kernel ptbl!"));
912 ptbl = pdir[pdir_idx];
914 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
916 for (i = 0; i < PTBL_PAGES; i++) {
917 pa = pte_vatopa(mmu, kernel_pmap,
918 (vm_offset_t) ptbl + (i * PAGE_SIZE));
919 m = PHYS_TO_VM_PAGE(pa);
925 /* Initialize pool of kva ptbl buffers. */
931 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
932 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
933 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
934 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
936 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
937 TAILQ_INIT(&ptbl_buf_freelist);
939 for (i = 0; i < PTBL_BUFS; i++) {
941 ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
942 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
946 /* Get a ptbl_buf from the freelist. */
947 static struct ptbl_buf *
950 struct ptbl_buf *buf;
952 mtx_lock(&ptbl_buf_freelist_lock);
953 buf = TAILQ_FIRST(&ptbl_buf_freelist);
955 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
956 mtx_unlock(&ptbl_buf_freelist_lock);
958 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
963 /* Return a ptbl buf to the free pool. */
965 ptbl_buf_free(struct ptbl_buf *buf)
968 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
970 mtx_lock(&ptbl_buf_freelist_lock);
971 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
972 mtx_unlock(&ptbl_buf_freelist_lock);
976 * Search the pmap's list of ptbl bufs for the buf backing the given ptbl and return it to the free pool.
979 ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
981 struct ptbl_buf *pbuf;
983 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
985 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
987 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
988 if (pbuf->kva == (vm_offset_t)ptbl) {
989 /* Remove from pmap ptbl buf list. */
990 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
992 /* Free corresponding ptbl buf. */
998 /* Allocate page table. */
1000 ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
1002 vm_page_t mtbl[PTBL_PAGES];
1004 struct ptbl_buf *pbuf;
1009 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1010 (pmap == kernel_pmap), pdir_idx);
1012 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1013 ("ptbl_alloc: invalid pdir_idx"));
1014 KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
1015 ("pte_alloc: valid ptbl entry exists!"));
1017 pbuf = ptbl_buf_alloc();
1019 panic("pte_alloc: couldn't alloc kernel virtual memory");
1021 ptbl = (pte_t *)pbuf->kva;
1023 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
1025 for (i = 0; i < PTBL_PAGES; i++) {
1026 pidx = (PTBL_PAGES * pdir_idx) + i;
1027 while ((m = vm_page_alloc(NULL, pidx,
1028 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
1030 rw_wunlock(&pvh_global_lock);
1032 ptbl_free_pmap_ptbl(pmap, ptbl);
1033 for (j = 0; j < i; j++)
1034 vm_page_free(mtbl[j]);
1039 rw_wlock(&pvh_global_lock);
1045 /* Map allocated pages into kernel_pmap. */
1046 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
1048 /* Zero whole ptbl. */
1049 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
1051 /* Add pbuf to the pmap ptbl bufs list. */
1052 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
1057 /* Free ptbl pages and invalidate pdir entry. */
1059 ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1067 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1068 (pmap == kernel_pmap), pdir_idx);
1070 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1071 ("ptbl_free: invalid pdir_idx"));
1073 ptbl = pmap->pm_pdir[pdir_idx];
1075 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
1077 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
1080 * Invalidate the pdir entry as soon as possible, so that other CPUs
1081 * don't attempt to look up the page tables we are releasing.
1083 mtx_lock_spin(&tlbivax_mutex);
1086 pmap->pm_pdir[pdir_idx] = NULL;
1089 mtx_unlock_spin(&tlbivax_mutex);
1091 for (i = 0; i < PTBL_PAGES; i++) {
1092 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
1093 pa = pte_vatopa(mmu, kernel_pmap, va);
1094 m = PHYS_TO_VM_PAGE(pa);
1095 vm_page_free_zero(m);
1097 mmu_booke_kremove(mmu, va);
1100 ptbl_free_pmap_ptbl(pmap, ptbl);
1104 * Decrement ptbl pages hold count and attempt to free ptbl pages.
1105 * Called when removing pte entry from ptbl.
1107 * Return 1 if ptbl pages were freed.
1110 ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1117 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
1118 (pmap == kernel_pmap), pdir_idx);
1120 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1121 ("ptbl_unhold: invalid pdir_idx"));
1122 KASSERT((pmap != kernel_pmap),
1123 ("ptbl_unhold: unholding kernel ptbl!"));
1125 ptbl = pmap->pm_pdir[pdir_idx];
1127 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
1128 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
1129 ("ptbl_unhold: non kva ptbl"));
1131 /* decrement hold count */
1132 for (i = 0; i < PTBL_PAGES; i++) {
1133 pa = pte_vatopa(mmu, kernel_pmap,
1134 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1135 m = PHYS_TO_VM_PAGE(pa);
1140 * Free ptbl pages if there are no pte entries in this ptbl.
1141 * wire_count has the same value for all ptbl pages, so check the last page.
1144 if (m->wire_count == 0) {
1145 ptbl_free(mmu, pmap, pdir_idx);
1147 //debugf("ptbl_unhold: e (freed ptbl)\n");
1155 * Increment hold count for ptbl pages. This routine is used when a new pte
1156 * entry is being inserted into the ptbl.
1159 ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
1166 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
1169 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
1170 ("ptbl_hold: invalid pdir_idx"));
1171 KASSERT((pmap != kernel_pmap),
1172 ("ptbl_hold: holding kernel ptbl!"));
1174 ptbl = pmap->pm_pdir[pdir_idx];
1176 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
1178 for (i = 0; i < PTBL_PAGES; i++) {
1179 pa = pte_vatopa(mmu, kernel_pmap,
1180 (vm_offset_t)ptbl + (i * PAGE_SIZE));
1181 m = PHYS_TO_VM_PAGE(pa);
1187 /* Allocate pv_entry structure. */
1194 if (pv_entry_count > pv_entry_high_water)
1195 pagedaemon_wakeup(0); /* XXX powerpc NUMA */
1196 pv = uma_zalloc(pvzone, M_NOWAIT);
1201 /* Free pv_entry structure. */
1202 static __inline void
1203 pv_free(pv_entry_t pve)
1207 uma_zfree(pvzone, pve);
1211 /* Allocate and initialize pv_entry structure. */
1213 pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
1217 //int su = (pmap == kernel_pmap);
1218 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
1219 // (u_int32_t)pmap, va, (u_int32_t)m);
1223 panic("pv_insert: no pv entries!");
1225 pve->pv_pmap = pmap;
1228 /* add to pv_list */
1229 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1230 rw_assert(&pvh_global_lock, RA_WLOCKED);
1232 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
1234 //debugf("pv_insert: e\n");
1237 /* Destroy pv entry. */
1239 pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
1243 //int su = (pmap == kernel_pmap);
1244 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
1246 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1247 rw_assert(&pvh_global_lock, RA_WLOCKED);
1250 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
1251 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
1252 /* remove from pv_list */
1253 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
1254 if (TAILQ_EMPTY(&m->md.pv_list))
1255 vm_page_aflag_clear(m, PGA_WRITEABLE);
1257 /* free pv entry struct */
1263 //debugf("pv_remove: e\n");
1266 #ifdef __powerpc64__
1268 * Clean pte entry, try to free page table page if requested.
1270 * Return 1 if ptbl pages were freed, otherwise return 0.
1273 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
1278 pte = pte_find(mmu, pmap, va);
1279 KASSERT(pte != NULL, ("%s: NULL pte", __func__));
1281 if (!PTE_ISVALID(pte))
1284 /* Get vm_page_t for mapped pte. */
1285 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1287 if (PTE_ISWIRED(pte))
1288 pmap->pm_stats.wired_count--;
1290 /* Handle managed entry. */
1291 if (PTE_ISMANAGED(pte)) {
1293 /* Handle modified pages. */
1294 if (PTE_ISMODIFIED(pte))
1297 /* Referenced pages. */
1298 if (PTE_ISREFERENCED(pte))
1299 vm_page_aflag_set(m, PGA_REFERENCED);
1301 /* Remove pv_entry from pv_list. */
1302 pv_remove(pmap, va, m);
1303 } else if (m->md.pv_tracked) {
1304 pv_remove(pmap, va, m);
1305 if (TAILQ_EMPTY(&m->md.pv_list))
1306 m->md.pv_tracked = false;
1308 mtx_lock_spin(&tlbivax_mutex);
1311 tlb0_flush_entry(va);
1315 mtx_unlock_spin(&tlbivax_mutex);
1317 pmap->pm_stats.resident_count--;
1319 if (flags & PTBL_UNHOLD) {
1320 return (ptbl_unhold(mmu, pmap, va));
1326 * Allocate a page of pointers to page directories; do not preallocate the page tables.
1330 pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
1332 vm_page_t mtbl [PDIR_PAGES];
1334 struct ptbl_buf *pbuf;
1340 pbuf = ptbl_buf_alloc();
1343 panic("%s: couldn't alloc kernel virtual memory", __func__);
1345 /* Allocate pdir pages, this will sleep! */
1346 for (i = 0; i < PDIR_PAGES; i++) {
1347 pidx = (PDIR_PAGES * pp2d_idx) + i;
1348 req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
1349 while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
1357 /* Mapin allocated pages into kernel_pmap. */
1358 pdir = (pte_t **) pbuf->kva;
1359 pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
1361 /* Zero whole pdir. */
1362 bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
1364 /* Add pdir to the pmap pdir bufs list. */
1365 TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
1371 * Insert PTE for a given page and virtual address.
1374 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1377 unsigned int pp2d_idx = PP2D_IDX(va);
1378 unsigned int pdir_idx = PDIR_IDX(va);
1379 unsigned int ptbl_idx = PTBL_IDX(va);
1383 /* Get the page directory pointer. */
1384 pdir = pmap->pm_pp2d[pp2d_idx];
1386 pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
1388 /* Get the page table pointer. */
1389 ptbl = pdir[pdir_idx];
1392 /* Allocate page table pages. */
1393 ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
1395 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1400 * Check if there is a valid mapping for the requested va; if there is, remove it.
1403 pte = &pdir[pdir_idx][ptbl_idx];
1404 if (PTE_ISVALID(pte)) {
1405 pte_remove(mmu, pmap, va, PTBL_HOLD);
1408 * pte is not used; increment hold count for ptbl pages.
1411 if (pmap != kernel_pmap)
1412 ptbl_hold(mmu, pmap, pdir, pdir_idx);
1416 if (pdir[pdir_idx] == NULL) {
1417 if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
1418 pdir_hold(mmu, pmap, pdir);
1419 pdir[pdir_idx] = ptbl;
1421 if (pmap->pm_pp2d[pp2d_idx] == NULL)
1422 pmap->pm_pp2d[pp2d_idx] = pdir;
1425 * Insert pv_entry into pv_list for mapped page if part of managed memory.
1428 if ((m->oflags & VPO_UNMANAGED) == 0) {
1429 flags |= PTE_MANAGED;
1431 /* Create and insert pv entry. */
1432 pv_insert(pmap, va, m);
1435 mtx_lock_spin(&tlbivax_mutex);
1438 tlb0_flush_entry(va);
1439 pmap->pm_stats.resident_count++;
1440 pte = &pdir[pdir_idx][ptbl_idx];
1441 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1442 *pte |= (PTE_VALID | flags);
1445 mtx_unlock_spin(&tlbivax_mutex);
1450 /* Return the pa for the given pmap/va. */
1452 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1457 pte = pte_find(mmu, pmap, va);
1458 if ((pte != NULL) && PTE_ISVALID(pte))
1459 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1464 /* allocate pte entries to manage (addr & mask) to (addr & mask) + size */
1466 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1473 /* Initialize kernel pdir */
1474 for (i = 0; i < kernel_pdirs; i++) {
1475 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
1476 (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
1477 for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
1478 j < PDIR_NENTRIES; j++) {
1479 kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
1480 (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
1481 (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
1486 * Fill in PTEs covering kernel code and data. They are not required
1487 * for address translation, as this area is covered by static TLB1
1488 * entries, but they are needed for pte_vatopa() to work correctly with kernel area pages.
1491 for (va = addr; va < data_end; va += PAGE_SIZE) {
1492 pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
1493 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1494 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1495 PTE_VALID | PTE_PS_4KB;
1500 * Clean pte entry, try to free page table page if requested.
1502 * Return 1 if ptbl pages were freed, otherwise return 0.
1505 pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
1507 unsigned int pdir_idx = PDIR_IDX(va);
1508 unsigned int ptbl_idx = PTBL_IDX(va);
1513 //int su = (pmap == kernel_pmap);
1514 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
1515 // su, (u_int32_t)pmap, va, flags);
1517 ptbl = pmap->pm_pdir[pdir_idx];
1518 KASSERT(ptbl, ("pte_remove: null ptbl"));
1520 pte = &ptbl[ptbl_idx];
1522 if (pte == NULL || !PTE_ISVALID(pte))
1525 if (PTE_ISWIRED(pte))
1526 pmap->pm_stats.wired_count--;
1528 /* Get vm_page_t for mapped pte. */
1529 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1531 /* Handle managed entry. */
1532 if (PTE_ISMANAGED(pte)) {
1534 if (PTE_ISMODIFIED(pte))
1537 if (PTE_ISREFERENCED(pte))
1538 vm_page_aflag_set(m, PGA_REFERENCED);
1540 pv_remove(pmap, va, m);
1541 } else if (m->md.pv_tracked) {
1543 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
1544 * used. This is needed by the NCSW support code for fast
1545 * VA<->PA translation.
1547 pv_remove(pmap, va, m);
1548 if (TAILQ_EMPTY(&m->md.pv_list))
1549 m->md.pv_tracked = false;
1552 mtx_lock_spin(&tlbivax_mutex);
1555 tlb0_flush_entry(va);
1559 mtx_unlock_spin(&tlbivax_mutex);
1561 pmap->pm_stats.resident_count--;
1563 if (flags & PTBL_UNHOLD) {
1564 //debugf("pte_remove: e (unhold)\n");
1565 return (ptbl_unhold(mmu, pmap, pdir_idx));
1568 //debugf("pte_remove: e\n");
1573 * Insert PTE for a given page and virtual address.
1576 pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
1579 unsigned int pdir_idx = PDIR_IDX(va);
1580 unsigned int ptbl_idx = PTBL_IDX(va);
1583 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
1584 pmap == kernel_pmap, pmap, va);
1586 /* Get the page table pointer. */
1587 ptbl = pmap->pm_pdir[pdir_idx];
1590 /* Allocate page table pages. */
1591 ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
1593 KASSERT(nosleep, ("nosleep and NULL ptbl"));
1598 * Check if there is valid mapping for requested
1599 * va, if there is, remove it.
1601 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
1602 if (PTE_ISVALID(pte)) {
1603 pte_remove(mmu, pmap, va, PTBL_HOLD);
1606 * pte is not used; increment hold count for ptbl pages.
1609 if (pmap != kernel_pmap)
1610 ptbl_hold(mmu, pmap, pdir_idx);
1615 * Insert pv_entry into pv_list for mapped page if part of managed memory.
1618 if ((m->oflags & VPO_UNMANAGED) == 0) {
1619 flags |= PTE_MANAGED;
1621 /* Create and insert pv entry. */
1622 pv_insert(pmap, va, m);
1625 pmap->pm_stats.resident_count++;
1627 mtx_lock_spin(&tlbivax_mutex);
1630 tlb0_flush_entry(va);
1631 if (pmap->pm_pdir[pdir_idx] == NULL) {
1633 * If we just allocated a new page table, hook it in the pdir.
1636 pmap->pm_pdir[pdir_idx] = ptbl;
1638 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
1639 *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
1640 *pte |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */
1643 mtx_unlock_spin(&tlbivax_mutex);
1647 /* Return the pa for the given pmap/va. */
1649 pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1654 pte = pte_find(mmu, pmap, va);
1655 if ((pte != NULL) && PTE_ISVALID(pte))
1656 pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
1660 /* Get a pointer to a PTE in a page table. */
1662 pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1664 unsigned int pdir_idx = PDIR_IDX(va);
1665 unsigned int ptbl_idx = PTBL_IDX(va);
1667 KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
1669 if (pmap->pm_pdir[pdir_idx])
1670 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
1675 /* Set up kernel page tables. */
1677 kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
1683 /* Initialize kernel pdir */
1684 for (i = 0; i < kernel_ptbls; i++)
1685 kernel_pmap->pm_pdir[kptbl_min + i] =
1686 (pte_t *)(pdir + (i * PAGE_SIZE * PTBL_PAGES));
1689 * Fill in PTEs covering kernel code and data. They are not required
1690 * for address translation, as this area is covered by static TLB1
1691 * entries, but they are needed for pte_vatopa() to work correctly with kernel area pages.
1694 for (va = addr; va < data_end; va += PAGE_SIZE) {
1695 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1696 *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
1697 *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1698 PTE_VALID | PTE_PS_4KB;
1703 /**************************************************************************/
1705 /**************************************************************************/
1708 * This is called during booke_init, before the system is really initialized.
1711 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
1713 vm_paddr_t phys_kernelend;
1714 struct mem_region *mp, *mp1;
1716 vm_paddr_t s, e, sz;
1717 vm_paddr_t physsz, hwphyssz;
1718 u_int phys_avail_count;
1719 vm_size_t kstack0_sz;
1720 vm_offset_t kernel_pdir, kstack0;
1721 vm_paddr_t kstack0_phys;
1724 debugf("mmu_booke_bootstrap: entered\n");
1726 /* Set interesting system properties */
1728 #if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
1732 /* Initialize invalidation mutex */
1733 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
1735 /* Read TLB0 size and associativity. */
1739 * Align kernel start and end address (kernel image).
1740 * Note that kernel end does not necessarily relate to kernsize.
1741 * kernsize is the size of the kernel that is actually mapped.
1743 kernstart = trunc_page(start);
1744 data_start = round_page(kernelend);
1745 data_end = data_start;
1748 * Addresses of preloaded modules (like file systems) use
1749 * physical addresses. Make sure we relocate those into
1750 * virtual addresses.
1752 preload_addr_relocate = kernstart - kernload;
1754 /* Allocate the dynamic per-cpu area. */
1755 dpcpu = (void *)data_end;
1756 data_end += DPCPU_SIZE;
1758 /* Allocate space for the message buffer. */
1759 msgbufp = (struct msgbuf *)data_end;
1760 data_end += msgbufsize;
1761 debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1762 (uintptr_t)msgbufp, data_end);
1764 data_end = round_page(data_end);
1766 /* Allocate space for ptbl_bufs. */
1767 ptbl_bufs = (struct ptbl_buf *)data_end;
1768 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1769 debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1770 (uintptr_t)ptbl_bufs, data_end);
1772 data_end = round_page(data_end);
1774 /* Allocate PTE tables for kernel KVA. */
1775 kernel_pdir = data_end;
1776 kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
1778 #ifdef __powerpc64__
1779 kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
1780 data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
1782 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1783 debugf(" kernel ptbls: %d\n", kernel_ptbls);
1784 debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
1785 kernel_pdir, data_end);
1787 debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
1788 if (data_end - kernstart > kernsize) {
1789 kernsize += tlb1_mapin_region(kernstart + kernsize,
1790 kernload + kernsize, (data_end - kernstart) - kernsize);
1792 data_end = kernstart + kernsize;
1793 debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
1796 * Clear the structures - note we can only do it safely after the
1797 * possible additional TLB1 translations are in place (above) so that
1798 * the whole range up to the currently calculated 'data_end' is covered.
1800 dpcpu_init(dpcpu, 0);
1801 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);	/* same size as allocated above */
1802 #ifdef __powerpc64__
1803 memset((void *)kernel_pdir, 0,
1804 kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
1805 kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1807 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1810 /*******************************************************/
1811 /* Set the start and end of kva. */
1812 /*******************************************************/
1813 virtual_avail = round_page(data_end);
1814 virtual_end = VM_MAX_KERNEL_ADDRESS;
1816 /* Allocate KVA space for page zero/copy operations. */
1817 zero_page_va = virtual_avail;
1818 virtual_avail += PAGE_SIZE;
1819 copy_page_src_va = virtual_avail;
1820 virtual_avail += PAGE_SIZE;
1821 copy_page_dst_va = virtual_avail;
1822 virtual_avail += PAGE_SIZE;
1823 debugf("zero_page_va = 0x%08x\n", zero_page_va);
1824 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1825 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1827 /* Initialize page zero/copy mutexes. */
1828 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1829 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1831 /* Allocate KVA space for ptbl bufs. */
1832 ptbl_buf_pool_vabase = virtual_avail;
1833 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1834 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1835 ptbl_buf_pool_vabase, virtual_avail);
1837 /* Calculate corresponding physical addresses for the kernel region. */
1838 phys_kernelend = kernload + kernsize;
1839 debugf("kernel image and allocated data:\n");
1840 debugf(" kernload = 0x%09llx\n", (uint64_t)kernload);
1841 debugf(" kernstart = 0x%08x\n", kernstart);
1842 debugf(" kernsize = 0x%08x\n", kernsize);
1844 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1845 panic("mmu_booke_bootstrap: phys_avail too small");
1848 * Remove kernel physical address range from avail regions list. Page
1849 * align all regions. Non-page aligned memory isn't very interesting
1850 * to us. Also, sort the entries for ascending addresses.
1853 /* Retrieve phys/avail mem regions */
1854 mem_regions(&physmem_regions, &physmem_regions_sz,
1855 &availmem_regions, &availmem_regions_sz);
1857 cnt = availmem_regions_sz;
1858 debugf("processing avail regions:\n");
1859 for (mp = availmem_regions; mp->mr_size; mp++) {
1861 e = mp->mr_start + mp->mr_size;
1862 debugf(" %09jx-%09jx -> ", (uintmax_t)s, (uintmax_t)e);
1863 /* Check whether this region holds all of the kernel. */
1864 if (s < kernload && e > phys_kernelend) {
1865 availmem_regions[cnt].mr_start = phys_kernelend;
1866 availmem_regions[cnt++].mr_size = e - phys_kernelend;
1869 /* Look whether this region starts within the kernel. */
1870 if (s >= kernload && s < phys_kernelend) {
1871 if (e <= phys_kernelend)
1875 /* Now look whether this region ends within the kernel. */
1876 if (e > kernload && e <= phys_kernelend) {
1881 /* Now page align the start and size of the region. */
1887 debugf("%09jx-%09jx = %jx\n",
1888 (uintmax_t)s, (uintmax_t)e, (uintmax_t)sz);
1890 /* Check whether some memory is left here. */
1894 (cnt - (mp - availmem_regions)) * sizeof(*mp));
1900 /* Do an insertion sort. */
1901 for (mp1 = availmem_regions; mp1 < mp; mp1++)
1902 if (s < mp1->mr_start)
1905 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1913 availmem_regions_sz = cnt;
1915 /*******************************************************/
1916 /* Steal physical memory for kernel stack from the end */
1917 /* of the first avail region */
1918 /*******************************************************/
1919 kstack0_sz = kstack_pages * PAGE_SIZE;
1920 kstack0_phys = availmem_regions[0].mr_start +
1921 availmem_regions[0].mr_size;
1922 kstack0_phys -= kstack0_sz;
1923 availmem_regions[0].mr_size -= kstack0_sz;
1925 /*******************************************************/
1926 /* Fill in phys_avail table, based on availmem_regions */
1927 /*******************************************************/
1928 phys_avail_count = 0;
1931 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1933 debugf("fill in phys_avail:\n");
1934 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1936 debugf(" region: 0x%jx - 0x%jx (0x%jx)\n",
1937 (uintmax_t)availmem_regions[i].mr_start,
1938 (uintmax_t)availmem_regions[i].mr_start +
1939 availmem_regions[i].mr_size,
1940 (uintmax_t)availmem_regions[i].mr_size);
1942 if (hwphyssz != 0 &&
1943 (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1944 debugf(" hw.physmem adjust\n");
1945 if (physsz < hwphyssz) {
1946 phys_avail[j] = availmem_regions[i].mr_start;
1948 availmem_regions[i].mr_start +
1956 phys_avail[j] = availmem_regions[i].mr_start;
1957 phys_avail[j + 1] = availmem_regions[i].mr_start +
1958 availmem_regions[i].mr_size;
1960 physsz += availmem_regions[i].mr_size;
1962 physmem = btoc(physsz);
1964 /* Calculate the last available physical address. */
1965 for (i = 0; phys_avail[i + 2] != 0; i += 2)
1967 Maxmem = powerpc_btop(phys_avail[i + 1]);
1969 debugf("Maxmem = 0x%08lx\n", Maxmem);
1970 debugf("phys_avail_count = %d\n", phys_avail_count);
1971 debugf("physsz = 0x%09jx physmem = %jd (0x%09jx)\n",
1972 (uintmax_t)physsz, (uintmax_t)physmem, (uintmax_t)physmem);
1974 /*******************************************************/
1975 /* Initialize (statically allocated) kernel pmap. */
1976 /*******************************************************/
1977 PMAP_LOCK_INIT(kernel_pmap);
1978 #ifndef __powerpc64__
1979 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1982 debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
1983 kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
1984 for (i = 0; i < MAXCPU; i++) {
1985 kernel_pmap->pm_tid[i] = TID_KERNEL;
1987 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1988 tidbusy[i][TID_KERNEL] = kernel_pmap;
1991 /* Mark kernel_pmap active on all CPUs */
1992 CPU_FILL(&kernel_pmap->pm_active);
1995 * Initialize the global pv list lock.
1997 rw_init(&pvh_global_lock, "pmap pv global");
1999 /*******************************************************/
2001 /*******************************************************/
2003 /* Enter kstack0 into kernel map, provide guard page */
2004 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2005 thread0.td_kstack = kstack0;
2006 thread0.td_kstack_pages = kstack_pages;
2008 debugf("kstack_sz = 0x%08x\n", kstack0_sz);
2009 debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
2010 kstack0_phys, kstack0_phys + kstack0_sz);
2011 debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
2012 kstack0, kstack0 + kstack0_sz);
2014 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
2015 for (i = 0; i < kstack_pages; i++) {
2016 mmu_booke_kenter(mmu, kstack0, kstack0_phys);
2017 kstack0 += PAGE_SIZE;
2018 kstack0_phys += PAGE_SIZE;
2021 pmap_bootstrapped = 1;
2023 debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
2024 debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);
2026 debugf("mmu_booke_bootstrap: exit\n");
2033 tlb_entry_t *e, tmp;
2036 /* Prepare TLB1 image for AP processors */
2038 for (i = 0; i < TLB1_ENTRIES; i++) {
2039 tlb1_read_entry(&tmp, i);
2041 if ((tmp.mas1 & MAS1_VALID) && (tmp.mas2 & _TLB_ENTRY_SHARED))
2042 memcpy(e++, &tmp, sizeof(tmp));
2047 pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
2052 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
2053 * have the snapshot of its contents in the s/w __boot_tlb1[] table
2054 * created by tlb1_ap_prep(), so use these values directly to
2055 * (re)program AP's TLB1 hardware.
2057 * Start at index 1 because index 0 has the kernel map.
2059 for (i = 1; i < TLB1_ENTRIES; i++) {
2060 if (__boot_tlb1[i].mas1 & MAS1_VALID)
2061 tlb1_write_entry(&__boot_tlb1[i], i);
2064 set_mas4_defaults();
2069 booke_pmap_init_qpages(void)
2076 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
2077 if (pc->pc_qmap_addr == 0)
2078 panic("pmap_init_qpages: unable to allocate KVA");
2082 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
2085 * Get the physical page address for the given pmap/virtual address.
2088 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
2093 pa = pte_vatopa(mmu, pmap, va);
2100 * Extract the physical page address associated with the given
2101 * kernel virtual address.
2104 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
2110 if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
2111 p = pte_vatopa(mmu, kernel_pmap, va);
2114 /* Check TLB1 mappings */
2115 for (i = 0; i < TLB1_ENTRIES; i++) {
2116 tlb1_read_entry(&e, i);
2117 if (!(e.mas1 & MAS1_VALID))
2119 if (va >= e.virt && va < e.virt + e.size)
2120 return (e.phys + (va - e.virt));
2128 * Initialize the pmap module.
2129 * Called by vm_init, to initialize any structures that the pmap
2130 * system needs to map virtual memory.
2133 mmu_booke_init(mmu_t mmu)
2135 int shpgperproc = PMAP_SHPGPERPROC;
2138 * Initialize the address space (zone) for the pv entries. Set a
2139 * high water mark so that the system can recover from excessive
2140 * numbers of pv entries.
2142 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
2143 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
2145 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
2146 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
2148 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
2149 pv_entry_high_water = 9 * (pv_entry_max / 10);
2151 uma_zone_reserve_kva(pvzone, pv_entry_max);
2153 /* Pre-fill pvzone with initial number of pv entries. */
2154 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
2156 /* Initialize ptbl allocation. */
2161 * Map a list of wired pages into kernel virtual address space. This is
2162 * intended for temporary mappings which do not need page modification or
2163 * references recorded. Existing mappings in the region are overwritten.
2166 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
2171 while (count-- > 0) {
2172 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2179 * Remove page mappings from kernel virtual address space. Intended for
2180 * temporary mappings entered by mmu_booke_qenter.
2183 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
2188 while (count-- > 0) {
2189 mmu_booke_kremove(mmu, va);
2195 * Map a wired page into kernel virtual address space.
2198 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
2201 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
2205 mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2210 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2211 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
2213 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
2214 flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
2215 flags |= PTE_PS_4KB;
2217 pte = pte_find(mmu, kernel_pmap, va);
2218 KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
2220 mtx_lock_spin(&tlbivax_mutex);
2223 if (PTE_ISVALID(pte)) {
2225 CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
2227 /* Flush entry from TLB0 */
2228 tlb0_flush_entry(va);
2231 *pte = PTE_RPN_FROM_PA(pa) | flags;
2233 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
2234 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
2235 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
2237 /* Flush the real memory from the instruction cache. */
2238 if ((flags & (PTE_I | PTE_G)) == 0)
2239 __syncicache((void *)va, PAGE_SIZE);
2242 mtx_unlock_spin(&tlbivax_mutex);
2246 * Remove a page from kernel page table.
2249 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
2253 CTR2(KTR_PMAP,"%s: s (va = 0x%08x)\n", __func__, va);
2255 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
2256 (va <= VM_MAX_KERNEL_ADDRESS)),
2257 ("mmu_booke_kremove: invalid va"));
2259 pte = pte_find(mmu, kernel_pmap, va);
2261 if (!PTE_ISVALID(pte)) {
2263 CTR1(KTR_PMAP, "%s: invalid pte", __func__);
2268 mtx_lock_spin(&tlbivax_mutex);
2271 /* Invalidate entry in TLB0, update PTE. */
2272 tlb0_flush_entry(va);
2276 mtx_unlock_spin(&tlbivax_mutex);
2280 * Provide a kernel pointer corresponding to a given userland pointer.
2281 * The returned pointer is valid until the next time this function is
2282 * called in this thread. This is used internally in copyin/copyout.
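 * Since the user address is directly addressable here, a bounds check
 * against VM_MAXUSER_ADDRESS is sufficient and the user pointer is handed
 * back unchanged.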
2285 mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
2286 void **kaddr, size_t ulen, size_t *klen)
2289 if ((uintptr_t)uaddr + ulen > VM_MAXUSER_ADDRESS + PAGE_SIZE)
2292 *kaddr = (void *)(uintptr_t)uaddr;
2300 * Figure out where a given kernel pointer (usually in a fault) points
2301 * to from the VM's perspective, potentially remapping into userland's address space.
2305 mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
2306 vm_offset_t *decoded_addr)
2309 if (addr < VM_MAXUSER_ADDRESS)
2314 *decoded_addr = addr;
2319 * Initialize pmap associated with process 0.
2322 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
2325 PMAP_LOCK_INIT(pmap);
2326 mmu_booke_pinit(mmu, pmap);
2327 PCPU_SET(curpmap, pmap);
2331 * Initialize a preallocated and zeroed pmap structure,
2332 * such as one in a vmspace structure.
2335 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
2339 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
2340 curthread->td_proc->p_pid, curthread->td_proc->p_comm);
2342 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
2344 for (i = 0; i < MAXCPU; i++)
2345 pmap->pm_tid[i] = TID_NONE;
2346 CPU_ZERO(&pmap->pm_active);
2347 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
2348 #ifdef __powerpc64__
2349 bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
2350 TAILQ_INIT(&pmap->pm_pdir_list);
2352 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
2354 TAILQ_INIT(&pmap->pm_ptbl_list);
2358 * Release any resources held by the given physical map.
2359 * Called when a pmap initialized by mmu_booke_pinit is being released.
2360 * Should only be called if the map contains no valid mappings.
2363 mmu_booke_release(mmu_t mmu, pmap_t pmap)
2366 KASSERT(pmap->pm_stats.resident_count == 0,
2367 ("pmap_release: pmap resident count %ld != 0",
2368 pmap->pm_stats.resident_count));
2372 * Insert the given physical page at the specified virtual address in the
2373 * target physical map with the protection requested. If specified the page
2374 * will be wired down.
2377 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2378 vm_prot_t prot, u_int flags, int8_t psind)
2382 rw_wlock(&pvh_global_lock);
2384 error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
2386 rw_wunlock(&pvh_global_lock);
2391 mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2392 vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
2397 int error, su, sync;
2399 pa = VM_PAGE_TO_PHYS(m);
2400 su = (pmap == kernel_pmap);
2403 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
2404 // "pa=0x%08x prot=0x%08x flags=%#x)\n",
2405 // (u_int32_t)pmap, su, pmap->pm_tid,
2406 // (u_int32_t)m, va, pa, prot, flags);
2409 KASSERT(((va >= virtual_avail) &&
2410 (va <= VM_MAX_KERNEL_ADDRESS)),
2411 ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
2413 KASSERT((va <= VM_MAXUSER_ADDRESS),
2414 ("mmu_booke_enter_locked: user pmap, non user va"));
2416 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2417 VM_OBJECT_ASSERT_LOCKED(m->object);
2419 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2422 * If there is an existing mapping, and the physical address has not
2423 * changed, it must be a protection or wiring change.
2425 if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
2426 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
2429 * Before actually updating pte->flags we calculate and
2430 * prepare its new value in a helper var.
2433 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
2435 /* Wiring change, just update stats. */
2436 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
2437 if (!PTE_ISWIRED(pte)) {
2439 pmap->pm_stats.wired_count++;
2442 if (PTE_ISWIRED(pte)) {
2443 flags &= ~PTE_WIRED;
2444 pmap->pm_stats.wired_count--;
2448 if (prot & VM_PROT_WRITE) {
2449 /* Add write permissions. */
2454 if ((flags & PTE_MANAGED) != 0)
2455 vm_page_aflag_set(m, PGA_WRITEABLE);
2457 /* Handle modified pages, sense modify status. */
2460 * The PTE_MODIFIED flag could be set by underlying
2461 * TLB misses since we last read it (above), and other
2462 * CPUs could have updated it, so check the PTE directly
2463 * rather than rely on the saved local flags copy.
2466 if (PTE_ISMODIFIED(pte))
2470 if (prot & VM_PROT_EXECUTE) {
2476 * Check existing flags for execute permissions: if we
2477 * are turning execute permissions on, the icache should be flushed.
2480 if ((*pte & (PTE_UX | PTE_SX)) == 0)
2484 flags &= ~PTE_REFERENCED;
2487 * The new flags value is all calculated -- only now actually
2490 mtx_lock_spin(&tlbivax_mutex);
2493 tlb0_flush_entry(va);
2494 *pte &= ~PTE_FLAGS_MASK;
2498 mtx_unlock_spin(&tlbivax_mutex);
2502 * If there is an existing mapping, but it's for a different
2503 * physical address, pte_enter() will delete the old mapping.
2505 //if ((pte != NULL) && PTE_ISVALID(pte))
2506 // debugf("mmu_booke_enter_locked: replace\n");
2508 // debugf("mmu_booke_enter_locked: new\n");
2510 /* Now set up the flags and install the new mapping. */
2511 flags = (PTE_SR | PTE_VALID);
2517 if (prot & VM_PROT_WRITE) {
2522 if ((m->oflags & VPO_UNMANAGED) == 0)
2523 vm_page_aflag_set(m, PGA_WRITEABLE);
2526 if (prot & VM_PROT_EXECUTE) {
2532 /* If it's wired, update stats. */
2533 if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
2536 error = pte_enter(mmu, pmap, m, va, flags,
2537 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
2539 return (KERN_RESOURCE_SHORTAGE);
2541 if ((flags & PTE_WIRED) != 0)
2542 pmap->pm_stats.wired_count++;
2544 /* Flush the real memory from the instruction cache. */
2545 if (prot & VM_PROT_EXECUTE)
2549 if (sync && (su || pmap == PCPU_GET(curpmap))) {
2550 __syncicache((void *)va, PAGE_SIZE);
2554 return (KERN_SUCCESS);
2558 * Maps a sequence of resident pages belonging to the same object.
2559 * The sequence begins with the given page m_start. This page is
2560 * mapped at the given virtual address start. Each subsequent page is
2561 * mapped at a virtual address that is offset from start by the same
2562 * amount as the page is offset from m_start within the object. The
2563 * last page in the sequence is the page with the largest offset from
2564 * m_start that can be mapped at a virtual address less than the given
2565 * virtual address end. Not every virtual page between start and end
2566 * is mapped; only those for which a resident page exists with the
2567 * corresponding offset from m_start are mapped.
2570 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
2571 vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
2574 vm_pindex_t diff, psize;
2576 VM_OBJECT_ASSERT_LOCKED(m_start->object);
2578 psize = atop(end - start);
2580 rw_wlock(&pvh_global_lock);
2582 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2583 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
2584 prot & (VM_PROT_READ | VM_PROT_EXECUTE),
2585 PMAP_ENTER_NOSLEEP, 0);
2586 m = TAILQ_NEXT(m, listq);
2588 rw_wunlock(&pvh_global_lock);
2593 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
2597 rw_wlock(&pvh_global_lock);
2599 mmu_booke_enter_locked(mmu, pmap, va, m,
2600 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
2602 rw_wunlock(&pvh_global_lock);
2607 * Remove the given range of addresses from the specified map.
2609 * It is assumed that the start and end are properly rounded to the page size.
2612 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
2617 int su = (pmap == kernel_pmap);
2619 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
2620 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
2623 KASSERT(((va >= virtual_avail) &&
2624 (va <= VM_MAX_KERNEL_ADDRESS)),
2625 ("mmu_booke_remove: kernel pmap, non kernel va"));
2627 KASSERT((va <= VM_MAXUSER_ADDRESS),
2628 ("mmu_booke_remove: user pmap, non user va"));
2631 if (PMAP_REMOVE_DONE(pmap)) {
2632 //debugf("mmu_booke_remove: e (empty)\n");
2636 hold_flag = PTBL_HOLD_FLAG(pmap);
2637 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
2639 rw_wlock(&pvh_global_lock);
2641 for (; va < endva; va += PAGE_SIZE) {
2642 pte = pte_find(mmu, pmap, va);
2643 if ((pte != NULL) && PTE_ISVALID(pte))
2644 pte_remove(mmu, pmap, va, hold_flag);
2647 rw_wunlock(&pvh_global_lock);
2649 //debugf("mmu_booke_remove: e\n");
2653 * Remove physical page from all pmaps in which it resides.
2656 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
2661 rw_wlock(&pvh_global_lock);
2662 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
2663 pvn = TAILQ_NEXT(pv, pv_link);
2665 PMAP_LOCK(pv->pv_pmap);
2666 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
2667 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
2668 PMAP_UNLOCK(pv->pv_pmap);
2670 vm_page_aflag_clear(m, PGA_WRITEABLE);
2671 rw_wunlock(&pvh_global_lock);
2675 * Map a range of physical addresses into kernel virtual address space.
2678 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
2679 vm_paddr_t pa_end, int prot)
2681 vm_offset_t sva = *virt;
2682 vm_offset_t va = sva;
2684 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
2685 // sva, pa_start, pa_end);
2687 while (pa_start < pa_end) {
2688 mmu_booke_kenter(mmu, va, pa_start);
2690 pa_start += PAGE_SIZE;
2694 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
2699 * The pmap must be activated before its address space can be accessed in any way.
2703 mmu_booke_activate(mmu_t mmu, struct thread *td)
2708 pmap = &td->td_proc->p_vmspace->vm_pmap;
2710 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
2711 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2713 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
2717 cpuid = PCPU_GET(cpuid);
2718 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2719 PCPU_SET(curpmap, pmap);
2721 if (pmap->pm_tid[cpuid] == TID_NONE)
2724 /* Load PID0 register with pmap tid value. */
2725 mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
2726 __asm __volatile("isync");
2728 mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0);
2732 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
2733 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
2737 * Deactivate the specified process's address space.
2740 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
2744 pmap = &td->td_proc->p_vmspace->vm_pmap;
2746 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
2747 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
2749 td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0);
2751 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
2752 PCPU_SET(curpmap, NULL);
2756 * Copy the range specified by src_addr/len
2757 * from the source map to the range dst_addr/len
2758 * in the destination map.
2760 * This routine is only advisory and need not do anything.
2763 mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
2764 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
2770 * Set the physical protection on the specified range of this map as requested.
2773 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2780 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2781 mmu_booke_remove(mmu, pmap, sva, eva);
2785 if (prot & VM_PROT_WRITE)
2789 for (va = sva; va < eva; va += PAGE_SIZE) {
2790 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2791 if (PTE_ISVALID(pte)) {
2792 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2794 mtx_lock_spin(&tlbivax_mutex);
2797 /* Handle modified pages. */
2798 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
2801 tlb0_flush_entry(va);
2802 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2805 mtx_unlock_spin(&tlbivax_mutex);
2813 * Clear the write and modified bits in each of the given page's mappings.
2816 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2821 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2822 ("mmu_booke_remove_write: page %p is not managed", m));
2825 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2826 * set by another thread while the object is locked. Thus,
2827 * if PGA_WRITEABLE is clear, no page table entries need updating.
2829 VM_OBJECT_ASSERT_WLOCKED(m->object);
2830 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2832 rw_wlock(&pvh_global_lock);
2833 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2834 PMAP_LOCK(pv->pv_pmap);
2835 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2836 if (PTE_ISVALID(pte)) {
2837 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2839 mtx_lock_spin(&tlbivax_mutex);
2842 /* Handle modified pages. */
2843 if (PTE_ISMODIFIED(pte))
2846 /* Flush mapping from TLB0. */
2847 *pte &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2850 mtx_unlock_spin(&tlbivax_mutex);
2853 PMAP_UNLOCK(pv->pv_pmap);
2855 vm_page_aflag_clear(m, PGA_WRITEABLE);
2856 rw_wunlock(&pvh_global_lock);
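/*
 * Synchronize the instruction cache for the given range.  If the range is not
 * mapped in an active pmap, each page is temporarily entered into the current
 * pmap so that __syncicache() can reach it; otherwise the existing mapping is
 * flushed directly.
 */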
2860 mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2869 va = trunc_page(va);
2870 sz = round_page(sz);
2872 rw_wlock(&pvh_global_lock);
2873 pmap = PCPU_GET(curpmap);
2874 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2877 pte = pte_find(mmu, pm, va);
2878 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2884 /* Create a mapping in the active pmap. */
2886 m = PHYS_TO_VM_PAGE(pa);
2888 pte_enter(mmu, pmap, m, addr,
2889 PTE_SR | PTE_VALID | PTE_UR, FALSE);
2890 __syncicache((void *)addr, PAGE_SIZE);
2891 pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2894 __syncicache((void *)va, PAGE_SIZE);
2899 rw_wunlock(&pvh_global_lock);
2903 * Atomically extract and hold the physical page with the given
2904 * pmap and virtual address pair if that mapping permits the given
2908 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2920 pte = pte_find(mmu, pmap, va);
2921 if ((pte != NULL) && PTE_ISVALID(pte)) {
2922 if (pmap == kernel_pmap)
2927 if ((*pte & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2928 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2930 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2941 * Initialize a vm_page's machine-dependent fields.
2944 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2947 m->md.pv_tracked = 0;
2948 TAILQ_INIT(&m->md.pv_list);
2952 * mmu_booke_zero_page_area zeros the specified hardware page by
2953 * mapping it into virtual memory and using bzero to clear its contents.
2956 * off and size must reside within a single page.
2959 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2963 /* XXX KASSERT off and size are within a single page? */
2965 mtx_lock(&zero_page_mutex);
2968 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2969 bzero((caddr_t)va + off, size);
2970 mmu_booke_kremove(mmu, va);
2972 mtx_unlock(&zero_page_mutex);
2976 * mmu_booke_zero_page zeros the specified hardware page.
2979 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2981 vm_offset_t off, va;
2983 mtx_lock(&zero_page_mutex);
2986 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2987 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
2988 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
2989 mmu_booke_kremove(mmu, va);
2991 mtx_unlock(&zero_page_mutex);
2995 * mmu_booke_copy_page copies the specified (machine independent) page by
2996 * mapping the page into virtual memory and using memcpy to copy the page,
2997 * one machine dependent page at a time.
3000 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
3002 vm_offset_t sva, dva;
3004 sva = copy_page_src_va;
3005 dva = copy_page_dst_va;
3007 mtx_lock(&copy_page_mutex);
3008 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
3009 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
3010 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
3011 mmu_booke_kremove(mmu, dva);
3012 mmu_booke_kremove(mmu, sva);
3013 mtx_unlock(&copy_page_mutex);
3017 mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
3018 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
3021 vm_offset_t a_pg_offset, b_pg_offset;
3024 mtx_lock(&copy_page_mutex);
3025 while (xfersize > 0) {
3026 a_pg_offset = a_offset & PAGE_MASK;
3027 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
3028 mmu_booke_kenter(mmu, copy_page_src_va,
3029 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
3030 a_cp = (char *)copy_page_src_va + a_pg_offset;
3031 b_pg_offset = b_offset & PAGE_MASK;
3032 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
3033 mmu_booke_kenter(mmu, copy_page_dst_va,
3034 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
3035 b_cp = (char *)copy_page_dst_va + b_pg_offset;
3036 bcopy(a_cp, b_cp, cnt);
3037 mmu_booke_kremove(mmu, copy_page_dst_va);
3038 mmu_booke_kremove(mmu, copy_page_src_va);
3043 mtx_unlock(&copy_page_mutex);
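/*
 * Map a single page at this CPU's quick-map address (pc_qmap_addr, allocated
 * in booke_pmap_init_qpages()) for short-lived access such as zeroing or
 * copying.  Only one quick mapping may be active per CPU at a time.
 */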
3047 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
3054 paddr = VM_PAGE_TO_PHYS(m);
3056 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
3057 flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
3058 flags |= PTE_PS_4KB;
3061 qaddr = PCPU_GET(qmap_addr);
3063 pte = pte_find(mmu, kernel_pmap, qaddr);
3065 KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
3068 * XXX: tlbivax is broadcast to other cores, but qaddr should
3069 * not be present in other TLBs. Is there a better instruction
3070 * sequence to use? Or just forget it & use mmu_booke_kenter()...
3072 __asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
3073 __asm __volatile("isync; msync");
3075 *pte = PTE_RPN_FROM_PA(paddr) | flags;
3077 /* Flush the real memory from the instruction cache. */
3078 if ((flags & (PTE_I | PTE_G)) == 0)
3079 __syncicache((void *)qaddr, PAGE_SIZE);
3085 mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
3089 pte = pte_find(mmu, kernel_pmap, addr);
3091 KASSERT(PCPU_GET(qmap_addr) == addr,
3092 ("mmu_booke_quick_remove_page: invalid address"));
3094 ("mmu_booke_quick_remove_page: PTE not in use"));
3101 * Return whether or not the specified physical page was modified
3102 * in any of the physical maps.
3105 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
3111 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3112 ("mmu_booke_is_modified: page %p is not managed", m));
3116 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3117 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
3118 * is clear, no PTEs can be modified.
3120 VM_OBJECT_ASSERT_WLOCKED(m->object);
3121 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3123 rw_wlock(&pvh_global_lock);
3124 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3125 PMAP_LOCK(pv->pv_pmap);
3126 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3128 if (PTE_ISMODIFIED(pte))
3131 PMAP_UNLOCK(pv->pv_pmap);
3135 rw_wunlock(&pvh_global_lock);
3140 * Return whether or not the specified virtual address is eligible for prefaulting.
3144 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
3151 * Return whether or not the specified physical page was referenced
3152 * in any physical maps.
3155 mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
3161 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3162 ("mmu_booke_is_referenced: page %p is not managed", m));
3164 rw_wlock(&pvh_global_lock);
3165 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3166 PMAP_LOCK(pv->pv_pmap);
3167 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3169 if (PTE_ISREFERENCED(pte))
3172 PMAP_UNLOCK(pv->pv_pmap);
3176 rw_wunlock(&pvh_global_lock);
3181 * Clear the modify bits on the specified physical page.
3184 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
3189 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3190 ("mmu_booke_clear_modify: page %p is not managed", m));
3191 VM_OBJECT_ASSERT_WLOCKED(m->object);
3192 KASSERT(!vm_page_xbusied(m),
3193 ("mmu_booke_clear_modify: page %p is exclusive busied", m));
3196 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
3197 * If the object containing the page is locked and the page is not
3198 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3200 if ((m->aflags & PGA_WRITEABLE) == 0)
3202 rw_wlock(&pvh_global_lock);
3203 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3204 PMAP_LOCK(pv->pv_pmap);
3205 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3207 mtx_lock_spin(&tlbivax_mutex);
3210 if (*pte & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
3211 tlb0_flush_entry(pv->pv_va);
3212 *pte &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
3217 mtx_unlock_spin(&tlbivax_mutex);
3219 PMAP_UNLOCK(pv->pv_pmap);
3221 rw_wunlock(&pvh_global_lock);
3225 * Return a count of reference bits for a page, clearing those bits.
3226 * It is not necessary for every reference bit to be cleared, but it
3227 * is necessary that 0 only be returned when there are truly no
3228 * reference bits set.
3230 * As an optimization, update the page's dirty field if a modified bit is
3231 * found while counting reference bits. This opportunistic update can be
3232 * performed at low cost and can eliminate the need for some future calls
3233 * to pmap_is_modified(). However, since this function stops after
3234 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3235 * dirty pages. Those dirty pages will only be detected by a future call
3236 * to pmap_is_modified().
3239 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
3245 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3246 ("mmu_booke_ts_referenced: page %p is not managed", m));
3248 rw_wlock(&pvh_global_lock);
3249 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3250 PMAP_LOCK(pv->pv_pmap);
3251 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
3253 if (PTE_ISMODIFIED(pte))
3255 if (PTE_ISREFERENCED(pte)) {
3256 mtx_lock_spin(&tlbivax_mutex);
3259 tlb0_flush_entry(pv->pv_va);
3260 *pte &= ~PTE_REFERENCED;
3263 mtx_unlock_spin(&tlbivax_mutex);
3265 if (++count >= PMAP_TS_REFERENCED_MAX) {
3266 PMAP_UNLOCK(pv->pv_pmap);
3271 PMAP_UNLOCK(pv->pv_pmap);
3273 rw_wunlock(&pvh_global_lock);
3278 * Clear the wired attribute from the mappings for the specified range of
3279 * addresses in the given pmap. Every valid mapping within that range must
3280 * have the wired attribute set. In contrast, invalid mappings cannot have
3281 * the wired attribute set, so they are ignored.
3283 * The wired attribute of the page table entry is not a hardware feature, so
3284 * there is no need to invalidate any TLB entries.
3287 mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3293 for (va = sva; va < eva; va += PAGE_SIZE) {
3294 if ((pte = pte_find(mmu, pmap, va)) != NULL &&
3296 if (!PTE_ISWIRED(pte))
3297 panic("mmu_booke_unwire: pte %p isn't wired",
3300 pmap->pm_stats.wired_count--;
3308 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
3309 * page. This count may be changed upwards or downwards in the future; it is
3310 * only necessary that true be returned for a small subset of pmaps for proper page aging.
3314 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
3320 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3321 ("mmu_booke_page_exists_quick: page %p is not managed", m));
3324 rw_wlock(&pvh_global_lock);
3325 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3326 if (pv->pv_pmap == pmap) {
3333 rw_wunlock(&pvh_global_lock);
3338 * Return the number of managed mappings to the given physical page that are wired.
3342 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
3348 if ((m->oflags & VPO_UNMANAGED) != 0)
3350 rw_wlock(&pvh_global_lock);
3351 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3352 PMAP_LOCK(pv->pv_pmap);
3353 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
3354 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
3356 PMAP_UNLOCK(pv->pv_pmap);
3358 rw_wunlock(&pvh_global_lock);
3363 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3369 * This currently does not work for entries that
3370 * overlap TLB1 entries.
3372 for (i = 0; i < TLB1_ENTRIES; i ++) {
3373 if (tlb1_iomapped(i, pa, size, &va) == 0)
3381 mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
3387 /* Minidumps are based on virtual memory addresses. */
3389 *va = (void *)(vm_offset_t)pa;
3393 /* Raw physical memory dumps don't have a virtual address. */
3394 /* We always map a 256MB page at 256M. */
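/* If the range crosses a 256MB boundary, a second 256MB entry is wired below. */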
3395 gran = 256 * 1024 * 1024;
3396 ppa = rounddown2(pa, gran);
3399 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
3401 if (sz > (gran - ofs))
3402 tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
3407 mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
3415 /* Minidumps are based on virtual memory addresses. */
3416 /* Nothing to do... */
3420 for (i = 0; i < TLB1_ENTRIES; i++) {
3421 tlb1_read_entry(&e, i);
3422 if (!(e.mas1 & MAS1_VALID))
3426 /* Raw physical memory dumps don't have a virtual address. */
3431 tlb1_write_entry(&e, i);
3433 gran = 256 * 1024 * 1024;
3434 ppa = rounddown2(pa, gran);
3436 if (sz > (gran - ofs)) {
3441 tlb1_write_entry(&e, i);
3445 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3448 mmu_booke_scan_init(mmu_t mmu)
3455 /* Initialize phys. segments for dumpsys(). */
3456 memset(&dump_map, 0, sizeof(dump_map));
3457 mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
3458 &availmem_regions_sz);
3459 for (i = 0; i < physmem_regions_sz; i++) {
3460 dump_map[i].pa_start = physmem_regions[i].mr_start;
3461 dump_map[i].pa_size = physmem_regions[i].mr_size;
3466 /* Virtual segments for minidumps: */
3467 memset(&dump_map, 0, sizeof(dump_map));
3469 /* 1st: kernel .data and .bss. */
3470 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3471 dump_map[0].pa_size =
3472 round_page((uintptr_t)_end) - dump_map[0].pa_start;
3474 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3475 dump_map[1].pa_start = data_start;
3476 dump_map[1].pa_size = data_end - data_start;
3478 /* 3rd: kernel VM. */
3479 va = dump_map[1].pa_start + dump_map[1].pa_size;
3480 /* Find start of next chunk (from va). */
3481 while (va < virtual_end) {
3482 /* Don't dump the buffer cache. */
3483 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3484 va = kmi.buffer_eva;
3487 pte = pte_find(mmu, kernel_pmap, va);
3488 if (pte != NULL && PTE_ISVALID(pte))
3492 if (va < virtual_end) {
3493 dump_map[2].pa_start = va;
3495 /* Find last page in chunk. */
3496 while (va < virtual_end) {
3497 /* Don't run into the buffer cache. */
3498 if (va == kmi.buffer_sva)
3500 pte = pte_find(mmu, kernel_pmap, va);
3501 if (pte == NULL || !PTE_ISVALID(pte))
3505 dump_map[2].pa_size = va - dump_map[2].pa_start;
3510 * Map a set of physical memory pages into the kernel virtual address space.
3511 * Return a pointer to where it is mapped. This routine is intended to be used
3512 * for mapping device memory, NOT real memory.
3515 mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
3518 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
3522 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3526 uintptr_t va, tmpva;
3531 * Check if this is premapped in TLB1. Note: this should probably also
3532 * check whether a sequence of TLB1 entries exist that match the
3533 * requirement, but now only checks the easy case.
3535 for (i = 0; i < TLB1_ENTRIES; i++) {
3536 tlb1_read_entry(&e, i);
3537 if (!(e.mas1 & MAS1_VALID))
3540 (pa + size) <= (e.phys + e.size) &&
3541 (ma == VM_MEMATTR_DEFAULT ||
3542 tlb_calc_wimg(pa, ma) ==
3543 (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
3544 return (void *)(e.virt +
3545 (vm_offset_t)(pa - e.phys));
3548 size = roundup(size, PAGE_SIZE);
3551 * The device mapping area is between VM_MAXUSER_ADDRESS and
3552 * VM_MIN_KERNEL_ADDRESS. This gives 1GB of device addressing.
3554 #ifdef SPARSE_MAPDEV
3556 * With a sparse mapdev, align to the largest starting region. This
3557 * could feasibly be optimized for a 'best-fit' alignment, but that
3558 * calculation could be very costly.
3559 * Align to the smaller of:
3560 * - first set bit in overlap of (pa & size mask)
3561 * - largest size envelope
3563 * It's possible the device mapping may start at a PA that's not larger
3564 * than the size mask, so we need to offset in to maximize the TLB entry
3565 * range and minimize the number of used TLB entries.
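 *
 * Roughly: pick an entry size no larger than the request, make the low bits
 * of the VA match those of the PA for that size, and round the search base
 * up so that a single large TLB1 entry can cover the whole mapping.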
3568 tmpva = tlb1_map_base;
3569 sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
3570 sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
3571 va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
3572 #ifdef __powerpc64__
3573 } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
3575 } while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
3578 #ifdef __powerpc64__
3579 va = atomic_fetchadd_long(&tlb1_map_base, size);
3581 va = atomic_fetchadd_int(&tlb1_map_base, size);
3587 sz = 1 << (ilog2(size) & ~1);
3588 /* Align size to PA */
3592 } while (pa % sz != 0);
3594 /* Now align from there to VA */
3598 } while (va % sz != 0);
3601 printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
3602 va, (uintmax_t)pa, sz);
3603 if (tlb1_set_entry(va, pa, sz,
3604 _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0)
3615 * 'Unmap' a range mapped by mmu_booke_mapdev().
3618 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
3620 #ifdef SUPPORTS_SHRINKING_TLB1
3621 vm_offset_t base, offset;
3624 * Unmap only if this is inside kernel virtual space.
3626 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
3627 base = trunc_page(va);
3628 offset = va & PAGE_MASK;
3629 size = roundup(offset + size, PAGE_SIZE);
3630 kva_free(base, size);
3636 * mmu_booke_object_init_pt preloads the ptes for a given object into the
3637 * specified pmap. This eliminates the blast of soft faults on process startup
3638 * and immediately after an mmap.
3641 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3642 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3645 VM_OBJECT_ASSERT_WLOCKED(object);
3646 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3647 ("mmu_booke_object_init_pt: non-device object"));
3651 * Perform the pmap work for mincore.
3654 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
3655 vm_paddr_t *locked_pa)
3658 /* XXX: this should be implemented at some point */
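/*
 * Change the memory attributes (WIMG bits) of an existing kernel mapping.
 * The range is first matched against TLB1; only whole TLB1 mappings may be
 * modified that way.  Otherwise the attributes are rewritten in the kernel
 * page tables and the affected TLB0 entries are flushed.
 */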
3663 mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
3671 /* Check TLB1 mappings */
3672 for (i = 0; i < TLB1_ENTRIES; i++) {
3673 tlb1_read_entry(&e, i);
3674 if (!(e.mas1 & MAS1_VALID))
3676 if (addr >= e.virt && addr < e.virt + e.size)
3679 if (i < TLB1_ENTRIES) {
3680 /* Only allow full mappings to be modified for now. */
3681 /* Validate the range. */
3682 for (j = i, va = addr; va < addr + sz; va += e.size, j++) {
3683 tlb1_read_entry(&e, j);
3684 if (va != e.virt || (sz - (va - addr) < e.size))
3687 for (va = addr; va < addr + sz; va += e.size, i++) {
3688 tlb1_read_entry(&e, i);
3689 e.mas2 &= ~MAS2_WIMGE_MASK;
3690 e.mas2 |= tlb_calc_wimg(e.phys, mode);
3693 * Write it out to the TLB. Should really re-sync with other
3696 tlb1_write_entry(&e, i);
3701 /* Not in TLB1, try through pmap */
3702 /* First validate the range. */
3703 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3704 pte = pte_find(mmu, kernel_pmap, va);
3705 if (pte == NULL || !PTE_ISVALID(pte))
3709 mtx_lock_spin(&tlbivax_mutex);
3711 for (va = addr; va < addr + sz; va += PAGE_SIZE) {
3712 pte = pte_find(mmu, kernel_pmap, va);
3713 *pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
3714 *pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
3715 tlb0_flush_entry(va);
3718 mtx_unlock_spin(&tlbivax_mutex);
3723 /**************************************************************************/
3725 /**************************************************************************/
3728 * Allocate a TID. If necessary, steal one from someone else.
3729 * The new TID is flushed from the TLB before returning.
3732 tid_alloc(pmap_t pmap)
3737 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
3739 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
3741 thiscpu = PCPU_GET(cpuid);
3743 tid = PCPU_GET(booke.tid_next);
3746 PCPU_SET(booke.tid_next, tid + 1);
3748 /* If we are stealing TID then clear the relevant pmap's field */
3749 if (tidbusy[thiscpu][tid] != NULL) {
3751 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
3753 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
3755 /* Flush all entries from TLB0 matching this TID. */
3759 tidbusy[thiscpu][tid] = pmap;
3760 pmap->pm_tid[thiscpu] = tid;
3761 __asm __volatile("msync; isync");
3763 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
3764 PCPU_GET(booke.tid_next));
3769 /**************************************************************************/
3771 /**************************************************************************/
3774 #ifdef __powerpc64__
3775 tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
3777 tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
3788 if (mas1 & MAS1_VALID)
3793 if (mas1 & MAS1_IPROT)
3798 as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
3799 tid = MAS1_GETTID(mas1);
3801 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3804 size = tsize2size(tsize);
3806 debugf("%3d: (%s) [AS=%d] "
3807 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
3808 "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
3809 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
3812 /* Convert TLB0 va and way number to tlb0[] table index. */
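/*
 * E.g., assuming TLB0_ENTRIES_PER_WAY is 128, way 2 combined with an EPN
 * whose entry-index field is 5 gives idx = 2 * 128 + 5 = 261.
 */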
3813 static inline unsigned int
3814 tlb0_tableidx(vm_offset_t va, unsigned int way)
3818 idx = (way * TLB0_ENTRIES_PER_WAY);
3819 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
3824 * Invalidate TLB0 entry.
3827 tlb0_flush_entry(vm_offset_t va)
3830 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
3832 mtx_assert(&tlbivax_mutex, MA_OWNED);
3834 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
3835 __asm __volatile("isync; msync");
3836 __asm __volatile("tlbsync; msync");
3838 CTR1(KTR_PMAP, "%s: e", __func__);
3841 /* Print out contents of the MAS registers for each TLB0 entry */
3843 tlb0_print_tlbentries(void)
3845 uint32_t mas0, mas1, mas3, mas7;
3846 #ifdef __powerpc64__
3851 int entryidx, way, idx;
3853 debugf("TLB0 entries:\n");
3854 for (way = 0; way < TLB0_WAYS; way ++)
3855 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
3857 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
3858 mtspr(SPR_MAS0, mas0);
3859 __asm __volatile("isync");
3861 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
3862 mtspr(SPR_MAS2, mas2);
3864 __asm __volatile("isync; tlbre");
3866 mas1 = mfspr(SPR_MAS1);
3867 mas2 = mfspr(SPR_MAS2);
3868 mas3 = mfspr(SPR_MAS3);
3869 mas7 = mfspr(SPR_MAS7);
3871 idx = tlb0_tableidx(mas2, way);
3872 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
3876 /**************************************************************************/
3878 /**************************************************************************/
3881 * TLB1 mapping notes:
3883 * TLB1[0] Kernel text and data.
3884 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI
3885 * windows, and other device mappings.
3889 * Read an entry from given TLB1 slot.
3892 tlb1_read_entry(tlb_entry_t *entry, unsigned int slot)
3897 KASSERT((entry != NULL), ("%s(): Entry is NULL!", __func__));
3900 __asm __volatile("wrteei 0");
3902 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(slot);
3903 mtspr(SPR_MAS0, mas0);
3904 __asm __volatile("isync; tlbre");
3906 entry->mas1 = mfspr(SPR_MAS1);
3907 entry->mas2 = mfspr(SPR_MAS2);
3908 entry->mas3 = mfspr(SPR_MAS3);
3910 switch ((mfpvr() >> 16) & 0xFFFF) {
3915 entry->mas7 = mfspr(SPR_MAS7);
3923 entry->virt = entry->mas2 & MAS2_EPN_MASK;
3924 entry->phys = ((vm_paddr_t)(entry->mas7 & MAS7_RPN) << 32) |
3925 (entry->mas3 & MAS3_RPN);
3927 tsize2size((entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
3930 struct tlbwrite_args {
3936 tlb1_write_entry_int(void *arg)
3938 struct tlbwrite_args *args = arg;
3942 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx);
3944 mtspr(SPR_MAS0, mas0);
3945 __asm __volatile("isync");
3946 mtspr(SPR_MAS1, args->e->mas1);
3947 __asm __volatile("isync");
3948 mtspr(SPR_MAS2, args->e->mas2);
3949 __asm __volatile("isync");
3950 mtspr(SPR_MAS3, args->e->mas3);
3951 __asm __volatile("isync");
3952 switch ((mfpvr() >> 16) & 0xFFFF) {
3957 __asm __volatile("isync");
3960 mtspr(SPR_MAS7, args->e->mas7);
3961 __asm __volatile("isync");
3967 __asm __volatile("tlbwe; isync; msync");
3972 tlb1_write_entry_sync(void *arg)
3974 /* Empty synchronization point for smp_rendezvous(). */
3978 * Write given entry to TLB1 hardware.
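 * Shared entries (_TLB_ENTRY_SHARED) are propagated to all CPUs via
 * smp_rendezvous() once SMP is up; private entries are written only on the
 * local core with external interrupts disabled.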
3981 tlb1_write_entry(tlb_entry_t *e, unsigned int idx)
3983 struct tlbwrite_args args;
3989 if ((e->mas2 & _TLB_ENTRY_SHARED) && smp_started) {
3991 smp_rendezvous(tlb1_write_entry_sync,
3992 tlb1_write_entry_int,
3993 tlb1_write_entry_sync, &args);
4000 __asm __volatile("wrteei 0");
4001 tlb1_write_entry_int(&args);
4007 * Return the largest uint value log such that 2^log <= num.
4010 ilog2(unsigned int num)
4014 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
4019 * Convert TLB TSIZE value to mapped region size.
4022 tsize2size(unsigned int tsize)
4027 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
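 * e.g. tsize = 1 -> 4KB, tsize = 5 -> 1MB, tsize = 7 -> 16MB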
4030 return ((1 << (2 * tsize)) * 1024);
4034 * Convert region size (must be power of 4) to TLB TSIZE value.
4037 size2tsize(vm_size_t size)
4040 return (ilog2(size) / 2 - 5);
4044 * Register permanent kernel mapping in TLB1.
4046 * Entries are created in the first free (invalid) slot found by scanning
4047 * TLB1 from index 0, and are not supposed to be invalidated.
4050 tlb1_set_entry(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
4057 for (index = 0; index < TLB1_ENTRIES; index++) {
4058 tlb1_read_entry(&e, index);
4059 if ((e.mas1 & MAS1_VALID) == 0)
4061 /* Check if we're just updating the flags, and update them. */
4062 if (e.phys == pa && e.virt == va && e.size == size) {
4063 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4064 tlb1_write_entry(&e, index);
4068 if (index >= TLB1_ENTRIES) {
4069 printf("tlb1_set_entry: TLB1 full!\n");
4073 /* Convert size to TSIZE */
4074 tsize = size2tsize(size);
4076 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
4077 /* XXX TS is hard coded to 0 for now as we only use single address space */
4078 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
4083 e.mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
4084 e.mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
4085 e.mas2 = (va & MAS2_EPN_MASK) | flags;
4087 /* Set supervisor RWX permission bits */
4088 e.mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
4089 e.mas7 = (pa >> 32) & MAS7_RPN;
4091 tlb1_write_entry(&e, index);
4094 * XXX in general TLB1 updates should be propagated between CPUs,
4095 * since current design assumes to have the same TLB1 set-up on all
4102 * Map a contiguous RAM region into TLB1, using at most
4103 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
4105 * If necessary, round up the size of the last entry and return the total
4106 * size used by all allocated entries.
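 *
 * For example, since entry sizes are powers of four, an 80MB region would
 * typically end up covered by a 64MB entry followed by a 16MB entry.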
4109 tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
4111 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
4112 vm_size_t mapped, pgsz, base, mask;
4115 /* Round up to the next 1M */
4116 size = roundup2(size, 1 << 20);
4121 pgsz = 64*1024*1024;
4122 while (mapped < size) {
4123 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
4124 while (pgsz > (size - mapped))
4130 /* We under-map. Correct for this. */
4131 if (mapped < size) {
4132 while (pgs[idx - 1] == pgsz) {
4136 /* XXX We may increase beyond our starting point. */
4145 /* Align address to the boundary */
4147 va = (va + mask) & ~mask;
4148 pa = (pa + mask) & ~mask;
4151 for (idx = 0; idx < nents; idx++) {
4153 debugf("%u: %llx -> %x, size=%x\n", idx, pa, va, pgsz);
4154 tlb1_set_entry(va, pa, pgsz,
4155 _TLB_ENTRY_SHARED | _TLB_ENTRY_MEM);
4160 mapped = (va - base);
4161 printf("mapped size 0x%"PRI0ptrX" (wasted space 0x%"PRIxPTR")\n",
4162 mapped, mapped - size);
4167 * TLB1 initialization routine, to be called after the very first
4168 * assembler level setup done in locore.S.
4173 uint32_t mas0, mas1, mas2, mas3, mas7;
4178 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
4179 mtspr(SPR_MAS0, mas0);
4180 __asm __volatile("isync; tlbre");
4182 mas1 = mfspr(SPR_MAS1);
4183 mas2 = mfspr(SPR_MAS2);
4184 mas3 = mfspr(SPR_MAS3);
4185 mas7 = mfspr(SPR_MAS7);
4187 kernload = ((vm_paddr_t)(mas7 & MAS7_RPN) << 32) |
4190 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4191 kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
4193 /* Setup TLB miss defaults */
4194 set_mas4_defaults();
4198 * pmap_early_io_unmap() should be used shortly after a matching
4199 * pmap_early_io_map(), as in the following snippet:
4201 * x = pmap_early_io_map(...);
4202 * <do something with x>
4203 * pmap_early_io_unmap(x, size);
4205 * and with no other allocations in between.
4208 pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
4214 size = roundup(size, PAGE_SIZE);
4216 for (i = 0; i < TLB1_ENTRIES && size > 0; i++) {
4217 tlb1_read_entry(&e, i);
4218 if (!(e.mas1 & MAS1_VALID))
4220 if (va <= e.virt && (va + isize) >= (e.virt + e.size)) {
4222 e.mas1 &= ~MAS1_VALID;
4223 tlb1_write_entry(&e, i);
4226 if (tlb1_map_base == va + isize)
4227 tlb1_map_base -= isize;
4231 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
4238 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
4240 for (i = 0; i < TLB1_ENTRIES; i++) {
4241 tlb1_read_entry(&e, i);
4242 if (!(e.mas1 & MAS1_VALID))
4244 if (pa >= e.phys && (pa + size) <=
4246 return (e.virt + (pa - e.phys));
4249 pa_base = rounddown(pa, PAGE_SIZE);
4250 size = roundup(size + (pa - pa_base), PAGE_SIZE);
4251 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1));
4252 va = tlb1_map_base + (pa - pa_base);
4255 sz = 1 << (ilog2(size) & ~1);
4256 tlb1_set_entry(tlb1_map_base, pa_base, sz,
4257 _TLB_ENTRY_SHARED | _TLB_ENTRY_IO);
4260 tlb1_map_base += sz;
4267 pmap_track_page(pmap_t pmap, vm_offset_t va)
4271 struct pv_entry *pve;
4273 va = trunc_page(va);
4274 pa = pmap_kextract(va);
4275 page = PHYS_TO_VM_PAGE(pa);
4277 rw_wlock(&pvh_global_lock);
4280 TAILQ_FOREACH(pve, &page->md.pv_list, pv_link) {
4281 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
4285 page->md.pv_tracked = true;
4286 pv_insert(pmap, va, page);
4289 rw_wunlock(&pvh_global_lock);
4294 * Setup MAS4 defaults.
4295 * These values are loaded to MAS0-2 on a TLB miss.
4298 set_mas4_defaults(void)
4302 /* Defaults: TLB0, PID0, TSIZED=4K */
4303 mas4 = MAS4_TLBSELD0;
4304 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
4308 mtspr(SPR_MAS4, mas4);
4309 __asm __volatile("isync");
4313 * Print out contents of the MAS registers for each TLB1 entry
4316 tlb1_print_tlbentries(void)
4318 uint32_t mas0, mas1, mas3, mas7;
4319 #ifdef __powerpc64__
4326 debugf("TLB1 entries:\n");
4327 for (i = 0; i < TLB1_ENTRIES; i++) {
4329 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
4330 mtspr(SPR_MAS0, mas0);
4332 __asm __volatile("isync; tlbre");
4334 mas1 = mfspr(SPR_MAS1);
4335 mas2 = mfspr(SPR_MAS2);
4336 mas3 = mfspr(SPR_MAS3);
4337 mas7 = mfspr(SPR_MAS7);
4339 tlb_print_entry(i, mas1, mas2, mas3, mas7);
4344 * Return 0 if the physical IO range is encompassed by one of the
4345 * TLB1 entries, otherwise return the related error code.
4348 tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
4351 vm_paddr_t pa_start;
4353 unsigned int entry_tsize;
4354 vm_size_t entry_size;
4357 *va = (vm_offset_t)NULL;
4359 tlb1_read_entry(&e, i);
4360 /* Skip invalid entries */
4361 if (!(e.mas1 & MAS1_VALID))
4365 * The entry must be cache-inhibited, guarded, and r/w
4366 * so it can function as an i/o page
4368 prot = e.mas2 & (MAS2_I | MAS2_G);
4369 if (prot != (MAS2_I | MAS2_G))
4372 prot = e.mas3 & (MAS3_SR | MAS3_SW);
4373 if (prot != (MAS3_SR | MAS3_SW))
4376 /* The address should be within the entry range. */
4377 entry_tsize = (e.mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
4378 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
4380 entry_size = tsize2size(entry_tsize);
4381 pa_start = (((vm_paddr_t)e.mas7 & MAS7_RPN) << 32) |
4382 (e.mas3 & MAS3_RPN);
4383 pa_end = pa_start + entry_size;
4385 if ((pa < pa_start) || ((pa + size) > pa_end))
4388 /* Return virtual address of this mapping. */
4389 *va = (e.mas2 & MAS2_EPN_MASK) + (pa - pa_start);
4394 * Invalidate all TLB0 entries which match the given TID. Note this is
4395 * dedicated for cases when invalidations should NOT be propagated to other CPUs.
4399 tid_flush(tlbtid_t tid)
4402 uint32_t mas0, mas1, mas2;
4406 /* Don't evict kernel translations */
4407 if (tid == TID_KERNEL)
4411 __asm __volatile("wrteei 0");
4413 for (way = 0; way < TLB0_WAYS; way++)
4414 for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
4416 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
4417 mtspr(SPR_MAS0, mas0);
4418 __asm __volatile("isync");
4420 mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
4421 mtspr(SPR_MAS2, mas2);
4423 __asm __volatile("isync; tlbre");
4425 mas1 = mfspr(SPR_MAS1);
4427 if (!(mas1 & MAS1_VALID))
4429 if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
4431 mas1 &= ~MAS1_VALID;
4432 mtspr(SPR_MAS1, mas1);
4433 __asm __volatile("isync; tlbwe; isync; msync");