1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2018 Matthew Macy
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 #include "opt_platform.h"
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/systm.h>
36 #include <sys/conf.h>
37 #include <sys/bitstring.h>
38 #include <sys/queue.h>
39 #include <sys/cpuset.h>
40 #include <sys/endian.h>
41 #include <sys/kerneldump.h>
42 #include <sys/ktr.h>
43 #include <sys/lock.h>
44 #include <sys/syslog.h>
45 #include <sys/msgbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/mman.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/rwlock.h>
51 #include <sys/sched.h>
52 #include <sys/sysctl.h>
53 #include <sys/systm.h>
54 #include <sys/vmem.h>
55 #include <sys/vmmeter.h>
56 #include <sys/smp.h>
57
58 #include <sys/kdb.h>
59
60 #include <dev/ofw/openfirm.h>
61
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_pageout.h>
71 #include <vm/vm_phys.h>
72 #include <vm/vm_reserv.h>
73 #include <vm/vm_dumpset.h>
74 #include <vm/uma.h>
75
76 #include <machine/_inttypes.h>
77 #include <machine/cpu.h>
78 #include <machine/platform.h>
79 #include <machine/frame.h>
80 #include <machine/md_var.h>
81 #include <machine/psl.h>
82 #include <machine/bat.h>
83 #include <machine/hid.h>
84 #include <machine/pte.h>
85 #include <machine/sr.h>
86 #include <machine/trap.h>
87 #include <machine/mmuvar.h>
88
89 /* For pseries bit. */
90 #include <powerpc/pseries/phyp-hvcall.h>
91
92 #ifdef INVARIANTS
93 #include <vm/uma_dbg.h>
94 #endif
95
96 #define PPC_BITLSHIFT(bit)      (sizeof(long)*NBBY - 1 - (bit))
97 #define PPC_BIT(bit)            (1UL << PPC_BITLSHIFT(bit))
98 #define PPC_BITLSHIFT_VAL(val, bit) ((val) << PPC_BITLSHIFT(bit))
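
/*
 * A quick illustration, assuming a 64-bit long (so the shift count is
 * 63 - bit): these macros translate the Power ISA's big-endian bit
 * numbering, in which bit 0 is the most significant bit, into ordinary
 * shift masks:
 *
 *      PPC_BIT(0)  == 1UL << 63        (MSB)
 *      PPC_BIT(63) == 1UL << 0         (LSB)
 *      PPC_BITLSHIFT_VAL(0x3, 62) == 0x3UL << 1
 */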
99
100 #include "opt_ddb.h"
101
102 #ifdef DDB
103 static void pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va);
104 #endif
105
106 #define PG_W    RPTE_WIRED
107 #define PG_V    RPTE_VALID
108 #define PG_MANAGED      RPTE_MANAGED
109 #define PG_PROMOTED     RPTE_PROMOTED
110 #define PG_M    RPTE_C
111 #define PG_A    RPTE_R
112 #define PG_X    RPTE_EAA_X
113 #define PG_RW   RPTE_EAA_W
114 #define PG_PTE_CACHE RPTE_ATTR_MASK
115
116 #define RPTE_SHIFT 9
117 #define NLS_MASK ((1UL<<5)-1)
118 #define RPTE_ENTRIES (1UL<<RPTE_SHIFT)
119 #define RPTE_MASK (RPTE_ENTRIES-1)
120
121 #define NLB_SHIFT 0
122 #define NLB_MASK (((1UL<<52)-1) << 8)
123
124 extern int nkpt;
125 extern caddr_t crashdumpmap;
126
127 #define RIC_FLUSH_TLB 0
128 #define RIC_FLUSH_PWC 1
129 #define RIC_FLUSH_ALL 2
130
131 #define POWER9_TLB_SETS_RADIX   128     /* # sets in POWER9 TLB Radix mode */
132
133 #define PPC_INST_TLBIE                  0x7c000264
134 #define PPC_INST_TLBIEL                 0x7c000224
135 #define PPC_INST_SLBIA                  0x7c0003e4
136
137 #define ___PPC_RA(a)    (((a) & 0x1f) << 16)
138 #define ___PPC_RB(b)    (((b) & 0x1f) << 11)
139 #define ___PPC_RS(s)    (((s) & 0x1f) << 21)
140 #define ___PPC_RT(t)    ___PPC_RS(t)
141 #define ___PPC_R(r)     (((r) & 0x1) << 16)
142 #define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
143 #define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
144
145 #define PPC_SLBIA(IH)   __XSTRING(.long PPC_INST_SLBIA | \
146                                        ((IH & 0x7) << 21))
147 #define PPC_TLBIE_5(rb,rs,ric,prs,r)                            \
148         __XSTRING(.long PPC_INST_TLBIE |                        \
149                           ___PPC_RB(rb) | ___PPC_RS(rs) |       \
150                           ___PPC_RIC(ric) | ___PPC_PRS(prs) |   \
151                           ___PPC_R(r))
152
153 #define PPC_TLBIEL(rb,rs,ric,prs,r) \
154          __XSTRING(.long PPC_INST_TLBIEL | \
155                            ___PPC_RB(rb) | ___PPC_RS(rs) |      \
156                            ___PPC_RIC(ric) | ___PPC_PRS(prs) |  \
157                            ___PPC_R(r))
158
159 #define PPC_INVALIDATE_ERAT             PPC_SLBIA(7)
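
/*
 * For reference, these wrappers just OR the (masked) operand fields into
 * raw opcode words and emit them with .long; the values follow directly
 * from the shift macros above:
 *
 *      PPC_SLBIA(7)  ->  .long 0x7c0003e4 | (7 << 21) == .long 0x7ce003e4
 *      PPC_TLBIE_5(rb, rs, ric, prs, r)
 *                    ->  .long 0x7c000264 | rb << 11 | rs << 21 |
 *                                ric << 18 | prs << 17 | r << 16
 *
 * where rb and rs are the register numbers the compiler substitutes for
 * %0 and %1 in the inline asm of radix_tlbie() below.
 */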
160
161 static __inline void
162 ttusync(void)
163 {
164         __asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
165 }
166
167 #define TLBIEL_INVAL_SEL_MASK   0xc00   /* invalidation selector */
168 #define  TLBIEL_INVAL_PAGE      0x000   /* invalidate a single page */
169 #define  TLBIEL_INVAL_SET_PID   0x400   /* invalidate a set for the current PID */
170 #define  TLBIEL_INVAL_SET_LPID  0x800   /* invalidate a set for current LPID */
171 #define  TLBIEL_INVAL_SET       0xc00   /* invalidate a set for all LPIDs */
172
173 #define TLBIE_ACTUAL_PAGE_MASK          0xe0
174 #define  TLBIE_ACTUAL_PAGE_4K           0x00
175 #define  TLBIE_ACTUAL_PAGE_64K          0xa0
176 #define  TLBIE_ACTUAL_PAGE_2M           0x20
177 #define  TLBIE_ACTUAL_PAGE_1G           0x40
178
179 #define TLBIE_PRS_PARTITION_SCOPE       0x0
180 #define TLBIE_PRS_PROCESS_SCOPE 0x1
181
182 #define TLBIE_RIC_INVALIDATE_TLB        0x0     /* Invalidate just TLB */
183 #define TLBIE_RIC_INVALIDATE_PWC        0x1     /* Invalidate just PWC */
184 #define TLBIE_RIC_INVALIDATE_ALL        0x2     /* Invalidate TLB, PWC,
185                                                  * cached {proc, part}tab entries
186                                                  */
187 #define TLBIE_RIC_INVALIDATE_SEQ        0x3     /* HPT - only:
188                                                  * Invalidate a range of translations
189                                                  */
190
191 static __always_inline void
192 radix_tlbie(uint8_t ric, uint8_t prs, uint16_t is, uint32_t pid, uint32_t lpid,
193                         vm_offset_t va, uint16_t ap)
194 {
195         uint64_t rb, rs;
196
197         MPASS((va & PAGE_MASK) == 0);
198
199         rs = ((uint64_t)pid << 32) | lpid;
200         rb = va | is | ap;
201         __asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : :
202                 "r" (rb), "r" (rs), "i" (ric), "i" (prs) : "memory");
203 }
204
205 static __inline void
206 radix_tlbie_fixup(uint32_t pid, vm_offset_t va, int ap)
207 {
208
209         __asm __volatile("ptesync" ::: "memory");
210         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
211             TLBIEL_INVAL_PAGE, 0, 0, va, ap);
212         __asm __volatile("ptesync" ::: "memory");
213         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
214             TLBIEL_INVAL_PAGE, pid, 0, va, ap);
215 }
216
217 static __inline void
218 radix_tlbie_invlpg_user_4k(uint32_t pid, vm_offset_t va)
219 {
220
221         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
222                 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_4K);
223         radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_4K);
224 }
225
226 static __inline void
227 radix_tlbie_invlpg_user_2m(uint32_t pid, vm_offset_t va)
228 {
229
230         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
231                 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_2M);
232         radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_2M);
233 }
234
235 static __inline void
236 radix_tlbie_invlpwc_user(uint32_t pid)
237 {
238
239         radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
240                 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
241 }
242
243 static __inline void
244 radix_tlbie_flush_user(uint32_t pid)
245 {
246
247         radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
248                 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
249 }
250
251 static __inline void
252 radix_tlbie_invlpg_kernel_4k(vm_offset_t va)
253 {
254
255         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
256             TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_4K);
257         radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_4K);
258 }
259
260 static __inline void
261 radix_tlbie_invlpg_kernel_2m(vm_offset_t va)
262 {
263
264         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
265             TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_2M);
266         radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_2M);
267 }
268
269 /* 1GB pages aren't currently supported. */
270 static __inline __unused void
271 radix_tlbie_invlpg_kernel_1g(vm_offset_t va)
272 {
273
274         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
275             TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_1G);
276         radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_1G);
277 }
278
279 static __inline void
280 radix_tlbie_invlpwc_kernel(void)
281 {
282
283         radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
284             TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
285 }
286
287 static __inline void
288 radix_tlbie_flush_kernel(void)
289 {
290
291         radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
292             TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
293 }
294
295 static __inline vm_pindex_t
296 pmap_l3e_pindex(vm_offset_t va)
297 {
298         return ((va & PG_FRAME) >> L3_PAGE_SIZE_SHIFT);
299 }
300
301 static __inline vm_pindex_t
302 pmap_pml3e_index(vm_offset_t va)
303 {
304
305         return ((va >> L3_PAGE_SIZE_SHIFT) & RPTE_MASK);
306 }
307
308 static __inline vm_pindex_t
309 pmap_pml2e_index(vm_offset_t va)
310 {
311         return ((va >> L2_PAGE_SIZE_SHIFT) & RPTE_MASK);
312 }
313
314 static __inline vm_pindex_t
315 pmap_pml1e_index(vm_offset_t va)
316 {
317         return ((va & PG_FRAME) >> L1_PAGE_SIZE_SHIFT);
318 }
319
320 /* Return various clipped indexes for a given VA */
321 static __inline vm_pindex_t
322 pmap_pte_index(vm_offset_t va)
323 {
324
325         return ((va >> PAGE_SHIFT) & RPTE_MASK);
326 }
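
/*
 * Putting the index helpers together: with the usual constants from
 * machine/pte.h (a 4KB base page, 9-bit PTE/L3/L2 levels, and the 13-bit
 * L1 implied by RADIX_PGD_INDEX_SHIFT below), a 52-bit virtual address
 * decomposes as
 *
 *      va[11:0]    page offset
 *      va[20:12]   PTE index   (pmap_pte_index)
 *      va[29:21]   L3 index    (pmap_pml3e_index)
 *      va[38:30]   L2 index    (pmap_pml2e_index)
 *      va[51:39]   L1 index    (pmap_pml1e_index)
 *
 * This is a sketch of the layout implied by the shifts, not authoritative
 * documentation of the header constants.
 */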
327
328 /* Return a pointer to the PT slot that corresponds to a VA */
329 static __inline pt_entry_t *
330 pmap_l3e_to_pte(pt_entry_t *l3e, vm_offset_t va)
331 {
332         pt_entry_t *pte;
333         vm_paddr_t ptepa;
334
335         ptepa = (be64toh(*l3e) & NLB_MASK);
336         pte = (pt_entry_t *)PHYS_TO_DMAP(ptepa);
337         return (&pte[pmap_pte_index(va)]);
338 }
339
340 /* Return a pointer to the PD slot that corresponds to a VA */
341 static __inline pt_entry_t *
342 pmap_l2e_to_l3e(pt_entry_t *l2e, vm_offset_t va)
343 {
344         pt_entry_t *l3e;
345         vm_paddr_t l3pa;
346
347         l3pa = (be64toh(*l2e) & NLB_MASK);
348         l3e = (pml3_entry_t *)PHYS_TO_DMAP(l3pa);
349         return (&l3e[pmap_pml3e_index(va)]);
350 }
351
352 /* Return a pointer to the PD slot that corresponds to a VA */
353 static __inline pt_entry_t *
354 pmap_l1e_to_l2e(pt_entry_t *l1e, vm_offset_t va)
355 {
356         pt_entry_t *l2e;
357         vm_paddr_t l2pa;
358
359         l2pa = (be64toh(*l1e) & NLB_MASK);
360
361         l2e = (pml2_entry_t *)PHYS_TO_DMAP(l2pa);
362         return (&l2e[pmap_pml2e_index(va)]);
363 }
364
365 static __inline pml1_entry_t *
366 pmap_pml1e(pmap_t pmap, vm_offset_t va)
367 {
368
369         return (&pmap->pm_pml1[pmap_pml1e_index(va)]);
370 }
371
372 static pt_entry_t *
373 pmap_pml2e(pmap_t pmap, vm_offset_t va)
374 {
375         pt_entry_t *l1e;
376
377         l1e = pmap_pml1e(pmap, va);
378         if (l1e == NULL || (be64toh(*l1e) & RPTE_VALID) == 0)
379                 return (NULL);
380         return (pmap_l1e_to_l2e(l1e, va));
381 }
382
383 static __inline pt_entry_t *
384 pmap_pml3e(pmap_t pmap, vm_offset_t va)
385 {
386         pt_entry_t *l2e;
387
388         l2e = pmap_pml2e(pmap, va);
389         if (l2e == NULL || (be64toh(*l2e) & RPTE_VALID) == 0)
390                 return (NULL);
391         return (pmap_l2e_to_l3e(l2e, va));
392 }
393
394 static __inline pt_entry_t *
395 pmap_pte(pmap_t pmap, vm_offset_t va)
396 {
397         pt_entry_t *l3e;
398
399         l3e = pmap_pml3e(pmap, va);
400         if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
401                 return (NULL);
402         return (pmap_l3e_to_pte(l3e, va));
403 }
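
/*
 * A minimal usage sketch of the walkers above (hypothetical helper, for
 * illustration only; callers would normally hold the pmap lock): resolve
 * the physical address behind a 4KB mapping, if one exists.  A leaf 2MB
 * mapping would have to be caught at the L3 level instead, as
 * pmap_nofault_pte() below illustrates.
 */
static __inline __unused vm_paddr_t
pmap_va_to_pa_sketch(pmap_t pmap, vm_offset_t va)
{
        pt_entry_t *pte;

        pte = pmap_pte(pmap, va);
        if (pte == NULL || (be64toh(*pte) & PG_V) == 0)
                return (0);
        return ((be64toh(*pte) & PG_FRAME) | (va & PAGE_MASK));
}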
404
405 int nkpt = 64;
406 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
407     "Number of kernel page table pages allocated on bootup");
408
409 vm_paddr_t dmaplimit;
410
411 SYSCTL_DECL(_vm_pmap);
412
413 #ifdef INVARIANTS
414 #define VERBOSE_PMAP 0
415 #define VERBOSE_PROTECT 0
416 static int pmap_logging;
417 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_logging, CTLFLAG_RWTUN,
418     &pmap_logging, 0, "verbose debug logging");
419 #endif
420
421 static u_int64_t        KPTphys;        /* phys addr of kernel level 1 */
422
423 //static vm_paddr_t     KERNend;        /* phys addr of end of bootstrap data */
424
425 static vm_offset_t qframe = 0;
426 static struct mtx qframe_mtx;
427
428 void mmu_radix_activate(struct thread *);
429 void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int);
430 void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
431     vm_size_t);
432 void mmu_radix_clear_modify(vm_page_t);
433 void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
434 int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *);
435 int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
436 void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
437         vm_prot_t);
438 void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
439 vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va);
440 vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
441 void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
442 vm_paddr_t mmu_radix_kextract(vm_offset_t);
443 void mmu_radix_kremove(vm_offset_t);
444 boolean_t mmu_radix_is_modified(vm_page_t);
445 boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
446 boolean_t mmu_radix_is_referenced(vm_page_t);
447 void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
448         vm_pindex_t, vm_size_t);
449 boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
450 void mmu_radix_page_init(vm_page_t);
451 boolean_t mmu_radix_page_is_mapped(vm_page_t m);
452 void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
453 int mmu_radix_page_wired_mappings(vm_page_t);
454 int mmu_radix_pinit(pmap_t);
455 void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
456 bool mmu_radix_ps_enabled(pmap_t);
457 void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
458 void mmu_radix_qremove(vm_offset_t, int);
459 vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
460 void mmu_radix_quick_remove_page(vm_offset_t);
461 boolean_t mmu_radix_ts_referenced(vm_page_t);
462 void mmu_radix_release(pmap_t);
463 void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
464 void mmu_radix_remove_all(vm_page_t);
465 void mmu_radix_remove_pages(pmap_t);
466 void mmu_radix_remove_write(vm_page_t);
467 void mmu_radix_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz);
468 void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t);
469 void mmu_radix_zero_page(vm_page_t);
470 void mmu_radix_zero_page_area(vm_page_t, int, int);
471 int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
472 void mmu_radix_page_array_startup(long pages);
473
474 #include "mmu_oea64.h"
475
476 /*
477  * Kernel MMU interface
478  */
479
480 static void     mmu_radix_bootstrap(vm_offset_t, vm_offset_t);
481
482 static void mmu_radix_copy_page(vm_page_t, vm_page_t);
483 static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
484     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
485 static void mmu_radix_growkernel(vm_offset_t);
486 static void mmu_radix_init(void);
487 static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
488 static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
489 static void mmu_radix_pinit0(pmap_t);
490
491 static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
492 static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
493 static void mmu_radix_unmapdev(vm_offset_t, vm_size_t);
494 static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
495 static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t);
496 static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
497 static void mmu_radix_scan_init(void);
498 static void     mmu_radix_cpu_bootstrap(int ap);
499 static void     mmu_radix_tlbie_all(void);
500
501 static struct pmap_funcs mmu_radix_methods = {
502         .bootstrap = mmu_radix_bootstrap,
503         .copy_page = mmu_radix_copy_page,
504         .copy_pages = mmu_radix_copy_pages,
505         .cpu_bootstrap = mmu_radix_cpu_bootstrap,
506         .growkernel = mmu_radix_growkernel,
507         .init = mmu_radix_init,
508         .map =                  mmu_radix_map,
509         .mincore =              mmu_radix_mincore,
510         .pinit = mmu_radix_pinit,
511         .pinit0 = mmu_radix_pinit0,
512
513         .mapdev = mmu_radix_mapdev,
514         .mapdev_attr = mmu_radix_mapdev_attr,
515         .unmapdev = mmu_radix_unmapdev,
516         .kenter_attr = mmu_radix_kenter_attr,
517         .dev_direct_mapped = mmu_radix_dev_direct_mapped,
518         .dumpsys_pa_init = mmu_radix_scan_init,
519         .dumpsys_map_chunk = mmu_radix_dumpsys_map,
520         .page_is_mapped = mmu_radix_page_is_mapped,
521         .ps_enabled = mmu_radix_ps_enabled,
522         .object_init_pt = mmu_radix_object_init_pt,
523         .protect = mmu_radix_protect,
524         /* pmap dispatcher interface */
525         .clear_modify = mmu_radix_clear_modify,
526         .copy = mmu_radix_copy,
527         .enter = mmu_radix_enter,
528         .enter_object = mmu_radix_enter_object,
529         .enter_quick = mmu_radix_enter_quick,
530         .extract = mmu_radix_extract,
531         .extract_and_hold = mmu_radix_extract_and_hold,
532         .is_modified = mmu_radix_is_modified,
533         .is_prefaultable = mmu_radix_is_prefaultable,
534         .is_referenced = mmu_radix_is_referenced,
535         .ts_referenced = mmu_radix_ts_referenced,
536         .page_exists_quick = mmu_radix_page_exists_quick,
537         .page_init = mmu_radix_page_init,
538         .page_wired_mappings =  mmu_radix_page_wired_mappings,
539         .qenter = mmu_radix_qenter,
540         .qremove = mmu_radix_qremove,
541         .release = mmu_radix_release,
542         .remove = mmu_radix_remove,
543         .remove_all = mmu_radix_remove_all,
544         .remove_write = mmu_radix_remove_write,
545         .sync_icache = mmu_radix_sync_icache,
546         .unwire = mmu_radix_unwire,
547         .zero_page = mmu_radix_zero_page,
548         .zero_page_area = mmu_radix_zero_page_area,
549         .activate = mmu_radix_activate,
550         .quick_enter_page =  mmu_radix_quick_enter_page,
551         .quick_remove_page =  mmu_radix_quick_remove_page,
552         .page_set_memattr = mmu_radix_page_set_memattr,
553         .page_array_startup =  mmu_radix_page_array_startup,
554
555         /* Internal interfaces */
556         .kenter = mmu_radix_kenter,
557         .kextract = mmu_radix_kextract,
558         .kremove = mmu_radix_kremove,
559         .change_attr = mmu_radix_change_attr,
560         .decode_kernel_ptr =  mmu_radix_decode_kernel_ptr,
561
562         .tlbie_all = mmu_radix_tlbie_all,
563 };
564
565 MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
566
567 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
568         struct rwlock **lockp);
569 static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va);
570 static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *);
571 static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
572     struct spglist *free, struct rwlock **lockp);
573 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
574     pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
575 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
576 static bool pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *pde,
577     struct spglist *free);
578 static bool     pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
579         pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp);
580
581 static bool     pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e,
582                     u_int flags, struct rwlock **lockp);
583 #if VM_NRESERVLEVEL > 0
584 static void     pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
585         struct rwlock **lockp);
586 #endif
587 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
588 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
589 static vm_page_t mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
590         vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate);
591
592 static bool     pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
593         vm_prot_t prot, struct rwlock **lockp);
594 static int      pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde,
595         u_int flags, vm_page_t m, struct rwlock **lockp);
596
597 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
598 static void free_pv_chunk(struct pv_chunk *pc);
599 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp);
600 static vm_page_t pmap_allocl3e(pmap_t pmap, vm_offset_t va,
601         struct rwlock **lockp);
602 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
603         struct rwlock **lockp);
604 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
605     struct spglist *free);
606 static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free);
607
608 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start);
609 static void pmap_invalidate_all(pmap_t pmap);
610 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush);
611
612 /*
613  * Internal flags for pmap_enter()'s helper functions.
614  */
615 #define PMAP_ENTER_NORECLAIM    0x1000000       /* Don't reclaim PV entries. */
616 #define PMAP_ENTER_NOREPLACE    0x2000000       /* Don't replace mappings. */
617
618 #define UNIMPLEMENTED() panic("%s not implemented", __func__)
619 #define UNTESTED() panic("%s not yet tested", __func__)
620
621 /* Number of supported PID bits */
622 static unsigned int isa3_pid_bits;
623
624 /* PID to start allocating from */
625 static unsigned int isa3_base_pid;
626
627 #define PROCTAB_SIZE_SHIFT      (isa3_pid_bits + 4)
628 #define PROCTAB_ENTRIES (1ul << isa3_pid_bits)
629
630 /*
631  * Map of physical memory regions.
632  */
633 static struct   mem_region *regions, *pregions;
634 static struct   numa_mem_region *numa_pregions;
635 static u_int    phys_avail_count;
636 static int      regions_sz, pregions_sz, numa_pregions_sz;
637 static struct pate *isa3_parttab;
638 static struct prte *isa3_proctab;
639 static vmem_t *asid_arena;
640
641 extern void bs_remap_earlyboot(void);
642
643 #define RADIX_PGD_SIZE_SHIFT    16
644 #define RADIX_PGD_SIZE  (1UL << RADIX_PGD_SIZE_SHIFT)
645
646 #define RADIX_PGD_INDEX_SHIFT   (RADIX_PGD_SIZE_SHIFT-3)
647 #define NL2EPG (PAGE_SIZE/sizeof(pml2_entry_t))
648 #define NL3EPG (PAGE_SIZE/sizeof(pml3_entry_t))
649
650 #define NUPML1E         (RADIX_PGD_SIZE/sizeof(uint64_t))       /* number of userland PML1 pages */
651 #define NUPDPE          (NUPML1E * NL2EPG)/* number of userland PDP pages */
652 #define NUPDE           (NUPDPE * NL3EPG)       /* number of userland PD entries */
653
654 /* POWER9 only permits a 64k partition table size. */
655 #define PARTTAB_SIZE_SHIFT      16
656 #define PARTTAB_SIZE    (1UL << PARTTAB_SIZE_SHIFT)
657
658 #define PARTTAB_HR              (1UL << 63) /* host uses radix */
659 #define PARTTAB_GR              (1UL << 63) /* guest uses radix; must match host */
660
661 /* TLB flush actions. Used as argument to tlbiel_flush() */
662 enum {
663         TLB_INVAL_SCOPE_LPID = 2,       /* invalidate TLBs for current LPID */
664         TLB_INVAL_SCOPE_GLOBAL = 3,     /* invalidate all TLBs */
665 };
666
667 #define NPV_LIST_LOCKS  MAXCPU
668 static int pmap_initialized;
669 static vm_paddr_t proctab0pa;
670 static vm_paddr_t parttab_phys;
671 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
672
673 /*
674  * Data for the pv entry allocation mechanism.
675  * Updates to pv_invl_gen are protected by the pv_list_locks[]
676  * elements, but reads are not.
677  */
678 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
679 static struct mtx __exclusive_cache_line pv_chunks_mutex;
680 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
681 static struct md_page *pv_table;
682 static struct md_page pv_dummy;
683
684 #ifdef PV_STATS
685 #define PV_STAT(x)      do { x ; } while (0)
686 #else
687 #define PV_STAT(x)      do { } while (0)
688 #endif
689
690 #define pa_radix_index(pa)      ((pa) >> L3_PAGE_SIZE_SHIFT)
691 #define pa_to_pvh(pa)   (&pv_table[pa_radix_index(pa)])
692
693 #define PHYS_TO_PV_LIST_LOCK(pa)        \
694                         (&pv_list_locks[pa_radix_index(pa) % NPV_LIST_LOCKS])
695
696 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)  do {    \
697         struct rwlock **_lockp = (lockp);               \
698         struct rwlock *_new_lock;                       \
699                                                         \
700         _new_lock = PHYS_TO_PV_LIST_LOCK(pa);           \
701         if (_new_lock != *_lockp) {                     \
702                 if (*_lockp != NULL)                    \
703                         rw_wunlock(*_lockp);            \
704                 *_lockp = _new_lock;                    \
705                 rw_wlock(*_lockp);                      \
706         }                                               \
707 } while (0)
708
709 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)        \
710         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
711
712 #define RELEASE_PV_LIST_LOCK(lockp)             do {    \
713         struct rwlock **_lockp = (lockp);               \
714                                                         \
715         if (*_lockp != NULL) {                          \
716                 rw_wunlock(*_lockp);                    \
717                 *_lockp = NULL;                         \
718         }                                               \
719 } while (0)
720
721 #define VM_PAGE_TO_PV_LIST_LOCK(m)      \
722         PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
723
724 /*
725  * We support 52 bits, hence:
726  * bits 52 - 31 = 21, 0b10101
727  * RTS encoding details
728  * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
729  * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
730  */
731 #define RTS_SIZE ((0x2UL << 61) | (0x5UL << 5))
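
/*
 * Worked example for RTS_SIZE: a 52-bit address space needs an RTS value
 * of 52 - 31 = 21 = 0b10101.  The 5-bit value is split into a 2-bit and a
 * 3-bit field, which is exactly what the two shifts express:
 *
 *      upper two bits:   0b10  == 0x2  ->  0x2UL << 61
 *      lower three bits: 0b101 == 0x5  ->  0x5UL << 5
 *
 * Hence RTS_SIZE == (0x2UL << 61) | (0x5UL << 5).
 */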
732
733 static int powernv_enabled = 1;
734
735 static __always_inline void
736 tlbiel_radix_set_isa300(uint32_t set, uint32_t is,
737         uint32_t pid, uint32_t ric, uint32_t prs)
738 {
739         uint64_t rb;
740         uint64_t rs;
741
742         rb = PPC_BITLSHIFT_VAL(set, 51) | PPC_BITLSHIFT_VAL(is, 53);
743         rs = PPC_BITLSHIFT_VAL((uint64_t)pid, 31);
744
745         __asm __volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
746                      : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
747                      : "memory");
748 }
749
750 static void
751 tlbiel_flush_isa3(uint32_t num_sets, uint32_t is)
752 {
753         uint32_t set;
754
755         __asm __volatile("ptesync": : :"memory");
756
757         /*
758          * Flush the first set of the TLB, and the entire Page Walk Cache
759          * and partition table entries. Then flush the remaining sets of the
760          * TLB.
761          */
762         if (is == TLB_INVAL_SCOPE_GLOBAL) {
763                 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
764                 for (set = 1; set < num_sets; set++)
765                         tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
766         }
767
768         /* Do the same for process scoped entries. */
769         tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
770         for (set = 1; set < num_sets; set++)
771                 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
772
773         __asm __volatile("ptesync": : :"memory");
774 }
775
776 static void
777 mmu_radix_tlbiel_flush(int scope)
778 {
779         MPASS(scope == TLB_INVAL_SCOPE_LPID ||
780                   scope == TLB_INVAL_SCOPE_GLOBAL);
781
782         tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, scope);
783         __asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
784 }
785
786 static void
787 mmu_radix_tlbie_all(void)
788 {
789         if (powernv_enabled)
790                 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
791         else
792                 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
793 }
794
795 static void
796 mmu_radix_init_amor(void)
797 {
798         /*
799          * In HV mode, we init AMOR (Authority Mask Override Register) so that
800          * the hypervisor and guest can setup IAMR (Instruction Authority Mask
801          * Register), enable key 0 and set it to 1.
802          *
803          * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
804          */
805         mtspr(SPR_AMOR, (3ul << 62));
806 }
807
808 static void
809 mmu_radix_init_iamr(void)
810 {
811         /*
812          * Radix always uses key0 of the IAMR to determine if an access is
813          * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
814          * fetch.
815          */
816         mtspr(SPR_IAMR, (1ul << 62));
817 }
818
819 static void
820 mmu_radix_pid_set(pmap_t pmap)
821 {
822
823         mtspr(SPR_PID, pmap->pm_pid);
824         isync();
825 }
826
827 /* Quick sort callout for comparing physical addresses. */
828 static int
829 pa_cmp(const void *a, const void *b)
830 {
831         const vm_paddr_t *pa = a, *pb = b;
832
833         if (*pa < *pb)
834                 return (-1);
835         else if (*pa > *pb)
836                 return (1);
837         else
838                 return (0);
839 }
840
841 #define pte_load_store(ptep, pte)       atomic_swap_long(ptep, pte)
842 #define pte_load_clear(ptep)            atomic_swap_long(ptep, 0)
843 #define pte_store(ptep, pte) do {          \
844         MPASS((pte) & (RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_X));  \
845         *(u_long *)(ptep) = htobe64((u_long)((pte) | PG_V | RPTE_LEAF)); \
846 } while (0)
847 /*
848  * NB: should only be used for adding directories - not for direct mappings
849  */
850 #define pde_store(ptep, pa) do {                                \
851         *(u_long *)(ptep) = htobe64((u_long)(pa|RPTE_VALID|RPTE_SHIFT)); \
852 } while (0)
853
854 #define pte_clear(ptep) do {                                    \
855                 *(u_long *)(ptep) = (u_long)(0);                \
856 } while (0)
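
/*
 * A small usage sketch for the helpers above (hypothetical variables, for
 * illustration only): leaf mappings go in through pte_store(), which
 * insists on at least one EAA permission bit and adds PG_V | RPTE_LEAF,
 * while a directory is linked in with pde_store(), which only sets
 * RPTE_VALID plus RPTE_SHIFT in the low (NLS) bits:
 *
 *      pte_store(pte, pa | RPTE_EAA_R | RPTE_EAA_W | PG_A | PG_M);
 *      pde_store(l3e, next_level_pa);
 */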
857
858 #define PMAP_PDE_SUPERPAGE      (1 << 8)        /* supports 2MB superpages */
859
860 /*
861  * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
862  * (PTE) page mappings have identical settings for the following fields:
863  */
864 #define PG_PTE_PROMOTE  (PG_X | PG_MANAGED | PG_W | PG_PTE_CACHE | \
865             PG_M | PG_A | RPTE_EAA_MASK | PG_V)
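
/*
 * A sketch (hypothetical helper, not the real promotion path) of the test
 * implied by PG_PTE_PROMOTE: all 512 PTEs backing a candidate 2MB region
 * must agree on every bit in the mask and must map physically contiguous
 * frames.
 */
static __inline __unused bool
pmap_l3e_promotable_sketch(pt_entry_t *firstpte)
{
        pt_entry_t first, pte;
        int i;

        first = be64toh(*firstpte);
        for (i = 1; i < NPTEPG; i++) {
                pte = be64toh(firstpte[i]);
                if ((pte & PG_PTE_PROMOTE) != (first & PG_PTE_PROMOTE) ||
                    (pte & PG_FRAME) != (first & PG_FRAME) + i * PAGE_SIZE)
                        return (false);
        }
        return (true);
}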
866
867 static __inline void
868 pmap_resident_count_inc(pmap_t pmap, int count)
869 {
870
871         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
872         pmap->pm_stats.resident_count += count;
873 }
874
875 static __inline void
876 pmap_resident_count_dec(pmap_t pmap, int count)
877 {
878
879         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
880         KASSERT(pmap->pm_stats.resident_count >= count,
881             ("pmap %p resident count underflow %ld %d", pmap,
882             pmap->pm_stats.resident_count, count));
883         pmap->pm_stats.resident_count -= count;
884 }
885
886 static void
887 pagezero(vm_offset_t va)
888 {
889         va = trunc_page(va);
890
891         bzero((void *)va, PAGE_SIZE);
892 }
893
894 static uint64_t
895 allocpages(int n)
896 {
897         u_int64_t ret;
898
899         ret = moea64_bootstrap_alloc(n * PAGE_SIZE, PAGE_SIZE);
900         for (int i = 0; i < n; i++)
901                 pagezero(PHYS_TO_DMAP(ret + i * PAGE_SIZE));
902         return (ret);
903 }
904
905 static pt_entry_t *
906 kvtopte(vm_offset_t va)
907 {
908         pt_entry_t *l3e;
909
910         l3e = pmap_pml3e(kernel_pmap, va);
911         if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
912                 return (NULL);
913         return (pmap_l3e_to_pte(l3e, va));
914 }
915
916 void
917 mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa)
918 {
919         pt_entry_t *pte;
920
921         pte = kvtopte(va);
922         MPASS(pte != NULL);
923         *pte = htobe64(pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | \
924             RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A);
925 }
926
927 bool
928 mmu_radix_ps_enabled(pmap_t pmap)
929 {
930         return (superpages_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
931 }
932
933 static pt_entry_t *
934 pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e)
935 {
936         pml3_entry_t *l3e;
937         pt_entry_t *pte;
938
939         va &= PG_PS_FRAME;
940         l3e = pmap_pml3e(pmap, va);
941         if (l3e == NULL || (be64toh(*l3e) & PG_V) == 0)
942                 return (NULL);
943
944         if (be64toh(*l3e) & RPTE_LEAF) {
945                 *is_l3e = 1;
946                 return (l3e);
947         }
948         *is_l3e = 0;
949         va &= PG_FRAME;
950         pte = pmap_l3e_to_pte(l3e, va);
951         if (pte == NULL || (be64toh(*pte) & PG_V) == 0)
952                 return (NULL);
953         return (pte);
954 }
955
956 int
957 pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
958 {
959         pt_entry_t *pte;
960         pt_entry_t startpte, origpte, newpte;
961         vm_page_t m;
962         int is_l3e;
963
964         startpte = 0;
965  retry:
966         if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL)
967                 return (KERN_INVALID_ADDRESS);
968         origpte = newpte = be64toh(*pte);
969         if (startpte == 0) {
970                 startpte = origpte;
971                 if (((flags & VM_PROT_WRITE) && (startpte & PG_M)) ||
972                     ((flags & VM_PROT_READ) && (startpte & PG_A))) {
973                         pmap_invalidate_all(pmap);
974 #ifdef INVARIANTS
975                         if (VERBOSE_PMAP || pmap_logging)
976                                 printf("%s(%p, %#lx, %#x) (%#lx) -- invalidate all\n",
977                                     __func__, pmap, va, flags, origpte);
978 #endif
979                         return (KERN_FAILURE);
980                 }
981         }
982 #ifdef INVARIANTS
983         if (VERBOSE_PMAP || pmap_logging)
984                 printf("%s(%p, %#lx, %#x) (%#lx)\n", __func__, pmap, va,
985                     flags, origpte);
986 #endif
987         PMAP_LOCK(pmap);
988         if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL ||
989             be64toh(*pte) != origpte) {
990                 PMAP_UNLOCK(pmap);
991                 return (KERN_FAILURE);
992         }
993         m = PHYS_TO_VM_PAGE(newpte & PG_FRAME);
994         MPASS(m != NULL);
995         switch (flags) {
996         case VM_PROT_READ:
997                 if ((newpte & (RPTE_EAA_R|RPTE_EAA_X)) == 0)
998                         goto protfail;
999                 newpte |= PG_A;
1000                 vm_page_aflag_set(m, PGA_REFERENCED);
1001                 break;
1002         case VM_PROT_WRITE:
1003                 if ((newpte & RPTE_EAA_W) == 0)
1004                         goto protfail;
1005                 if (is_l3e)
1006                         goto protfail;
1007                 newpte |= PG_M;
1008                 vm_page_dirty(m);
1009                 break;
1010         case VM_PROT_EXECUTE:
1011                 if ((newpte & RPTE_EAA_X) == 0)
1012                         goto protfail;
1013                 newpte |= PG_A;
1014                 vm_page_aflag_set(m, PGA_REFERENCED);
1015                 break;
1016         }
1017
1018         if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte)))
1019                 goto retry;
1020         ptesync();
1021         PMAP_UNLOCK(pmap);
1022         if (startpte == newpte)
1023                 return (KERN_FAILURE);
1024         return (0);
1025  protfail:
1026         PMAP_UNLOCK(pmap);
1027         return (KERN_PROTECTION_FAILURE);
1028 }
1029
1030 /*
1031  * Returns TRUE if the given page is mapped individually or as part of
1032  * a 2mpage.  Otherwise, returns FALSE.
1033  */
1034 boolean_t
1035 mmu_radix_page_is_mapped(vm_page_t m)
1036 {
1037         struct rwlock *lock;
1038         boolean_t rv;
1039
1040         if ((m->oflags & VPO_UNMANAGED) != 0)
1041                 return (FALSE);
1042         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
1043         rw_rlock(lock);
1044         rv = !TAILQ_EMPTY(&m->md.pv_list) ||
1045             ((m->flags & PG_FICTITIOUS) == 0 &&
1046             !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
1047         rw_runlock(lock);
1048         return (rv);
1049 }
1050
1051 /*
1052  * Determine the appropriate bits to set in a PTE or PDE for a specified
1053  * caching mode.
1054  */
1055 static int
1056 pmap_cache_bits(vm_memattr_t ma)
1057 {
1058         if (ma != VM_MEMATTR_DEFAULT) {
1059                 switch (ma) {
1060                 case VM_MEMATTR_UNCACHEABLE:
1061                         return (RPTE_ATTR_GUARDEDIO);
1062                 case VM_MEMATTR_CACHEABLE:
1063                         return (RPTE_ATTR_MEM);
1064                 case VM_MEMATTR_WRITE_BACK:
1065                 case VM_MEMATTR_PREFETCHABLE:
1066                 case VM_MEMATTR_WRITE_COMBINING:
1067                         return (RPTE_ATTR_UNGUARDEDIO);
1068                 }
1069         }
1070         return (0);
1071 }
1072
1073 static void
1074 pmap_invalidate_page(pmap_t pmap, vm_offset_t start)
1075 {
1076         ptesync();
1077         if (pmap == kernel_pmap)
1078                 radix_tlbie_invlpg_kernel_4k(start);
1079         else
1080                 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1081         ttusync();
1082 }
1083
1084 static void
1085 pmap_invalidate_page_2m(pmap_t pmap, vm_offset_t start)
1086 {
1087         ptesync();
1088         if (pmap == kernel_pmap)
1089                 radix_tlbie_invlpg_kernel_2m(start);
1090         else
1091                 radix_tlbie_invlpg_user_2m(pmap->pm_pid, start);
1092         ttusync();
1093 }
1094
1095 static void
1096 pmap_invalidate_pwc(pmap_t pmap)
1097 {
1098         ptesync();
1099         if (pmap == kernel_pmap)
1100                 radix_tlbie_invlpwc_kernel();
1101         else
1102                 radix_tlbie_invlpwc_user(pmap->pm_pid);
1103         ttusync();
1104 }
1105
1106 static void
1107 pmap_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
1108 {
1109         if (((end - start) >> PAGE_SHIFT) > 8) {
1110                 pmap_invalidate_all(pmap);
1111                 return;
1112         }
1113         ptesync();
1114         if (pmap == kernel_pmap) {
1115                 while (start < end) {
1116                         radix_tlbie_invlpg_kernel_4k(start);
1117                         start += PAGE_SIZE;
1118                 }
1119         } else {
1120                 while (start < end) {
1121                         radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1122                         start += PAGE_SIZE;
1123                 }
1124         }
1125         ttusync();
1126 }
1127
1128 static void
1129 pmap_invalidate_all(pmap_t pmap)
1130 {
1131         ptesync();
1132         if (pmap == kernel_pmap)
1133                 radix_tlbie_flush_kernel();
1134         else
1135                 radix_tlbie_flush_user(pmap->pm_pid);
1136         ttusync();
1137 }
1138
1139 static void
1140 pmap_invalidate_l3e_page(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e)
1141 {
1142
1143         /*
1144          * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
1145          * by a promotion that did not invalidate the 512 4KB page mappings
1146          * that might exist in the TLB.  Consequently, at this point, the TLB
1147          * may hold both 4KB and 2MB page mappings for the address range [va,
1148          * va + L3_PAGE_SIZE).  Therefore, the entire range must be invalidated here.
1149          * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
1150          * 4KB page mappings for the address range [va, va + L3_PAGE_SIZE), and so a
1151          * single INVLPG suffices to invalidate the 2MB page mapping from the
1152          * TLB.
1153          */
1154         ptesync();
1155         if ((l3e & PG_PROMOTED) != 0)
1156                 pmap_invalidate_range(pmap, va, va + L3_PAGE_SIZE - 1);
1157         else
1158                 pmap_invalidate_page_2m(pmap, va);
1159
1160         pmap_invalidate_pwc(pmap);
1161 }
1162
1163 static __inline struct pv_chunk *
1164 pv_to_chunk(pv_entry_t pv)
1165 {
1166
1167         return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1168 }
1169
1170 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1171
1172 #define PC_FREE0        0xfffffffffffffffful
1173 #define PC_FREE1        0x3ffffffffffffffful
1174
1175 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1 };
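
/*
 * Each pv_chunk carries a two-word free bitmap: a set bit in pc_map[field]
 * means pc_pventry[field * 64 + bit] is free.  That is why the allocation
 * and reclaim code below finds a slot with, e.g.:
 *
 *      bit = cnttzd(pc->pc_map[field]);
 *      pc->pc_map[field] &= ~(1ul << bit);
 *      pv = &pc->pc_pventry[field * 64 + bit];
 *
 * A freshly allocated chunk starts out with pc_map[] equal to pc_freemask[].
 */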
1176
1177 /*
1178  * Ensure that the number of spare PV entries in the specified pmap meets or
1179  * exceeds the given count, "needed".
1180  *
1181  * The given PV list lock may be released.
1182  */
1183 static void
1184 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1185 {
1186         struct pch new_tail;
1187         struct pv_chunk *pc;
1188         vm_page_t m;
1189         int avail, free;
1190         bool reclaimed;
1191
1192         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1193         KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
1194
1195         /*
1196          * Newly allocated PV chunks must be stored in a private list until
1197          * the required number of PV chunks have been allocated.  Otherwise,
1198          * reclaim_pv_chunk() could recycle one of these chunks.  In
1199          * contrast, these chunks must be added to the pmap upon allocation.
1200          */
1201         TAILQ_INIT(&new_tail);
1202 retry:
1203         avail = 0;
1204         TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1205                 //              if ((cpu_feature2 & CPUID2_POPCNT) == 0)
1206                 bit_count((bitstr_t *)pc->pc_map, 0,
1207                                   sizeof(pc->pc_map) * NBBY, &free);
1208 #if 0
1209                 free = popcnt_pc_map_pq(pc->pc_map);
1210 #endif
1211                 if (free == 0)
1212                         break;
1213                 avail += free;
1214                 if (avail >= needed)
1215                         break;
1216         }
1217         for (reclaimed = false; avail < needed; avail += _NPCPV) {
1218                 m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
1219                 if (m == NULL) {
1220                         m = reclaim_pv_chunk(pmap, lockp);
1221                         if (m == NULL)
1222                                 goto retry;
1223                         reclaimed = true;
1224                 }
1225                 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1226                 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1227                 dump_add_page(m->phys_addr);
1228                 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1229                 pc->pc_pmap = pmap;
1230                 pc->pc_map[0] = PC_FREE0;
1231                 pc->pc_map[1] = PC_FREE1;
1232                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1233                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
1234                 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
1235
1236                 /*
1237                  * The reclaim might have freed a chunk from the current pmap.
1238                  * If that chunk contained available entries, we need to
1239                  * re-count the number of available entries.
1240                  */
1241                 if (reclaimed)
1242                         goto retry;
1243         }
1244         if (!TAILQ_EMPTY(&new_tail)) {
1245                 mtx_lock(&pv_chunks_mutex);
1246                 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
1247                 mtx_unlock(&pv_chunks_mutex);
1248         }
1249 }
1250
1251 /*
1252  * First find and then remove the pv entry for the specified pmap and virtual
1253  * address from the specified pv list.  Returns the pv entry if found and NULL
1254  * otherwise.  This operation can be performed on pv lists for either 4KB or
1255  * 2MB page mappings.
1256  */
1257 static __inline pv_entry_t
1258 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1259 {
1260         pv_entry_t pv;
1261
1262         TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
1263 #ifdef INVARIANTS
1264                 if (PV_PMAP(pv) == NULL) {
1265                         printf("corrupted pv_chunk/pv %p\n", pv);
1266                         printf("pv_chunk: %64D\n", pv_to_chunk(pv), ":");
1267                 }
1268                 MPASS(PV_PMAP(pv) != NULL);
1269                 MPASS(pv->pv_va != 0);
1270 #endif
1271                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1272                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
1273                         pvh->pv_gen++;
1274                         break;
1275                 }
1276         }
1277         return (pv);
1278 }
1279
1280 /*
1281  * After demotion from a 2MB page mapping to 512 4KB page mappings,
1282  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
1283  * entries for each of the 4KB page mappings.
1284  */
1285 static void
1286 pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1287     struct rwlock **lockp)
1288 {
1289         struct md_page *pvh;
1290         struct pv_chunk *pc;
1291         pv_entry_t pv;
1292         vm_offset_t va_last;
1293         vm_page_t m;
1294         int bit, field;
1295
1296         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1297         KASSERT((pa & L3_PAGE_MASK) == 0,
1298             ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
1299         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1300
1301         /*
1302          * Transfer the 2mpage's pv entry for this mapping to the first
1303          * page's pv list.  Once this transfer begins, the pv list lock
1304          * must not be released until the last pv entry is reinstantiated.
1305          */
1306         pvh = pa_to_pvh(pa);
1307         va = trunc_2mpage(va);
1308         pv = pmap_pvh_remove(pvh, pmap, va);
1309         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
1310         m = PHYS_TO_VM_PAGE(pa);
1311         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1312
1313         m->md.pv_gen++;
1314         /* Instantiate the remaining NPTEPG - 1 pv entries. */
1315         PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
1316         va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1317         for (;;) {
1318                 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1319                 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0
1320                     , ("pmap_pv_demote_pde: missing spare"));
1321                 for (field = 0; field < _NPCM; field++) {
1322                         while (pc->pc_map[field]) {
1323                                 bit = cnttzd(pc->pc_map[field]);
1324                                 pc->pc_map[field] &= ~(1ul << bit);
1325                                 pv = &pc->pc_pventry[field * 64 + bit];
1326                                 va += PAGE_SIZE;
1327                                 pv->pv_va = va;
1328                                 m++;
1329                                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1330                             ("pmap_pv_demote_pde: page %p is not managed", m));
1331                                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1332
1333                                 m->md.pv_gen++;
1334                                 if (va == va_last)
1335                                         goto out;
1336                         }
1337                 }
1338                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1339                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1340         }
1341 out:
1342         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1343                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1344                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1345         }
1346         PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
1347         PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
1348 }
1349
1350 static void
1351 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap)
1352 {
1353
1354         if (pmap == NULL)
1355                 return;
1356         pmap_invalidate_all(pmap);
1357         if (pmap != locked_pmap)
1358                 PMAP_UNLOCK(pmap);
1359 }
1360
1361 /*
1362  * We are in a serious low memory condition.  Resort to
1363  * drastic measures to free some pages so we can allocate
1364  * another pv entry chunk.
1365  *
1366  * Returns NULL if PV entries were reclaimed from the specified pmap.
1367  *
1368  * We do not, however, unmap 2mpages because subsequent accesses will
1369  * allocate per-page pv entries until repromotion occurs, thereby
1370  * exacerbating the shortage of free pv entries.
1371  */
1372 static int active_reclaims = 0;
1373 static vm_page_t
1374 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1375 {
1376         struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1377         struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1378         struct md_page *pvh;
1379         pml3_entry_t *l3e;
1380         pmap_t next_pmap, pmap;
1381         pt_entry_t *pte, tpte;
1382         pv_entry_t pv;
1383         vm_offset_t va;
1384         vm_page_t m, m_pc;
1385         struct spglist free;
1386         uint64_t inuse;
1387         int bit, field, freed;
1388
1389         PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1390         KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1391         pmap = NULL;
1392         m_pc = NULL;
1393         SLIST_INIT(&free);
1394         bzero(&pc_marker_b, sizeof(pc_marker_b));
1395         bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
1396         pc_marker = (struct pv_chunk *)&pc_marker_b;
1397         pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
1398
1399         mtx_lock(&pv_chunks_mutex);
1400         active_reclaims++;
1401         TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
1402         TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
1403         while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
1404             SLIST_EMPTY(&free)) {
1405                 next_pmap = pc->pc_pmap;
1406                 if (next_pmap == NULL) {
1407                         /*
1408                          * The next chunk is a marker.  However, it is
1409                          * not our marker, so active_reclaims must be
1410                          * > 1.  Consequently, the next_chunk code
1411                          * will not rotate the pv_chunks list.
1412                          */
1413                         goto next_chunk;
1414                 }
1415                 mtx_unlock(&pv_chunks_mutex);
1416
1417                 /*
1418                  * A pv_chunk can only be removed from the pc_lru list
1419                  * when both pv_chunks_mutex is owned and the
1420                  * corresponding pmap is locked.
1421                  */
1422                 if (pmap != next_pmap) {
1423                         reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1424                         pmap = next_pmap;
1425                         /* Avoid deadlock and lock recursion. */
1426                         if (pmap > locked_pmap) {
1427                                 RELEASE_PV_LIST_LOCK(lockp);
1428                                 PMAP_LOCK(pmap);
1429                                 mtx_lock(&pv_chunks_mutex);
1430                                 continue;
1431                         } else if (pmap != locked_pmap) {
1432                                 if (PMAP_TRYLOCK(pmap)) {
1433                                         mtx_lock(&pv_chunks_mutex);
1434                                         continue;
1435                                 } else {
1436                                         pmap = NULL; /* pmap is not locked */
1437                                         mtx_lock(&pv_chunks_mutex);
1438                                         pc = TAILQ_NEXT(pc_marker, pc_lru);
1439                                         if (pc == NULL ||
1440                                             pc->pc_pmap != next_pmap)
1441                                                 continue;
1442                                         goto next_chunk;
1443                                 }
1444                         }
1445                 }
1446
1447                 /*
1448                  * Destroy every non-wired, 4 KB page mapping in the chunk.
1449                  */
1450                 freed = 0;
1451                 for (field = 0; field < _NPCM; field++) {
1452                         for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1453                             inuse != 0; inuse &= ~(1UL << bit)) {
1454                                 bit = cnttzd(inuse);
1455                                 pv = &pc->pc_pventry[field * 64 + bit];
1456                                 va = pv->pv_va;
1457                                 l3e = pmap_pml3e(pmap, va);
1458                                 if ((be64toh(*l3e) & RPTE_LEAF) != 0)
1459                                         continue;
1460                                 pte = pmap_l3e_to_pte(l3e, va);
1461                                 if ((be64toh(*pte) & PG_W) != 0)
1462                                         continue;
1463                                 tpte = be64toh(pte_load_clear(pte));
1464                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
1465                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
1466                                         vm_page_dirty(m);
1467                                 if ((tpte & PG_A) != 0)
1468                                         vm_page_aflag_set(m, PGA_REFERENCED);
1469                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1470                                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
1471
1472                                 m->md.pv_gen++;
1473                                 if (TAILQ_EMPTY(&m->md.pv_list) &&
1474                                     (m->flags & PG_FICTITIOUS) == 0) {
1475                                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1476                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
1477                                                 vm_page_aflag_clear(m,
1478                                                     PGA_WRITEABLE);
1479                                         }
1480                                 }
1481                                 pc->pc_map[field] |= 1UL << bit;
1482                                 pmap_unuse_pt(pmap, va, be64toh(*l3e), &free);
1483                                 freed++;
1484                         }
1485                 }
1486                 if (freed == 0) {
1487                         mtx_lock(&pv_chunks_mutex);
1488                         goto next_chunk;
1489                 }
1490                 /* Every freed mapping is for a 4 KB page. */
1491                 pmap_resident_count_dec(pmap, freed);
1492                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
1493                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
1494                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
1495                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1496                 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1) {
1497                         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1498                         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1499                         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1500                         /* Entire chunk is free; return it. */
1501                         m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1502                         dump_drop_page(m_pc->phys_addr);
1503                         mtx_lock(&pv_chunks_mutex);
1504                         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1505                         break;
1506                 }
1507                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1508                 mtx_lock(&pv_chunks_mutex);
1509                 /* One freed pv entry in locked_pmap is sufficient. */
1510                 if (pmap == locked_pmap)
1511                         break;
1512 next_chunk:
1513                 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1514                 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
1515                 if (active_reclaims == 1 && pmap != NULL) {
1516                         /*
1517                          * Rotate the pv chunks list so that we do not
1518                          * scan the same pv chunks that could not be
1519                          * freed (because they contained a wired
1520                          * and/or superpage mapping) on every
1521                          * invocation of reclaim_pv_chunk().
1522                          */
1523                         while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
1524                                 MPASS(pc->pc_pmap != NULL);
1525                                 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1526                                 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1527                         }
1528                 }
1529         }
1530         TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1531         TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
1532         active_reclaims--;
1533         mtx_unlock(&pv_chunks_mutex);
1534         reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1535         if (m_pc == NULL && !SLIST_EMPTY(&free)) {
1536                 m_pc = SLIST_FIRST(&free);
1537                 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
1538                 /* Recycle a freed page table page. */
1539                 m_pc->ref_count = 1;
1540         }
1541         vm_page_free_pages_toq(&free, true);
1542         return (m_pc);
1543 }
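
/*
 * Illustrative sketch (editorial, not part of the driver): the rotation loop
 * at the end of reclaim_pv_chunk() moves every chunk sitting ahead of
 * pc_marker to the tail of pv_chunks, so the next reclaim pass starts with
 * chunks that were not just scanned.  A minimal stand-alone TAILQ rotation of
 * the same shape is shown below; "demo_entry" and "demo_rotate_to_marker" are
 * hypothetical names used only for this example, and the marker is assumed
 * to be on the list.
 */
#if 0
#include <sys/queue.h>

struct demo_entry {
        TAILQ_ENTRY(demo_entry) link;
};
TAILQ_HEAD(demo_head, demo_entry);

static void
demo_rotate_to_marker(struct demo_head *head, struct demo_entry *marker)
{
        struct demo_entry *e;

        /* Move everything ahead of the marker to the tail, preserving order. */
        while ((e = TAILQ_FIRST(head)) != marker) {
                TAILQ_REMOVE(head, e, link);
                TAILQ_INSERT_TAIL(head, e, link);
        }
}
#endif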
1544
1545 /*
1546  * free the pv_entry back to the free list
1547  */
1548 static void
1549 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1550 {
1551         struct pv_chunk *pc;
1552         int idx, field, bit;
1553
1554 #ifdef VERBOSE_PV
1555         if (pmap != kernel_pmap)
1556                 printf("%s(%p, %p)\n", __func__, pmap, pv);
1557 #endif
1558         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1559         PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1560         PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1561         PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1562         pc = pv_to_chunk(pv);
1563         idx = pv - &pc->pc_pventry[0];
1564         field = idx / 64;
1565         bit = idx % 64;
1566         pc->pc_map[field] |= 1ul << bit;
1567         if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1) {
1568                 /* 98% of the time, pc is already at the head of the list. */
1569                 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1570                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1571                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1572                 }
1573                 return;
1574         }
1575         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1576         free_pv_chunk(pc);
1577 }
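
/*
 * Illustrative sketch (editorial): free_pv_entry() packs one bit per pv entry
 * into the 64-bit words of pc_map[]; a set bit means "free", a clear bit
 * means "in use" (see get_pv_entry() below).  Worked example of the index
 * arithmetic; "pv_index_example" is a hypothetical name:
 */
#if 0
static void
pv_index_example(void)
{
        int idx = 70;                   /* position of the pv entry in the chunk */
        int field = idx / 64;           /* 70 / 64 == 1 -> second word of pc_map[] */
        int bit = idx % 64;             /* 70 % 64 == 6 */
        uint64_t mask = 1UL << bit;     /* OR in to free, AND out to allocate */

        (void)field;
        (void)mask;
}
#endif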
1578
1579 static void
1580 free_pv_chunk(struct pv_chunk *pc)
1581 {
1582         vm_page_t m;
1583
1584         mtx_lock(&pv_chunks_mutex);
1585         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1586         mtx_unlock(&pv_chunks_mutex);
1587         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1588         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1589         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1590         /* entire chunk is free, return it */
1591         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1592         dump_drop_page(m->phys_addr);
1593         vm_page_unwire_noq(m);
1594         vm_page_free(m);
1595 }
1596
1597 /*
1598  * Returns a new PV entry, allocating a new PV chunk from the system when
1599  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
1600  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
1601  * returned.
1602  *
1603  * The given PV list lock may be released.
1604  */
1605 static pv_entry_t
1606 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1607 {
1608         int bit, field;
1609         pv_entry_t pv;
1610         struct pv_chunk *pc;
1611         vm_page_t m;
1612
1613         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1614         PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1615 retry:
1616         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1617         if (pc != NULL) {
1618                 for (field = 0; field < _NPCM; field++) {
1619                         if (pc->pc_map[field]) {
1620                                 bit = cnttzd(pc->pc_map[field]);
1621                                 break;
1622                         }
1623                 }
1624                 if (field < _NPCM) {
1625                         pv = &pc->pc_pventry[field * 64 + bit];
1626                         pc->pc_map[field] &= ~(1ul << bit);
1627                         /* If this was the last item, move it to tail */
1628                         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1629                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1630                                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1631                                     pc_list);
1632                         }
1633                         PV_STAT(atomic_add_long(&pv_entry_count, 1));
1634                         PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1635                         MPASS(PV_PMAP(pv) != NULL);
1636                         return (pv);
1637                 }
1638         }
1639         /* No free items, allocate another chunk */
1640         m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
1641         if (m == NULL) {
1642                 if (lockp == NULL) {
1643                         PV_STAT(pc_chunk_tryfail++);
1644                         return (NULL);
1645                 }
1646                 m = reclaim_pv_chunk(pmap, lockp);
1647                 if (m == NULL)
1648                         goto retry;
1649         }
1650         PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1651         PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1652         dump_add_page(m->phys_addr);
1653         pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1654         pc->pc_pmap = pmap;
1655         pc->pc_map[0] = PC_FREE0 & ~1ul;        /* preallocated bit 0 */
1656         pc->pc_map[1] = PC_FREE1;
1657         mtx_lock(&pv_chunks_mutex);
1658         TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1659         mtx_unlock(&pv_chunks_mutex);
1660         pv = &pc->pc_pventry[0];
1661         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1662         PV_STAT(atomic_add_long(&pv_entry_count, 1));
1663         PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1664         MPASS(PV_PMAP(pv) != NULL);
1665         return (pv);
1666 }
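
/*
 * Illustrative sketch (editorial): the two calling modes of get_pv_entry().
 * With a NULL lock pointer the allocation is best-effort and may return NULL
 * without reclaiming (this is what pmap_try_insert_pv_entry() below relies
 * on); with a non-NULL lock pointer, reclaim_pv_chunk() may run and the PV
 * list lock may be dropped and re-acquired.  "example_va" and "example_m"
 * are hypothetical parameters used only for this sketch.
 */
#if 0
static void
get_pv_entry_usage(pmap_t pmap, vm_offset_t example_va, vm_page_t example_m,
    struct rwlock **lockp)
{
        pv_entry_t pv;

        /* Best effort: never triggers reclamation, so it can fail. */
        if ((pv = get_pv_entry(pmap, NULL)) == NULL)
                return;

        pv->pv_va = example_va;
        CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, example_m);
        TAILQ_INSERT_TAIL(&example_m->md.pv_list, pv, pv_link);
        example_m->md.pv_gen++;
}
#endif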
1667
1668 #if VM_NRESERVLEVEL > 0
1669 /*
1670  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
1671  * replace the many pv entries for the 4KB page mappings by a single pv entry
1672  * for the 2MB page mapping.
1673  */
1674 static void
1675 pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1676     struct rwlock **lockp)
1677 {
1678         struct md_page *pvh;
1679         pv_entry_t pv;
1680         vm_offset_t va_last;
1681         vm_page_t m;
1682
1683         KASSERT((pa & L3_PAGE_MASK) == 0,
1684             ("pmap_pv_promote_l3e: pa is not 2mpage aligned"));
1685         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1686
1687         /*
1688          * Transfer the first page's pv entry for this mapping to the 2mpage's
1689          * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
1690          * a transfer avoids the possibility that get_pv_entry() calls
1691          * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
1692          * mappings that is being promoted.
1693          */
1694         m = PHYS_TO_VM_PAGE(pa);
1695         va = trunc_2mpage(va);
1696         pv = pmap_pvh_remove(&m->md, pmap, va);
1697         KASSERT(pv != NULL, ("pmap_pv_promote_l3e: pv not found"));
1698         pvh = pa_to_pvh(pa);
1699         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
1700         pvh->pv_gen++;
1701         /* Free the remaining NPTEPG - 1 pv entries. */
1702         va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1703         do {
1704                 m++;
1705                 va += PAGE_SIZE;
1706                 pmap_pvh_free(&m->md, pmap, va);
1707         } while (va < va_last);
1708 }
1709 #endif /* VM_NRESERVLEVEL > 0 */
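
/*
 * Illustrative arithmetic (editorial): a 2MB (L3_PAGE_SIZE) superpage covers
 * 2MB / 4KB = 512 4KB pages.  pmap_pv_promote_l3e() transfers the pv entry
 * of the first 4KB page to the superpage's pv list and then frees the
 * remaining 511 entries, i.e. the do/while loop above runs for va + 4KB
 * through va + 2MB - 4KB inclusive.
 */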
1710
1711 /*
1712  * First find and then destroy the pv entry for the specified pmap and virtual
1713  * address.  This operation can be performed on pv lists for either 4KB or 2MB
1714  * page mappings.
1715  */
1716 static void
1717 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1718 {
1719         pv_entry_t pv;
1720
1721         pv = pmap_pvh_remove(pvh, pmap, va);
1722         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
1723         free_pv_entry(pmap, pv);
1724 }
1725
1726 /*
1727  * Conditionally create the PV entry for a 4KB page mapping if the required
1728  * memory can be allocated without resorting to reclamation.
1729  */
1730 static boolean_t
1731 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1732     struct rwlock **lockp)
1733 {
1734         pv_entry_t pv;
1735
1736         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1737         /* Pass NULL instead of the lock pointer to disable reclamation. */
1738         if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1739                 pv->pv_va = va;
1740                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1741                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1742                 m->md.pv_gen++;
1743                 return (TRUE);
1744         } else
1745                 return (FALSE);
1746 }
1747
1748 vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX];
1749 #ifdef INVARIANTS
1750 static void
1751 validate_addr(vm_paddr_t addr, vm_size_t size)
1752 {
1753         vm_paddr_t end = addr + size;
1754         bool found = false;
1755
1756         for (int i = 0; i < 2 * phys_avail_count; i += 2) {
1757                 if (addr >= phys_avail_debug[i] &&
1758                         end <= phys_avail_debug[i + 1]) {
1759                         found = true;
1760                         break;
1761                 }
1762         }
1763         KASSERT(found, ("%#lx-%#lx outside of initial phys_avail array",
1764                                         addr, end));
1765 }
1766 #else
1767 static void validate_addr(vm_paddr_t addr, vm_size_t size) {}
1768 #endif
1769 #define DMAP_PAGE_BITS (RPTE_VALID | RPTE_LEAF | RPTE_EAA_MASK | PG_M | PG_A)
1770
1771 static vm_paddr_t
1772 alloc_pt_page(void)
1773 {
1774         vm_paddr_t page;
1775
1776         page = allocpages(1);
1777         pagezero(PHYS_TO_DMAP(page));
1778         return (page);
1779 }
1780
1781 static void
1782 mmu_radix_dmap_range(vm_paddr_t start, vm_paddr_t end)
1783 {
1784         pt_entry_t *pte, pteval;
1785         vm_paddr_t page;
1786
1787         if (bootverbose)
1788                 printf("%s %lx -> %lx\n", __func__, start, end);
1789         while (start < end) {
1790                 pteval = start | DMAP_PAGE_BITS;
1791                 pte = pmap_pml1e(kernel_pmap, PHYS_TO_DMAP(start));
1792                 if ((be64toh(*pte) & RPTE_VALID) == 0) {
1793                         page = alloc_pt_page();
1794                         pde_store(pte, page);
1795                 }
1796                 pte = pmap_l1e_to_l2e(pte, PHYS_TO_DMAP(start));
1797                 if ((start & L2_PAGE_MASK) == 0 &&
1798                         end - start >= L2_PAGE_SIZE) {
1799                         start += L2_PAGE_SIZE;
1800                         goto done;
1801                 } else if ((be64toh(*pte) & RPTE_VALID) == 0) {
1802                         page = alloc_pt_page();
1803                         pde_store(pte, page);
1804                 }
1805
1806                 pte = pmap_l2e_to_l3e(pte, PHYS_TO_DMAP(start));
1807                 if ((start & L3_PAGE_MASK) == 0 &&
1808                         end - start >= L3_PAGE_SIZE) {
1809                         start += L3_PAGE_SIZE;
1810                         goto done;
1811                 } else if ((be64toh(*pte) & RPTE_VALID) == 0) {
1812                         page = alloc_pt_page();
1813                         pde_store(pte, page);
1814                 }
1815                 pte = pmap_l3e_to_pte(pte, PHYS_TO_DMAP(start));
1816                 start += PAGE_SIZE;
1817         done:
1818                 pte_store(pte, pteval);
1819         }
1820 }
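
/*
 * Illustrative sketch (editorial): mmu_radix_dmap_range() installs the
 * largest leaf that is both aligned and fully contained in the remaining
 * range: a 1GB L2 leaf when possible, else a 2MB L3 leaf, else a 4KB PTE.
 * Assuming L2_PAGE_SIZE is 1GB and L3_PAGE_SIZE is 2MB (as the sysctl
 * descriptions later in this file indicate), mapping the physical range
 * [0x3fe00000, 0x80000000) would install one 2MB leaf at 0x3fe00000
 * followed by one 1GB leaf at 0x40000000.
 */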
1821
1822 static void
1823 mmu_radix_dmap_populate(vm_size_t hwphyssz)
1824 {
1825         vm_paddr_t start, end;
1826
1827         for (int i = 0; i < pregions_sz; i++) {
1828                 start = pregions[i].mr_start;
1829                 end = start + pregions[i].mr_size;
1830                 if (hwphyssz && start >= hwphyssz)
1831                         break;
1832                 if (hwphyssz && hwphyssz < end)
1833                         end = hwphyssz;
1834                 mmu_radix_dmap_range(start, end);
1835         }
1836 }
1837
1838 static void
1839 mmu_radix_setup_pagetables(vm_size_t hwphyssz)
1840 {
1841         vm_paddr_t ptpages, pages;
1842         pt_entry_t *pte;
1843         vm_paddr_t l1phys;
1844
1845         bzero(kernel_pmap, sizeof(struct pmap));
1846         PMAP_LOCK_INIT(kernel_pmap);
1847
1848         ptpages = allocpages(3);
1849         l1phys = moea64_bootstrap_alloc(RADIX_PGD_SIZE, RADIX_PGD_SIZE);
1850         validate_addr(l1phys, RADIX_PGD_SIZE);
1851         if (bootverbose)
1852                 printf("l1phys=%lx\n", l1phys);
1853         MPASS((l1phys & (RADIX_PGD_SIZE-1)) == 0);
1854         for (int i = 0; i < RADIX_PGD_SIZE/PAGE_SIZE; i++)
1855                 pagezero(PHYS_TO_DMAP(l1phys + i * PAGE_SIZE));
1856         kernel_pmap->pm_pml1 = (pml1_entry_t *)PHYS_TO_DMAP(l1phys);
1857
1858         mmu_radix_dmap_populate(hwphyssz);
1859
1860         /*
1861          * Create page tables for first 128MB of KVA
1862          */
1863         pages = ptpages;
1864         pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS);
1865         *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1866         pages += PAGE_SIZE;
1867         pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS);
1868         *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1869         pages += PAGE_SIZE;
1870         pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS);
1871         /*
1872          * The kernel page table pages need to be preserved in
1873          * phys_avail and must not overlap with previous allocations.
1874          */
1875         pages = allocpages(nkpt);
1876         if (bootverbose) {
1877                 printf("phys_avail after dmap populate and nkpt allocation\n");
1878                 for (int j = 0; j < 2 * phys_avail_count; j+=2)
1879                         printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
1880                                    j, phys_avail[j], j + 1, phys_avail[j + 1]);
1881         }
1882         KPTphys = pages;
1883         for (int i = 0; i < nkpt; i++, pte++, pages += PAGE_SIZE)
1884                 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT);
1885         kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE;
1886         if (bootverbose)
1887                 printf("kernel_pmap pml1 %p\n", kernel_pmap->pm_pml1);
1888         /*
1889          * Add a physical memory segment (vm_phys_seg) corresponding to the
1890          * preallocated kernel page table pages so that vm_page structures
1891          * representing these pages will be created.  The vm_page structures
1892          * are required for promotion of the corresponding kernel virtual
1893          * addresses to superpage mappings.
1894          */
1895         vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1896 }
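
/*
 * Illustrative arithmetic (editorial): kernel_vm_end above is
 * VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE, so the "first 128MB of KVA"
 * mentioned in the comment corresponds to nkpt == 64 preallocated page table
 * pages, each spanning a 2MB L3 slot: 64 * 2MB = 128MB.
 */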
1897
1898 static void
1899 mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end)
1900 {
1901         vm_paddr_t      kpstart, kpend;
1902         vm_size_t       physsz, hwphyssz;
1903         //uint64_t      l2virt;
1904         int             rm_pavail, proctab_size;
1905         int             i, j;
1906
1907         kpstart = start & ~DMAP_BASE_ADDRESS;
1908         kpend = end & ~DMAP_BASE_ADDRESS;
1909
1910         /* Get physical memory regions from firmware */
1911         mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
1912         CTR0(KTR_PMAP, "mmu_radix_early_bootstrap: physical memory");
1913
1914         if (2 * VM_PHYSSEG_MAX < regions_sz)
1915                 panic("mmu_radix_early_bootstrap: phys_avail too small");
1916
1917         if (bootverbose)
1918                 for (int i = 0; i < regions_sz; i++)
1919                         printf("regions[%d].mr_start=%lx regions[%d].mr_size=%lx\n",
1920                             i, regions[i].mr_start, i, regions[i].mr_size);
1921         /*
1922          * XXX workaround a simulator bug
1923          */
1924         for (int i = 0; i < regions_sz; i++)
1925                 if (regions[i].mr_start & PAGE_MASK) {
1926                         regions[i].mr_start += PAGE_MASK;
1927                         regions[i].mr_start &= ~PAGE_MASK;
1928                         regions[i].mr_size &= ~PAGE_MASK;
1929                 }
1930         if (bootverbose)
1931                 for (int i = 0; i < pregions_sz; i++)
1932                         printf("pregions[%d].mr_start=%lx pregions[%d].mr_size=%lx\n",
1933                             i, pregions[i].mr_start, i, pregions[i].mr_size);
1934
1935         phys_avail_count = 0;
1936         physsz = 0;
1937         hwphyssz = 0;
1938         TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1939         for (i = 0, j = 0; i < regions_sz; i++) {
1940                 if (bootverbose)
1941                         printf("regions[%d].mr_start=%016lx regions[%d].mr_size=%016lx\n",
1942                             i, regions[i].mr_start, i, regions[i].mr_size);
1943
1944                 if (regions[i].mr_size < PAGE_SIZE)
1945                         continue;
1946
1947                 if (hwphyssz != 0 &&
1948                     (physsz + regions[i].mr_size) >= hwphyssz) {
1949                         if (physsz < hwphyssz) {
1950                                 phys_avail[j] = regions[i].mr_start;
1951                                 phys_avail[j + 1] = regions[i].mr_start +
1952                                     (hwphyssz - physsz);
1953                                 physsz = hwphyssz;
1954                                 phys_avail_count++;
1955                                 dump_avail[j] = phys_avail[j];
1956                                 dump_avail[j + 1] = phys_avail[j + 1];
1957                         }
1958                         break;
1959                 }
1960                 phys_avail[j] = regions[i].mr_start;
1961                 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
1962                 dump_avail[j] = phys_avail[j];
1963                 dump_avail[j + 1] = phys_avail[j + 1];
1964
1965                 phys_avail_count++;
1966                 physsz += regions[i].mr_size;
1967                 j += 2;
1968         }
1969
1970         /* Check for overlap with the kernel and exception vectors */
1971         rm_pavail = 0;
1972         for (j = 0; j < 2 * phys_avail_count; j+=2) {
1973                 if (phys_avail[j] < EXC_LAST)
1974                         phys_avail[j] += EXC_LAST;
1975
1976                 if (phys_avail[j] >= kpstart &&
1977                     phys_avail[j + 1] <= kpend) {
1978                         phys_avail[j] = phys_avail[j + 1] = ~0;
1979                         rm_pavail++;
1980                         continue;
1981                 }
1982
1983                 if (kpstart >= phys_avail[j] &&
1984                     kpstart < phys_avail[j + 1]) {
1985                         if (kpend < phys_avail[j + 1]) {
1986                                 phys_avail[2 * phys_avail_count] =
1987                                     (kpend & ~PAGE_MASK) + PAGE_SIZE;
1988                                 phys_avail[2 * phys_avail_count + 1] =
1989                                     phys_avail[j + 1];
1990                                 phys_avail_count++;
1991                         }
1992
1993                         phys_avail[j + 1] = kpstart & ~PAGE_MASK;
1994                 }
1995
1996                 if (kpend >= phys_avail[j] &&
1997                     kpend < phys_avail[j + 1]) {
1998                         if (kpstart > phys_avail[j]) {
1999                                 phys_avail[2 * phys_avail_count] = phys_avail[j];
2000                                 phys_avail[2 * phys_avail_count + 1] =
2001                                     kpstart & ~PAGE_MASK;
2002                                 phys_avail_count++;
2003                         }
2004
2005                         phys_avail[j] = (kpend & ~PAGE_MASK) +
2006                             PAGE_SIZE;
2007                 }
2008         }
2009         qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]), pa_cmp);
2010         for (i = 0; i < 2 * phys_avail_count; i++)
2011                 phys_avail_debug[i] = phys_avail[i];
2012
2013         /* Remove physical available regions marked for removal (~0) */
2014         if (rm_pavail) {
2015                 phys_avail_count -= rm_pavail;
2016                 for (i = 2 * phys_avail_count;
2017                      i < 2*(phys_avail_count + rm_pavail); i+=2)
2018                         phys_avail[i] = phys_avail[i + 1] = 0;
2019         }
2020         if (bootverbose) {
2021                 printf("phys_avail ranges after filtering:\n");
2022                 for (j = 0; j < 2 * phys_avail_count; j+=2)
2023                         printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
2024                                    j, phys_avail[j], j + 1, phys_avail[j + 1]);
2025         }
2026         physmem = btoc(physsz);
2027
2028         /* XXX assume we're running non-virtualized and
2029          * we don't support BHYVE
2030          */
2031         if (isa3_pid_bits == 0)
2032                 isa3_pid_bits = 20;
2033         if (powernv_enabled) {
2034                 parttab_phys =
2035                     moea64_bootstrap_alloc(PARTTAB_SIZE, PARTTAB_SIZE);
2036                 validate_addr(parttab_phys, PARTTAB_SIZE);
2037                 for (int i = 0; i < PARTTAB_SIZE/PAGE_SIZE; i++)
2038                         pagezero(PHYS_TO_DMAP(parttab_phys + i * PAGE_SIZE));
2039
2040         }
2041         proctab_size = 1UL << PROCTAB_SIZE_SHIFT;
2042         proctab0pa = moea64_bootstrap_alloc(proctab_size, proctab_size);
2043         validate_addr(proctab0pa, proctab_size);
2044         for (int i = 0; i < proctab_size/PAGE_SIZE; i++)
2045                 pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE));
2046
2047         mmu_radix_setup_pagetables(hwphyssz);
2048 }
2049
2050 static void
2051 mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end)
2052 {
2053         int             i;
2054         vm_paddr_t      pa;
2055         void            *dpcpu;
2056         vm_offset_t va;
2057
2058         /*
2059          * Set up the Open Firmware pmap and add its mappings if not in real
2060          * mode.
2061          */
2062         if (bootverbose)
2063                 printf("%s enter\n", __func__);
2064
2065         /*
2066          * Calculate the last available physical address, and reserve the
2067          * vm_page_array (upper bound).
2068          */
2069         Maxmem = 0;
2070         for (i = 0; phys_avail[i + 1] != 0; i += 2)
2071                 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
2072
2073         /*
2074          * Remap any early IO mappings (console framebuffer, etc.)
2075          */
2076         bs_remap_earlyboot();
2077
2078         /*
2079          * Allocate a kernel stack with a guard page for thread0 and map it
2080          * into the kernel page map.
2081          */
2082         pa = allocpages(kstack_pages);
2083         va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2084         virtual_avail = va + kstack_pages * PAGE_SIZE;
2085         CTR2(KTR_PMAP, "mmu_radix_late_bootstrap: kstack0 at %#lx (%#lx)", pa, va);
2086         thread0.td_kstack = va;
2087         for (i = 0; i < kstack_pages; i++) {
2088                 mmu_radix_kenter(va, pa);
2089                 pa += PAGE_SIZE;
2090                 va += PAGE_SIZE;
2091         }
2092         thread0.td_kstack_pages = kstack_pages;
2093
2094         /*
2095          * Allocate virtual address space for the message buffer.
2096          */
2097         pa = msgbuf_phys = allocpages((msgbufsize + PAGE_MASK)  >> PAGE_SHIFT);
2098         msgbufp = (struct msgbuf *)PHYS_TO_DMAP(pa);
2099
2100         /*
2101          * Allocate virtual address space for the dynamic percpu area.
2102          */
2103         pa = allocpages(DPCPU_SIZE >> PAGE_SHIFT);
2104         dpcpu = (void *)PHYS_TO_DMAP(pa);
2105         dpcpu_init(dpcpu, curcpu);
2106
2107         crashdumpmap = (caddr_t)virtual_avail;
2108         virtual_avail += MAXDUMPPGS * PAGE_SIZE;
2109
2110         /*
2111          * Reserve some special page table entries/VA space for temporary
2112          * mapping of pages.
2113          */
2114 }
2115
2116 static void
2117 mmu_parttab_init(void)
2118 {
2119         uint64_t ptcr;
2120
2121         isa3_parttab = (struct pate *)PHYS_TO_DMAP(parttab_phys);
2122
2123         if (bootverbose)
2124                 printf("%s parttab: %p\n", __func__, isa3_parttab);
2125         ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2126         if (bootverbose)
2127                 printf("setting ptcr %lx\n", ptcr);
2128         mtspr(SPR_PTCR, ptcr);
2129 }
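
/*
 * Illustrative arithmetic (editorial): the PTCR written above packs the
 * physical base of the partition table with its size encoded as
 * log2(size) - 12 in the low bits.  Purely as an example, if
 * PARTTAB_SIZE_SHIFT were 16 (a 64KB table) and the table sat at physical
 * address 0x1000000, the value would be 0x1000000 | (16 - 12) == 0x1000004.
 * The shift value here is an assumption for illustration only.
 */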
2130
2131 static void
2132 mmu_parttab_update(uint64_t lpid, uint64_t pagetab, uint64_t proctab)
2133 {
2134         uint64_t prev;
2135
2136         if (bootverbose)
2137                 printf("%s isa3_parttab %p lpid %lx pagetab %lx proctab %lx\n", __func__, isa3_parttab,
2138                            lpid, pagetab, proctab);
2139         prev = be64toh(isa3_parttab[lpid].pagetab);
2140         isa3_parttab[lpid].pagetab = htobe64(pagetab);
2141         isa3_parttab[lpid].proctab = htobe64(proctab);
2142
2143         if (prev & PARTTAB_HR) {
2144                 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
2145                              "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2146                 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2147                              "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2148         } else {
2149                 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
2150                              "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2151         }
2152         ttusync();
2153 }
2154
2155 static void
2156 mmu_radix_parttab_init(void)
2157 {
2158         uint64_t pagetab;
2159
2160         mmu_parttab_init();
2161         pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2162             RADIX_PGD_INDEX_SHIFT | PARTTAB_HR;
2163         mmu_parttab_update(0, pagetab, 0);
2164 }
2165
2166 static void
2167 mmu_radix_proctab_register(vm_paddr_t proctabpa, uint64_t table_size)
2168 {
2169         uint64_t pagetab, proctab;
2170
2171         pagetab = be64toh(isa3_parttab[0].pagetab);
2172         proctab = proctabpa | table_size | PARTTAB_GR;
2173         mmu_parttab_update(0, pagetab, proctab);
2174 }
2175
2176 static void
2177 mmu_radix_proctab_init(void)
2178 {
2179
2180         isa3_base_pid = 1;
2181
2182         isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa);
2183         isa3_proctab->proctab0 =
2184             htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2185                 RADIX_PGD_INDEX_SHIFT);
2186
2187         if (powernv_enabled) {
2188                 mmu_radix_proctab_register(proctab0pa, PROCTAB_SIZE_SHIFT - 12);
2189                 __asm __volatile("ptesync" : : : "memory");
2190                 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2191                              "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
2192                 __asm __volatile("eieio; tlbsync; ptesync" : : : "memory");
2193 #ifdef PSERIES
2194         } else {
2195                 int64_t rc;
2196
2197                 rc = phyp_hcall(H_REGISTER_PROC_TBL,
2198                     PROC_TABLE_NEW | PROC_TABLE_RADIX | PROC_TABLE_GTSE,
2199                     proctab0pa, 0, PROCTAB_SIZE_SHIFT - 12);
2200                 if (rc != H_SUCCESS)
2201                         panic("mmu_radix_proctab_init: "
2202                                 "failed to register process table: rc=%jd",
2203                                 (intmax_t)rc);
2204 #endif
2205         }
2206
2207         if (bootverbose)
2208                 printf("process table %p and kernel radix PDE: %p\n",
2209                            isa3_proctab, kernel_pmap->pm_pml1);
2210         mtmsr(mfmsr() | PSL_DR);
2211         mtmsr(mfmsr() & ~PSL_DR);
2212         kernel_pmap->pm_pid = isa3_base_pid;
2213         isa3_base_pid++;
2214 }
2215
2216 void
2217 mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2218     int advice)
2219 {
2220         struct rwlock *lock;
2221         pml1_entry_t *l1e;
2222         pml2_entry_t *l2e;
2223         pml3_entry_t oldl3e, *l3e;
2224         pt_entry_t *pte;
2225         vm_offset_t va, va_next;
2226         vm_page_t m;
2227         boolean_t anychanged;
2228
2229         if (advice != MADV_DONTNEED && advice != MADV_FREE)
2230                 return;
2231         anychanged = FALSE;
2232         PMAP_LOCK(pmap);
2233         for (; sva < eva; sva = va_next) {
2234                 l1e = pmap_pml1e(pmap, sva);
2235                 if ((be64toh(*l1e) & PG_V) == 0) {
2236                         va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2237                         if (va_next < sva)
2238                                 va_next = eva;
2239                         continue;
2240                 }
2241                 l2e = pmap_l1e_to_l2e(l1e, sva);
2242                 if ((be64toh(*l2e) & PG_V) == 0) {
2243                         va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2244                         if (va_next < sva)
2245                                 va_next = eva;
2246                         continue;
2247                 }
2248                 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2249                 if (va_next < sva)
2250                         va_next = eva;
2251                 l3e = pmap_l2e_to_l3e(l2e, sva);
2252                 oldl3e = be64toh(*l3e);
2253                 if ((oldl3e & PG_V) == 0)
2254                         continue;
2255                 else if ((oldl3e & RPTE_LEAF) != 0) {
2256                         if ((oldl3e & PG_MANAGED) == 0)
2257                                 continue;
2258                         lock = NULL;
2259                         if (!pmap_demote_l3e_locked(pmap, l3e, sva, &lock)) {
2260                                 if (lock != NULL)
2261                                         rw_wunlock(lock);
2262
2263                                 /*
2264                                  * The large page mapping was destroyed.
2265                                  */
2266                                 continue;
2267                         }
2268
2269                         /*
2270                          * Unless the page mappings are wired, remove the
2271                          * mapping to a single page so that a subsequent
2272                          * access may repromote.  Since the underlying page
2273                          * table page is fully populated, this removal never
2274                          * frees a page table page.
2275                          */
2276                         if ((oldl3e & PG_W) == 0) {
2277                                 pte = pmap_l3e_to_pte(l3e, sva);
2278                                 KASSERT((be64toh(*pte) & PG_V) != 0,
2279                                     ("pmap_advise: invalid PTE"));
2280                                 pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), NULL,
2281                                     &lock);
2282                                 anychanged = TRUE;
2283                         }
2284                         if (lock != NULL)
2285                                 rw_wunlock(lock);
2286                 }
2287                 if (va_next > eva)
2288                         va_next = eva;
2289                 va = va_next;
2290                 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next;
2291                          pte++, sva += PAGE_SIZE) {
2292                         MPASS(pte == pmap_pte(pmap, sva));
2293
2294                         if ((be64toh(*pte) & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
2295                                 goto maybe_invlrng;
2296                         else if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2297                                 if (advice == MADV_DONTNEED) {
2298                                         /*
2299                                          * Future calls to pmap_is_modified()
2300                                          * can be avoided by making the page
2301                                          * dirty now.
2302                                          */
2303                                         m = PHYS_TO_VM_PAGE(be64toh(*pte) & PG_FRAME);
2304                                         vm_page_dirty(m);
2305                                 }
2306                                 atomic_clear_long(pte, htobe64(PG_M | PG_A));
2307                         } else if ((be64toh(*pte) & PG_A) != 0)
2308                                 atomic_clear_long(pte, htobe64(PG_A));
2309                         else
2310                                 goto maybe_invlrng;
2311                         anychanged = TRUE;
2312                         continue;
2313 maybe_invlrng:
2314                         if (va != va_next) {
2315                                 anychanged = true;
2316                                 va = va_next;
2317                         }
2318                 }
2319                 if (va != va_next)
2320                         anychanged = true;
2321         }
2322         if (anychanged)
2323                 pmap_invalidate_all(pmap);
2324         PMAP_UNLOCK(pmap);
2325 }
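
/*
 * Illustrative sketch (editorial): this handler is reached through the MI
 * pmap_advise() path, e.g. when a process applies MADV_DONTNEED or MADV_FREE
 * to a mapped region; other advice values are ignored above.  A hypothetical
 * userland caller:
 */
#if 0
#include <sys/mman.h>
#include <err.h>

static void
advise_dontneed(void *addr, size_t len)
{
        /* Hint that [addr, addr + len) will not be needed again soon. */
        if (madvise(addr, len, MADV_DONTNEED) != 0)
                warn("madvise");
}
#endif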
2326
2327 /*
2328  * Routines used in machine-dependent code
2329  */
2330 static void
2331 mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
2332 {
2333         uint64_t lpcr;
2334
2335         if (bootverbose)
2336                 printf("%s\n", __func__);
2337         hw_direct_map = 1;
2338         powernv_enabled = (mfmsr() & PSL_HV) ? 1 : 0;
2339         mmu_radix_early_bootstrap(start, end);
2340         if (bootverbose)
2341                 printf("early bootstrap complete\n");
2342         if (powernv_enabled) {
2343                 lpcr = mfspr(SPR_LPCR);
2344                 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2345                 mmu_radix_parttab_init();
2346                 mmu_radix_init_amor();
2347                 if (bootverbose)
2348                         printf("powernv init complete\n");
2349         }
2350         mmu_radix_init_iamr();
2351         mmu_radix_proctab_init();
2352         mmu_radix_pid_set(kernel_pmap);
2353         if (powernv_enabled)
2354                 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2355         else
2356                 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
2357
2358         mmu_radix_late_bootstrap(start, end);
2359         numa_mem_regions(&numa_pregions, &numa_pregions_sz);
2360         if (bootverbose)
2361                 printf("%s done\n", __func__);
2362         pmap_bootstrapped = 1;
2363         dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE);
2364         PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS);
2365 }
2366
2367 static void
2368 mmu_radix_cpu_bootstrap(int ap)
2369 {
2370         uint64_t lpcr;
2371         uint64_t ptcr;
2372
2373         if (powernv_enabled) {
2374                 lpcr = mfspr(SPR_LPCR);
2375                 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2376
2377                 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2378                 mtspr(SPR_PTCR, ptcr);
2379                 mmu_radix_init_amor();
2380         }
2381         mmu_radix_init_iamr();
2382         mmu_radix_pid_set(kernel_pmap);
2383         if (powernv_enabled)
2384                 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2385         else
2386                 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
2387 }
2388
2389 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3e, CTLFLAG_RD, 0,
2390     "2MB page mapping counters");
2391
2392 static u_long pmap_l3e_demotions;
2393 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, demotions, CTLFLAG_RD,
2394     &pmap_l3e_demotions, 0, "2MB page demotions");
2395
2396 static u_long pmap_l3e_mappings;
2397 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, mappings, CTLFLAG_RD,
2398     &pmap_l3e_mappings, 0, "2MB page mappings");
2399
2400 static u_long pmap_l3e_p_failures;
2401 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, p_failures, CTLFLAG_RD,
2402     &pmap_l3e_p_failures, 0, "2MB page promotion failures");
2403
2404 static u_long pmap_l3e_promotions;
2405 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, promotions, CTLFLAG_RD,
2406     &pmap_l3e_promotions, 0, "2MB page promotions");
2407
2408 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0,
2409     "1GB page mapping counters");
2410
2411 static u_long pmap_l2e_demotions;
2412 SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD,
2413     &pmap_l2e_demotions, 0, "1GB page demotions");
2414
2415 void
2416 mmu_radix_clear_modify(vm_page_t m)
2417 {
2418         struct md_page *pvh;
2419         pmap_t pmap;
2420         pv_entry_t next_pv, pv;
2421         pml3_entry_t oldl3e, *l3e;
2422         pt_entry_t oldpte, *pte;
2423         struct rwlock *lock;
2424         vm_offset_t va;
2425         int md_gen, pvh_gen;
2426
2427         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2428             ("pmap_clear_modify: page %p is not managed", m));
2429         vm_page_assert_busied(m);
2430         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
2431
2432         /*
2433          * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
2434          * If the object containing the page is locked and the page is not
2435          * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2436          */
2437         if ((m->a.flags & PGA_WRITEABLE) == 0)
2438                 return;
2439         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2440             pa_to_pvh(VM_PAGE_TO_PHYS(m));
2441         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2442         rw_wlock(lock);
2443 restart:
2444         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
2445                 pmap = PV_PMAP(pv);
2446                 if (!PMAP_TRYLOCK(pmap)) {
2447                         pvh_gen = pvh->pv_gen;
2448                         rw_wunlock(lock);
2449                         PMAP_LOCK(pmap);
2450                         rw_wlock(lock);
2451                         if (pvh_gen != pvh->pv_gen) {
2452                                 PMAP_UNLOCK(pmap);
2453                                 goto restart;
2454                         }
2455                 }
2456                 va = pv->pv_va;
2457                 l3e = pmap_pml3e(pmap, va);
2458                 oldl3e = be64toh(*l3e);
2459                 if ((oldl3e & PG_RW) != 0) {
2460                         if (pmap_demote_l3e_locked(pmap, l3e, va, &lock)) {
2461                                 if ((oldl3e & PG_W) == 0) {
2462                                         /*
2463                                          * Write protect the mapping to a
2464                                          * single page so that a subsequent
2465                                          * write access may repromote.
2466                                          */
2467                                         va += VM_PAGE_TO_PHYS(m) - (oldl3e &
2468                                             PG_PS_FRAME);
2469                                         pte = pmap_l3e_to_pte(l3e, va);
2470                                         oldpte = be64toh(*pte);
2471                                         if ((oldpte & PG_V) != 0) {
2472                                                 while (!atomic_cmpset_long(pte,
2473                                                     htobe64(oldpte),
2474                                                         htobe64((oldpte | RPTE_EAA_R) & ~(PG_M | PG_RW))))
2475                                                            oldpte = be64toh(*pte);
2476                                                 vm_page_dirty(m);
2477                                                 pmap_invalidate_page(pmap, va);
2478                                         }
2479                                 }
2480                         }
2481                 }
2482                 PMAP_UNLOCK(pmap);
2483         }
2484         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2485                 pmap = PV_PMAP(pv);
2486                 if (!PMAP_TRYLOCK(pmap)) {
2487                         md_gen = m->md.pv_gen;
2488                         pvh_gen = pvh->pv_gen;
2489                         rw_wunlock(lock);
2490                         PMAP_LOCK(pmap);
2491                         rw_wlock(lock);
2492                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2493                                 PMAP_UNLOCK(pmap);
2494                                 goto restart;
2495                         }
2496                 }
2497                 l3e = pmap_pml3e(pmap, pv->pv_va);
2498                 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_clear_modify: found"
2499                     " a 2mpage in page %p's pv list", m));
2500                 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
2501                 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2502                         atomic_clear_long(pte, htobe64(PG_M));
2503                         pmap_invalidate_page(pmap, pv->pv_va);
2504                 }
2505                 PMAP_UNLOCK(pmap);
2506         }
2507         rw_wunlock(lock);
2508 }
2509
2510 void
2511 mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2512     vm_size_t len, vm_offset_t src_addr)
2513 {
2514         struct rwlock *lock;
2515         struct spglist free;
2516         vm_offset_t addr;
2517         vm_offset_t end_addr = src_addr + len;
2518         vm_offset_t va_next;
2519         vm_page_t dst_pdpg, dstmpte, srcmpte;
2520         bool invalidate_all;
2521
2522         CTR6(KTR_PMAP,
2523             "%s(dst_pmap=%p, src_pmap=%p, dst_addr=%lx, len=%lu, src_addr=%lx)\n",
2524             __func__, dst_pmap, src_pmap, dst_addr, len, src_addr);
2525
2526         if (dst_addr != src_addr)
2527                 return;
2528         lock = NULL;
2529         invalidate_all = false;
2530         if (dst_pmap < src_pmap) {
2531                 PMAP_LOCK(dst_pmap);
2532                 PMAP_LOCK(src_pmap);
2533         } else {
2534                 PMAP_LOCK(src_pmap);
2535                 PMAP_LOCK(dst_pmap);
2536         }
2537
2538         for (addr = src_addr; addr < end_addr; addr = va_next) {
2539                 pml1_entry_t *l1e;
2540                 pml2_entry_t *l2e;
2541                 pml3_entry_t srcptepaddr, *l3e;
2542                 pt_entry_t *src_pte, *dst_pte;
2543
2544                 l1e = pmap_pml1e(src_pmap, addr);
2545                 if ((be64toh(*l1e) & PG_V) == 0) {
2546                         va_next = (addr + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2547                         if (va_next < addr)
2548                                 va_next = end_addr;
2549                         continue;
2550                 }
2551
2552                 l2e = pmap_l1e_to_l2e(l1e, addr);
2553                 if ((be64toh(*l2e) & PG_V) == 0) {
2554                         va_next = (addr + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2555                         if (va_next < addr)
2556                                 va_next = end_addr;
2557                         continue;
2558                 }
2559
2560                 va_next = (addr + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2561                 if (va_next < addr)
2562                         va_next = end_addr;
2563
2564                 l3e = pmap_l2e_to_l3e(l2e, addr);
2565                 srcptepaddr = be64toh(*l3e);
2566                 if (srcptepaddr == 0)
2567                         continue;
2568
2569                 if (srcptepaddr & RPTE_LEAF) {
2570                         if ((addr & L3_PAGE_MASK) != 0 ||
2571                             addr + L3_PAGE_SIZE > end_addr)
2572                                 continue;
2573                         dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL);
2574                         if (dst_pdpg == NULL)
2575                                 break;
2576                         l3e = (pml3_entry_t *)
2577                             PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
2578                         l3e = &l3e[pmap_pml3e_index(addr)];
2579                         if (be64toh(*l3e) == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
2580                             pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr,
2581                             PMAP_ENTER_NORECLAIM, &lock))) {
2582                                 *l3e = htobe64(srcptepaddr & ~PG_W);
2583                                 pmap_resident_count_inc(dst_pmap,
2584                                     L3_PAGE_SIZE / PAGE_SIZE);
2585                                 atomic_add_long(&pmap_l3e_mappings, 1);
2586                         } else
2587                                 dst_pdpg->ref_count--;
2588                         continue;
2589                 }
2590
2591                 srcptepaddr &= PG_FRAME;
2592                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
2593                 KASSERT(srcmpte->ref_count > 0,
2594                     ("pmap_copy: source page table page is unused"));
2595
2596                 if (va_next > end_addr)
2597                         va_next = end_addr;
2598
2599                 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
2600                 src_pte = &src_pte[pmap_pte_index(addr)];
2601                 dstmpte = NULL;
2602                 while (addr < va_next) {
2603                         pt_entry_t ptetemp;
2604                         ptetemp = be64toh(*src_pte);
2605                         /*
2606                          * We only virtual-copy managed pages.
2607                          */
2608                         if ((ptetemp & PG_MANAGED) != 0) {
2609                                 if (dstmpte != NULL &&
2610                                     dstmpte->pindex == pmap_l3e_pindex(addr))
2611                                         dstmpte->ref_count++;
2612                                 else if ((dstmpte = pmap_allocpte(dst_pmap,
2613                                     addr, NULL)) == NULL)
2614                                         goto out;
2615                                 dst_pte = (pt_entry_t *)
2616                                     PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
2617                                 dst_pte = &dst_pte[pmap_pte_index(addr)];
2618                                 if (be64toh(*dst_pte) == 0 &&
2619                                     pmap_try_insert_pv_entry(dst_pmap, addr,
2620                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
2621                                     &lock)) {
2622                                         /*
2623                                          * Clear the wired, modified, and
2624                                          * accessed (referenced) bits
2625                                          * during the copy.
2626                                          */
2627                                         *dst_pte = htobe64(ptetemp & ~(PG_W | PG_M |
2628                                             PG_A));
2629                                         pmap_resident_count_inc(dst_pmap, 1);
2630                                 } else {
2631                                         SLIST_INIT(&free);
2632                                         if (pmap_unwire_ptp(dst_pmap, addr,
2633                                             dstmpte, &free)) {
2634                                                 /*
2635                                                  * Although "addr" is not
2636                                                  * mapped, paging-structure
2637                                                  * caches could nonetheless
2638                                                  * have entries that refer to
2639                                                  * the freed page table pages.
2640                                                  * Invalidate those entries.
2641                                                  */
2642                                                 invalidate_all = true;
2643                                                 vm_page_free_pages_toq(&free,
2644                                                     true);
2645                                         }
2646                                         goto out;
2647                                 }
2648                                 if (dstmpte->ref_count >= srcmpte->ref_count)
2649                                         break;
2650                         }
2651                         addr += PAGE_SIZE;
2652                         if (__predict_false((addr & L3_PAGE_MASK) == 0))
2653                                 src_pte = pmap_pte(src_pmap, addr);
2654                         else
2655                                 src_pte++;
2656                 }
2657         }
2658 out:
2659         if (invalidate_all)
2660                 pmap_invalidate_all(dst_pmap);
2661         if (lock != NULL)
2662                 rw_wunlock(lock);
2663         PMAP_UNLOCK(src_pmap);
2664         PMAP_UNLOCK(dst_pmap);
2665 }
2666
2667 static void
2668 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
2669 {
2670         vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2671         vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2672
2673         CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
2674         /*
2675          * XXX slow
2676          */
2677         bcopy((void *)src, (void *)dst, PAGE_SIZE);
2678 }
2679
2680 static void
2681 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2682     vm_offset_t b_offset, int xfersize)
2683 {
2684         void *a_cp, *b_cp;
2685         vm_offset_t a_pg_offset, b_pg_offset;
2686         int cnt;
2687
2688         CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
2689             a_offset, mb, b_offset, xfersize);
2690         
2691         while (xfersize > 0) {
2692                 a_pg_offset = a_offset & PAGE_MASK;
2693                 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2694                 a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
2695                     VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
2696                     a_pg_offset;
2697                 b_pg_offset = b_offset & PAGE_MASK;
2698                 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2699                 b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
2700                     VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
2701                     b_pg_offset;
2702                 bcopy(a_cp, b_cp, cnt);
2703                 a_offset += cnt;
2704                 b_offset += cnt;
2705                 xfersize -= cnt;
2706         }
2707 }
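
/*
 * Illustrative arithmetic (editorial): each iteration above copies at most
 * up to the next 4KB page boundary on either side.  For example, with
 * a_offset == 0xf00, b_offset == 0x80 and xfersize == 0x300:
 *   cnt = min(0x300, 0x1000 - 0xf00) = 0x100, then
 *   cnt = min(0x100, 0x1000 - 0x080) = 0x100,
 * so 0x100 bytes are copied, the offsets advance, and the remaining 0x200
 * bytes are handled by the next iteration.
 */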
2708
2709 #if VM_NRESERVLEVEL > 0
2710 /*
2711  * Tries to promote the 512, contiguous 4KB page mappings that are within a
2712  * single page table page (PTP) to a single 2MB page mapping.  For promotion
2713  * to occur, two conditions must be met: (1) the 4KB page mappings must map
2714  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2715  * identical characteristics.
2716  */
2717 static int
2718 pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va,
2719     struct rwlock **lockp)
2720 {
2721         pml3_entry_t newpde;
2722         pt_entry_t *firstpte, oldpte, pa, *pte;
2723         vm_page_t mpte;
2724
2725         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2726
2727         /*
2728          * Examine the first PTE in the specified PTP.  Abort if this PTE is
2729          * either invalid, unused, or does not map the first 4KB physical page
2730          * within a 2MB page.
2731          */
2732         firstpte = (pt_entry_t *)PHYS_TO_DMAP(be64toh(*pde) & PG_FRAME);
2733 setpde:
2734         newpde = be64toh(*firstpte);
2735         if ((newpde & ((PG_FRAME & L3_PAGE_MASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
2736                 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2737                     " in pmap %p", va, pmap);
2738                 goto fail;
2739         }
2740         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
2741                 /*
2742                  * When PG_M is already clear, PG_RW can be cleared without
2743                  * a TLB invalidation.
2744                  */
2745                 if (!atomic_cmpset_long(firstpte, htobe64(newpde), htobe64((newpde | RPTE_EAA_R) & ~RPTE_EAA_W)))
2746                         goto setpde;
2747                 newpde &= ~RPTE_EAA_W;
2748         }
2749
2750         /*
2751          * Examine each of the other PTEs in the specified PTP.  Abort if this
2752          * PTE maps an unexpected 4KB physical page or does not have identical
2753          * characteristics to the first PTE.
2754          */
2755         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + L3_PAGE_SIZE - PAGE_SIZE;
2756         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
2757 setpte:
2758                 oldpte = be64toh(*pte);
2759                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
2760                         CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2761                             " in pmap %p", va, pmap);
2762                         goto fail;
2763                 }
2764                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
2765                         /*
2766                          * When PG_M is already clear, PG_RW can be cleared
2767                          * without a TLB invalidation.
2768                          */
2769                         if (!atomic_cmpset_long(pte, htobe64(oldpte), htobe64((oldpte | RPTE_EAA_R) & ~RPTE_EAA_W)))
2770                                 goto setpte;
2771                         oldpte &= ~RPTE_EAA_W;
2772                         CTR2(KTR_PMAP, "pmap_promote_l3e: protect for va %#lx"
2773                             " in pmap %p", (oldpte & PG_FRAME & L3_PAGE_MASK) |
2774                             (va & ~L3_PAGE_MASK), pmap);
2775                 }
2776                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
2777                         CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2778                             " in pmap %p", va, pmap);
2779                         goto fail;
2780                 }
2781                 pa -= PAGE_SIZE;
2782         }
2783
2784         /*
2785          * Save the page table page in its current state until the PDE
2786          * mapping the superpage is demoted by pmap_demote_l3e() or
2787          * destroyed by pmap_remove_l3e().
2788          */
2789         mpte = PHYS_TO_VM_PAGE(be64toh(*pde) & PG_FRAME);
2790         KASSERT(mpte >= vm_page_array &&
2791             mpte < &vm_page_array[vm_page_array_size],
2792             ("pmap_promote_l3e: page table page is out of range"));
2793         KASSERT(mpte->pindex == pmap_l3e_pindex(va),
2794             ("pmap_promote_l3e: page table page's pindex is wrong"));
2795         if (pmap_insert_pt_page(pmap, mpte)) {
2796                 CTR2(KTR_PMAP,
2797                     "pmap_promote_l3e: failure for va %#lx in pmap %p", va,
2798                     pmap);
2799                 goto fail;
2800         }
2801
2802         /*
2803          * Promote the pv entries.
2804          */
2805         if ((newpde & PG_MANAGED) != 0)
2806                 pmap_pv_promote_l3e(pmap, va, newpde & PG_PS_FRAME, lockp);
2807
2808         pte_store(pde, PG_PROMOTED | newpde);
2809         ptesync();
2810         atomic_add_long(&pmap_l3e_promotions, 1);
2811         CTR2(KTR_PMAP, "pmap_promote_l3e: success for va %#lx"
2812             " in pmap %p", va, pmap);
2813         return (0);
2814  fail:
2815         atomic_add_long(&pmap_l3e_p_failures, 1);
2816         return (KERN_FAILURE);
2817 }
2818 #endif /* VM_NRESERVLEVEL > 0 */
2819
2820 int
2821 mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
2822     vm_prot_t prot, u_int flags, int8_t psind)
2823 {
2824         struct rwlock *lock;
2825         pml3_entry_t *l3e;
2826         pt_entry_t *pte;
2827         pt_entry_t newpte, origpte;
2828         pv_entry_t pv;
2829         vm_paddr_t opa, pa;
2830         vm_page_t mpte, om;
2831         int rv, retrycount;
2832         boolean_t nosleep, invalidate_all, invalidate_page;
2833
2834         va = trunc_page(va);
2835         retrycount = 0;
2836         invalidate_page = invalidate_all = false;
2837         CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
2838             m, prot, flags, psind);
2839         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
2840         KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
2841             ("pmap_enter: managed mapping within the clean submap"));
2842         if ((m->oflags & VPO_UNMANAGED) == 0)
2843                 VM_PAGE_OBJECT_BUSY_ASSERT(m);
2844
2845         KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
2846             ("pmap_enter: flags %u has reserved bits set", flags));
2847         pa = VM_PAGE_TO_PHYS(m);
2848         newpte = (pt_entry_t)(pa | PG_A | PG_V | RPTE_LEAF);
2849         if ((flags & VM_PROT_WRITE) != 0)
2850                 newpte |= PG_M;
2851         if ((flags & VM_PROT_READ) != 0)
2852                 newpte |= PG_A;
2853         if (prot & VM_PROT_READ)
2854                 newpte |= RPTE_EAA_R;
2855         if ((prot & VM_PROT_WRITE) != 0)
2856                 newpte |= RPTE_EAA_W;
2857         KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
2858             ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
2859
2860         if (prot & VM_PROT_EXECUTE)
2861                 newpte |= PG_X;
2862         if ((flags & PMAP_ENTER_WIRED) != 0)
2863                 newpte |= PG_W;
2864         if (va >= DMAP_MIN_ADDRESS)
2865                 newpte |= RPTE_EAA_P;
2866         newpte |= pmap_cache_bits(m->md.mdpg_cache_attrs);
2867         /*
2868          * Set modified bit gratuitously for writeable mappings if
2869          * the page is unmanaged. We do not want to take a fault
2870          * to do the dirty bit accounting for these mappings.
2871          */
2872         if ((m->oflags & VPO_UNMANAGED) != 0) {
2873                 if ((newpte & PG_RW) != 0)
2874                         newpte |= PG_M;
2875         } else
2876                 newpte |= PG_MANAGED;
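        /*
         * Example of a fully composed leaf PTE (a sketch, assuming a managed,
         * read/write user mapping with VM_PROT_WRITE set in "flags"):
         * pa | PG_V | PG_A | RPTE_LEAF | RPTE_EAA_R | RPTE_EAA_W | PG_M |
         * PG_MANAGED, plus the cache-attribute bits from pmap_cache_bits().
         */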
2877
2878         lock = NULL;
2879         PMAP_LOCK(pmap);
2880         if (psind == 1) {
2881                 /* Assert the required virtual and physical alignment. */
2882                 KASSERT((va & L3_PAGE_MASK) == 0, ("pmap_enter: va unaligned"));
2883                 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
2884                 rv = pmap_enter_l3e(pmap, va, newpte | RPTE_LEAF, flags, m, &lock);
2885                 goto out;
2886         }
2887         mpte = NULL;
2888
2889         /*
2890          * In the case that a page table page is not
2891          * resident, we are creating it here.
2892          */
2893 retry:
2894         l3e = pmap_pml3e(pmap, va);
2895         if (l3e != NULL && (be64toh(*l3e) & PG_V) != 0 && ((be64toh(*l3e) & RPTE_LEAF) == 0 ||
2896             pmap_demote_l3e_locked(pmap, l3e, va, &lock))) {
2897                 pte = pmap_l3e_to_pte(l3e, va);
2898                 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
2899                         mpte = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME);
2900                         mpte->ref_count++;
2901                 }
2902         } else if (va < VM_MAXUSER_ADDRESS) {
2903                 /*
2904                  * Here if the pte page isn't mapped, or if it has been
2905                  * deallocated.
2906                  */
2907                 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2908                 mpte = _pmap_allocpte(pmap, pmap_l3e_pindex(va),
2909                     nosleep ? NULL : &lock);
2910                 if (mpte == NULL && nosleep) {
2911                         rv = KERN_RESOURCE_SHORTAGE;
2912                         goto out;
2913                 }
2914                 if (__predict_false(retrycount++ == 6))
2915                         panic("too many retries");
2916                 invalidate_all = true;
2917                 goto retry;
2918         } else
2919                 panic("pmap_enter: invalid page directory va=%#lx", va);
2920
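        /*
         * At this point "pte" addresses the 4KB PTE slot for "va", and every
         * page table page needed to reach it is resident.
         */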
2921         origpte = be64toh(*pte);
2922         pv = NULL;
2923
2924         /*
2925          * Is the specified virtual address already mapped?
2926          */
2927         if ((origpte & PG_V) != 0) {
2928 #ifdef INVARIANTS
2929                 if (VERBOSE_PMAP || pmap_logging) {
2930                         printf("cow fault pmap_enter(%p, %#lx, %p, %#x, %x, %d) --"
2931                             " asid=%lu curpid=%d name=%s origpte=0x%lx\n",
2932                             pmap, va, m, prot, flags, psind, pmap->pm_pid,
2933                             curproc->p_pid, curproc->p_comm, origpte);
2934                         pmap_pte_walk(pmap->pm_pml1, va);
2935                 }
2936 #endif
2937                 /*
2938                  * Wiring change, just update stats. We don't worry about
2939                  * wiring PT pages as they remain resident as long as there
2940                  * are valid mappings in them. Hence, if a user page is wired,
2941                  * the PT page will be also.
2942                  */
2943                 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
2944                         pmap->pm_stats.wired_count++;
2945                 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
2946                         pmap->pm_stats.wired_count--;
2947
2948                 /*
2949                  * Remove the extra PT page reference.
2950                  */
2951                 if (mpte != NULL) {
2952                         mpte->ref_count--;
2953                         KASSERT(mpte->ref_count > 0,
2954                             ("pmap_enter: missing reference to page table page,"
2955                              " va: 0x%lx", va));
2956                 }
2957
2958                 /*
2959                  * Has the physical page changed?
2960                  */
2961                 opa = origpte & PG_FRAME;
2962                 if (opa == pa) {
2963                         /*
2964                          * No, might be a protection or wiring change.
2965                          */
2966                         if ((origpte & PG_MANAGED) != 0 &&
2967                             (newpte & PG_RW) != 0)
2968                                 vm_page_aflag_set(m, PGA_WRITEABLE);
2969                         if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) {
2970                                 if ((newpte & (PG_A|PG_M)) != (origpte & (PG_A|PG_M))) {
2971                                         if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte)))
2972                                                 goto retry;
2973                                         if ((newpte & PG_M) != (origpte & PG_M))
2974                                                 vm_page_dirty(m);
2975                                         if ((newpte & PG_A) != (origpte & PG_A))
2976                                                 vm_page_aflag_set(m, PGA_REFERENCED);
2977                                         ptesync();
2978                                 } else
2979                                         invalidate_all = true;
2980                                 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
2981                                         goto unchanged;
2982                         }
2983                         goto validate;
2984                 }
2985
2986                 /*
2987                  * The physical page has changed.  Temporarily invalidate
2988                  * the mapping.  This ensures that all threads sharing the
2989                  * pmap keep a consistent view of the mapping, which is
2990                  * necessary for the correct handling of COW faults.  It
2991                  * also permits reuse of the old mapping's PV entry,
2992                  * avoiding an allocation.
2993                  *
2994                  * For consistency, handle unmanaged mappings the same way.
2995                  */
2996                 origpte = be64toh(pte_load_clear(pte));
2997                 KASSERT((origpte & PG_FRAME) == opa,
2998                     ("pmap_enter: unexpected pa update for %#lx", va));
2999                 if ((origpte & PG_MANAGED) != 0) {
3000                         om = PHYS_TO_VM_PAGE(opa);
3001
3002                         /*
3003                          * The pmap lock is sufficient to synchronize with
3004                          * concurrent calls to pmap_page_test_mappings() and
3005                          * pmap_ts_referenced().
3006                          */
3007                         if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3008                                 vm_page_dirty(om);
3009                         if ((origpte & PG_A) != 0)
3010                                 vm_page_aflag_set(om, PGA_REFERENCED);
3011                         CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
3012                         pv = pmap_pvh_remove(&om->md, pmap, va);
3013                         if ((newpte & PG_MANAGED) == 0)
3014                                 free_pv_entry(pmap, pv);
3015 #ifdef INVARIANTS
3016                         else if (origpte & PG_MANAGED) {
3017                                 if (pv == NULL) {
3018                                         pmap_page_print_mappings(om);
3019                                         MPASS(pv != NULL);
3020                                 }
3021                         }
3022 #endif
3023                         if ((om->a.flags & PGA_WRITEABLE) != 0 &&
3024                             TAILQ_EMPTY(&om->md.pv_list) &&
3025                             ((om->flags & PG_FICTITIOUS) != 0 ||
3026                             TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
3027                                 vm_page_aflag_clear(om, PGA_WRITEABLE);
3028                 }
3029                 if ((origpte & PG_A) != 0)
3030                         invalidate_page = true;
3031                 origpte = 0;
3032         } else {
3033                 if (pmap != kernel_pmap) {
3034 #ifdef INVARIANTS
3035                         if (VERBOSE_PMAP || pmap_logging)
3036                                 printf("pmap_enter(%p, %#lx, %p, %#x, %x, %d) -- asid=%lu curpid=%d name=%s\n",
3037                                     pmap, va, m, prot, flags, psind,
3038                                     pmap->pm_pid, curproc->p_pid,
3039                                     curproc->p_comm);
3040 #endif
3041                 }
3042
3043                 /*
3044                  * Increment the counters.
3045                  */
3046                 if ((newpte & PG_W) != 0)
3047                         pmap->pm_stats.wired_count++;
3048                 pmap_resident_count_inc(pmap, 1);
3049         }
3050
3051         /*
3052          * Enter on the PV list if part of our managed memory.
3053          */
3054         if ((newpte & PG_MANAGED) != 0) {
3055                 if (pv == NULL) {
3056                         pv = get_pv_entry(pmap, &lock);
3057                         pv->pv_va = va;
3058                 }
3059 #ifdef VERBOSE_PV
3060                 else
3061                         printf("reassigning pv: %p to pmap: %p\n",
3062                                    pv, pmap);
3063 #endif
3064                 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3065                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3066                 m->md.pv_gen++;
3067                 if ((newpte & PG_RW) != 0)
3068                         vm_page_aflag_set(m, PGA_WRITEABLE);
3069         }
3070
3071         /*
3072          * Update the PTE.
3073          */
3074         if ((origpte & PG_V) != 0) {
3075 validate:
3076                 origpte = be64toh(pte_load_store(pte, htobe64(newpte)));
3077                 KASSERT((origpte & PG_FRAME) == pa,
3078                     ("pmap_enter: unexpected pa update for %#lx", va));
3079                 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
3080                     (PG_M | PG_RW)) {
3081                         if ((origpte & PG_MANAGED) != 0)
3082                                 vm_page_dirty(m);
3083                         invalidate_page = true;
3084
3085                         /*
3086                          * Although the PTE may still have PG_RW set, TLB
3087                          * invalidation may nonetheless be required because
3088                          * the PTE no longer has PG_M set.
3089                          */
3090                 } else if ((origpte & PG_X) != 0 || (newpte & PG_X) == 0) {
3091                         /*
3092                          * Removing capabilities requires invalidation on POWER
3093                          */
3094                         invalidate_page = true;
3095                         goto unchanged;
3096                 }
3097                 if ((origpte & PG_A) != 0)
3098                         invalidate_page = true;
3099         } else {
3100                 pte_store(pte, newpte);
3101                 ptesync();
3102         }
3103 unchanged:
3104
3105 #if VM_NRESERVLEVEL > 0
3106         /*
3107          * If both the page table page and the reservation are fully
3108          * populated, then attempt promotion.
3109          */
3110         if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
3111             mmu_radix_ps_enabled(pmap) &&
3112             (m->flags & PG_FICTITIOUS) == 0 &&
3113             vm_reserv_level_iffullpop(m) == 0 &&
3114                 pmap_promote_l3e(pmap, l3e, va, &lock) == 0)
3115                 invalidate_all = true;
3116 #endif
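        /*
         * Perform any TLB maintenance recorded above: invalidate_all flushes
         * every translation belonging to this pmap, while invalidate_page
         * targets only the translation for "va".
         */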
3117         if (invalidate_all)
3118                 pmap_invalidate_all(pmap);
3119         else if (invalidate_page)
3120                 pmap_invalidate_page(pmap, va);
3121
3122         rv = KERN_SUCCESS;
3123 out:
3124         if (lock != NULL)
3125                 rw_wunlock(lock);
3126         PMAP_UNLOCK(pmap);
3127
3128         return (rv);
3129 }
3130
3131 /*
3132  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
3133  * if successful.  Returns false if (1) a page table page cannot be allocated
3134  * without sleeping, (2) a mapping already exists at the specified virtual
3135  * address, or (3) a PV entry cannot be allocated without reclaiming another
3136  * PV entry.
3137  */
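/*
 * For example (a sketch): a managed, read-only, executable request yields
 * newpde == VM_PAGE_TO_PHYS(m) | cache bits | RPTE_LEAF | PG_V | PG_MANAGED |
 * PG_X | RPTE_EAA_R, which pmap_enter_l3e() then installs without sleeping,
 * without replacing an existing mapping, and without reclaiming a PV entry.
 */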
3138 static bool
3139 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3140     struct rwlock **lockp)
3141 {
3142         pml3_entry_t newpde;
3143
3144         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3145         newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs) |
3146             RPTE_LEAF | PG_V;
3147         if ((m->oflags & VPO_UNMANAGED) == 0)
3148                 newpde |= PG_MANAGED;
3149         if (prot & VM_PROT_EXECUTE)
3150                 newpde |= PG_X;
3151         if (prot & VM_PROT_READ)
3152                 newpde |= RPTE_EAA_R;
3153         if (va >= DMAP_MIN_ADDRESS)
3154                 newpde |= RPTE_EAA_P;
3155         return (pmap_enter_l3e(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
3156             PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3157             KERN_SUCCESS);
3158 }
3159
3160 /*
3161  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
3162  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3163  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3164  * a mapping already exists at the specified virtual address.  Returns
3165  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3166  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
3167  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3168  *
3169  * The parameter "m" is only used when creating a managed, writeable mapping.
3170  */
3171 static int
3172 pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
3173     vm_page_t m, struct rwlock **lockp)
3174 {
3175         struct spglist free;
3176         pml3_entry_t oldl3e, *l3e;
3177         vm_page_t mt, pdpg;
3178
3179         KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
3180             ("pmap_enter_pde: newpde is missing PG_M"));
3181         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3182
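        /*
         * Overall flow: obtain the page that holds the L3 entry for "va"
         * (allocating it if necessary), tear down any existing mapping there
         * unless PMAP_ENTER_NOREPLACE was given, create the superpage PV
         * entry for managed mappings, and finally store the leaf L3 entry
         * followed by a ptesync().
         */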
3183         if ((pdpg = pmap_allocl3e(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3184             NULL : lockp)) == NULL) {
3185                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3186                     " in pmap %p", va, pmap);
3187                 return (KERN_RESOURCE_SHORTAGE);
3188         }
3189         l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
3190         l3e = &l3e[pmap_pml3e_index(va)];
3191         oldl3e = be64toh(*l3e);
3192         if ((oldl3e & PG_V) != 0) {
3193                 KASSERT(pdpg->ref_count > 1,
3194                     ("pmap_enter_pde: pdpg's wire count is too low"));
3195                 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3196                         pdpg->ref_count--;
3197                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3198                             " in pmap %p", va, pmap);
3199                         return (KERN_FAILURE);
3200                 }
3201                 /* Break the existing mapping(s). */
3202                 SLIST_INIT(&free);
3203                 if ((oldl3e & RPTE_LEAF) != 0) {
3204                         /*
3205                          * The reference to the PD page that was acquired by
3206                          * pmap_allocl3e() ensures that it won't be freed.
3207                          * However, if the PDE resulted from a promotion, then
3208                          * a reserved PT page could be freed.
3209                          */
3210                         (void)pmap_remove_l3e(pmap, l3e, va, &free, lockp);
3211                         pmap_invalidate_l3e_page(pmap, va, oldl3e);
3212                 } else {
3213                         if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e,
3214                             &free, lockp))
3215                                pmap_invalidate_all(pmap);
3216                 }
3217                 vm_page_free_pages_toq(&free, true);
3218                 if (va >= VM_MAXUSER_ADDRESS) {
3219                         mt = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME);
3220                         if (pmap_insert_pt_page(pmap, mt)) {
3221                                 /*
3222                                  * XXX Currently, this can't happen because
3223                                  * we do not perform pmap_enter(psind == 1)
3224                                  * on the kernel pmap.
3225                                  */
3226                                 panic("pmap_enter_pde: trie insert failed");
3227                         }
3228                 } else
3229                         KASSERT(be64toh(*l3e) == 0, ("pmap_enter_pde: non-zero pde %p",
3230                             l3e));
3231         }
3232         if ((newpde & PG_MANAGED) != 0) {
3233                 /*
3234                  * Abort this mapping if its PV entry could not be created.
3235                  */
3236                 if (!pmap_pv_insert_l3e(pmap, va, newpde, flags, lockp)) {
3237                         SLIST_INIT(&free);
3238                         if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
3239                                 /*
3240                                  * Although "va" is not mapped, paging-
3241                                  * structure caches could nonetheless have
3242                                  * entries that refer to the freed page table
3243                                  * pages.  Invalidate those entries.
3244                                  */
3245                                 pmap_invalidate_page(pmap, va);
3246                                 vm_page_free_pages_toq(&free, true);
3247                         }
3248                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3249                             " in pmap %p", va, pmap);
3250                         return (KERN_RESOURCE_SHORTAGE);
3251                 }
3252                 if ((newpde & PG_RW) != 0) {
3253                         for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
3254                                 vm_page_aflag_set(mt, PGA_WRITEABLE);
3255                 }
3256         }
3257
3258         /*
3259          * Increment counters.
3260          */
3261         if ((newpde & PG_W) != 0)
3262                 pmap->pm_stats.wired_count += L3_PAGE_SIZE / PAGE_SIZE;
3263         pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
3264
3265         /*
3266          * Map the superpage.  (This is not a promoted mapping; there will not
3267          * be any lingering 4KB page mappings in the TLB.)
3268          */
3269         pte_store(l3e, newpde);
3270         ptesync();
3271
3272         atomic_add_long(&pmap_l3e_mappings, 1);
3273         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3274             " in pmap %p", va, pmap);
3275         return (KERN_SUCCESS);
3276 }
3277
3278 void
3279 mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
3280     vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
3281 {
3282         struct rwlock *lock;
3283         vm_offset_t va;
3284         vm_page_t m, mpte;
3285         vm_pindex_t diff, psize;
3286         bool invalidate;
3287
3288         VM_OBJECT_ASSERT_LOCKED(m_start->object);
3289
3290         CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
3291             end, m_start, prot);
3292
3293         invalidate = false;
3294         psize = atop(end - start);
3295         mpte = NULL;
3296         m = m_start;
3297         lock = NULL;
3298         PMAP_LOCK(pmap);
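        /*
         * Walk the object's pages in pindex order.  Whenever "va" is
         * 2MB-aligned, at least 2MB remains before "end", the page belongs to
         * a fully populated 2MB reservation (psind == 1), and superpages are
         * enabled, try a single 2MB mapping; otherwise fall back to a 4KB
         * mapping via mmu_radix_enter_quick_locked().
         */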
3299         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3300                 va = start + ptoa(diff);
3301                 if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
3302                     m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
3303                     pmap_enter_2mpage(pmap, va, m, prot, &lock))
3304                         m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
3305                 else
3306                         mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot,
3307                             mpte, &lock, &invalidate);
3308                 m = TAILQ_NEXT(m, listq);
3309         }
3310         ptesync();
3311         if (lock != NULL)
3312                 rw_wunlock(lock);
3313         if (invalidate)
3314                 pmap_invalidate_all(pmap);
3315         PMAP_UNLOCK(pmap);
3316 }
3317
3318 static vm_page_t
3319 mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3320     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate)
3321 {
3322         struct spglist free;
3323         pt_entry_t *pte;
3324         vm_paddr_t pa;
3325
3326         KASSERT(!VA_IS_CLEANMAP(va) ||
3327             (m->oflags & VPO_UNMANAGED) != 0,
3328             ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
3329         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3330
3331         /*
3332          * In the case that a page table page is not
3333          * resident, we are creating it here.
3334          */
3335         if (va < VM_MAXUSER_ADDRESS) {
3336                 vm_pindex_t ptepindex;
3337                 pml3_entry_t *ptepa;
3338
3339                 /*
3340                  * Calculate pagetable page index
3341                  */
3342                 ptepindex = pmap_l3e_pindex(va);
3343                 if (mpte && (mpte->pindex == ptepindex)) {
3344                         mpte->ref_count++;
3345                 } else {
3346                         /*
3347                          * Get the page directory entry
3348                          */
3349                         ptepa = pmap_pml3e(pmap, va);
3350
3351                         /*
3352                          * If the page table page is mapped, we just increment
3353                          * the hold count, and activate it.  Otherwise, we
3354                          * attempt to allocate a page table page.  If this
3355                          * attempt fails, we don't retry.  Instead, we give up.
3356                          */
3357                         if (ptepa && (be64toh(*ptepa) & PG_V) != 0) {
3358                                 if (be64toh(*ptepa) & RPTE_LEAF)
3359                                         return (NULL);
3360                                 mpte = PHYS_TO_VM_PAGE(be64toh(*ptepa) & PG_FRAME);
3361                                 mpte->ref_count++;
3362                         } else {
3363                                 /*
3364                                  * Pass NULL instead of the PV list lock
3365                                  * pointer, because we don't intend to sleep.
3366                                  */
3367                                 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
3368                                 if (mpte == NULL)
3369                                         return (mpte);
3370                         }
3371                 }
3372                 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3373                 pte = &pte[pmap_pte_index(va)];
3374         } else {
3375                 mpte = NULL;
3376                 pte = pmap_pte(pmap, va);
3377         }
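        /*
         * A mapping already exists at "va"; this routine never replaces it.
         * Drop the page table page reference taken above and bail out.
         */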
3378         if (be64toh(*pte)) {
3379                 if (mpte != NULL) {
3380                         mpte->ref_count--;
3381                         mpte = NULL;
3382                 }
3383                 return (mpte);
3384         }
3385
3386         /*
3387          * Enter on the PV list if part of our managed memory.
3388          */
3389         if ((m->oflags & VPO_UNMANAGED) == 0 &&
3390             !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3391                 if (mpte != NULL) {
3392                         SLIST_INIT(&free);
3393                         if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3394                                 /*
3395                                  * Although "va" is not mapped, paging-
3396                                  * structure caches could nonetheless have
3397                                  * entries that refer to the freed page table
3398                                  * pages.  Invalidate those entries.
3399                                  */
3400                                 *invalidate = true;
3401                                 vm_page_free_pages_toq(&free, true);
3402                         }
3403                         mpte = NULL;
3404                 }
3405                 return (mpte);
3406         }
3407
3408         /*
3409          * Increment counters
3410          */
3411         pmap_resident_count_inc(pmap, 1);
3412
3413         pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs);
3414         if (prot & VM_PROT_EXECUTE)
3415                 pa |= PG_X;
3416         else
3417                 pa |= RPTE_EAA_R;
3418         if ((m->oflags & VPO_UNMANAGED) == 0)
3419                 pa |= PG_MANAGED;
3420
3421         pte_store(pte, pa);
3422         return (mpte);
3423 }
3424
3425 void
3426 mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
3427     vm_prot_t prot)
3428 {
3429         struct rwlock *lock;
3430         bool invalidate;
3431
3432         lock = NULL;
3433         invalidate = false;
3434         PMAP_LOCK(pmap);
3435         mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock,
3436             &invalidate);
3437         ptesync();
3438         if (lock != NULL)
3439                 rw_wunlock(lock);
3440         if (invalidate)
3441                 pmap_invalidate_all(pmap);
3442         PMAP_UNLOCK(pmap);
3443 }
3444
3445 vm_paddr_t
3446 mmu_radix_extract(pmap_t pmap, vm_offset_t va)
3447 {
3448         pml3_entry_t *l3e;
3449         pt_entry_t *pte;
3450         vm_paddr_t pa;
3451
3452         l3e = pmap_pml3e(pmap, va);
3453         if (__predict_false(l3e == NULL))
3454                 return (0);
3455         if (be64toh(*l3e) & RPTE_LEAF) {
3456                 pa = (be64toh(*l3e) & PG_PS_FRAME) |
3457                     (va & L3_PAGE_MASK);
3458         } else {
3459                 /*
3460                  * Beware of a concurrent promotion that changes the
3461                  * PDE at this point!  For example, vtopte() must not
3462                  * be used to access the PTE because it would use the
3463                  * new PDE.  It is, however, safe to use the old PDE
3464                  * because the page table page is preserved by the
3465                  * promotion.
3466                  */
3467                 pte = pmap_l3e_to_pte(l3e, va);
3468                 if (__predict_false(pte == NULL))
3469                         return (0);
3470                 pa = be64toh(*pte);
3471                 pa = (pa & PG_FRAME) |
3472                     (va & PAGE_MASK);
3473         }
3474         return (pa);
3475 }
3476
3477 vm_page_t
3478 mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3479 {
3480         pml3_entry_t l3e, *l3ep;
3481         pt_entry_t pte;
3482         vm_paddr_t pa;
3483         vm_page_t m;
3484
3485         pa = 0;
3486         m = NULL;
3487         CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
3488         PMAP_LOCK(pmap);
3489         l3ep = pmap_pml3e(pmap, va);
3490         if (l3ep != NULL && (l3e = be64toh(*l3ep))) {
3491                 if (l3e & RPTE_LEAF) {
3492                         if ((l3e & PG_RW) || (prot & VM_PROT_WRITE) == 0)
3493                                 m = PHYS_TO_VM_PAGE((l3e & PG_PS_FRAME) |
3494                                     (va & L3_PAGE_MASK));
3495                 } else {
3496                         /* Native endian PTE, do not pass to pmap functions */
3497                         pte = be64toh(*pmap_l3e_to_pte(l3ep, va));
3498                         if ((pte & PG_V) &&
3499                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
3500                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3501                 }
3502                 if (m != NULL && !vm_page_wire_mapped(m))
3503                         m = NULL;
3504         }
3505         PMAP_UNLOCK(pmap);
3506         return (m);
3507 }
3508
3509 static void
3510 mmu_radix_growkernel(vm_offset_t addr)
3511 {
3512         vm_paddr_t paddr;
3513         vm_page_t nkpg;
3514         pml3_entry_t *l3e;
3515         pml2_entry_t *l2e;
3516
3517         CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
3518         if (VM_MIN_KERNEL_ADDRESS < addr &&
3519                 addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE))
3520                 return;
3521
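        /*
         * Grow in 2MB (L3) steps: if the covering L2 entry is empty, first
         * allocate the page of L3 entries it points to; then allocate a page
         * table page for each missing L3 entry until kernel_vm_end covers the
         * rounded-up request.
         */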
3522         addr = roundup2(addr, L3_PAGE_SIZE);
3523         if (addr - 1 >= vm_map_max(kernel_map))
3524                 addr = vm_map_max(kernel_map);
3525         while (kernel_vm_end < addr) {
3526                 l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
3527                 if ((be64toh(*l2e) & PG_V) == 0) {
3528                         /* The L2 entry is empty; allocate a page of L3 entries. */
3529                         nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
3530                             VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3531                         if (nkpg == NULL)
3532                                 panic("pmap_growkernel: no memory to grow kernel");
3533                         nkpg->pindex = kernel_vm_end >> L2_PAGE_SIZE_SHIFT;
3534                         paddr = VM_PAGE_TO_PHYS(nkpg);
3535                         pde_store(l2e, paddr);
3536                         continue; /* try again */
3537                 }
3538                 l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end);
3539                 if ((be64toh(*l3e) & PG_V) != 0) {
3540                         kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3541                         if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3542                                 kernel_vm_end = vm_map_max(kernel_map);
3543                                 break;
3544                         }
3545                         continue;
3546                 }
3547
3548                 nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
3549                     VM_ALLOC_ZERO);
3550                 if (nkpg == NULL)
3551                         panic("pmap_growkernel: no memory to grow kernel");
3552                 nkpg->pindex = pmap_l3e_pindex(kernel_vm_end);
3553                 paddr = VM_PAGE_TO_PHYS(nkpg);
3554                 pde_store(l3e, paddr);
3555
3556                 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3557                 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3558                         kernel_vm_end = vm_map_max(kernel_map);
3559                         break;
3560                 }
3561         }
3562         ptesync();
3563 }
3564
3565 static MALLOC_DEFINE(M_RADIX_PGD, "radix_pgd", "radix page table root directory");
3566 static uma_zone_t zone_radix_pgd;
3567
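/*
 * A radix page directory root is RADIX_PGD_SIZE bytes of physically
 * contiguous, suitably aligned memory, which is comparatively expensive to
 * allocate.  Roots are therefore cached in a UMA zone: the import callback
 * below allocates wired contiguous pages and hands back their DMAP addresses,
 * and the release callback unwires and frees them.
 */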
3568 static int
3569 radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused,
3570     int flags)
3571 {
3572         int req;
3573
3574         req = VM_ALLOC_WIRED | malloc2vm_flags(flags);
3575         for (int i = 0; i < count; i++) {
3576                 vm_page_t m = vm_page_alloc_noobj_contig(req,
3577                     RADIX_PGD_SIZE / PAGE_SIZE,
3578                     0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE,
3579                     VM_MEMATTR_DEFAULT);
3580                 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3581         }
3582         return (count);
3583 }
3584
3585 static void
3586 radix_pgd_release(void *arg __unused, void **store, int count)
3587 {
3588         vm_page_t m;
3589         struct spglist free;
3590         int page_count;
3591
3592         SLIST_INIT(&free);
3593         page_count = RADIX_PGD_SIZE/PAGE_SIZE;
3594
3595         for (int i = 0; i < count; i++) {
3596                 /*
3597                  * XXX selectively remove dmap and KVA entries so we don't
3598                  * need to bzero
3599                  */
3600                 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
3601                 for (int j = page_count-1; j >= 0; j--) {
3602                         vm_page_unwire_noq(&m[j]);
3603                         SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss);
3604                 }
3605                 vm_page_free_pages_toq(&free, false);
3606         }
3607 }
3608
3609 static void
3610 mmu_radix_init(void)
3611 {
3612         vm_page_t mpte;
3613         vm_size_t s;
3614         int error, i, pv_npg;
3615
3616         /* XXX is this really needed for POWER? */
3617         /* L1TF, reserve page @0 unconditionally */
3618         vm_page_blacklist_add(0, bootverbose);
3619
3620         zone_radix_pgd = uma_zcache_create("radix_pgd_cache",
3621                 RADIX_PGD_SIZE, NULL, NULL,
3622 #ifdef INVARIANTS
3623             trash_init, trash_fini,
3624 #else
3625             NULL, NULL,
3626 #endif
3627                 radix_pgd_import, radix_pgd_release,
3628                 NULL, UMA_ZONE_NOBUCKET);
3629
3630         /*
3631          * Initialize the vm page array entries for the kernel pmap's
3632          * page table pages.
3633          */
3634         PMAP_LOCK(kernel_pmap);
3635         for (i = 0; i < nkpt; i++) {
3636                 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
3637                 KASSERT(mpte >= vm_page_array &&
3638                     mpte < &vm_page_array[vm_page_array_size],
3639                     ("pmap_init: page table page is out of range size: %lu",
3640                      vm_page_array_size));
3641                 mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i;
3642                 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
3643                 MPASS(PHYS_TO_VM_PAGE(mpte->phys_addr) == mpte);
3644                 //pmap_insert_pt_page(kernel_pmap, mpte);
3645                 mpte->ref_count = 1;
3646         }
3647         PMAP_UNLOCK(kernel_pmap);
3648         vm_wire_add(nkpt);
3649
3650         CTR1(KTR_PMAP, "%s()", __func__);
3651         TAILQ_INIT(&pv_dummy.pv_list);
3652
3653         /*
3654          * Are large page mappings enabled?
3655          */
3656         TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
3657         if (superpages_enabled) {
3658                 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
3659                     ("pmap_init: can't assign to pagesizes[1]"));
3660                 pagesizes[1] = L3_PAGE_SIZE;
3661         }
3662
3663         /*
3664          * Initialize the pv chunk list mutex.
3665          */
3666         mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
3667
3668         /*
3669          * Initialize the pool of pv list locks.
3670          */
3671         for (i = 0; i < NPV_LIST_LOCKS; i++)
3672                 rw_init(&pv_list_locks[i], "pmap pv list");
3673
3674         /*
3675          * Calculate the size of the pv head table for superpages.
3676          */
3677         pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L3_PAGE_SIZE);
3678
3679         /*
3680          * Allocate memory for the pv head table for superpages.
3681          */
3682         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
3683         s = round_page(s);
3684         pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
3685         for (i = 0; i < pv_npg; i++)
3686                 TAILQ_INIT(&pv_table[i].pv_list);
3687         TAILQ_INIT(&pv_dummy.pv_list);
3688
3689         pmap_initialized = 1;
3690         mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
3691         error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3692             (vmem_addr_t *)&qframe);
3693
3694         if (error != 0)
3695                 panic("qframe allocation failed");
3696         asid_arena = vmem_create("ASID", isa3_base_pid + 1, (1<<isa3_pid_bits),
3697             1, 1, M_WAITOK);
3698 }
3699
3700 static boolean_t
3701 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3702 {
3703         struct rwlock *lock;
3704         pv_entry_t pv;
3705         struct md_page *pvh;
3706         pt_entry_t *pte, mask;
3707         pmap_t pmap;
3708         int md_gen, pvh_gen;
3709         boolean_t rv;
3710
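        /*
         * Scan every 4KB mapping on the page's pv list and, for non-fictitious
         * pages, every 2MB mapping on the corresponding pa_to_pvh() list,
         * looking for the requested accessed/modified bits.  If PMAP_TRYLOCK()
         * fails, the pv list lock is dropped to take the pmap lock, and the
         * scan restarts if the generation counts show that the lists changed.
         */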
3711         rv = FALSE;
3712         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3713         rw_rlock(lock);
3714 restart:
3715         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3716                 pmap = PV_PMAP(pv);
3717                 if (!PMAP_TRYLOCK(pmap)) {
3718                         md_gen = m->md.pv_gen;
3719                         rw_runlock(lock);
3720                         PMAP_LOCK(pmap);
3721                         rw_rlock(lock);
3722                         if (md_gen != m->md.pv_gen) {
3723                                 PMAP_UNLOCK(pmap);
3724                                 goto restart;
3725                         }
3726                 }
3727                 pte = pmap_pte(pmap, pv->pv_va);
3728                 mask = 0;
3729                 if (modified)
3730                         mask |= PG_RW | PG_M;
3731                 if (accessed)
3732                         mask |= PG_V | PG_A;
3733                 rv = (be64toh(*pte) & mask) == mask;
3734                 PMAP_UNLOCK(pmap);
3735                 if (rv)
3736                         goto out;
3737         }
3738         if ((m->flags & PG_FICTITIOUS) == 0) {
3739                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3740                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
3741                         pmap = PV_PMAP(pv);
3742                         if (!PMAP_TRYLOCK(pmap)) {
3743                                 md_gen = m->md.pv_gen;
3744                                 pvh_gen = pvh->pv_gen;
3745                                 rw_runlock(lock);
3746                                 PMAP_LOCK(pmap);
3747                                 rw_rlock(lock);
3748                                 if (md_gen != m->md.pv_gen ||
3749                                     pvh_gen != pvh->pv_gen) {
3750                                         PMAP_UNLOCK(pmap);
3751                                         goto restart;
3752                                 }
3753                         }
3754                         pte = pmap_pml3e(pmap, pv->pv_va);
3755                         mask = 0;
3756                         if (modified)
3757                                 mask |= PG_RW | PG_M;
3758                         if (accessed)
3759                                 mask |= PG_V | PG_A;
3760                         rv = (be64toh(*pte) & mask) == mask;
3761                         PMAP_UNLOCK(pmap);
3762                         if (rv)
3763                                 goto out;
3764                 }
3765         }
3766 out:
3767         rw_runlock(lock);
3768         return (rv);
3769 }
3770
3771 /*
3772  *      pmap_is_modified:
3773  *
3774  *      Return whether or not the specified physical page was modified
3775  *      in any physical maps.
3776  */
3777 boolean_t
3778 mmu_radix_is_modified(vm_page_t m)
3779 {
3780
3781         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3782             ("pmap_is_modified: page %p is not managed", m));
3783
3784         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3785         /*
3786          * If the page is not busied then this check is racy.
3787          */
3788         if (!pmap_page_is_write_mapped(m))
3789                 return (FALSE);
3790         return (pmap_page_test_mappings(m, FALSE, TRUE));
3791 }
3792
3793 boolean_t
3794 mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3795 {
3796         pml3_entry_t *l3e;
3797         pt_entry_t *pte;
3798         boolean_t rv;
3799
3800         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
3801         rv = FALSE;
3802         PMAP_LOCK(pmap);
3803         l3e = pmap_pml3e(pmap, addr);
3804         if (l3e != NULL && (be64toh(*l3e) & (RPTE_LEAF | PG_V)) == PG_V) {
3805                 pte = pmap_l3e_to_pte(l3e, addr);
3806                 rv = (be64toh(*pte) & PG_V) == 0;
3807         }
3808         PMAP_UNLOCK(pmap);
3809         return (rv);
3810 }
3811
3812 boolean_t
3813 mmu_radix_is_referenced(vm_page_t m)
3814 {
3815         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3816             ("pmap_is_referenced: page %p is not managed", m));
3817         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3818         return (pmap_page_test_mappings(m, TRUE, FALSE));
3819 }
3820
3821 /*
3822  *      pmap_ts_referenced:
3823  *
3824  *      Return a count of reference bits for a page, clearing those bits.
3825  *      It is not necessary for every reference bit to be cleared, but it
3826  *      is necessary that 0 only be returned when there are truly no
3827  *      reference bits set.
3828  *
3829  *      As an optimization, update the page's dirty field if a modified bit is
3830  *      found while counting reference bits.  This opportunistic update can be
3831  *      performed at low cost and can eliminate the need for some future calls
3832  *      to pmap_is_modified().  However, since this function stops after
3833  *      finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3834  *      dirty pages.  Those dirty pages will only be detected by a future call
3835  *      to pmap_is_modified().
3836  *
3837  *      A DI block is not needed within this function, because
3838  *      invalidations are performed before the PV list lock is
3839  *      released.
3840  */
3841 int
3842 mmu_radix_ts_referenced(vm_page_t m)
3843 {
3844         struct md_page *pvh;
3845         pv_entry_t pv, pvf;
3846         pmap_t pmap;
3847         struct rwlock *lock;
3848         pml3_entry_t oldl3e, *l3e;
3849         pt_entry_t *pte;
3850         vm_paddr_t pa;
3851         int cleared, md_gen, not_cleared, pvh_gen;
3852         struct spglist free;
3853
3854         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3855         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3856             ("pmap_ts_referenced: page %p is not managed", m));
3857         SLIST_INIT(&free);
3858         cleared = 0;
3859         pa = VM_PAGE_TO_PHYS(m);
3860         lock = PHYS_TO_PV_LIST_LOCK(pa);
3861         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3862         rw_wlock(lock);
3863 retry:
3864         not_cleared = 0;
3865         if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3866                 goto small_mappings;
3867         pv = pvf;
3868         do {
3869                 if (pvf == NULL)
3870                         pvf = pv;
3871                 pmap = PV_PMAP(pv);
3872                 if (!PMAP_TRYLOCK(pmap)) {
3873                         pvh_gen = pvh->pv_gen;
3874                         rw_wunlock(lock);
3875                         PMAP_LOCK(pmap);
3876                         rw_wlock(lock);
3877                         if (pvh_gen != pvh->pv_gen) {
3878                                 PMAP_UNLOCK(pmap);
3879                                 goto retry;
3880                         }
3881                 }
3882                 l3e = pmap_pml3e(pmap, pv->pv_va);
3883                 oldl3e = be64toh(*l3e);
3884                 if ((oldl3e & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3885                         /*
3886                          * Although "oldl3e" is mapping a 2MB page, because
3887                          * this function is called at a 4KB page granularity,
3888                          * we only update the 4KB page under test.
3889                          */
3890                         vm_page_dirty(m);
3891                 }
3892                 if ((oldl3e & PG_A) != 0) {
3893                         /*
3894                          * Since this reference bit is shared by 512 4KB
3895                          * pages, it should not be cleared every time it is
3896                          * tested.  Apply a simple "hash" function on the
3897                          * physical page number, the virtual superpage number,
3898                          * and the pmap address to select one 4KB page out of
3899                          * the 512 on which testing the reference bit will
3900                          * result in clearing that reference bit.  This
3901                          * function is designed to avoid the selection of the
3902                          * same 4KB page for every 2MB page mapping.
3903                          *
3904                          * On demotion, a mapping that hasn't been referenced
3905                          * is simply destroyed.  To avoid the possibility of a
3906                          * subsequent page fault on a demoted wired mapping,
3907                          * always leave its reference bit set.  Moreover,
3908                          * since the superpage is wired, the current state of
3909                          * its reference bit won't affect page replacement.
3910                          */
3911                         if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L3_PAGE_SIZE_SHIFT) ^
3912                             (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
3913                             (oldl3e & PG_W) == 0) {
3914                                 atomic_clear_long(l3e, htobe64(PG_A));
3915                                 pmap_invalidate_page(pmap, pv->pv_va);
3916                                 cleared++;
3917                                 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3918                                     ("inconsistent pv lock %p %p for page %p",
3919                                     lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3920                         } else
3921                                 not_cleared++;
3922                 }
3923                 PMAP_UNLOCK(pmap);
3924                 /* Rotate the PV list if it has more than one entry. */
3925                 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3926                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
3927                         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
3928                         pvh->pv_gen++;
3929                 }
3930                 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
3931                         goto out;
3932         } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
3933 small_mappings:
3934         if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
3935                 goto out;
3936         pv = pvf;
3937         do {
3938                 if (pvf == NULL)
3939                         pvf = pv;
3940                 pmap = PV_PMAP(pv);
3941                 if (!PMAP_TRYLOCK(pmap)) {
3942                         pvh_gen = pvh->pv_gen;
3943                         md_gen = m->md.pv_gen;
3944                         rw_wunlock(lock);
3945                         PMAP_LOCK(pmap);
3946                         rw_wlock(lock);
3947                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3948                                 PMAP_UNLOCK(pmap);
3949                                 goto retry;
3950                         }
3951                 }
3952                 l3e = pmap_pml3e(pmap, pv->pv_va);
3953                 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0,
3954                     ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
3955                     m));
3956                 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
3957                 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW))
3958                         vm_page_dirty(m);
3959                 if ((be64toh(*pte) & PG_A) != 0) {
3960                         atomic_clear_long(pte, htobe64(PG_A));
3961                         pmap_invalidate_page(pmap, pv->pv_va);
3962                         cleared++;
3963                 }
3964                 PMAP_UNLOCK(pmap);
3965                 /* Rotate the PV list if it has more than one entry. */
3966                 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3967                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
3968                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3969                         m->md.pv_gen++;
3970                 }
3971         } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
3972             not_cleared < PMAP_TS_REFERENCED_MAX);
3973 out:
3974         rw_wunlock(lock);
3975         vm_page_free_pages_toq(&free, true);
3976         return (cleared + not_cleared);
3977 }
3978
3979 static vm_offset_t
3980 mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
3981     vm_paddr_t end, int prot __unused)
3982 {
3983
3984         CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
3985                  prot);
3986         return (PHYS_TO_DMAP(start));
3987 }
3988
3989 void
3990 mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
3991     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3992 {
3993         pml3_entry_t *l3e;
3994         vm_paddr_t pa, ptepa;
3995         vm_page_t p, pdpg;
3996         vm_memattr_t ma;
3997
3998         CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
3999             object, pindex, size);
4000         VM_OBJECT_ASSERT_WLOCKED(object);
4001         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4002                         ("pmap_object_init_pt: non-device object"));
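        /*
         * Only requests that are 2MB-aligned in both address and size are
         * optimized here with direct 2MB mappings; anything else is simply
         * left to be entered on demand.
         */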
4003         /* NB: "size" could be or'ed with "addr" to fold the two checks below into one. */
4004         if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) {
4005                 if (!mmu_radix_ps_enabled(pmap))
4006                         return;
4007                 if (!vm_object_populate(object, pindex, pindex + atop(size)))
4008                         return;
4009                 p = vm_page_lookup(object, pindex);
4010                 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4011                     ("pmap_object_init_pt: invalid page %p", p));
4012                 ma = p->md.mdpg_cache_attrs;
4013
4014                 /*
4015                  * Abort the mapping if the first page is not physically
4016                  * aligned to a 2MB page boundary.
4017                  */
4018                 ptepa = VM_PAGE_TO_PHYS(p);
4019                 if (ptepa & L3_PAGE_MASK)
4020                         return;
4021
4022                 /*
4023                  * Skip the first page.  Abort the mapping if the rest of
4024                  * the pages are not physically contiguous or have differing
4025                  * memory attributes.
4026                  */
4027                 p = TAILQ_NEXT(p, listq);
4028                 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
4029                     pa += PAGE_SIZE) {
4030                         KASSERT(p->valid == VM_PAGE_BITS_ALL,
4031                             ("pmap_object_init_pt: invalid page %p", p));
4032                         if (pa != VM_PAGE_TO_PHYS(p) ||
4033                             ma != p->md.mdpg_cache_attrs)
4034                                 return;
4035                         p = TAILQ_NEXT(p, listq);
4036                 }
4037
4038                 PMAP_LOCK(pmap);
4039                 for (pa = ptepa | pmap_cache_bits(ma);
4040                     pa < ptepa + size; pa += L3_PAGE_SIZE) {
4041                         pdpg = pmap_allocl3e(pmap, addr, NULL);
4042                         if (pdpg == NULL) {
4043                                 /*
4044                                  * The creation of mappings below is only an
4045                                  * optimization.  If a page directory page
4046                                  * cannot be allocated without blocking,
4047                                  * continue on to the next mapping rather than
4048                                  * blocking.
4049                                  */
4050                                 addr += L3_PAGE_SIZE;
4051                                 continue;
4052                         }
4053                         l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
4054                         l3e = &l3e[pmap_pml3e_index(addr)];
4055                         if ((be64toh(*l3e) & PG_V) == 0) {
4056                                 pa |= PG_M | PG_A | PG_RW;
4057                                 pte_store(l3e, pa);
4058                                 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
4059                                 atomic_add_long(&pmap_l3e_mappings, 1);
4060                         } else {
4061                                 /* Continue on if the PDE is already valid. */
4062                                 pdpg->ref_count--;
4063                                 KASSERT(pdpg->ref_count > 0,
4064                                     ("pmap_object_init_pt: missing reference "
4065                                     "to page directory page, va: 0x%lx", addr));
4066                         }
4067                         addr += L3_PAGE_SIZE;
4068                 }
4069                 ptesync();
4070                 PMAP_UNLOCK(pmap);
4071         }
4072 }
4073
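/*
 * Illustrative sketch (not compiled) of the superpage preconditions applied
 * above: the virtual address, the backing physical address, and the size
 * must all be 2MB aligned before 2MB leaves are installed.  The function and
 * parameter names are hypothetical.
 */
#if 0
static bool
l3e_superpage_eligible_example(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        return ((va & L3_PAGE_MASK) == 0 && (pa & L3_PAGE_MASK) == 0 &&
            (size & L3_PAGE_MASK) == 0);
}
#endif
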
4074 boolean_t
4075 mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
4076 {
4077         struct md_page *pvh;
4078         struct rwlock *lock;
4079         pv_entry_t pv;
4080         int loops = 0;
4081         boolean_t rv;
4082
4083         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4084             ("pmap_page_exists_quick: page %p is not managed", m));
4085         CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
4086         rv = FALSE;
4087         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4088         rw_rlock(lock);
4089         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4090                 if (PV_PMAP(pv) == pmap) {
4091                         rv = TRUE;
4092                         break;
4093                 }
4094                 loops++;
4095                 if (loops >= 16)
4096                         break;
4097         }
4098         if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4099                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4100                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4101                         if (PV_PMAP(pv) == pmap) {
4102                                 rv = TRUE;
4103                                 break;
4104                         }
4105                         loops++;
4106                         if (loops >= 16)
4107                                 break;
4108                 }
4109         }
4110         rw_runlock(lock);
4111         return (rv);
4112 }
4113
4114 void
4115 mmu_radix_page_init(vm_page_t m)
4116 {
4117
4118         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4119         TAILQ_INIT(&m->md.pv_list);
4120         m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
4121 }
4122
4123 int
4124 mmu_radix_page_wired_mappings(vm_page_t m)
4125 {
4126         struct rwlock *lock;
4127         struct md_page *pvh;
4128         pmap_t pmap;
4129         pt_entry_t *pte;
4130         pv_entry_t pv;
4131         int count, md_gen, pvh_gen;
4132
4133         if ((m->oflags & VPO_UNMANAGED) != 0)
4134                 return (0);
4135         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4136         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4137         rw_rlock(lock);
4138 restart:
4139         count = 0;
4140         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4141                 pmap = PV_PMAP(pv);
4142                 if (!PMAP_TRYLOCK(pmap)) {
4143                         md_gen = m->md.pv_gen;
4144                         rw_runlock(lock);
4145                         PMAP_LOCK(pmap);
4146                         rw_rlock(lock);
4147                         if (md_gen != m->md.pv_gen) {
4148                                 PMAP_UNLOCK(pmap);
4149                                 goto restart;
4150                         }
4151                 }
4152                 pte = pmap_pte(pmap, pv->pv_va);
4153                 if ((be64toh(*pte) & PG_W) != 0)
4154                         count++;
4155                 PMAP_UNLOCK(pmap);
4156         }
4157         if ((m->flags & PG_FICTITIOUS) == 0) {
4158                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4159                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4160                         pmap = PV_PMAP(pv);
4161                         if (!PMAP_TRYLOCK(pmap)) {
4162                                 md_gen = m->md.pv_gen;
4163                                 pvh_gen = pvh->pv_gen;
4164                                 rw_runlock(lock);
4165                                 PMAP_LOCK(pmap);
4166                                 rw_rlock(lock);
4167                                 if (md_gen != m->md.pv_gen ||
4168                                     pvh_gen != pvh->pv_gen) {
4169                                         PMAP_UNLOCK(pmap);
4170                                         goto restart;
4171                                 }
4172                         }
4173                         pte = pmap_pml3e(pmap, pv->pv_va);
4174                         if ((be64toh(*pte) & PG_W) != 0)
4175                                 count++;
4176                         PMAP_UNLOCK(pmap);
4177                 }
4178         }
4179         rw_runlock(lock);
4180         return (count);
4181 }
4182
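/*
 * Illustrative sketch (not compiled) of the locking pattern used above and
 * repeated throughout this file: the pmap lock is never acquired while a pv
 * list lock is held, so when PMAP_TRYLOCK() fails the pv list lock is
 * dropped, the pmap lock is taken, the pv list lock is retaken, and the
 * generation count is consulted to detect concurrent changes and restart.
 * The function name is hypothetical.
 */
#if 0
static void
pmap_pv_lock_dance_example(vm_page_t m)
{
        struct rwlock *lock;
        pv_entry_t pv;
        pmap_t pmap;
        int md_gen;

        lock = VM_PAGE_TO_PV_LIST_LOCK(m);
        rw_rlock(lock);
restart:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                pmap = PV_PMAP(pv);
                if (!PMAP_TRYLOCK(pmap)) {
                        md_gen = m->md.pv_gen;
                        rw_runlock(lock);
                        PMAP_LOCK(pmap);
                        rw_rlock(lock);
                        if (md_gen != m->md.pv_gen) {
                                PMAP_UNLOCK(pmap);
                                goto restart;
                        }
                }
                /* ... examine the mapping at pv->pv_va under both locks ... */
                PMAP_UNLOCK(pmap);
        }
        rw_runlock(lock);
}
#endif
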
4183 static void
4184 mmu_radix_update_proctab(int pid, pml1_entry_t l1pa)
4185 {
4186         isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE |  l1pa | RADIX_PGD_INDEX_SHIFT);
4187 }
4188
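/*
 * Illustrative sketch (not compiled) of the process-table doubleword written
 * by mmu_radix_update_proctab() above: it combines the radix tree size, the
 * physical address of the root (L1) table, and the width of the root
 * page-directory index, stored big-endian.  The function name is
 * hypothetical.
 */
#if 0
static uint64_t
proctab_entry_example(vm_paddr_t l1pa)
{

        return (htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT));
}
#endif
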
4189 int
4190 mmu_radix_pinit(pmap_t pmap)
4191 {
4192         vmem_addr_t pid;
4193         vm_paddr_t l1pa;
4194
4195         CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4196
4197         /*
4198          * allocate the page directory page
4199          */
4200         pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK);
4201
4202         for (int j = 0; j <  RADIX_PGD_SIZE_SHIFT; j++)
4203                 pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE);
4204         vm_radix_init(&pmap->pm_radix);
4205         TAILQ_INIT(&pmap->pm_pvchunk);
4206         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4207         pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4208         vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid);
4209
4210         pmap->pm_pid = pid;
4211         l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
4212         mmu_radix_update_proctab(pid, l1pa);
4213         __asm __volatile("ptesync;isync" : : : "memory");
4214
4215         return (1);
4216 }
4217
4218 /*
4219  * This routine is called if the desired page table page does not exist.
4220  *
4221  * If page table page allocation fails, this routine may sleep before
4222  * returning NULL.  It sleeps only if a lock pointer was given.
4223  *
4224  * Note: If a page allocation fails at page table level two or three,
4225  * one or two pages may be held during the wait, only to be released
4226  * afterwards.  This conservative approach makes it easier to argue
4227  * that no race conditions can arise.
4228  */
4229 static vm_page_t
4230 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
4231 {
4232         vm_page_t m, pdppg, pdpg;
4233
4234         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4235
4236         /*
4237          * Allocate a page table page.
4238          */
4239         if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
4240                 if (lockp != NULL) {
4241                         RELEASE_PV_LIST_LOCK(lockp);
4242                         PMAP_UNLOCK(pmap);
4243                         vm_wait(NULL);
4244                         PMAP_LOCK(pmap);
4245                 }
4246                 /*
4247                  * Indicate the need to retry.  While waiting, the page table
4248                  * page may have been allocated.
4249                  */
4250                 return (NULL);
4251         }
4252         m->pindex = ptepindex;
4253
4254         /*
4255          * Map the page table page into the process address space, if
4256          * it isn't already there.
4257          */
4258
4259         if (ptepindex >= (NUPDE + NUPDPE)) {
4260                 pml1_entry_t *l1e;
4261                 vm_pindex_t pml1index;
4262
4263                 /* Wire up a new PDPE page */
4264                 pml1index = ptepindex - (NUPDE + NUPDPE);
4265                 l1e = &pmap->pm_pml1[pml1index];
4266                 KASSERT((be64toh(*l1e) & PG_V) == 0,
4267                     ("%s: L1 entry %#lx is valid", __func__, *l1e));
4268                 pde_store(l1e, VM_PAGE_TO_PHYS(m));
4269         } else if (ptepindex >= NUPDE) {
4270                 vm_pindex_t pml1index;
4271                 vm_pindex_t pdpindex;
4272                 pml1_entry_t *l1e;
4273                 pml2_entry_t *l2e;
4274
4275                 /* Wire up a new l2e page */
4276                 pdpindex = ptepindex - NUPDE;
4277                 pml1index = pdpindex >> RPTE_SHIFT;
4278
4279                 l1e = &pmap->pm_pml1[pml1index];
4280                 if ((be64toh(*l1e) & PG_V) == 0) {
4281                         /* Have to allocate a new pdp, recurse */
4282                         if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml1index,
4283                                 lockp) == NULL) {
4284                                 vm_page_unwire_noq(m);
4285                                 vm_page_free_zero(m);
4286                                 return (NULL);
4287                         }
4288                 } else {
4289                         /* Add reference to l2e page */
4290                         pdppg = PHYS_TO_VM_PAGE(be64toh(*l1e) & PG_FRAME);
4291                         pdppg->ref_count++;
4292                 }
4293                 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4294
4295                 /* Now find the pdp page */
4296                 l2e = &l2e[pdpindex & RPTE_MASK];
4297                 KASSERT((be64toh(*l2e) & PG_V) == 0,
4298                     ("%s: L2 entry %#lx is valid", __func__, *l2e));
4299                 pde_store(l2e, VM_PAGE_TO_PHYS(m));
4300         } else {
4301                 vm_pindex_t pml1index;
4302                 vm_pindex_t pdpindex;
4303                 pml1_entry_t *l1e;
4304                 pml2_entry_t *l2e;
4305                 pml3_entry_t *l3e;
4306
4307                 /* Wire up a new PTE page */
4308                 pdpindex = ptepindex >> RPTE_SHIFT;
4309                 pml1index = pdpindex >> RPTE_SHIFT;
4310
4311                 /* First, find the pdp and check that it's valid. */
4312                 l1e = &pmap->pm_pml1[pml1index];
4313                 if ((be64toh(*l1e) & PG_V) == 0) {
4314                         /* Have to allocate a new pd, recurse */
4315                         if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4316                             lockp) == NULL) {
4317                                 vm_page_unwire_noq(m);
4318                                 vm_page_free_zero(m);
4319                                 return (NULL);
4320                         }
4321                         l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4322                         l2e = &l2e[pdpindex & RPTE_MASK];
4323                 } else {
4324                         l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME);
4325                         l2e = &l2e[pdpindex & RPTE_MASK];
4326                         if ((be64toh(*l2e) & PG_V) == 0) {
4327                                 /* Have to allocate a new pd, recurse */
4328                                 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4329                                     lockp) == NULL) {
4330                                         vm_page_unwire_noq(m);
4331                                         vm_page_free_zero(m);
4332                                         return (NULL);
4333                                 }
4334                         } else {
4335                                 /* Add reference to the pd page */
4336                                 pdpg = PHYS_TO_VM_PAGE(be64toh(*l2e) & PG_FRAME);
4337                                 pdpg->ref_count++;
4338                         }
4339                 }
4340                 l3e = (pml3_entry_t *)PHYS_TO_DMAP(be64toh(*l2e) & PG_FRAME);
4341
4342                 /* Now we know where the page directory page is */
4343                 l3e = &l3e[ptepindex & RPTE_MASK];
4344                 KASSERT((be64toh(*l3e) & PG_V) == 0,
4345                     ("%s: L3 entry %#lx is valid", __func__, *l3e));
4346                 pde_store(l3e, VM_PAGE_TO_PHYS(m));
4347         }
4348
4349         pmap_resident_count_inc(pmap, 1);
4350         return (m);
4351 }
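
/*
 * Illustrative sketch (not compiled) of the pindex name space consumed by
 * _pmap_allocpte() above: indices [0, NUPDE) name leaf PTE pages installed
 * in L3 entries, [NUPDE, NUPDE + NUPDPE) name L3 directory pages installed
 * in L2 entries, and indices at or above NUPDE + NUPDPE name L2 pages
 * installed in root (L1) entries.  The function name and locals are
 * hypothetical.
 */
#if 0
static void
pmap_pindex_example(vm_offset_t va)
{
        vm_pindex_t pteidx, pdidx, pdpidx;

        pteidx = pmap_l3e_pindex(va);                   /* leaf PTE page */
        pdidx = NUPDE + (pteidx >> RPTE_SHIFT);         /* L3 directory page */
        pdpidx = NUPDE + NUPDPE +
            ((pdidx - NUPDE) >> RPTE_SHIFT);            /* L2 page */

        /* Any of these indices may be passed to _pmap_allocpte(). */
        (void)pdpidx;
}
#endif
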
4352 static vm_page_t
4353 pmap_allocl3e(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4354 {
4355         vm_pindex_t pdpindex, ptepindex;
4356         pml2_entry_t *pdpe;
4357         vm_page_t pdpg;
4358
4359 retry:
4360         pdpe = pmap_pml2e(pmap, va);
4361         if (pdpe != NULL && (be64toh(*pdpe) & PG_V) != 0) {
4362                 /* Add a reference to the pd page. */
4363                 pdpg = PHYS_TO_VM_PAGE(be64toh(*pdpe) & PG_FRAME);
4364                 pdpg->ref_count++;
4365         } else {
4366                 /* Allocate a pd page. */
4367                 ptepindex = pmap_l3e_pindex(va);
4368                 pdpindex = ptepindex >> RPTE_SHIFT;
4369                 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
4370                 if (pdpg == NULL && lockp != NULL)
4371                         goto retry;
4372         }
4373         return (pdpg);
4374 }
4375
4376 static vm_page_t
4377 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4378 {
4379         vm_pindex_t ptepindex;
4380         pml3_entry_t *pd;
4381         vm_page_t m;
4382
4383         /*
4384          * Calculate the page table page index.
4385          */
4386         ptepindex = pmap_l3e_pindex(va);
4387 retry:
4388         /*
4389          * Get the page directory entry
4390          */
4391         pd = pmap_pml3e(pmap, va);
4392
4393         /*
4394          * This supports switching from a 2MB page to a
4395          * normal 4K page.
4396          */
4397         if (pd != NULL && (be64toh(*pd) & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V)) {
4398                 if (!pmap_demote_l3e_locked(pmap, pd, va, lockp)) {
4399                         /*
4400                          * Invalidation of the 2MB page mapping may have caused
4401                          * the deallocation of the underlying PD page.
4402                          */
4403                         pd = NULL;
4404                 }
4405         }
4406
4407         /*
4408          * If the page table page is mapped, we just increment the
4409          * hold count, and activate it.
4410          */
4411         if (pd != NULL && (be64toh(*pd) & PG_V) != 0) {
4412                 m = PHYS_TO_VM_PAGE(be64toh(*pd) & PG_FRAME);
4413                 m->ref_count++;
4414         } else {
4415                 /*
4416                  * Here if the pte page isn't mapped, or if it has been
4417                  * deallocated.
4418                  */
4419                 m = _pmap_allocpte(pmap, ptepindex, lockp);
4420                 if (m == NULL && lockp != NULL)
4421                         goto retry;
4422         }
4423         return (m);
4424 }
4425
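/*
 * Illustrative sketch (not compiled) of the typical caller pattern for
 * pmap_allocpte() above: allocate (or find) the PTE page backing "va", then
 * locate the individual PTE within it.  The pmap lock must be held; with a
 * non-NULL lock pointer the allocation may sleep, dropping and retaking the
 * pmap lock.  The function name is hypothetical.
 */
#if 0
static pt_entry_t *
pmap_allocpte_example(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
{
        vm_page_t mpte;
        pt_entry_t *pte;

        mpte = pmap_allocpte(pmap, va, lockp);
        if (mpte == NULL)
                return (NULL);
        pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
        return (&pte[pmap_pte_index(va)]);
}
#endif
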
4426 static void
4427 mmu_radix_pinit0(pmap_t pmap)
4428 {
4429
4430         CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4431         PMAP_LOCK_INIT(pmap);
4432         pmap->pm_pml1 = kernel_pmap->pm_pml1;
4433         pmap->pm_pid = kernel_pmap->pm_pid;
4434
4435         vm_radix_init(&pmap->pm_radix);
4436         TAILQ_INIT(&pmap->pm_pvchunk);
4437         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4438         kernel_pmap->pm_flags =
4439                 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4440 }
4441 /*
4442  * pmap_protect_l3e: set the protection on a 2MB page mapping in a process
4443  */
4444 static boolean_t
4445 pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
4446 {
4447         pt_entry_t newpde, oldpde;
4448         vm_offset_t eva, va;
4449         vm_page_t m;
4450         boolean_t anychanged;
4451
4452         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4453         KASSERT((sva & L3_PAGE_MASK) == 0,
4454             ("pmap_protect_l3e: sva is not 2mpage aligned"));
4455         anychanged = FALSE;
4456 retry:
4457         oldpde = newpde = be64toh(*l3e);
4458         if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
4459             (PG_MANAGED | PG_M | PG_RW)) {
4460                 eva = sva + L3_PAGE_SIZE;
4461                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4462                     va < eva; va += PAGE_SIZE, m++)
4463                         vm_page_dirty(m);
4464         }
4465         if ((prot & VM_PROT_WRITE) == 0) {
4466                 newpde &= ~(PG_RW | PG_M);
4467                 newpde |= RPTE_EAA_R;
4468         }
4469         if (prot & VM_PROT_EXECUTE)
4470                 newpde |= PG_X;
4471         if (newpde != oldpde) {
4472                 /*
4473                  * As an optimization to future operations on this PDE, clear
4474                  * PG_PROMOTED.  The impending invalidation will remove any
4475                  * lingering 4KB page mappings from the TLB.
4476                  */
4477                 if (!atomic_cmpset_long(l3e, htobe64(oldpde), htobe64(newpde & ~PG_PROMOTED)))
4478                         goto retry;
4479                 anychanged = TRUE;
4480         }
4481         return (anychanged);
4482 }
4483
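/*
 * Illustrative sketch (not compiled) of the PTE update idiom used by
 * pmap_protect_l3e() above and mmu_radix_protect() below: entries are stored
 * big-endian and updated with compare-and-swap so that hardware-updated
 * reference/change bits are never lost; if the entry changes underneath us,
 * it is reread and the update retried.  The function name is hypothetical;
 * it downgrades a single PTE to read-only.
 */
#if 0
static void
pmap_pte_downgrade_example(pt_entry_t *pte)
{
        pt_entry_t obits, pbits;

        do {
                obits = be64toh(*pte);
                pbits = (obits & ~(PG_RW | PG_M)) | RPTE_EAA_R;
        } while (obits != pbits &&
            !atomic_cmpset_long(pte, htobe64(obits), htobe64(pbits)));
}
#endif
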
4484 void
4485 mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
4486     vm_prot_t prot)
4487 {
4488         vm_offset_t va_next;
4489         pml1_entry_t *l1e;
4490         pml2_entry_t *l2e;
4491         pml3_entry_t ptpaddr, *l3e;
4492         pt_entry_t *pte;
4493         boolean_t anychanged;
4494
4495         CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva,
4496             prot);
4497
4498         KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4499         if (prot == VM_PROT_NONE) {
4500                 mmu_radix_remove(pmap, sva, eva);
4501                 return;
4502         }
4503
4504         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
4505             (VM_PROT_WRITE|VM_PROT_EXECUTE))
4506                 return;
4507
4508 #ifdef INVARIANTS
4509         if (VERBOSE_PROTECT || pmap_logging)
4510                 printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n",
4511                            pmap, sva, eva, prot, pmap->pm_pid);
4512 #endif
4513         anychanged = FALSE;
4514
4515         PMAP_LOCK(pmap);
4516         for (; sva < eva; sva = va_next) {
4517                 l1e = pmap_pml1e(pmap, sva);
4518                 if ((be64toh(*l1e) & PG_V) == 0) {
4519                         va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
4520                         if (va_next < sva)
4521                                 va_next = eva;
4522                         continue;
4523                 }
4524
4525                 l2e = pmap_l1e_to_l2e(l1e, sva);
4526                 if ((be64toh(*l2e) & PG_V) == 0) {
4527                         va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
4528                         if (va_next < sva)
4529                                 va_next = eva;
4530                         continue;
4531                 }
4532
4533                 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
4534                 if (va_next < sva)
4535                         va_next = eva;
4536
4537                 l3e = pmap_l2e_to_l3e(l2e, sva);
4538                 ptpaddr = be64toh(*l3e);
4539
4540                 /*
4541                  * Weed out invalid mappings.
4542                  */
4543                 if (ptpaddr == 0)
4544                         continue;
4545
4546                 /*
4547                  * Check for large page.
4548                  */
4549                 if ((ptpaddr & RPTE_LEAF) != 0) {
4550                         /*
4551                          * Are we protecting the entire large page?  If not,
4552                          * demote the mapping and fall through.
4553                          */
4554                         if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
4555                                 if (pmap_protect_l3e(pmap, l3e, sva, prot))
4556                                         anychanged = TRUE;
4557                                 continue;
4558                         } else if (!pmap_demote_l3e(pmap, l3e, sva)) {
4559                                 /*
4560                                  * The large page mapping was destroyed.
4561                                  */
4562                                 continue;
4563                         }
4564                 }
4565
4566                 if (va_next > eva)
4567                         va_next = eva;
4568
4569                 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
4570                     sva += PAGE_SIZE) {
4571                         pt_entry_t obits, pbits;
4572                         vm_page_t m;
4573
4574 retry:
4575                         MPASS(pte == pmap_pte(pmap, sva));
4576                         obits = pbits = be64toh(*pte);
4577                         if ((pbits & PG_V) == 0)
4578                                 continue;
4579
4580                         if ((prot & VM_PROT_WRITE) == 0) {
4581                                 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
4582                                     (PG_MANAGED | PG_M | PG_RW)) {
4583                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4584                                         vm_page_dirty(m);
4585                                 }
4586                                 pbits &= ~(PG_RW | PG_M);
4587                                 pbits |= RPTE_EAA_R;
4588                         }
4589                         if (prot & VM_PROT_EXECUTE)
4590                                 pbits |= PG_X;
4591
4592                         if (pbits != obits) {
4593                                 if (!atomic_cmpset_long(pte, htobe64(obits), htobe64(pbits)))
4594                                         goto retry;
4595                                 if (obits & (PG_A|PG_M)) {
4596                                         anychanged = TRUE;
4597 #ifdef INVARIANTS
4598                                         if (VERBOSE_PROTECT || pmap_logging)
4599                                                 printf("%#lx %#lx -> %#lx\n",
4600                                                     sva, obits, pbits);
4601 #endif
4602                                 }
4603                         }
4604                 }
4605         }
4606         if (anychanged)
4607                 pmap_invalidate_all(pmap);
4608         PMAP_UNLOCK(pmap);
4609 }
4610
4611 void
4612 mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count)
4613 {
4614
4615         pt_entry_t oldpte, pa, *pte;
4616         vm_page_t m;
4617         uint64_t cache_bits, attr_bits;
4618         vm_offset_t va;
4619
4620         CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count);
4621
4622         attr_bits = RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
4623         va = sva;
4624         pte = kvtopte(va);
4625         while (va < sva + PAGE_SIZE * count) {
4626                 if (__predict_false((va & L3_PAGE_MASK) == 0))
4627                         pte = kvtopte(va);
4628                 MPASS(pte == pmap_pte(kernel_pmap, va));
4629
4630                 /*
4631                  * XXX there has to be a more efficient way than traversing
4632                  * the page table every time - but go for correctness for
4633                  * today
4634                  */
4635
4636                 m = *ma++;
4637                 cache_bits = pmap_cache_bits(m->md.mdpg_cache_attrs);
4638                 pa = VM_PAGE_TO_PHYS(m) | cache_bits | attr_bits;
4639                 if (be64toh(*pte) != pa) {
4640                         oldpte |= be64toh(*pte);
4641                         pte_store(pte, pa);
4642                 }
4643                 va += PAGE_SIZE;
4644                 pte++;
4645         }
4646         if (__predict_false((oldpte & RPTE_VALID) != 0))
4647                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
4648                     PAGE_SIZE);
4649         else
4650                 ptesync();
4651 }
4652
4653 void
4654 mmu_radix_qremove(vm_offset_t sva, int count)
4655 {
4656         vm_offset_t va;
4657         pt_entry_t *pte;
4658
4659         CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count);
4660         KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva));
4661
4662         va = sva;
4663         pte = kvtopte(va);
4664         while (va < sva + PAGE_SIZE * count) {
4665                 if (__predict_false((va & L3_PAGE_MASK) == 0))
4666                         pte = kvtopte(va);
4667                 pte_clear(pte);
4668                 pte++;
4669                 va += PAGE_SIZE;
4670         }
4671         pmap_invalidate_range(kernel_pmap, sva, va);
4672 }
4673
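/*
 * Illustrative sketch (not compiled) of how the two routines above pair up:
 * wire an array of pages into a caller-owned kernel VA window, access them
 * through that window, and then tear the mappings down.  Neither routine
 * allocates page table pages; the kernel page tables covering the window are
 * assumed to exist already.  The function name is hypothetical.
 */
#if 0
static void
mmu_radix_qenter_example(vm_offset_t kva, vm_page_t *ma, int npages)
{

        mmu_radix_qenter(kva, ma, npages);
        /* ... access the pages through kva ... */
        mmu_radix_qremove(kva, npages);
}
#endif
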
4674 /***************************************************
4675  * Page table page management routines.....
4676  ***************************************************/
4677 /*
4678  * Schedule the specified unused page table page to be freed.  Specifically,
4679  * add the page to the specified list of pages that will be released to the
4680  * physical memory manager after the TLB has been updated.
4681  */
4682 static __inline void
4683 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
4684     boolean_t set_PG_ZERO)
4685 {
4686
4687         if (set_PG_ZERO)
4688                 m->flags |= PG_ZERO;
4689         else
4690                 m->flags &= ~PG_ZERO;
4691         SLIST_INSERT_HEAD(free, m, plinks.s.ss);
4692 }
4693
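/*
 * Illustrative sketch (not compiled) of the delayed-free protocol that the
 * helper above supports: removal code collects now-unused page table pages
 * on an spglist and hands them to the physical memory allocator only after
 * the TLB invalidation, as mmu_radix_remove() does below.  The function name
 * is hypothetical.
 */
#if 0
static void
pmap_delayed_free_example(pmap_t pmap)
{
        struct spglist free;

        SLIST_INIT(&free);
        PMAP_LOCK(pmap);
        /* ... removal code queues pages via pmap_add_delayed_free_list() ... */
        pmap_invalidate_all(pmap);
        PMAP_UNLOCK(pmap);
        vm_page_free_pages_toq(&free, true);
}
#endif
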
4694 /*
4695  * Inserts the specified page table page into the specified pmap's collection
4696  * of idle page table pages.  Each of a pmap's page table pages is responsible
4697  * for mapping a distinct range of virtual addresses.  The pmap's collection is
4698  * ordered by this virtual address range.
4699  */
4700 static __inline int
4701 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
4702 {
4703
4704         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4705         return (vm_radix_insert(&pmap->pm_radix, mpte));
4706 }
4707
4708 /*
4709  * Removes the page table page mapping the specified virtual address from the
4710  * specified pmap's collection of idle page table pages, and returns it.
4711  * Otherwise, returns NULL if there is no page table page corresponding to the
4712  * specified virtual address.
4713  */
4714 static __inline vm_page_t
4715 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4716 {
4717
4718         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4719         return (vm_radix_remove(&pmap->pm_radix, pmap_l3e_pindex(va)));
4720 }
4721
4722 /*
4723  * Decrements a page table page's wire count, which is used to record the
4724  * number of valid page table entries within the page.  If the wire count
4725  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
4726  * page table page was unmapped and FALSE otherwise.
4727  */
4728 static inline boolean_t
4729 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4730 {
4731
4732         --m->ref_count;
4733         if (m->ref_count == 0) {
4734                 _pmap_unwire_ptp(pmap, va, m, free);
4735                 return (TRUE);
4736         } else
4737                 return (FALSE);
4738 }
4739
4740 static void
4741 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4742 {
4743
4744         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4745         /*
4746          * unmap the page table page
4747          */
4748         if (m->pindex >= (NUPDE + NUPDPE)) {
4749                 /* PDP page */
4750                 pml1_entry_t *pml1;
4751                 pml1 = pmap_pml1e(pmap, va);
4752                 *pml1 = 0;
4753         } else if (m->pindex >= NUPDE) {
4754                 /* PD page */
4755                 pml2_entry_t *l2e;
4756                 l2e = pmap_pml2e(pmap, va);
4757                 *l2e = 0;
4758         } else {
4759                 /* PTE page */
4760                 pml3_entry_t *l3e;
4761                 l3e = pmap_pml3e(pmap, va);
4762                 *l3e = 0;
4763         }
4764         pmap_resident_count_dec(pmap, 1);
4765         if (m->pindex < NUPDE) {
4766                 /* We just released a PT, unhold the matching PD */
4767                 vm_page_t pdpg;
4768
4769                 pdpg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml2e(pmap, va)) & PG_FRAME);
4770                 pmap_unwire_ptp(pmap, va, pdpg, free);
4771         }
4772         if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
4773                 /* We just released a PD, unhold the matching PDP */
4774                 vm_page_t pdppg;
4775
4776                 pdppg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml1e(pmap, va)) & PG_FRAME);
4777                 pmap_unwire_ptp(pmap, va, pdppg, free);
4778         }
4779
4780         /*
4781          * Put page on a list so that it is released after
4782          * *ALL* TLB shootdown is done
4783          */
4784         pmap_add_delayed_free_list(m, free, TRUE);
4785 }
4786
4787 /*
4788  * After removing a page table entry, this routine is used to
4789  * conditionally free the page, and manage the hold/wire counts.
4790  */
4791 static int
4792 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde,
4793     struct spglist *free)
4794 {
4795         vm_page_t mpte;
4796
4797         if (va >= VM_MAXUSER_ADDRESS)
4798                 return (0);
4799         KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4800         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4801         return (pmap_unwire_ptp(pmap, va, mpte, free));
4802 }
4803
4804 void
4805 mmu_radix_release(pmap_t pmap)
4806 {
4807
4808         CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4809         KASSERT(pmap->pm_stats.resident_count == 0,
4810             ("pmap_release: pmap resident count %ld != 0",
4811             pmap->pm_stats.resident_count));
4812         KASSERT(vm_radix_is_empty(&pmap->pm_radix),
4813             ("pmap_release: pmap has reserved page table page(s)"));
4814
4815         pmap_invalidate_all(pmap);
4816         isa3_proctab[pmap->pm_pid].proctab0 = 0;
4817         uma_zfree(zone_radix_pgd, pmap->pm_pml1);
4818         vmem_free(asid_arena, pmap->pm_pid, 1);
4819 }
4820
4821 /*
4822  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
4823  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
4824  * false if the PV entry cannot be allocated without resorting to reclamation.
4825  */
4826 static bool
4827 pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t pde, u_int flags,
4828     struct rwlock **lockp)
4829 {
4830         struct md_page *pvh;
4831         pv_entry_t pv;
4832         vm_paddr_t pa;
4833
4834         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4835         /* Pass NULL instead of the lock pointer to disable reclamation. */
4836         if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
4837             NULL : lockp)) == NULL)
4838                 return (false);
4839         pv->pv_va = va;
4840         pa = pde & PG_PS_FRAME;
4841         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4842         pvh = pa_to_pvh(pa);
4843         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
4844         pvh->pv_gen++;
4845         return (true);
4846 }
4847
4848 /*
4849  * Fills a page table page with mappings to consecutive physical pages.
4850  */
4851 static void
4852 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
4853 {
4854         pt_entry_t *pte;
4855
4856         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
4857                 *pte = htobe64(newpte);
4858                 newpte += PAGE_SIZE;
4859         }
4860 }
4861
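/*
 * Illustrative sketch (not compiled): a demotion replaces one 2MB leaf with
 * a full PTE page, so pmap_fill_ptp() above writes exactly NPTEPG entries
 * stepped by PAGE_SIZE, which together span the same 2MB region.
 */
#if 0
CTASSERT(L3_PAGE_SIZE == NPTEPG * PAGE_SIZE);
#endif
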
4862 static boolean_t
4863 pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va)
4864 {
4865         struct rwlock *lock;
4866         boolean_t rv;
4867
4868         lock = NULL;
4869         rv = pmap_demote_l3e_locked(pmap, pde, va, &lock);
4870         if (lock != NULL)
4871                 rw_wunlock(lock);
4872         return (rv);
4873 }
4874
4875 static boolean_t
4876 pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
4877     struct rwlock **lockp)
4878 {
4879         pml3_entry_t oldpde;
4880         pt_entry_t *firstpte;
4881         vm_paddr_t mptepa;
4882         vm_page_t mpte;
4883         struct spglist free;
4884         vm_offset_t sva;
4885
4886         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4887         oldpde = be64toh(*l3e);
4888         KASSERT((oldpde & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
4889             ("pmap_demote_l3e: oldpde is missing RPTE_LEAF and/or PG_V %lx",
4890             oldpde));
4891         if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
4892             NULL) {
4893                 KASSERT((oldpde & PG_W) == 0,
4894                     ("pmap_demote_l3e: page table page for a wired mapping"
4895                     " is missing"));
4896
4897                 /*
4898                  * Invalidate the 2MB page mapping and return "failure" if the
4899                  * mapping was never accessed or the allocation of the new
4900                  * page table page fails.  If the 2MB page mapping belongs to
4901                  * the direct map region of the kernel's address space, then
4902                  * the page allocation request specifies the highest possible
4903                  * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
4904                  * normal.  Page table pages are preallocated for every other
4905                  * part of the kernel address space, so the direct map region
4906                  * is the only part of the kernel address space that must be
4907                  * handled here.
4908                  */
4909                 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc_noobj(
4910                     (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS ?
4911                     VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED)) == NULL) {
4912                         SLIST_INIT(&free);
4913                         sva = trunc_2mpage(va);
4914                         pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
4915                         pmap_invalidate_l3e_page(pmap, sva, oldpde);
4916                         vm_page_free_pages_toq(&free, true);
4917                         CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx"
4918                             " in pmap %p", va, pmap);
4919                         return (FALSE);
4920                 }
4921                 mpte->pindex = pmap_l3e_pindex(va);
4922                 if (va < VM_MAXUSER_ADDRESS)
4923                         pmap_resident_count_inc(pmap, 1);
4924         }
4925         mptepa = VM_PAGE_TO_PHYS(mpte);
4926         firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
4927         KASSERT((oldpde & PG_A) != 0,
4928             ("pmap_demote_l3e: oldpde is missing PG_A"));
4929         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
4930             ("pmap_demote_l3e: oldpde is missing PG_M"));
4931
4932         /*
4933          * If the page table page is new, initialize it.
4934          */
4935         if (mpte->ref_count == 1) {
4936                 mpte->ref_count = NPTEPG;
4937                 pmap_fill_ptp(firstpte, oldpde);
4938         }
4939
4940         KASSERT((be64toh(*firstpte) & PG_FRAME) == (oldpde & PG_FRAME),
4941             ("pmap_demote_l3e: firstpte and newpte map different physical"
4942             " addresses"));
4943
4944         /*
4945          * If the mapping has changed attributes, update the page table
4946          * entries.
4947          */
4948         if ((be64toh(*firstpte) & PG_PTE_PROMOTE) != (oldpde & PG_PTE_PROMOTE))
4949                 pmap_fill_ptp(firstpte, oldpde);
4950
4951         /*
4952          * The spare PV entries must be reserved prior to demoting the
4953          * mapping, that is, prior to changing the PDE.  Otherwise, the state
4954          * of the PDE and the PV lists will be inconsistent, which can result
4955          * in reclaim_pv_chunk() attempting to remove a PV entry from the
4956          * wrong PV list and pmap_pv_demote_l3e() failing to find the expected
4957          * PV entry for the 2MB page mapping that is being demoted.
4958          */
4959         if ((oldpde & PG_MANAGED) != 0)
4960                 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
4961
4962         /*
4963          * Demote the mapping.  This pmap is locked.  The old PDE has
4964          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
4965          * set.  Thus, there is no danger of a race with another
4966          * processor changing the setting of PG_A and/or PG_M between
4967          * the read above and the store below.
4968          */
4969         pde_store(l3e, mptepa);
4970         pmap_invalidate_l3e_page(pmap, trunc_2mpage(va), oldpde);
4971         /*
4972          * Demote the PV entry.
4973          */
4974         if ((oldpde & PG_MANAGED) != 0)
4975                 pmap_pv_demote_l3e(pmap, va, oldpde & PG_PS_FRAME, lockp);
4976
4977         atomic_add_long(&pmap_l3e_demotions, 1);
4978         CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx"
4979             " in pmap %p", va, pmap);
4980         return (TRUE);
4981 }
4982
4983 /*
4984  * pmap_remove_kernel_l3e: Remove a kernel superpage mapping.
4985  */
4986 static void
4987 pmap_remove_kernel_l3e(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va)
4988 {
4989         vm_paddr_t mptepa;
4990         vm_page_t mpte;
4991
4992         KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
4993         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4994         mpte = pmap_remove_pt_page(pmap, va);
4995         if (mpte == NULL)
4996                 panic("pmap_remove_kernel_pde: Missing pt page.");
4997
4998         mptepa = VM_PAGE_TO_PHYS(mpte);
4999
5000         /*
5001          * Initialize the page table page.
5002          */
5003         pagezero(PHYS_TO_DMAP(mptepa));
5004
5005         /*
5006          * Demote the mapping.
5007          */
5008         pde_store(l3e, mptepa);
5009         ptesync();
5010 }
5011
5012 /*
5013  * pmap_remove_l3e: unmap a 2MB superpage from a process address space
5014  */
5015 static int
5016 pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
5017     struct spglist *free, struct rwlock **lockp)
5018 {
5019         struct md_page *pvh;
5020         pml3_entry_t oldpde;
5021         vm_offset_t eva, va;
5022         vm_page_t m, mpte;
5023
5024         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5025         KASSERT((sva & L3_PAGE_MASK) == 0,
5026             ("pmap_remove_l3e: sva is not 2mpage aligned"));
5027         oldpde = be64toh(pte_load_clear(pdq));
5028         if (oldpde & PG_W)
5029                 pmap->pm_stats.wired_count -= (L3_PAGE_SIZE / PAGE_SIZE);
5030         pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5031         if (oldpde & PG_MANAGED) {
5032                 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
5033                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
5034                 pmap_pvh_free(pvh, pmap, sva);
5035                 eva = sva + L3_PAGE_SIZE;
5036                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
5037                     va < eva; va += PAGE_SIZE, m++) {
5038                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
5039                                 vm_page_dirty(m);
5040                         if (oldpde & PG_A)
5041                                 vm_page_aflag_set(m, PGA_REFERENCED);
5042                         if (TAILQ_EMPTY(&m->md.pv_list) &&
5043                             TAILQ_EMPTY(&pvh->pv_list))
5044                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
5045                 }
5046         }
5047         if (pmap == kernel_pmap) {
5048                 pmap_remove_kernel_l3e(pmap, pdq, sva);
5049         } else {
5050                 mpte = pmap_remove_pt_page(pmap, sva);
5051                 if (mpte != NULL) {
5052                         pmap_resident_count_dec(pmap, 1);
5053                         KASSERT(mpte->ref_count == NPTEPG,
5054                             ("pmap_remove_l3e: pte page wire count error"));
5055                         mpte->ref_count = 0;
5056                         pmap_add_delayed_free_list(mpte, free, FALSE);
5057                 }
5058         }
5059         return (pmap_unuse_pt(pmap, sva, be64toh(*pmap_pml2e(pmap, sva)), free));
5060 }
5061
5062 /*
5063  * pmap_remove_pte: unmap a single 4KB page from a process address space
5064  */
5065 static int
5066 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
5067     pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
5068 {
5069         struct md_page *pvh;
5070         pt_entry_t oldpte;
5071         vm_page_t m;
5072
5073         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5074         oldpte = be64toh(pte_load_clear(ptq));
5075         if (oldpte & RPTE_WIRED)
5076                 pmap->pm_stats.wired_count -= 1;
5077         pmap_resident_count_dec(pmap, 1);
5078         if (oldpte & RPTE_MANAGED) {
5079                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
5080                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5081                         vm_page_dirty(m);
5082                 if (oldpte & PG_A)
5083                         vm_page_aflag_set(m, PGA_REFERENCED);
5084                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5085                 pmap_pvh_free(&m->md, pmap, va);
5086                 if (TAILQ_EMPTY(&m->md.pv_list) &&
5087                     (m->flags & PG_FICTITIOUS) == 0) {
5088                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5089                         if (TAILQ_EMPTY(&pvh->pv_list))
5090                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
5091                 }
5092         }
5093         return (pmap_unuse_pt(pmap, va, ptepde, free));
5094 }
5095
5096 /*
5097  * Remove a single page from a process address space
5098  */
5099 static bool
5100 pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *l3e,
5101     struct spglist *free)
5102 {
5103         struct rwlock *lock;
5104         pt_entry_t *pte;
5105         bool invalidate_all;
5106
5107         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5108         if ((be64toh(*l3e) & RPTE_VALID) == 0) {
5109                 return (false);
5110         }
5111         pte = pmap_l3e_to_pte(l3e, va);
5112         if ((be64toh(*pte) & RPTE_VALID) == 0) {
5113                 return (false);
5114         }
5115         lock = NULL;
5116
5117         invalidate_all = pmap_remove_pte(pmap, pte, va, be64toh(*l3e), free, &lock);
5118         if (lock != NULL)
5119                 rw_wunlock(lock);
5120         if (!invalidate_all)
5121                 pmap_invalidate_page(pmap, va);
5122         return (invalidate_all);
5123 }
5124
5125 /*
5126  * Removes the specified range of addresses from the page table page.
5127  */
5128 static bool
5129 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5130     pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp)
5131 {
5132         pt_entry_t *pte;
5133         vm_offset_t va;
5134         bool anyvalid;
5135
5136         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5137         anyvalid = false;
5138         va = eva;
5139         for (pte = pmap_l3e_to_pte(l3e, sva); sva != eva; pte++,
5140             sva += PAGE_SIZE) {
5141                 MPASS(pte == pmap_pte(pmap, sva));
5142                 if (*pte == 0) {
5143                         if (va != eva) {
5144                                 anyvalid = true;
5145                                 va = eva;
5146                         }
5147                         continue;
5148                 }
5149                 if (va == eva)
5150                         va = sva;
5151                 if (pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), free, lockp)) {
5152                         anyvalid = true;
5153                         sva += PAGE_SIZE;
5154                         break;
5155                 }
5156         }
5157         if (anyvalid)
5158                 pmap_invalidate_all(pmap);
5159         else if (va != eva)
5160                 pmap_invalidate_range(pmap, va, sva);
5161         return (anyvalid);
5162 }
5163
5164 void
5165 mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5166 {
5167         struct rwlock *lock;
5168         vm_offset_t va_next;
5169         pml1_entry_t *l1e;
5170         pml2_entry_t *l2e;
5171         pml3_entry_t ptpaddr, *l3e;
5172         struct spglist free;
5173         bool anyvalid;
5174
5175         CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5176
5177         /*
5178          * Perform an unsynchronized read.  This is, however, safe.
5179          */
5180         if (pmap->pm_stats.resident_count == 0)
5181                 return;
5182
5183         anyvalid = false;
5184         SLIST_INIT(&free);
5185
5186         /* XXX something fishy here */
5187         sva = (sva + PAGE_MASK) & ~PAGE_MASK;
5188         eva = (eva + PAGE_MASK) & ~PAGE_MASK;
5189
5190         PMAP_LOCK(pmap);
5191
5192         /*
5193          * Special handling for removing a single page: this is a very
5194          * common operation, and short-circuiting the full page table
5195          * walk below is an easy win.
5196          */
5197         if (sva + PAGE_SIZE == eva) {
5198                 l3e = pmap_pml3e(pmap, sva);
5199                 if (l3e && (be64toh(*l3e) & RPTE_LEAF) == 0) {
5200                         anyvalid = pmap_remove_page(pmap, sva, l3e, &free);
5201                         goto out;
5202                 }
5203         }
5204
5205         lock = NULL;
5206         for (; sva < eva; sva = va_next) {
5207                 if (pmap->pm_stats.resident_count == 0)
5208                         break;
5209                 l1e = pmap_pml1e(pmap, sva);
5210                 if (l1e == NULL || (be64toh(*l1e) & PG_V) == 0) {
5211                         va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5212                         if (va_next < sva)
5213                                 va_next = eva;
5214                         continue;
5215                 }
5216
5217                 l2e = pmap_l1e_to_l2e(l1e, sva);
5218                 if (l2e == NULL || (be64toh(*l2e) & PG_V) == 0) {
5219                         va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5220                         if (va_next < sva)
5221                                 va_next = eva;
5222                         continue;
5223                 }
5224
5225                 /*
5226                  * Calculate index for next page table.
5227                  */
5228                 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5229                 if (va_next < sva)
5230                         va_next = eva;
5231
5232                 l3e = pmap_l2e_to_l3e(l2e, sva);
5233                 ptpaddr = be64toh(*l3e);
5234
5235                 /*
5236                  * Weed out invalid mappings.
5237                  */
5238                 if (ptpaddr == 0)
5239                         continue;
5240
5241                 /*
5242                  * Check for large page.
5243                  */
5244                 if ((ptpaddr & RPTE_LEAF) != 0) {
5245                         /*
5246                          * Are we removing the entire large page?  If not,
5247                          * demote the mapping and fall through.
5248                          */
5249                         if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5250                                 pmap_remove_l3e(pmap, l3e, sva, &free, &lock);
5251                                 anyvalid = true;
5252                                 continue;
5253                         } else if (!pmap_demote_l3e_locked(pmap, l3e, sva,
5254                             &lock)) {
5255                                 /* The large page mapping was destroyed. */
5256                                 continue;
5257                         } else
5258                                 ptpaddr = be64toh(*l3e);
5259                 }
5260
5261                 /*
5262                  * Limit our scan to either the end of the va represented
5263                  * by the current page table page, or to the end of the
5264                  * range being removed.
5265                  */
5266                 if (va_next > eva)
5267                         va_next = eva;
5268
5269                 if (pmap_remove_ptes(pmap, sva, va_next, l3e, &free, &lock))
5270                         anyvalid = true;
5271         }
5272         if (lock != NULL)
5273                 rw_wunlock(lock);
5274 out:
5275         if (anyvalid)
5276                 pmap_invalidate_all(pmap);
5277         PMAP_UNLOCK(pmap);
5278         vm_page_free_pages_toq(&free, true);
5279 }
5280
5281 void
5282 mmu_radix_remove_all(vm_page_t m)
5283 {
5284         struct md_page *pvh;
5285         pv_entry_t pv;
5286         pmap_t pmap;
5287         struct rwlock *lock;
5288         pt_entry_t *pte, tpte;
5289         pml3_entry_t *l3e;
5290         vm_offset_t va;
5291         struct spglist free;
5292         int pvh_gen, md_gen;
5293
5294         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5295         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5296             ("pmap_remove_all: page %p is not managed", m));
5297         SLIST_INIT(&free);
5298         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5299         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5300             pa_to_pvh(VM_PAGE_TO_PHYS(m));
5301 retry:
5302         rw_wlock(lock);
5303         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
5304                 pmap = PV_PMAP(pv);
5305                 if (!PMAP_TRYLOCK(pmap)) {
5306                         pvh_gen = pvh->pv_gen;
5307                         rw_wunlock(lock);
5308                         PMAP_LOCK(pmap);
5309                         rw_wlock(lock);
5310                         if (pvh_gen != pvh->pv_gen) {
5311                                 rw_wunlock(lock);
5312                                 PMAP_UNLOCK(pmap);
5313                                 goto retry;
5314                         }
5315                 }
5316                 va = pv->pv_va;
5317                 l3e = pmap_pml3e(pmap, va);
5318                 (void)pmap_demote_l3e_locked(pmap, l3e, va, &lock);
5319                 PMAP_UNLOCK(pmap);
5320         }
5321         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
5322                 pmap = PV_PMAP(pv);
5323                 if (!PMAP_TRYLOCK(pmap)) {
5324                         pvh_gen = pvh->pv_gen;
5325                         md_gen = m->md.pv_gen;
5326                         rw_wunlock(lock);
5327                         PMAP_LOCK(pmap);
5328                         rw_wlock(lock);
5329                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5330                                 rw_wunlock(lock);
5331                                 PMAP_UNLOCK(pmap);
5332                                 goto retry;
5333                         }
5334                 }
5335                 pmap_resident_count_dec(pmap, 1);
5336                 l3e = pmap_pml3e(pmap, pv->pv_va);
5337                 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_remove_all: found"
5338                     " a 2mpage in page %p's pv list", m));
5339                 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5340                 tpte = be64toh(pte_load_clear(pte));
5341                 if (tpte & PG_W)
5342                         pmap->pm_stats.wired_count--;
5343                 if (tpte & PG_A)
5344                         vm_page_aflag_set(m, PGA_REFERENCED);
5345
5346                 /*
5347                  * Update the vm_page_t clean and reference bits.
5348                  */
5349                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5350                         vm_page_dirty(m);
5351                 pmap_unuse_pt(pmap, pv->pv_va, be64toh(*l3e), &free);
5352                 pmap_invalidate_page(pmap, pv->pv_va);
5353                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5354                 m->md.pv_gen++;
5355                 free_pv_entry(pmap, pv);
5356                 PMAP_UNLOCK(pmap);
5357         }
5358         vm_page_aflag_clear(m, PGA_WRITEABLE);
5359         rw_wunlock(lock);
5360         vm_page_free_pages_toq(&free, true);
5361 }
5362
5363 /*
5364  * Destroy all managed, non-wired mappings in the given user-space
5365  * pmap.  This pmap cannot be active on any processor besides the
5366  * caller.
5367  *
5368  * This function cannot be applied to the kernel pmap.  Moreover, it
5369  * is not intended for general use.  It is only to be used during
5370  * process termination.  Consequently, it can be implemented in ways
5371  * that make it faster than pmap_remove().  First, it can more quickly
5372  * destroy mappings by iterating over the pmap's collection of PV
5373  * entries, rather than searching the page table.  Second, it doesn't
5374  * have to test and clear the page table entries atomically, because
5375  * no processor is currently accessing the user address space.  In
5376  * particular, a page table entry's dirty bit won't change state once
5377  * this function starts.
5378  *
5379  * Although this function destroys all of the pmap's managed,
5380  * non-wired mappings, it can delay and batch the invalidation of TLB
5381  * entries without calling pmap_delayed_invl_started() and
5382  * pmap_delayed_invl_finished().  Because the pmap is not active on
5383  * any other processor, none of these TLB entries will ever be used
5384  * before their eventual invalidation.  Consequently, there is no need
5385  * for either pmap_remove_all() or pmap_remove_write() to wait for
5386  * that eventual TLB invalidation.
5387  */
5388
5389 void
5390 mmu_radix_remove_pages(pmap_t pmap)
5391 {
5392         pml3_entry_t ptel3e;
5393         pt_entry_t *pte, tpte;
5394         struct spglist free;
5395         vm_page_t m, mpte, mt;
5396         pv_entry_t pv;
5397         struct md_page *pvh;
5398         struct pv_chunk *pc, *npc;
5399         struct rwlock *lock;
5400         int64_t bit;
5401         uint64_t inuse, bitmask;
5402         int allfree, field, idx;
5403 #ifdef PV_STATS
5404         int freed;
5405 #endif
5406         boolean_t superpage;
5407         vm_paddr_t pa;
5408
5409         CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
5410
5411         /*
5412          * Assert that the given pmap is only active on the current
5413          * CPU.  Unfortunately, we cannot block another CPU from
5414          * activating the pmap while this function is executing.
5415          */
5416         KASSERT(pmap->pm_pid == mfspr(SPR_PID),
5417             ("non-current asid %lu - expected %lu", pmap->pm_pid,
5418             mfspr(SPR_PID)));
5419
5420         lock = NULL;
5421
5422         SLIST_INIT(&free);
5423         PMAP_LOCK(pmap);
5424         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
5425                 allfree = 1;
5426 #ifdef PV_STATS
5427                 freed = 0;
5428 #endif
5429                 for (field = 0; field < _NPCM; field++) {
5430                         inuse = ~pc->pc_map[field] & pc_freemask[field];
5431                         while (inuse != 0) {
5432                                 bit = cnttzd(inuse);
5433                                 bitmask = 1UL << bit;
5434                                 idx = field * 64 + bit;
5435                                 pv = &pc->pc_pventry[idx];
5436                                 inuse &= ~bitmask;
5437
5438                                 pte = pmap_pml2e(pmap, pv->pv_va);
5439                                 ptel3e = be64toh(*pte);
5440                                 pte = pmap_l2e_to_l3e(pte, pv->pv_va);
5441                                 tpte = be64toh(*pte);
5442                                 if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) {
5443                                         superpage = FALSE;
5444                                         ptel3e = tpte;
5445                                         pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
5446                                             PG_FRAME);
5447                                         pte = &pte[pmap_pte_index(pv->pv_va)];
5448                                         tpte = be64toh(*pte);
5449                                 } else {
5450                                         /*
5451                                          * Keep track whether 'tpte' is a
5452                                          * superpage explicitly instead of
5453                                          * relying on RPTE_LEAF being set.
5454                                          *
5455                                          * This is because RPTE_LEAF is numerically
5456                                          * identical to PG_PTE_PAT and thus a
5457                                          * regular page could be mistaken for
5458                                          * a superpage.
5459                                          */
5460                                         superpage = TRUE;
5461                                 }
5462
5463                                 if ((tpte & PG_V) == 0) {
5464                                         panic("bad pte va %lx pte %lx",
5465                                             pv->pv_va, tpte);
5466                                 }
5467
5468 /*
5469  * We cannot remove wired pages from a process' mapping at this time
5470  */
5471                                 if (tpte & PG_W) {
5472                                         allfree = 0;
5473                                         continue;
5474                                 }
5475
5476                                 if (superpage)
5477                                         pa = tpte & PG_PS_FRAME;
5478                                 else
5479                                         pa = tpte & PG_FRAME;
5480
5481                                 m = PHYS_TO_VM_PAGE(pa);
5482                                 KASSERT(m->phys_addr == pa,
5483                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5484                                     m, (uintmax_t)m->phys_addr,
5485                                     (uintmax_t)tpte));
5486
5487                                 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5488                                     m < &vm_page_array[vm_page_array_size],
5489                                     ("pmap_remove_pages: bad tpte %#jx",
5490                                     (uintmax_t)tpte));
5491
5492                                 pte_clear(pte);
5493
5494                                 /*
5495                                  * Update the vm_page_t clean/reference bits.
5496                                  */
5497                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5498                                         if (superpage) {
5499                                                 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5500                                                         vm_page_dirty(mt);
5501                                         } else
5502                                                 vm_page_dirty(m);
5503                                 }
5504
5505                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5506
5507                                 /* Mark free */
5508                                 pc->pc_map[field] |= bitmask;
5509                                 if (superpage) {
5510                                         pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5511                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
5512                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
5513                                         pvh->pv_gen++;
5514                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
5515                                                 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5516                                                         if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
5517                                                             TAILQ_EMPTY(&mt->md.pv_list))
5518                                                                 vm_page_aflag_clear(mt, PGA_WRITEABLE);
5519                                         }
5520                                         mpte = pmap_remove_pt_page(pmap, pv->pv_va);
5521                                         if (mpte != NULL) {
5522                                                 pmap_resident_count_dec(pmap, 1);
5523                                                 KASSERT(mpte->ref_count == NPTEPG,
5524                                                     ("pmap_remove_pages: pte page wire count error"));
5525                                                 mpte->ref_count = 0;
5526                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
5527                                         }
5528                                 } else {
5529                                         pmap_resident_count_dec(pmap, 1);
5530 #ifdef VERBOSE_PV
5531                                         printf("freeing pv (%p, %p)\n",
5532                                                    pmap, pv);
5533 #endif
5534                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5535                                         m->md.pv_gen++;
5536                                         if ((m->a.flags & PGA_WRITEABLE) != 0 &&
5537                                             TAILQ_EMPTY(&m->md.pv_list) &&
5538                                             (m->flags & PG_FICTITIOUS) == 0) {
5539                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5540                                                 if (TAILQ_EMPTY(&pvh->pv_list))
5541                                                         vm_page_aflag_clear(m, PGA_WRITEABLE);
5542                                         }
5543                                 }
5544                                 pmap_unuse_pt(pmap, pv->pv_va, ptel3e, &free);
5545 #ifdef PV_STATS
5546                                 freed++;
5547 #endif
5548                         }
5549                 }
5550                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5551                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5552                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5553                 if (allfree) {
5554                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5555                         free_pv_chunk(pc);
5556                 }
5557         }
5558         if (lock != NULL)
5559                 rw_wunlock(lock);
5560         pmap_invalidate_all(pmap);
5561         PMAP_UNLOCK(pmap);
5562         vm_page_free_pages_toq(&free, true);
5563 }
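
/*
 * Illustrative sketch (editor's addition, not part of the build): the
 * removal loop above visits every allocated pv_entry by scanning each
 * pv_chunk's inverted free bitmap with a count-trailing-zeros step.
 * The standalone program below reproduces that bit-scanning idiom;
 * __builtin_ctzll stands in for cnttzd, and the EX_* names and mask
 * values are made-up examples, not kernel definitions.
 */
#if 0	/* hosted example; compile separately, never in the kernel */
#include <stdio.h>
#include <stdint.h>

#define EX_NPCM	3	/* hypothetical number of bitmap words per chunk */

int
main(void)
{
	/* ~pc_map & pc_freemask yields the in-use entries, as above. */
	uint64_t pc_map[EX_NPCM] = { ~0x15UL, ~0UL, ~0x80000000UL };
	uint64_t pc_freemask[EX_NPCM] = { ~0UL, ~0UL, ~0UL };

	for (int field = 0; field < EX_NPCM; field++) {
		uint64_t inuse = ~pc_map[field] & pc_freemask[field];
		while (inuse != 0) {
			int bit = __builtin_ctzll(inuse);	/* cnttzd stand-in */
			int idx = field * 64 + bit;
			printf("visit pv_entry index %d\n", idx);
			inuse &= ~(1ULL << bit);
		}
	}
	return (0);
}
#endif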
5564
5565 void
5566 mmu_radix_remove_write(vm_page_t m)
5567 {
5568         struct md_page *pvh;
5569         pmap_t pmap;
5570         struct rwlock *lock;
5571         pv_entry_t next_pv, pv;
5572         pml3_entry_t *l3e;
5573         pt_entry_t oldpte, *pte;
5574         int pvh_gen, md_gen;
5575
5576         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5577         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5578             ("pmap_remove_write: page %p is not managed", m));
5579         vm_page_assert_busied(m);
5580
5581         if (!pmap_page_is_write_mapped(m))
5582                 return;
5583         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5584         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5585             pa_to_pvh(VM_PAGE_TO_PHYS(m));
5586 retry_pv_loop:
5587         rw_wlock(lock);
5588         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
5589                 pmap = PV_PMAP(pv);
5590                 if (!PMAP_TRYLOCK(pmap)) {
5591                         pvh_gen = pvh->pv_gen;
5592                         rw_wunlock(lock);
5593                         PMAP_LOCK(pmap);
5594                         rw_wlock(lock);
5595                         if (pvh_gen != pvh->pv_gen) {
5596                                 PMAP_UNLOCK(pmap);
5597                                 rw_wunlock(lock);
5598                                 goto retry_pv_loop;
5599                         }
5600                 }
5601                 l3e = pmap_pml3e(pmap, pv->pv_va);
5602                 if ((be64toh(*l3e) & PG_RW) != 0)
5603                         (void)pmap_demote_l3e_locked(pmap, l3e, pv->pv_va, &lock);
5604                 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5605                     ("inconsistent pv lock %p %p for page %p",
5606                     lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5607                 PMAP_UNLOCK(pmap);
5608         }
5609         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
5610                 pmap = PV_PMAP(pv);
5611                 if (!PMAP_TRYLOCK(pmap)) {
5612                         pvh_gen = pvh->pv_gen;
5613                         md_gen = m->md.pv_gen;
5614                         rw_wunlock(lock);
5615                         PMAP_LOCK(pmap);
5616                         rw_wlock(lock);
5617                         if (pvh_gen != pvh->pv_gen ||
5618                             md_gen != m->md.pv_gen) {
5619                                 PMAP_UNLOCK(pmap);
5620                                 rw_wunlock(lock);
5621                                 goto retry_pv_loop;
5622                         }
5623                 }
5624                 l3e = pmap_pml3e(pmap, pv->pv_va);
5625                 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0,
5626                     ("pmap_remove_write: found a 2mpage in page %p's pv list",
5627                     m));
5628                 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5629 retry:
5630                 oldpte = be64toh(*pte);
5631                 if (oldpte & PG_RW) {
5632                         if (!atomic_cmpset_long(pte, htobe64(oldpte),
5633                             htobe64((oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M))))
5634                                 goto retry;
5635                         if ((oldpte & PG_M) != 0)
5636                                 vm_page_dirty(m);
5637                         pmap_invalidate_page(pmap, pv->pv_va);
5638                 }
5639                 PMAP_UNLOCK(pmap);
5640         }
5641         rw_wunlock(lock);
5642         vm_page_aflag_clear(m, PGA_WRITEABLE);
5643 }
5644
5645 /*
5646  *      Clear the wired attribute from the mappings for the specified range of
5647  *      addresses in the given pmap.  Every valid mapping within that range
5648  *      must have the wired attribute set.  In contrast, invalid mappings
5649  *      cannot have the wired attribute set, so they are ignored.
5650  *
5651  *      The wired attribute of the page table entry is not a hardware
5652  *      feature, so there is no need to invalidate any TLB entries.
5653  *      Since pmap_demote_l3e() for the wired entry must never fail,
5654  *      pmap_delayed_invl_started()/finished() calls around the
5655  *      function are not needed.
5656  */
5657 void
5658 mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5659 {
5660         vm_offset_t va_next;
5661         pml1_entry_t *l1e;
5662         pml2_entry_t *l2e;
5663         pml3_entry_t *l3e;
5664         pt_entry_t *pte;
5665
5666         CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5667         PMAP_LOCK(pmap);
5668         for (; sva < eva; sva = va_next) {
5669                 l1e = pmap_pml1e(pmap, sva);
5670                 if ((be64toh(*l1e) & PG_V) == 0) {
5671                         va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5672                         if (va_next < sva)
5673                                 va_next = eva;
5674                         continue;
5675                 }
5676                 l2e = pmap_l1e_to_l2e(l1e, sva);
5677                 if ((be64toh(*l2e) & PG_V) == 0) {
5678                         va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5679                         if (va_next < sva)
5680                                 va_next = eva;
5681                         continue;
5682                 }
5683                 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5684                 if (va_next < sva)
5685                         va_next = eva;
5686                 l3e = pmap_l2e_to_l3e(l2e, sva);
5687                 if ((be64toh(*l3e) & PG_V) == 0)
5688                         continue;
5689                 if ((be64toh(*l3e) & RPTE_LEAF) != 0) {
5690                         if ((be64toh(*l3e) & PG_W) == 0)
5691                                 panic("pmap_unwire: pde %#jx is missing PG_W",
5692                                     (uintmax_t)(be64toh(*l3e)));
5693
5694                         /*
5695                          * Are we unwiring the entire large page?  If not,
5696                          * demote the mapping and fall through.
5697                          */
5698                         if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5699                                 atomic_clear_long(l3e, htobe64(PG_W));
5700                                 pmap->pm_stats.wired_count -= L3_PAGE_SIZE /
5701                                     PAGE_SIZE;
5702                                 continue;
5703                         } else if (!pmap_demote_l3e(pmap, l3e, sva))
5704                                 panic("pmap_unwire: demotion failed");
5705                 }
5706                 if (va_next > eva)
5707                         va_next = eva;
5708                 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
5709                     sva += PAGE_SIZE) {
5710                         MPASS(pte == pmap_pte(pmap, sva));
5711                         if ((be64toh(*pte) & PG_V) == 0)
5712                                 continue;
5713                         if ((be64toh(*pte) & PG_W) == 0)
5714                                 panic("pmap_unwire: pte %#jx is missing PG_W",
5715                                     (uintmax_t)(be64toh(*pte)));
5716
5717                         /*
5718                          * PG_W must be cleared atomically.  Although the pmap
5719                          * lock synchronizes access to PG_W, another processor
5720                          * could be setting PG_M and/or PG_A concurrently.
5721                          */
5722                         atomic_clear_long(pte, htobe64(PG_W));
5723                         pmap->pm_stats.wired_count--;
5724                 }
5725         }
5726         PMAP_UNLOCK(pmap);
5727 }
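
/*
 * Illustrative sketch (editor's addition, not part of the build): the
 * unwire loop above advances in L3 (2MB) steps by rounding sva up to
 * the next boundary and clamping to eva, both on wraparound and when
 * the boundary overshoots the range.  The standalone example below
 * shows the same arithmetic; the EX_* constants are assumed values.
 */
#if 0	/* hosted example; compile separately, never in the kernel */
#include <stdio.h>

#define EX_L3_PAGE_SIZE	(1UL << 21)		/* assumed 2MB */
#define EX_L3_PAGE_MASK	(EX_L3_PAGE_SIZE - 1)

int
main(void)
{
	unsigned long sva = 0x1234567000UL, eva = 0x1234a00000UL;
	unsigned long va_next;

	for (; sva < eva; sva = va_next) {
		va_next = (sva + EX_L3_PAGE_SIZE) & ~EX_L3_PAGE_MASK;
		if (va_next < sva)	/* wrapped past the top of the address space */
			va_next = eva;
		if (va_next > eva)	/* partial final chunk */
			va_next = eva;
		printf("process [%#lx, %#lx)\n", sva, va_next);
	}
	return (0);
}
#endif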
5728
5729 void
5730 mmu_radix_zero_page(vm_page_t m)
5731 {
5732         vm_offset_t addr;
5733
5734         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5735         addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5736         pagezero(addr);
5737 }
5738
5739 void
5740 mmu_radix_zero_page_area(vm_page_t m, int off, int size)
5741 {
5742         caddr_t addr;
5743
5744         CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
5745         MPASS(off + size <= PAGE_SIZE);
5746         addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5747         memset(addr + off, 0, size);
5748 }
5749
5750 static int
5751 mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5752 {
5753         pml3_entry_t *l3ep;
5754         pt_entry_t pte;
5755         vm_paddr_t pa;
5756         int val;
5757
5758         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
5759         PMAP_LOCK(pmap);
5760
5761         l3ep = pmap_pml3e(pmap, addr);
5762         if (l3ep != NULL && (be64toh(*l3ep) & PG_V)) {
5763                 if (be64toh(*l3ep) & RPTE_LEAF) {
5764                         pte = be64toh(*l3ep);
5765                         /* Compute the physical address of the 4KB page. */
5766                         pa = ((be64toh(*l3ep) & PG_PS_FRAME) | (addr & L3_PAGE_MASK)) &
5767                             PG_FRAME;
5768                         val = MINCORE_PSIND(1);
5769                 } else {
5770                         /* Native-endian PTE copy; do not pass it to helpers expecting a big-endian PTE. */
5771                         pte = be64toh(*pmap_l3e_to_pte(l3ep, addr));
5772                         pa = pte & PG_FRAME;
5773                         val = 0;
5774                 }
5775         } else {
5776                 pte = 0;
5777                 pa = 0;
5778                 val = 0;
5779         }
5780         if ((pte & PG_V) != 0) {
5781                 val |= MINCORE_INCORE;
5782                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5783                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5784                 if ((pte & PG_A) != 0)
5785                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5786         }
5787         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5788             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
5789             (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5790                 *locked_pa = pa;
5791         }
5792         PMAP_UNLOCK(pmap);
5793         return (val);
5794 }
5795
5796 void
5797 mmu_radix_activate(struct thread *td)
5798 {
5799         pmap_t pmap;
5800         uint32_t curpid;
5801
5802         CTR2(KTR_PMAP, "%s(%p)", __func__, td);
5803         critical_enter();
5804         pmap = vmspace_pmap(td->td_proc->p_vmspace);
5805         curpid = mfspr(SPR_PID);
5806         if (pmap->pm_pid > isa3_base_pid &&
5807                 curpid != pmap->pm_pid) {
5808                 mmu_radix_pid_set(pmap);
5809         }
5810         critical_exit();
5811 }
5812
5813 /*
5814  *      Increase the starting virtual address of the given mapping if a
5815  *      different alignment might result in more superpage mappings.
5816  */
5817 void
5818 mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
5819     vm_offset_t *addr, vm_size_t size)
5820 {
5821
5822         CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
5823             size);
5824         vm_offset_t superpage_offset;
5825
5826         if (size < L3_PAGE_SIZE)
5827                 return;
5828         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5829                 offset += ptoa(object->pg_color);
5830         superpage_offset = offset & L3_PAGE_MASK;
5831         if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE ||
5832             (*addr & L3_PAGE_MASK) == superpage_offset)
5833                 return;
5834         if ((*addr & L3_PAGE_MASK) < superpage_offset)
5835                 *addr = (*addr & ~L3_PAGE_MASK) + superpage_offset;
5836         else
5837                 *addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset;
5838 }
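
/*
 * Illustrative sketch (editor's addition, not part of the build): a
 * worked example of the alignment arithmetic above, assuming a 2MB L3
 * superpage.  ex_align_superpage() mirrors the logic with plain
 * unsigned longs; the EX_* constants and the sample inputs are made up.
 */
#if 0	/* hosted example; compile separately, never in the kernel */
#include <stdio.h>

#define EX_L3_PAGE_SIZE	(1UL << 21)		/* assumed 2MB superpage */
#define EX_L3_PAGE_MASK	(EX_L3_PAGE_SIZE - 1)

static unsigned long
ex_align_superpage(unsigned long offset, unsigned long addr, unsigned long size)
{
	unsigned long superpage_offset;

	if (size < EX_L3_PAGE_SIZE)
		return (addr);
	superpage_offset = offset & EX_L3_PAGE_MASK;
	if (size - ((EX_L3_PAGE_SIZE - superpage_offset) & EX_L3_PAGE_MASK) <
	    EX_L3_PAGE_SIZE || (addr & EX_L3_PAGE_MASK) == superpage_offset)
		return (addr);
	if ((addr & EX_L3_PAGE_MASK) < superpage_offset)
		return ((addr & ~EX_L3_PAGE_MASK) + superpage_offset);
	return (((addr + EX_L3_PAGE_MASK) & ~EX_L3_PAGE_MASK) + superpage_offset);
}

int
main(void)
{
	/* Object offset 0x10000, candidate address 0x70050000, 8MB mapping. */
	printf("%#lx\n", ex_align_superpage(0x10000UL, 0x70050000UL, 8UL << 20));
	return (0);
}
#endif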
5839
5840 static void *
5841 mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
5842 {
5843         vm_offset_t va, tmpva, ppa, offset;
5844
5845         ppa = trunc_page(pa);
5846         offset = pa & PAGE_MASK;
5847         size = roundup2(offset + size, PAGE_SIZE);
5848         if (pa < powerpc_ptob(Maxmem))
5849                 panic("bad pa: %#lx less than Maxmem %#lx\n",
5850                           pa, powerpc_ptob(Maxmem));
5851         va = kva_alloc(size);
5852         if (bootverbose)
5853                 printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr);
5854         KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr));
5855
5856         if (!va)
5857                 panic("%s: Couldn't alloc kernel virtual memory", __func__);
5858
5859         for (tmpva = va; size > 0;) {
5860                 mmu_radix_kenter_attr(tmpva, ppa, attr);
5861                 size -= PAGE_SIZE;
5862                 tmpva += PAGE_SIZE;
5863                 ppa += PAGE_SIZE;
5864         }
5865         ptesync();
5866
5867         return ((void *)(va + offset));
5868 }
5869
5870 static void *
5871 mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
5872 {
5873
5874         CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
5875
5876         return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
5877 }
5878
5879 void
5880 mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5881 {
5882
5883         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
5884         m->md.mdpg_cache_attrs = ma;
5885
5886         /*
5887          * If "m" is a normal page, update its direct mapping.  This update
5888          * can be relied upon to perform any cache operations that are
5889          * required for data coherence.
5890          */
5891         if ((m->flags & PG_FICTITIOUS) == 0 &&
5892             mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
5893             PAGE_SIZE, m->md.mdpg_cache_attrs))
5894                 panic("memory attribute change on the direct map failed");
5895 }
5896
5897 static void
5898 mmu_radix_unmapdev(vm_offset_t va, vm_size_t size)
5899 {
5900         vm_offset_t offset;
5901
5902         CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
5903         /* If pmap_mapdev handed out a direct-map address, there is nothing to undo. */
5904         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
5905                 return;
5906
5907         offset = va & PAGE_MASK;
5908         size = round_page(offset + size);
5909         va = trunc_page(va);
5910
5911         if (pmap_initialized) {
5912                 mmu_radix_qremove(va, atop(size));
5913                 kva_free(va, size);
5914         }
5915 }
5916
5917 void
5918 mmu_radix_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
5919 {
5920         vm_paddr_t pa = 0;
5921         int sync_sz;
5922
5923         while (sz > 0) {
5924                 pa = pmap_extract(pm, va);
5925                 sync_sz = PAGE_SIZE - (va & PAGE_MASK);
5926                 sync_sz = min(sync_sz, sz);
5927                 if (pa != 0) {
5928                         pa += (va & PAGE_MASK);
5929                         __syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
5930                 }
5931                 va += sync_sz;
5932                 sz -= sync_sz;
5933         }
5934 }
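
/*
 * Illustrative sketch (editor's addition, not part of the build): the
 * sync_icache loop above splits [va, va + sz) at page boundaries so
 * each __syncicache() call covers at most one page.  The standalone
 * example below demonstrates the same chunking with an assumed 4KB
 * page size; the EX_* names are not kernel definitions.
 */
#if 0	/* hosted example; compile separately, never in the kernel */
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(EX_PAGE_SIZE - 1)

int
main(void)
{
	unsigned long va = 0x10000f00UL, sz = 0x2300UL;
	unsigned long chunk;

	while (sz > 0) {
		chunk = EX_PAGE_SIZE - (va & EX_PAGE_MASK);
		if (chunk > sz)
			chunk = sz;
		printf("sync [%#lx, %#lx)\n", va, va + chunk);
		va += chunk;
		sz -= chunk;
	}
	return (0);
}
#endif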
5935
5936 static __inline void
5937 pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask)
5938 {
5939         uint64_t opte, npte;
5940
5941         /*
5942          * Spin with a 64-bit compare-and-swap until the cache mode
5943          * bits hold the requested value.
5944          */
5945         do {
5946                 opte = be64toh(*pte);
5947                 npte = opte & ~mask;
5948                 npte |= cache_bits;
5949         } while (npte != opte && !atomic_cmpset_long(pte, htobe64(opte), htobe64(npte)));
5950 }
5951
5952 /*
5953  * Tries to demote a 1GB page mapping.
5954  */
5955 static boolean_t
5956 pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
5957 {
5958         pml2_entry_t oldpdpe;
5959         pml3_entry_t *firstpde, newpde, *pde;
5960         vm_paddr_t pdpgpa;
5961         vm_page_t pdpg;
5962
5963         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5964         oldpdpe = be64toh(*l2e);
5965         KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
5966             ("pmap_demote_pdpe: oldpdpe is missing RPTE_LEAF and/or PG_V"));
5967         pdpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
5968         if (pdpg == NULL) {
5969                 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
5970                     " in pmap %p", va, pmap);
5971                 return (FALSE);
5972         }
5973         pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT;
5974         pdpgpa = VM_PAGE_TO_PHYS(pdpg);
5975         firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
5976         KASSERT((oldpdpe & PG_A) != 0,
5977             ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
5978         KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
5979             ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
5980         newpde = oldpdpe;
5981
5982         /*
5983          * Initialize the page directory page.
5984          */
5985         for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
5986                 *pde = htobe64(newpde);
5987                 newpde += L3_PAGE_SIZE;
5988         }
5989
5990         /*
5991          * Demote the mapping.
5992          */
5993         pde_store(l2e, pdpgpa);
5994
5995         /*
5996          * Flush PWC --- XXX revisit
5997          */
5998         pmap_invalidate_all(pmap);
5999
6000         pmap_l2e_demotions++;
6001         CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
6002             " in pmap %p", va, pmap);
6003         return (TRUE);
6004 }
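
/*
 * Illustrative sketch (editor's addition, not part of the build):
 * demotion above replaces a single 1GB leaf with a directory page of
 * 2MB leaves whose physical addresses step by L3_PAGE_SIZE while the
 * flag bits are inherited unchanged.  The standalone example below
 * shows that fill pattern; sizes, flag bits, and byte order are
 * simplified assumptions.
 */
#if 0	/* hosted example; compile separately, never in the kernel */
#include <stdio.h>
#include <stdint.h>

#define EX_NPDEPG	512			/* assumed entries per directory page */
#define EX_L3_PAGE_SIZE	(1UL << 21)		/* assumed 2MB */

int
main(void)
{
	static uint64_t dirpage[EX_NPDEPG];
	uint64_t newpde = 0x40000000UL | 0x3UL;	/* made-up 1GB leaf: PA | flags */

	for (int i = 0; i < EX_NPDEPG; i++) {
		dirpage[i] = newpde;		/* same flags, advancing PA */
		newpde += EX_L3_PAGE_SIZE;
	}
	printf("first %#jx last %#jx\n", (uintmax_t)dirpage[0],
	    (uintmax_t)dirpage[EX_NPDEPG - 1]);
	return (0);
}
#endif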
6005
6006 vm_paddr_t
6007 mmu_radix_kextract(vm_offset_t va)
6008 {
6009         pml3_entry_t l3e;
6010         vm_paddr_t pa;
6011
6012         CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
6013         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
6014                 pa = DMAP_TO_PHYS(va);
6015         } else {
6016                 /* Big-endian PTE on stack */
6017                 l3e = *pmap_pml3e(kernel_pmap, va);
6018                 if (be64toh(l3e) & RPTE_LEAF) {
6019                         pa = (be64toh(l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK);
6021                 } else {
6022                         /*
6023                          * Beware of a concurrent promotion that changes the
6024                          * PDE at this point!  For example, vtopte() must not
6025                          * be used to access the PTE because it would use the
6026                          * new PDE.  It is, however, safe to use the old PDE
6027                          * because the page table page is preserved by the
6028                          * promotion.
6029                          */
6030                         pa = be64toh(*pmap_l3e_to_pte(&l3e, va));
6031                         pa = (pa & PG_FRAME) | (va & PAGE_MASK);
6033                 }
6034         }
6035         return (pa);
6036 }
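
/*
 * Illustrative sketch (editor's addition, not part of the build):
 * kextract above picks the offset mask by mapping size, keeping the
 * low 21 VA bits under a 2MB leaf and the low 12 bits under a 4KB
 * PTE.  The standalone example below shows both cases; the EX_* masks
 * and sample values are assumptions, not kernel definitions.
 */
#if 0	/* hosted example; compile separately, never in the kernel */
#include <stdio.h>

#define EX_PAGE_MASK	0xfffUL			/* assumed 4KB offset mask */
#define EX_L3_PAGE_MASK	0x1fffffUL		/* assumed 2MB offset mask */
#define EX_PG_FRAME	(~0xfffUL)		/* 4KB frame bits (flags ignored) */
#define EX_PG_PS_FRAME	(~0x1fffffUL)		/* 2MB frame bits (flags ignored) */

int
main(void)
{
	unsigned long va = 0x7000123456UL;
	unsigned long leaf_2m = 0x80000000UL;	/* made-up 2MB-aligned PA */
	unsigned long pte_4k = 0x80123000UL;	/* made-up 4KB-aligned PA */

	printf("2MB leaf: %#lx\n",
	    (leaf_2m & EX_PG_PS_FRAME) | (va & EX_L3_PAGE_MASK));
	printf("4KB pte:  %#lx\n",
	    (pte_4k & EX_PG_FRAME) | (va & EX_PAGE_MASK));
	return (0);
}
#endif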
6037
6038 static pt_entry_t
6039 mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
6040 {
6041
6042         if (ma != VM_MEMATTR_DEFAULT) {
6043                 return (pmap_cache_bits(ma));
6044         }
6045
6046         /*
6047          * Assume the page is cache inhibited and access is guarded unless
6048          * it's in our available memory array.
6049          */
6050         for (int i = 0; i < pregions_sz; i++) {
6051                 if ((pa >= pregions[i].mr_start) &&
6052                     (pa < (pregions[i].mr_start + pregions[i].mr_size)))
6053                         return (RPTE_ATTR_MEM);
6054         }
6055         return (RPTE_ATTR_GUARDEDIO);
6056 }
6057
6058 static void
6059 mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
6060 {
6061         pt_entry_t *pte, pteval;
6062         uint64_t cache_bits;
6063
6064         pte = kvtopte(va);
6065         MPASS(pte != NULL);
6066         pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
6067         cache_bits = mmu_radix_calc_wimg(pa, ma);
6068         pte_store(pte, pteval | cache_bits);
6069 }
6070
6071 void
6072 mmu_radix_kremove(vm_offset_t va)
6073 {
6074         pt_entry_t *pte;
6075
6076         CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
6077
6078         pte = kvtopte(va);
6079         pte_clear(pte);
6080 }
6081
6082 int
6083 mmu_radix_decode_kernel_ptr(vm_offset_t addr,
6084     int *is_user, vm_offset_t *decoded)
6085 {
6086
6087         CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
6088         *decoded = addr;
6089         *is_user = (addr < VM_MAXUSER_ADDRESS);
6090         return (0);
6091 }
6092
6093 static boolean_t
6094 mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
6095 {
6096
6097         CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
6098         return (mem_valid(pa, size));
6099 }
6100
6101 static void
6102 mmu_radix_scan_init(void)
6103 {
6104
6105         CTR1(KTR_PMAP, "%s()", __func__);
6106         UNIMPLEMENTED();
6107 }
6108
6109 static void
6110 mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
6111         void **va)
6112 {
6113         CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
6114         UNIMPLEMENTED();
6115 }
6116
6117 vm_offset_t
6118 mmu_radix_quick_enter_page(vm_page_t m)
6119 {
6120         vm_paddr_t paddr;
6121
6122         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
6123         paddr = VM_PAGE_TO_PHYS(m);
6124         return (PHYS_TO_DMAP(paddr));
6125 }
6126
6127 void
6128 mmu_radix_quick_remove_page(vm_offset_t addr __unused)
6129 {
6130         /* no work to do here */
6131         CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
6132 }
6133
6134 static void
6135 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
6136 {
6137         cpu_flush_dcache((void *)sva, eva - sva);
6138 }
6139
6140 int
6141 mmu_radix_change_attr(vm_offset_t va, vm_size_t size,
6142     vm_memattr_t mode)
6143 {
6144         int error;
6145
6146         CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode);
6147         PMAP_LOCK(kernel_pmap);
6148         error = pmap_change_attr_locked(va, size, mode, true);
6149         PMAP_UNLOCK(kernel_pmap);
6150         return (error);
6151 }
6152
6153 static int
6154 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
6155 {
6156         vm_offset_t base, offset, tmpva;
6157         vm_paddr_t pa_start, pa_end, pa_end1;
6158         pml2_entry_t *l2e;
6159         pml3_entry_t *l3e;
6160         pt_entry_t *pte;
6161         int cache_bits, error;
6162         boolean_t changed;
6163
6164         PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
6165         base = trunc_page(va);
6166         offset = va & PAGE_MASK;
6167         size = round_page(offset + size);
6168
6169         /*
6170          * Only supported on kernel virtual addresses, including the direct
6171          * map but excluding the recursive map.
6172          */
6173         if (base < DMAP_MIN_ADDRESS)
6174                 return (EINVAL);
6175
6176         cache_bits = pmap_cache_bits(mode);
6177         changed = FALSE;
6178
6179         /*
6180          * Pages that aren't mapped aren't supported.  Also break down 2MB pages
6181          * into 4KB pages if required.
6182          */
6183         for (tmpva = base; tmpva < base + size; ) {
6184                 l2e = pmap_pml2e(kernel_pmap, tmpva);
6185                 if (l2e == NULL || *l2e == 0)
6186                         return (EINVAL);
6187                 if (be64toh(*l2e) & RPTE_LEAF) {
6188                         /*
6189                          * If the current 1GB page already has the required
6190                          * memory type, then we need not demote this page. Just
6191                          * increment tmpva to the next 1GB page frame.
6192                          */
6193                         if ((be64toh(*l2e) & RPTE_ATTR_MASK) == cache_bits) {
6194                                 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6195                                 continue;
6196                         }
6197
6198                         /*
6199                          * If the current offset aligns with a 1GB page frame
6200                          * and there is at least 1GB left within the range, then
6201                          * we need not break down this page into 2MB pages.
6202                          */
6203                         if ((tmpva & L2_PAGE_MASK) == 0 &&
6204                             tmpva + L2_PAGE_MASK < base + size) {
6205                                 tmpva += L2_PAGE_SIZE;
6206                                 continue;
6207                         }
6208                         if (!pmap_demote_l2e(kernel_pmap, l2e, tmpva))
6209                                 return (ENOMEM);
6210                 }
6211                 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6212                 KASSERT(l3e != NULL, ("no l3e entry for %#lx in %p\n",
6213                     tmpva, l2e));
6214                 if (*l3e == 0)
6215                         return (EINVAL);
6216                 if (be64toh(*l3e) & RPTE_LEAF) {
6217                         /*
6218                          * If the current 2MB page already has the required
6219                          * memory type, then we need not demote this page. Just
6220                          * increment tmpva to the next 2MB page frame.
6221                          */
6222                         if ((be64toh(*l3e) & RPTE_ATTR_MASK) == cache_bits) {
6223                                 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6224                                 continue;
6225                         }
6226
6227                         /*
6228                          * If the current offset aligns with a 2MB page frame
6229                          * and there is at least 2MB left within the range, then
6230                          * we need not break down this page into 4KB pages.
6231                          */
6232                         if ((tmpva & L3_PAGE_MASK) == 0 &&
6233                             tmpva + L3_PAGE_MASK < base + size) {
6234                                 tmpva += L3_PAGE_SIZE;
6235                                 continue;
6236                         }
6237                         if (!pmap_demote_l3e(kernel_pmap, l3e, tmpva))
6238                                 return (ENOMEM);
6239                 }
6240                 pte = pmap_l3e_to_pte(l3e, tmpva);
6241                 if (*pte == 0)
6242                         return (EINVAL);
6243                 tmpva += PAGE_SIZE;
6244         }
6245         error = 0;
6246
6247         /*
6248          * Ok, all the pages exist, so run through them updating their
6249          * cache mode if required.
6250          */
6251         pa_start = pa_end = 0;
6252         for (tmpva = base; tmpva < base + size; ) {
6253                 l2e = pmap_pml2e(kernel_pmap, tmpva);
6254                 if (be64toh(*l2e) & RPTE_LEAF) {
6255                         if ((be64toh(*l2e) & RPTE_ATTR_MASK) != cache_bits) {
6256                                 pmap_pte_attr(l2e, cache_bits,
6257                                     RPTE_ATTR_MASK);
6258                                 changed = TRUE;
6259                         }
6260                         if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6261                             (be64toh(*l2e) & PG_PS_FRAME) < dmaplimit) {
6262                                 if (pa_start == pa_end) {
6263                                         /* Start physical address run. */
6264                                         pa_start = be64toh(*l2e) & PG_PS_FRAME;
6265                                         pa_end = pa_start + L2_PAGE_SIZE;
6266                                 } else if (pa_end == (be64toh(*l2e) & PG_PS_FRAME))
6267                                         pa_end += L2_PAGE_SIZE;
6268                                 else {
6269                                         /* Run ended, update direct map. */
6270                                         error = pmap_change_attr_locked(
6271                                             PHYS_TO_DMAP(pa_start),
6272                                             pa_end - pa_start, mode, flush);
6273                                         if (error != 0)
6274                                                 break;
6275                                         /* Start physical address run. */
6276                                         pa_start = be64toh(*l2e) & PG_PS_FRAME;
6277                                         pa_end = pa_start + L2_PAGE_SIZE;
6278                                 }
6279                         }
6280                         tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6281                         continue;
6282                 }
6283                 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6284                 if (be64toh(*l3e) & RPTE_LEAF) {
6285                         if ((be64toh(*l3e) & RPTE_ATTR_MASK) != cache_bits) {
6286                                 pmap_pte_attr(l3e, cache_bits,
6287                                     RPTE_ATTR_MASK);
6288                                 changed = TRUE;
6289                         }
6290                         if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6291                             (be64toh(*l3e) & PG_PS_FRAME) < dmaplimit) {
6292                                 if (pa_start == pa_end) {
6293                                         /* Start physical address run. */
6294                                         pa_start = be64toh(*l3e) & PG_PS_FRAME;
6295                                         pa_end = pa_start + L3_PAGE_SIZE;
6296                                 } else if (pa_end == (be64toh(*l3e) & PG_PS_FRAME))
6297                                         pa_end += L3_PAGE_SIZE;
6298                                 else {
6299                                         /* Run ended, update direct map. */
6300                                         error = pmap_change_attr_locked(
6301                                             PHYS_TO_DMAP(pa_start),
6302                                             pa_end - pa_start, mode, flush);
6303                                         if (error != 0)
6304                                                 break;
6305                                         /* Start physical address run. */
6306                                         pa_start = be64toh(*l3e) & PG_PS_FRAME;
6307                                         pa_end = pa_start + L3_PAGE_SIZE;
6308                                 }
6309                         }
6310                         tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6311                 } else {
6312                         pte = pmap_l3e_to_pte(l3e, tmpva);
6313                         if ((be64toh(*pte) & RPTE_ATTR_MASK) != cache_bits) {
6314                                 pmap_pte_attr(pte, cache_bits,
6315                                     RPTE_ATTR_MASK);
6316                                 changed = TRUE;
6317                         }
6318                         if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6319                             (be64toh(*pte) & PG_FRAME) < dmaplimit) {
6320                                 if (pa_start == pa_end) {
6321                                         /* Start physical address run. */
6322                                         pa_start = be64toh(*pte) & PG_FRAME;
6323                                         pa_end = pa_start + PAGE_SIZE;
6324                                 } else if (pa_end == (be64toh(*pte) & PG_FRAME))
6325                                         pa_end += PAGE_SIZE;
6326                                 else {
6327                                         /* Run ended, update direct map. */
6328                                         error = pmap_change_attr_locked(
6329                                             PHYS_TO_DMAP(pa_start),
6330                                             pa_end - pa_start, mode, flush);
6331                                         if (error != 0)
6332                                                 break;
6333                                         /* Start physical address run. */
6334                                         pa_start = be64toh(*pte) & PG_FRAME;
6335                                         pa_end = pa_start + PAGE_SIZE;
6336                                 }
6337                         }
6338                         tmpva += PAGE_SIZE;
6339                 }
6340         }
6341         if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
6342                 pa_end1 = MIN(pa_end, dmaplimit);
6343                 if (pa_start != pa_end1)
6344                         error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
6345                             pa_end1 - pa_start, mode, flush);
6346         }
6347
6348         /*
6349          * Flush CPU caches if required to make sure any data isn't cached that
6350          * shouldn't be, etc.
6351          */
6352         if (changed) {
6353                 pmap_invalidate_all(kernel_pmap);
6354
6355                 if (flush)
6356                         pmap_invalidate_cache_range(base, tmpva);
6357         }
6358         return (error);
6359 }
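
/*
 * Illustrative sketch (editor's addition, not part of the build):
 * while rewriting attributes, the function above coalesces successive
 * mappings into contiguous physical runs [pa_start, pa_end) and
 * recurses once per run to update the direct map.  The standalone
 * example below shows the same run-coalescing pattern over a made-up
 * list of page frames; flush_run() stands in for the recursive call.
 */
#if 0	/* hosted example; compile separately, never in the kernel */
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL

static void
flush_run(unsigned long start, unsigned long end)
{
	printf("update direct map for [%#lx, %#lx)\n", start, end);
}

int
main(void)
{
	/* Made-up physical frames; the first three are contiguous. */
	unsigned long frames[] = { 0x10000, 0x11000, 0x12000, 0x40000, 0x41000 };
	unsigned long pa_start = 0, pa_end = 0;

	for (unsigned int i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
		if (pa_start == pa_end) {		/* start a new run */
			pa_start = frames[i];
			pa_end = pa_start + EX_PAGE_SIZE;
		} else if (pa_end == frames[i]) {	/* extend the run */
			pa_end += EX_PAGE_SIZE;
		} else {				/* run ended: flush, restart */
			flush_run(pa_start, pa_end);
			pa_start = frames[i];
			pa_end = pa_start + EX_PAGE_SIZE;
		}
	}
	if (pa_start != pa_end)
		flush_run(pa_start, pa_end);
	return (0);
}
#endif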
6360
6361 /*
6362  * Allocate physical memory for the vm_page array and map it into KVA,
6363  * attempting to back the vm_pages with domain-local memory.
6364  */
6365 void
6366 mmu_radix_page_array_startup(long pages)
6367 {
6368 #ifdef notyet
6369         pml2_entry_t *l2e;
6370         pml3_entry_t *pde;
6371         pml3_entry_t newl3;
6372         vm_offset_t va;
6373         long pfn;
6374         int domain, i;
6375 #endif
6376         vm_paddr_t pa;
6377         vm_offset_t start, end;
6378
6379         vm_page_array_size = pages;
6380
6381         start = VM_MIN_KERNEL_ADDRESS;
6382         end = start + pages * sizeof(struct vm_page);
6383
6384         pa = vm_phys_early_alloc(0, end - start);
6385
6386         start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
6387 #ifdef notyet
6388         /* TODO: NUMA vm_page_array.  Blocked out until then (copied from amd64). */
6389         for (va = start; va < end; va += L3_PAGE_SIZE) {
6390                 pfn = first_page + (va - start) / sizeof(struct vm_page);
6391                 domain = vm_phys_domain(ptoa(pfn));
6392                 l2e = pmap_pml2e(kernel_pmap, va);
6393                 if ((be64toh(*l2e) & PG_V) == 0) {
6394                         pa = vm_phys_early_alloc(domain, PAGE_SIZE);
6395                         dump_add_page(pa);
6396                         pagezero(PHYS_TO_DMAP(pa));
6397                         pde_store(l2e, (pml2_entry_t)pa);
6398                 }
6399                 pde = pmap_l2e_to_l3e(l2e, va);
6400                 if ((be64toh(*pde) & PG_V) != 0)
6401                         panic("Unexpected pde %p", pde);
6402                 pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE);
6403                 for (i = 0; i < NPDEPG; i++)
6404                         dump_add_page(pa + i * PAGE_SIZE);
6405                 newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R | RPTE_EAA_W);
6406                 pte_store(pde, newl3);
6407         }
6408 #endif
6409         vm_page_array = (vm_page_t)start;
6410 }
6411
6412 #ifdef DDB
6413 #include <sys/kdb.h>
6414 #include <ddb/ddb.h>
6415
6416 static void
6417 pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va)
6418 {
6419         pml1_entry_t *l1e;
6420         pml2_entry_t *l2e;
6421         pml3_entry_t *l3e;
6422         pt_entry_t *pte;
6423
6424         l1e = &l1[pmap_pml1e_index(va)];
6425         db_printf("VA %#016lx l1e %#016lx", va, be64toh(*l1e));
6426         if ((be64toh(*l1e) & PG_V) == 0) {
6427                 db_printf("\n");
6428                 return;
6429         }
6430         l2e = pmap_l1e_to_l2e(l1e, va);
6431         db_printf(" l2e %#016lx", be64toh(*l2e));
6432         if ((be64toh(*l2e) & PG_V) == 0 || (be64toh(*l2e) & RPTE_LEAF) != 0) {
6433                 db_printf("\n");
6434                 return;
6435         }
6436         l3e = pmap_l2e_to_l3e(l2e, va);
6437         db_printf(" l3e %#016lx", be64toh(*l3e));
6438         if ((be64toh(*l3e) & PG_V) == 0 || (be64toh(*l3e) & RPTE_LEAF) != 0) {
6439                 db_printf("\n");
6440                 return;
6441         }
6442         pte = pmap_l3e_to_pte(l3e, va);
6443         db_printf(" pte %#016lx\n", be64toh(*pte));
6444 }
6445
6446 void
6447 pmap_page_print_mappings(vm_page_t m)
6448 {
6449         pmap_t pmap;
6450         pv_entry_t pv;
6451
6452         db_printf("page %p(%lx)\n", m, m->phys_addr);
6453         /* need to elide locks if running in ddb */
6454         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
6455                 db_printf("pv: %p ", pv);
6456                 db_printf("va: %#016lx ", pv->pv_va);
6457                 pmap = PV_PMAP(pv);
6458                 db_printf("pmap %p  ", pmap);
6459                 if (pmap != NULL) {
6460                         db_printf("asid: %lu\n", pmap->pm_pid);
6461                         pmap_pte_walk(pmap->pm_pml1, pv->pv_va);
6462                 }
6463         }
6464 }
6465
6466 DB_SHOW_COMMAND(pte, pmap_print_pte)
6467 {
6468         vm_offset_t va;
6469         pmap_t pmap;
6470
6471         if (!have_addr) {
6472                 db_printf("show pte addr\n");
6473                 return;
6474         }
6475         va = (vm_offset_t)addr;
6476
6477         if (va >= DMAP_MIN_ADDRESS)
6478                 pmap = kernel_pmap;
6479         else if (kdb_thread != NULL)
6480                 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
6481         else
6482                 pmap = vmspace_pmap(curthread->td_proc->p_vmspace);
6483
6484         pmap_pte_walk(pmap->pm_pml1, va);
6485 }
6486
6487 #endif