[FreeBSD/FreeBSD.git] sys/powerpc/aim/mmu_radix.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2018 Matthew Macy
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32
33 #include <sys/param.h>
34 #include <sys/kernel.h>
35 #include <sys/systm.h>
36 #include <sys/conf.h>
37 #include <sys/bitstring.h>
38 #include <sys/queue.h>
39 #include <sys/cpuset.h>
40 #include <sys/endian.h>
41 #include <sys/kerneldump.h>
42 #include <sys/ktr.h>
43 #include <sys/lock.h>
44 #include <sys/syslog.h>
45 #include <sys/msgbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/mman.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/rwlock.h>
51 #include <sys/sched.h>
52 #include <sys/sysctl.h>
53 #include <sys/systm.h>
54 #include <sys/vmem.h>
55 #include <sys/vmmeter.h>
56 #include <sys/smp.h>
57
58 #include <sys/kdb.h>
59
60 #include <dev/ofw/openfirm.h>
61
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_pageout.h>
71 #include <vm/vm_phys.h>
72 #include <vm/vm_reserv.h>
73 #include <vm/uma.h>
74
75 #include <machine/_inttypes.h>
76 #include <machine/cpu.h>
77 #include <machine/platform.h>
78 #include <machine/frame.h>
79 #include <machine/md_var.h>
80 #include <machine/psl.h>
81 #include <machine/bat.h>
82 #include <machine/hid.h>
83 #include <machine/pte.h>
84 #include <machine/sr.h>
85 #include <machine/trap.h>
86 #include <machine/mmuvar.h>
87
88 #ifdef INVARIANTS
89 #include <vm/uma_dbg.h>
90 #endif
91
92 #define PPC_BITLSHIFT(bit)      (sizeof(long)*NBBY - 1 - (bit))
93 #define PPC_BIT(bit)            (1UL << PPC_BITLSHIFT(bit))
94 #define PPC_BITLSHIFT_VAL(val, bit) ((val) << PPC_BITLSHIFT(bit))
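/*
 * These helpers convert IBM (MSB-0) bit numbers into ordinary shifts.
 * For example, with a 64-bit long:
 *	PPC_BIT(0)  == 0x8000000000000000UL	(IBM bit 0 is the MSB)
 *	PPC_BIT(63) == 0x0000000000000001UL
 *	PPC_BITLSHIFT_VAL(0x5, 7) == 0x5UL << 56
 */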
95
96 #include "opt_ddb.h"
97 #ifdef DDB
98 static void pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va);
99 #endif
100
101 #define PG_W    RPTE_WIRED
102 #define PG_V    RPTE_VALID
103 #define PG_MANAGED      RPTE_MANAGED
104 #define PG_PROMOTED     RPTE_PROMOTED
105 #define PG_M    RPTE_C
106 #define PG_A    RPTE_R
107 #define PG_X    RPTE_EAA_X
108 #define PG_RW   RPTE_EAA_W
109 #define PG_PTE_CACHE RPTE_ATTR_MASK
110
111 #define RPTE_SHIFT 9
112 #define NLS_MASK ((1UL<<5)-1)
113 #define RPTE_ENTRIES (1UL<<RPTE_SHIFT)
114 #define RPTE_MASK (RPTE_ENTRIES-1)
115
116 #define NLB_SHIFT 0
117 #define NLB_MASK (((1UL<<52)-1) << 8)
118
119 extern int nkpt;
120 extern caddr_t crashdumpmap;
121
122 #define RIC_FLUSH_TLB 0
123 #define RIC_FLUSH_PWC 1
124 #define RIC_FLUSH_ALL 2
125
126 #define POWER9_TLB_SETS_RADIX   128     /* # sets in POWER9 TLB Radix mode */
127
128 #define PPC_INST_TLBIE                  0x7c000264
129 #define PPC_INST_TLBIEL                 0x7c000224
130 #define PPC_INST_SLBIA                  0x7c0003e4
131
132 #define ___PPC_RA(a)    (((a) & 0x1f) << 16)
133 #define ___PPC_RB(b)    (((b) & 0x1f) << 11)
134 #define ___PPC_RS(s)    (((s) & 0x1f) << 21)
135 #define ___PPC_RT(t)    ___PPC_RS(t)
136 #define ___PPC_R(r)     (((r) & 0x1) << 16)
137 #define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
138 #define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
139
140 #define PPC_SLBIA(IH)   __XSTRING(.long PPC_INST_SLBIA | \
141                                        ((IH & 0x7) << 21))
142 #define PPC_TLBIE_5(rb,rs,ric,prs,r)                            \
143         __XSTRING(.long PPC_INST_TLBIE |                        \
144                           ___PPC_RB(rb) | ___PPC_RS(rs) |       \
145                           ___PPC_RIC(ric) | ___PPC_PRS(prs) |   \
146                           ___PPC_R(r))
147
148 #define PPC_TLBIEL(rb,rs,ric,prs,r) \
149          __XSTRING(.long PPC_INST_TLBIEL | \
150                            ___PPC_RB(rb) | ___PPC_RS(rs) |      \
151                            ___PPC_RIC(ric) | ___PPC_PRS(prs) |  \
152                            ___PPC_R(r))
153
154 #define PPC_INVALIDATE_ERAT             PPC_SLBIA(7)
155
156 static __inline void
157 ttusync(void)
158 {
159         __asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
160 }
161
162 #define TLBIEL_INVAL_SEL_MASK   0xc00   /* invalidation selector */
163 #define  TLBIEL_INVAL_PAGE      0x000   /* invalidate a single page */
164 #define  TLBIEL_INVAL_SET_PID   0x400   /* invalidate a set for the current PID */
165 #define  TLBIEL_INVAL_SET_LPID  0x800   /* invalidate a set for current LPID */
166 #define  TLBIEL_INVAL_SET       0xc00   /* invalidate a set for all LPIDs */
167
168 #define TLBIE_ACTUAL_PAGE_MASK          0xe0
169 #define  TLBIE_ACTUAL_PAGE_4K           0x00
170 #define  TLBIE_ACTUAL_PAGE_64K          0xa0
171 #define  TLBIE_ACTUAL_PAGE_2M           0x20
172 #define  TLBIE_ACTUAL_PAGE_1G           0x40
173
174 #define TLBIE_PRS_PARTITION_SCOPE       0x0
175 #define TLBIE_PRS_PROCESS_SCOPE 0x1
176
177 #define TLBIE_RIC_INVALIDATE_TLB        0x0     /* Invalidate just TLB */
178 #define TLBIE_RIC_INVALIDATE_PWC        0x1     /* Invalidate just PWC */
179 #define TLBIE_RIC_INVALIDATE_ALL        0x2     /* Invalidate TLB, PWC,
180                                                  * cached {proc, part}tab entries
181                                                  */
182 #define TLBIE_RIC_INVALIDATE_SEQ        0x3     /* HPT - only:
183                                                  * Invalidate a range of translations
184                                                  */
185
186 static __inline void
187 radix_tlbie(uint8_t ric, uint8_t prs, uint16_t is, uint32_t pid, uint32_t lpid,
188                         vm_offset_t va, uint16_t ap)
189 {
190         uint64_t rb, rs;
191
192         MPASS((va & PAGE_MASK) == 0);
193
194         rs = ((uint64_t)pid << 32) | lpid;
195         rb = va | is | ap;
196         __asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : :
197                 "r" (rb), "r" (rs), "i" (ric), "i" (prs));
198 }
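/*
 * Illustrative example: radix_tlbie_invlpg_user_4k(42, 0x10000000) below
 * ends up here with ric == TLBIE_RIC_INVALIDATE_TLB, prs ==
 * TLBIE_PRS_PROCESS_SCOPE, is == TLBIEL_INVAL_PAGE and ap ==
 * TLBIE_ACTUAL_PAGE_4K, so
 *	rs = (42UL << 32) | 0	(PID in the upper word, LPID in the lower)
 *	rb = 0x10000000 | is | ap
 * and a single tlbie with R=1 (radix) is issued for that page.
 */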
199
200 static __inline void
201 radix_tlbie_invlpg_user_4k(uint32_t pid, vm_offset_t va)
202 {
203
204         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
205                 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_4K);
206 }
207
208 static __inline void
209 radix_tlbie_invlpg_user_2m(uint32_t pid, vm_offset_t va)
210 {
211
212         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
213                 TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_2M);
214 }
215
216 static __inline void
217 radix_tlbie_invlpwc_user(uint32_t pid)
218 {
219
220         radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
221                 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
222 }
223
224 static __inline void
225 radix_tlbie_flush_user(uint32_t pid)
226 {
227
228         radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
229                 TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
230 }
231
232 static __inline void
233 radix_tlbie_invlpg_kernel_4k(vm_offset_t va)
234 {
235
236         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
237             TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_4K);
238 }
239
240 static __inline void
241 radix_tlbie_invlpg_kernel_2m(vm_offset_t va)
242 {
243
244         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
245             TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_2M);
246 }
247
248 /* 1GB pages aren't currently supported. */
249 static __inline __unused void
250 radix_tlbie_invlpg_kernel_1g(vm_offset_t va)
251 {
252
253         radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
254             TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_1G);
255 }
256
257 static __inline void
258 radix_tlbie_invlpwc_kernel(void)
259 {
260
261         radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
262             TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
263 }
264
265 static __inline void
266 radix_tlbie_flush_kernel(void)
267 {
268
269         radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
270             TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
271 }
272
273 static __inline vm_pindex_t
274 pmap_l3e_pindex(vm_offset_t va)
275 {
276         return ((va & PG_FRAME) >> L3_PAGE_SIZE_SHIFT);
277 }
278
279 static __inline vm_pindex_t
280 pmap_pml3e_index(vm_offset_t va)
281 {
282
283         return ((va >> L3_PAGE_SIZE_SHIFT) & RPTE_MASK);
284 }
285
286 static __inline vm_pindex_t
287 pmap_pml2e_index(vm_offset_t va)
288 {
289         return ((va >> L2_PAGE_SIZE_SHIFT) & RPTE_MASK);
290 }
291
292 static __inline vm_pindex_t
293 pmap_pml1e_index(vm_offset_t va)
294 {
295         return ((va & PG_FRAME) >> L1_PAGE_SIZE_SHIFT);
296 }
297
298 /* Return various clipped indexes for a given VA */
299 static __inline vm_pindex_t
300 pmap_pte_index(vm_offset_t va)
301 {
302
303         return ((va >> PAGE_SHIFT) & RPTE_MASK);
304 }
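/*
 * Illustrative layout, assuming the 4 KB-page radix geometry used here
 * (13-bit L1 index, 9-bit L2/L3/PTE indexes, 12-bit page offset):
 *
 *	va: | 63..52 unused | 51..39 L1 | 38..30 L2 | 29..21 L3 | 20..12 PTE | 11..0 offset |
 *
 * e.g. for va == 0x10003000: pmap_pml1e_index() == 0,
 * pmap_pml2e_index() == 0, pmap_pml3e_index() == 0x80 and
 * pmap_pte_index() == 0x3.
 */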
305
306 /* Return a pointer to the PT slot that corresponds to a VA */
307 static __inline pt_entry_t *
308 pmap_l3e_to_pte(pt_entry_t *l3e, vm_offset_t va)
309 {
310         pt_entry_t *pte;
311         vm_paddr_t ptepa;
312
313         ptepa = (*l3e & NLB_MASK);
314         pte = (pt_entry_t *)PHYS_TO_DMAP(ptepa);
315         return (&pte[pmap_pte_index(va)]);
316 }
317
318 /* Return a pointer to the PD slot that corresponds to a VA */
319 static __inline pt_entry_t *
320 pmap_l2e_to_l3e(pt_entry_t *l2e, vm_offset_t va)
321 {
322         pt_entry_t *l3e;
323         vm_paddr_t l3pa;
324
325         l3pa = (*l2e & NLB_MASK);
326         l3e = (pml3_entry_t *)PHYS_TO_DMAP(l3pa);
327         return (&l3e[pmap_pml3e_index(va)]);
328 }
329
330 /* Return a pointer to the PD slot that corresponds to a VA */
331 static __inline pt_entry_t *
332 pmap_l1e_to_l2e(pt_entry_t *l1e, vm_offset_t va)
333 {
334         pt_entry_t *l2e;
335         vm_paddr_t l2pa;
336
337         l2pa = (*l1e & NLB_MASK);
338
339         l2e = (pml2_entry_t *)PHYS_TO_DMAP(l2pa);
340         return (&l2e[pmap_pml2e_index(va)]);
341 }
342
343 static __inline pml1_entry_t *
344 pmap_pml1e(pmap_t pmap, vm_offset_t va)
345 {
346
347         return (&pmap->pm_pml1[pmap_pml1e_index(va)]);
348 }
349
350 static pt_entry_t *
351 pmap_pml2e(pmap_t pmap, vm_offset_t va)
352 {
353         pt_entry_t *l1e;
354
355         l1e = pmap_pml1e(pmap, va);
356         if (l1e == NULL || (*l1e & RPTE_VALID) == 0)
357                 return (NULL);
358         return (pmap_l1e_to_l2e(l1e, va));
359 }
360
361 static __inline pt_entry_t *
362 pmap_pml3e(pmap_t pmap, vm_offset_t va)
363 {
364         pt_entry_t *l2e;
365
366         l2e = pmap_pml2e(pmap, va);
367         if (l2e == NULL || (*l2e & RPTE_VALID) == 0)
368                 return (NULL);
369         return (pmap_l2e_to_l3e(l2e, va));
370 }
371
372 static __inline pt_entry_t *
373 pmap_pte(pmap_t pmap, vm_offset_t va)
374 {
375         pt_entry_t *l3e;
376
377         l3e = pmap_pml3e(pmap, va);
378         if (l3e == NULL || (*l3e & RPTE_VALID) == 0)
379                 return (NULL);
380         return (pmap_l3e_to_pte(l3e, va));
381 }
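/*
 * A minimal usage sketch (illustrative only, not referenced anywhere):
 * translate a kernel virtual address backed by a regular 4 KB
 * (non-RPTE_LEAF) mapping into a physical address using the walkers
 * above, returning 0 if no valid mapping exists.
 */
static __inline __unused vm_paddr_t
pmap_example_kextract_4k(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = pmap_pte(kernel_pmap, va);
	if (pte == NULL || (*pte & RPTE_VALID) == 0)
		return (0);
	return ((*pte & PG_FRAME) | (va & PAGE_MASK));
}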
382
383 int nkpt = 64;
384 SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
385     "Number of kernel page table pages allocated on bootup");
386
387 vm_paddr_t dmaplimit;
388
389 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
390
391 static int pg_ps_enabled = 1;
392 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
393     &pg_ps_enabled, 0, "Are large page mappings enabled?");
394 #ifdef INVARIANTS
395 #define VERBOSE_PMAP 0
396 #define VERBOSE_PROTECT 0
397 static int pmap_logging;
398 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_logging, CTLFLAG_RWTUN,
399     &pmap_logging, 0, "verbose debug logging");
400 #endif
401
402 static u_int64_t        KPTphys;        /* phys addr of kernel level 1 */
403
404 //static vm_paddr_t     KERNend;        /* phys addr of end of bootstrap data */
405
406 static vm_offset_t qframe = 0;
407 static struct mtx qframe_mtx;
408
409 void mmu_radix_activate(struct thread *);
410 void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int);
411 void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
412     vm_size_t);
413 void mmu_radix_clear_modify(vm_page_t);
414 void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
415 int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *);
416 int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
417 void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
418         vm_prot_t);
419 void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
420 vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va);
421 vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
422 void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
423 vm_paddr_t mmu_radix_kextract(vm_offset_t);
424 void mmu_radix_kremove(vm_offset_t);
425 boolean_t mmu_radix_is_modified(vm_page_t);
426 boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
427 boolean_t mmu_radix_is_referenced(vm_page_t);
428 void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
429         vm_pindex_t, vm_size_t);
430 boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
431 void mmu_radix_page_init(vm_page_t);
432 boolean_t mmu_radix_page_is_mapped(vm_page_t m);
433 void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
434 int mmu_radix_page_wired_mappings(vm_page_t);
435 int mmu_radix_pinit(pmap_t);
436 void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
437 bool mmu_radix_ps_enabled(pmap_t);
438 void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
439 void mmu_radix_qremove(vm_offset_t, int);
440 vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
441 void mmu_radix_quick_remove_page(vm_offset_t);
442 boolean_t mmu_radix_ts_referenced(vm_page_t);
443 void mmu_radix_release(pmap_t);
444 void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
445 void mmu_radix_remove_all(vm_page_t);
446 void mmu_radix_remove_pages(pmap_t);
447 void mmu_radix_remove_write(vm_page_t);
448 void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t);
449 void mmu_radix_zero_page(vm_page_t);
450 void mmu_radix_zero_page_area(vm_page_t, int, int);
451 int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
452 void mmu_radix_page_array_startup(long pages);
453
454 #include "mmu_oea64.h"
455
456 /*
457  * Kernel MMU interface
458  */
459
460 static void     mmu_radix_bootstrap(vm_offset_t, vm_offset_t);
461
462 static void mmu_radix_copy_page(vm_page_t, vm_page_t);
463 static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
464     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
465 static void mmu_radix_growkernel(vm_offset_t);
466 static void mmu_radix_init(void);
467 static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
468 static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
469 static void mmu_radix_pinit0(pmap_t);
470
471 static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
472 static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
473 static void mmu_radix_unmapdev(vm_offset_t, vm_size_t);
474 static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
475 static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t);
476 static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
477 static void mmu_radix_scan_init(void);
478 static void     mmu_radix_cpu_bootstrap(int ap);
479 static void     mmu_radix_tlbie_all(void);
480
481 static struct pmap_funcs mmu_radix_methods = {
482         .bootstrap = mmu_radix_bootstrap,
483         .copy_page = mmu_radix_copy_page,
484         .copy_pages = mmu_radix_copy_pages,
485         .cpu_bootstrap = mmu_radix_cpu_bootstrap,
486         .growkernel = mmu_radix_growkernel,
487         .init = mmu_radix_init,
488         .map =                  mmu_radix_map,
489         .mincore =              mmu_radix_mincore,
490         .pinit = mmu_radix_pinit,
491         .pinit0 = mmu_radix_pinit0,
492
493         .mapdev = mmu_radix_mapdev,
494         .mapdev_attr = mmu_radix_mapdev_attr,
495         .unmapdev = mmu_radix_unmapdev,
496         .kenter_attr = mmu_radix_kenter_attr,
497         .dev_direct_mapped = mmu_radix_dev_direct_mapped,
498         .dumpsys_pa_init = mmu_radix_scan_init,
499         .dumpsys_map_chunk = mmu_radix_dumpsys_map,
500         .page_is_mapped = mmu_radix_page_is_mapped,
501         .ps_enabled = mmu_radix_ps_enabled,
502         .object_init_pt = mmu_radix_object_init_pt,
503         .protect = mmu_radix_protect,
504         /* pmap dispatcher interface */
505         .clear_modify = mmu_radix_clear_modify,
506         .copy = mmu_radix_copy,
507         .enter = mmu_radix_enter,
508         .enter_object = mmu_radix_enter_object,
509         .enter_quick = mmu_radix_enter_quick,
510         .extract = mmu_radix_extract,
511         .extract_and_hold = mmu_radix_extract_and_hold,
512         .is_modified = mmu_radix_is_modified,
513         .is_prefaultable = mmu_radix_is_prefaultable,
514         .is_referenced = mmu_radix_is_referenced,
515         .ts_referenced = mmu_radix_ts_referenced,
516         .page_exists_quick = mmu_radix_page_exists_quick,
517         .page_init = mmu_radix_page_init,
518         .page_wired_mappings =  mmu_radix_page_wired_mappings,
519         .qenter = mmu_radix_qenter,
520         .qremove = mmu_radix_qremove,
521         .release = mmu_radix_release,
522         .remove = mmu_radix_remove,
523         .remove_all = mmu_radix_remove_all,
524         .remove_write = mmu_radix_remove_write,
525         .unwire = mmu_radix_unwire,
526         .zero_page = mmu_radix_zero_page,
527         .zero_page_area = mmu_radix_zero_page_area,
528         .activate = mmu_radix_activate,
529         .quick_enter_page =  mmu_radix_quick_enter_page,
530         .quick_remove_page =  mmu_radix_quick_remove_page,
531         .page_set_memattr = mmu_radix_page_set_memattr,
532         .page_array_startup =  mmu_radix_page_array_startup,
533
534         /* Internal interfaces */
535         .kenter = mmu_radix_kenter,
536         .kextract = mmu_radix_kextract,
537         .kremove = mmu_radix_kremove,
538         .change_attr = mmu_radix_change_attr,
539         .decode_kernel_ptr =  mmu_radix_decode_kernel_ptr,
540
541         .tlbie_all = mmu_radix_tlbie_all,
542 };
543
544 MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
545
546 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
547         struct rwlock **lockp);
548 static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va);
549 static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *);
550 static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
551     struct spglist *free, struct rwlock **lockp);
552 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
553     pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
554 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
555 static bool pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *pde,
556     struct spglist *free);
557 static bool     pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
558         pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp);
559
560 static bool     pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e,
561                     u_int flags, struct rwlock **lockp);
562 #if VM_NRESERVLEVEL > 0
563 static void     pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
564         struct rwlock **lockp);
565 #endif
566 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
567 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
568 static vm_page_t mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
569         vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate);
570
571 static bool     pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
572         vm_prot_t prot, struct rwlock **lockp);
573 static int      pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde,
574         u_int flags, vm_page_t m, struct rwlock **lockp);
575
576 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
577 static void free_pv_chunk(struct pv_chunk *pc);
578 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp);
579 static vm_page_t pmap_allocl3e(pmap_t pmap, vm_offset_t va,
580         struct rwlock **lockp);
581 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
582         struct rwlock **lockp);
583 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
584     struct spglist *free);
585 static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free);
586
587 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start);
588 static void pmap_invalidate_all(pmap_t pmap);
589 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush);
590
591 /*
592  * Internal flags for pmap_enter()'s helper functions.
593  */
594 #define PMAP_ENTER_NORECLAIM    0x1000000       /* Don't reclaim PV entries. */
595 #define PMAP_ENTER_NOREPLACE    0x2000000       /* Don't replace mappings. */
596
597 #define UNIMPLEMENTED() panic("%s not implemented", __func__)
598 #define UNTESTED() panic("%s not yet tested", __func__)
599
600
601
602 /* Number of supported PID bits */
603 static unsigned int isa3_pid_bits;
604
605 /* PID to start allocating from */
606 static unsigned int isa3_base_pid;
607
608 #define PROCTAB_SIZE_SHIFT      (isa3_pid_bits + 4)
609 #define PROCTAB_ENTRIES (1ul << isa3_pid_bits)
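/*
 * For example (illustrative), with isa3_pid_bits == 20 this gives
 * PROCTAB_ENTRIES == 1M process-table entries; each entry (struct prte)
 * is 16 bytes, hence the "+ 4" above, for a PROCTAB_SIZE_SHIFT of 24,
 * i.e. a 16 MB process table.
 */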
610
611
612 /*
613  * Map of physical memory regions.
614  */
615 static struct   mem_region *regions, *pregions;
616 static struct   numa_mem_region *numa_pregions;
617 static u_int    phys_avail_count;
618 static int      regions_sz, pregions_sz, numa_pregions_sz;
619 static struct pate *isa3_parttab;
620 static struct prte *isa3_proctab;
621 static vmem_t *asid_arena;
622
623 extern void bs_remap_earlyboot(void);
624
625 #define RADIX_PGD_SIZE_SHIFT    16
626 #define RADIX_PGD_SIZE  (1UL << RADIX_PGD_SIZE_SHIFT)
627
628 #define RADIX_PGD_INDEX_SHIFT   (RADIX_PGD_SIZE_SHIFT-3)
629 #define NL2EPG (PAGE_SIZE/sizeof(pml2_entry_t))
630 #define NL3EPG (PAGE_SIZE/sizeof(pml3_entry_t))
631
632 #define NUPML1E         (RADIX_PGD_SIZE/sizeof(uint64_t))       /* number of userland PML1 pages */
633 #define NUPDPE          (NUPML1E * NL2EPG)/* number of userland PDP pages */
634 #define NUPDE           (NUPDPE * NL3EPG)       /* number of userland PD entries */
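/*
 * Worked out (assuming 4 KB pages and 8-byte entries): the 64 KB
 * RADIX_PGD gives NUPML1E == 8192 top-level entries, NL2EPG == NL3EPG
 * == 512, so NUPDPE == 8192 * 512 == 4M and NUPDE == 2G L3 entries,
 * each of which can map a 2 MB region, i.e. the full 2^52-byte user
 * address space.
 */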
635
636 /* POWER9 only permits a 64k partition table size. */
637 #define PARTTAB_SIZE_SHIFT      16
638 #define PARTTAB_SIZE    (1UL << PARTTAB_SIZE_SHIFT)
639
640 #define PARTTAB_HR              (1UL << 63) /* host uses radix */
641 #define PARTTAB_GR              (1UL << 63) /* guest uses radix; must match host */
642
643 /* TLB flush actions. Used as argument to tlbiel_all() */
644 enum {
645         TLB_INVAL_SCOPE_LPID = 0,       /* invalidate TLBs for current LPID */
646         TLB_INVAL_SCOPE_GLOBAL = 1,     /* invalidate all TLBs */
647 };
648
649 #define NPV_LIST_LOCKS  MAXCPU
650 static int pmap_initialized;
651 static vm_paddr_t proctab0pa;
652 static vm_paddr_t parttab_phys;
653 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
654
655 /*
656  * Data for the pv entry allocation mechanism.
657  * Updates to pv_invl_gen are protected by the pv_list_locks[]
658  * elements, but reads are not.
659  */
660 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
661 static struct mtx __exclusive_cache_line pv_chunks_mutex;
662 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
663 static struct md_page *pv_table;
664 static struct md_page pv_dummy;
665
666 #ifdef PV_STATS
667 #define PV_STAT(x)      do { x ; } while (0)
668 #else
669 #define PV_STAT(x)      do { } while (0)
670 #endif
671
672 #define pa_radix_index(pa)      ((pa) >> L3_PAGE_SIZE_SHIFT)
673 #define pa_to_pvh(pa)   (&pv_table[pa_radix_index(pa)])
674
675 #define PHYS_TO_PV_LIST_LOCK(pa)        \
676                         (&pv_list_locks[pa_radix_index(pa) % NPV_LIST_LOCKS])
677
678 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)  do {    \
679         struct rwlock **_lockp = (lockp);               \
680         struct rwlock *_new_lock;                       \
681                                                         \
682         _new_lock = PHYS_TO_PV_LIST_LOCK(pa);           \
683         if (_new_lock != *_lockp) {                     \
684                 if (*_lockp != NULL)                    \
685                         rw_wunlock(*_lockp);            \
686                 *_lockp = _new_lock;                    \
687                 rw_wlock(*_lockp);                      \
688         }                                               \
689 } while (0)
690
691 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)        \
692         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
693
694 #define RELEASE_PV_LIST_LOCK(lockp)             do {    \
695         struct rwlock **_lockp = (lockp);               \
696                                                         \
697         if (*_lockp != NULL) {                          \
698                 rw_wunlock(*_lockp);                    \
699                 *_lockp = NULL;                         \
700         }                                               \
701 } while (0)
702
703 #define VM_PAGE_TO_PV_LIST_LOCK(m)      \
704         PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
705
706 /*
707  * We support 52 bits, hence:
708  * bits 52 - 31 = 21, 0b10101
709  * RTS encoding details
710  * bits 0 - 2 of rts -> bits 5 - 7 of the unsigned long
711  * bits 3 - 4 of rts -> bits 61 - 62 of the unsigned long
712  */
713 #define RTS_SIZE ((0x2UL << 61) | (0x5UL << 5))
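/*
 * Worked example: for a 52-bit address space the required RTS value is
 * 52 - 31 = 21 = 0b10101.  The low three bits (0b101 == 0x5) form the
 * (0x5UL << 5) term and the top two bits (0b10 == 0x2) form the
 * (0x2UL << 61) term of RTS_SIZE above, matching the split RTS field
 * in the partition-table entry.
 */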
714
715
716 static int powernv_enabled = 1;
717
718 static inline void
719 tlbiel_radix_set_isa300(uint32_t set, uint32_t is,
720         uint32_t pid, uint32_t ric, uint32_t prs)
721 {
722         uint64_t rb;
723         uint64_t rs;
724
725         rb = PPC_BITLSHIFT_VAL(set, 51) | PPC_BITLSHIFT_VAL(is, 53);
726         rs = PPC_BITLSHIFT_VAL((uint64_t)pid, 31);
727
728         __asm __volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
729                      : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
730                      : "memory");
731 }
732
733 static void
734 tlbiel_flush_isa3(uint32_t num_sets, uint32_t is)
735 {
736         uint32_t set;
737
738         __asm __volatile("ptesync": : :"memory");
739
740         /*
741          * Flush the first set of the TLB, and the entire Page Walk Cache
742          * and partition table entries. Then flush the remaining sets of the
743          * TLB.
744          */
745         tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
746         for (set = 1; set < num_sets; set++)
747                 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
748
749         /* Do the same for process scoped entries. */
750         tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
751         for (set = 1; set < num_sets; set++)
752                 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
753
754         __asm __volatile("ptesync": : :"memory");
755 }
756
757 static void
758 mmu_radix_tlbiel_flush(int scope)
759 {
760         int is;
761
762         MPASS(scope == TLB_INVAL_SCOPE_LPID ||
763                   scope == TLB_INVAL_SCOPE_GLOBAL);
764         is = scope + 2;
765
766         tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, is);
767         __asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
768 }
769
770 static void
771 mmu_radix_tlbie_all()
772 {
773         /* TODO: LPID invalidate */
774         mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
775 }
776
777 static void
778 mmu_radix_init_amor(void)
779 {
780         /*
781         * In HV mode, we init AMOR (Authority Mask Override Register) so that
781         * the hypervisor and guest can set up IAMR (Instruction Authority Mask
783         * Register), enable key 0 and set it to 1.
784         *
785         * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
786         */
787         mtspr(SPR_AMOR, (3ul << 62));
788 }
789
790 static void
791 mmu_radix_init_iamr(void)
792 {
793         /*
794          * Radix always uses key0 of the IAMR to determine if an access is
795          * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
796          * fetch.
797          */
798         mtspr(SPR_IAMR, (1ul << 62));
799 }
800
801 static void
802 mmu_radix_pid_set(pmap_t pmap)
803 {
804
805         mtspr(SPR_PID, pmap->pm_pid);
806         isync();
807 }
808
809 /* Quick sort callout for comparing physical addresses. */
810 static int
811 pa_cmp(const void *a, const void *b)
812 {
813         const vm_paddr_t *pa = a, *pb = b;
814
815         if (*pa < *pb)
816                 return (-1);
817         else if (*pa > *pb)
818                 return (1);
819         else
820                 return (0);
821 }
822
823 #define pte_load_store(ptep, pte)       atomic_swap_long(ptep, pte)
824 #define pte_load_clear(ptep)            atomic_swap_long(ptep, 0)
825 #define pte_store(ptep, pte) do {          \
826         MPASS((pte) & (RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_X));  \
827         *(u_long *)(ptep) = (u_long)((pte) | PG_V | RPTE_LEAF); \
828 } while (0)
829 /*
830  * NB: should only be used for adding directories - not for direct mappings
831  */
832 #define pde_store(ptep, pa) do {                                \
833         *(u_long *)(ptep) = (u_long)(pa|RPTE_VALID|RPTE_SHIFT); \
834 } while (0)
835
836 #define pte_clear(ptep) do {                                    \
837                 *(u_long *)(ptep) = (u_long)(0);                \
838 } while (0)
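/*
 * pte_store() is for leaf mappings (it ORs in PG_V | RPTE_LEAF and
 * asserts that at least one EAA permission bit is present), while
 * pde_store() only links the next page-table level.  For example
 * (illustrative), a wired read/write kernel mapping could be entered
 * with
 *	pte_store(pte, pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A);
 * which is essentially what mmu_radix_kenter() below writes by hand.
 */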
839
840 #define PMAP_PDE_SUPERPAGE      (1 << 8)        /* supports 2MB superpages */
841
842 /*
843  * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
844  * (PTE) page mappings have identical settings for the following fields:
845  */
846 #define PG_PTE_PROMOTE  (PG_X | PG_MANAGED | PG_W | PG_PTE_CACHE | \
847             PG_M | PG_A | RPTE_EAA_MASK | PG_V)
848
849
850 static __inline void
851 pmap_resident_count_inc(pmap_t pmap, int count)
852 {
853
854         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
855         pmap->pm_stats.resident_count += count;
856 }
857
858 static __inline void
859 pmap_resident_count_dec(pmap_t pmap, int count)
860 {
861
862         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
863         KASSERT(pmap->pm_stats.resident_count >= count,
864             ("pmap %p resident count underflow %ld %d", pmap,
865             pmap->pm_stats.resident_count, count));
866         pmap->pm_stats.resident_count -= count;
867 }
868
869 static void
870 pagezero(vm_offset_t va)
871 {
872         va = trunc_page(va);
873
874         bzero((void *)va, PAGE_SIZE);
875 }
876
877 static uint64_t
878 allocpages(int n)
879 {
880         u_int64_t ret;
881
882         ret = moea64_bootstrap_alloc(n * PAGE_SIZE, PAGE_SIZE);
883         for (int i = 0; i < n; i++)
884                 pagezero(PHYS_TO_DMAP(ret + i * PAGE_SIZE));
885         return (ret);
886 }
887
888 static pt_entry_t *
889 kvtopte(vm_offset_t va)
890 {
891         pt_entry_t *l3e;
892
893         l3e = pmap_pml3e(kernel_pmap, va);
894         if ((*l3e & RPTE_VALID) == 0)
895                 return (NULL);
896         return (pmap_l3e_to_pte(l3e, va));
897 }
898
899 void
900 mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa)
901 {
902         pt_entry_t *pte;
903
904         pte = kvtopte(va);
905         MPASS(pte != NULL);
906         *pte = pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | RPTE_EAA_W | \
907             RPTE_EAA_P | PG_M | PG_A;
908 }
909
910 bool
911 mmu_radix_ps_enabled(pmap_t pmap)
912 {
913         return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
914 }
915
916 static pt_entry_t *
917 pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e)
918 {
919         pml3_entry_t *l3e;
920         pt_entry_t *pte;
921
922         va &= PG_PS_FRAME;
923         l3e = pmap_pml3e(pmap, va);
924         if (l3e == NULL || (*l3e & PG_V) == 0)
925                 return (NULL);
926
927         if (*l3e & RPTE_LEAF) {
928                 *is_l3e = 1;
929                 return (l3e);
930         }
931         *is_l3e = 0;
932         va &= PG_FRAME;
933         pte = pmap_l3e_to_pte(l3e, va);
934         if (pte == NULL || (*pte & PG_V) == 0)
935                 return (NULL);
936         return (pte);
937 }
938
939 int
940 pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
941 {
942         pt_entry_t *pte;
943         pt_entry_t startpte, origpte, newpte;
944         vm_page_t m;
945         int is_l3e;
946
947         startpte = 0;
948  retry:
949         if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL)
950                 return (KERN_INVALID_ADDRESS);
951         origpte = newpte = *pte;
952         if (startpte == 0) {
953                 startpte = origpte;
954                 if (((flags & VM_PROT_WRITE) && (startpte & PG_M)) ||
955                     ((flags & VM_PROT_READ) && (startpte & PG_A))) {
956                         pmap_invalidate_all(pmap);
957 #ifdef INVARIANTS
958                         if (VERBOSE_PMAP || pmap_logging)
959                                 printf("%s(%p, %#lx, %#x) (%#lx) -- invalidate all\n",
960                                     __func__, pmap, va, flags, origpte);
961 #endif
962                         return (KERN_FAILURE);
963                 }
964         }
965 #ifdef INVARIANTS
966         if (VERBOSE_PMAP || pmap_logging)
967                 printf("%s(%p, %#lx, %#x) (%#lx)\n", __func__, pmap, va,
968                     flags, origpte);
969 #endif
970         PMAP_LOCK(pmap);
971         if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL ||
972             *pte != origpte) {
973                 PMAP_UNLOCK(pmap);
974                 return (KERN_FAILURE);
975         }
976         m = PHYS_TO_VM_PAGE(newpte & PG_FRAME);
977         MPASS(m != NULL);
978         switch (flags) {
979         case VM_PROT_READ:
980                 if ((newpte & (RPTE_EAA_R|RPTE_EAA_X)) == 0)
981                         goto protfail;
982                 newpte |= PG_A;
983                 vm_page_aflag_set(m, PGA_REFERENCED);
984                 break;
985         case VM_PROT_WRITE:
986                 if ((newpte & RPTE_EAA_W) == 0)
987                         goto protfail;
988                 if (is_l3e)
989                         goto protfail;
990                 newpte |= PG_M;
991                 vm_page_dirty(m);
992                 break;
993         case VM_PROT_EXECUTE:
994                 if ((newpte & RPTE_EAA_X) == 0)
995                         goto protfail;
996                 newpte |= PG_A;
997                 vm_page_aflag_set(m, PGA_REFERENCED);
998                 break;
999         }
1000
1001         if (!atomic_cmpset_long(pte, origpte, newpte))
1002                 goto retry;
1003         ptesync();
1004         PMAP_UNLOCK(pmap);
1005         if (startpte == newpte)
1006                 return (KERN_FAILURE);
1007         return (0);
1008  protfail:
1009         PMAP_UNLOCK(pmap);
1010         return (KERN_PROTECTION_FAILURE);
1011 }
1012
1013 /*
1014  * Returns TRUE if the given page is mapped individually or as part of
1015  * a 2mpage.  Otherwise, returns FALSE.
1016  */
1017 boolean_t
1018 mmu_radix_page_is_mapped(vm_page_t m)
1019 {
1020         struct rwlock *lock;
1021         boolean_t rv;
1022
1023         if ((m->oflags & VPO_UNMANAGED) != 0)
1024                 return (FALSE);
1025         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
1026         rw_rlock(lock);
1027         rv = !TAILQ_EMPTY(&m->md.pv_list) ||
1028             ((m->flags & PG_FICTITIOUS) == 0 &&
1029             !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
1030         rw_runlock(lock);
1031         return (rv);
1032 }
1033
1034 /*
1035  * Determine the appropriate bits to set in a PTE or PDE for a specified
1036  * caching mode.
1037  */
1038 static int
1039 pmap_cache_bits(vm_memattr_t ma)
1040 {
1041         if (ma != VM_MEMATTR_DEFAULT) {
1042                 switch (ma) {
1043                 case VM_MEMATTR_UNCACHEABLE:
1044                         return (RPTE_ATTR_GUARDEDIO);
1045                 case VM_MEMATTR_CACHEABLE:
1046                         return (RPTE_ATTR_MEM);
1047                 case VM_MEMATTR_WRITE_BACK:
1048                 case VM_MEMATTR_PREFETCHABLE:
1049                 case VM_MEMATTR_WRITE_COMBINING:
1050                         return (RPTE_ATTR_UNGUARDEDIO);
1051                 }
1052         }
1053         return (0);
1054 }
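/*
 * Example (illustrative): a device mapping established with
 * VM_MEMATTR_UNCACHEABLE gets RPTE_ATTR_GUARDEDIO merged into its PTE
 * attribute bits (PG_PTE_CACHE), whereas ordinary memory uses the
 * default (0) attribute encoding.
 */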
1055
1056 static void
1057 pmap_invalidate_page(pmap_t pmap, vm_offset_t start)
1058 {
1059         ptesync();
1060         if (pmap == kernel_pmap)
1061                 radix_tlbie_invlpg_kernel_4k(start);
1062         else
1063                 radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1064         ttusync();
1065 }
1066
1067 static void
1068 pmap_invalidate_page_2m(pmap_t pmap, vm_offset_t start)
1069 {
1070         ptesync();
1071         if (pmap == kernel_pmap)
1072                 radix_tlbie_invlpg_kernel_2m(start);
1073         else
1074                 radix_tlbie_invlpg_user_2m(pmap->pm_pid, start);
1075         ttusync();
1076 }
1077
1078 static void
1079 pmap_invalidate_pwc(pmap_t pmap)
1080 {
1081         ptesync();
1082         if (pmap == kernel_pmap)
1083                 radix_tlbie_invlpwc_kernel();
1084         else
1085                 radix_tlbie_invlpwc_user(pmap->pm_pid);
1086         ttusync();
1087 }
1088
1089 static void
1090 pmap_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
1091 {
1092         if (((end - start) >> PAGE_SHIFT) > 8) {
1093                 pmap_invalidate_all(pmap);
1094                 return;
1095         }
1096         ptesync();
1097         if (pmap == kernel_pmap) {
1098                 while (start < end) {
1099                         radix_tlbie_invlpg_kernel_4k(start);
1100                         start += PAGE_SIZE;
1101                 }
1102         } else {
1103                 while (start < end) {
1104                         radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
1105                         start += PAGE_SIZE;
1106                 }
1107         }
1108         ttusync();
1109 }
1110
1111 static void
1112 pmap_invalidate_all(pmap_t pmap)
1113 {
1114         ptesync();
1115         if (pmap == kernel_pmap)
1116                 radix_tlbie_flush_kernel();
1117         else
1118                 radix_tlbie_flush_user(pmap->pm_pid);
1119         ttusync();
1120 }
1121
1122 static void
1123 pmap_invalidate_l3e_page(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e)
1124 {
1125
1126         /*
1127          * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
1128          * by a promotion that did not invalidate the 512 4KB page mappings
1129          * that might exist in the TLB.  Consequently, at this point, the TLB
1130          * may hold both 4KB and 2MB page mappings for the address range [va,
1131          * va + L3_PAGE_SIZE).  Therefore, the entire range must be invalidated here.
1132          * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
1133          * 4KB page mappings for the address range [va, va + L3_PAGE_SIZE), and so a
1134          * single invalidation of the 2MB page suffices to remove the mapping from the
1135          * TLB.
1136          */
1137         ptesync();
1138         if ((l3e & PG_PROMOTED) != 0)
1139                 pmap_invalidate_range(pmap, va, va + L3_PAGE_SIZE - 1);
1140         else
1141                 pmap_invalidate_page_2m(pmap, va);
1142
1143         pmap_invalidate_pwc(pmap);
1144 }
1145
1146 static __inline struct pv_chunk *
1147 pv_to_chunk(pv_entry_t pv)
1148 {
1149
1150         return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1151 }
1152
1153 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1154
1155 #define PC_FREE0        0xfffffffffffffffful
1156 #define PC_FREE1        0x3ffffffffffffffful
1157
1158 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1 };
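/*
 * Each pv_chunk tracks its _NPCPV pv entries with the pc_map[] bitmaps:
 * a set bit means the corresponding pc_pventry[] slot is free.  PC_FREE0
 * and PC_FREE1 are the "all free" patterns; the high bits of pc_map[1]
 * stay clear because the chunk holds fewer than 128 entries.  A pv entry
 * maps to its bit as in free_pv_entry() below, e.g. (illustrative):
 *	idx = pv - &pc->pc_pventry[0];
 *	field = idx / 64;
 *	bit = idx % 64;
 *	pc->pc_map[field] |= 1ul << bit;	(marks the entry free)
 */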
1159
1160 /*
1161  * Ensure that the number of spare PV entries in the specified pmap meets or
1162  * exceeds the given count, "needed".
1163  *
1164  * The given PV list lock may be released.
1165  */
1166 static void
1167 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1168 {
1169         struct pch new_tail;
1170         struct pv_chunk *pc;
1171         vm_page_t m;
1172         int avail, free;
1173         bool reclaimed;
1174
1175         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1176         KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
1177
1178         /*
1179          * Newly allocated PV chunks must be stored in a private list until
1180          * the required number of PV chunks have been allocated.  Otherwise,
1181          * reclaim_pv_chunk() could recycle one of these chunks.  In
1182          * contrast, these chunks must be added to the pmap upon allocation.
1183          */
1184         TAILQ_INIT(&new_tail);
1185 retry:
1186         avail = 0;
1187         TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1188                 //              if ((cpu_feature2 & CPUID2_POPCNT) == 0)
1189                 bit_count((bitstr_t *)pc->pc_map, 0,
1190                                   sizeof(pc->pc_map) * NBBY, &free);
1191 #if 0
1192                 free = popcnt_pc_map_pq(pc->pc_map);
1193 #endif
1194                 if (free == 0)
1195                         break;
1196                 avail += free;
1197                 if (avail >= needed)
1198                         break;
1199         }
1200         for (reclaimed = false; avail < needed; avail += _NPCPV) {
1201                 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1202                     VM_ALLOC_WIRED);
1203                 if (m == NULL) {
1204                         m = reclaim_pv_chunk(pmap, lockp);
1205                         if (m == NULL)
1206                                 goto retry;
1207                         reclaimed = true;
1208                 }
1209                 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1210                 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1211                 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1212                 pc->pc_pmap = pmap;
1213                 pc->pc_map[0] = PC_FREE0;
1214                 pc->pc_map[1] = PC_FREE1;
1215                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1216                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
1217                 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
1218
1219                 /*
1220                  * The reclaim might have freed a chunk from the current pmap.
1221                  * If that chunk contained available entries, we need to
1222                  * re-count the number of available entries.
1223                  */
1224                 if (reclaimed)
1225                         goto retry;
1226         }
1227         if (!TAILQ_EMPTY(&new_tail)) {
1228                 mtx_lock(&pv_chunks_mutex);
1229                 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
1230                 mtx_unlock(&pv_chunks_mutex);
1231         }
1232 }
1233
1234 /*
1235  * First find and then remove the pv entry for the specified pmap and virtual
1236  * address from the specified pv list.  Returns the pv entry if found and NULL
1237  * otherwise.  This operation can be performed on pv lists for either 4KB or
1238  * 2MB page mappings.
1239  */
1240 static __inline pv_entry_t
1241 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1242 {
1243         pv_entry_t pv;
1244
1245         TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
1246 #ifdef INVARIANTS
1247                 if (PV_PMAP(pv) == NULL) {
1248                         printf("corrupted pv_chunk/pv %p\n", pv);
1249                         printf("pv_chunk: %64D\n", pv_to_chunk(pv), ":");
1250                 }
1251                 MPASS(PV_PMAP(pv) != NULL);
1252                 MPASS(pv->pv_va != 0);
1253 #endif
1254                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1255                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
1256                         pvh->pv_gen++;
1257                         break;
1258                 }
1259         }
1260         return (pv);
1261 }
1262
1263 /*
1264  * After demotion from a 2MB page mapping to 512 4KB page mappings,
1265  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
1266  * entries for each of the 4KB page mappings.
1267  */
1268 static void
1269 pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1270     struct rwlock **lockp)
1271 {
1272         struct md_page *pvh;
1273         struct pv_chunk *pc;
1274         pv_entry_t pv;
1275         vm_offset_t va_last;
1276         vm_page_t m;
1277         int bit, field;
1278
1279         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1280         KASSERT((pa & L3_PAGE_MASK) == 0,
1281             ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
1282         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1283
1284         /*
1285          * Transfer the 2mpage's pv entry for this mapping to the first
1286          * page's pv list.  Once this transfer begins, the pv list lock
1287          * must not be released until the last pv entry is reinstantiated.
1288          */
1289         pvh = pa_to_pvh(pa);
1290         va = trunc_2mpage(va);
1291         pv = pmap_pvh_remove(pvh, pmap, va);
1292         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
1293         m = PHYS_TO_VM_PAGE(pa);
1294         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1295
1296         m->md.pv_gen++;
1297         /* Instantiate the remaining NPTEPG - 1 pv entries. */
1298         PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
1299         va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1300         for (;;) {
1301                 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1302                 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0
1303                     , ("pmap_pv_demote_pde: missing spare"));
1304                 for (field = 0; field < _NPCM; field++) {
1305                         while (pc->pc_map[field]) {
1306                                 bit = cnttzd(pc->pc_map[field]);
1307                                 pc->pc_map[field] &= ~(1ul << bit);
1308                                 pv = &pc->pc_pventry[field * 64 + bit];
1309                                 va += PAGE_SIZE;
1310                                 pv->pv_va = va;
1311                                 m++;
1312                                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1313                             ("pmap_pv_demote_pde: page %p is not managed", m));
1314                                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1315
1316                                 m->md.pv_gen++;
1317                                 if (va == va_last)
1318                                         goto out;
1319                         }
1320                 }
1321                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1322                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1323         }
1324 out:
1325         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1326                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1327                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1328         }
1329         PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
1330         PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
1331 }
1332
1333 static void
1334 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap)
1335 {
1336
1337         if (pmap == NULL)
1338                 return;
1339         pmap_invalidate_all(pmap);
1340         if (pmap != locked_pmap)
1341                 PMAP_UNLOCK(pmap);
1342 }
1343
1344 /*
1345  * We are in a serious low memory condition.  Resort to
1346  * drastic measures to free some pages so we can allocate
1347  * another pv entry chunk.
1348  *
1349  * Returns NULL if PV entries were reclaimed from the specified pmap.
1350  *
1351  * We do not, however, unmap 2mpages because subsequent accesses will
1352  * allocate per-page pv entries until repromotion occurs, thereby
1353  * exacerbating the shortage of free pv entries.
1354  */
1355 static int active_reclaims = 0;
1356 static vm_page_t
1357 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1358 {
1359         struct pv_chunk *pc, *pc_marker, *pc_marker_end;
1360         struct pv_chunk_header pc_marker_b, pc_marker_end_b;
1361         struct md_page *pvh;
1362         pml3_entry_t *l3e;
1363         pmap_t next_pmap, pmap;
1364         pt_entry_t *pte, tpte;
1365         pv_entry_t pv;
1366         vm_offset_t va;
1367         vm_page_t m, m_pc;
1368         struct spglist free;
1369         uint64_t inuse;
1370         int bit, field, freed;
1371
1372         PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1373         KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
1374         pmap = NULL;
1375         m_pc = NULL;
1376         SLIST_INIT(&free);
1377         bzero(&pc_marker_b, sizeof(pc_marker_b));
1378         bzero(&pc_marker_end_b, sizeof(pc_marker_end_b));
1379         pc_marker = (struct pv_chunk *)&pc_marker_b;
1380         pc_marker_end = (struct pv_chunk *)&pc_marker_end_b;
1381
1382         mtx_lock(&pv_chunks_mutex);
1383         active_reclaims++;
1384         TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru);
1385         TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru);
1386         while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end &&
1387             SLIST_EMPTY(&free)) {
1388                 next_pmap = pc->pc_pmap;
1389                 if (next_pmap == NULL) {
1390                         /*
1391                          * The next chunk is a marker.  However, it is
1392                          * not our marker, so active_reclaims must be
1393                          * > 1.  Consequently, the next_chunk code
1394                          * will not rotate the pv_chunks list.
1395                          */
1396                         goto next_chunk;
1397                 }
1398                 mtx_unlock(&pv_chunks_mutex);
1399
1400                 /*
1401                  * A pv_chunk can only be removed from the pc_lru list
1402                  * when both pv_chunks_mutex is owned and the
1403                  * corresponding pmap is locked.
1404                  */
1405                 if (pmap != next_pmap) {
1406                         reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1407                         pmap = next_pmap;
1408                         /* Avoid deadlock and lock recursion. */
1409                         if (pmap > locked_pmap) {
1410                                 RELEASE_PV_LIST_LOCK(lockp);
1411                                 PMAP_LOCK(pmap);
1412                                 mtx_lock(&pv_chunks_mutex);
1413                                 continue;
1414                         } else if (pmap != locked_pmap) {
1415                                 if (PMAP_TRYLOCK(pmap)) {
1416                                         mtx_lock(&pv_chunks_mutex);
1417                                         continue;
1418                                 } else {
1419                                         pmap = NULL; /* pmap is not locked */
1420                                         mtx_lock(&pv_chunks_mutex);
1421                                         pc = TAILQ_NEXT(pc_marker, pc_lru);
1422                                         if (pc == NULL ||
1423                                             pc->pc_pmap != next_pmap)
1424                                                 continue;
1425                                         goto next_chunk;
1426                                 }
1427                         }
1428                 }
1429
1430                 /*
1431                  * Destroy every non-wired, 4 KB page mapping in the chunk.
1432                  */
1433                 freed = 0;
1434                 for (field = 0; field < _NPCM; field++) {
1435                         for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1436                             inuse != 0; inuse &= ~(1UL << bit)) {
1437                                 bit = cnttzd(inuse);
1438                                 pv = &pc->pc_pventry[field * 64 + bit];
1439                                 va = pv->pv_va;
1440                                 l3e = pmap_pml3e(pmap, va);
1441                                 if ((*l3e & RPTE_LEAF) != 0)
1442                                         continue;
1443                                 pte = pmap_l3e_to_pte(l3e, va);
1444                                 if ((*pte & PG_W) != 0)
1445                                         continue;
1446                                 tpte = pte_load_clear(pte);
1447                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
1448                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
1449                                         vm_page_dirty(m);
1450                                 if ((tpte & PG_A) != 0)
1451                                         vm_page_aflag_set(m, PGA_REFERENCED);
1452                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1453                                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
1454
1455                                 m->md.pv_gen++;
1456                                 if (TAILQ_EMPTY(&m->md.pv_list) &&
1457                                     (m->flags & PG_FICTITIOUS) == 0) {
1458                                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
1459                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
1460                                                 vm_page_aflag_clear(m,
1461                                                     PGA_WRITEABLE);
1462                                         }
1463                                 }
1464                                 pc->pc_map[field] |= 1UL << bit;
1465                                 pmap_unuse_pt(pmap, va, *l3e, &free);
1466                                 freed++;
1467                         }
1468                 }
1469                 if (freed == 0) {
1470                         mtx_lock(&pv_chunks_mutex);
1471                         goto next_chunk;
1472                 }
1473                 /* Every freed mapping is for a 4 KB page. */
1474                 pmap_resident_count_dec(pmap, freed);
1475                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
1476                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
1477                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
1478                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1479                 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1) {
1480                         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1481                         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1482                         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1483                         /* Entire chunk is free; return it. */
1484                         m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1485                         mtx_lock(&pv_chunks_mutex);
1486                         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1487                         break;
1488                 }
1489                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1490                 mtx_lock(&pv_chunks_mutex);
1491                 /* One freed pv entry in locked_pmap is sufficient. */
1492                 if (pmap == locked_pmap)
1493                         break;
1494 next_chunk:
1495                 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1496                 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru);
1497                 if (active_reclaims == 1 && pmap != NULL) {
1498                         /*
1499                          * Rotate the pv chunks list so that we do not
1500                          * scan the same pv chunks that could not be
1501                          * freed (because they contained a wired
1502                          * and/or superpage mapping) on every
1503                          * invocation of reclaim_pv_chunk().
1504                          */
1505                         while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) {
1506                                 MPASS(pc->pc_pmap != NULL);
1507                                 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1508                                 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1509                         }
1510                 }
1511         }
1512         TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru);
1513         TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru);
1514         active_reclaims--;
1515         mtx_unlock(&pv_chunks_mutex);
1516         reclaim_pv_chunk_leave_pmap(pmap, locked_pmap);
1517         if (m_pc == NULL && !SLIST_EMPTY(&free)) {
1518                 m_pc = SLIST_FIRST(&free);
1519                 SLIST_REMOVE_HEAD(&free, plinks.s.ss);
1520                 /* Recycle a freed page table page. */
1521                 m_pc->ref_count = 1;
1522         }
1523         vm_page_free_pages_toq(&free, true);
1524         return (m_pc);
1525 }
1526
1527 /*
1528  * Free the pv_entry back to its pv chunk, releasing the chunk when it
1528  * becomes entirely free.
1529  */
1530 static void
1531 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1532 {
1533         struct pv_chunk *pc;
1534         int idx, field, bit;
1535
1536 #ifdef VERBOSE_PV
1537         if (pmap != kernel_pmap)
1538                 printf("%s(%p, %p)\n", __func__, pmap, pv);
1539 #endif
1540         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1541         PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1542         PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1543         PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1544         pc = pv_to_chunk(pv);
1545         idx = pv - &pc->pc_pventry[0];
1546         field = idx / 64;
1547         bit = idx % 64;
1548         pc->pc_map[field] |= 1ul << bit;
1549         if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1) {
1550                 /* 98% of the time, pc is already at the head of the list. */
1551                 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1552                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1553                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1554                 }
1555                 return;
1556         }
1557         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1558         free_pv_chunk(pc);
1559 }
1560
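/*
 * Release a fully free pv chunk: unlink it from the global pv_chunks list,
 * update the PV statistics, and return its backing page to the VM system.
 */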
1561 static void
1562 free_pv_chunk(struct pv_chunk *pc)
1563 {
1564         vm_page_t m;
1565
1566         mtx_lock(&pv_chunks_mutex);
1567         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1568         mtx_unlock(&pv_chunks_mutex);
1569         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1570         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1571         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1572         /* entire chunk is free, return it */
1573         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1574         vm_page_unwire_noq(m);
1575         vm_page_free(m);
1576 }
1577
1578 /*
1579  * Returns a new PV entry, allocating a new PV chunk from the system when
1580  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
1581  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
1582  * returned.
1583  *
1584  * The given PV list lock may be released.
1585  */
1586 static pv_entry_t
1587 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1588 {
1589         int bit, field;
1590         pv_entry_t pv;
1591         struct pv_chunk *pc;
1592         vm_page_t m;
1593
1594         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1595         PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1596 retry:
1597         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1598         if (pc != NULL) {
1599                 for (field = 0; field < _NPCM; field++) {
1600                         if (pc->pc_map[field]) {
1601                                 bit = cnttzd(pc->pc_map[field]);
1602                                 break;
1603                         }
1604                 }
1605                 if (field < _NPCM) {
1606                         pv = &pc->pc_pventry[field * 64 + bit];
1607                         pc->pc_map[field] &= ~(1ul << bit);
1608                         /* If this was the last item, move it to tail */
1609                         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) {
1610                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1611                                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1612                                     pc_list);
1613                         }
1614                         PV_STAT(atomic_add_long(&pv_entry_count, 1));
1615                         PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1616                         MPASS(PV_PMAP(pv) != NULL);
1617                         return (pv);
1618                 }
1619         }
1620         /* No free items, allocate another chunk */
1621         m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1622             VM_ALLOC_WIRED);
1623         if (m == NULL) {
1624                 if (lockp == NULL) {
1625                         PV_STAT(pc_chunk_tryfail++);
1626                         return (NULL);
1627                 }
1628                 m = reclaim_pv_chunk(pmap, lockp);
1629                 if (m == NULL)
1630                         goto retry;
1631         }
1632         PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1633         PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1634         pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1635         pc->pc_pmap = pmap;
1636         pc->pc_map[0] = PC_FREE0 & ~1ul;        /* preallocated bit 0 */
1637         pc->pc_map[1] = PC_FREE1;
1638         mtx_lock(&pv_chunks_mutex);
1639         TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1640         mtx_unlock(&pv_chunks_mutex);
1641         pv = &pc->pc_pventry[0];
1642         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1643         PV_STAT(atomic_add_long(&pv_entry_count, 1));
1644         PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1645         MPASS(PV_PMAP(pv) != NULL);
1646         return (pv);
1647 }
1648
1649 #if VM_NRESERVLEVEL > 0
1650 /*
1651  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
1652  * replace the many pv entries for the 4KB page mappings by a single pv entry
1653  * for the 2MB page mapping.
1654  */
1655 static void
1656 pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1657     struct rwlock **lockp)
1658 {
1659         struct md_page *pvh;
1660         pv_entry_t pv;
1661         vm_offset_t va_last;
1662         vm_page_t m;
1663
1664         KASSERT((pa & L3_PAGE_MASK) == 0,
1665             ("pmap_pv_promote_l3e: pa is not 2mpage aligned"));
1666         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
1667
1668         /*
1669          * Transfer the first page's pv entry for this mapping to the 2mpage's
1670          * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
1671          * a transfer avoids the possibility that get_pv_entry() calls
1672          * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
1673          * mappings that is being promoted.
1674          */
1675         m = PHYS_TO_VM_PAGE(pa);
1676         va = trunc_2mpage(va);
1677         pv = pmap_pvh_remove(&m->md, pmap, va);
1678         KASSERT(pv != NULL, ("pmap_pv_promote_l3e: pv not found"));
1679         pvh = pa_to_pvh(pa);
1680         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
1681         pvh->pv_gen++;
1682         /* Free the remaining NPTEPG - 1 pv entries. */
1683         va_last = va + L3_PAGE_SIZE - PAGE_SIZE;
1684         do {
1685                 m++;
1686                 va += PAGE_SIZE;
1687                 pmap_pvh_free(&m->md, pmap, va);
1688         } while (va < va_last);
1689 }
1690 #endif /* VM_NRESERVLEVEL > 0 */
1691
1692 /*
1693  * First find and then destroy the pv entry for the specified pmap and virtual
1694  * address.  This operation can be performed on pv lists for either 4KB or 2MB
1695  * page mappings.
1696  */
1697 static void
1698 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1699 {
1700         pv_entry_t pv;
1701
1702         pv = pmap_pvh_remove(pvh, pmap, va);
1703         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
1704         free_pv_entry(pmap, pv);
1705 }
1706
1707 /*
1708  * Conditionally create the PV entry for a 4KB page mapping if the required
1709  * memory can be allocated without resorting to reclamation.
1710  */
1711 static boolean_t
1712 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1713     struct rwlock **lockp)
1714 {
1715         pv_entry_t pv;
1716
1717         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1718         /* Pass NULL instead of the lock pointer to disable reclamation. */
1719         if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1720                 pv->pv_va = va;
1721                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1722                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
1723                 m->md.pv_gen++;
1724                 return (TRUE);
1725         } else
1726                 return (FALSE);
1727 }
1728
1729 vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX];
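/*
 * With INVARIANTS enabled, validate_addr() checks that a bootstrap
 * allocation lies entirely within one of the ranges recorded in
 * phys_avail_debug during early bootstrap; otherwise it compiles to an
 * empty stub.
 */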
1730 #ifdef INVARIANTS
1731 static void
1732 validate_addr(vm_paddr_t addr, vm_size_t size)
1733 {
1734         vm_paddr_t end = addr + size;
1735         bool found = false;
1736
1737         for (int i = 0; i < 2 * phys_avail_count; i += 2) {
1738                 if (addr >= phys_avail_debug[i] &&
1739                         end <= phys_avail_debug[i + 1]) {
1740                         found = true;
1741                         break;
1742                 }
1743         }
1744         KASSERT(found, ("%#lx-%#lx outside of initial phys_avail array",
1745                                         addr, end));
1746 }
1747 #else
1748 static void validate_addr(vm_paddr_t addr, vm_size_t size) {}
1749 #endif
1750 #define DMAP_PAGE_BITS (RPTE_VALID | RPTE_LEAF | RPTE_EAA_MASK | PG_M | PG_A)
1751
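/*
 * Allocate and zero a single physical page for use as a page table page
 * during bootstrap.
 */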
1752 static vm_paddr_t
1753 alloc_pt_page(void)
1754 {
1755         vm_paddr_t page;
1756
1757         page = allocpages(1);
1758         pagezero(PHYS_TO_DMAP(page));
1759         return (page);
1760 }
1761
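/*
 * Populate the direct map for the physical range [start, end).  The kernel
 * radix tree is walked from the L1 entry downward, allocating intermediate
 * page table pages as needed, and the largest leaf that alignment and the
 * remaining length permit (L2, L3, or 4KB PTE) is installed on each
 * iteration.
 */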
1762 static void
1763 mmu_radix_dmap_range(vm_paddr_t start, vm_paddr_t end)
1764 {
1765         pt_entry_t *pte, pteval;
1766         vm_paddr_t page;
1767
1768         if (bootverbose)
1769                 printf("%s %lx -> %lx\n", __func__, start, end);
1770         while (start < end) {
1771                 pteval = start | DMAP_PAGE_BITS;
1772                 pte = pmap_pml1e(kernel_pmap, PHYS_TO_DMAP(start));
1773                 if ((*pte & RPTE_VALID) == 0) {
1774                         page = alloc_pt_page();
1775                         pde_store(pte, page);
1776                 }
1777                 pte = pmap_l1e_to_l2e(pte, PHYS_TO_DMAP(start));
1778                 if ((start & L2_PAGE_MASK) == 0 &&
1779                         end - start >= L2_PAGE_SIZE) {
1780                         start += L2_PAGE_SIZE;
1781                         goto done;
1782                 } else if ((*pte & RPTE_VALID) == 0) {
1783                         page = alloc_pt_page();
1784                         pde_store(pte, page);
1785                 }
1786
1787                 pte = pmap_l2e_to_l3e(pte, PHYS_TO_DMAP(start));
1788                 if ((start & L3_PAGE_MASK) == 0 &&
1789                         end - start >= L3_PAGE_SIZE) {
1790                         start += L3_PAGE_SIZE;
1791                         goto done;
1792                 } else if ((*pte & RPTE_VALID) == 0) {
1793                         page = alloc_pt_page();
1794                         pde_store(pte, page);
1795                 }
1796                 pte = pmap_l3e_to_pte(pte, PHYS_TO_DMAP(start));
1797                 start += PAGE_SIZE;
1798         done:
1799                 pte_store(pte, pteval);
1800         }
1801 }
1802
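/*
 * Direct-map every physical memory region reported by the platform,
 * clamping the total to hwphyssz (the hw.physmem limit) when one is set.
 */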
1803 static void
1804 mmu_radix_dmap_populate(vm_size_t hwphyssz)
1805 {
1806         vm_paddr_t start, end;
1807
1808         for (int i = 0; i < pregions_sz; i++) {
1809                 start = pregions[i].mr_start;
1810                 end = start + pregions[i].mr_size;
1811                 if (hwphyssz && start >= hwphyssz)
1812                         break;
1813                 if (hwphyssz && hwphyssz < end)
1814                         end = hwphyssz;
1815                 mmu_radix_dmap_range(start, end);
1816         }
1817 }
1818
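/*
 * Build the initial kernel page tables: allocate and zero the top-level
 * PML1 directory, populate the direct map, and preallocate nkpt page table
 * pages covering the start of KVA.
 */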
1819 static void
1820 mmu_radix_setup_pagetables(vm_size_t hwphyssz)
1821 {
1822         vm_paddr_t ptpages, pages;
1823         pt_entry_t *pte;
1824         vm_paddr_t l1phys;
1825
1826         bzero(kernel_pmap, sizeof(struct pmap));
1827         PMAP_LOCK_INIT(kernel_pmap);
1828
1829         ptpages = allocpages(2);
1830         l1phys = moea64_bootstrap_alloc(RADIX_PGD_SIZE, RADIX_PGD_SIZE);
1831         validate_addr(l1phys, RADIX_PGD_SIZE);
1832         if (bootverbose)
1833                 printf("l1phys=%lx\n", l1phys);
1834         MPASS((l1phys & (RADIX_PGD_SIZE-1)) == 0);
1835         for (int i = 0; i < RADIX_PGD_SIZE/PAGE_SIZE; i++)
1836                 pagezero(PHYS_TO_DMAP(l1phys + i * PAGE_SIZE));
1837         kernel_pmap->pm_pml1 = (pml1_entry_t *)PHYS_TO_DMAP(l1phys);
1838
1839         mmu_radix_dmap_populate(hwphyssz);
1840
1841         /*
1842          * Create page tables for first 128MB of KVA
1843          */
1844         pages = ptpages;
1845         pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS);
1846         *pte = (pages | RPTE_VALID | RPTE_SHIFT);
1847         pages += PAGE_SIZE;
1848         pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS);
1849         *pte = (pages | RPTE_VALID | RPTE_SHIFT);
1850         pages += PAGE_SIZE;
1851         pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS);
1852         /*
1853          * The kernel page table pages need to be preserved in
1854          * phys_avail and must not overlap with previous allocations.
1855          */
1856         pages = allocpages(nkpt);
1857         if (bootverbose) {
1858                 printf("phys_avail after dmap populate and nkpt allocation\n");
1859                 for (int j = 0; j < 2 * phys_avail_count; j+=2)
1860                         printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
1861                                    j, phys_avail[j], j + 1, phys_avail[j + 1]);
1862         }
1863         KPTphys = pages;
1864         for (int i = 0; i < nkpt; i++, pte++, pages += PAGE_SIZE)
1865                 *pte = (pages | RPTE_VALID | RPTE_SHIFT);
1866         kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE;
1867         if (bootverbose)
1868                 printf("kernel_pmap pml1 %p\n", kernel_pmap->pm_pml1);
1869         /*
1870          * Add a physical memory segment (vm_phys_seg) corresponding to the
1871          * preallocated kernel page table pages so that vm_page structures
1872          * representing these pages will be created.  The vm_page structures
1873          * are required for promotion of the corresponding kernel virtual
1874          * addresses to superpage mappings.
1875          */
1876         vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
1877 }
1878
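/*
 * Early bootstrap: obtain the physical memory regions from the firmware,
 * build phys_avail (honoring hw.physmem and excluding the kernel and
 * exception vectors), allocate the partition and process tables, and then
 * construct the initial page tables.
 */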
1879 static void
1880 mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end)
1881 {
1882         vm_paddr_t      kpstart, kpend;
1883         vm_size_t       physsz, hwphyssz;
1884         //uint64_t      l2virt;
1885         int             rm_pavail, proctab_size;
1886         int             i, j;
1887
1888         kpstart = start & ~DMAP_BASE_ADDRESS;
1889         kpend = end & ~DMAP_BASE_ADDRESS;
1890
1891         /* Get physical memory regions from firmware */
1892         mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
1893         CTR0(KTR_PMAP, "mmu_radix_early_bootstrap: physical memory");
1894
1895         if (2 * VM_PHYSSEG_MAX < regions_sz)
1896                 panic("mmu_radix_early_bootstrap: phys_avail too small");
1897
1898         if (bootverbose)
1899                 for (int i = 0; i < regions_sz; i++)
1900                         printf("regions[%d].mr_start=%lx regions[%d].mr_size=%lx\n",
1901                             i, regions[i].mr_start, i, regions[i].mr_size);
1902         /*
1903          * XXX workaround a simulator bug
1904          */
1905         for (int i = 0; i < regions_sz; i++)
1906                 if (regions[i].mr_start & PAGE_MASK) {
1907                         regions[i].mr_start += PAGE_MASK;
1908                         regions[i].mr_start &= ~PAGE_MASK;
1909                         regions[i].mr_size &= ~PAGE_MASK;
1910                 }
1911         if (bootverbose)
1912                 for (int i = 0; i < pregions_sz; i++)
1913                         printf("pregions[%d].mr_start=%lx pregions[%d].mr_size=%lx\n",
1914                             i, pregions[i].mr_start, i, pregions[i].mr_size);
1915
1916         phys_avail_count = 0;
1917         physsz = 0;
1918         hwphyssz = 0;
1919         TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1920         for (i = 0, j = 0; i < regions_sz; i++) {
1921                 if (bootverbose)
1922                         printf("regions[%d].mr_start=%016lx regions[%d].mr_size=%016lx\n",
1923                             i, regions[i].mr_start, i, regions[i].mr_size);
1924
1925                 if (regions[i].mr_size < PAGE_SIZE)
1926                         continue;
1927
1928                 if (hwphyssz != 0 &&
1929                     (physsz + regions[i].mr_size) >= hwphyssz) {
1930                         if (physsz < hwphyssz) {
1931                                 phys_avail[j] = regions[i].mr_start;
1932                                 phys_avail[j + 1] = regions[i].mr_start +
1933                                     (hwphyssz - physsz);
1934                                 physsz = hwphyssz;
1935                                 phys_avail_count++;
1936                                 dump_avail[j] = phys_avail[j];
1937                                 dump_avail[j + 1] = phys_avail[j + 1];
1938                         }
1939                         break;
1940                 }
1941                 phys_avail[j] = regions[i].mr_start;
1942                 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
1943                 dump_avail[j] = phys_avail[j];
1944                 dump_avail[j + 1] = phys_avail[j + 1];
1945
1946                 phys_avail_count++;
1947                 physsz += regions[i].mr_size;
1948                 j += 2;
1949         }
1950
1951         /* Check for overlap with the kernel and exception vectors */
1952         rm_pavail = 0;
1953         for (j = 0; j < 2 * phys_avail_count; j+=2) {
1954                 if (phys_avail[j] < EXC_LAST)
1955                         phys_avail[j] += EXC_LAST;
1956
1957                 if (phys_avail[j] >= kpstart &&
1958                     phys_avail[j + 1] <= kpend) {
1959                         phys_avail[j] = phys_avail[j + 1] = ~0;
1960                         rm_pavail++;
1961                         continue;
1962                 }
1963
1964                 if (kpstart >= phys_avail[j] &&
1965                     kpstart < phys_avail[j + 1]) {
1966                         if (kpend < phys_avail[j + 1]) {
1967                                 phys_avail[2 * phys_avail_count] =
1968                                     (kpend & ~PAGE_MASK) + PAGE_SIZE;
1969                                 phys_avail[2 * phys_avail_count + 1] =
1970                                     phys_avail[j + 1];
1971                                 phys_avail_count++;
1972                         }
1973
1974                         phys_avail[j + 1] = kpstart & ~PAGE_MASK;
1975                 }
1976
1977                 if (kpend >= phys_avail[j] &&
1978                     kpend < phys_avail[j + 1]) {
1979                         if (kpstart > phys_avail[j]) {
1980                                 phys_avail[2 * phys_avail_count] = phys_avail[j];
1981                                 phys_avail[2 * phys_avail_count + 1] =
1982                                     kpstart & ~PAGE_MASK;
1983                                 phys_avail_count++;
1984                         }
1985
1986                         phys_avail[j] = (kpend & ~PAGE_MASK) +
1987                             PAGE_SIZE;
1988                 }
1989         }
1990         qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]), pa_cmp);
1991         for (i = 0; i < 2 * phys_avail_count; i++)
1992                 phys_avail_debug[i] = phys_avail[i];
1993
1994         /* Remove physical available regions marked for removal (~0) */
1995         if (rm_pavail) {
1996                 phys_avail_count -= rm_pavail;
1997                 for (i = 2 * phys_avail_count;
1998                      i < 2*(phys_avail_count + rm_pavail); i+=2)
1999                         phys_avail[i] = phys_avail[i + 1] = 0;
2000         }
2001         if (bootverbose) {
2002                 printf("phys_avail ranges after filtering:\n");
2003                 for (j = 0; j < 2 * phys_avail_count; j+=2)
2004                         printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n",
2005                                    j, phys_avail[j], j + 1, phys_avail[j + 1]);
2006         }
2007         physmem = btoc(physsz);
2008
2009         /*
2010          * XXX Assume we're running non-virtualized; bhyve is not supported.
2011          */
2012         if (isa3_pid_bits == 0)
2013                 isa3_pid_bits = 20;
2014         parttab_phys = moea64_bootstrap_alloc(PARTTAB_SIZE, PARTTAB_SIZE);
2015         validate_addr(parttab_phys, PARTTAB_SIZE);
2016         for (int i = 0; i < PARTTAB_SIZE/PAGE_SIZE; i++)
2017                 pagezero(PHYS_TO_DMAP(parttab_phys + i * PAGE_SIZE));
2018
2019         proctab_size = 1UL << PROCTAB_SIZE_SHIFT;
2020         proctab0pa = moea64_bootstrap_alloc(proctab_size, proctab_size);
2021         validate_addr(proctab0pa, proctab_size);
2022         for (int i = 0; i < proctab_size/PAGE_SIZE; i++)
2023                 pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE));
2024
2025         mmu_radix_setup_pagetables(hwphyssz);
2026 }
2027
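/*
 * Late bootstrap: compute Maxmem, set the kernel virtual address range,
 * remap early I/O, and allocate thread0's kernel stack, the message
 * buffer, and the dynamic per-CPU area.
 */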
2028 static void
2029 mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end)
2030 {
2031         int             i;
2032         vm_paddr_t      pa;
2033         void            *dpcpu;
2034         vm_offset_t va;
2035
2036         /*
2037          * Set up the Open Firmware pmap and add its mappings if not in real
2038          * mode.
2039          */
2040         if (bootverbose)
2041                 printf("%s enter\n", __func__);
2042
2043         /*
2044          * Calculate the last available physical address, and reserve the
2045          * vm_page_array (upper bound).
2046          */
2047         Maxmem = 0;
2048         for (i = 0; phys_avail[i + 2] != 0; i += 2)
2049                 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
2050
2051         /*
2052          * Set the start and end of kva.
2053          */
2054         virtual_avail = VM_MIN_KERNEL_ADDRESS;
2055         virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
2056
2057         /*
2058          * Remap any early IO mappings (console framebuffer, etc.)
2059          */
2060         bs_remap_earlyboot();
2061
2062         /*
2063          * Allocate a kernel stack with a guard page for thread0 and map it
2064          * into the kernel page map.
2065          */
2066         pa = allocpages(kstack_pages);
2067         va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
2068         virtual_avail = va + kstack_pages * PAGE_SIZE;
2069         CTR2(KTR_PMAP, "mmu_radix_late_bootstrap: kstack0 at %#lx (%#lx)", pa, va);
2070         thread0.td_kstack = va;
2071         for (i = 0; i < kstack_pages; i++) {
2072                 mmu_radix_kenter(va, pa);
2073                 pa += PAGE_SIZE;
2074                 va += PAGE_SIZE;
2075         }
2076         thread0.td_kstack_pages = kstack_pages;
2077
2078         /*
2079          * Allocate virtual address space for the message buffer.
2080          */
2081         pa = msgbuf_phys = allocpages((msgbufsize + PAGE_MASK) >> PAGE_SHIFT);
2082         msgbufp = (struct msgbuf *)PHYS_TO_DMAP(pa);
2083
2084         /*
2085          * Allocate virtual address space for the dynamic percpu area.
2086          */
2087         pa = allocpages(DPCPU_SIZE >> PAGE_SHIFT);
2088         dpcpu = (void *)PHYS_TO_DMAP(pa);
2089         dpcpu_init(dpcpu, curcpu);
2090         /*
2091          * Reserve some special page table entries/VA space for temporary
2092          * mapping of pages.
2093          */
2094 }
2095
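/*
 * Point the partition table control register (PTCR) at the partition
 * table allocated during early bootstrap.
 */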
2096 static void
2097 mmu_parttab_init(void)
2098 {
2099         uint64_t ptcr;
2100
2101         isa3_parttab = (struct pate *)PHYS_TO_DMAP(parttab_phys);
2102
2103         if (bootverbose)
2104                 printf("%s parttab: %p\n", __func__, isa3_parttab);
2105         ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2106         if (bootverbose)
2107                 printf("setting ptcr %lx\n", ptcr);
2108         mtspr(SPR_PTCR, ptcr);
2109 }
2110
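/*
 * Install new page table and process table pointers for the given LPID in
 * the partition table and invalidate any translations cached from the
 * previous entry; the form of invalidation depends on whether the old
 * entry had PARTTAB_HR set.
 */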
2111 static void
2112 mmu_parttab_update(uint64_t lpid, uint64_t pagetab, uint64_t proctab)
2113 {
2114         uint64_t prev;
2115
2116         if (bootverbose)
2117                 printf("%s isa3_parttab %p lpid %lx pagetab %lx proctab %lx\n", __func__, isa3_parttab,
2118                            lpid, pagetab, proctab);
2119         prev = be64toh(isa3_parttab[lpid].pagetab);
2120         isa3_parttab[lpid].pagetab = htobe64(pagetab);
2121         isa3_parttab[lpid].proctab = htobe64(proctab);
2122
2123         if (prev & PARTTAB_HR) {
2124                 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
2125                              "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2126                 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2127                              "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2128         } else {
2129                 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
2130                              "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
2131         }
2132         ttusync();
2133 }
2134
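/*
 * Initialize partition table entry 0 with the kernel's radix tree root.
 */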
2135 static void
2136 mmu_radix_parttab_init(void)
2137 {
2138         uint64_t pagetab;
2139
2140         mmu_parttab_init();
2141         pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2142             RADIX_PGD_INDEX_SHIFT | PARTTAB_HR;
2143         mmu_parttab_update(0, pagetab, 0);
2144 }
2145
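/*
 * Record the process table base and size in partition table entry 0 while
 * preserving the existing page table pointer.
 */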
2146 static void
2147 mmu_radix_proctab_register(vm_paddr_t proctabpa, uint64_t table_size)
2148 {
2149         uint64_t pagetab, proctab;
2150
2151         pagetab = be64toh(isa3_parttab[0].pagetab);
2152         proctab = proctabpa | table_size | PARTTAB_GR;
2153         mmu_parttab_update(0, pagetab, proctab);
2154 }
2155
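/*
 * Initialize process table entry 0 with the kernel's radix tree root,
 * register the process table with the partition table, and flush stale
 * translations before assigning the kernel pmap its PID.
 */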
2156 static void
2157 mmu_radix_proctab_init(void)
2158 {
2159
2160         isa3_base_pid = 1;
2161
2162         isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa);
2163         isa3_proctab->proctab0 =
2164             htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) |
2165                 RADIX_PGD_INDEX_SHIFT);
2166
2167         mmu_radix_proctab_register(proctab0pa, PROCTAB_SIZE_SHIFT - 12);
2168
2169         __asm __volatile("ptesync" : : : "memory");
2170         __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
2171                      "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
2172         __asm __volatile("eieio; tlbsync; ptesync" : : : "memory");
2173         if (bootverbose)
2174                 printf("process table %p and kernel radix PDE: %p\n",
2175                            isa3_proctab, kernel_pmap->pm_pml1);
2176         mtmsr(mfmsr() | PSL_DR);
2177         mtmsr(mfmsr() & ~PSL_DR);
2178         kernel_pmap->pm_pid = isa3_base_pid;
2179         isa3_base_pid++;
2180 }
2181
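/*
 * Apply MADV_DONTNEED/MADV_FREE advice to [sva, eva): clear the accessed
 * and modified bits on the covered 4KB mappings, demoting 2MB mappings as
 * needed, and invalidate the TLB once if anything changed.
 */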
2182 void
2183 mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
2184     int advice)
2185 {
2186         struct rwlock *lock;
2187         pml1_entry_t *l1e;
2188         pml2_entry_t *l2e;
2189         pml3_entry_t oldl3e, *l3e;
2190         pt_entry_t *pte;
2191         vm_offset_t va, va_next;
2192         vm_page_t m;
2193         boolean_t anychanged;
2194
2195         if (advice != MADV_DONTNEED && advice != MADV_FREE)
2196                 return;
2197         anychanged = FALSE;
2198         PMAP_LOCK(pmap);
2199         for (; sva < eva; sva = va_next) {
2200                 l1e = pmap_pml1e(pmap, sva);
2201                 if ((*l1e & PG_V) == 0) {
2202                         va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2203                         if (va_next < sva)
2204                                 va_next = eva;
2205                         continue;
2206                 }
2207                 l2e = pmap_l1e_to_l2e(l1e, sva);
2208                 if ((*l2e & PG_V) == 0) {
2209                         va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2210                         if (va_next < sva)
2211                                 va_next = eva;
2212                         continue;
2213                 }
2214                 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2215                 if (va_next < sva)
2216                         va_next = eva;
2217                 l3e = pmap_l2e_to_l3e(l2e, sva);
2218                 oldl3e = *l3e;
2219                 if ((oldl3e & PG_V) == 0)
2220                         continue;
2221                 else if ((oldl3e & RPTE_LEAF) != 0) {
2222                         if ((oldl3e & PG_MANAGED) == 0)
2223                                 continue;
2224                         lock = NULL;
2225                         if (!pmap_demote_l3e_locked(pmap, l3e, sva, &lock)) {
2226                                 if (lock != NULL)
2227                                         rw_wunlock(lock);
2228
2229                                 /*
2230                                  * The large page mapping was destroyed.
2231                                  */
2232                                 continue;
2233                         }
2234
2235                         /*
2236                          * Unless the page mappings are wired, remove the
2237                          * mapping to a single page so that a subsequent
2238                          * access may repromote.  Since the underlying page
2239                          * table page is fully populated, this removal never
2240                          * frees a page table page.
2241                          */
2242                         if ((oldl3e & PG_W) == 0) {
2243                                 pte = pmap_l3e_to_pte(l3e, sva);
2244                                 KASSERT((*pte & PG_V) != 0,
2245                                     ("pmap_advise: invalid PTE"));
2246                                 pmap_remove_pte(pmap, pte, sva, *l3e, NULL,
2247                                     &lock);
2248                                 anychanged = TRUE;
2249                         }
2250                         if (lock != NULL)
2251                                 rw_wunlock(lock);
2252                 }
2253                 if (va_next > eva)
2254                         va_next = eva;
2255                 va = va_next;
2256                 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next;
2257                          pte++, sva += PAGE_SIZE) {
2258                         MPASS(pte == pmap_pte(pmap, sva));
2259
2260                         if ((*pte & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V))
2261                                 goto maybe_invlrng;
2262                         else if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2263                                 if (advice == MADV_DONTNEED) {
2264                                         /*
2265                                          * Future calls to pmap_is_modified()
2266                                          * can be avoided by making the page
2267                                          * dirty now.
2268                                          */
2269                                         m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
2270                                         vm_page_dirty(m);
2271                                 }
2272                                 atomic_clear_long(pte, PG_M | PG_A);
2273                         } else if ((*pte & PG_A) != 0)
2274                                 atomic_clear_long(pte, PG_A);
2275                         else
2276                                 goto maybe_invlrng;
2277                         anychanged = TRUE;
2278                         continue;
2279 maybe_invlrng:
2280                         if (va != va_next) {
2281                                 anychanged = TRUE;
2282                                 va = va_next;
2283                         }
2284                 }
2285                 if (va != va_next)
2286                         anychanged = TRUE;
2287         }
2288         if (anychanged)
2289                 pmap_invalidate_all(pmap);
2290         PMAP_UNLOCK(pmap);
2291 }
2292
2293 /*
2294  * Routines used in machine-dependent code
2295  */
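/*
 * Boot-time entry point: build the direct map and kernel page tables, set
 * up the partition and process tables (the former only on PowerNV), and
 * switch the boot CPU to the kernel PID before finishing late bootstrap.
 */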
2296 static void
2297 mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
2298 {
2299         uint64_t lpcr;
2300
2301         if (bootverbose)
2302                 printf("%s\n", __func__);
2303         hw_direct_map = 1;
2304         mmu_radix_early_bootstrap(start, end);
2305         if (bootverbose)
2306                 printf("early bootstrap complete\n");
2307         if (powernv_enabled) {
2308                 lpcr = mfspr(SPR_LPCR);
2309                 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2310                 mmu_radix_parttab_init();
2311                 mmu_radix_init_amor();
2312                 if (bootverbose)
2313                         printf("powernv init complete\n");
2314         }
2315         mmu_radix_init_iamr();
2316         mmu_radix_proctab_init();
2317         mmu_radix_pid_set(kernel_pmap);
2318         /* XXX assume CPU_FTR_HVMODE */
2319         mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2320
2321         mmu_radix_late_bootstrap(start, end);
2322         numa_mem_regions(&numa_pregions, &numa_pregions_sz);
2323         if (bootverbose)
2324                 printf("%s done\n", __func__);
2325         pmap_bootstrapped = 1;
2326         dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE);
2327         PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS);
2328 }
2329
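/*
 * Per-CPU MMU setup for application processors: set LPCR_UPRT and LPCR_HR
 * and point PTCR at the shared partition table (on PowerNV), then
 * initialize the IAMR, switch to the kernel PID, and flush the TLB.
 */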
2330 static void
2331 mmu_radix_cpu_bootstrap(int ap)
2332 {
2333         uint64_t lpcr;
2334         uint64_t ptcr;
2335
2336         if (powernv_enabled) {
2337                 lpcr = mfspr(SPR_LPCR);
2338                 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
2339
2340                 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12);
2341                 mtspr(SPR_PTCR, ptcr);
2342                 mmu_radix_init_amor();
2343         }
2344         mmu_radix_init_iamr();
2345         mmu_radix_pid_set(kernel_pmap);
2346         mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
2347 }
2348
2349 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3e, CTLFLAG_RD, 0,
2350     "2MB page mapping counters");
2351
2352 static u_long pmap_l3e_demotions;
2353 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, demotions, CTLFLAG_RD,
2354     &pmap_l3e_demotions, 0, "2MB page demotions");
2355
2356 static u_long pmap_l3e_mappings;
2357 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, mappings, CTLFLAG_RD,
2358     &pmap_l3e_mappings, 0, "2MB page mappings");
2359
2360 static u_long pmap_l3e_p_failures;
2361 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, p_failures, CTLFLAG_RD,
2362     &pmap_l3e_p_failures, 0, "2MB page promotion failures");
2363
2364 static u_long pmap_l3e_promotions;
2365 SYSCTL_ULONG(_vm_pmap_l3e, OID_AUTO, promotions, CTLFLAG_RD,
2366     &pmap_l3e_promotions, 0, "2MB page promotions");
2367
2368 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0,
2369     "1GB page mapping counters");
2370
2371 static u_long pmap_l2e_demotions;
2372 SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD,
2373     &pmap_l2e_demotions, 0, "1GB page demotions");
2374
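/*
 * Clear the modified (PG_M) bit in every mapping of the given page.  2MB
 * mappings are demoted and the relevant 4KB mapping write-protected so
 * that a subsequent write access may repromote.
 */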
2375 void
2376 mmu_radix_clear_modify(vm_page_t m)
2377 {
2378         struct md_page *pvh;
2379         pmap_t pmap;
2380         pv_entry_t next_pv, pv;
2381         pml3_entry_t oldl3e, *l3e;
2382         pt_entry_t oldpte, *pte;
2383         struct rwlock *lock;
2384         vm_offset_t va;
2385         int md_gen, pvh_gen;
2386
2387         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2388             ("pmap_clear_modify: page %p is not managed", m));
2389         vm_page_assert_busied(m);
2390         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
2391
2392         /*
2393          * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
2394          * If the object containing the page is locked and the page is not
2395          * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2396          */
2397         if ((m->a.flags & PGA_WRITEABLE) == 0)
2398                 return;
2399         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2400             pa_to_pvh(VM_PAGE_TO_PHYS(m));
2401         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2402         rw_wlock(lock);
2403 restart:
2404         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
2405                 pmap = PV_PMAP(pv);
2406                 if (!PMAP_TRYLOCK(pmap)) {
2407                         pvh_gen = pvh->pv_gen;
2408                         rw_wunlock(lock);
2409                         PMAP_LOCK(pmap);
2410                         rw_wlock(lock);
2411                         if (pvh_gen != pvh->pv_gen) {
2412                                 PMAP_UNLOCK(pmap);
2413                                 goto restart;
2414                         }
2415                 }
2416                 va = pv->pv_va;
2417                 l3e = pmap_pml3e(pmap, va);
2418                 oldl3e = *l3e;
2419                 if ((oldl3e & PG_RW) != 0) {
2420                         if (pmap_demote_l3e_locked(pmap, l3e, va, &lock)) {
2421                                 if ((oldl3e & PG_W) == 0) {
2422                                         /*
2423                                          * Write protect the mapping to a
2424                                          * single page so that a subsequent
2425                                          * write access may repromote.
2426                                          */
2427                                         va += VM_PAGE_TO_PHYS(m) - (oldl3e &
2428                                             PG_PS_FRAME);
2429                                         pte = pmap_l3e_to_pte(l3e, va);
2430                                         oldpte = *pte;
2431                                         if ((oldpte & PG_V) != 0) {
2432                                                 while (!atomic_cmpset_long(pte,
2433                                                     oldpte,
2434                                                     (oldpte | RPTE_EAA_R) & ~(PG_M | PG_RW)))
2435                                                         oldpte = *pte;
2436                                                 vm_page_dirty(m);
2437                                                 pmap_invalidate_page(pmap, va);
2438                                         }
2439                                 }
2440                         }
2441                 }
2442                 PMAP_UNLOCK(pmap);
2443         }
2444         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2445                 pmap = PV_PMAP(pv);
2446                 if (!PMAP_TRYLOCK(pmap)) {
2447                         md_gen = m->md.pv_gen;
2448                         pvh_gen = pvh->pv_gen;
2449                         rw_wunlock(lock);
2450                         PMAP_LOCK(pmap);
2451                         rw_wlock(lock);
2452                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2453                                 PMAP_UNLOCK(pmap);
2454                                 goto restart;
2455                         }
2456                 }
2457                 l3e = pmap_pml3e(pmap, pv->pv_va);
2458                 KASSERT((*l3e & RPTE_LEAF) == 0, ("pmap_clear_modify: found"
2459                     " a 2mpage in page %p's pv list", m));
2460                 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
2461                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2462                         atomic_clear_long(pte, PG_M);
2463                         pmap_invalidate_page(pmap, pv->pv_va);
2464                 }
2465                 PMAP_UNLOCK(pmap);
2466         }
2467         rw_wunlock(lock);
2468 }
2469
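/*
 * Copy valid mappings from src_pmap to dst_pmap for the given range (a
 * no-op unless dst_addr == src_addr).  2MB leaf mappings are copied
 * wholesale when alignment and length permit; 4KB mappings are copied only
 * if managed, with the wired, modified, and accessed bits cleared.
 */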
2470 void
2471 mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2472     vm_size_t len, vm_offset_t src_addr)
2473 {
2474         struct rwlock *lock;
2475         struct spglist free;
2476         vm_offset_t addr;
2477         vm_offset_t end_addr = src_addr + len;
2478         vm_offset_t va_next;
2479         vm_page_t dst_pdpg, dstmpte, srcmpte;
2480         bool invalidate_all;
2481
2482         CTR6(KTR_PMAP,
2483             "%s(dst_pmap=%p, src_pmap=%p, dst_addr=%lx, len=%lu, src_addr=%lx)",
2484             __func__, dst_pmap, src_pmap, dst_addr, len, src_addr);
2485
2486         if (dst_addr != src_addr)
2487                 return;
2488         lock = NULL;
2489         invalidate_all = false;
2490         if (dst_pmap < src_pmap) {
2491                 PMAP_LOCK(dst_pmap);
2492                 PMAP_LOCK(src_pmap);
2493         } else {
2494                 PMAP_LOCK(src_pmap);
2495                 PMAP_LOCK(dst_pmap);
2496         }
2497
2498         for (addr = src_addr; addr < end_addr; addr = va_next) {
2499                 pml1_entry_t *l1e;
2500                 pml2_entry_t *l2e;
2501                 pml3_entry_t srcptepaddr, *l3e;
2502                 pt_entry_t *src_pte, *dst_pte;
2503
2504                 l1e = pmap_pml1e(src_pmap, addr);
2505                 if ((*l1e & PG_V) == 0) {
2506                         va_next = (addr + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
2507                         if (va_next < addr)
2508                                 va_next = end_addr;
2509                         continue;
2510                 }
2511
2512                 l2e = pmap_l1e_to_l2e(l1e, addr);
2513                 if ((*l2e & PG_V) == 0) {
2514                         va_next = (addr + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
2515                         if (va_next < addr)
2516                                 va_next = end_addr;
2517                         continue;
2518                 }
2519
2520                 va_next = (addr + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
2521                 if (va_next < addr)
2522                         va_next = end_addr;
2523
2524                 l3e = pmap_l2e_to_l3e(l2e, addr);
2525                 srcptepaddr = *l3e;
2526                 if (srcptepaddr == 0)
2527                         continue;
2528
2529                 if (srcptepaddr & RPTE_LEAF) {
2530                         if ((addr & L3_PAGE_MASK) != 0 ||
2531                             addr + L3_PAGE_SIZE > end_addr)
2532                                 continue;
2533                         dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL);
2534                         if (dst_pdpg == NULL)
2535                                 break;
2536                         l3e = (pml3_entry_t *)
2537                             PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
2538                         l3e = &l3e[pmap_pml3e_index(addr)];
2539                         if (*l3e == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
2540                             pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr,
2541                             PMAP_ENTER_NORECLAIM, &lock))) {
2542                                 *l3e = srcptepaddr & ~PG_W;
2543                                 pmap_resident_count_inc(dst_pmap,
2544                                     L3_PAGE_SIZE / PAGE_SIZE);
2545                                 atomic_add_long(&pmap_l3e_mappings, 1);
2546                         } else
2547                                 dst_pdpg->ref_count--;
2548                         continue;
2549                 }
2550
2551                 srcptepaddr &= PG_FRAME;
2552                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
2553                 KASSERT(srcmpte->ref_count > 0,
2554                     ("pmap_copy: source page table page is unused"));
2555
2556                 if (va_next > end_addr)
2557                         va_next = end_addr;
2558
2559                 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
2560                 src_pte = &src_pte[pmap_pte_index(addr)];
2561                 dstmpte = NULL;
2562                 while (addr < va_next) {
2563                         pt_entry_t ptetemp;
2564                         ptetemp = *src_pte;
2565                         /*
2566                          * we only virtual copy managed pages
2567                          */
2568                         if ((ptetemp & PG_MANAGED) != 0) {
2569                                 if (dstmpte != NULL &&
2570                                     dstmpte->pindex == pmap_l3e_pindex(addr))
2571                                         dstmpte->ref_count++;
2572                                 else if ((dstmpte = pmap_allocpte(dst_pmap,
2573                                     addr, NULL)) == NULL)
2574                                         goto out;
2575                                 dst_pte = (pt_entry_t *)
2576                                     PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
2577                                 dst_pte = &dst_pte[pmap_pte_index(addr)];
2578                                 if (*dst_pte == 0 &&
2579                                     pmap_try_insert_pv_entry(dst_pmap, addr,
2580                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
2581                                     &lock)) {
2582                                         /*
2583                                          * Clear the wired, modified, and
2584                                          * accessed (referenced) bits
2585                                          * during the copy.
2586                                          */
2587                                         *dst_pte = ptetemp & ~(PG_W | PG_M |
2588                                             PG_A);
2589                                         pmap_resident_count_inc(dst_pmap, 1);
2590                                 } else {
2591                                         SLIST_INIT(&free);
2592                                         if (pmap_unwire_ptp(dst_pmap, addr,
2593                                             dstmpte, &free)) {
2594                                                 /*
2595                                                  * Although "addr" is not
2596                                                  * mapped, paging-structure
2597                                                  * caches could nonetheless
2598                                                  * have entries that refer to
2599                                                  * the freed page table pages.
2600                                                  * Invalidate those entries.
2601                                                  */
2602                                                 invalidate_all = true;
2603                                                 vm_page_free_pages_toq(&free,
2604                                                     true);
2605                                         }
2606                                         goto out;
2607                                 }
2608                                 if (dstmpte->ref_count >= srcmpte->ref_count)
2609                                         break;
2610                         }
2611                         addr += PAGE_SIZE;
2612                         if (__predict_false((addr & L3_PAGE_MASK) == 0))
2613                                 src_pte = pmap_pte(src_pmap, addr);
2614                         else
2615                                 src_pte++;
2616                 }
2617         }
2618 out:
2619         if (invalidate_all)
2620                 pmap_invalidate_all(dst_pmap);
2621         if (lock != NULL)
2622                 rw_wunlock(lock);
2623         PMAP_UNLOCK(src_pmap);
2624         PMAP_UNLOCK(dst_pmap);
2625 }
2626
2627 static void
2628 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
2629 {
2630         vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2631         vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2632
2633         CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
2634         /*
2635          * XXX slow
2636          */
2637         bcopy((void *)src, (void *)dst, PAGE_SIZE);
2638 }
2639
2640 static void
2641 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2642     vm_offset_t b_offset, int xfersize)
2643 {
2644
2645         CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
2646             a_offset, mb, b_offset, xfersize);
2647         UNIMPLEMENTED();
2648 }
2649
2650 #if VM_NRESERVLEVEL > 0
2651 /*
2652  * Tries to promote the 512, contiguous 4KB page mappings that are within a
2653  * single page table page (PTP) to a single 2MB page mapping.  For promotion
2654  * to occur, two conditions must be met: (1) the 4KB page mappings must map
2655  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2656  * identical characteristics.
2657  */
2658 static int
2659 pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va,
2660     struct rwlock **lockp)
2661 {
2662         pml3_entry_t newpde;
2663         pt_entry_t *firstpte, oldpte, pa, *pte;
2664         vm_page_t mpte;
2665
2666         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2667
2668         /*
2669          * Examine the first PTE in the specified PTP.  Abort if this PTE is
2670          * either invalid, unused, or does not map the first 4KB physical page
2671          * within a 2MB page.
2672          */
2673         firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
2674 setpde:
2675         newpde = *firstpte;
2676         if ((newpde & ((PG_FRAME & L3_PAGE_MASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
2677                 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2678                     " in pmap %p", va, pmap);
2679                 goto fail;
2680         }
2681         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
2682                 /*
2683                  * When PG_M is already clear, PG_RW can be cleared without
2684                  * a TLB invalidation.
2685                  */
2686                 if (!atomic_cmpset_long(firstpte, newpde, (newpde | RPTE_EAA_R) & ~RPTE_EAA_W))
2687                         goto setpde;
2688                 newpde &= ~RPTE_EAA_W;
2689         }
2690
2691         /*
2692          * Examine each of the other PTEs in the specified PTP.  Abort if this
2693          * PTE maps an unexpected 4KB physical page or does not have identical
2694          * characteristics to the first PTE.
2695          */
2696         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + L3_PAGE_SIZE - PAGE_SIZE;
2697         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
2698 setpte:
2699                 oldpte = *pte;
2700                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
2701                         CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2702                             " in pmap %p", va, pmap);
2703                         goto fail;
2704                 }
2705                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
2706                         /*
2707                          * When PG_M is already clear, PG_RW can be cleared
2708                          * without a TLB invalidation.
2709                          */
2710                         if (!atomic_cmpset_long(pte, oldpte, (oldpte | RPTE_EAA_R) & ~RPTE_EAA_W))
2711                                 goto setpte;
2712                         oldpte &= ~RPTE_EAA_W;
2713                         CTR2(KTR_PMAP, "pmap_promote_l3e: protect for va %#lx"
2714                             " in pmap %p", (oldpte & PG_FRAME & L3_PAGE_MASK) |
2715                             (va & ~L3_PAGE_MASK), pmap);
2716                 }
2717                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
2718                         CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx"
2719                             " in pmap %p", va, pmap);
2720                         goto fail;
2721                 }
2722                 pa -= PAGE_SIZE;
2723         }
2724
2725         /*
2726          * Save the page table page in its current state until the PDE
2727          * mapping the superpage is demoted by pmap_demote_pde() or
2728          * destroyed by pmap_remove_pde().
2729          */
2730         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
2731         KASSERT(mpte >= vm_page_array &&
2732             mpte < &vm_page_array[vm_page_array_size],
2733             ("pmap_promote_l3e: page table page is out of range"));
2734         KASSERT(mpte->pindex == pmap_l3e_pindex(va),
2735             ("pmap_promote_l3e: page table page's pindex is wrong"));
2736         if (pmap_insert_pt_page(pmap, mpte)) {
2737                 CTR2(KTR_PMAP,
2738                     "pmap_promote_l3e: failure for va %#lx in pmap %p", va,
2739                     pmap);
2740                 goto fail;
2741         }
2742
2743         /*
2744          * Promote the pv entries.
2745          */
2746         if ((newpde & PG_MANAGED) != 0)
2747                 pmap_pv_promote_l3e(pmap, va, newpde & PG_PS_FRAME, lockp);
2748
2749         pte_store(pde, PG_PROMOTED | newpde);
2750         atomic_add_long(&pmap_l3e_promotions, 1);
2751         CTR2(KTR_PMAP, "pmap_promote_l3e: success for va %#lx"
2752             " in pmap %p", va, pmap);
2753         return (0);
2754  fail:
2755         atomic_add_long(&pmap_l3e_p_failures, 1);
2756         return (KERN_FAILURE);
2757 }
2758 #endif /* VM_NRESERVLEVEL > 0 */
2759
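/*
 * Create or update the mapping from virtual address va to page m with the
 * requested protection, handling both 4KB (psind == 0) and 2MB
 * (psind == 1) requests.
 */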
2760 int
2761 mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
2762     vm_prot_t prot, u_int flags, int8_t psind)
2763 {
2764         struct rwlock *lock;
2765         pml3_entry_t *l3e;
2766         pt_entry_t *pte;
2767         pt_entry_t newpte, origpte;
2768         pv_entry_t pv;
2769         vm_paddr_t opa, pa;
2770         vm_page_t mpte, om;
2771         int rv, retrycount;
2772         boolean_t nosleep, invalidate_all, invalidate_page;
2773
2774         va = trunc_page(va);
2775         retrycount = 0;
2776         invalidate_page = invalidate_all = false;
2777         CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
2778             m, prot, flags, psind);
2779         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
2780         KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
2781             va >= kmi.clean_eva,
2782             ("pmap_enter: managed mapping within the clean submap"));
2783         if ((m->oflags & VPO_UNMANAGED) == 0)
2784                 VM_PAGE_OBJECT_BUSY_ASSERT(m);
2785
2786         KASSERT((flags & PMAP_ENTER_RESERVED) == 0,
2787             ("pmap_enter: flags %u has reserved bits set", flags));
2788         pa = VM_PAGE_TO_PHYS(m);
2789         newpte = (pt_entry_t)(pa | PG_A | PG_V | RPTE_LEAF);
2790         if ((flags & VM_PROT_WRITE) != 0)
2791                 newpte |= PG_M;
2792         if ((flags & VM_PROT_READ) != 0)
2793                 newpte |= PG_A;
2794         if (prot & VM_PROT_READ)
2795                 newpte |= RPTE_EAA_R;
2796         if ((prot & VM_PROT_WRITE) != 0)
2797                 newpte |= RPTE_EAA_W;
2798         KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
2799             ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
2800
2801         if (prot & VM_PROT_EXECUTE)
2802                 newpte |= PG_X;
2803         if ((flags & PMAP_ENTER_WIRED) != 0)
2804                 newpte |= PG_W;
2805         if (va >= DMAP_MIN_ADDRESS)
2806                 newpte |= RPTE_EAA_P;
2807         newpte |= pmap_cache_bits(m->md.mdpg_cache_attrs);
2808         /*
2809          * Set modified bit gratuitously for writeable mappings if
2810          * the page is unmanaged. We do not want to take a fault
2811          * to do the dirty bit accounting for these mappings.
2812          */
2813         if ((m->oflags & VPO_UNMANAGED) != 0) {
2814                 if ((newpte & PG_RW) != 0)
2815                         newpte |= PG_M;
2816         } else
2817                 newpte |= PG_MANAGED;
2818
2819         lock = NULL;
2820         PMAP_LOCK(pmap);
2821         if (psind == 1) {
2822                 /* Assert the required virtual and physical alignment. */
2823                 KASSERT((va & L3_PAGE_MASK) == 0, ("pmap_enter: va unaligned"));
2824                 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind"));
2825                 rv = pmap_enter_l3e(pmap, va, newpte | RPTE_LEAF, flags, m, &lock);
2826                 goto out;
2827         }
2828         mpte = NULL;
2829
2830         /*
2831          * In the case that a page table page is not
2832          * resident, we are creating it here.
2833          */
2834 retry:
2835         l3e = pmap_pml3e(pmap, va);
2836         if (l3e != NULL && (*l3e & PG_V) != 0 && ((*l3e & RPTE_LEAF) == 0 ||
2837             pmap_demote_l3e_locked(pmap, l3e, va, &lock))) {
2838                 pte = pmap_l3e_to_pte(l3e, va);
2839                 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
2840                         mpte = PHYS_TO_VM_PAGE(*l3e & PG_FRAME);
2841                         mpte->ref_count++;
2842                 }
2843         } else if (va < VM_MAXUSER_ADDRESS) {
2844                 /*
2845                  * We get here if the PTE page isn't mapped, or if it
2846                  * has been deallocated.
2847                  */
2848                 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2849                 mpte = _pmap_allocpte(pmap, pmap_l3e_pindex(va),
2850                     nosleep ? NULL : &lock);
2851                 if (mpte == NULL && nosleep) {
2852                         rv = KERN_RESOURCE_SHORTAGE;
2853                         goto out;
2854                 }
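                     /*
                      * NB: _pmap_allocpte() may sleep and drop the pmap lock,
                      * so the L3E lookup above must be redone.  The retry
                      * count below is only a sanity bound against looping
                      * forever.
                      */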
2855                 if (__predict_false(retrycount++ == 6))
2856                         panic("too many retries");
2857                 invalidate_all = true;
2858                 goto retry;
2859         } else
2860                 panic("pmap_enter: invalid page directory va=%#lx", va);
2861
2862         origpte = *pte;
2863         pv = NULL;
2864
2865         /*
2866          * Is the specified virtual address already mapped?
2867          */
2868         if ((origpte & PG_V) != 0) {
2869 #ifdef INVARIANTS
2870                 if (VERBOSE_PMAP || pmap_logging) {
2871                         printf("cow fault pmap_enter(%p, %#lx, %p, %#x, %x, %d) --"
2872                             " asid=%lu curpid=%d name=%s origpte0x%lx\n",
2873                             pmap, va, m, prot, flags, psind, pmap->pm_pid,
2874                             curproc->p_pid, curproc->p_comm, origpte);
2875                         pmap_pte_walk(pmap->pm_pml1, va);
2876                 }
2877 #endif
2878                 /*
2879                  * Wiring change, just update stats. We don't worry about
2880                  * wiring PT pages as they remain resident as long as there
2881                  * are valid mappings in them. Hence, if a user page is wired,
2882                  * the PT page will be also.
2883                  */
2884                 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
2885                         pmap->pm_stats.wired_count++;
2886                 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
2887                         pmap->pm_stats.wired_count--;
2888
2889                 /*
2890                  * Remove the extra PT page reference.
2891                  */
2892                 if (mpte != NULL) {
2893                         mpte->ref_count--;
2894                         KASSERT(mpte->ref_count > 0,
2895                             ("pmap_enter: missing reference to page table page,"
2896                              " va: 0x%lx", va));
2897                 }
2898
2899                 /*
2900                  * Has the physical page changed?
2901                  */
2902                 opa = origpte & PG_FRAME;
2903                 if (opa == pa) {
2904                         /*
2905                          * No, might be a protection or wiring change.
2906                          */
2907                         if ((origpte & PG_MANAGED) != 0 &&
2908                             (newpte & PG_RW) != 0)
2909                                 vm_page_aflag_set(m, PGA_WRITEABLE);
2910                         if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) {
2911                                 if ((newpte & (PG_A|PG_M)) != (origpte & (PG_A|PG_M))) {
2912                                         if (!atomic_cmpset_long(pte, origpte, newpte))
2913                                                 goto retry;
2914                                         if ((newpte & PG_M) != (origpte & PG_M))
2915                                                 vm_page_dirty(m);
2916                                         if ((newpte & PG_A) != (origpte & PG_A))
2917                                                 vm_page_aflag_set(m, PGA_REFERENCED);
2918                                         ptesync();
2919                                 } else
2920                                         invalidate_all = true;
2921                                 goto unchanged;
2923                         }
2924                         goto validate;
2925                 }
2926
2927                 /*
2928                  * The physical page has changed.  Temporarily invalidate
2929                  * the mapping.  This ensures that all threads sharing the
2930                  * pmap keep a consistent view of the mapping, which is
2931                  * necessary for the correct handling of COW faults.  It
2932                  * also permits reuse of the old mapping's PV entry,
2933                  * avoiding an allocation.
2934                  *
2935                  * For consistency, handle unmanaged mappings the same way.
2936                  */
2937                 origpte = pte_load_clear(pte);
2938                 KASSERT((origpte & PG_FRAME) == opa,
2939                     ("pmap_enter: unexpected pa update for %#lx", va));
2940                 if ((origpte & PG_MANAGED) != 0) {
2941                         om = PHYS_TO_VM_PAGE(opa);
2942
2943                         /*
2944                          * The pmap lock is sufficient to synchronize with
2945                          * concurrent calls to pmap_page_test_mappings() and
2946                          * pmap_ts_referenced().
2947                          */
2948                         if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2949                                 vm_page_dirty(om);
2950                         if ((origpte & PG_A) != 0)
2951                                 vm_page_aflag_set(om, PGA_REFERENCED);
2952                         CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
2953                         pv = pmap_pvh_remove(&om->md, pmap, va);
2954                         if ((newpte & PG_MANAGED) == 0)
2955                                 free_pv_entry(pmap, pv);
2956 #ifdef INVARIANTS
2957                         else if (origpte & PG_MANAGED) {
2958                                 if (pv == NULL) {
2959                                         pmap_page_print_mappings(om);
2960                                         MPASS(pv != NULL);
2961                                 }
2962                         }
2963 #endif
2964                         if ((om->a.flags & PGA_WRITEABLE) != 0 &&
2965                             TAILQ_EMPTY(&om->md.pv_list) &&
2966                             ((om->flags & PG_FICTITIOUS) != 0 ||
2967                             TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
2968                                 vm_page_aflag_clear(om, PGA_WRITEABLE);
2969                 }
2970                 if ((origpte & PG_A) != 0)
2971                         invalidate_page = true;
2972                 origpte = 0;
2973         } else {
2974                 if (pmap != kernel_pmap) {
2975 #ifdef INVARIANTS
2976                         if (VERBOSE_PMAP || pmap_logging)
2977                                 printf("pmap_enter(%p, %#lx, %p, %#x, %x, %d) -- asid=%lu curpid=%d name=%s\n",
2978                                     pmap, va, m, prot, flags, psind,
2979                                     pmap->pm_pid, curproc->p_pid,
2980                                     curproc->p_comm);
2981 #endif
2982                 }
2983
2984                 /*
2985                  * Increment the counters.
2986                  */
2987                 if ((newpte & PG_W) != 0)
2988                         pmap->pm_stats.wired_count++;
2989                 pmap_resident_count_inc(pmap, 1);
2990         }
2991
2992         /*
2993          * Enter on the PV list if part of our managed memory.
2994          */
2995         if ((newpte & PG_MANAGED) != 0) {
2996                 if (pv == NULL) {
2997                         pv = get_pv_entry(pmap, &lock);
2998                         pv->pv_va = va;
2999                 }
3000 #ifdef VERBOSE_PV
3001                 else
3002                         printf("reassigning pv: %p to pmap: %p\n",
3003                                    pv, pmap);
3004 #endif
3005                 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
3006                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3007                 m->md.pv_gen++;
3008                 if ((newpte & PG_RW) != 0)
3009                         vm_page_aflag_set(m, PGA_WRITEABLE);
3010         }
3011
3012         /*
3013          * Update the PTE.
3014          */
3015         if ((origpte & PG_V) != 0) {
3016 validate:
3017                 origpte = pte_load_store(pte, newpte);
3018                 KASSERT((origpte & PG_FRAME) == pa,
3019                     ("pmap_enter: unexpected pa update for %#lx", va));
3020                 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) ==
3021                     (PG_M | PG_RW)) {
3022                         if ((origpte & PG_MANAGED) != 0)
3023                                 vm_page_dirty(m);
3024                         invalidate_page = true;
3025
3026                         /*
3027                          * Although the PTE may still have PG_RW set, TLB
3028                          * invalidation may nonetheless be required because
3029                          * the PTE no longer has PG_M set.
3030                          */
3031                 } else if ((origpte & PG_X) != 0 || (newpte & PG_X) == 0) {
3032                         /*
3033                          * Removing capabilities requires invalidation on POWER
3034                          */
3035                         invalidate_page = true;
3036                         goto unchanged;
3037                 }
3038                 if ((origpte & PG_A) != 0)
3039                         invalidate_page = true;
3040         } else {
3041                 pte_store(pte, newpte);
3042                 ptesync();
3043         }
3044 unchanged:
3045
3046 #if VM_NRESERVLEVEL > 0
3047         /*
3048          * If both the page table page and the reservation are fully
3049          * populated, then attempt promotion.
3050          */
3051         if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
3052             mmu_radix_ps_enabled(pmap) &&
3053             (m->flags & PG_FICTITIOUS) == 0 &&
3054             vm_reserv_level_iffullpop(m) == 0 &&
3055                 pmap_promote_l3e(pmap, l3e, va, &lock) == 0)
3056                 invalidate_all = true;
3057 #endif
3058         if (invalidate_all)
3059                 pmap_invalidate_all(pmap);
3060         else if (invalidate_page)
3061                 pmap_invalidate_page(pmap, va);
3062
3063         rv = KERN_SUCCESS;
3064 out:
3065         if (lock != NULL)
3066                 rw_wunlock(lock);
3067         PMAP_UNLOCK(pmap);
3068
3069         return (rv);
3070 }
3071
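     /*
      * Illustrative caller sketch (hypothetical; not taken from this file):
      * a caller that cannot sleep passes PMAP_ENTER_NOSLEEP and must cope
      * with the resulting resource shortage itself, e.g.
      *
      *      rv = pmap_enter(pmap, va, m, prot, flags | PMAP_ENTER_NOSLEEP, 0);
      *      if (rv == KERN_RESOURCE_SHORTAGE)
      *              vm_wait(NULL);          (back off, then retry the enter)
      */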
3072
3073 /*
3074  * Tries to create a read- and/or execute-only 2MB page mapping.  Returns true
3075  * if successful.  Returns false if (1) a page table page cannot be allocated
3076  * without sleeping, (2) a mapping already exists at the specified virtual
3077  * address, or (3) a PV entry cannot be allocated without reclaiming another
3078  * PV entry.
3079  */
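     /*
      * In other words, this is a thin wrapper that maps the three failure
      * cases above onto the PMAP_ENTER_NOSLEEP, PMAP_ENTER_NOREPLACE and
      * PMAP_ENTER_NORECLAIM flags of pmap_enter_l3e() below.
      */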
3080 static bool
3081 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3082     struct rwlock **lockp)
3083 {
3084         pml3_entry_t newpde;
3085
3086         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3087         newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs) |
3088             RPTE_LEAF | PG_V;
3089         if ((m->oflags & VPO_UNMANAGED) == 0)
3090                 newpde |= PG_MANAGED;
3091         if (prot & VM_PROT_EXECUTE)
3092                 newpde |= PG_X;
3093         if (prot & VM_PROT_READ)
3094                 newpde |= RPTE_EAA_R;
3095         if (va >= DMAP_MIN_ADDRESS)
3096                 newpde |= RPTE_EAA_P;
3097         return (pmap_enter_l3e(pmap, va, newpde, PMAP_ENTER_NOSLEEP |
3098             PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
3099             KERN_SUCCESS);
3100 }
3101
3102 /*
3103  * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if
3104  * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
3105  * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and
3106  * a mapping already exists at the specified virtual address.  Returns
3107  * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table
3108  * page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
3109  * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
3110  *
3111  * The parameter "m" is only used when creating a managed, writeable mapping.
3112  */
3113 static int
3114 pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
3115     vm_page_t m, struct rwlock **lockp)
3116 {
3117         struct spglist free;
3118         pml3_entry_t oldl3e, *l3e;
3119         vm_page_t mt, pdpg;
3120
3121         KASSERT((newpde & (PG_M | PG_RW)) != PG_RW,
3122             ("pmap_enter_pde: newpde is missing PG_M"));
3123         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3124
3125         if ((pdpg = pmap_allocl3e(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
3126             NULL : lockp)) == NULL) {
3127                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3128                     " in pmap %p", va, pmap);
3129                 return (KERN_RESOURCE_SHORTAGE);
3130         }
3131         l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
3132         l3e = &l3e[pmap_pml3e_index(va)];
3133         oldl3e = *l3e;
3134         if ((oldl3e & PG_V) != 0) {
3135                 KASSERT(pdpg->ref_count > 1,
3136                     ("pmap_enter_pde: pdpg's wire count is too low"));
3137                 if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
3138                         pdpg->ref_count--;
3139                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3140                             " in pmap %p", va, pmap);
3141                         return (KERN_FAILURE);
3142                 }
3143                 /* Break the existing mapping(s). */
3144                 SLIST_INIT(&free);
3145                 if ((oldl3e & RPTE_LEAF) != 0) {
3146                         /*
3147                          * The reference to the PD page that was acquired by
3148                          * pmap_allocl3e() ensures that it won't be freed.
3149                          * However, if the PDE resulted from a promotion, then
3150                          * a reserved PT page could be freed.
3151                          */
3152                         (void)pmap_remove_l3e(pmap, l3e, va, &free, lockp);
3153                 } else {
3154                         if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e,
3155                             &free, lockp))
3156                                pmap_invalidate_all(pmap);
3157                 }
3158                 vm_page_free_pages_toq(&free, true);
3159                 if (va >= VM_MAXUSER_ADDRESS) {
3160                         mt = PHYS_TO_VM_PAGE(*l3e & PG_FRAME);
3161                         if (pmap_insert_pt_page(pmap, mt)) {
3162                                 /*
3163                                  * XXX Currently, this can't happen because
3164                                  * we do not perform pmap_enter(psind == 1)
3165                                  * on the kernel pmap.
3166                                  */
3167                                 panic("pmap_enter_pde: trie insert failed");
3168                         }
3169                 } else
3170                         KASSERT(*l3e == 0, ("pmap_enter_pde: non-zero pde %p",
3171                             l3e));
3172         }
3173         if ((newpde & PG_MANAGED) != 0) {
3174                 /*
3175                  * Abort this mapping if its PV entry could not be created.
3176                  */
3177                 if (!pmap_pv_insert_l3e(pmap, va, newpde, flags, lockp)) {
3178                         SLIST_INIT(&free);
3179                         if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
3180                                 /*
3181                                  * Although "va" is not mapped, paging-
3182                                  * structure caches could nonetheless have
3183                                  * entries that refer to the freed page table
3184                                  * pages.  Invalidate those entries.
3185                                  */
3186                                 pmap_invalidate_page(pmap, va);
3187                                 vm_page_free_pages_toq(&free, true);
3188                         }
3189                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3190                             " in pmap %p", va, pmap);
3191                         return (KERN_RESOURCE_SHORTAGE);
3192                 }
3193                 if ((newpde & PG_RW) != 0) {
3194                         for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
3195                                 vm_page_aflag_set(mt, PGA_WRITEABLE);
3196                 }
3197         }
3198
3199         /*
3200          * Increment counters.
3201          */
3202         if ((newpde & PG_W) != 0)
3203                 pmap->pm_stats.wired_count += L3_PAGE_SIZE / PAGE_SIZE;
3204         pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
3205
3206         /*
3207          * Map the superpage.  (This is not a promoted mapping; there will not
3208          * be any lingering 4KB page mappings in the TLB.)
3209          */
3210         pte_store(l3e, newpde);
3211
3212         atomic_add_long(&pmap_l3e_mappings, 1);
3213         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3214             " in pmap %p", va, pmap);
3215         return (KERN_SUCCESS);
3216 }
3217
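     /*
      * Map a sequence of resident pages belonging to the same object.  Each
      * page is mapped at the virtual address corresponding to its offset
      * from m_start; 2MB page mappings are used when the virtual address is
      * suitably aligned and the page is part of a fully populated
      * reservation, and 4KB mappings are used otherwise.  Virtual pages for
      * which no resident page exists are simply skipped.
      */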
3218 void
3219 mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
3220     vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
3221 {
3222
3223         struct rwlock *lock;
3224         vm_offset_t va;
3225         vm_page_t m, mpte;
3226         vm_pindex_t diff, psize;
3227         bool invalidate;
3228         VM_OBJECT_ASSERT_LOCKED(m_start->object);
3229
3230         CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
3231             end, m_start, prot);
3232
3233         invalidate = false;
3234         psize = atop(end - start);
3235         mpte = NULL;
3236         m = m_start;
3237         lock = NULL;
3238         PMAP_LOCK(pmap);
3239         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3240                 va = start + ptoa(diff);
3241                 if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
3242                     m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
3243                     pmap_enter_2mpage(pmap, va, m, prot, &lock))
3244                         m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
3245                 else
3246                         mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot,
3247                             mpte, &lock, &invalidate);
3248                 m = TAILQ_NEXT(m, listq);
3249         }
3250         ptesync();
3251         if (lock != NULL)
3252                 rw_wunlock(lock);
3253         if (invalidate)
3254                 pmap_invalidate_all(pmap);
3255         PMAP_UNLOCK(pmap);
3256 }
3257
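     /*
      * Create a 4KB read-only or execute-only mapping for "m" at "va"
      * without ever sleeping.  If a mapping already exists, or if a page
      * table page or PV entry cannot be allocated without sleeping, the
      * request is silently dropped.  Returns the page table page backing
      * "va" (if any) so that the caller can reuse it for the next
      * consecutive mapping.
      */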
3258 static vm_page_t
3259 mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3260     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate)
3261 {
3262         struct spglist free;
3263         pt_entry_t *pte;
3264         vm_paddr_t pa;
3265
3266         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3267             (m->oflags & VPO_UNMANAGED) != 0,
3268             ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
3269         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3270
3271         /*
3272          * In the case that a page table page is not
3273          * resident, we are creating it here.
3274          */
3275         if (va < VM_MAXUSER_ADDRESS) {
3276                 vm_pindex_t ptepindex;
3277                 pml3_entry_t *ptepa;
3278
3279                 /*
3280                  * Calculate pagetable page index
3281                  */
3282                 ptepindex = pmap_l3e_pindex(va);
3283                 if (mpte && (mpte->pindex == ptepindex)) {
3284                         mpte->ref_count++;
3285                 } else {
3286                         /*
3287                          * Get the page directory entry
3288                          */
3289                         ptepa = pmap_pml3e(pmap, va);
3290
3291                         /*
3292                          * If the page table page is mapped, we just increment
3293                          * the hold count, and activate it.  Otherwise, we
3294                          * attempt to allocate a page table page.  If this
3295                          * attempt fails, we don't retry.  Instead, we give up.
3296                          */
3297                         if (ptepa && (*ptepa & PG_V) != 0) {
3298                                 if (*ptepa & RPTE_LEAF)
3299                                         return (NULL);
3300                                 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
3301                                 mpte->ref_count++;
3302                         } else {
3303                                 /*
3304                                  * Pass NULL instead of the PV list lock
3305                                  * pointer, because we don't intend to sleep.
3306                                  */
3307                                 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
3308                                 if (mpte == NULL)
3309                                         return (mpte);
3310                         }
3311                 }
3312                 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3313                 pte = &pte[pmap_pte_index(va)];
3314         } else {
3315                 mpte = NULL;
3316                 pte = pmap_pte(pmap, va);
3317         }
3318         if (*pte) {
3319                 if (mpte != NULL) {
3320                         mpte->ref_count--;
3321                         mpte = NULL;
3322                 }
3323                 return (mpte);
3324         }
3325
3326         /*
3327          * Enter on the PV list if part of our managed memory.
3328          */
3329         if ((m->oflags & VPO_UNMANAGED) == 0 &&
3330             !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3331                 if (mpte != NULL) {
3332                         SLIST_INIT(&free);
3333                         if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
3334                                 /*
3335                                  * Although "va" is not mapped, paging-
3336                                  * structure caches could nonetheless have
3337                                  * entries that refer to the freed page table
3338                                  * pages.  Invalidate those entries.
3339                                  */
3340                                 *invalidate = true;
3341                                 vm_page_free_pages_toq(&free, true);
3342                         }
3343                         mpte = NULL;
3344                 }
3345                 return (mpte);
3346         }
3347
3348         /*
3349          * Increment counters
3350          */
3351         pmap_resident_count_inc(pmap, 1);
3352
3353         pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs);
3354         if (prot & VM_PROT_EXECUTE)
3355                 pa |= PG_X;
3356         else
3357                 pa |= RPTE_EAA_R;
3358         if ((m->oflags & VPO_UNMANAGED) == 0)
3359                 pa |= PG_MANAGED;
3360
3361         pte_store(pte, pa);
3362         return (mpte);
3363 }
3364
3365 void
3366 mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
3367     vm_prot_t prot)
3368 {
3369         struct rwlock *lock;
3370         bool invalidate;
3371
3372         lock = NULL;
3373         invalidate = false;
3374         PMAP_LOCK(pmap);
3375         mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock,
3376             &invalidate);
3377         ptesync();
3378         if (lock != NULL)
3379                 rw_wunlock(lock);
3380         if (invalidate)
3381                 pmap_invalidate_all(pmap);
3382         PMAP_UNLOCK(pmap);
3383 }
3384
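     /*
      * Return the physical address backing "va" in "pmap", or 0 if the
      * address is not mapped.  Both 2MB leaf and 4KB mappings are handled.
      */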
3385 vm_paddr_t
3386 mmu_radix_extract(pmap_t pmap, vm_offset_t va)
3387 {
3388         pml3_entry_t *l3e;
3389         pt_entry_t *pte;
3390         vm_paddr_t pa;
3391
3392         l3e = pmap_pml3e(pmap, va);
3393         if (__predict_false(l3e == NULL))
3394                 return (0);
3395         if (*l3e & RPTE_LEAF) {
3396                 pa = (*l3e & PG_PS_FRAME) | (va & L3_PAGE_MASK);
3398         } else {
3399                 /*
3400                  * Beware of a concurrent promotion that changes the
3401                  * PDE at this point!  For example, vtopte() must not
3402                  * be used to access the PTE because it would use the
3403                  * new PDE.  It is, however, safe to use the old PDE
3404                  * because the page table page is preserved by the
3405                  * promotion.
3406                  */
3407                 pte = pmap_l3e_to_pte(l3e, va);
3408                 if (__predict_false(pte == NULL))
3409                         return (0);
3410                 pa = *pte;
3411                 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
3413         }
3414         return (pa);
3415 }
3416
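     /*
      * Like mmu_radix_extract(), but also wire the backing page via
      * vm_page_wire_mapped().  Returns NULL if there is no valid mapping, if
      * VM_PROT_WRITE was requested but the mapping is read-only, or if the
      * page could not be wired.
      */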
3417 vm_page_t
3418 mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3419 {
3420         pml3_entry_t l3e, *l3ep;
3421         pt_entry_t pte;
3422         vm_paddr_t pa;
3423         vm_page_t m;
3424
3425         pa = 0;
3426         m = NULL;
3427         CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
3428         PMAP_LOCK(pmap);
3429         l3ep = pmap_pml3e(pmap, va);
3430         if (l3ep != NULL && (l3e = *l3ep)) {
3431                 if (l3e & RPTE_LEAF) {
3432                         if ((l3e & PG_RW) || (prot & VM_PROT_WRITE) == 0)
3433                                 m = PHYS_TO_VM_PAGE((l3e & PG_PS_FRAME) |
3434                                     (va & L3_PAGE_MASK));
3435                 } else {
3436                         pte = *pmap_l3e_to_pte(l3ep, va);
3437                         if ((pte & PG_V) &&
3438                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0))
3439                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
3440                 }
3441                 if (m != NULL && !vm_page_wire_mapped(m))
3442                         m = NULL;
3443         }
3444         PMAP_UNLOCK(pmap);
3445         return (m);
3446 }
3447
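     /*
      * Grow the kernel page tables so that they span "addr": starting from
      * kernel_vm_end, walk forward in 2MB (L3_PAGE_SIZE) steps, allocating
      * and zeroing any missing L2 and L3 page table pages along the way.
      */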
3448 static void
3449 mmu_radix_growkernel(vm_offset_t addr)
3450 {
3451         vm_paddr_t paddr;
3452         vm_page_t nkpg;
3453         pml3_entry_t *l3e;
3454         pml2_entry_t *l2e;
3455
3456         CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
3457         if (VM_MIN_KERNEL_ADDRESS < addr &&
3458                 addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE))
3459                 return;
3460
3461         addr = roundup2(addr, L3_PAGE_SIZE);
3462         if (addr - 1 >= vm_map_max(kernel_map))
3463                 addr = vm_map_max(kernel_map);
3464         while (kernel_vm_end < addr) {
3465                 l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
3466                 if ((*l2e & PG_V) == 0) {
3467                         /* We need a new PDP entry */
3468                         nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT,
3469                             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
3470                             VM_ALLOC_WIRED | VM_ALLOC_ZERO);
3471                         if (nkpg == NULL)
3472                                 panic("pmap_growkernel: no memory to grow kernel");
3473                         if ((nkpg->flags & PG_ZERO) == 0)
3474                                 mmu_radix_zero_page(nkpg);
3475                         paddr = VM_PAGE_TO_PHYS(nkpg);
3476                         pde_store(l2e, paddr);
3477                         continue; /* try again */
3478                 }
3479                 l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end);
3480                 if ((*l3e & PG_V) != 0) {
3481                         kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3482                         if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3483                                 kernel_vm_end = vm_map_max(kernel_map);
3484                                 break;
3485                         }
3486                         continue;
3487                 }
3488
3489                 nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end),
3490                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
3491                     VM_ALLOC_ZERO);
3492                 if (nkpg == NULL)
3493                         panic("pmap_growkernel: no memory to grow kernel");
3494                 if ((nkpg->flags & PG_ZERO) == 0)
3495                         mmu_radix_zero_page(nkpg);
3496                 paddr = VM_PAGE_TO_PHYS(nkpg);
3497                 pde_store(l3e, paddr);
3498
3499                 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
3500                 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
3501                         kernel_vm_end = vm_map_max(kernel_map);
3502                         break;
3503                 }
3504         }
3505         ptesync();
3506 }
3507
3508 static MALLOC_DEFINE(M_RADIX_PGD, "radix_pgd", "radix page table root directory");
3509 static uma_zone_t zone_radix_pgd;
3510
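     /*
      * UMA import/release callbacks for the radix_pgd cache created in
      * mmu_radix_init().  Each cached item is a physically contiguous,
      * wired, zeroed block of RADIX_PGD_SIZE bytes that is accessed through
      * the direct map.
      */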
3511 static int
3512 radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused,
3513     int flags)
3514 {
3515
3516         for (int i = 0; i < count; i++) {
3517                 vm_page_t m = vm_page_alloc_contig(NULL, 0,
3518                     VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
3519                     VM_ALLOC_ZERO | VM_ALLOC_WAITOK, RADIX_PGD_SIZE/PAGE_SIZE,
3520                     0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE,
3521                     VM_MEMATTR_DEFAULT);
3522                 /* XXX zero on alloc here so we don't have to later */
3523                 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3524         }
3525         return (count);
3526 }
3527
3528 static void
3529 radix_pgd_release(void *arg __unused, void **store, int count)
3530 {
3531         vm_page_t m;
3532         struct spglist free;
3533         int page_count;
3534
3535         SLIST_INIT(&free);
3536         page_count = RADIX_PGD_SIZE/PAGE_SIZE;
3537
3538         for (int i = 0; i < count; i++) {
3539                 /*
3540                  * XXX selectively remove dmap and KVA entries so we don't
3541                  * need to bzero
3542                  */
3543                 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i]));
3544                 for (int j = page_count-1; j >= 0; j--) {
3545                         vm_page_unwire_noq(&m[j]);
3546                         SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss);
3547                 }
3548                 vm_page_free_pages_toq(&free, false);
3549         }
3550 }
3551
3552 static void
3553 mmu_radix_init(void)
3554 {
3555         vm_page_t mpte;
3556         vm_size_t s;
3557         int error, i, pv_npg;
3558
3559         /* L1TF, reserve page @0 unconditionally */
3560         vm_page_excludelist_add(0, bootverbose);
3561
3562         zone_radix_pgd = uma_zcache_create("radix_pgd_cache",
3563                 RADIX_PGD_SIZE, NULL, NULL,
3564 #ifdef INVARIANTS
3565             trash_init, trash_fini,
3566 #else
3567             NULL, NULL,
3568 #endif
3569                 radix_pgd_import, radix_pgd_release,
3570                 NULL, UMA_ZONE_NOBUCKET);
3571
3572         /*
3573          * Initialize the vm page array entries for the kernel pmap's
3574          * page table pages.
3575          */
3576         PMAP_LOCK(kernel_pmap);
3577         for (i = 0; i < nkpt; i++) {
3578                 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
3579                 KASSERT(mpte >= vm_page_array &&
3580                     mpte < &vm_page_array[vm_page_array_size],
3581                     ("pmap_init: page table page is out of range size: %lu",
3582                      vm_page_array_size));
3583                 mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i;
3584                 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
3585                 MPASS(PHYS_TO_VM_PAGE(mpte->phys_addr) == mpte);
3586                 //pmap_insert_pt_page(kernel_pmap, mpte);
3587                 mpte->ref_count = 1;
3588         }
3589         PMAP_UNLOCK(kernel_pmap);
3590         vm_wire_add(nkpt);
3591
3592         CTR1(KTR_PMAP, "%s()", __func__);
3593         TAILQ_INIT(&pv_dummy.pv_list);
3594
3595         /*
3596          * Are large page mappings enabled?
3597          */
3598         TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
3599         if (pg_ps_enabled) {
3600                 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
3601                     ("pmap_init: can't assign to pagesizes[1]"));
3602                 pagesizes[1] = L3_PAGE_SIZE;
3603         }
3604
3605         /*
3606          * Initialize the pv chunk list mutex.
3607          */
3608         mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
3609
3610         /*
3611          * Initialize the pool of pv list locks.
3612          */
3613         for (i = 0; i < NPV_LIST_LOCKS; i++)
3614                 rw_init(&pv_list_locks[i], "pmap pv list");
3615
3616         /*
3617          * Calculate the size of the pv head table for superpages.
3618          */
3619         pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L3_PAGE_SIZE);
3620
3621         /*
3622          * Allocate memory for the pv head table for superpages.
3623          */
3624         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
3625         s = round_page(s);
3626         pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
3627         for (i = 0; i < pv_npg; i++)
3628                 TAILQ_INIT(&pv_table[i].pv_list);
3630
3631         pmap_initialized = 1;
3632         mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN);
3633         error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
3634             (vmem_addr_t *)&qframe);
3635
3636         if (error != 0)
3637                 panic("qframe allocation failed");
3638         asid_arena = vmem_create("ASID", isa3_base_pid + 1, (1<<isa3_pid_bits),
3639             1, 1, M_WAITOK);
3640 }
3641
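     /*
      * Return TRUE if any mapping of "m" (4KB or superpage) has the
      * requested accessed and/or modified bits set.  Because the pmap lock
      * must be acquired while the PV list lock is already held, the trylock
      * may fail; in that case both locks are reacquired in order and the
      * generation counts are used to detect a concurrent PV list change,
      * restarting the scan if one occurred.
      */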
3642 static boolean_t
3643 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3644 {
3645         struct rwlock *lock;
3646         pv_entry_t pv;
3647         struct md_page *pvh;
3648         pt_entry_t *pte, mask;
3649         pmap_t pmap;
3650         int md_gen, pvh_gen;
3651         boolean_t rv;
3652
3653         rv = FALSE;
3654         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3655         rw_rlock(lock);
3656 restart:
3657         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
3658                 pmap = PV_PMAP(pv);
3659                 if (!PMAP_TRYLOCK(pmap)) {
3660                         md_gen = m->md.pv_gen;
3661                         rw_runlock(lock);
3662                         PMAP_LOCK(pmap);
3663                         rw_rlock(lock);
3664                         if (md_gen != m->md.pv_gen) {
3665                                 PMAP_UNLOCK(pmap);
3666                                 goto restart;
3667                         }
3668                 }
3669                 pte = pmap_pte(pmap, pv->pv_va);
3670                 mask = 0;
3671                 if (modified)
3672                         mask |= PG_RW | PG_M;
3673                 if (accessed)
3674                         mask |= PG_V | PG_A;
3675                 rv = (*pte & mask) == mask;
3676                 PMAP_UNLOCK(pmap);
3677                 if (rv)
3678                         goto out;
3679         }
3680         if ((m->flags & PG_FICTITIOUS) == 0) {
3681                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3682                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
3683                         pmap = PV_PMAP(pv);
3684                         if (!PMAP_TRYLOCK(pmap)) {
3685                                 md_gen = m->md.pv_gen;
3686                                 pvh_gen = pvh->pv_gen;
3687                                 rw_runlock(lock);
3688                                 PMAP_LOCK(pmap);
3689                                 rw_rlock(lock);
3690                                 if (md_gen != m->md.pv_gen ||
3691                                     pvh_gen != pvh->pv_gen) {
3692                                         PMAP_UNLOCK(pmap);
3693                                         goto restart;
3694                                 }
3695                         }
3696                         pte = pmap_pml3e(pmap, pv->pv_va);
3697                         mask = 0;
3698                         if (modified)
3699                                 mask |= PG_RW | PG_M;
3700                         if (accessed)
3701                                 mask |= PG_V | PG_A;
3702                         rv = (*pte & mask) == mask;
3703                         PMAP_UNLOCK(pmap);
3704                         if (rv)
3705                                 goto out;
3706                 }
3707         }
3708 out:
3709         rw_runlock(lock);
3710         return (rv);
3711 }
3712
3713 /*
3714  *      pmap_is_modified:
3715  *
3716  *      Return whether or not the specified physical page was modified
3717  *      in any physical maps.
3718  */
3719 boolean_t
3720 mmu_radix_is_modified(vm_page_t m)
3721 {
3722
3723         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3724             ("pmap_is_modified: page %p is not managed", m));
3725
3726         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3727         /*
3728          * If the page is not busied then this check is racy.
3729          */
3730         if (!pmap_page_is_write_mapped(m))
3731                 return (FALSE);
3732         return (pmap_page_test_mappings(m, FALSE, TRUE));
3733 }
3734
3735 boolean_t
3736 mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3737 {
3738         pml3_entry_t *l3e;
3739         pt_entry_t *pte;
3740         boolean_t rv;
3741
3742         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
3743         rv = FALSE;
3744         PMAP_LOCK(pmap);
3745         l3e = pmap_pml3e(pmap, addr);
3746         if (l3e != NULL && (*l3e & (RPTE_LEAF | PG_V)) == PG_V) {
3747                 pte = pmap_l3e_to_pte(l3e, addr);
3748                 rv = (*pte & PG_V) == 0;
3749         }
3750         PMAP_UNLOCK(pmap);
3751         return (rv);
3752 }
3753
3754 boolean_t
3755 mmu_radix_is_referenced(vm_page_t m)
3756 {
3757         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3758             ("pmap_is_referenced: page %p is not managed", m));
3759         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3760         return (pmap_page_test_mappings(m, TRUE, FALSE));
3761 }
3762
3763 /*
3764  *      pmap_ts_referenced:
3765  *
3766  *      Return a count of reference bits for a page, clearing those bits.
3767  *      It is not necessary for every reference bit to be cleared, but it
3768  *      is necessary that 0 only be returned when there are truly no
3769  *      reference bits set.
3770  *
3771  *      As an optimization, update the page's dirty field if a modified bit is
3772  *      found while counting reference bits.  This opportunistic update can be
3773  *      performed at low cost and can eliminate the need for some future calls
3774  *      to pmap_is_modified().  However, since this function stops after
3775  *      finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
3776  *      dirty pages.  Those dirty pages will only be detected by a future call
3777  *      to pmap_is_modified().
3778  *
3779  *      A DI block is not needed within this function, because
3780  *      invalidations are performed before the PV list lock is
3781  *      released.
3782  */
3783 int
3784 mmu_radix_ts_referenced(vm_page_t m)
3785 {
3786         struct md_page *pvh;
3787         pv_entry_t pv, pvf;
3788         pmap_t pmap;
3789         struct rwlock *lock;
3790         pml3_entry_t oldl3e, *l3e;
3791         pt_entry_t *pte;
3792         vm_paddr_t pa;
3793         int cleared, md_gen, not_cleared, pvh_gen;
3794         struct spglist free;
3795
3796         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
3797         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3798             ("pmap_ts_referenced: page %p is not managed", m));
3799         SLIST_INIT(&free);
3800         cleared = 0;
3801         pa = VM_PAGE_TO_PHYS(m);
3802         lock = PHYS_TO_PV_LIST_LOCK(pa);
3803         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3804         rw_wlock(lock);
3805 retry:
3806         not_cleared = 0;
3807         if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3808                 goto small_mappings;
3809         pv = pvf;
3810         do {
3811                 if (pvf == NULL)
3812                         pvf = pv;
3813                 pmap = PV_PMAP(pv);
3814                 if (!PMAP_TRYLOCK(pmap)) {
3815                         pvh_gen = pvh->pv_gen;
3816                         rw_wunlock(lock);
3817                         PMAP_LOCK(pmap);
3818                         rw_wlock(lock);
3819                         if (pvh_gen != pvh->pv_gen) {
3820                                 PMAP_UNLOCK(pmap);
3821                                 goto retry;
3822                         }
3823                 }
3824                 l3e = pmap_pml3e(pmap, pv->pv_va);
3825                 oldl3e = *l3e;
3826                 if ((oldl3e & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3827                         /*
3828                          * Although "oldl3e" is mapping a 2MB page, because
3829                          * this function is called at a 4KB page granularity,
3830                          * we only update the 4KB page under test.
3831                          */
3832                         vm_page_dirty(m);
3833                 }
3834                 if ((oldl3e & PG_A) != 0) {
3835                         /*
3836                          * Since this reference bit is shared by 512 4KB
3837                          * pages, it should not be cleared every time it is
3838                          * tested.  Apply a simple "hash" function on the
3839                          * physical page number, the virtual superpage number,
3840                          * and the pmap address to select one 4KB page out of
3841                          * the 512 on which testing the reference bit will
3842                          * result in clearing that reference bit.  This
3843                          * function is designed to avoid the selection of the
3844                          * same 4KB page for every 2MB page mapping.
3845                          *
3846                          * On demotion, a mapping that hasn't been referenced
3847                          * is simply destroyed.  To avoid the possibility of a
3848                          * subsequent page fault on a demoted wired mapping,
3849                          * always leave its reference bit set.  Moreover,
3850                          * since the superpage is wired, the current state of
3851                          * its reference bit won't affect page replacement.
3852                          */
3853                         if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L3_PAGE_SIZE_SHIFT) ^
3854                             (uintptr_t)pmap) & (NPTEPG - 1)) == 0 &&
3855                             (oldl3e & PG_W) == 0) {
3856                                 atomic_clear_long(l3e, PG_A);
3857                                 pmap_invalidate_page(pmap, pv->pv_va);
3858                                 cleared++;
3859                                 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3860                                     ("inconsistent pv lock %p %p for page %p",
3861                                     lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3862                         } else
3863                                 not_cleared++;
3864                 }
3865                 PMAP_UNLOCK(pmap);
3866                 /* Rotate the PV list if it has more than one entry. */
3867                 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3868                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
3869                         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
3870                         pvh->pv_gen++;
3871                 }
3872                 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
3873                         goto out;
3874         } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
3875 small_mappings:
3876         if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
3877                 goto out;
3878         pv = pvf;
3879         do {
3880                 if (pvf == NULL)
3881                         pvf = pv;
3882                 pmap = PV_PMAP(pv);
3883                 if (!PMAP_TRYLOCK(pmap)) {
3884                         pvh_gen = pvh->pv_gen;
3885                         md_gen = m->md.pv_gen;
3886                         rw_wunlock(lock);
3887                         PMAP_LOCK(pmap);
3888                         rw_wlock(lock);
3889                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3890                                 PMAP_UNLOCK(pmap);
3891                                 goto retry;
3892                         }
3893                 }
3894                 l3e = pmap_pml3e(pmap, pv->pv_va);
3895                 KASSERT((*l3e & RPTE_LEAF) == 0,
3896                     ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
3897                     m));
3898                 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
3899                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
3900                         vm_page_dirty(m);
3901                 if ((*pte & PG_A) != 0) {
3902                         atomic_clear_long(pte, PG_A);
3903                         pmap_invalidate_page(pmap, pv->pv_va);
3904                         cleared++;
3905                 }
3906                 PMAP_UNLOCK(pmap);
3907                 /* Rotate the PV list if it has more than one entry. */
3908                 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) {
3909                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
3910                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
3911                         m->md.pv_gen++;
3912                 }
3913         } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
3914             not_cleared < PMAP_TS_REFERENCED_MAX);
3915 out:
3916         rw_wunlock(lock);
3917         vm_page_free_pages_toq(&free, true);
3918         return (cleared + not_cleared);
3919 }
3920
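     /*
      * Traditional pmap_map() interface.  All of physical memory is covered
      * by the direct map on radix, so no kernel virtual address space needs
      * to be allocated here; simply hand back the DMAP address of "start".
      */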
3921 static vm_offset_t
3922 mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
3923     vm_paddr_t end, int prot __unused)
3924 {
3925
3926         CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
3927                  prot);
3928         return (PHYS_TO_DMAP(start));
3929 }
3930
3931 void
3932 mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
3933     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
3934 {
3935         pml3_entry_t *l3e;
3936         vm_paddr_t pa, ptepa;
3937         vm_page_t p, pdpg;
3938         vm_memattr_t ma;
3939
3940         CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
3941             object, pindex, size);
3942         VM_OBJECT_ASSERT_WLOCKED(object);
3943         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3944                         ("pmap_object_init_pt: non-device object"));
3945         /* NB: size can be logically ored with addr here */
3946         if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) {
3947                 if (!mmu_radix_ps_enabled(pmap))
3948                         return;
3949                 if (!vm_object_populate(object, pindex, pindex + atop(size)))
3950                         return;
3951                 p = vm_page_lookup(object, pindex);
3952                 KASSERT(p->valid == VM_PAGE_BITS_ALL,
3953                     ("pmap_object_init_pt: invalid page %p", p));
3954                 ma = p->md.mdpg_cache_attrs;
3955
3956                 /*
3957                  * Abort the mapping if the first page is not physically
3958                  * aligned to a 2MB page boundary.
3959                  */
3960                 ptepa = VM_PAGE_TO_PHYS(p);
3961                 if (ptepa & L3_PAGE_MASK)
3962                         return;
3963
3964                 /*
3965                  * Skip the first page.  Abort the mapping if the rest of
3966                  * the pages are not physically contiguous or have differing
3967                  * memory attributes.
3968                  */
3969                 p = TAILQ_NEXT(p, listq);
3970                 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
3971                     pa += PAGE_SIZE) {
3972                         KASSERT(p->valid == VM_PAGE_BITS_ALL,
3973                             ("pmap_object_init_pt: invalid page %p", p));
3974                         if (pa != VM_PAGE_TO_PHYS(p) ||
3975                             ma != p->md.mdpg_cache_attrs)
3976                                 return;
3977                         p = TAILQ_NEXT(p, listq);
3978                 }
3979
3980                 PMAP_LOCK(pmap);
3981                 for (pa = ptepa | pmap_cache_bits(ma);
3982                     pa < ptepa + size; pa += L3_PAGE_SIZE) {
3983                         pdpg = pmap_allocl3e(pmap, addr, NULL);
3984                         if (pdpg == NULL) {
3985                                 /*
3986                                  * The creation of mappings below is only an
3987                                  * optimization.  If a page directory page
3988                                  * cannot be allocated without blocking,
3989                                  * continue on to the next mapping rather than
3990                                  * blocking.
3991                                  */
3992                                 addr += L3_PAGE_SIZE;
3993                                 continue;
3994                         }
3995                         l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
3996                         l3e = &l3e[pmap_pml3e_index(addr)];
3997                         if ((*l3e & PG_V) == 0) {
3998                                 pa |= PG_M | PG_A | PG_RW;
3999                                 pte_store(l3e, pa);
4000                                 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE);
4001                                 atomic_add_long(&pmap_l3e_mappings, 1);
4002                         } else {
4003                                 /* Continue on if the PDE is already valid. */
4004                                 pdpg->ref_count--;
4005                                 KASSERT(pdpg->ref_count > 0,
4006                                     ("pmap_object_init_pt: missing reference "
4007                                     "to page directory page, va: 0x%lx", addr));
4008                         }
4009                         addr += L3_PAGE_SIZE;
4010                 }
4011                 ptesync();
4012                 PMAP_UNLOCK(pmap);
4013         }
4014 }
4015
4016 boolean_t
4017 mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
4018 {
4019         struct md_page *pvh;
4020         struct rwlock *lock;
4021         pv_entry_t pv;
4022         int loops = 0;
4023         boolean_t rv;
4024
4025         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4026             ("pmap_page_exists_quick: page %p is not managed", m));
4027         CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
4028         rv = FALSE;
4029         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4030         rw_rlock(lock);
4031         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4032                 if (PV_PMAP(pv) == pmap) {
4033                         rv = TRUE;
4034                         break;
4035                 }
4036                 loops++;
4037                 if (loops >= 16)
4038                         break;
4039         }
4040         if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
4041                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4042                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4043                         if (PV_PMAP(pv) == pmap) {
4044                                 rv = TRUE;
4045                                 break;
4046                         }
4047                         loops++;
4048                         if (loops >= 16)
4049                                 break;
4050                 }
4051         }
4052         rw_runlock(lock);
4053         return (rv);
4054 }
4055
4056 void
4057 mmu_radix_page_init(vm_page_t m)
4058 {
4059
4060         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4061         TAILQ_INIT(&m->md.pv_list);
4062         m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
4063 }
4064
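     /*
      * Return the number of wired mappings of "m", counting both 4KB and
      * superpage mappings and using the same trylock/generation-count retry
      * protocol as pmap_page_test_mappings() above.
      */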
4065 int
4066 mmu_radix_page_wired_mappings(vm_page_t m)
4067 {
4068         struct rwlock *lock;
4069         struct md_page *pvh;
4070         pmap_t pmap;
4071         pt_entry_t *pte;
4072         pv_entry_t pv;
4073         int count, md_gen, pvh_gen;
4074
4075         if ((m->oflags & VPO_UNMANAGED) != 0)
4076                 return (0);
4077         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
4078         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
4079         rw_rlock(lock);
4080 restart:
4081         count = 0;
4082         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
4083                 pmap = PV_PMAP(pv);
4084                 if (!PMAP_TRYLOCK(pmap)) {
4085                         md_gen = m->md.pv_gen;
4086                         rw_runlock(lock);
4087                         PMAP_LOCK(pmap);
4088                         rw_rlock(lock);
4089                         if (md_gen != m->md.pv_gen) {
4090                                 PMAP_UNLOCK(pmap);
4091                                 goto restart;
4092                         }
4093                 }
4094                 pte = pmap_pte(pmap, pv->pv_va);
4095                 if ((*pte & PG_W) != 0)
4096                         count++;
4097                 PMAP_UNLOCK(pmap);
4098         }
4099         if ((m->flags & PG_FICTITIOUS) == 0) {
4100                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4101                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
4102                         pmap = PV_PMAP(pv);
4103                         if (!PMAP_TRYLOCK(pmap)) {
4104                                 md_gen = m->md.pv_gen;
4105                                 pvh_gen = pvh->pv_gen;
4106                                 rw_runlock(lock);
4107                                 PMAP_LOCK(pmap);
4108                                 rw_rlock(lock);
4109                                 if (md_gen != m->md.pv_gen ||
4110                                     pvh_gen != pvh->pv_gen) {
4111                                         PMAP_UNLOCK(pmap);
4112                                         goto restart;
4113                                 }
4114                         }
4115                         pte = pmap_pml3e(pmap, pv->pv_va);
4116                         if ((*pte & PG_W) != 0)
4117                                 count++;
4118                         PMAP_UNLOCK(pmap);
4119                 }
4120         }
4121         rw_runlock(lock);
4122         return (count);
4123 }
4124
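/*
 * Point the process table entry for 'pid' at the given top-level page
 * directory, combining RTS_SIZE and RADIX_PGD_INDEX_SHIFT with the
 * directory's physical address.
 */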
4125 static void
4126 mmu_radix_update_proctab(int pid, pml1_entry_t l1pa)
4127 {
4128         isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT);
4129 }
4130
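/*
 * Initialize a pmap: allocate and zero the root page directory, assign a
 * PID from the ASID arena, and publish the directory in the process table.
 */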
4131 int
4132 mmu_radix_pinit(pmap_t pmap)
4133 {
4134         vmem_addr_t pid;
4135         vm_paddr_t l1pa;
4136
4137         CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4138
4139         /*
4140          * allocate the page directory page
4141          */
4142         pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK);
4143
4144         for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++)
4145                 pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE);
4146         pmap->pm_radix.rt_root = 0;
4147         TAILQ_INIT(&pmap->pm_pvchunk);
4148         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4149         pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4150         vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid);
4151
4152         pmap->pm_pid = pid;
4153         l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
4154         mmu_radix_update_proctab(pid, l1pa);
4155         __asm __volatile("ptesync;isync" : : : "memory");
4156
4157         return (1);
4158 }
4159
4160 /*
4161  * This routine is called if the desired page table page does not exist.
4162  *
4163  * If page table page allocation fails, this routine may sleep before
4164  * returning NULL.  It sleeps only if a lock pointer was given.
4165  *
4166  * Note: If a page allocation fails at page table level two or three,
4167  * one or two pages may be held during the wait, only to be released
4168  * afterwards.  This conservative approach is easily argued to avoid
4169  * race conditions.
4170  */
4171 static vm_page_t
4172 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
4173 {
4174         vm_page_t m, pdppg, pdpg;
4175
4176         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4177
4178         /*
4179          * Allocate a page table page.
4180          */
4181         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
4182             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
4183                 if (lockp != NULL) {
4184                         RELEASE_PV_LIST_LOCK(lockp);
4185                         PMAP_UNLOCK(pmap);
4186                         vm_wait(NULL);
4187                         PMAP_LOCK(pmap);
4188                 }
4189                 /*
4190                  * Indicate the need to retry.  While waiting, the page table
4191                  * page may have been allocated.
4192                  */
4193                 return (NULL);
4194         }
4195         if ((m->flags & PG_ZERO) == 0)
4196                 mmu_radix_zero_page(m);
4197
4198         /*
4199          * Map the pagetable page into the process address space, if
4200          * it isn't already there.
4201          */
4202
4203         if (ptepindex >= (NUPDE + NUPDPE)) {
4204                 pml1_entry_t *l1e;
4205                 vm_pindex_t pml1index;
4206
4207                 /* Wire up a new PDPE page */
4208                 pml1index = ptepindex - (NUPDE + NUPDPE);
4209                 l1e = &pmap->pm_pml1[pml1index];
4210                 pde_store(l1e, VM_PAGE_TO_PHYS(m));
4211
4212         } else if (ptepindex >= NUPDE) {
4213                 vm_pindex_t pml1index;
4214                 vm_pindex_t pdpindex;
4215                 pml1_entry_t *l1e;
4216                 pml2_entry_t *l2e;
4217
4218                 /* Wire up a new l2e page */
4219                 pdpindex = ptepindex - NUPDE;
4220                 pml1index = pdpindex >> RPTE_SHIFT;
4221
4222                 l1e = &pmap->pm_pml1[pml1index];
4223                 if ((*l1e & PG_V) == 0) {
4224                         /* Have to allocate a new pdp, recurse */
4225                         if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml1index,
4226                                 lockp) == NULL) {
4227                                 vm_page_unwire_noq(m);
4228                                 vm_page_free_zero(m);
4229                                 return (NULL);
4230                         }
4231                 } else {
4232                         /* Add reference to l2e page */
4233                         pdppg = PHYS_TO_VM_PAGE(*l1e & PG_FRAME);
4234                         pdppg->ref_count++;
4235                 }
4236                 l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME);
4237
4238                 /* Now find the pdp page */
4239                 l2e = &l2e[pdpindex & RPTE_MASK];
4240                 pde_store(l2e, VM_PAGE_TO_PHYS(m));
4241
4242         } else {
4243                 vm_pindex_t pml1index;
4244                 vm_pindex_t pdpindex;
4245                 pml1_entry_t *l1e;
4246                 pml2_entry_t *l2e;
4247                 pml3_entry_t *l3e;
4248
4249                 /* Wire up a new PTE page */
4250                 pdpindex = ptepindex >> RPTE_SHIFT;
4251                 pml1index = pdpindex >> RPTE_SHIFT;
4252
4253                 /* First, find the pdp and check that it's valid. */
4254                 l1e = &pmap->pm_pml1[pml1index];
4255                 if ((*l1e & PG_V) == 0) {
4256                         /* Have to allocate a new pd, recurse */
4257                         if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4258                             lockp) == NULL) {
4259                                 vm_page_unwire_noq(m);
4260                                 vm_page_free_zero(m);
4261                                 return (NULL);
4262                         }
4263                         l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME);
4264                         l2e = &l2e[pdpindex & RPTE_MASK];
4265                 } else {
4266                         l2e = (pml2_entry_t *)PHYS_TO_DMAP(*l1e & PG_FRAME);
4267                         l2e = &l2e[pdpindex & RPTE_MASK];
4268                         if ((*l2e & PG_V) == 0) {
4269                                 /* Have to allocate a new pd, recurse */
4270                                 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
4271                                     lockp) == NULL) {
4272                                         vm_page_unwire_noq(m);
4273                                         vm_page_free_zero(m);
4274                                         return (NULL);
4275                                 }
4276                         } else {
4277                                 /* Add reference to the pd page */
4278                                 pdpg = PHYS_TO_VM_PAGE(*l2e & PG_FRAME);
4279                                 pdpg->ref_count++;
4280                         }
4281                 }
4282                 l3e = (pml3_entry_t *)PHYS_TO_DMAP(*l2e & PG_FRAME);
4283
4284                 /* Now we know where the page directory page is */
4285                 l3e = &l3e[ptepindex & RPTE_MASK];
4286                 pde_store(l3e, VM_PAGE_TO_PHYS(m));
4287         }
4288
4289         pmap_resident_count_inc(pmap, 1);
4290         return (m);
4291 }
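
/*
 * Return the page directory page that holds the L3 entry for 'va',
 * allocating it via _pmap_allocpte() if it is not already present.  When
 * 'lockp' is non-NULL, a failed allocation sleeps and the lookup is retried.
 */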
4292 static vm_page_t
4293 pmap_allocl3e(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4294 {
4295         vm_pindex_t pdpindex, ptepindex;
4296         pml2_entry_t *pdpe;
4297         vm_page_t pdpg;
4298
4299 retry:
4300         pdpe = pmap_pml2e(pmap, va);
4301         if (pdpe != NULL && (*pdpe & PG_V) != 0) {
4302                 /* Add a reference to the pd page. */
4303                 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
4304                 pdpg->ref_count++;
4305         } else {
4306                 /* Allocate a pd page. */
4307                 ptepindex = pmap_l3e_pindex(va);
4308                 pdpindex = ptepindex >> RPTE_SHIFT;
4309                 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
4310                 if (pdpg == NULL && lockp != NULL)
4311                         goto retry;
4312         }
4313         return (pdpg);
4314 }
4315
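/*
 * Return the page table page that maps 'va', bumping its reference count.
 * A 2MB mapping covering 'va' is demoted first.  If the page table page
 * must be allocated and 'lockp' is non-NULL, the allocation may sleep and
 * the lookup is retried; with a NULL 'lockp' the function can return NULL.
 */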
4316 static vm_page_t
4317 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
4318 {
4319         vm_pindex_t ptepindex;
4320         pml3_entry_t *pd;
4321         vm_page_t m;
4322
4323         /*
4324          * Calculate pagetable page index
4325          */
4326         ptepindex = pmap_l3e_pindex(va);
4327 retry:
4328         /*
4329          * Get the page directory entry
4330          */
4331         pd = pmap_pml3e(pmap, va);
4332
4333         /*
4334          * This supports switching from a 2MB page to a
4335          * normal 4K page.
4336          */
4337         if (pd != NULL && (*pd & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V)) {
4338                 if (!pmap_demote_l3e_locked(pmap, pd, va, lockp)) {
4339                         /*
4340                          * Invalidation of the 2MB page mapping may have caused
4341                          * the deallocation of the underlying PD page.
4342                          */
4343                         pd = NULL;
4344                 }
4345         }
4346
4347         /*
4348          * If the page table page is mapped, we just increment the
4349          * hold count, and activate it.
4350          */
4351         if (pd != NULL && (*pd & PG_V) != 0) {
4352                 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
4353                 m->ref_count++;
4354         } else {
4355                 /*
4356                  * Here if the pte page isn't mapped, or if it has been
4357                  * deallocated.
4358                  */
4359                 m = _pmap_allocpte(pmap, ptepindex, lockp);
4360                 if (m == NULL && lockp != NULL)
4361                         goto retry;
4362         }
4363         return (m);
4364 }
4365
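/*
 * Initialize the pmap for process 0, which shares the kernel pmap's page
 * directory and PID rather than allocating its own.
 */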
4366 static void
4367 mmu_radix_pinit0(pmap_t pmap)
4368 {
4369
4370         CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4371         PMAP_LOCK_INIT(pmap);
4372         pmap->pm_pml1 = kernel_pmap->pm_pml1;
4373         pmap->pm_pid = kernel_pmap->pm_pid;
4374
4375         pmap->pm_radix.rt_root = 0;
4376         TAILQ_INIT(&pmap->pm_pvchunk);
4377         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
4378         kernel_pmap->pm_flags =
4379                 pmap->pm_flags = PMAP_PDE_SUPERPAGE;
4380 }
4381 /*
4382  * pmap_protect_l3e: apply the given protection to a 2MB page mapping
4383  */
4384 static boolean_t
4385 pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
4386 {
4387         pt_entry_t newpde, oldpde;
4388         vm_offset_t eva, va;
4389         vm_page_t m;
4390         boolean_t anychanged;
4391
4392         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4393         KASSERT((sva & L3_PAGE_MASK) == 0,
4394             ("pmap_protect_l3e: sva is not 2mpage aligned"));
4395         anychanged = FALSE;
4396 retry:
4397         oldpde = newpde = *l3e;
4398         if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
4399             (PG_MANAGED | PG_M | PG_RW)) {
4400                 eva = sva + L3_PAGE_SIZE;
4401                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4402                     va < eva; va += PAGE_SIZE, m++)
4403                         vm_page_dirty(m);
4404         }
4405         if ((prot & VM_PROT_WRITE) == 0) {
4406                 newpde &= ~(PG_RW | PG_M);
4407                 newpde |= RPTE_EAA_R;
4408         }
4409         if (prot & VM_PROT_EXECUTE)
4410                 newpde |= PG_X;
4411         if (newpde != oldpde) {
4412                 /*
4413                  * As an optimization to future operations on this PDE, clear
4414                  * PG_PROMOTED.  The impending invalidation will remove any
4415                  * lingering 4KB page mappings from the TLB.
4416                  */
4417                 if (!atomic_cmpset_long(l3e, oldpde, newpde & ~PG_PROMOTED))
4418                         goto retry;
4419                 anychanged = TRUE;
4420         }
4421         return (anychanged);
4422 }
4423
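/*
 * Set the access protections on the virtual address range [sva, eva).
 * Removing all access is delegated to mmu_radix_remove(); requests that do
 * not take away write or execute permission return immediately, since this
 * function only ever reduces permissions.
 */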
4424 void
4425 mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
4426     vm_prot_t prot)
4427 {
4428         vm_offset_t va_next;
4429         pml1_entry_t *l1e;
4430         pml2_entry_t *l2e;
4431         pml3_entry_t ptpaddr, *l3e;
4432         pt_entry_t *pte;
4433         boolean_t anychanged;
4434
4435         CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva,
4436             prot);
4437
4438         KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4439         if (prot == VM_PROT_NONE) {
4440                 mmu_radix_remove(pmap, sva, eva);
4441                 return;
4442         }
4443
4444         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
4445             (VM_PROT_WRITE|VM_PROT_EXECUTE))
4446                 return;
4447
4448 #ifdef INVARIANTS
4449         if (VERBOSE_PROTECT || pmap_logging)
4450                 printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n",
4451                            pmap, sva, eva, prot, pmap->pm_pid);
4452 #endif
4453         anychanged = FALSE;
4454
4455         PMAP_LOCK(pmap);
4456         for (; sva < eva; sva = va_next) {
4457                 l1e = pmap_pml1e(pmap, sva);
4458                 if ((*l1e & PG_V) == 0) {
4459                         va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
4460                         if (va_next < sva)
4461                                 va_next = eva;
4462                         continue;
4463                 }
4464
4465                 l2e = pmap_l1e_to_l2e(l1e, sva);
4466                 if ((*l2e & PG_V) == 0) {
4467                         va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
4468                         if (va_next < sva)
4469                                 va_next = eva;
4470                         continue;
4471                 }
4472
4473                 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
4474                 if (va_next < sva)
4475                         va_next = eva;
4476
4477                 l3e = pmap_l2e_to_l3e(l2e, sva);
4478                 ptpaddr = *l3e;
4479
4480                 /*
4481                  * Weed out invalid mappings.
4482                  */
4483                 if (ptpaddr == 0)
4484                         continue;
4485
4486                 /*
4487                  * Check for large page.
4488                  */
4489                 if ((ptpaddr & RPTE_LEAF) != 0) {
4490                         /*
4491                          * Are we protecting the entire large page?  If not,
4492                          * demote the mapping and fall through.
4493                          */
4494                         if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
4495                                 if (pmap_protect_l3e(pmap, l3e, sva, prot))
4496                                         anychanged = TRUE;
4497                                 continue;
4498                         } else if (!pmap_demote_l3e(pmap, l3e, sva)) {
4499                                 /*
4500                                  * The large page mapping was destroyed.
4501                                  */
4502                                 continue;
4503                         }
4504                 }
4505
4506                 if (va_next > eva)
4507                         va_next = eva;
4508
4509                 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
4510                     sva += PAGE_SIZE) {
4511                         pt_entry_t obits, pbits;
4512                         vm_page_t m;
4513
4514 retry:
4515                         MPASS(pte == pmap_pte(pmap, sva));
4516                         obits = pbits = *pte;
4517                         if ((pbits & PG_V) == 0)
4518                                 continue;
4519
4520                         if ((prot & VM_PROT_WRITE) == 0) {
4521                                 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
4522                                     (PG_MANAGED | PG_M | PG_RW)) {
4523                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
4524                                         vm_page_dirty(m);
4525                                 }
4526                                 pbits &= ~(PG_RW | PG_M);
4527                                 pbits |= RPTE_EAA_R;
4528                         }
4529                         if (prot & VM_PROT_EXECUTE)
4530                                 pbits |= PG_X;
4531
4532                         if (pbits != obits) {
4533                                 if (!atomic_cmpset_long(pte, obits, pbits))
4534                                         goto retry;
4535                                 if (obits & (PG_A|PG_M)) {
4536                                         anychanged = TRUE;
4537 #ifdef INVARIANTS
4538                                         if (VERBOSE_PROTECT || pmap_logging)
4539                                                 printf("%#lx %#lx -> %#lx\n",
4540                                                     sva, obits, pbits);
4541 #endif
4542                                 }
4543                         }
4544                 }
4545         }
4546         if (anychanged)
4547                 pmap_invalidate_all(pmap);
4548         PMAP_UNLOCK(pmap);
4549 }
4550
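/*
 * Map a run of 'count' pages into kernel virtual address space starting at
 * 'sva'.  Existing entries are overwritten; the TLB is flushed only if a
 * previously valid PTE was replaced, otherwise a ptesync suffices.
 */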
4551 void
4552 mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count)
4553 {
4554         pt_entry_t oldpte, pa, *pte;
4555         vm_page_t m;
4556         uint64_t cache_bits, attr_bits;
4557         vm_offset_t va;
4558
4559         CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count);
4560
4561         oldpte = 0;
4562         attr_bits = RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
4563         va = sva;
4564         pte = kvtopte(va);
4565         while (va < sva + PAGE_SIZE * count) {
4566                 if (__predict_false((va & L3_PAGE_MASK) == 0))
4567                         pte = kvtopte(va);
4568                 MPASS(pte == pmap_pte(kernel_pmap, va));
4569
4570                 /*
4571                  * XXX there has to be a more efficient way than traversing
4572                  * the page table every time - but go for correctness for
4573                  * today
4574                  */
4575
4576                 m = *ma++;
4577                 cache_bits = pmap_cache_bits(m->md.mdpg_cache_attrs);
4578                 pa = VM_PAGE_TO_PHYS(m) | cache_bits | attr_bits;
4579                 if (*pte != pa) {
4580                         oldpte |= *pte;
4581                         pte_store(pte, pa);
4582                 }
4583                 va += PAGE_SIZE;
4584                 pte++;
4585         }
4586         if (__predict_false((oldpte & RPTE_VALID) != 0))
4587                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
4588                     PAGE_SIZE);
4589         else
4590                 ptesync();
4591 }
4592
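/*
 * Remove the 'count' kernel mappings starting at 'sva' (the inverse of
 * mmu_radix_qenter()) and invalidate the corresponding TLB entries.
 */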
4593 void
4594 mmu_radix_qremove(vm_offset_t sva, int count)
4595 {
4596         vm_offset_t va;
4597         pt_entry_t *pte;
4598
4599         CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count);
4600         KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva));
4601
4602         va = sva;
4603         pte = kvtopte(va);
4604         while (va < sva + PAGE_SIZE * count) {
4605                 if (__predict_false((va & L3_PAGE_MASK) == 0))
4606                         pte = kvtopte(va);
4607                 pte_clear(pte);
4608                 pte++;
4609                 va += PAGE_SIZE;
4610         }
4611         pmap_invalidate_range(kernel_pmap, sva, va);
4612 }
4613
4614 /***************************************************
4615  * Page table page management routines.....
4616  ***************************************************/
4617 /*
4618  * Schedule the specified unused page table page to be freed.  Specifically,
4619  * add the page to the specified list of pages that will be released to the
4620  * physical memory manager after the TLB has been updated.
4621  */
4622 static __inline void
4623 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
4624     boolean_t set_PG_ZERO)
4625 {
4626
4627         if (set_PG_ZERO)
4628                 m->flags |= PG_ZERO;
4629         else
4630                 m->flags &= ~PG_ZERO;
4631         SLIST_INSERT_HEAD(free, m, plinks.s.ss);
4632 }
4633
4634 /*
4635  * Inserts the specified page table page into the specified pmap's collection
4636  * of idle page table pages.  Each of a pmap's page table pages is responsible
4637  * for mapping a distinct range of virtual addresses.  The pmap's collection is
4638  * ordered by this virtual address range.
4639  */
4640 static __inline int
4641 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
4642 {
4643
4644         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4645         return (vm_radix_insert(&pmap->pm_radix, mpte));
4646 }
4647
4648 /*
4649  * Removes the page table page mapping the specified virtual address from the
4650  * specified pmap's collection of idle page table pages, and returns it.
4651  * Otherwise, returns NULL if there is no page table page corresponding to the
4652  * specified virtual address.
4653  */
4654 static __inline vm_page_t
4655 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
4656 {
4657
4658         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4659         return (vm_radix_remove(&pmap->pm_radix, pmap_l3e_pindex(va)));
4660 }
4661
4662 /*
4663  * Decrements a page table page's reference count, which is used to record
4664  * the number of valid page table entries within the page.  If the reference
4665  * count drops to zero, then the page table page is unmapped.  Returns TRUE
4666  * if the page table page was unmapped and FALSE otherwise.
4667  */
4668 static inline boolean_t
4669 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4670 {
4671
4672         --m->ref_count;
4673         if (m->ref_count == 0) {
4674                 _pmap_unwire_ptp(pmap, va, m, free);
4675                 return (TRUE);
4676         } else
4677                 return (FALSE);
4678 }
4679
4680 static void
4681 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
4682 {
4683
4684         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4685         /*
4686          * unmap the page table page
4687          */
4688         if (m->pindex >= (NUPDE + NUPDPE)) {
4689                 /* PDP page */
4690                 pml1_entry_t *pml1;
4691                 pml1 = pmap_pml1e(pmap, va);
4692                 *pml1 = 0;
4693         } else if (m->pindex >= NUPDE) {
4694                 /* PD page */
4695                 pml2_entry_t *l2e;
4696                 l2e = pmap_pml2e(pmap, va);
4697                 *l2e = 0;
4698         } else {
4699                 /* PTE page */
4700                 pml3_entry_t *l3e;
4701                 l3e = pmap_pml3e(pmap, va);
4702                 *l3e = 0;
4703         }
4704         pmap_resident_count_dec(pmap, 1);
4705         if (m->pindex < NUPDE) {
4706                 /* We just released a PT, unhold the matching PD */
4707                 vm_page_t pdpg;
4708
4709                 pdpg = PHYS_TO_VM_PAGE(*pmap_pml2e(pmap, va) & PG_FRAME);
4710                 pmap_unwire_ptp(pmap, va, pdpg, free);
4711         }
4712         if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
4713                 /* We just released a PD, unhold the matching PDP */
4714                 vm_page_t pdppg;
4715
4716                 pdppg = PHYS_TO_VM_PAGE(*pmap_pml1e(pmap, va) & PG_FRAME);
4717                 pmap_unwire_ptp(pmap, va, pdppg, free);
4718         }
4719
4720         /*
4721          * Put page on a list so that it is released after
4722          * *ALL* TLB shootdown is done
4723          */
4724         pmap_add_delayed_free_list(m, free, TRUE);
4725 }
4726
4727 /*
4728  * After removing a page table entry, this routine is used to
4729  * conditionally free the page, and manage the hold/wire counts.
4730  */
4731 static int
4732 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde,
4733     struct spglist *free)
4734 {
4735         vm_page_t mpte;
4736
4737         if (va >= VM_MAXUSER_ADDRESS)
4738                 return (0);
4739         KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
4740         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
4741         return (pmap_unwire_ptp(pmap, va, mpte, free));
4742 }
4743
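/*
 * Release a pmap's resources once it has no remaining mappings: clear its
 * process table entry, free the root page directory, and return its PID to
 * the ASID arena.
 */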
4744 void
4745 mmu_radix_release(pmap_t pmap)
4746 {
4747
4748         CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
4749         KASSERT(pmap->pm_stats.resident_count == 0,
4750             ("pmap_release: pmap resident count %ld != 0",
4751             pmap->pm_stats.resident_count));
4752         KASSERT(vm_radix_is_empty(&pmap->pm_radix),
4753             ("pmap_release: pmap has reserved page table page(s)"));
4754
4755         pmap_invalidate_all(pmap);
4756         isa3_proctab[pmap->pm_pid].proctab0 = 0;
4757         uma_zfree(zone_radix_pgd, pmap->pm_pml1);
4758         vmem_free(asid_arena, pmap->pm_pid, 1);
4759 }
4760
4761 /*
4762  * Create the PV entry for a 2MB page mapping.  Always returns true unless the
4763  * flag PMAP_ENTER_NORECLAIM is specified.  If that flag is specified, returns
4764  * false if the PV entry cannot be allocated without resorting to reclamation.
4765  */
4766 static bool
4767 pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t pde, u_int flags,
4768     struct rwlock **lockp)
4769 {
4770         struct md_page *pvh;
4771         pv_entry_t pv;
4772         vm_paddr_t pa;
4773
4774         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4775         /* Pass NULL instead of the lock pointer to disable reclamation. */
4776         if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
4777             NULL : lockp)) == NULL)
4778                 return (false);
4779         pv->pv_va = va;
4780         pa = pde & PG_PS_FRAME;
4781         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
4782         pvh = pa_to_pvh(pa);
4783         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link);
4784         pvh->pv_gen++;
4785         return (true);
4786 }
4787
4788 /*
4789  * Fills a page table page with mappings to consecutive physical pages.
4790  */
4791 static void
4792 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
4793 {
4794         pt_entry_t *pte;
4795
4796         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
4797                 *pte = newpte;
4798                 newpte += PAGE_SIZE;
4799         }
4800 }
4801
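/*
 * Wrapper around pmap_demote_l3e_locked() for callers that do not already
 * hold a PV list lock.
 */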
4802 static boolean_t
4803 pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va)
4804 {
4805         struct rwlock *lock;
4806         boolean_t rv;
4807
4808         lock = NULL;
4809         rv = pmap_demote_l3e_locked(pmap, pde, va, &lock);
4810         if (lock != NULL)
4811                 rw_wunlock(lock);
4812         return (rv);
4813 }
4814
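/*
 * Demote a 2MB (leaf L3) mapping into NPTEPG 4KB mappings by installing a
 * page table page that replicates the superpage's attributes and splitting
 * its PV entry.  Returns FALSE if a page table page cannot be obtained, in
 * which case the 2MB mapping is removed instead.
 */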
4815 static boolean_t
4816 pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
4817     struct rwlock **lockp)
4818 {
4819         pml3_entry_t oldpde;
4820         pt_entry_t *firstpte;
4821         vm_paddr_t mptepa;
4822         vm_page_t mpte;
4823         struct spglist free;
4824         vm_offset_t sva;
4825
4826         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4827         oldpde = *l3e;
4828         KASSERT((oldpde & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
4829             ("pmap_demote_l3e: oldpde is missing RPTE_LEAF and/or PG_V %lx",
4830             oldpde));
4831         if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
4832             NULL) {
4833                 KASSERT((oldpde & PG_W) == 0,
4834                     ("pmap_demote_l3e: page table page for a wired mapping"
4835                     " is missing"));
4836
4837                 /*
4838                  * Invalidate the 2MB page mapping and return "failure" if the
4839                  * mapping was never accessed or the allocation of the new
4840                  * page table page fails.  If the 2MB page mapping belongs to
4841                  * the direct map region of the kernel's address space, then
4842                  * the page allocation request specifies the highest possible
4843                  * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
4844                  * normal.  Page table pages are preallocated for every other
4845                  * part of the kernel address space, so the direct map region
4846                  * is the only part of the kernel address space that must be
4847                  * handled here.
4848                  */
4849                 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
4850                     pmap_l3e_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
4851                     DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
4852                     VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
4853                         SLIST_INIT(&free);
4854                         sva = trunc_2mpage(va);
4855                         pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
4856                         pmap_invalidate_l3e_page(pmap, sva, oldpde);
4857                         vm_page_free_pages_toq(&free, true);
4858                         CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx"
4859                             " in pmap %p", va, pmap);
4860                         return (FALSE);
4861                 }
4862                 if (va < VM_MAXUSER_ADDRESS)
4863                         pmap_resident_count_inc(pmap, 1);
4864         }
4865         mptepa = VM_PAGE_TO_PHYS(mpte);
4866         firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
4867         KASSERT((oldpde & PG_A) != 0,
4868             ("pmap_demote_l3e: oldpde is missing PG_A"));
4869         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
4870             ("pmap_demote_l3e: oldpde is missing PG_M"));
4871
4872         /*
4873          * If the page table page is new, initialize it.
4874          */
4875         if (mpte->ref_count == 1) {
4876                 mpte->ref_count = NPTEPG;
4877                 pmap_fill_ptp(firstpte, oldpde);
4878         }
4879
4880         KASSERT((*firstpte & PG_FRAME) == (oldpde & PG_FRAME),
4881             ("pmap_demote_l3e: firstpte and newpte map different physical"
4882             " addresses"));
4883
4884         /*
4885          * If the mapping has changed attributes, update the page table
4886          * entries.
4887          */
4888         if ((*firstpte & PG_PTE_PROMOTE) != (oldpde & PG_PTE_PROMOTE))
4889                 pmap_fill_ptp(firstpte, oldpde);
4890
4891         /*
4892          * The spare PV entries must be reserved prior to demoting the
4893          * mapping, that is, prior to changing the PDE.  Otherwise, the state
4894          * of the PDE and the PV lists will be inconsistent, which can result
4895          * in reclaim_pv_chunk() attempting to remove a PV entry from the
4896          * wrong PV list and pmap_pv_demote_l3e() failing to find the expected
4897          * PV entry for the 2MB page mapping that is being demoted.
4898          */
4899         if ((oldpde & PG_MANAGED) != 0)
4900                 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
4901
4902         /*
4903          * Demote the mapping.  This pmap is locked.  The old PDE has
4904          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
4905          * set.  Thus, there is no danger of a race with another
4906          * processor changing the setting of PG_A and/or PG_M between
4907          * the read above and the store below.
4908          */
4909         pde_store(l3e, mptepa);
4910         ptesync();
4911         /*
4912          * Demote the PV entry.
4913          */
4914         if ((oldpde & PG_MANAGED) != 0)
4915                 pmap_pv_demote_l3e(pmap, va, oldpde & PG_PS_FRAME, lockp);
4916
4917
4918         atomic_add_long(&pmap_l3e_demotions, 1);
4919         CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx"
4920             " in pmap %p", va, pmap);
4921         return (TRUE);
4922 }
4923
4924 /*
4925  * pmap_remove_kernel_l3e: Remove a kernel superpage mapping.
4926  */
4927 static void
4928 pmap_remove_kernel_l3e(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va)
4929 {
4930         vm_paddr_t mptepa;
4931         vm_page_t mpte;
4932
4933         KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap));
4934         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4935         mpte = pmap_remove_pt_page(pmap, va);
4936         if (mpte == NULL)
4937                 panic("pmap_remove_kernel_pde: Missing pt page.");
4938
4939         mptepa = VM_PAGE_TO_PHYS(mpte);
4940
4941         /*
4942          * Initialize the page table page.
4943          */
4944         pagezero(PHYS_TO_DMAP(mptepa));
4945
4946         /*
4947          * Demote the mapping.
4948          */
4949         pde_store(l3e, mptepa);
4950         ptesync();
4951 }
4952
4953 /*
4954  * pmap_remove_l3e: do the things to unmap a superpage in a process
4955  * pmap_remove_l3e: unmap a 2MB superpage mapping from a pmap
4956 static int
4957 pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
4958     struct spglist *free, struct rwlock **lockp)
4959 {
4960         struct md_page *pvh;
4961         pml3_entry_t oldpde;
4962         vm_offset_t eva, va;
4963         vm_page_t m, mpte;
4964
4965         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4966         KASSERT((sva & L3_PAGE_MASK) == 0,
4967             ("pmap_remove_l3e: sva is not 2mpage aligned"));
4968         oldpde = pte_load_clear(pdq);
4969         if (oldpde & PG_W)
4970                 pmap->pm_stats.wired_count -= (L3_PAGE_SIZE / PAGE_SIZE);
4971         pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
4972         if (oldpde & PG_MANAGED) {
4973                 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
4974                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
4975                 pmap_pvh_free(pvh, pmap, sva);
4976                 eva = sva + L3_PAGE_SIZE;
4977                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
4978                     va < eva; va += PAGE_SIZE, m++) {
4979                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
4980                                 vm_page_dirty(m);
4981                         if (oldpde & PG_A)
4982                                 vm_page_aflag_set(m, PGA_REFERENCED);
4983                         if (TAILQ_EMPTY(&m->md.pv_list) &&
4984                             TAILQ_EMPTY(&pvh->pv_list))
4985                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
4986                 }
4987         }
4988         if (pmap == kernel_pmap) {
4989                 pmap_remove_kernel_l3e(pmap, pdq, sva);
4990         } else {
4991                 mpte = pmap_remove_pt_page(pmap, sva);
4992                 if (mpte != NULL) {
4993                         pmap_resident_count_dec(pmap, 1);
4994                         KASSERT(mpte->ref_count == NPTEPG,
4995                             ("pmap_remove_l3e: pte page wire count error"));
4996                         mpte->ref_count = 0;
4997                         pmap_add_delayed_free_list(mpte, free, FALSE);
4998                 }
4999         }
5000         return (pmap_unuse_pt(pmap, sva, *pmap_pml2e(pmap, sva), free));
5001 }
5002
5003
5004 /*
5005  * pmap_remove_pte: unmap a single 4KB page mapping from a pmap
5006  */
5007 static int
5008 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
5009     pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
5010 {
5011         struct md_page *pvh;
5012         pt_entry_t oldpte;
5013         vm_page_t m;
5014
5015         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5016         oldpte = pte_load_clear(ptq);
5017         if (oldpte & RPTE_WIRED)
5018                 pmap->pm_stats.wired_count -= 1;
5019         pmap_resident_count_dec(pmap, 1);
5020         if (oldpte & RPTE_MANAGED) {
5021                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
5022                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5023                         vm_page_dirty(m);
5024                 if (oldpte & PG_A)
5025                         vm_page_aflag_set(m, PGA_REFERENCED);
5026                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
5027                 pmap_pvh_free(&m->md, pmap, va);
5028                 if (TAILQ_EMPTY(&m->md.pv_list) &&
5029                     (m->flags & PG_FICTITIOUS) == 0) {
5030                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5031                         if (TAILQ_EMPTY(&pvh->pv_list))
5032                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
5033                 }
5034         }
5035         return (pmap_unuse_pt(pmap, va, ptepde, free));
5036 }
5037
5038 /*
5039  * Remove a single page from a process address space
5040  */
5041 static bool
5042 pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *l3e,
5043     struct spglist *free)
5044 {
5045         struct rwlock *lock;
5046         pt_entry_t *pte;
5047         bool invalidate_all;
5048
5049         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5050         if ((*l3e & RPTE_VALID) == 0) {
5051                 return (false);
5052         }
5053         pte = pmap_l3e_to_pte(l3e, va);
5054         if ((*pte & RPTE_VALID) == 0) {
5055                 return (false);
5056         }
5057         lock = NULL;
5058
5059         invalidate_all = pmap_remove_pte(pmap, pte, va, *l3e, free, &lock);
5060         if (lock != NULL)
5061                 rw_wunlock(lock);
5062         if (!invalidate_all)
5063                 pmap_invalidate_page(pmap, va);
5064         return (invalidate_all);
5065 }
5066
5067 /*
5068  * Removes the specified range of addresses from the page table page.
5069  */
5070 static bool
5071 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
5072     pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp)
5073 {
5074         pt_entry_t *pte;
5075         vm_offset_t va;
5076         bool anyvalid;
5077
5078         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5079         anyvalid = false;
5080         va = eva;
5081         for (pte = pmap_l3e_to_pte(l3e, sva); sva != eva; pte++,
5082             sva += PAGE_SIZE) {
5083                 MPASS(pte == pmap_pte(pmap, sva));
5084                 if (*pte == 0) {
5085                         if (va != eva) {
5086                                 anyvalid = true;
5087                                 va = eva;
5088                         }
5089                         continue;
5090                 }
5091                 if (va == eva)
5092                         va = sva;
5093                 if (pmap_remove_pte(pmap, pte, sva, *l3e, free, lockp)) {
5094                         anyvalid = true;
5095                         sva += PAGE_SIZE;
5096                         break;
5097                 }
5098         }
5099         if (anyvalid)
5100                 pmap_invalidate_all(pmap);
5101         else if (va != eva)
5102                 pmap_invalidate_range(pmap, va, sva);
5103         return (anyvalid);
5104 }
5105
5106
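/*
 * Remove every mapping in the virtual address range [sva, eva) from the
 * given pmap, demoting or removing 2MB mappings as needed and freeing any
 * page table pages that become unused.
 */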
5107 void
5108 mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5109 {
5110         struct rwlock *lock;
5111         vm_offset_t va_next;
5112         pml1_entry_t *l1e;
5113         pml2_entry_t *l2e;
5114         pml3_entry_t ptpaddr, *l3e;
5115         struct spglist free;
5116         bool anyvalid;
5117
5118         CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5119
5120         /*
5121          * Perform an unsynchronized read.  This is, however, safe.
5122          */
5123         if (pmap->pm_stats.resident_count == 0)
5124                 return;
5125
5126         anyvalid = false;
5127         SLIST_INIT(&free);
5128
5129         /* XXX something fishy here */
5130         sva = (sva + PAGE_MASK) & ~PAGE_MASK;
5131         eva = (eva + PAGE_MASK) & ~PAGE_MASK;
5132
5133         PMAP_LOCK(pmap);
5134
5135         /*
5136          * Special-case the removal of a single page, a very
5137          * common operation, so that the full loop below can
5138          * be short-circuited.
5139          */
5140         if (sva + PAGE_SIZE == eva) {
5141                 l3e = pmap_pml3e(pmap, sva);
5142                 if (l3e && (*l3e & RPTE_LEAF) == 0) {
5143                         anyvalid = pmap_remove_page(pmap, sva, l3e, &free);
5144                         goto out;
5145                 }
5146         }
5147
5148         lock = NULL;
5149         for (; sva < eva; sva = va_next) {
5150
5151                 if (pmap->pm_stats.resident_count == 0)
5152                         break;
5153                 l1e = pmap_pml1e(pmap, sva);
5154                 if (l1e == NULL || (*l1e & PG_V) == 0) {
5155                         va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5156                         if (va_next < sva)
5157                                 va_next = eva;
5158                         continue;
5159                 }
5160
5161                 l2e = pmap_l1e_to_l2e(l1e, sva);
5162                 if (l2e == NULL || (*l2e & PG_V) == 0) {
5163                         va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5164                         if (va_next < sva)
5165                                 va_next = eva;
5166                         continue;
5167                 }
5168
5169                 /*
5170                  * Calculate index for next page table.
5171                  */
5172                 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5173                 if (va_next < sva)
5174                         va_next = eva;
5175
5176                 l3e = pmap_l2e_to_l3e(l2e, sva);
5177                 ptpaddr = *l3e;
5178
5179                 /*
5180                  * Weed out invalid mappings.
5181                  */
5182                 if (ptpaddr == 0)
5183                         continue;
5184
5185                 /*
5186                  * Check for large page.
5187                  */
5188                 if ((ptpaddr & RPTE_LEAF) != 0) {
5189                         /*
5190                          * Are we removing the entire large page?  If not,
5191                          * demote the mapping and fall through.
5192                          */
5193                         if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5194                                 pmap_remove_l3e(pmap, l3e, sva, &free, &lock);
5195                                 continue;
5196                         } else if (!pmap_demote_l3e_locked(pmap, l3e, sva,
5197                             &lock)) {
5198                                 /* The large page mapping was destroyed. */
5199                                 continue;
5200                         } else
5201                                 ptpaddr = *l3e;
5202                 }
5203
5204                 /*
5205                  * Limit our scan to either the end of the va represented
5206                  * by the current page table page, or to the end of the
5207                  * range being removed.
5208                  */
5209                 if (va_next > eva)
5210                         va_next = eva;
5211
5212                 if (pmap_remove_ptes(pmap, sva, va_next, l3e, &free, &lock))
5213                         anyvalid = true;
5214         }
5215         if (lock != NULL)
5216                 rw_wunlock(lock);
5217 out:
5218         if (anyvalid)
5219                 pmap_invalidate_all(pmap);
5220         PMAP_UNLOCK(pmap);
5221         vm_page_free_pages_toq(&free, true);
5222 }
5223
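/*
 * Remove all mappings of the given managed page from every pmap, first
 * demoting any 2MB mappings that contain it so the 4KB mappings can be
 * removed individually.
 */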
5224 void
5225 mmu_radix_remove_all(vm_page_t m)
5226 {
5227         struct md_page *pvh;
5228         pv_entry_t pv;
5229         pmap_t pmap;
5230         struct rwlock *lock;
5231         pt_entry_t *pte, tpte;
5232         pml3_entry_t *l3e;
5233         vm_offset_t va;
5234         struct spglist free;
5235         int pvh_gen, md_gen;
5236
5237         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5238         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5239             ("pmap_remove_all: page %p is not managed", m));
5240         SLIST_INIT(&free);
5241         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5242         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5243             pa_to_pvh(VM_PAGE_TO_PHYS(m));
5244 retry:
5245         rw_wlock(lock);
5246         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
5247                 pmap = PV_PMAP(pv);
5248                 if (!PMAP_TRYLOCK(pmap)) {
5249                         pvh_gen = pvh->pv_gen;
5250                         rw_wunlock(lock);
5251                         PMAP_LOCK(pmap);
5252                         rw_wlock(lock);
5253                         if (pvh_gen != pvh->pv_gen) {
5254                                 rw_wunlock(lock);
5255                                 PMAP_UNLOCK(pmap);
5256                                 goto retry;
5257                         }
5258                 }
5259                 va = pv->pv_va;
5260                 l3e = pmap_pml3e(pmap, va);
5261                 (void)pmap_demote_l3e_locked(pmap, l3e, va, &lock);
5262                 PMAP_UNLOCK(pmap);
5263         }
5264         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
5265                 pmap = PV_PMAP(pv);
5266                 if (!PMAP_TRYLOCK(pmap)) {
5267                         pvh_gen = pvh->pv_gen;
5268                         md_gen = m->md.pv_gen;
5269                         rw_wunlock(lock);
5270                         PMAP_LOCK(pmap);
5271                         rw_wlock(lock);
5272                         if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
5273                                 rw_wunlock(lock);
5274                                 PMAP_UNLOCK(pmap);
5275                                 goto retry;
5276                         }
5277                 }
5278                 pmap_resident_count_dec(pmap, 1);
5279                 l3e = pmap_pml3e(pmap, pv->pv_va);
5280                 KASSERT((*l3e & RPTE_LEAF) == 0, ("pmap_remove_all: found"
5281                     " a 2mpage in page %p's pv list", m));
5282                 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5283                 tpte = pte_load_clear(pte);
5284                 if (tpte & PG_W)
5285                         pmap->pm_stats.wired_count--;
5286                 if (tpte & PG_A)
5287                         vm_page_aflag_set(m, PGA_REFERENCED);
5288
5289                 /*
5290                  * Update the vm_page_t clean and reference bits.
5291                  */
5292                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5293                         vm_page_dirty(m);
5294                 pmap_unuse_pt(pmap, pv->pv_va, *l3e, &free);
5295                 pmap_invalidate_page(pmap, pv->pv_va);
5296                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5297                 m->md.pv_gen++;
5298                 free_pv_entry(pmap, pv);
5299                 PMAP_UNLOCK(pmap);
5300         }
5301         vm_page_aflag_clear(m, PGA_WRITEABLE);
5302         rw_wunlock(lock);
5303         vm_page_free_pages_toq(&free, true);
5304 }
5305
5306 /*
5307  * Destroy all managed, non-wired mappings in the given user-space
5308  * pmap.  This pmap cannot be active on any processor besides the
5309  * caller.
5310  *
5311  * This function cannot be applied to the kernel pmap.  Moreover, it
5312  * is not intended for general use.  It is only to be used during
5313  * process termination.  Consequently, it can be implemented in ways
5314  * that make it faster than pmap_remove().  First, it can more quickly
5315  * destroy mappings by iterating over the pmap's collection of PV
5316  * entries, rather than searching the page table.  Second, it doesn't
5317  * have to test and clear the page table entries atomically, because
5318  * no processor is currently accessing the user address space.  In
5319  * particular, a page table entry's dirty bit won't change state once
5320  * this function starts.
5321  *
5322  * Although this function destroys all of the pmap's managed,
5323  * non-wired mappings, it can delay and batch the invalidation of TLB
5324  * entries without calling pmap_delayed_invl_started() and
5325  * pmap_delayed_invl_finished().  Because the pmap is not active on
5326  * any other processor, none of these TLB entries will ever be used
5327  * before their eventual invalidation.  Consequently, there is no need
5328  * for either pmap_remove_all() or pmap_remove_write() to wait for
5329  * that eventual TLB invalidation.
5330  */
5331
5332 void
5333 mmu_radix_remove_pages(pmap_t pmap)
5334 {
5335         pml3_entry_t ptel3e;
5336         pt_entry_t *pte, tpte;
5337         struct spglist free;
5338         vm_page_t m, mpte, mt;
5339         pv_entry_t pv;
5340         struct md_page *pvh;
5341         struct pv_chunk *pc, *npc;
5342         struct rwlock *lock;
5343         int64_t bit;
5344         uint64_t inuse, bitmask;
5345         int allfree, field, freed, idx;
5346         boolean_t superpage;
5347         vm_paddr_t pa;
5348
5349         CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
5350
5351         /*
5352          * Assert that the given pmap is only active on the current
5353          * CPU.  Unfortunately, we cannot block another CPU from
5354          * activating the pmap while this function is executing.
5355          */
5356         KASSERT(pmap->pm_pid == mfspr(SPR_PID),
5357             ("non-current asid %lu - expected %lu", pmap->pm_pid,
5358             mfspr(SPR_PID)));
5359
5360         lock = NULL;
5361
5362         SLIST_INIT(&free);
5363         PMAP_LOCK(pmap);
5364         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
5365                 allfree = 1;
5366                 freed = 0;
5367                 for (field = 0; field < _NPCM; field++) {
5368                         inuse = ~pc->pc_map[field] & pc_freemask[field];
5369                         while (inuse != 0) {
5370                                 bit = cnttzd(inuse);
5371                                 bitmask = 1UL << bit;
5372                                 idx = field * 64 + bit;
5373                                 pv = &pc->pc_pventry[idx];
5374                                 inuse &= ~bitmask;
5375
5376                                 pte = pmap_pml2e(pmap, pv->pv_va);
5377                                 ptel3e = *pte;
5378                                 pte = pmap_l2e_to_l3e(pte, pv->pv_va);
5379                                 tpte = *pte;
5380                                 if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) {
5381                                         superpage = FALSE;
5382                                         ptel3e = tpte;
5383                                         pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
5384                                             PG_FRAME);
5385                                         pte = &pte[pmap_pte_index(pv->pv_va)];
5386                                         tpte = *pte;
5387                                 } else {
5388                                         /*
5389                                          * Keep track whether 'tpte' is a
5390                                          * superpage explicitly instead of
5391                                          * relying on RPTE_LEAF being set.
5392                                          *
5393                                          * This is because RPTE_LEAF is numerically
5394                                          * identical to PG_PTE_PAT and thus a
5395                                          * regular page could be mistaken for
5396                                          * a superpage.
5397                                          */
5398                                         superpage = TRUE;
5399                                 }
5400
5401                                 if ((tpte & PG_V) == 0) {
5402                                         panic("bad pte va %lx pte %lx",
5403                                             pv->pv_va, tpte);
5404                                 }
5405
5406 /*
5407  * We cannot remove wired pages from a process' mapping at this time
5408  */
5409                                 if (tpte & PG_W) {
5410                                         allfree = 0;
5411                                         continue;
5412                                 }
5413
5414                                 if (superpage)
5415                                         pa = tpte & PG_PS_FRAME;
5416                                 else
5417                                         pa = tpte & PG_FRAME;
5418
5419                                 m = PHYS_TO_VM_PAGE(pa);
5420                                 KASSERT(m->phys_addr == pa,
5421                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
5422                                     m, (uintmax_t)m->phys_addr,
5423                                     (uintmax_t)tpte));
5424
5425                                 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
5426                                     m < &vm_page_array[vm_page_array_size],
5427                                     ("pmap_remove_pages: bad tpte %#jx",
5428                                     (uintmax_t)tpte));
5429
5430                                 pte_clear(pte);
5431
5432                                 /*
5433                                  * Update the vm_page_t clean/reference bits.
5434                                  */
5435                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
5436                                         if (superpage) {
5437                                                 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5438                                                         vm_page_dirty(mt);
5439                                         } else
5440                                                 vm_page_dirty(m);
5441                                 }
5442
5443                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
5444
5445                                 /* Mark free */
5446                                 pc->pc_map[field] |= bitmask;
5447                                 if (superpage) {
5448                                         pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE);
5449                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
5450                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_link);
5451                                         pvh->pv_gen++;
5452                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
5453                                                 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++)
5454                                                         if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
5455                                                             TAILQ_EMPTY(&mt->md.pv_list))
5456                                                                 vm_page_aflag_clear(mt, PGA_WRITEABLE);
5457                                         }
5458                                         mpte = pmap_remove_pt_page(pmap, pv->pv_va);
5459                                         if (mpte != NULL) {
5460                                                 pmap_resident_count_dec(pmap, 1);
5461                                                 KASSERT(mpte->ref_count == NPTEPG,
5462                                                     ("pmap_remove_pages: pte page wire count error"));
5463                                                 mpte->ref_count = 0;
5464                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
5465                                         }
5466                                 } else {
5467                                         pmap_resident_count_dec(pmap, 1);
5468 #ifdef VERBOSE_PV
5469                                         printf("freeing pv (%p, %p)\n",
5470                                                    pmap, pv);
5471 #endif
5472                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_link);
5473                                         m->md.pv_gen++;
5474                                         if ((m->a.flags & PGA_WRITEABLE) != 0 &&
5475                                             TAILQ_EMPTY(&m->md.pv_list) &&
5476                                             (m->flags & PG_FICTITIOUS) == 0) {
5477                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5478                                                 if (TAILQ_EMPTY(&pvh->pv_list))
5479                                                         vm_page_aflag_clear(m, PGA_WRITEABLE);
5480                                         }
5481                                 }
5482                                 pmap_unuse_pt(pmap, pv->pv_va, ptel3e, &free);
5483                                 freed++;
5484                         }
5485                 }
5486                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
5487                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
5488                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
5489                 if (allfree) {
5490                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
5491                         free_pv_chunk(pc);
5492                 }
5493         }
5494         if (lock != NULL)
5495                 rw_wunlock(lock);
5496         pmap_invalidate_all(pmap);
5497         PMAP_UNLOCK(pmap);
5498         vm_page_free_pages_toq(&free, true);
5499 }
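/*
 * Illustrative note on the bookkeeping above (a sketch, assuming the usual
 * pv_chunk layout shared with amd64): each chunk tracks its pv entries in
 * the pc_map[] bitmap, so the "Mark free" step sets the bit corresponding
 * to the entry just released, and once every bit in the chunk is set again
 * (allfree) the whole chunk is handed back via free_pv_chunk().
 */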
5500
5501 void
5502 mmu_radix_remove_write(vm_page_t m)
5503 {
5504         struct md_page *pvh;
5505         pmap_t pmap;
5506         struct rwlock *lock;
5507         pv_entry_t next_pv, pv;
5508         pml3_entry_t *l3e;
5509         pt_entry_t oldpte, *pte;
5510         int pvh_gen, md_gen;
5511
5512         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5513         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5514             ("pmap_remove_write: page %p is not managed", m));
5515         vm_page_assert_busied(m);
5516
5517         if (!pmap_page_is_write_mapped(m))
5518                 return;
5519         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
5520         pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
5521             pa_to_pvh(VM_PAGE_TO_PHYS(m));
5522 retry_pv_loop:
5523         rw_wlock(lock);
5524         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) {
5525                 pmap = PV_PMAP(pv);
5526                 if (!PMAP_TRYLOCK(pmap)) {
5527                         pvh_gen = pvh->pv_gen;
5528                         rw_wunlock(lock);
5529                         PMAP_LOCK(pmap);
5530                         rw_wlock(lock);
5531                         if (pvh_gen != pvh->pv_gen) {
5532                                 PMAP_UNLOCK(pmap);
5533                                 rw_wunlock(lock);
5534                                 goto retry_pv_loop;
5535                         }
5536                 }
5537                 l3e = pmap_pml3e(pmap, pv->pv_va);
5538                 if ((*l3e & PG_RW) != 0)
5539                         (void)pmap_demote_l3e_locked(pmap, l3e, pv->pv_va, &lock);
5540                 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
5541                     ("inconsistent pv lock %p %p for page %p",
5542                     lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
5543                 PMAP_UNLOCK(pmap);
5544         }
5545         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
5546                 pmap = PV_PMAP(pv);
5547                 if (!PMAP_TRYLOCK(pmap)) {
5548                         pvh_gen = pvh->pv_gen;
5549                         md_gen = m->md.pv_gen;
5550                         rw_wunlock(lock);
5551                         PMAP_LOCK(pmap);
5552                         rw_wlock(lock);
5553                         if (pvh_gen != pvh->pv_gen ||
5554                             md_gen != m->md.pv_gen) {
5555                                 PMAP_UNLOCK(pmap);
5556                                 rw_wunlock(lock);
5557                                 goto retry_pv_loop;
5558                         }
5559                 }
5560                 l3e = pmap_pml3e(pmap, pv->pv_va);
5561                 KASSERT((*l3e & RPTE_LEAF) == 0,
5562                     ("pmap_remove_write: found a 2mpage in page %p's pv list",
5563                     m));
5564                 pte = pmap_l3e_to_pte(l3e, pv->pv_va);
5565 retry:
5566                 oldpte = *pte;
5567                 if (oldpte & PG_RW) {
5568                         if (!atomic_cmpset_long(pte, oldpte,
5569                             (oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M)))
5570                                 goto retry;
5571                         if ((oldpte & PG_M) != 0)
5572                                 vm_page_dirty(m);
5573                         pmap_invalidate_page(pmap, pv->pv_va);
5574                 }
5575                 PMAP_UNLOCK(pmap);
5576         }
5577         rw_wunlock(lock);
5578         vm_page_aflag_clear(m, PGA_WRITEABLE);
5579 }
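/*
 * Illustrative sketch (not part of the file): the write-protect step above
 * reduces to a read-modify-write of the PTE that must be retried if another
 * CPU updates the entry (for example, hardware setting PG_M) between the
 * read and the compare-and-swap.  The hypothetical helper below shows only
 * that step; the pv-list lock and the pmap lock are assumed to be held by
 * the caller, as in mmu_radix_remove_write().
 */
#if 0	/* example only */
static void
example_pte_clear_write(pmap_t pmap, pt_entry_t *pte, vm_offset_t va,
    vm_page_t m)
{
        pt_entry_t oldpte;

        do {
                oldpte = *pte;
                if ((oldpte & PG_RW) == 0)
                        return;         /* already read-only */
        } while (!atomic_cmpset_long(pte, oldpte,
            (oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M)));
        if ((oldpte & PG_M) != 0)
                vm_page_dirty(m);       /* preserve the modified state */
        pmap_invalidate_page(pmap, va);
}
#endif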
5580
5581 /*
5582  *      Clear the wired attribute from the mappings for the specified range of
5583  *      addresses in the given pmap.  Every valid mapping within that range
5584  *      must have the wired attribute set.  In contrast, invalid mappings
5585  *      cannot have the wired attribute set, so they are ignored.
5586  *
5587  *      The wired attribute of the page table entry is not a hardware
5588  *      feature, so there is no need to invalidate any TLB entries.
5589  *      Since pmap_demote_l3e() for the wired entry must never fail,
5590  *      pmap_delayed_invl_started()/finished() calls around the
5591  *      function are not needed.
5592  */
5593 void
5594 mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5595 {
5596         vm_offset_t va_next;
5597         pml1_entry_t *l1e;
5598         pml2_entry_t *l2e;
5599         pml3_entry_t *l3e;
5600         pt_entry_t *pte;
5601
5602         CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva);
5603         PMAP_LOCK(pmap);
5604         for (; sva < eva; sva = va_next) {
5605                 l1e = pmap_pml1e(pmap, sva);
5606                 if ((*l1e & PG_V) == 0) {
5607                         va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK;
5608                         if (va_next < sva)
5609                                 va_next = eva;
5610                         continue;
5611                 }
5612                 l2e = pmap_l1e_to_l2e(l1e, sva);
5613                 if ((*l2e & PG_V) == 0) {
5614                         va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK;
5615                         if (va_next < sva)
5616                                 va_next = eva;
5617                         continue;
5618                 }
5619                 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK;
5620                 if (va_next < sva)
5621                         va_next = eva;
5622                 l3e = pmap_l2e_to_l3e(l2e, sva);
5623                 if ((*l3e & PG_V) == 0)
5624                         continue;
5625                 if ((*l3e & RPTE_LEAF) != 0) {
5626                         if ((*l3e & PG_W) == 0)
5627                                 panic("pmap_unwire: pde %#jx is missing PG_W",
5628                                     (uintmax_t)*l3e);
5629
5630                         /*
5631                          * Are we unwiring the entire large page?  If not,
5632                          * demote the mapping and fall through.
5633                          */
5634                         if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
5635                                 atomic_clear_long(l3e, PG_W);
5636                                 pmap->pm_stats.wired_count -= L3_PAGE_SIZE /
5637                                     PAGE_SIZE;
5638                                 continue;
5639                         } else if (!pmap_demote_l3e(pmap, l3e, sva))
5640                                 panic("pmap_unwire: demotion failed");
5641                 }
5642                 if (va_next > eva)
5643                         va_next = eva;
5644                 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++,
5645                     sva += PAGE_SIZE) {
5646                         MPASS(pte == pmap_pte(pmap, sva));
5647                         if ((*pte & PG_V) == 0)
5648                                 continue;
5649                         if ((*pte & PG_W) == 0)
5650                                 panic("pmap_unwire: pte %#jx is missing PG_W",
5651                                     (uintmax_t)*pte);
5652
5653                         /*
5654                          * PG_W must be cleared atomically.  Although the pmap
5655                          * lock synchronizes access to PG_W, another processor
5656                          * could be setting PG_M and/or PG_A concurrently.
5657                          */
5658                         atomic_clear_long(pte, PG_W);
5659                         pmap->pm_stats.wired_count--;
5660                 }
5661         }
5662         PMAP_UNLOCK(pmap);
5663 }
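/*
 * Worked example of the va_next stepping used above (values are
 * illustrative): with L3_PAGE_SIZE = 2MB (0x200000) and sva = 0x10035000,
 * va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK = 0x10200000, so each
 * outer iteration covers at most one L3 slot, and the (va_next < sva) test
 * guards against wrapping past the top of the address space.
 */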
5664
5665 void
5666 mmu_radix_zero_page(vm_page_t m)
5667 {
5668         vm_offset_t addr;
5669
5670         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
5671         addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5672         pagezero(addr);
5673 }
5674
5675 void
5676 mmu_radix_zero_page_area(vm_page_t m, int off, int size)
5677 {
5678         caddr_t addr;
5679
5680         CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
5681         MPASS(off + size <= PAGE_SIZE);
5682         addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
5683         memset(addr + off, 0, size);
5684 }
5685
5686
5687
5688
5689 static int
5690 mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
5691 {
5692         pml3_entry_t *l3ep;
5693         pt_entry_t pte;
5694         vm_paddr_t pa;
5695         int val;
5696
5697         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
5698         PMAP_LOCK(pmap);
5699
5700         l3ep = pmap_pml3e(pmap, addr);
5701         if (l3ep != NULL && (*l3ep & PG_V)) {
5702                 if (*l3ep & RPTE_LEAF) {
5703                         pte = *l3ep;
5704                         /* Compute the physical address of the 4KB page. */
5705                         pa = ((*l3ep & PG_PS_FRAME) | (addr & L3_PAGE_MASK)) &
5706                             PG_FRAME;
5707                         val = MINCORE_SUPER;
5708                 } else {
5709                         pte = *pmap_l3e_to_pte(l3ep, addr);
5710                         pa = pte & PG_FRAME;
5711                         val = 0;
5712                 }
5713         } else {
5714                 pte = 0;
5715                 pa = 0;
5716                 val = 0;
5717         }
5718         if ((pte & PG_V) != 0) {
5719                 val |= MINCORE_INCORE;
5720                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
5721                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
5722                 if ((pte & PG_A) != 0)
5723                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
5724         }
5725         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
5726             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
5727             (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
5728                 *locked_pa = pa;
5729         }
5730         PMAP_UNLOCK(pmap);
5731         return (val);
5732 }
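/*
 * Illustrative decoding of the value computed above: a resident, dirty,
 * writable 4KB mapping yields MINCORE_INCORE | MINCORE_MODIFIED |
 * MINCORE_MODIFIED_OTHER, and a mapping backed by a 2MB leaf additionally
 * carries MINCORE_SUPER; *locked_pa is only filled in for managed, valid
 * mappings whose "other" state may still need to be gathered from the pv
 * lists.
 */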
5733
5734 void
5735 mmu_radix_activate(struct thread *td)
5736 {
5737         pmap_t pmap;
5738         uint32_t curpid;
5739
5740         CTR2(KTR_PMAP, "%s(%p)", __func__, td);
5741         critical_enter();
5742         pmap = vmspace_pmap(td->td_proc->p_vmspace);
5743         curpid = mfspr(SPR_PID);
5744         if (pmap->pm_pid > isa3_base_pid &&
5745             curpid != pmap->pm_pid) {
5746                 mmu_radix_pid_set(pmap);
5747         }
5748         critical_exit();
5749 }
5750
5751 /*
5752  *      Increase the starting virtual address of the given mapping if a
5753  *      different alignment might result in more superpage mappings.
5754  */
5755 void
5756 mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
5757     vm_offset_t *addr, vm_size_t size)
5758 {
5759
5760         vm_offset_t superpage_offset;
5761
5762         CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
5763             size);
5764         if (size < L3_PAGE_SIZE)
5765                 return;
5766         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
5767                 offset += ptoa(object->pg_color);
5768         superpage_offset = offset & L3_PAGE_MASK;
5769         if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE ||
5770             (*addr & L3_PAGE_MASK) == superpage_offset)
5771                 return;
5772         if ((*addr & L3_PAGE_MASK) < superpage_offset)
5773                 *addr = (*addr & ~L3_PAGE_MASK) + superpage_offset;
5774         else
5775                 *addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset;
5776 }
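/*
 * Worked example (hypothetical values): for an anonymous mapping
 * (superpage_offset == 0) of 16MB with a hint of *addr = 0x10035000, the
 * code above rounds the hint up to 0x10200000 so that most of the range can
 * later be promoted to 2MB pages; requests smaller than L3_PAGE_SIZE are
 * left untouched.
 */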
5777
5778 static void *
5779 mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
5780 {
5781         vm_offset_t va, tmpva, ppa, offset;
5782
5783         ppa = trunc_page(pa);
5784         offset = pa & PAGE_MASK;
5785         size = roundup2(offset + size, PAGE_SIZE);
5786         if (pa < powerpc_ptob(Maxmem))
5787                 panic("bad pa: %#lx less than Maxmem %#lx\n",
5788                           pa, powerpc_ptob(Maxmem));
5789         va = kva_alloc(size);
5790         if (bootverbose)
5791                 printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr);
5792         KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr));
5793
5794         if (!va)
5795                 panic("%s: Couldn't alloc kernel virtual memory", __func__);
5796
5797         for (tmpva = va; size > 0;) {
5798                 mmu_radix_kenter_attr(tmpva, ppa, attr);
5799                 size -= PAGE_SIZE;
5800                 tmpva += PAGE_SIZE;
5801                 ppa += PAGE_SIZE;
5802         }
5803         ptesync();
5804
5805         return ((void *)(va + offset));
5806 }
5807
5808 static void *
5809 mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
5810 {
5811
5812         CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
5813
5814         return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
5815 }
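/*
 * Illustrative use of the two routines above (the device address is
 * hypothetical): a driver mapping a page of MMIO registers normally arrives
 * here through pmap_mapdev_attr()/bus_space, e.g.
 *
 *	regs = mmu_radix_mapdev_attr(0x3fe000000000UL, PAGE_SIZE,
 *	    VM_MEMATTR_UNCACHEABLE);
 *	...
 *	mmu_radix_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 *
 * Physical addresses below powerpc_ptob(Maxmem) are rejected because RAM is
 * already reachable through the direct map.
 */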
5816
5817 void
5818 mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5819 {
5820
5821         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
5822         m->md.mdpg_cache_attrs = ma;
5823
5824         /*
5825          * If "m" is a normal page, update its direct mapping.  This update
5826          * can be relied upon to perform any cache operations that are
5827          * required for data coherence.
5828          */
5829         if ((m->flags & PG_FICTITIOUS) == 0 &&
5830             mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
5831             PAGE_SIZE, m->md.mdpg_cache_attrs))
5832                 panic("memory attribute change on the direct map failed");
5833 }
5834
5835 static void
5836 mmu_radix_unmapdev(vm_offset_t va, vm_size_t size)
5837 {
5838         vm_offset_t offset;
5839
5840         CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
5841         /* If we gave a direct map region in pmap_mapdev, do nothing */
5842         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
5843                 return;
5844
5845         offset = va & PAGE_MASK;
5846         size = round_page(offset + size);
5847         va = trunc_page(va);
5848
5849         if (pmap_initialized)
5850                 kva_free(va, size);
5851 }
5852
5853 static __inline void
5854 pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask)
5855 {
5856         uint64_t opte, npte;
5857
5858         /*
5859          * Update the attribute bits with a full 64-bit compare-and-swap
5860          * so that concurrent updates of other PTE bits (e.g. PG_A/PG_M)
5861          * are not lost.
5862          */
5862         do {
5863                 opte = *pte;
5864                 npte = opte & ~mask;
5865                 npte |= cache_bits;
5866         } while (npte != opte && !atomic_cmpset_long(pte, opte, npte));
5867 }
5868
5869 /*
5870  * Tries to demote a 1GB page mapping.
5871  */
5872 static boolean_t
5873 pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
5874 {
5875         pml2_entry_t oldpdpe;
5876         pml3_entry_t *firstpde, newpde, *pde;
5877         vm_paddr_t pdpgpa;
5878         vm_page_t pdpg;
5879
5880         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
5881         oldpdpe = *l2e;
5882         KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
5883             ("pmap_demote_pdpe: oldpdpe is missing RPTE_LEAF and/or PG_V"));
5884         pdpg = vm_page_alloc(NULL, va >> L2_PAGE_SIZE_SHIFT,
5885             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
5886         if (pdpg == NULL) {
5887                 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
5888                     " in pmap %p", va, pmap);
5889                 return (FALSE);
5890         }
5891         pdpgpa = VM_PAGE_TO_PHYS(pdpg);
5892         firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
5893         KASSERT((oldpdpe & PG_A) != 0,
5894             ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
5895         KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
5896             ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
5897         newpde = oldpdpe;
5898
5899         /*
5900          * Initialize the page directory page.
5901          */
5902         for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
5903                 *pde = newpde;
5904                 newpde += L3_PAGE_SIZE;
5905         }
5906
5907         /*
5908          * Demote the mapping.
5909          */
5910         pde_store(l2e, pdpgpa);
5911
5912         /*
5913          * Flush PWC --- XXX revisit
5914          */
5915         pmap_invalidate_all(pmap);
5916
5917         pmap_l2e_demotions++;
5918         CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
5919             " in pmap %p", va, pmap);
5920         return (TRUE);
5921 }
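/*
 * Illustrative arithmetic for the demotion above: the freshly allocated
 * directory page holds NPDEPG (512) l3 entries and newpde is advanced by
 * L3_PAGE_SIZE (2MB) per slot, so the 512 x 2MB leaves exactly re-cover the
 * original 1GB mapping while preserving the attribute, reference and
 * modified bits of the old entry.
 */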
5922
5923 vm_paddr_t
5924 mmu_radix_kextract(vm_offset_t va)
5925 {
5926         pml3_entry_t l3e;
5927         vm_paddr_t pa;
5928
5929         CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
5930         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
5931                 pa = DMAP_TO_PHYS(va);
5932         } else {
5933                 l3e = *pmap_pml3e(kernel_pmap, va);
5934                 if (l3e & RPTE_LEAF) {
5935                         pa = (l3e & PG_PS_FRAME) |
5936                             (va & L3_PAGE_MASK);
5937                 } else {
5938                         /*
5939                          * Beware of a concurrent promotion that changes the
5940                          * PDE at this point!  For example, vtopte() must not
5941                          * be used to access the PTE because it would use the
5942                          * new PDE.  It is, however, safe to use the old PDE
5943                          * because the page table page is preserved by the
5944                          * promotion.
5945                          */
5946                         pa = *pmap_l3e_to_pte(&l3e, va);
5947                         pa = (pa & PG_FRAME) |
5948                             (va & PAGE_MASK);
5949                 }
5950         }
5951         return (pa);
5952 }
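/*
 * Illustrative use (the physical address is hypothetical): for a direct-map
 * address the translation above is a constant-time subtraction, e.g.
 *
 *	pa = mmu_radix_kextract(PHYS_TO_DMAP(0x2000000));
 *
 * returns 0x2000000 without walking the kernel page table; only addresses
 * outside the DMAP window take the table-walk path.
 */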
5953
5954 static pt_entry_t
5955 mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
5956 {
5957
5958         if (ma != VM_MEMATTR_DEFAULT) {
5959                 return pmap_cache_bits(ma);
5960         }
5961
5962         /*
5963          * Assume the page is cache inhibited and access is guarded unless
5964          * it's in our available memory array.
5965          */
5966         for (int i = 0; i < pregions_sz; i++) {
5967                 if ((pa >= pregions[i].mr_start) &&
5968                     (pa < (pregions[i].mr_start + pregions[i].mr_size)))
5969                         return (RPTE_ATTR_MEM);
5970         }
5971         return (RPTE_ATTR_GUARDEDIO);
5972 }
5973
5974 static void
5975 mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
5976 {
5977         pt_entry_t *pte, pteval;
5978         uint64_t cache_bits;
5979
5980         pte = kvtopte(va);
5981         MPASS(pte != NULL);
5982         pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A;
5983         cache_bits = mmu_radix_calc_wimg(pa, ma);
5984         pte_store(pte, pteval | cache_bits);
5985 }
5986
5987 void
5988 mmu_radix_kremove(vm_offset_t va)
5989 {
5990         pt_entry_t *pte;
5991
5992         CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
5993
5994         pte = kvtopte(va);
5995         pte_clear(pte);
5996 }
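/*
 * Illustrative pairing of the two helpers above (va and pa are hypothetical
 * page-aligned kernel virtual/physical addresses):
 *
 *	mmu_radix_kenter_attr(va, pa, VM_MEMATTR_UNCACHEABLE);
 *	ptesync();
 *	...
 *	mmu_radix_kremove(va);
 *
 * As in mmu_radix_mapdev_attr() above, the PTE store is made visible with a
 * ptesync() before the mapping is used; neither helper manages the KVA
 * itself.
 */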
5997
5998 int
5999 mmu_radix_decode_kernel_ptr(vm_offset_t addr,
6000     int *is_user, vm_offset_t *decoded)
6001 {
6002
6003         CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
6004         *decoded = addr;
6005         *is_user = (addr < VM_MAXUSER_ADDRESS);
6006         return (0);
6007 }
6008
6009 static boolean_t
6010 mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
6011 {
6012
6013         CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
6014         return (mem_valid(pa, size));
6015 }
6016
6017 static void
6018 mmu_radix_scan_init(void)
6019 {
6020
6021         CTR1(KTR_PMAP, "%s()", __func__);
6022         UNIMPLEMENTED();
6023 }
6024
6025 static void
6026 mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
6027         void **va)
6028 {
6029         CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
6030         UNIMPLEMENTED();
6031 }
6032
6033 vm_offset_t
6034 mmu_radix_quick_enter_page(vm_page_t m)
6035 {
6036         vm_paddr_t paddr;
6037
6038         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
6039         paddr = VM_PAGE_TO_PHYS(m);
6040         return (PHYS_TO_DMAP(paddr));
6041 }
6042
6043 void
6044 mmu_radix_quick_remove_page(vm_offset_t addr __unused)
6045 {
6046         /* no work to do here */
6047         CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
6048 }
6049
6050 static void
6051 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
6052 {
6053         cpu_flush_dcache((void *)sva, eva - sva);
6054 }
6055
6056 int
6057 mmu_radix_change_attr(vm_offset_t va, vm_size_t size,
6058     vm_memattr_t mode)
6059 {
6060         int error;
6061
6062         CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode);
6063         PMAP_LOCK(kernel_pmap);
6064         error = pmap_change_attr_locked(va, size, mode, true);
6065         PMAP_UNLOCK(kernel_pmap);
6066         return (error);
6067 }
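/*
 * Illustrative use (fb_pa and fb_size are hypothetical): a driver that
 * accesses a buffer through the direct map could make it uncacheable with
 *
 *	error = mmu_radix_change_attr(PHYS_TO_DMAP(fb_pa), fb_size,
 *	    VM_MEMATTR_UNCACHEABLE);
 *
 * The locked helper below first demotes any 1GB or 2MB leaf that only
 * partially overlaps the range, then rewrites the attribute bits and
 * mirrors the change onto the direct-map alias of the same physical pages.
 */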
6068
6069 static int
6070 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
6071 {
6072         vm_offset_t base, offset, tmpva;
6073         vm_paddr_t pa_start, pa_end, pa_end1;
6074         pml2_entry_t *l2e;
6075         pml3_entry_t *l3e;
6076         pt_entry_t *pte;
6077         int cache_bits, error;
6078         boolean_t changed;
6079
6080         PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
6081         base = trunc_page(va);
6082         offset = va & PAGE_MASK;
6083         size = round_page(offset + size);
6084
6085         /*
6086          * Only supported on kernel virtual addresses, including the direct
6087          * map but excluding the recursive map.
6088          */
6089         if (base < DMAP_MIN_ADDRESS)
6090                 return (EINVAL);
6091
6092         cache_bits = pmap_cache_bits(mode);
6093         changed = FALSE;
6094
6095         /*
6096          * Pages that aren't mapped aren't supported.  Also break down 2MB pages
6097          * into 4KB pages if required.
6098          */
6099         for (tmpva = base; tmpva < base + size; ) {
6100                 l2e = pmap_pml2e(kernel_pmap, tmpva);
6101                 if (l2e == NULL || *l2e == 0)
6102                         return (EINVAL);
6103                 if (*l2e & RPTE_LEAF) {
6104                         /*
6105                          * If the current 1GB page already has the required
6106                          * memory type, then we need not demote this page. Just
6107                          * increment tmpva to the next 1GB page frame.
6108                          */
6109                         if ((*l2e & RPTE_ATTR_MASK) == cache_bits) {
6110                                 tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6111                                 continue;
6112                         }
6113
6114                         /*
6115                          * If the current offset aligns with a 1GB page frame
6116                          * and there is at least 1GB left within the range, then
6117                          * we need not break down this page into 2MB pages.
6118                          */
6119                         if ((tmpva & L2_PAGE_MASK) == 0 &&
6120                             tmpva + L2_PAGE_MASK < base + size) {
6121                                 tmpva += L2_PAGE_SIZE;
6122                                 continue;
6123                         }
6124                         if (!pmap_demote_l2e(kernel_pmap, l2e, tmpva))
6125                                 return (ENOMEM);
6126                 }
6127                 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6128                 KASSERT(l3e != NULL, ("no l3e entry for %#lx in %p\n",
6129                     tmpva, l2e));
6130                 if (*l3e == 0)
6131                         return (EINVAL);
6132                 if (*l3e & RPTE_LEAF) {
6133                         /*
6134                          * If the current 2MB page already has the required
6135                          * memory type, then we need not demote this page. Just
6136                          * increment tmpva to the next 2MB page frame.
6137                          */
6138                         if ((*l3e & RPTE_ATTR_MASK) == cache_bits) {
6139                                 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6140                                 continue;
6141                         }
6142
6143                         /*
6144                          * If the current offset aligns with a 2MB page frame
6145                          * and there is at least 2MB left within the range, then
6146                          * we need not break down this page into 4KB pages.
6147                          */
6148                         if ((tmpva & L3_PAGE_MASK) == 0 &&
6149                             tmpva + L3_PAGE_MASK < base + size) {
6150                                 tmpva += L3_PAGE_SIZE;
6151                                 continue;
6152                         }
6153                         if (!pmap_demote_l3e(kernel_pmap, l3e, tmpva))
6154                                 return (ENOMEM);
6155                 }
6156                 pte = pmap_l3e_to_pte(l3e, tmpva);
6157                 if (*pte == 0)
6158                         return (EINVAL);
6159                 tmpva += PAGE_SIZE;
6160         }
6161         error = 0;
6162
6163         /*
6164          * Ok, all the pages exist, so run through them updating their
6165          * cache mode if required.
6166          */
6167         pa_start = pa_end = 0;
6168         for (tmpva = base; tmpva < base + size; ) {
6169                 l2e = pmap_pml2e(kernel_pmap, tmpva);
6170                 if (*l2e & RPTE_LEAF) {
6171                         if ((*l2e & RPTE_ATTR_MASK) != cache_bits) {
6172                                 pmap_pte_attr(l2e, cache_bits,
6173                                     RPTE_ATTR_MASK);
6174                                 changed = TRUE;
6175                         }
6176                         if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6177                             (*l2e & PG_PS_FRAME) < dmaplimit) {
6178                                 if (pa_start == pa_end) {
6179                                         /* Start physical address run. */
6180                                         pa_start = *l2e & PG_PS_FRAME;
6181                                         pa_end = pa_start + L2_PAGE_SIZE;
6182                                 } else if (pa_end == (*l2e & PG_PS_FRAME))
6183                                         pa_end += L2_PAGE_SIZE;
6184                                 else {
6185                                         /* Run ended, update direct map. */
6186                                         error = pmap_change_attr_locked(
6187                                             PHYS_TO_DMAP(pa_start),
6188                                             pa_end - pa_start, mode, flush);
6189                                         if (error != 0)
6190                                                 break;
6191                                         /* Start physical address run. */
6192                                         pa_start = *l2e & PG_PS_FRAME;
6193                                         pa_end = pa_start + L2_PAGE_SIZE;
6194                                 }
6195                         }
6196                         tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
6197                         continue;
6198                 }
6199                 l3e = pmap_l2e_to_l3e(l2e, tmpva);
6200                 if (*l3e & RPTE_LEAF) {
6201                         if ((*l3e & RPTE_ATTR_MASK) != cache_bits) {
6202                                 pmap_pte_attr(l3e, cache_bits,
6203                                     RPTE_ATTR_MASK);
6204                                 changed = TRUE;
6205                         }
6206                         if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6207                             (*l3e & PG_PS_FRAME) < dmaplimit) {
6208                                 if (pa_start == pa_end) {
6209                                         /* Start physical address run. */
6210                                         pa_start = *l3e & PG_PS_FRAME;
6211                                         pa_end = pa_start + L3_PAGE_SIZE;
6212                                 } else if (pa_end == (*l3e & PG_PS_FRAME))
6213                                         pa_end += L3_PAGE_SIZE;
6214                                 else {
6215                                         /* Run ended, update direct map. */
6216                                         error = pmap_change_attr_locked(
6217                                             PHYS_TO_DMAP(pa_start),
6218                                             pa_end - pa_start, mode, flush);
6219                                         if (error != 0)
6220                                                 break;
6221                                         /* Start physical address run. */
6222                                         pa_start = *l3e & PG_PS_FRAME;
6223                                         pa_end = pa_start + L3_PAGE_SIZE;
6224                                 }
6225                         }
6226                         tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
6227                 } else {
6228                         pte = pmap_l3e_to_pte(l3e, tmpva);
6229                         if ((*pte & RPTE_ATTR_MASK) != cache_bits) {
6230                                 pmap_pte_attr(pte, cache_bits,
6231                                     RPTE_ATTR_MASK);
6232                                 changed = TRUE;
6233                         }
6234                         if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
6235                             (*pte & PG_FRAME) < dmaplimit) {
6236                                 if (pa_start == pa_end) {
6237                                         /* Start physical address run. */
6238                                         pa_start = *pte & PG_FRAME;
6239                                         pa_end = pa_start + PAGE_SIZE;
6240                                 } else if (pa_end == (*pte & PG_FRAME))
6241                                         pa_end += PAGE_SIZE;
6242                                 else {
6243                                         /* Run ended, update direct map. */
6244                                         error = pmap_change_attr_locked(
6245                                             PHYS_TO_DMAP(pa_start),
6246                                             pa_end - pa_start, mode, flush);
6247                                         if (error != 0)
6248                                                 break;
6249                                         /* Start physical address run. */
6250                                         pa_start = *pte & PG_FRAME;
6251                                         pa_end = pa_start + PAGE_SIZE;
6252                                 }
6253                         }
6254                         tmpva += PAGE_SIZE;
6255                 }
6256         }
6257         if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
6258                 pa_end1 = MIN(pa_end, dmaplimit);
6259                 if (pa_start != pa_end1)
6260                         error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
6261                             pa_end1 - pa_start, mode, flush);
6262         }
6263
6264         /*
6265          * Flush CPU caches if required to make sure any data isn't cached that
6266          * shouldn't be, etc.
6267          */
6268         if (changed) {
6269                 pmap_invalidate_all(kernel_pmap);
6270
6271                 if (flush)
6272                         pmap_invalidate_cache_range(base, tmpva);
6273
6274         }
6275         return (error);
6276 }
6277
6278 /*
6279  * Allocate physical memory for the vm_page array and map it into KVA,
6280  * attempting to back the vm_pages with domain-local memory.
6281  */
6282 void
6283 mmu_radix_page_array_startup(long pages)
6284 {
6285 #ifdef notyet
6286         pml2_entry_t *l2e;
6287         pml3_entry_t *pde;
6288         pml3_entry_t newl3;
6289         vm_offset_t va;
6290         long pfn;
6291         int domain, i;
6292 #endif
6293         vm_paddr_t pa;
6294         vm_offset_t start, end;
6295
6296         vm_page_array_size = pages;
6297
6298         start = VM_MIN_KERNEL_ADDRESS;
6299         end = start + pages * sizeof(struct vm_page);
6300
6301         pa = vm_phys_early_alloc(0, end - start);
6302
6303         start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
6304 #ifdef notyet
6305         /* TODO: NUMA vm_page_array.  Blocked out until then (copied from amd64). */
6306         for (va = start; va < end; va += L3_PAGE_SIZE) {
6307                 pfn = first_page + (va - start) / sizeof(struct vm_page);
6308                 domain = _vm_phys_domain(ptoa(pfn));
6309                 l2e = pmap_pml2e(kernel_pmap, va);
6310                 if ((*l2e & PG_V) == 0) {
6311                         pa = vm_phys_early_alloc(domain, PAGE_SIZE);
6312                         dump_add_page(pa);
6313                         pagezero(PHYS_TO_DMAP(pa));
6314                         pde_store(l2e, (pml2_entry_t)pa);
6315                 }
6316                 pde = pmap_l2e_to_l3e(l2e, va);
6317                 if ((*pde & PG_V) != 0)
6318                         panic("Unexpected pde %p", pde);
6319                 pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE);
6320                 for (i = 0; i < NPDEPG; i++)
6321                         dump_add_page(pa + i * PAGE_SIZE);
6322                 newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R | RPTE_EAA_W);
6323                 pte_store(pde, newl3);
6324         }
6325 #endif
6326         vm_page_array = (vm_page_t)start;
6327 }
6328
6329 #ifdef DDB
6330 #include <sys/kdb.h>
6331 #include <ddb/ddb.h>
6332
6333 static void
6334 pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va)
6335 {
6336         pml1_entry_t *l1e;
6337         pml2_entry_t *l2e;
6338         pml3_entry_t *l3e;
6339         pt_entry_t *pte;
6340
6341         l1e = &l1[pmap_pml1e_index(va)];
6342         db_printf("VA %#016lx l1e %#016lx", va, *l1e);
6343         if ((*l1e & PG_V) == 0) {
6344                 db_printf("\n");
6345                 return;
6346         }
6347         l2e = pmap_l1e_to_l2e(l1e, va);
6348         db_printf(" l2e %#016lx", *l2e);
6349         if ((*l2e & PG_V) == 0 || (*l2e & RPTE_LEAF) != 0) {
6350                 db_printf("\n");
6351                 return;
6352         }
6353         l3e = pmap_l2e_to_l3e(l2e, va);
6354         db_printf(" l3e %#016lx", *l3e);
6355         if ((*l3e & PG_V) == 0 || (*l3e & RPTE_LEAF) != 0) {
6356                 db_printf("\n");
6357                 return;
6358         }
6359         pte = pmap_l3e_to_pte(l3e, va);
6360         db_printf(" pte %#016lx\n", *pte);
6361 }
6362
6363 void
6364 pmap_page_print_mappings(vm_page_t m)
6365 {
6366         pmap_t pmap;
6367         pv_entry_t pv;
6368
6369         db_printf("page %p(%lx)\n", m, m->phys_addr);
6370         /* need to elide locks if running in ddb */
6371         TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
6372                 db_printf("pv: %p ", pv);
6373                 db_printf("va: %#016lx ", pv->pv_va);
6374                 pmap = PV_PMAP(pv);
6375                 db_printf("pmap %p  ", pmap);
6376                 if (pmap != NULL) {
6377                         db_printf("asid: %lu\n", pmap->pm_pid);
6378                         pmap_pte_walk(pmap->pm_pml1, pv->pv_va);
6379                 }
6380         }
6381 }
6382
6383 DB_SHOW_COMMAND(pte, pmap_print_pte)
6384 {
6385         vm_offset_t va;
6386         pmap_t pmap;
6387
6388         if (!have_addr) {
6389                 db_printf("show pte addr\n");
6390                 return;
6391         }
6392         va = (vm_offset_t)addr;
6393
6394         if (va >= DMAP_MIN_ADDRESS)
6395                 pmap = kernel_pmap;
6396         else if (kdb_thread != NULL)
6397                 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace);
6398         else
6399                 pmap = vmspace_pmap(curthread->td_proc->p_vmspace);
6400
6401         pmap_pte_walk(pmap->pm_pml1, va);
6402 }
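/*
 * Example ddb(4) session for the command above (the address and the printed
 * entries are illustrative):
 *
 *	db> show pte 0xc000000001000000
 *	VA 0xc000000001000000 l1e 0x... l2e 0x... l3e 0x... pte 0x...
 *
 * Kernel and direct-map addresses are resolved against kernel_pmap;
 * otherwise the pmap of the thread being debugged (or curthread) is used.
 */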
6403
6404 #endif
6405