1 /*-
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  * Copyright (c) 1994 David Greenman
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * the Systems Programming Group of the University of Utah Computer
11  * Science Department and William Jolitz of UUNET Technologies Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
38  */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 /*
44  * Manages physical address maps.
45  *
46  * Since the information managed by this module is also stored by the
47  * logical address mapping module, this module may throw away valid virtual
48  * to physical mappings at almost any time.  However, invalidations of
49  * mappings must be done as requested.
50  *
51  * In order to cope with hardware architectures which make virtual to
52  * physical map invalidates expensive, this module may delay invalidate or
53  * reduced protection operations until such time as they are actually
54  * necessary.  This module is given full information as to which processors
55  * are currently using which maps, and to when physical maps must be made
56  * correct.
57  */
58
59 #include "opt_kstack_pages.h"
60 #include "opt_pmap.h"
61
62 #include <sys/param.h>
63 #include <sys/kernel.h>
64 #include <sys/ktr.h>
65 #include <sys/lock.h>
66 #include <sys/msgbuf.h>
67 #include <sys/mutex.h>
68 #include <sys/proc.h>
69 #include <sys/rwlock.h>
70 #include <sys/smp.h>
71 #include <sys/sysctl.h>
72 #include <sys/systm.h>
73 #include <sys/vmmeter.h>
74
75 #include <dev/ofw/openfirm.h>
76
77 #include <vm/vm.h>
78 #include <vm/vm_param.h>
79 #include <vm/vm_kern.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_map.h>
82 #include <vm/vm_object.h>
83 #include <vm/vm_extern.h>
84 #include <vm/vm_pageout.h>
85 #include <vm/vm_pager.h>
86 #include <vm/vm_phys.h>
87
88 #include <machine/cache.h>
89 #include <machine/frame.h>
90 #include <machine/instr.h>
91 #include <machine/md_var.h>
92 #include <machine/metadata.h>
93 #include <machine/ofw_mem.h>
94 #include <machine/smp.h>
95 #include <machine/tlb.h>
96 #include <machine/tte.h>
97 #include <machine/tsb.h>
98 #include <machine/ver.h>
99
100 /*
101  * Virtual address of message buffer
102  */
103 struct msgbuf *msgbufp;
104
105 /*
106  * Map of physical memory regions
107  */
108 vm_paddr_t phys_avail[128];
109 static struct ofw_mem_region mra[128];
110 struct ofw_mem_region sparc64_memreg[128];
111 int sparc64_nmemreg;
112 static struct ofw_map translations[128];
113 static int translations_size;
114
115 static vm_offset_t pmap_idle_map;
116 static vm_offset_t pmap_temp_map_1;
117 static vm_offset_t pmap_temp_map_2;
118
119 /*
120  * First and last available kernel virtual addresses
121  */
122 vm_offset_t virtual_avail;
123 vm_offset_t virtual_end;
124 vm_offset_t kernel_vm_end;
125
126 vm_offset_t vm_max_kernel_address;
127
128 /*
129  * Kernel pmap
130  */
131 struct pmap kernel_pmap_store;
132
133 struct rwlock_padalign tte_list_global_lock;
134
135 /*
136  * Allocate physical memory for use in pmap_bootstrap.
137  */
138 static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size, uint32_t colors);
139
140 static void pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data);
141 static void pmap_cache_remove(vm_page_t m, vm_offset_t va);
142 static int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2,
143     struct tte *tp, vm_offset_t va);
144 static int pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp,
145     vm_offset_t va);
146 static void pmap_init_qpages(void);
147
148 /*
149  * Map the given physical page at the specified virtual address in the
150  * target pmap with the protection requested.  If specified the page
151  * will be wired down.
152  *
153  * The page queues and pmap must be locked.
154  */
155 static int pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
156     vm_prot_t prot, u_int flags, int8_t psind);
157
158 extern int tl1_dmmu_miss_direct_patch_tsb_phys_1[];
159 extern int tl1_dmmu_miss_direct_patch_tsb_phys_end_1[];
160 extern int tl1_dmmu_miss_patch_asi_1[];
161 extern int tl1_dmmu_miss_patch_quad_ldd_1[];
162 extern int tl1_dmmu_miss_patch_tsb_1[];
163 extern int tl1_dmmu_miss_patch_tsb_2[];
164 extern int tl1_dmmu_miss_patch_tsb_mask_1[];
165 extern int tl1_dmmu_miss_patch_tsb_mask_2[];
166 extern int tl1_dmmu_prot_patch_asi_1[];
167 extern int tl1_dmmu_prot_patch_quad_ldd_1[];
168 extern int tl1_dmmu_prot_patch_tsb_1[];
169 extern int tl1_dmmu_prot_patch_tsb_2[];
170 extern int tl1_dmmu_prot_patch_tsb_mask_1[];
171 extern int tl1_dmmu_prot_patch_tsb_mask_2[];
172 extern int tl1_immu_miss_patch_asi_1[];
173 extern int tl1_immu_miss_patch_quad_ldd_1[];
174 extern int tl1_immu_miss_patch_tsb_1[];
175 extern int tl1_immu_miss_patch_tsb_2[];
176 extern int tl1_immu_miss_patch_tsb_mask_1[];
177 extern int tl1_immu_miss_patch_tsb_mask_2[];
178
179 /*
180  * If user pmap is processed with pmap_remove and the
181  * resident count drops to 0, there are no more pages to remove, so we
182  * need not continue.
183  */
184 #define PMAP_REMOVE_DONE(pm) \
185         ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
186
187 /*
188  * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove()
189  * and pmap_protect() instead of trying each virtual address.
190  */
191 #define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE)
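/*
 * Put differently: once a range spans more pages than half the TSB has
 * entries, a single sweep over the TSB via tsb_foreach() is expected to be
 * cheaper than one tsb_tte_lookup() per page; the factor of two is merely a
 * heuristic bias towards the per-page loop.
 */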
192
193 SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "");
194
195 PMAP_STATS_VAR(pmap_nenter);
196 PMAP_STATS_VAR(pmap_nenter_update);
197 PMAP_STATS_VAR(pmap_nenter_replace);
198 PMAP_STATS_VAR(pmap_nenter_new);
199 PMAP_STATS_VAR(pmap_nkenter);
200 PMAP_STATS_VAR(pmap_nkenter_oc);
201 PMAP_STATS_VAR(pmap_nkenter_stupid);
202 PMAP_STATS_VAR(pmap_nkremove);
203 PMAP_STATS_VAR(pmap_nqenter);
204 PMAP_STATS_VAR(pmap_nqremove);
205 PMAP_STATS_VAR(pmap_ncache_enter);
206 PMAP_STATS_VAR(pmap_ncache_enter_c);
207 PMAP_STATS_VAR(pmap_ncache_enter_oc);
208 PMAP_STATS_VAR(pmap_ncache_enter_cc);
209 PMAP_STATS_VAR(pmap_ncache_enter_coc);
210 PMAP_STATS_VAR(pmap_ncache_enter_nc);
211 PMAP_STATS_VAR(pmap_ncache_enter_cnc);
212 PMAP_STATS_VAR(pmap_ncache_remove);
213 PMAP_STATS_VAR(pmap_ncache_remove_c);
214 PMAP_STATS_VAR(pmap_ncache_remove_oc);
215 PMAP_STATS_VAR(pmap_ncache_remove_cc);
216 PMAP_STATS_VAR(pmap_ncache_remove_coc);
217 PMAP_STATS_VAR(pmap_ncache_remove_nc);
218 PMAP_STATS_VAR(pmap_nzero_page);
219 PMAP_STATS_VAR(pmap_nzero_page_c);
220 PMAP_STATS_VAR(pmap_nzero_page_oc);
221 PMAP_STATS_VAR(pmap_nzero_page_nc);
222 PMAP_STATS_VAR(pmap_nzero_page_area);
223 PMAP_STATS_VAR(pmap_nzero_page_area_c);
224 PMAP_STATS_VAR(pmap_nzero_page_area_oc);
225 PMAP_STATS_VAR(pmap_nzero_page_area_nc);
226 PMAP_STATS_VAR(pmap_nzero_page_idle);
227 PMAP_STATS_VAR(pmap_nzero_page_idle_c);
228 PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
229 PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
230 PMAP_STATS_VAR(pmap_ncopy_page);
231 PMAP_STATS_VAR(pmap_ncopy_page_c);
232 PMAP_STATS_VAR(pmap_ncopy_page_oc);
233 PMAP_STATS_VAR(pmap_ncopy_page_nc);
234 PMAP_STATS_VAR(pmap_ncopy_page_dc);
235 PMAP_STATS_VAR(pmap_ncopy_page_doc);
236 PMAP_STATS_VAR(pmap_ncopy_page_sc);
237 PMAP_STATS_VAR(pmap_ncopy_page_soc);
238
239 PMAP_STATS_VAR(pmap_nnew_thread);
240 PMAP_STATS_VAR(pmap_nnew_thread_oc);
241
242 static inline u_long dtlb_get_data(u_int tlb, u_int slot);
243
244 /*
245  * Quick sort callout for comparing memory regions
246  */
247 static int mr_cmp(const void *a, const void *b);
248 static int om_cmp(const void *a, const void *b);
249
250 static int
251 mr_cmp(const void *a, const void *b)
252 {
253         const struct ofw_mem_region *mra;
254         const struct ofw_mem_region *mrb;
255
256         mra = a;
257         mrb = b;
258         if (mra->mr_start < mrb->mr_start)
259                 return (-1);
260         else if (mra->mr_start > mrb->mr_start)
261                 return (1);
262         else
263                 return (0);
264 }
265
266 static int
267 om_cmp(const void *a, const void *b)
268 {
269         const struct ofw_map *oma;
270         const struct ofw_map *omb;
271
272         oma = a;
273         omb = b;
274         if (oma->om_start < omb->om_start)
275                 return (-1);
276         else if (oma->om_start > omb->om_start)
277                 return (1);
278         else
279                 return (0);
280 }
281
282 static inline u_long
283 dtlb_get_data(u_int tlb, u_int slot)
284 {
285         u_long data;
286         register_t s;
287
288         slot = TLB_DAR_SLOT(tlb, slot);
289         /*
290          * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
291          * work around errata of USIII and beyond.
292          */
293         s = intr_disable();
294         (void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
295         data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
296         intr_restore(s);
297         return (data);
298 }
299
300 /*
301  * Bootstrap the system enough to run with virtual memory.
302  */
303 void
304 pmap_bootstrap(u_int cpu_impl)
305 {
306         struct pmap *pm;
307         struct tte *tp;
308         vm_offset_t off;
309         vm_offset_t va;
310         vm_paddr_t pa;
311         vm_size_t physsz;
312         vm_size_t virtsz;
313         u_long data;
314         u_long vpn;
315         phandle_t pmem;
316         phandle_t vmem;
317         u_int dtlb_slots_avail;
318         int i;
319         int j;
320         int sz;
321         uint32_t asi;
322         uint32_t colors;
323         uint32_t ldd;
324
325         /*
326          * Set the kernel context.
327          */
328         pmap_set_kctx();
329
330         colors = dcache_color_ignore != 0 ? 1 : DCACHE_COLORS;
331
332         /*
333          * Find out what physical memory is available from the PROM and
334          * initialize the phys_avail array.  This must be done before
335          * pmap_bootstrap_alloc is called.
336          */
337         if ((pmem = OF_finddevice("/memory")) == -1)
338                 OF_panic("%s: finddevice /memory", __func__);
339         if ((sz = OF_getproplen(pmem, "available")) == -1)
340                 OF_panic("%s: getproplen /memory/available", __func__);
341         if (sizeof(phys_avail) < sz)
342                 OF_panic("%s: phys_avail too small", __func__);
343         if (sizeof(mra) < sz)
344                 OF_panic("%s: mra too small", __func__);
345         bzero(mra, sz);
346         if (OF_getprop(pmem, "available", mra, sz) == -1)
347                 OF_panic("%s: getprop /memory/available", __func__);
348         sz /= sizeof(*mra);
349 #ifdef DIAGNOSTIC
350         OF_printf("pmap_bootstrap: physical memory\n");
351 #endif
352         qsort(mra, sz, sizeof (*mra), mr_cmp);
353         physsz = 0;
354         getenv_quad("hw.physmem", &physmem);
355         physmem = btoc(physmem);
356         for (i = 0, j = 0; i < sz; i++, j += 2) {
357 #ifdef DIAGNOSTIC
358                 OF_printf("start=%#lx size=%#lx\n", mra[i].mr_start,
359                     mra[i].mr_size);
360 #endif
361                 if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) {
362                         if (btoc(physsz) < physmem) {
363                                 phys_avail[j] = mra[i].mr_start;
364                                 phys_avail[j + 1] = mra[i].mr_start +
365                                     (ctob(physmem) - physsz);
366                                 physsz = ctob(physmem);
367                         }
368                         break;
369                 }
370                 phys_avail[j] = mra[i].mr_start;
371                 phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
372                 physsz += mra[i].mr_size;
373         }
374         physmem = btoc(physsz);
375
376         /*
377          * Calculate the size of kernel virtual memory, and the size and mask
378  * for the kernel TSB based on the physical memory size but limited
379          * by the amount of dTLB slots available for locked entries if we have
380          * to lock the TSB in the TLB (given that for spitfire-class CPUs all
381          * of the dt64 slots can hold locked entries but there is no large
382          * dTLB for unlocked ones, we don't use more than half of it for the
383          * TSB).
384          * Note that for reasons unknown OpenSolaris doesn't take advantage of
385          * ASI_ATOMIC_QUAD_LDD_PHYS on UltraSPARC-III.  However, given that no
386          * public documentation is available for these, the latter just might
387          * not support it, yet.
388          */
389         if (cpu_impl == CPU_IMPL_SPARC64V ||
390             cpu_impl >= CPU_IMPL_ULTRASPARCIIIp) {
391                 tsb_kernel_ldd_phys = 1;
392                 virtsz = roundup(5 / 3 * physsz, PAGE_SIZE_4M <<
393                     (PAGE_SHIFT - TTE_SHIFT));
394         } else {
395                 dtlb_slots_avail = 0;
396                 for (i = 0; i < dtlb_slots; i++) {
397                         data = dtlb_get_data(cpu_impl ==
398                             CPU_IMPL_ULTRASPARCIII ? TLB_DAR_T16 :
399                             TLB_DAR_T32, i);
400                         if ((data & (TD_V | TD_L)) != (TD_V | TD_L))
401                                 dtlb_slots_avail++;
402                 }
403 #ifdef SMP
404                 dtlb_slots_avail -= PCPU_PAGES;
405 #endif
406                 if (cpu_impl >= CPU_IMPL_ULTRASPARCI &&
407                     cpu_impl < CPU_IMPL_ULTRASPARCIII)
408                         dtlb_slots_avail /= 2;
409                 virtsz = roundup(physsz, PAGE_SIZE_4M <<
410                     (PAGE_SHIFT - TTE_SHIFT));
411                 virtsz = MIN(virtsz, (dtlb_slots_avail * PAGE_SIZE_4M) <<
412                     (PAGE_SHIFT - TTE_SHIFT));
413         }
414         vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
415         tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
416         tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
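        /*
         * Rough numbers, assuming the usual 8K base page (PAGE_SHIFT 13) and
         * 16-byte TTEs (TTE_SHIFT 4): each 4M TSB page holds 256K TTEs and
         * thus maps 2GB of KVA, which is why virtsz is rounded to multiples
         * of PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT) = 2GB above.  The TSB
         * then comes out at virtsz / 512 bytes, i.e. one locked 4M dTLB
         * entry per 2GB of kernel virtual address space in the
         * non-direct-mapped case.
         */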
417
418         /*
419          * Allocate the kernel TSB and lock it in the TLB if necessary.
420          */
421         pa = pmap_bootstrap_alloc(tsb_kernel_size, colors);
422         if (pa & PAGE_MASK_4M)
423                 OF_panic("%s: TSB unaligned", __func__);
424         tsb_kernel_phys = pa;
425         if (tsb_kernel_ldd_phys == 0) {
426                 tsb_kernel =
427                     (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
428                 pmap_map_tsb();
429                 bzero(tsb_kernel, tsb_kernel_size);
430         } else {
431                 tsb_kernel =
432                     (struct tte *)TLB_PHYS_TO_DIRECT(tsb_kernel_phys);
433                 aszero(ASI_PHYS_USE_EC, tsb_kernel_phys, tsb_kernel_size);
434         }
435
436         /*
437          * Allocate and map the dynamic per-CPU area for the BSP.
438          */
439         pa = pmap_bootstrap_alloc(DPCPU_SIZE, colors);
440         dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pa);
441
442         /*
443          * Allocate and map the message buffer.
444          */
445         pa = pmap_bootstrap_alloc(msgbufsize, colors);
446         msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa);
447
448         /*
449          * Patch the TSB addresses and mask as well as the ASIs used to load
450          * it into the trap table.
451          */
452
453 #define LDDA_R_I_R(rd, imm_asi, rs1, rs2)                               \
454         (EIF_OP(IOP_LDST) | EIF_F3_RD(rd) | EIF_F3_OP3(INS3_LDDA) |     \
455             EIF_F3_RS1(rs1) | EIF_F3_I(0) | EIF_F3_IMM_ASI(imm_asi) |   \
456             EIF_F3_RS2(rs2))
457 #define OR_R_I_R(rd, imm13, rs1)                                        \
458         (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) |       \
459             EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
460 #define SETHI(rd, imm22)                                                \
461         (EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) |   \
462             EIF_IMM((imm22) >> 10, 22))
463 #define WR_R_I(rd, imm13, rs1)                                          \
464         (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_WR) |       \
465             EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
466
467 #define PATCH_ASI(addr, asi) do {                                       \
468         if (addr[0] != WR_R_I(IF_F3_RD(addr[0]), 0x0,                   \
469             IF_F3_RS1(addr[0])))                                        \
470                 OF_panic("%s: patched instructions have changed",       \
471                     __func__);                                          \
472         addr[0] |= EIF_IMM((asi), 13);                                  \
473         flush(addr);                                                    \
474 } while (0)
475
476 #define PATCH_LDD(addr, asi) do {                                       \
477         if (addr[0] != LDDA_R_I_R(IF_F3_RD(addr[0]), 0x0,               \
478             IF_F3_RS1(addr[0]), IF_F3_RS2(addr[0])))                    \
479                 OF_panic("%s: patched instructions have changed",       \
480                     __func__);                                          \
481         addr[0] |= EIF_F3_IMM_ASI(asi);                                 \
482         flush(addr);                                                    \
483 } while (0)
484
485 #define PATCH_TSB(addr, val) do {                                       \
486         if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) ||                 \
487             addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0,                 \
488             IF_F3_RS1(addr[1])) ||                                      \
489             addr[3] != SETHI(IF_F2_RD(addr[3]), 0x0))                   \
490                 OF_panic("%s: patched instructions have changed",       \
491                     __func__);                                          \
492         addr[0] |= EIF_IMM((val) >> 42, 22);                            \
493         addr[1] |= EIF_IMM((val) >> 32, 10);                            \
494         addr[3] |= EIF_IMM((val) >> 10, 22);                            \
495         flush(addr);                                                    \
496         flush(addr + 1);                                                \
497         flush(addr + 3);                                                \
498 } while (0)
499
500 #define PATCH_TSB_MASK(addr, val) do {                                  \
501         if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) ||                 \
502             addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0,                 \
503             IF_F3_RS1(addr[1])))                                        \
504                 OF_panic("%s: patched instructions have changed",       \
505                     __func__);                                          \
506         addr[0] |= EIF_IMM((val) >> 10, 22);                            \
507         addr[1] |= EIF_IMM((val), 10);                                  \
508         flush(addr);                                                    \
509         flush(addr + 1);                                                \
510 } while (0)
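/*
 * A sketch of what PATCH_TSB() encodes (not verified against the trap table
 * source): bits 63..42 and 41..32 of the value go into the sethi/or pair at
 * addr[0]/addr[1], bits 31..10 into the sethi at addr[3], with addr[2]
 * presumably combining the two halves; bits 9..0 cannot be encoded this way
 * and are dropped.  PATCH_TSB_MASK() analogously fills a single sethi/or
 * pair with bits 31..0 of the mask.
 */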
511
512         if (tsb_kernel_ldd_phys == 0) {
513                 asi = ASI_N;
514                 ldd = ASI_NUCLEUS_QUAD_LDD;
515                 off = (vm_offset_t)tsb_kernel;
516         } else {
517                 asi = ASI_PHYS_USE_EC;
518                 ldd = ASI_ATOMIC_QUAD_LDD_PHYS;
519                 off = (vm_offset_t)tsb_kernel_phys;
520         }
521         PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_1, tsb_kernel_phys);
522         PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_end_1,
523             tsb_kernel_phys + tsb_kernel_size - 1);
524         PATCH_ASI(tl1_dmmu_miss_patch_asi_1, asi);
525         PATCH_LDD(tl1_dmmu_miss_patch_quad_ldd_1, ldd);
526         PATCH_TSB(tl1_dmmu_miss_patch_tsb_1, off);
527         PATCH_TSB(tl1_dmmu_miss_patch_tsb_2, off);
528         PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_1, tsb_kernel_mask);
529         PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_2, tsb_kernel_mask);
530         PATCH_ASI(tl1_dmmu_prot_patch_asi_1, asi);
531         PATCH_LDD(tl1_dmmu_prot_patch_quad_ldd_1, ldd);
532         PATCH_TSB(tl1_dmmu_prot_patch_tsb_1, off);
533         PATCH_TSB(tl1_dmmu_prot_patch_tsb_2, off);
534         PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_1, tsb_kernel_mask);
535         PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_2, tsb_kernel_mask);
536         PATCH_ASI(tl1_immu_miss_patch_asi_1, asi);
537         PATCH_LDD(tl1_immu_miss_patch_quad_ldd_1, ldd);
538         PATCH_TSB(tl1_immu_miss_patch_tsb_1, off);
539         PATCH_TSB(tl1_immu_miss_patch_tsb_2, off);
540         PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_1, tsb_kernel_mask);
541         PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_2, tsb_kernel_mask);
542
543         /*
544          * Enter fake 8k pages for the 4MB kernel pages, so that
545          * pmap_kextract() will work for them.
546          */
547         for (i = 0; i < kernel_tlb_slots; i++) {
548                 pa = kernel_tlbs[i].te_pa;
549                 va = kernel_tlbs[i].te_va;
550                 for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
551                         tp = tsb_kvtotte(va + off);
552                         vpn = TV_VPN(va + off, TS_8K);
553                         data = TD_V | TD_8K | TD_PA(pa + off) | TD_REF |
554                             TD_SW | TD_CP | TD_CV | TD_P | TD_W;
555                         pmap_bootstrap_set_tte(tp, vpn, data);
556                 }
557         }
558
559         /*
560          * Set the start and end of KVA.  The kernel is loaded starting
561          * at the first available 4MB super page, so we advance to the
562          * end of the last one used for it.
563          */
564         virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M;
565         virtual_end = vm_max_kernel_address;
566         kernel_vm_end = vm_max_kernel_address;
567
568         /*
569          * Allocate kva space for temporary mappings.
570          */
571         pmap_idle_map = virtual_avail;
572         virtual_avail += PAGE_SIZE * colors;
573         pmap_temp_map_1 = virtual_avail;
574         virtual_avail += PAGE_SIZE * colors;
575         pmap_temp_map_2 = virtual_avail;
576         virtual_avail += PAGE_SIZE * colors;
577
578         /*
579          * Allocate a kernel stack with guard page for thread0 and map it
580          * into the kernel TSB.  We must ensure that the virtual address is
581          * colored properly for corresponding CPUs, since we're allocating
582          * from phys_avail so the memory won't have an associated vm_page_t.
583          */
584         pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, colors);
585         kstack0_phys = pa;
586         virtual_avail += roundup(KSTACK_GUARD_PAGES, colors) * PAGE_SIZE;
587         kstack0 = virtual_avail;
588         virtual_avail += roundup(KSTACK_PAGES, colors) * PAGE_SIZE;
589         if (dcache_color_ignore == 0)
590                 KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys),
591                     ("pmap_bootstrap: kstack0 miscolored"));
592         for (i = 0; i < KSTACK_PAGES; i++) {
593                 pa = kstack0_phys + i * PAGE_SIZE;
594                 va = kstack0 + i * PAGE_SIZE;
595                 tp = tsb_kvtotte(va);
596                 vpn = TV_VPN(va, TS_8K);
597                 data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP |
598                     TD_CV | TD_P | TD_W;
599                 pmap_bootstrap_set_tte(tp, vpn, data);
600         }
601
602         /*
603          * Calculate the last available physical address.
604          */
605         for (i = 0; phys_avail[i + 2] != 0; i += 2)
606                 ;
607         Maxmem = sparc64_btop(phys_avail[i + 1]);
608
609         /*
610          * Add the PROM mappings to the kernel TSB.
611          */
612         if ((vmem = OF_finddevice("/virtual-memory")) == -1)
613                 OF_panic("%s: finddevice /virtual-memory", __func__);
614         if ((sz = OF_getproplen(vmem, "translations")) == -1)
615                 OF_panic("%s: getproplen translations", __func__);
616         if (sizeof(translations) < sz)
617                 OF_panic("%s: translations too small", __func__);
618         bzero(translations, sz);
619         if (OF_getprop(vmem, "translations", translations, sz) == -1)
620                 OF_panic("%s: getprop /virtual-memory/translations",
621                     __func__);
622         sz /= sizeof(*translations);
623         translations_size = sz;
624 #ifdef DIAGNOSTIC
625         OF_printf("pmap_bootstrap: translations\n");
626 #endif
627         qsort(translations, sz, sizeof (*translations), om_cmp);
628         for (i = 0; i < sz; i++) {
629 #ifdef DIAGNOSTIC
630                 OF_printf("translation: start=%#lx size=%#lx tte=%#lx\n",
631                     translations[i].om_start, translations[i].om_size,
632                     translations[i].om_tte);
633 #endif
634                 if ((translations[i].om_tte & TD_V) == 0)
635                         continue;
636                 if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
637                     translations[i].om_start > VM_MAX_PROM_ADDRESS)
638                         continue;
639                 for (off = 0; off < translations[i].om_size;
640                     off += PAGE_SIZE) {
641                         va = translations[i].om_start + off;
642                         tp = tsb_kvtotte(va);
643                         vpn = TV_VPN(va, TS_8K);
644                         data = ((translations[i].om_tte &
645                             ~((TD_SOFT2_MASK << TD_SOFT2_SHIFT) |
646                             (cpu_impl >= CPU_IMPL_ULTRASPARCI &&
647                             cpu_impl < CPU_IMPL_ULTRASPARCIII ?
648                             (TD_DIAG_SF_MASK << TD_DIAG_SF_SHIFT) :
649                             (TD_RSVD_CH_MASK << TD_RSVD_CH_SHIFT)) |
650                             (TD_SOFT_MASK << TD_SOFT_SHIFT))) | TD_EXEC) +
651                             off;
652                         pmap_bootstrap_set_tte(tp, vpn, data);
653                 }
654         }
655
656         /*
657          * Get the available physical memory ranges from /memory/reg.  These
658          * are only used for kernel dumps, but it may not be wise to do PROM
659          * calls in that situation.
660          */
661         if ((sz = OF_getproplen(pmem, "reg")) == -1)
662                 OF_panic("%s: getproplen /memory/reg", __func__);
663         if (sizeof(sparc64_memreg) < sz)
664                 OF_panic("%s: sparc64_memreg too small", __func__);
665         if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
666                 OF_panic("%s: getprop /memory/reg", __func__);
667         sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
668
669         /*
670          * Initialize the kernel pmap (which is statically allocated).
671          */
672         pm = kernel_pmap;
673         PMAP_LOCK_INIT(pm);
674         for (i = 0; i < MAXCPU; i++)
675                 pm->pm_context[i] = TLB_CTX_KERNEL;
676         CPU_FILL(&pm->pm_active);
677
678         /*
679          * Initialize the global tte list lock, which is more commonly
680          * known as the pmap pv global lock.
681          */
682         rw_init(&tte_list_global_lock, "pmap pv global");
683
684         /*
685          * Flush all non-locked TLB entries possibly left over by the
686          * firmware.
687          */
688         tlb_flush_nonlocked();
689 }
690
691 static void
692 pmap_init_qpages(void)
693 {
694         struct pcpu *pc;
695         int i;
696
697         if (dcache_color_ignore != 0)
698                 return;
699
700         CPU_FOREACH(i) {
701                 pc = pcpu_find(i);
702                 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE * DCACHE_COLORS);
703                 if (pc->pc_qmap_addr == 0)
704                         panic("pmap_init_qpages: unable to allocate KVA");
705         }
706 }
707
708 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL);
709
710 /*
711  * Map the 4MB kernel TSB pages.
712  */
713 void
714 pmap_map_tsb(void)
715 {
716         vm_offset_t va;
717         vm_paddr_t pa;
718         u_long data;
719         int i;
720
721         for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
722                 va = (vm_offset_t)tsb_kernel + i;
723                 pa = tsb_kernel_phys + i;
724                 data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
725                     TD_P | TD_W;
726                 stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) |
727                     TLB_TAR_CTX(TLB_CTX_KERNEL));
728                 stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
729         }
730 }
731
732 /*
733  * Set the secondary context to be the kernel context (needed for FP block
734  * operations in the kernel).
735  */
736 void
737 pmap_set_kctx(void)
738 {
739
740         stxa(AA_DMMU_SCXR, ASI_DMMU, (ldxa(AA_DMMU_SCXR, ASI_DMMU) &
741             TLB_CXR_PGSZ_MASK) | TLB_CTX_KERNEL);
742         flush(KERNBASE);
743 }
744
745 /*
746  * Allocate a physical page of memory directly from the phys_avail map.
747  * Can only be called from pmap_bootstrap before avail start and end are
748  * calculated.
749  */
750 static vm_paddr_t
751 pmap_bootstrap_alloc(vm_size_t size, uint32_t colors)
752 {
753         vm_paddr_t pa;
754         int i;
755
756         size = roundup(size, PAGE_SIZE * colors);
757         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
758                 if (phys_avail[i + 1] - phys_avail[i] < size)
759                         continue;
760                 pa = phys_avail[i];
761                 phys_avail[i] += size;
762                 return (pa);
763         }
764         OF_panic("%s: no suitable region found", __func__);
765 }
766
767 /*
768  * Set a TTE.  This function is intended as a helper when tsb_kernel is
769  * direct-mapped but we haven't taken over the trap table, yet, as it's the
770  * case when we are taking advantage of ASI_ATOMIC_QUAD_LDD_PHYS to access
771  * the kernel TSB.
772  */
773 void
774 pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data)
775 {
776
777         if (tsb_kernel_ldd_phys == 0) {
778                 tp->tte_vpn = vpn;
779                 tp->tte_data = data;
780         } else {
781                 stxa((vm_paddr_t)tp + offsetof(struct tte, tte_vpn),
782                     ASI_PHYS_USE_EC, vpn);
783                 stxa((vm_paddr_t)tp + offsetof(struct tte, tte_data),
784                     ASI_PHYS_USE_EC, data);
785         }
786 }
787
788 /*
789  * Initialize a vm_page's machine-dependent fields.
790  */
791 void
792 pmap_page_init(vm_page_t m)
793 {
794
795         TAILQ_INIT(&m->md.tte_list);
796         m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
797         m->md.pmap = NULL;
798 }
799
800 /*
801  * Initialize the pmap module.
802  */
803 void
804 pmap_init(void)
805 {
806         vm_offset_t addr;
807         vm_size_t size;
808         int result;
809         int i;
810
811         for (i = 0; i < translations_size; i++) {
812                 addr = translations[i].om_start;
813                 size = translations[i].om_size;
814                 if ((translations[i].om_tte & TD_V) == 0)
815                         continue;
816                 if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS)
817                         continue;
818                 result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
819                     VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
820                 if (result != KERN_SUCCESS || addr != translations[i].om_start)
821                         panic("pmap_init: vm_map_find");
822         }
823 }
824
825 /*
826  * Extract the physical page address associated with the given
827  * map/virtual_address pair.
828  */
829 vm_paddr_t
830 pmap_extract(pmap_t pm, vm_offset_t va)
831 {
832         struct tte *tp;
833         vm_paddr_t pa;
834
835         if (pm == kernel_pmap)
836                 return (pmap_kextract(va));
837         PMAP_LOCK(pm);
838         tp = tsb_tte_lookup(pm, va);
839         if (tp == NULL)
840                 pa = 0;
841         else
842                 pa = TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp));
843         PMAP_UNLOCK(pm);
844         return (pa);
845 }
846
847 /*
848  * Atomically extract and hold the physical page with the given
849  * pmap and virtual address pair if that mapping permits the given
850  * protection.
851  */
852 vm_page_t
853 pmap_extract_and_hold(pmap_t pm, vm_offset_t va, vm_prot_t prot)
854 {
855         struct tte *tp;
856         vm_page_t m;
857         vm_paddr_t pa;
858
859         m = NULL;
860         pa = 0;
861         PMAP_LOCK(pm);
862 retry:
863         if (pm == kernel_pmap) {
864                 if (va >= VM_MIN_DIRECT_ADDRESS) {
865                         tp = NULL;
866                         m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va));
867                         (void)vm_page_pa_tryrelock(pm, TLB_DIRECT_TO_PHYS(va),
868                             &pa);
869                         vm_page_hold(m);
870                 } else {
871                         tp = tsb_kvtotte(va);
872                         if ((tp->tte_data & TD_V) == 0)
873                                 tp = NULL;
874                 }
875         } else
876                 tp = tsb_tte_lookup(pm, va);
877         if (tp != NULL && ((tp->tte_data & TD_SW) ||
878             (prot & VM_PROT_WRITE) == 0)) {
879                 if (vm_page_pa_tryrelock(pm, TTE_GET_PA(tp), &pa))
880                         goto retry;
881                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
882                 vm_page_hold(m);
883         }
884         PA_UNLOCK_COND(pa);
885         PMAP_UNLOCK(pm);
886         return (m);
887 }
888
889 /*
890  * Extract the physical page address associated with the given kernel virtual
891  * address.
892  */
893 vm_paddr_t
894 pmap_kextract(vm_offset_t va)
895 {
896         struct tte *tp;
897
898         if (va >= VM_MIN_DIRECT_ADDRESS)
899                 return (TLB_DIRECT_TO_PHYS(va));
900         tp = tsb_kvtotte(va);
901         if ((tp->tte_data & TD_V) == 0)
902                 return (0);
903         return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
904 }
905
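/*
 * Note the addition of a mapping of the given page at the given virtual
 * address for D$ color bookkeeping.  Returns non-zero if the new mapping may
 * be entered cacheable (TD_CV) and zero if conflicting colors force the page
 * to be mapped uncacheable; in the latter case any existing mappings of the
 * page are downgraded to uncacheable as well.
 */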
906 int
907 pmap_cache_enter(vm_page_t m, vm_offset_t va)
908 {
909         struct tte *tp;
910         int color;
911
912         rw_assert(&tte_list_global_lock, RA_WLOCKED);
913         KASSERT((m->flags & PG_FICTITIOUS) == 0,
914             ("pmap_cache_enter: fake page"));
915         PMAP_STATS_INC(pmap_ncache_enter);
916
917         if (dcache_color_ignore != 0)
918                 return (1);
919
920         /*
921          * Find the color for this virtual address and note the added mapping.
922          */
923         color = DCACHE_COLOR(va);
924         m->md.colors[color]++;
925
926         /*
927          * If all existing mappings have the same color, the mapping is
928          * cacheable.
929          */
930         if (m->md.color == color) {
931                 KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
932                     ("pmap_cache_enter: cacheable, mappings of other color"));
933                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
934                         PMAP_STATS_INC(pmap_ncache_enter_c);
935                 else
936                         PMAP_STATS_INC(pmap_ncache_enter_oc);
937                 return (1);
938         }
939
940         /*
941          * If there are no mappings of the other color, and the page still has
942          * the wrong color, this must be a new mapping.  Change the color to
943          * match the new mapping, which is cacheable.  We must flush the page
944          * from the cache now.
945          */
946         if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) {
947                 KASSERT(m->md.colors[color] == 1,
948                     ("pmap_cache_enter: changing color, not new mapping"));
949                 dcache_page_inval(VM_PAGE_TO_PHYS(m));
950                 m->md.color = color;
951                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
952                         PMAP_STATS_INC(pmap_ncache_enter_cc);
953                 else
954                         PMAP_STATS_INC(pmap_ncache_enter_coc);
955                 return (1);
956         }
957
958         /*
959          * If the mapping is already non-cacheable, just return.
960          */
961         if (m->md.color == -1) {
962                 PMAP_STATS_INC(pmap_ncache_enter_nc);
963                 return (0);
964         }
965
966         PMAP_STATS_INC(pmap_ncache_enter_cnc);
967
968         /*
969          * Mark all mappings as uncacheable, flush any lines with the other
970          * color out of the dcache, and set the color to none (-1).
971          */
972         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
973                 atomic_clear_long(&tp->tte_data, TD_CV);
974                 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
975         }
976         dcache_page_inval(VM_PAGE_TO_PHYS(m));
977         m->md.color = -1;
978         return (0);
979 }
980
981 static void
982 pmap_cache_remove(vm_page_t m, vm_offset_t va)
983 {
984         struct tte *tp;
985         int color;
986
987         rw_assert(&tte_list_global_lock, RA_WLOCKED);
988         CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
989             m->md.colors[DCACHE_COLOR(va)]);
990         KASSERT((m->flags & PG_FICTITIOUS) == 0,
991             ("pmap_cache_remove: fake page"));
992         PMAP_STATS_INC(pmap_ncache_remove);
993
994         if (dcache_color_ignore != 0)
995                 return;
996
997         KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
998             ("pmap_cache_remove: no mappings %d <= 0",
999             m->md.colors[DCACHE_COLOR(va)]));
1000
1001         /*
1002          * Find the color for this virtual address and note the removal of
1003          * the mapping.
1004          */
1005         color = DCACHE_COLOR(va);
1006         m->md.colors[color]--;
1007
1008         /*
1009          * If the page is cacheable, just return and keep the same color, even
1010          * if there are no longer any mappings.
1011          */
1012         if (m->md.color != -1) {
1013                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
1014                         PMAP_STATS_INC(pmap_ncache_remove_c);
1015                 else
1016                         PMAP_STATS_INC(pmap_ncache_remove_oc);
1017                 return;
1018         }
1019
1020         KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0,
1021             ("pmap_cache_remove: uncacheable, no mappings of other color"));
1022
1023         /*
1024          * If the page is not cacheable (color is -1), and the number of
1025          * mappings for this color is not zero, just return.  There are
1026          * mappings of the other color still, so remain non-cacheable.
1027          */
1028         if (m->md.colors[color] != 0) {
1029                 PMAP_STATS_INC(pmap_ncache_remove_nc);
1030                 return;
1031         }
1032
1033         /*
1034          * The number of mappings for this color is now zero.  Recache the
1035          * other colored mappings, and change the page color to the other
1036          * color.  There should be no lines in the data cache for this page,
1037          * so flushing should not be needed.
1038          */
1039         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1040                 atomic_set_long(&tp->tte_data, TD_CV);
1041                 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1042         }
1043         m->md.color = DCACHE_OTHER_COLOR(color);
1044
1045         if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
1046                 PMAP_STATS_INC(pmap_ncache_remove_cc);
1047         else
1048                 PMAP_STATS_INC(pmap_ncache_remove_coc);
1049 }
1050
1051 /*
1052  * Map a wired page into kernel virtual address space.
1053  */
1054 void
1055 pmap_kenter(vm_offset_t va, vm_page_t m)
1056 {
1057         vm_offset_t ova;
1058         struct tte *tp;
1059         vm_page_t om;
1060         u_long data;
1061
1062         rw_assert(&tte_list_global_lock, RA_WLOCKED);
1063         PMAP_STATS_INC(pmap_nkenter);
1064         tp = tsb_kvtotte(va);
1065         CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
1066             va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data);
1067         if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) {
1068                 CTR5(KTR_SPARE2,
1069         "pmap_kenter: off color va=%#lx pa=%#lx o=%p ot=%d pi=%#lx",
1070                     va, VM_PAGE_TO_PHYS(m), m->object,
1071                     m->object ? m->object->type : -1,
1072                     m->pindex);
1073                 PMAP_STATS_INC(pmap_nkenter_oc);
1074         }
1075         if ((tp->tte_data & TD_V) != 0) {
1076                 om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1077                 ova = TTE_GET_VA(tp);
1078                 if (m == om && va == ova) {
1079                         PMAP_STATS_INC(pmap_nkenter_stupid);
1080                         return;
1081                 }
1082                 TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
1083                 pmap_cache_remove(om, ova);
1084                 if (va != ova)
1085                         tlb_page_demap(kernel_pmap, ova);
1086         }
1087         data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP |
1088             TD_P | TD_W;
1089         if (pmap_cache_enter(m, va) != 0)
1090                 data |= TD_CV;
1091         tp->tte_vpn = TV_VPN(va, TS_8K);
1092         tp->tte_data = data;
1093         TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
1094 }
1095
1096 /*
1097  * Map a wired page into kernel virtual address space.  This additionally
1098  * takes a flag argument which is or'ed to the TTE data.  This is used by
1099  * sparc64_bus_mem_map().
1100  * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
1101  * to flush entries that might still be in the cache, if applicable.
1102  */
1103 void
1104 pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags)
1105 {
1106         struct tte *tp;
1107
1108         tp = tsb_kvtotte(va);
1109         CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
1110             va, pa, tp, tp->tte_data);
1111         tp->tte_vpn = TV_VPN(va, TS_8K);
1112         tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
1113 }
1114
1115 /*
1116  * Remove a wired page from kernel virtual address space.
1117  */
1118 void
1119 pmap_kremove(vm_offset_t va)
1120 {
1121         struct tte *tp;
1122         vm_page_t m;
1123
1124         rw_assert(&tte_list_global_lock, RA_WLOCKED);
1125         PMAP_STATS_INC(pmap_nkremove);
1126         tp = tsb_kvtotte(va);
1127         CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
1128             tp->tte_data);
1129         if ((tp->tte_data & TD_V) == 0)
1130                 return;
1131         m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1132         TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1133         pmap_cache_remove(m, va);
1134         TTE_ZERO(tp);
1135 }
1136
1137 /*
1138  * Inverse of pmap_kenter_flags, used by bus_space_unmap().
1139  */
1140 void
1141 pmap_kremove_flags(vm_offset_t va)
1142 {
1143         struct tte *tp;
1144
1145         tp = tsb_kvtotte(va);
1146         CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp,
1147             tp->tte_data);
1148         TTE_ZERO(tp);
1149 }
1150
1151 /*
1152  * Map a range of physical addresses into kernel virtual address space.
1153  *
1154  * The value passed in *virt is a suggested virtual address for the mapping.
1155  * Architectures which can support a direct-mapped physical to virtual region
1156  * can return the appropriate address within that region, leaving '*virt'
1157  * unchanged.
1158  */
1159 vm_offset_t
1160 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1161 {
1162
1163         return (TLB_PHYS_TO_DIRECT(start));
1164 }
1165
1166 /*
1167  * Map a list of wired pages into kernel virtual address space.  This is
1168  * intended for temporary mappings which do not need page modification or
1169  * references recorded.  Existing mappings in the region are overwritten.
1170  */
1171 void
1172 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
1173 {
1174         vm_offset_t va;
1175
1176         PMAP_STATS_INC(pmap_nqenter);
1177         va = sva;
1178         rw_wlock(&tte_list_global_lock);
1179         while (count-- > 0) {
1180                 pmap_kenter(va, *m);
1181                 va += PAGE_SIZE;
1182                 m++;
1183         }
1184         rw_wunlock(&tte_list_global_lock);
1185         tlb_range_demap(kernel_pmap, sva, va);
1186 }
1187
1188 /*
1189  * Remove page mappings from kernel virtual address space.  Intended for
1190  * temporary mappings entered by pmap_qenter.
1191  */
1192 void
1193 pmap_qremove(vm_offset_t sva, int count)
1194 {
1195         vm_offset_t va;
1196
1197         PMAP_STATS_INC(pmap_nqremove);
1198         va = sva;
1199         rw_wlock(&tte_list_global_lock);
1200         while (count-- > 0) {
1201                 pmap_kremove(va);
1202                 va += PAGE_SIZE;
1203         }
1204         rw_wunlock(&tte_list_global_lock);
1205         tlb_range_demap(kernel_pmap, sva, va);
1206 }
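/*
 * Typical use of the two functions above (a sketch; the real consumers live
 * elsewhere, e.g. pmap_pinit() below maps the per-pmap TSB this way):
 *
 *	va = kva_alloc(n * PAGE_SIZE);
 *	pmap_qenter(va, pages, n);
 *	... access the pages through va ...
 *	pmap_qremove(va, n);
 *	kva_free(va, n * PAGE_SIZE);
 */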
1207
1208 /*
1209  * Initialize the pmap associated with process 0.
1210  */
1211 void
1212 pmap_pinit0(pmap_t pm)
1213 {
1214         int i;
1215
1216         PMAP_LOCK_INIT(pm);
1217         for (i = 0; i < MAXCPU; i++)
1218                 pm->pm_context[i] = TLB_CTX_KERNEL;
1219         CPU_ZERO(&pm->pm_active);
1220         pm->pm_tsb = NULL;
1221         pm->pm_tsb_obj = NULL;
1222         bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1223 }
1224
1225 /*
1226  * Initialize a preallocated and zeroed pmap structure, such as one in a
1227  * vmspace structure.
1228  */
1229 int
1230 pmap_pinit(pmap_t pm)
1231 {
1232         vm_page_t ma[TSB_PAGES];
1233         vm_page_t m;
1234         int i;
1235
1236         /*
1237          * Allocate KVA space for the TSB.
1238          */
1239         if (pm->pm_tsb == NULL) {
1240                 pm->pm_tsb = (struct tte *)kva_alloc(TSB_BSIZE);
1241                 if (pm->pm_tsb == NULL)
1242                         return (0);
1243         }
1244
1245         /*
1246          * Allocate an object for it.
1247          */
1248         if (pm->pm_tsb_obj == NULL)
1249                 pm->pm_tsb_obj = vm_object_allocate(OBJT_PHYS, TSB_PAGES);
1250
1251         for (i = 0; i < MAXCPU; i++)
1252                 pm->pm_context[i] = -1;
1253         CPU_ZERO(&pm->pm_active);
1254
1255         VM_OBJECT_WLOCK(pm->pm_tsb_obj);
1256         for (i = 0; i < TSB_PAGES; i++) {
1257                 m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
1258                     VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1259                 m->valid = VM_PAGE_BITS_ALL;
1260                 m->md.pmap = pm;
1261                 ma[i] = m;
1262         }
1263         VM_OBJECT_WUNLOCK(pm->pm_tsb_obj);
1264         pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
1265
1266         bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1267         return (1);
1268 }
1269
1270 /*
1271  * Release any resources held by the given physical map.
1272  * Called when a pmap initialized by pmap_pinit is being released.
1273  * Should only be called if the map contains no valid mappings.
1274  */
1275 void
1276 pmap_release(pmap_t pm)
1277 {
1278         vm_object_t obj;
1279         vm_page_t m;
1280 #ifdef SMP
1281         struct pcpu *pc;
1282 #endif
1283
1284         CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
1285             pm->pm_context[curcpu], pm->pm_tsb);
1286         KASSERT(pmap_resident_count(pm) == 0,
1287             ("pmap_release: resident pages %ld != 0",
1288             pmap_resident_count(pm)));
1289
1290         /*
1291          * After the pmap was freed, it might be reallocated to a new process.
1292          * When switching, this might lead us to wrongly assume that we need
1293          * not switch contexts because old and new pmap pointer are equal.
1294          * Therefore, make sure that this pmap is not referenced by any PCPU
1295          * pointer any more.  This could happen in two cases:
1296          * - A process that referenced the pmap is currently exiting on a CPU.
1297          *   However, it is guaranteed to not switch in any more after setting
1298          *   its state to PRS_ZOMBIE.
1299          * - A process that referenced this pmap ran on a CPU, but we switched
1300          *   to a kernel thread, leaving the pmap pointer unchanged.
1301          */
1302 #ifdef SMP
1303         sched_pin();
1304         STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
1305                 atomic_cmpset_rel_ptr((uintptr_t *)&pc->pc_pmap,
1306                     (uintptr_t)pm, (uintptr_t)NULL);
1307         sched_unpin();
1308 #else
1309         critical_enter();
1310         if (PCPU_GET(pmap) == pm)
1311                 PCPU_SET(pmap, NULL);
1312         critical_exit();
1313 #endif
1314
1315         pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
1316         obj = pm->pm_tsb_obj;
1317         VM_OBJECT_WLOCK(obj);
1318         KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
1319         while (!TAILQ_EMPTY(&obj->memq)) {
1320                 m = TAILQ_FIRST(&obj->memq);
1321                 m->md.pmap = NULL;
1322                 m->wire_count--;
1323                 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1324                 vm_page_free_zero(m);
1325         }
1326         VM_OBJECT_WUNLOCK(obj);
1327 }
1328
1329 /*
1330  * Grow the number of kernel page table entries.  Unneeded.
1331  */
1332 void
1333 pmap_growkernel(vm_offset_t addr)
1334 {
1335
1336         panic("pmap_growkernel: can't grow kernel");
1337 }
1338
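/*
 * tsb_foreach() callback used by pmap_remove(): tear down a single TTE,
 * dropping the wired and resident counts and transferring the referenced and
 * modified bits to the page as appropriate.  Returns zero once
 * PMAP_REMOVE_DONE() reports nothing left to remove, which ends the walk
 * early.
 */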
1339 int
1340 pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1341     vm_offset_t va)
1342 {
1343         vm_page_t m;
1344         u_long data;
1345
1346         rw_assert(&tte_list_global_lock, RA_WLOCKED);
1347         data = atomic_readandclear_long(&tp->tte_data);
1348         if ((data & TD_FAKE) == 0) {
1349                 m = PHYS_TO_VM_PAGE(TD_PA(data));
1350                 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1351                 if ((data & TD_WIRED) != 0)
1352                         pm->pm_stats.wired_count--;
1353                 if ((data & TD_PV) != 0) {
1354                         if ((data & TD_W) != 0)
1355                                 vm_page_dirty(m);
1356                         if ((data & TD_REF) != 0)
1357                                 vm_page_aflag_set(m, PGA_REFERENCED);
1358                         if (TAILQ_EMPTY(&m->md.tte_list))
1359                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
1360                         pm->pm_stats.resident_count--;
1361                 }
1362                 pmap_cache_remove(m, va);
1363         }
1364         TTE_ZERO(tp);
1365         if (PMAP_REMOVE_DONE(pm))
1366                 return (0);
1367         return (1);
1368 }
1369
1370 /*
1371  * Remove the given range of addresses from the specified map.
1372  */
1373 void
1374 pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
1375 {
1376         struct tte *tp;
1377         vm_offset_t va;
1378
1379         CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
1380             pm->pm_context[curcpu], start, end);
1381         if (PMAP_REMOVE_DONE(pm))
1382                 return;
1383         rw_wlock(&tte_list_global_lock);
1384         PMAP_LOCK(pm);
1385         if (end - start > PMAP_TSB_THRESH) {
1386                 tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
1387                 tlb_context_demap(pm);
1388         } else {
1389                 for (va = start; va < end; va += PAGE_SIZE)
1390                         if ((tp = tsb_tte_lookup(pm, va)) != NULL &&
1391                             !pmap_remove_tte(pm, NULL, tp, va))
1392                                 break;
1393                 tlb_range_demap(pm, start, end - 1);
1394         }
1395         PMAP_UNLOCK(pm);
1396         rw_wunlock(&tte_list_global_lock);
1397 }
1398
1399 void
1400 pmap_remove_all(vm_page_t m)
1401 {
1402         struct pmap *pm;
1403         struct tte *tpn;
1404         struct tte *tp;
1405         vm_offset_t va;
1406
1407         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1408             ("pmap_remove_all: page %p is not managed", m));
1409         rw_wlock(&tte_list_global_lock);
1410         for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
1411                 tpn = TAILQ_NEXT(tp, tte_link);
1412                 if ((tp->tte_data & TD_PV) == 0)
1413                         continue;
1414                 pm = TTE_GET_PMAP(tp);
1415                 va = TTE_GET_VA(tp);
1416                 PMAP_LOCK(pm);
1417                 if ((tp->tte_data & TD_WIRED) != 0)
1418                         pm->pm_stats.wired_count--;
1419                 if ((tp->tte_data & TD_REF) != 0)
1420                         vm_page_aflag_set(m, PGA_REFERENCED);
1421                 if ((tp->tte_data & TD_W) != 0)
1422                         vm_page_dirty(m);
1423                 tp->tte_data &= ~TD_V;
1424                 tlb_page_demap(pm, va);
1425                 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1426                 pm->pm_stats.resident_count--;
1427                 pmap_cache_remove(m, va);
1428                 TTE_ZERO(tp);
1429                 PMAP_UNLOCK(pm);
1430         }
1431         vm_page_aflag_clear(m, PGA_WRITEABLE);
1432         rw_wunlock(&tte_list_global_lock);
1433 }
1434
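/*
 * tsb_foreach() callback used by pmap_protect(): strip the software and
 * hardware write bits from a TTE and, if the mapping was managed and dirty,
 * propagate the modification to the page.  Always returns 1 so the walk
 * covers the whole range.
 */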
1435 static int
1436 pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1437     vm_offset_t va)
1438 {
1439         u_long data;
1440         vm_page_t m;
1441
1442         PMAP_LOCK_ASSERT(pm, MA_OWNED);
1443         data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
1444         if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) {
1445                 m = PHYS_TO_VM_PAGE(TD_PA(data));
1446                 vm_page_dirty(m);
1447         }
1448         return (1);
1449 }
1450
1451 /*
1452  * Set the physical protection on the specified range of this map as requested.
1453  */
1454 void
1455 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1456 {
1457         vm_offset_t va;
1458         struct tte *tp;
1459
1460         CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
1461             pm->pm_context[curcpu], sva, eva, prot);
1462
1463         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1464                 pmap_remove(pm, sva, eva);
1465                 return;
1466         }
1467
1468         if (prot & VM_PROT_WRITE)
1469                 return;
1470
1471         PMAP_LOCK(pm);
1472         if (eva - sva > PMAP_TSB_THRESH) {
1473                 tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
1474                 tlb_context_demap(pm);
1475         } else {
1476                 for (va = sva; va < eva; va += PAGE_SIZE)
1477                         if ((tp = tsb_tte_lookup(pm, va)) != NULL)
1478                                 pmap_protect_tte(pm, NULL, tp, va);
1479                 tlb_range_demap(pm, sva, eva - 1);
1480         }
1481         PMAP_UNLOCK(pm);
1482 }
1483
1484 /*
1485  * Map the given physical page at the specified virtual address in the
1486  * target pmap with the protection requested.  If specified, the page
1487  * will be wired down.
1488  */
1489 int
1490 pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1491     u_int flags, int8_t psind)
1492 {
1493         int rv;
1494
1495         rw_wlock(&tte_list_global_lock);
1496         PMAP_LOCK(pm);
1497         rv = pmap_enter_locked(pm, va, m, prot, flags, psind);
1498         rw_wunlock(&tte_list_global_lock);
1499         PMAP_UNLOCK(pm);
1500         return (rv);
1501 }
1502
1503 /*
1504  * Map the given physical page at the specified virtual address in the
1505  * target pmap with the protection requested.  If specified, the page
1506  * will be wired down.
1507  *
1508  * The page queues and pmap must be locked.
1509  */
1510 static int
1511 pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1512     u_int flags, int8_t psind __unused)
1513 {
1514         struct tte *tp;
1515         vm_paddr_t pa;
1516         vm_page_t real;
1517         u_long data;
1518         boolean_t wired;
1519
1520         rw_assert(&tte_list_global_lock, RA_WLOCKED);
1521         PMAP_LOCK_ASSERT(pm, MA_OWNED);
1522         if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1523                 VM_OBJECT_ASSERT_LOCKED(m->object);
1524         PMAP_STATS_INC(pmap_nenter);
1525         pa = VM_PAGE_TO_PHYS(m);
1526         wired = (flags & PMAP_ENTER_WIRED) != 0;
1527
1528         /*
1529          * If this is a fake page from the device_pager, but it covers actual
1530          * physical memory, convert to the real backing page.
1531          */
1532         if ((m->flags & PG_FICTITIOUS) != 0) {
1533                 real = vm_phys_paddr_to_vm_page(pa);
1534                 if (real != NULL)
1535                         m = real;
1536         }
1537
1538         CTR6(KTR_PMAP,
1539             "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
1540             pm->pm_context[curcpu], m, va, pa, prot, wired);
1541
1542         /*
1543          * If there is an existing mapping and the physical address has not
1544          * changed, this must be a protection or wiring change.
1545          */
1546         if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
1547                 CTR0(KTR_PMAP, "pmap_enter_locked: update");
1548                 PMAP_STATS_INC(pmap_nenter_update);
1549
1550                 /*
1551                  * Wiring change, just update stats.
1552                  */
1553                 if (wired) {
1554                         if ((tp->tte_data & TD_WIRED) == 0) {
1555                                 tp->tte_data |= TD_WIRED;
1556                                 pm->pm_stats.wired_count++;
1557                         }
1558                 } else {
1559                         if ((tp->tte_data & TD_WIRED) != 0) {
1560                                 tp->tte_data &= ~TD_WIRED;
1561                                 pm->pm_stats.wired_count--;
1562                         }
1563                 }
1564
1565                 /*
1566                  * Save the old bits and clear the ones we're interested in.
1567                  */
1568                 data = tp->tte_data;
1569                 tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W);
1570
1571                 /*
1572                  * If write permission is not being granted, sense the modify status.
1573                  */
1574                 if ((prot & VM_PROT_WRITE) != 0) {
1575                         tp->tte_data |= TD_SW;
1576                         if (wired)
1577                                 tp->tte_data |= TD_W;
1578                         if ((m->oflags & VPO_UNMANAGED) == 0)
1579                                 vm_page_aflag_set(m, PGA_WRITEABLE);
1580                 } else if ((data & TD_W) != 0)
1581                         vm_page_dirty(m);
1582
1583                 /*
1584                  * If we're turning on execute permissions, flush the icache.
1585                  */
1586                 if ((prot & VM_PROT_EXECUTE) != 0) {
1587                         if ((data & TD_EXEC) == 0)
1588                                 icache_page_inval(pa);
1589                         tp->tte_data |= TD_EXEC;
1590                 }
1591
1592                 /*
1593                  * Delete the old mapping.
1594                  */
1595                 tlb_page_demap(pm, TTE_GET_VA(tp));
1596         } else {
1597                 /*
1598                  * If there is an existing mapping, but it is for a different
1599                  * physical address, delete the old mapping.
1600                  */
1601                 if (tp != NULL) {
1602                         CTR0(KTR_PMAP, "pmap_enter_locked: replace");
1603                         PMAP_STATS_INC(pmap_nenter_replace);
1604                         pmap_remove_tte(pm, NULL, tp, va);
1605                         tlb_page_demap(pm, va);
1606                 } else {
1607                         CTR0(KTR_PMAP, "pmap_enter_locked: new");
1608                         PMAP_STATS_INC(pmap_nenter_new);
1609                 }
1610
1611                 /*
1612                  * Now set up the data and install the new mapping.
1613                  */
1614                 data = TD_V | TD_8K | TD_PA(pa);
1615                 if (pm == kernel_pmap)
1616                         data |= TD_P;
1617                 if ((prot & VM_PROT_WRITE) != 0) {
1618                         data |= TD_SW;
1619                         if ((m->oflags & VPO_UNMANAGED) == 0)
1620                                 vm_page_aflag_set(m, PGA_WRITEABLE);
1621                 }
1622                 if (prot & VM_PROT_EXECUTE) {
1623                         data |= TD_EXEC;
1624                         icache_page_inval(pa);
1625                 }
1626
1627                 /*
1628                  * If it's wired, update stats.  We also don't need reference or
1629                  * modify tracking for wired mappings, so set the bits now.
1630                  */
1631                 if (wired) {
1632                         pm->pm_stats.wired_count++;
1633                         data |= TD_REF | TD_WIRED;
1634                         if ((prot & VM_PROT_WRITE) != 0)
1635                                 data |= TD_W;
1636                 }
1637
1638                 tsb_tte_enter(pm, m, va, TS_8K, data);
1639         }
1640
1641         return (KERN_SUCCESS);
1642 }
1643
1644 /*
1645  * Maps a sequence of resident pages belonging to the same object.
1646  * The sequence begins with the given page m_start.  This page is
1647  * mapped at the given virtual address start.  Each subsequent page is
1648  * mapped at a virtual address that is offset from start by the same
1649  * amount as the page is offset from m_start within the object.  The
1650  * last page in the sequence is the page with the largest offset from
1651  * m_start that can be mapped at a virtual address less than the given
1652  * virtual address end.  Not every virtual page between start and end
1653  * is mapped; only those for which a resident page exists with the
1654  * corresponding offset from m_start are mapped.
1655  */
1656 void
1657 pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
1658     vm_page_t m_start, vm_prot_t prot)
1659 {
1660         vm_page_t m;
1661         vm_pindex_t diff, psize;
1662
1663         VM_OBJECT_ASSERT_LOCKED(m_start->object);
1664
1665         psize = atop(end - start);
1666         m = m_start;
1667         rw_wlock(&tte_list_global_lock);
1668         PMAP_LOCK(pm);
1669         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1670                 pmap_enter_locked(pm, start + ptoa(diff), m, prot &
1671                     (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
1672                 m = TAILQ_NEXT(m, listq);
1673         }
1674         rw_wunlock(&tte_list_global_lock);
1675         PMAP_UNLOCK(pm);
1676 }
1677
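/*
 * Map a single page with at most read and execute permission and without
 * wiring it; this is typically used to establish speculative (prefault)
 * mappings cheaply.
 */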
1678 void
1679 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
1680 {
1681
1682         rw_wlock(&tte_list_global_lock);
1683         PMAP_LOCK(pm);
1684         pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1685             0, 0);
1686         rw_wunlock(&tte_list_global_lock);
1687         PMAP_UNLOCK(pm);
1688 }
1689
1690 void
1691 pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1692     vm_pindex_t pindex, vm_size_t size)
1693 {
1694
1695         VM_OBJECT_ASSERT_WLOCKED(object);
1696         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
1697             ("pmap_object_init_pt: non-device object"));
1698 }
1699
1700 static int
1701 pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp, vm_offset_t va)
1702 {
1703
1704         PMAP_LOCK_ASSERT(pm, MA_OWNED);
1705         if ((tp->tte_data & TD_WIRED) == 0)
1706                 panic("pmap_unwire_tte: tp %p is missing TD_WIRED", tp);
1707         atomic_clear_long(&tp->tte_data, TD_WIRED);
1708         pm->pm_stats.wired_count--;
1709         return (1);
1710 }
1711
1712 /*
1713  * Clear the wired attribute from the mappings for the specified range of
1714  * addresses in the given pmap.  Every valid mapping within that range must
1715  * have the wired attribute set.  In contrast, invalid mappings cannot have
1716  * the wired attribute set, so they are ignored.
1717  *
1718  * The wired attribute of the translation table entry is not a hardware
1719  * feature, so there is no need to invalidate any TLB entries.
1720  */
1721 void
1722 pmap_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1723 {
1724         vm_offset_t va;
1725         struct tte *tp;
1726
1727         PMAP_LOCK(pm);
1728         if (eva - sva > PMAP_TSB_THRESH)
1729                 tsb_foreach(pm, NULL, sva, eva, pmap_unwire_tte);
1730         else {
1731                 for (va = sva; va < eva; va += PAGE_SIZE)
1732                         if ((tp = tsb_tte_lookup(pm, va)) != NULL)
1733                                 pmap_unwire_tte(pm, NULL, tp, va);
1734         }
1735         PMAP_UNLOCK(pm);
1736 }
1737
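/*
 * Copy a single TTE into the destination pmap if the address is not already
 * mapped there.  Fake (TD_FAKE) mappings are skipped, and the PV, reference,
 * software-write, virtual-cacheability and modify bits are not copied.
 */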
1738 static int
1739 pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp,
1740     vm_offset_t va)
1741 {
1742         vm_page_t m;
1743         u_long data;
1744
1745         if ((tp->tte_data & TD_FAKE) != 0)
1746                 return (1);
1747         if (tsb_tte_lookup(dst_pmap, va) == NULL) {
1748                 data = tp->tte_data &
1749                     ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
1750                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1751                 tsb_tte_enter(dst_pmap, m, va, TS_8K, data);
1752         }
1753         return (1);
1754 }
1755
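/*
 * Copy valid mappings in the given range from the source pmap to the
 * destination pmap.  Only copies between identical virtual addresses are
 * performed, as happens when an address space is forked.
 */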
1756 void
1757 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
1758     vm_size_t len, vm_offset_t src_addr)
1759 {
1760         struct tte *tp;
1761         vm_offset_t va;
1762
1763         if (dst_addr != src_addr)
1764                 return;
1765         rw_wlock(&tte_list_global_lock);
1766         if (dst_pmap < src_pmap) {
1767                 PMAP_LOCK(dst_pmap);
1768                 PMAP_LOCK(src_pmap);
1769         } else {
1770                 PMAP_LOCK(src_pmap);
1771                 PMAP_LOCK(dst_pmap);
1772         }
1773         if (len > PMAP_TSB_THRESH) {
1774                 tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len,
1775                     pmap_copy_tte);
1776                 tlb_context_demap(dst_pmap);
1777         } else {
1778                 for (va = src_addr; va < src_addr + len; va += PAGE_SIZE)
1779                         if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
1780                                 pmap_copy_tte(src_pmap, dst_pmap, tp, va);
1781                 tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
1782         }
1783         rw_wunlock(&tte_list_global_lock);
1784         PMAP_UNLOCK(src_pmap);
1785         PMAP_UNLOCK(dst_pmap);
1786 }
1787
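/*
 * Zero a page.  If the page's dcache color matches that of its direct-map
 * address (or dcache_color_ignore is set), zero it through the direct map;
 * pages without a color (-1) are zeroed through the physical-address ASI;
 * otherwise a temporary kernel mapping with the matching color is used.
 */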
1788 void
1789 pmap_zero_page(vm_page_t m)
1790 {
1791         struct tte *tp;
1792         vm_offset_t va;
1793         vm_paddr_t pa;
1794
1795         KASSERT((m->flags & PG_FICTITIOUS) == 0,
1796             ("pmap_zero_page: fake page"));
1797         PMAP_STATS_INC(pmap_nzero_page);
1798         pa = VM_PAGE_TO_PHYS(m);
1799         if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
1800                 PMAP_STATS_INC(pmap_nzero_page_c);
1801                 va = TLB_PHYS_TO_DIRECT(pa);
1802                 cpu_block_zero((void *)va, PAGE_SIZE);
1803         } else if (m->md.color == -1) {
1804                 PMAP_STATS_INC(pmap_nzero_page_nc);
1805                 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1806         } else {
1807                 PMAP_STATS_INC(pmap_nzero_page_oc);
1808                 PMAP_LOCK(kernel_pmap);
1809                 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1810                 tp = tsb_kvtotte(va);
1811                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1812                 tp->tte_vpn = TV_VPN(va, TS_8K);
1813                 cpu_block_zero((void *)va, PAGE_SIZE);
1814                 tlb_page_demap(kernel_pmap, va);
1815                 PMAP_UNLOCK(kernel_pmap);
1816         }
1817 }
1818
1819 void
1820 pmap_zero_page_area(vm_page_t m, int off, int size)
1821 {
1822         struct tte *tp;
1823         vm_offset_t va;
1824         vm_paddr_t pa;
1825
1826         KASSERT((m->flags & PG_FICTITIOUS) == 0,
1827             ("pmap_zero_page_area: fake page"));
1828         KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
1829         PMAP_STATS_INC(pmap_nzero_page_area);
1830         pa = VM_PAGE_TO_PHYS(m);
1831         if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
1832                 PMAP_STATS_INC(pmap_nzero_page_area_c);
1833                 va = TLB_PHYS_TO_DIRECT(pa);
1834                 bzero((void *)(va + off), size);
1835         } else if (m->md.color == -1) {
1836                 PMAP_STATS_INC(pmap_nzero_page_area_nc);
1837                 aszero(ASI_PHYS_USE_EC, pa + off, size);
1838         } else {
1839                 PMAP_STATS_INC(pmap_nzero_page_area_oc);
1840                 PMAP_LOCK(kernel_pmap);
1841                 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1842                 tp = tsb_kvtotte(va);
1843                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1844                 tp->tte_vpn = TV_VPN(va, TS_8K);
1845                 bzero((void *)(va + off), size);
1846                 tlb_page_demap(kernel_pmap, va);
1847                 PMAP_UNLOCK(kernel_pmap);
1848         }
1849 }
1850
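/*
 * Variant of pmap_zero_page() that uses the pmap_idle_map window rather than
 * the pmap_temp_map_1 window and does not take the kernel pmap lock.
 */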
1851 void
1852 pmap_zero_page_idle(vm_page_t m)
1853 {
1854         struct tte *tp;
1855         vm_offset_t va;
1856         vm_paddr_t pa;
1857
1858         KASSERT((m->flags & PG_FICTITIOUS) == 0,
1859             ("pmap_zero_page_idle: fake page"));
1860         PMAP_STATS_INC(pmap_nzero_page_idle);
1861         pa = VM_PAGE_TO_PHYS(m);
1862         if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
1863                 PMAP_STATS_INC(pmap_nzero_page_idle_c);
1864                 va = TLB_PHYS_TO_DIRECT(pa);
1865                 cpu_block_zero((void *)va, PAGE_SIZE);
1866         } else if (m->md.color == -1) {
1867                 PMAP_STATS_INC(pmap_nzero_page_idle_nc);
1868                 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1869         } else {
1870                 PMAP_STATS_INC(pmap_nzero_page_idle_oc);
1871                 va = pmap_idle_map + (m->md.color * PAGE_SIZE);
1872                 tp = tsb_kvtotte(va);
1873                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1874                 tp->tte_vpn = TV_VPN(va, TS_8K);
1875                 cpu_block_zero((void *)va, PAGE_SIZE);
1876                 tlb_page_demap(kernel_pmap, va);
1877         }
1878 }
1879
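/*
 * Copy one page to another, choosing among the direct map, physical-address
 * ASI copies and temporary colored kernel mappings depending on the dcache
 * colors of the source and destination pages.
 */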
1880 void
1881 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
1882 {
1883         vm_offset_t vdst;
1884         vm_offset_t vsrc;
1885         vm_paddr_t pdst;
1886         vm_paddr_t psrc;
1887         struct tte *tp;
1888
1889         KASSERT((mdst->flags & PG_FICTITIOUS) == 0,
1890             ("pmap_copy_page: fake dst page"));
1891         KASSERT((msrc->flags & PG_FICTITIOUS) == 0,
1892             ("pmap_copy_page: fake src page"));
1893         PMAP_STATS_INC(pmap_ncopy_page);
1894         pdst = VM_PAGE_TO_PHYS(mdst);
1895         psrc = VM_PAGE_TO_PHYS(msrc);
1896         if (dcache_color_ignore != 0 ||
1897             (msrc->md.color == DCACHE_COLOR(psrc) &&
1898             mdst->md.color == DCACHE_COLOR(pdst))) {
1899                 PMAP_STATS_INC(pmap_ncopy_page_c);
1900                 vdst = TLB_PHYS_TO_DIRECT(pdst);
1901                 vsrc = TLB_PHYS_TO_DIRECT(psrc);
1902                 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1903         } else if (msrc->md.color == -1 && mdst->md.color == -1) {
1904                 PMAP_STATS_INC(pmap_ncopy_page_nc);
1905                 ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
1906         } else if (msrc->md.color == -1) {
1907                 if (mdst->md.color == DCACHE_COLOR(pdst)) {
1908                         PMAP_STATS_INC(pmap_ncopy_page_dc);
1909                         vdst = TLB_PHYS_TO_DIRECT(pdst);
1910                         ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1911                             PAGE_SIZE);
1912                 } else {
1913                         PMAP_STATS_INC(pmap_ncopy_page_doc);
1914                         PMAP_LOCK(kernel_pmap);
1915                         vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1916                         tp = tsb_kvtotte(vdst);
1917                         tp->tte_data =
1918                             TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1919                         tp->tte_vpn = TV_VPN(vdst, TS_8K);
1920                         ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1921                             PAGE_SIZE);
1922                         tlb_page_demap(kernel_pmap, vdst);
1923                         PMAP_UNLOCK(kernel_pmap);
1924                 }
1925         } else if (mdst->md.color == -1) {
1926                 if (msrc->md.color == DCACHE_COLOR(psrc)) {
1927                         PMAP_STATS_INC(pmap_ncopy_page_sc);
1928                         vsrc = TLB_PHYS_TO_DIRECT(psrc);
1929                         ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1930                             PAGE_SIZE);
1931                 } else {
1932                         PMAP_STATS_INC(pmap_ncopy_page_soc);
1933                         PMAP_LOCK(kernel_pmap);
1934                         vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
1935                         tp = tsb_kvtotte(vsrc);
1936                         tp->tte_data =
1937                             TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1938                         tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1939                         ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1940                             PAGE_SIZE);
1941                         tlb_page_demap(kernel_pmap, vsrc);
1942                         PMAP_UNLOCK(kernel_pmap);
1943                 }
1944         } else {
1945                 PMAP_STATS_INC(pmap_ncopy_page_oc);
1946                 PMAP_LOCK(kernel_pmap);
1947                 vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1948                 tp = tsb_kvtotte(vdst);
1949                 tp->tte_data =
1950                     TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1951                 tp->tte_vpn = TV_VPN(vdst, TS_8K);
1952                 vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
1953                 tp = tsb_kvtotte(vsrc);
1954                 tp->tte_data =
1955                     TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1956                 tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1957                 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1958                 tlb_page_demap(kernel_pmap, vdst);
1959                 tlb_page_demap(kernel_pmap, vsrc);
1960                 PMAP_UNLOCK(kernel_pmap);
1961         }
1962 }
1963
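/*
 * Create a temporary, CPU-private mapping of the given page.  The direct map
 * is used when the page's dcache color allows it; otherwise a per-CPU
 * quick-map slot with the matching color is filled in inside a critical
 * section and later released by pmap_quick_remove_page().
 */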
1964 vm_offset_t
1965 pmap_quick_enter_page(vm_page_t m)
1966 {
1967         vm_paddr_t pa;
1968         vm_offset_t qaddr;
1969         struct tte *tp;
1970
1971         pa = VM_PAGE_TO_PHYS(m);
1972         if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa))
1973                 return (TLB_PHYS_TO_DIRECT(pa));
1974
1975         critical_enter();
1976         qaddr = PCPU_GET(qmap_addr);
1977         qaddr += (PAGE_SIZE * ((DCACHE_COLORS + DCACHE_COLOR(pa) -
1978             DCACHE_COLOR(qaddr)) % DCACHE_COLORS));
1979         tp = tsb_kvtotte(qaddr);
1980
1981         KASSERT(tp->tte_data == 0, ("pmap_quick_enter_page: PTE busy"));
1982
1983         tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1984         tp->tte_vpn = TV_VPN(qaddr, TS_8K);
1985
1986         return (qaddr);
1987 }
1988
1989 void
1990 pmap_quick_remove_page(vm_offset_t addr)
1991 {
1992         vm_offset_t qaddr;
1993         struct tte *tp;
1994
1995         if (addr >= VM_MIN_DIRECT_ADDRESS)
1996                 return;
1997
1998         tp = tsb_kvtotte(addr);
1999         qaddr = PCPU_GET(qmap_addr);
2000
2001         KASSERT((addr >= qaddr) && (addr < (qaddr + (PAGE_SIZE * DCACHE_COLORS))),
2002             ("pmap_quick_remove_page: invalid address"));
2003         KASSERT(tp->tte_data != 0, ("pmap_quick_remove_page: PTE not in use"));
2004
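        /*
         * Demap the page from both the data and instruction MMUs, clear the
         * TTE and leave the critical section entered by
         * pmap_quick_enter_page().
         */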
2005         stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_DMMU_DEMAP, 0);
2006         stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
2007         flush(KERNBASE);
2008         TTE_ZERO(tp);
2009         critical_exit();
2010 }
2011
2012 int unmapped_buf_allowed;
2013
2014 void
2015 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2016     vm_offset_t b_offset, int xfersize)
2017 {
2018
2019         panic("pmap_copy_pages: not implemented");
2020 }
2021
2022 /*
2023  * Returns true if the pmap's pv is one of the first
2024  * 16 pvs linked to from this page.  This count may
2025  * be changed upwards or downwards in the future; it
2026  * is only necessary that true be returned for a small
2027  * subset of pmaps for proper page aging.
2028  */
2029 boolean_t
2030 pmap_page_exists_quick(pmap_t pm, vm_page_t m)
2031 {
2032         struct tte *tp;
2033         int loops;
2034         boolean_t rv;
2035
2036         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2037             ("pmap_page_exists_quick: page %p is not managed", m));
2038         loops = 0;
2039         rv = FALSE;
2040         rw_wlock(&tte_list_global_lock);
2041         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2042                 if ((tp->tte_data & TD_PV) == 0)
2043                         continue;
2044                 if (TTE_GET_PMAP(tp) == pm) {
2045                         rv = TRUE;
2046                         break;
2047                 }
2048                 if (++loops >= 16)
2049                         break;
2050         }
2051         rw_wunlock(&tte_list_global_lock);
2052         return (rv);
2053 }
2054
2055 /*
2056  * Return the number of managed mappings to the given physical page
2057  * that are wired.
2058  */
2059 int
2060 pmap_page_wired_mappings(vm_page_t m)
2061 {
2062         struct tte *tp;
2063         int count;
2064
2065         count = 0;
2066         if ((m->oflags & VPO_UNMANAGED) != 0)
2067                 return (count);
2068         rw_wlock(&tte_list_global_lock);
2069         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
2070                 if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
2071                         count++;
2072         rw_wunlock(&tte_list_global_lock);
2073         return (count);
2074 }
2075
2076 /*
2077  * Remove all pages from the specified address space; this aids process exit
2078  * speed.  This is much faster than pmap_remove() in the case of running down
2079  * an entire address space.  Only works for the current pmap.
2080  */
2081 void
2082 pmap_remove_pages(pmap_t pm)
2083 {
2084
2085 }
2086
2087 /*
2088  * Returns TRUE if the given page has a managed mapping.
2089  */
2090 boolean_t
2091 pmap_page_is_mapped(vm_page_t m)
2092 {
2093         struct tte *tp;
2094         boolean_t rv;
2095
2096         rv = FALSE;
2097         if ((m->oflags & VPO_UNMANAGED) != 0)
2098                 return (rv);
2099         rw_wlock(&tte_list_global_lock);
2100         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
2101                 if ((tp->tte_data & TD_PV) != 0) {
2102                         rv = TRUE;
2103                         break;
2104                 }
2105         rw_wunlock(&tte_list_global_lock);
2106         return (rv);
2107 }
2108
2109 /*
2110  * Return a count of reference bits for a page, clearing those bits.
2111  * It is not necessary for every reference bit to be cleared, but it
2112  * is necessary that 0 only be returned when there are truly no
2113  * reference bits set.
2114  *
2115  * XXX: The exact number of bits to check and clear is a matter that
2116  * should be tested and standardized at some point in the future for
2117  * optimal aging of shared pages.
2118  */
2119 int
2120 pmap_ts_referenced(vm_page_t m)
2121 {
2122         struct tte *tpf;
2123         struct tte *tpn;
2124         struct tte *tp;
2125         u_long data;
2126         int count;
2127
2128         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2129             ("pmap_ts_referenced: page %p is not managed", m));
2130         count = 0;
2131         rw_wlock(&tte_list_global_lock);
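        /*
         * Walk the page's TTE list, rotating each examined entry to the tail
         * so that successive calls start with different mappings, and stop
         * once more than four referenced mappings have been found.
         */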
2132         if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
2133                 tpf = tp;
2134                 do {
2135                         tpn = TAILQ_NEXT(tp, tte_link);
2136                         TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
2137                         TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
2138                         if ((tp->tte_data & TD_PV) == 0)
2139                                 continue;
2140                         data = atomic_clear_long(&tp->tte_data, TD_REF);
2141                         if ((data & TD_REF) != 0 && ++count > 4)
2142                                 break;
2143                 } while ((tp = tpn) != NULL && tp != tpf);
2144         }
2145         rw_wunlock(&tte_list_global_lock);
2146         return (count);
2147 }
2148
2149 boolean_t
2150 pmap_is_modified(vm_page_t m)
2151 {
2152         struct tte *tp;
2153         boolean_t rv;
2154
2155         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2156             ("pmap_is_modified: page %p is not managed", m));
2157         rv = FALSE;
2158
2159         /*
2160          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2161          * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
2162          * is clear, no TTEs can have TD_W set.
2163          */
2164         VM_OBJECT_ASSERT_WLOCKED(m->object);
2165         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2166                 return (rv);
2167         rw_wlock(&tte_list_global_lock);
2168         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2169                 if ((tp->tte_data & TD_PV) == 0)
2170                         continue;
2171                 if ((tp->tte_data & TD_W) != 0) {
2172                         rv = TRUE;
2173                         break;
2174                 }
2175         }
2176         rw_wunlock(&tte_list_global_lock);
2177         return (rv);
2178 }
2179
2180 /*
2181  *      pmap_is_prefaultable:
2182  *
2183  *      Return whether or not the specified virtual address is eligible
2184  *      for prefault.
2185  */
2186 boolean_t
2187 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2188 {
2189         boolean_t rv;
2190
2191         PMAP_LOCK(pmap);
2192         rv = tsb_tte_lookup(pmap, addr) == NULL;
2193         PMAP_UNLOCK(pmap);
2194         return (rv);
2195 }
2196
2197 /*
2198  * Return whether or not the specified physical page was referenced
2199  * in any physical maps.
2200  */
2201 boolean_t
2202 pmap_is_referenced(vm_page_t m)
2203 {
2204         struct tte *tp;
2205         boolean_t rv;
2206
2207         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2208             ("pmap_is_referenced: page %p is not managed", m));
2209         rv = FALSE;
2210         rw_wlock(&tte_list_global_lock);
2211         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2212                 if ((tp->tte_data & TD_PV) == 0)
2213                         continue;
2214                 if ((tp->tte_data & TD_REF) != 0) {
2215                         rv = TRUE;
2216                         break;
2217                 }
2218         }
2219         rw_wunlock(&tte_list_global_lock);
2220         return (rv);
2221 }
2222
2223 /*
2224  * This function is advisory.
2225  */
2226 void
2227 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
2228 {
2229 }
2230
2231 void
2232 pmap_clear_modify(vm_page_t m)
2233 {
2234         struct tte *tp;
2235         u_long data;
2236
2237         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2238             ("pmap_clear_modify: page %p is not managed", m));
2239         VM_OBJECT_ASSERT_WLOCKED(m->object);
2240         KASSERT(!vm_page_xbusied(m),
2241             ("pmap_clear_modify: page %p is exclusive busied", m));
2242
2243         /*
2244          * If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set.
2245          * If the object containing the page is locked and the page is not
2246          * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2247          */
2248         if ((m->aflags & PGA_WRITEABLE) == 0)
2249                 return;
2250         rw_wlock(&tte_list_global_lock);
2251         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2252                 if ((tp->tte_data & TD_PV) == 0)
2253                         continue;
2254                 data = atomic_clear_long(&tp->tte_data, TD_W);
2255                 if ((data & TD_W) != 0)
2256                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
2257         }
2258         rw_wunlock(&tte_list_global_lock);
2259 }
2260
2261 void
2262 pmap_remove_write(vm_page_t m)
2263 {
2264         struct tte *tp;
2265         u_long data;
2266
2267         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2268             ("pmap_remove_write: page %p is not managed", m));
2269
2270         /*
2271          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2272          * set by another thread while the object is locked.  Thus,
2273          * if PGA_WRITEABLE is clear, no page table entries need updating.
2274          */
2275         VM_OBJECT_ASSERT_WLOCKED(m->object);
2276         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2277                 return;
2278         rw_wlock(&tte_list_global_lock);
2279         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2280                 if ((tp->tte_data & TD_PV) == 0)
2281                         continue;
2282                 data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
2283                 if ((data & TD_W) != 0) {
2284                         vm_page_dirty(m);
2285                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
2286                 }
2287         }
2288         vm_page_aflag_clear(m, PGA_WRITEABLE);
2289         rw_wunlock(&tte_list_global_lock);
2290 }
2291
2292 int
2293 pmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *locked_pa)
2294 {
2295
2296         /* TODO */
2297         return (0);
2298 }
2299
2300 /*
2301  * Activate a user pmap.  The pmap must be activated before its address space
2302  * can be accessed in any way.
2303  */
2304 void
2305 pmap_activate(struct thread *td)
2306 {
2307         struct vmspace *vm;
2308         struct pmap *pm;
2309         int context;
2310
2311         critical_enter();
2312         vm = td->td_proc->p_vmspace;
2313         pm = vmspace_pmap(vm);
2314
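        /*
         * Allocate a new TLB context number for this CPU; once the available
         * context numbers are exhausted, flush the user TLB entries and wrap
         * around to tlb_ctx_min.
         */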
2315         context = PCPU_GET(tlb_ctx);
2316         if (context == PCPU_GET(tlb_ctx_max)) {
2317                 tlb_flush_user();
2318                 context = PCPU_GET(tlb_ctx_min);
2319         }
2320         PCPU_SET(tlb_ctx, context + 1);
2321
2322         pm->pm_context[curcpu] = context;
2323 #ifdef SMP
2324         CPU_SET_ATOMIC(PCPU_GET(cpuid), &pm->pm_active);
2325         atomic_store_acq_ptr((uintptr_t *)PCPU_PTR(pmap), (uintptr_t)pm);
2326 #else
2327         CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
2328         PCPU_SET(pmap, pm);
2329 #endif
2330
2331         stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
2332         stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
2333         stxa(AA_DMMU_PCXR, ASI_DMMU, (ldxa(AA_DMMU_PCXR, ASI_DMMU) &
2334             TLB_CXR_PGSZ_MASK) | context);
2335         flush(KERNBASE);
2336         critical_exit();
2337 }
2338
2339 void
2340 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
2341 {
2342
2343 }
2344
2345 /*
2346  * Increase the starting virtual address of the given mapping if a
2347  * different alignment might result in more superpage mappings.
2348  */
2349 void
2350 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
2351     vm_offset_t *addr, vm_size_t size)
2352 {
2353
2354 }