1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * the Systems Programming Group of the University of Utah Computer
13  * Science Department and William Jolitz of UUNET Technologies Inc.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. Neither the name of the University nor the names of its contributors
24  *    may be used to endorse or promote products derived from this software
25  *    without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
40  */
41
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
44
45 /*
46  * Manages physical address maps.
47  *
48  * Since the information managed by this module is also stored by the
49  * logical address mapping module, this module may throw away valid virtual
50  * to physical mappings at almost any time.  However, invalidations of
51  * mappings must be done as requested.
52  *
53  * In order to cope with hardware architectures which make virtual to
54  * physical map invalidates expensive, this module may delay invalidate
55  * or reduced protection operations until such time as they are actually
56  * necessary.  This module is given full information as to which processors
57  * are currently using which maps, and to when physical maps must be made
58  * correct.
59  */
60
61 #include "opt_kstack_pages.h"
62 #include "opt_pmap.h"
63
64 #include <sys/param.h>
65 #include <sys/kernel.h>
66 #include <sys/ktr.h>
67 #include <sys/lock.h>
68 #include <sys/msgbuf.h>
69 #include <sys/mutex.h>
70 #include <sys/proc.h>
71 #include <sys/rwlock.h>
72 #include <sys/smp.h>
73 #include <sys/sysctl.h>
74 #include <sys/systm.h>
75 #include <sys/vmmeter.h>
76
77 #include <dev/ofw/openfirm.h>
78
79 #include <vm/vm.h>
80 #include <vm/vm_param.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_extern.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_pager.h>
88 #include <vm/vm_phys.h>
89
90 #include <machine/cache.h>
91 #include <machine/frame.h>
92 #include <machine/instr.h>
93 #include <machine/md_var.h>
94 #include <machine/metadata.h>
95 #include <machine/ofw_mem.h>
96 #include <machine/smp.h>
97 #include <machine/tlb.h>
98 #include <machine/tte.h>
99 #include <machine/tsb.h>
100 #include <machine/ver.h>
101
102 /*
103  * Map of physical memory regions
104  */
105 static struct ofw_mem_region mra[VM_PHYSSEG_MAX];
106 struct ofw_mem_region sparc64_memreg[VM_PHYSSEG_MAX];
107 int sparc64_nmemreg;
108 static struct ofw_map translations[VM_PHYSSEG_MAX];
109 static int translations_size;
110
111 static vm_offset_t pmap_idle_map;
112 static vm_offset_t pmap_temp_map_1;
113 static vm_offset_t pmap_temp_map_2;
114
115 /*
116  * First and last available kernel virtual addresses
117  */
118 vm_offset_t virtual_avail;
119 vm_offset_t virtual_end;
120 vm_offset_t kernel_vm_end;
121
122 vm_offset_t vm_max_kernel_address;
123
124 /*
125  * Kernel pmap
126  */
127 struct pmap kernel_pmap_store;
128
129 struct rwlock_padalign tte_list_global_lock;
130
131 /*
132  * Allocate physical memory for use in pmap_bootstrap.
133  */
134 static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size, uint32_t colors);
135
136 static void pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data);
137 static void pmap_cache_remove(vm_page_t m, vm_offset_t va);
138 static int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2,
139     struct tte *tp, vm_offset_t va);
140 static int pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp,
141     vm_offset_t va);
142 static void pmap_init_qpages(void);
143
144 /*
145  * Map the given physical page at the specified virtual address in the
146  * target pmap with the protection requested.  If specified the page
147  * will be wired down.
148  *
149  * The page queues and pmap must be locked.
150  */
151 static int pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
152     vm_prot_t prot, u_int flags, int8_t psind);
153
154 extern int tl1_dmmu_miss_direct_patch_tsb_phys_1[];
155 extern int tl1_dmmu_miss_direct_patch_tsb_phys_end_1[];
156 extern int tl1_dmmu_miss_patch_asi_1[];
157 extern int tl1_dmmu_miss_patch_quad_ldd_1[];
158 extern int tl1_dmmu_miss_patch_tsb_1[];
159 extern int tl1_dmmu_miss_patch_tsb_2[];
160 extern int tl1_dmmu_miss_patch_tsb_mask_1[];
161 extern int tl1_dmmu_miss_patch_tsb_mask_2[];
162 extern int tl1_dmmu_prot_patch_asi_1[];
163 extern int tl1_dmmu_prot_patch_quad_ldd_1[];
164 extern int tl1_dmmu_prot_patch_tsb_1[];
165 extern int tl1_dmmu_prot_patch_tsb_2[];
166 extern int tl1_dmmu_prot_patch_tsb_mask_1[];
167 extern int tl1_dmmu_prot_patch_tsb_mask_2[];
168 extern int tl1_immu_miss_patch_asi_1[];
169 extern int tl1_immu_miss_patch_quad_ldd_1[];
170 extern int tl1_immu_miss_patch_tsb_1[];
171 extern int tl1_immu_miss_patch_tsb_2[];
172 extern int tl1_immu_miss_patch_tsb_mask_1[];
173 extern int tl1_immu_miss_patch_tsb_mask_2[];
174
175 /*
176  * If user pmap is processed with pmap_remove and the
177  * resident count drops to 0, there are no more pages to remove, so we
178  * need not continue.
179  */
180 #define PMAP_REMOVE_DONE(pm) \
181         ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
182
183 /*
184  * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove()
185  * and pmap_protect() instead of trying each virtual address.
186  */
187 #define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE)
188
189 SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "");
190
191 PMAP_STATS_VAR(pmap_nenter);
192 PMAP_STATS_VAR(pmap_nenter_update);
193 PMAP_STATS_VAR(pmap_nenter_replace);
194 PMAP_STATS_VAR(pmap_nenter_new);
195 PMAP_STATS_VAR(pmap_nkenter);
196 PMAP_STATS_VAR(pmap_nkenter_oc);
197 PMAP_STATS_VAR(pmap_nkenter_stupid);
198 PMAP_STATS_VAR(pmap_nkremove);
199 PMAP_STATS_VAR(pmap_nqenter);
200 PMAP_STATS_VAR(pmap_nqremove);
201 PMAP_STATS_VAR(pmap_ncache_enter);
202 PMAP_STATS_VAR(pmap_ncache_enter_c);
203 PMAP_STATS_VAR(pmap_ncache_enter_oc);
204 PMAP_STATS_VAR(pmap_ncache_enter_cc);
205 PMAP_STATS_VAR(pmap_ncache_enter_coc);
206 PMAP_STATS_VAR(pmap_ncache_enter_nc);
207 PMAP_STATS_VAR(pmap_ncache_enter_cnc);
208 PMAP_STATS_VAR(pmap_ncache_remove);
209 PMAP_STATS_VAR(pmap_ncache_remove_c);
210 PMAP_STATS_VAR(pmap_ncache_remove_oc);
211 PMAP_STATS_VAR(pmap_ncache_remove_cc);
212 PMAP_STATS_VAR(pmap_ncache_remove_coc);
213 PMAP_STATS_VAR(pmap_ncache_remove_nc);
214 PMAP_STATS_VAR(pmap_nzero_page);
215 PMAP_STATS_VAR(pmap_nzero_page_c);
216 PMAP_STATS_VAR(pmap_nzero_page_oc);
217 PMAP_STATS_VAR(pmap_nzero_page_nc);
218 PMAP_STATS_VAR(pmap_nzero_page_area);
219 PMAP_STATS_VAR(pmap_nzero_page_area_c);
220 PMAP_STATS_VAR(pmap_nzero_page_area_oc);
221 PMAP_STATS_VAR(pmap_nzero_page_area_nc);
222 PMAP_STATS_VAR(pmap_ncopy_page);
223 PMAP_STATS_VAR(pmap_ncopy_page_c);
224 PMAP_STATS_VAR(pmap_ncopy_page_oc);
225 PMAP_STATS_VAR(pmap_ncopy_page_nc);
226 PMAP_STATS_VAR(pmap_ncopy_page_dc);
227 PMAP_STATS_VAR(pmap_ncopy_page_doc);
228 PMAP_STATS_VAR(pmap_ncopy_page_sc);
229 PMAP_STATS_VAR(pmap_ncopy_page_soc);
230
231 PMAP_STATS_VAR(pmap_nnew_thread);
232 PMAP_STATS_VAR(pmap_nnew_thread_oc);
233
234 static inline u_long dtlb_get_data(u_int tlb, u_int slot);
235
236 /*
237  * Quick sort callout for comparing memory regions
238  */
239 static int mr_cmp(const void *a, const void *b);
240 static int om_cmp(const void *a, const void *b);
241
242 static int
243 mr_cmp(const void *a, const void *b)
244 {
245         const struct ofw_mem_region *mra;
246         const struct ofw_mem_region *mrb;
247
248         mra = a;
249         mrb = b;
250         if (mra->mr_start < mrb->mr_start)
251                 return (-1);
252         else if (mra->mr_start > mrb->mr_start)
253                 return (1);
254         else
255                 return (0);
256 }
257
258 static int
259 om_cmp(const void *a, const void *b)
260 {
261         const struct ofw_map *oma;
262         const struct ofw_map *omb;
263
264         oma = a;
265         omb = b;
266         if (oma->om_start < omb->om_start)
267                 return (-1);
268         else if (oma->om_start > omb->om_start)
269                 return (1);
270         else
271                 return (0);
272 }
273
274 static inline u_long
275 dtlb_get_data(u_int tlb, u_int slot)
276 {
277         u_long data;
278         register_t s;
279
280         slot = TLB_DAR_SLOT(tlb, slot);
281         /*
282          * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to
283          * work around errata of USIII and beyond.
284          */
285         s = intr_disable();
286         (void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
287         data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG);
288         intr_restore(s);
289         return (data);
290 }
291
292 /*
293  * Bootstrap the system enough to run with virtual memory.
294  */
295 void
296 pmap_bootstrap(u_int cpu_impl)
297 {
298         struct pmap *pm;
299         struct tte *tp;
300         vm_offset_t off;
301         vm_offset_t va;
302         vm_paddr_t pa;
303         vm_size_t physsz;
304         vm_size_t virtsz;
305         u_long data;
306         u_long vpn;
307         phandle_t pmem;
308         phandle_t vmem;
309         u_int dtlb_slots_avail;
310         int i;
311         int j;
312         int sz;
313         uint32_t asi;
314         uint32_t colors;
315         uint32_t ldd;
316
317         /*
318          * Set the kernel context.
319          */
320         pmap_set_kctx();
321
322         colors = dcache_color_ignore != 0 ? 1 : DCACHE_COLORS;
323
324         /*
325          * Find out what physical memory is available from the PROM and
326          * initialize the phys_avail array.  This must be done before
327          * pmap_bootstrap_alloc is called.
328          */
329         if ((pmem = OF_finddevice("/memory")) == -1)
330                 OF_panic("%s: finddevice /memory", __func__);
331         if ((sz = OF_getproplen(pmem, "available")) == -1)
332                 OF_panic("%s: getproplen /memory/available", __func__);
333         if (PHYS_AVAIL_ENTRIES < sz)
334                 OF_panic("%s: phys_avail too small", __func__);
335         if (sizeof(mra) < sz)
336                 OF_panic("%s: mra too small", __func__);
337         bzero(mra, sz);
338         if (OF_getprop(pmem, "available", mra, sz) == -1)
339                 OF_panic("%s: getprop /memory/available", __func__);
340         sz /= sizeof(*mra);
341 #ifdef DIAGNOSTIC
342         OF_printf("pmap_bootstrap: physical memory\n");
343 #endif
344         qsort(mra, sz, sizeof (*mra), mr_cmp);
345         physsz = 0;
346         getenv_quad("hw.physmem", &physmem);
347         physmem = btoc(physmem);
348         for (i = 0, j = 0; i < sz; i++, j += 2) {
349 #ifdef DIAGNOSTIC
350                 OF_printf("start=%#lx size=%#lx\n", mra[i].mr_start,
351                     mra[i].mr_size);
352 #endif
353                 if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) {
354                         if (btoc(physsz) < physmem) {
355                                 phys_avail[j] = mra[i].mr_start;
356                                 phys_avail[j + 1] = mra[i].mr_start +
357                                     (ctob(physmem) - physsz);
358                                 physsz = ctob(physmem);
359                         }
360                         break;
361                 }
362                 phys_avail[j] = mra[i].mr_start;
363                 phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
364                 physsz += mra[i].mr_size;
365         }
366         physmem = btoc(physsz);
367
368         /*
369          * Calculate the size of kernel virtual memory, and the size and mask
370  * for the kernel TSB based on the physical memory size but limited
371          * by the amount of dTLB slots available for locked entries if we have
372          * to lock the TSB in the TLB (given that for spitfire-class CPUs all
373          * of the dt64 slots can hold locked entries but there is no large
374          * dTLB for unlocked ones, we don't use more than half of it for the
375          * TSB).
376          * Note that for reasons unknown OpenSolaris doesn't take advantage of
377          * ASI_ATOMIC_QUAD_LDD_PHYS on UltraSPARC-III.  However, given that no
378          * public documentation is available for these, the latter just might
379          * not support it, yet.
380          */
381         if (cpu_impl == CPU_IMPL_SPARC64V ||
382             cpu_impl >= CPU_IMPL_ULTRASPARCIIIp) {
383                 tsb_kernel_ldd_phys = 1;
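                /*
                 * Aim for a KVA size of 5/3 of the physical memory size;
                 * the multiplication must come first so that the integer
                 * division does not truncate the factor to 1.
                 */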
384                 virtsz = roundup(physsz * 5 / 3, PAGE_SIZE_4M <<
385                     (PAGE_SHIFT - TTE_SHIFT));
386         } else {
387                 dtlb_slots_avail = 0;
388                 for (i = 0; i < dtlb_slots; i++) {
389                         data = dtlb_get_data(cpu_impl ==
390                             CPU_IMPL_ULTRASPARCIII ? TLB_DAR_T16 :
391                             TLB_DAR_T32, i);
392                         if ((data & (TD_V | TD_L)) != (TD_V | TD_L))
393                                 dtlb_slots_avail++;
394                 }
395 #ifdef SMP
396                 dtlb_slots_avail -= PCPU_PAGES;
397 #endif
398                 if (cpu_impl >= CPU_IMPL_ULTRASPARCI &&
399                     cpu_impl < CPU_IMPL_ULTRASPARCIII)
400                         dtlb_slots_avail /= 2;
401                 virtsz = roundup(physsz, PAGE_SIZE_4M <<
402                     (PAGE_SHIFT - TTE_SHIFT));
403                 virtsz = MIN(virtsz, (dtlb_slots_avail * PAGE_SIZE_4M) <<
404                     (PAGE_SHIFT - TTE_SHIFT));
405         }
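        /*
         * The kernel TSB needs one TTE (1 << TTE_SHIFT bytes) for every 8K
         * page of KVA, hence the size computed below; tsb_kernel_mask is an
         * index into that TTE array, not a byte offset.
         */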
406         vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
407         tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
408         tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
409
410         /*
411          * Allocate the kernel TSB and lock it in the TLB if necessary.
412          */
413         pa = pmap_bootstrap_alloc(tsb_kernel_size, colors);
414         if (pa & PAGE_MASK_4M)
415                 OF_panic("%s: TSB unaligned", __func__);
416         tsb_kernel_phys = pa;
417         if (tsb_kernel_ldd_phys == 0) {
418                 tsb_kernel =
419                     (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
420                 pmap_map_tsb();
421                 bzero(tsb_kernel, tsb_kernel_size);
422         } else {
423                 tsb_kernel =
424                     (struct tte *)TLB_PHYS_TO_DIRECT(tsb_kernel_phys);
425                 aszero(ASI_PHYS_USE_EC, tsb_kernel_phys, tsb_kernel_size);
426         }
427
428         /*
429          * Allocate and map the dynamic per-CPU area for the BSP.
430          */
431         pa = pmap_bootstrap_alloc(DPCPU_SIZE, colors);
432         dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pa);
433
434         /*
435          * Allocate and map the message buffer.
436          */
437         pa = pmap_bootstrap_alloc(msgbufsize, colors);
438         msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa);
439
440         /*
441          * Patch the TSB addresses and mask as well as the ASIs used to load
442          * it into the trap table.
443          */
444
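/*
 * These macros reconstruct the expected encodings of the instructions at the
 * patch sites so that the PATCH_* macros below can verify that the code to
 * be patched still looks as expected before OR-ing in the immediate fields.
 */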
445 #define LDDA_R_I_R(rd, imm_asi, rs1, rs2)                               \
446         (EIF_OP(IOP_LDST) | EIF_F3_RD(rd) | EIF_F3_OP3(INS3_LDDA) |     \
447             EIF_F3_RS1(rs1) | EIF_F3_I(0) | EIF_F3_IMM_ASI(imm_asi) |   \
448             EIF_F3_RS2(rs2))
449 #define OR_R_I_R(rd, imm13, rs1)                                        \
450         (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) |       \
451             EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
452 #define SETHI(rd, imm22)                                                \
453         (EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) |   \
454             EIF_IMM((imm22) >> 10, 22))
455 #define WR_R_I(rd, imm13, rs1)                                          \
456         (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_WR) |       \
457             EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
458
459 #define PATCH_ASI(addr, asi) do {                                       \
460         if (addr[0] != WR_R_I(IF_F3_RD(addr[0]), 0x0,                   \
461             IF_F3_RS1(addr[0])))                                        \
462                 OF_panic("%s: patched instructions have changed",       \
463                     __func__);                                          \
464         addr[0] |= EIF_IMM((asi), 13);                                  \
465         flush(addr);                                                    \
466 } while (0)
467
468 #define PATCH_LDD(addr, asi) do {                                       \
469         if (addr[0] != LDDA_R_I_R(IF_F3_RD(addr[0]), 0x0,               \
470             IF_F3_RS1(addr[0]), IF_F3_RS2(addr[0])))                    \
471                 OF_panic("%s: patched instructions have changed",       \
472                     __func__);                                          \
473         addr[0] |= EIF_F3_IMM_ASI(asi);                                 \
474         flush(addr);                                                    \
475 } while (0)
476
477 #define PATCH_TSB(addr, val) do {                                       \
478         if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) ||                 \
479             addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0,                 \
480             IF_F3_RS1(addr[1])) ||                                      \
481             addr[3] != SETHI(IF_F2_RD(addr[3]), 0x0))                   \
482                 OF_panic("%s: patched instructions have changed",       \
483                     __func__);                                          \
484         addr[0] |= EIF_IMM((val) >> 42, 22);                            \
485         addr[1] |= EIF_IMM((val) >> 32, 10);                            \
486         addr[3] |= EIF_IMM((val) >> 10, 22);                            \
487         flush(addr);                                                    \
488         flush(addr + 1);                                                \
489         flush(addr + 3);                                                \
490 } while (0)
491
492 #define PATCH_TSB_MASK(addr, val) do {                                  \
493         if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) ||                 \
494             addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0,                 \
495             IF_F3_RS1(addr[1])))                                        \
496                 OF_panic("%s: patched instructions have changed",       \
497                     __func__);                                          \
498         addr[0] |= EIF_IMM((val) >> 10, 22);                            \
499         addr[1] |= EIF_IMM((val), 10);                                  \
500         flush(addr);                                                    \
501         flush(addr + 1);                                                \
502 } while (0)
503
504         if (tsb_kernel_ldd_phys == 0) {
505                 asi = ASI_N;
506                 ldd = ASI_NUCLEUS_QUAD_LDD;
507                 off = (vm_offset_t)tsb_kernel;
508         } else {
509                 asi = ASI_PHYS_USE_EC;
510                 ldd = ASI_ATOMIC_QUAD_LDD_PHYS;
511                 off = (vm_offset_t)tsb_kernel_phys;
512         }
513         PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_1, tsb_kernel_phys);
514         PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_end_1,
515             tsb_kernel_phys + tsb_kernel_size - 1);
516         PATCH_ASI(tl1_dmmu_miss_patch_asi_1, asi);
517         PATCH_LDD(tl1_dmmu_miss_patch_quad_ldd_1, ldd);
518         PATCH_TSB(tl1_dmmu_miss_patch_tsb_1, off);
519         PATCH_TSB(tl1_dmmu_miss_patch_tsb_2, off);
520         PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_1, tsb_kernel_mask);
521         PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_2, tsb_kernel_mask);
522         PATCH_ASI(tl1_dmmu_prot_patch_asi_1, asi);
523         PATCH_LDD(tl1_dmmu_prot_patch_quad_ldd_1, ldd);
524         PATCH_TSB(tl1_dmmu_prot_patch_tsb_1, off);
525         PATCH_TSB(tl1_dmmu_prot_patch_tsb_2, off);
526         PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_1, tsb_kernel_mask);
527         PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_2, tsb_kernel_mask);
528         PATCH_ASI(tl1_immu_miss_patch_asi_1, asi);
529         PATCH_LDD(tl1_immu_miss_patch_quad_ldd_1, ldd);
530         PATCH_TSB(tl1_immu_miss_patch_tsb_1, off);
531         PATCH_TSB(tl1_immu_miss_patch_tsb_2, off);
532         PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_1, tsb_kernel_mask);
533         PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_2, tsb_kernel_mask);
534
535         /*
536          * Enter fake 8k pages for the 4MB kernel pages, so that
537          * pmap_kextract() will work for them.
538          */
539         for (i = 0; i < kernel_tlb_slots; i++) {
540                 pa = kernel_tlbs[i].te_pa;
541                 va = kernel_tlbs[i].te_va;
542                 for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
543                         tp = tsb_kvtotte(va + off);
544                         vpn = TV_VPN(va + off, TS_8K);
545                         data = TD_V | TD_8K | TD_PA(pa + off) | TD_REF |
546                             TD_SW | TD_CP | TD_CV | TD_P | TD_W;
547                         pmap_bootstrap_set_tte(tp, vpn, data);
548                 }
549         }
550
551         /*
552          * Set the start and end of KVA.  The kernel is loaded starting
553          * at the first available 4MB super page, so we advance to the
554          * end of the last one used for it.
555          */
556         virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M;
557         virtual_end = vm_max_kernel_address;
558         kernel_vm_end = vm_max_kernel_address;
559
560         /*
561          * Allocate kva space for temporary mappings.
562          */
563         pmap_idle_map = virtual_avail;
564         virtual_avail += PAGE_SIZE * colors;
565         pmap_temp_map_1 = virtual_avail;
566         virtual_avail += PAGE_SIZE * colors;
567         pmap_temp_map_2 = virtual_avail;
568         virtual_avail += PAGE_SIZE * colors;
569
570         /*
571          * Allocate a kernel stack with guard page for thread0 and map it
572          * into the kernel TSB.  We must ensure that the virtual address is
573          * colored properly for corresponding CPUs, since we're allocating
574          * from phys_avail so the memory won't have an associated vm_page_t.
575          */
576         pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, colors);
577         kstack0_phys = pa;
578         virtual_avail += roundup(KSTACK_GUARD_PAGES, colors) * PAGE_SIZE;
579         kstack0 = virtual_avail;
580         virtual_avail += roundup(KSTACK_PAGES, colors) * PAGE_SIZE;
581         if (dcache_color_ignore == 0)
582                 KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys),
583                     ("pmap_bootstrap: kstack0 miscolored"));
584         for (i = 0; i < KSTACK_PAGES; i++) {
585                 pa = kstack0_phys + i * PAGE_SIZE;
586                 va = kstack0 + i * PAGE_SIZE;
587                 tp = tsb_kvtotte(va);
588                 vpn = TV_VPN(va, TS_8K);
589                 data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP |
590                     TD_CV | TD_P | TD_W;
591                 pmap_bootstrap_set_tte(tp, vpn, data);
592         }
593
594         /*
595          * Calculate the last available physical address.
596          */
597         for (i = 0; phys_avail[i + 2] != 0; i += 2)
598                 ;
599         Maxmem = sparc64_btop(phys_avail[i + 1]);
600
601         /*
602          * Add the PROM mappings to the kernel TSB.
603          */
604         if ((vmem = OF_finddevice("/virtual-memory")) == -1)
605                 OF_panic("%s: finddevice /virtual-memory", __func__);
606         if ((sz = OF_getproplen(vmem, "translations")) == -1)
607                 OF_panic("%s: getproplen translations", __func__);
608         if (sizeof(translations) < sz)
609                 OF_panic("%s: translations too small", __func__);
610         bzero(translations, sz);
611         if (OF_getprop(vmem, "translations", translations, sz) == -1)
612                 OF_panic("%s: getprop /virtual-memory/translations",
613                     __func__);
614         sz /= sizeof(*translations);
615         translations_size = sz;
616 #ifdef DIAGNOSTIC
617         OF_printf("pmap_bootstrap: translations\n");
618 #endif
619         qsort(translations, sz, sizeof (*translations), om_cmp);
620         for (i = 0; i < sz; i++) {
621 #ifdef DIAGNOSTIC
622                 OF_printf("translation: start=%#lx size=%#lx tte=%#lx\n",
623                     translations[i].om_start, translations[i].om_size,
624                     translations[i].om_tte);
625 #endif
626                 if ((translations[i].om_tte & TD_V) == 0)
627                         continue;
628                 if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
629                     translations[i].om_start > VM_MAX_PROM_ADDRESS)
630                         continue;
631                 for (off = 0; off < translations[i].om_size;
632                     off += PAGE_SIZE) {
633                         va = translations[i].om_start + off;
634                         tp = tsb_kvtotte(va);
635                         vpn = TV_VPN(va, TS_8K);
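                        /*
                         * Use the PROM TTE stripped of its software bits
                         * and of the CPU-specific diagnostic/reserved bits,
                         * force execute permission and advance the PA by
                         * the offset into the mapping.
                         */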
636                         data = ((translations[i].om_tte &
637                             ~((TD_SOFT2_MASK << TD_SOFT2_SHIFT) |
638                             (cpu_impl >= CPU_IMPL_ULTRASPARCI &&
639                             cpu_impl < CPU_IMPL_ULTRASPARCIII ?
640                             (TD_DIAG_SF_MASK << TD_DIAG_SF_SHIFT) :
641                             (TD_RSVD_CH_MASK << TD_RSVD_CH_SHIFT)) |
642                             (TD_SOFT_MASK << TD_SOFT_SHIFT))) | TD_EXEC) +
643                             off;
644                         pmap_bootstrap_set_tte(tp, vpn, data);
645                 }
646         }
647
648         /*
649          * Get the available physical memory ranges from /memory/reg.  These
650          * are only used for kernel dumps, but it may not be wise to do PROM
651          * calls in that situation.
652          */
653         if ((sz = OF_getproplen(pmem, "reg")) == -1)
654                 OF_panic("%s: getproplen /memory/reg", __func__);
655         if (sizeof(sparc64_memreg) < sz)
656                 OF_panic("%s: sparc64_memreg too small", __func__);
657         if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
658                 OF_panic("%s: getprop /memory/reg", __func__);
659         sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
660
661         /*
662          * Initialize the kernel pmap (which is statically allocated).
663          */
664         pm = kernel_pmap;
665         PMAP_LOCK_INIT(pm);
666         for (i = 0; i < MAXCPU; i++)
667                 pm->pm_context[i] = TLB_CTX_KERNEL;
668         CPU_FILL(&pm->pm_active);
669
670         /*
671          * Initialize the global tte list lock, which is more commonly
672          * known as the pmap pv global lock.
673          */
674         rw_init(&tte_list_global_lock, "pmap pv global");
675
676         /*
677          * Flush all non-locked TLB entries possibly left over by the
678          * firmware.
679          */
680         tlb_flush_nonlocked();
681 }
682
683 static void
684 pmap_init_qpages(void)
685 {
686         struct pcpu *pc;
687         int i;
688
689         if (dcache_color_ignore != 0)
690                 return;
691
692         CPU_FOREACH(i) {
693                 pc = pcpu_find(i);
694                 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE * DCACHE_COLORS);
695                 if (pc->pc_qmap_addr == 0)
696                         panic("pmap_init_qpages: unable to allocate KVA");
697         }
698 }
699
700 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL);
701
702 /*
703  * Map the 4MB kernel TSB pages.
704  */
705 void
706 pmap_map_tsb(void)
707 {
708         vm_offset_t va;
709         vm_paddr_t pa;
710         u_long data;
711         int i;
712
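        /*
         * Install locked (TD_L) 4M dTLB entries for the TSB: write the tag
         * via the MMU tag access register, then push the mapping in through
         * the data-in register.
         */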
713         for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
714                 va = (vm_offset_t)tsb_kernel + i;
715                 pa = tsb_kernel_phys + i;
716                 data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
717                     TD_P | TD_W;
718                 stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) |
719                     TLB_TAR_CTX(TLB_CTX_KERNEL));
720                 stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
721         }
722 }
723
724 /*
725  * Set the secondary context to be the kernel context (needed for FP block
726  * operations in the kernel).
727  */
728 void
729 pmap_set_kctx(void)
730 {
731
732         stxa(AA_DMMU_SCXR, ASI_DMMU, (ldxa(AA_DMMU_SCXR, ASI_DMMU) &
733             TLB_CXR_PGSZ_MASK) | TLB_CTX_KERNEL);
734         flush(KERNBASE);
735 }
736
737 /*
738  * Allocate a physical page of memory directly from the phys_avail map.
739  * Can only be called from pmap_bootstrap before avail start and end are
740  * calculated.
741  */
742 static vm_paddr_t
743 pmap_bootstrap_alloc(vm_size_t size, uint32_t colors)
744 {
745         vm_paddr_t pa;
746         int i;
747
748         size = roundup(size, PAGE_SIZE * colors);
749         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
750                 if (phys_avail[i + 1] - phys_avail[i] < size)
751                         continue;
752                 pa = phys_avail[i];
753                 phys_avail[i] += size;
754                 return (pa);
755         }
756         OF_panic("%s: no suitable region found", __func__);
757 }
758
759 /*
760  * Set a TTE.  This function is intended as a helper when tsb_kernel is
761  * direct-mapped but we haven't taken over the trap table yet, as is the
762  * case when we are taking advantage of ASI_ATOMIC_QUAD_LDD_PHYS to access
763  * the kernel TSB.
764  */
765 void
766 pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data)
767 {
768
769         if (tsb_kernel_ldd_phys == 0) {
770                 tp->tte_vpn = vpn;
771                 tp->tte_data = data;
772         } else {
773                 stxa((vm_paddr_t)tp + offsetof(struct tte, tte_vpn),
774                     ASI_PHYS_USE_EC, vpn);
775                 stxa((vm_paddr_t)tp + offsetof(struct tte, tte_data),
776                     ASI_PHYS_USE_EC, data);
777         }
778 }
779
780 /*
781  * Initialize a vm_page's machine-dependent fields.
782  */
783 void
784 pmap_page_init(vm_page_t m)
785 {
786
787         TAILQ_INIT(&m->md.tte_list);
788         m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
789         m->md.pmap = NULL;
790 }
791
792 /*
793  * Initialize the pmap module.
794  */
795 void
796 pmap_init(void)
797 {
798         vm_offset_t addr;
799         vm_size_t size;
800         int result;
801         int i;
802
803         for (i = 0; i < translations_size; i++) {
804                 addr = translations[i].om_start;
805                 size = translations[i].om_size;
806                 if ((translations[i].om_tte & TD_V) == 0)
807                         continue;
808                 if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS)
809                         continue;
810                 result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
811                     VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
812                 if (result != KERN_SUCCESS || addr != translations[i].om_start)
813                         panic("pmap_init: vm_map_find");
814         }
815 }
816
817 /*
818  * Extract the physical page address associated with the given
819  * map/virtual_address pair.
820  */
821 vm_paddr_t
822 pmap_extract(pmap_t pm, vm_offset_t va)
823 {
824         struct tte *tp;
825         vm_paddr_t pa;
826
827         if (pm == kernel_pmap)
828                 return (pmap_kextract(va));
829         PMAP_LOCK(pm);
830         tp = tsb_tte_lookup(pm, va);
831         if (tp == NULL)
832                 pa = 0;
833         else
834                 pa = TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp));
835         PMAP_UNLOCK(pm);
836         return (pa);
837 }
838
839 /*
840  * Atomically extract and hold the physical page with the given
841  * pmap and virtual address pair if that mapping permits the given
842  * protection.
843  */
844 vm_page_t
845 pmap_extract_and_hold(pmap_t pm, vm_offset_t va, vm_prot_t prot)
846 {
847         struct tte *tp;
848         vm_page_t m;
849         vm_paddr_t pa;
850
851         m = NULL;
852         pa = 0;
853         PMAP_LOCK(pm);
854 retry:
855         if (pm == kernel_pmap) {
856                 if (va >= VM_MIN_DIRECT_ADDRESS) {
857                         tp = NULL;
858                         m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va));
859                         (void)vm_page_pa_tryrelock(pm, TLB_DIRECT_TO_PHYS(va),
860                             &pa);
861                         vm_page_wire(m);
862                 } else {
863                         tp = tsb_kvtotte(va);
864                         if ((tp->tte_data & TD_V) == 0)
865                                 tp = NULL;
866                 }
867         } else
868                 tp = tsb_tte_lookup(pm, va);
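        /*
         * Only wire the page if the mapping permits the requested access.
         * vm_page_pa_tryrelock() may have to drop the pmap lock in order to
         * acquire the page lock; if it did, the TTE may be stale and the
         * lookup is redone.
         */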
869         if (tp != NULL && ((tp->tte_data & TD_SW) ||
870             (prot & VM_PROT_WRITE) == 0)) {
871                 if (vm_page_pa_tryrelock(pm, TTE_GET_PA(tp), &pa))
872                         goto retry;
873                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
874                 vm_page_wire(m);
875         }
876         PA_UNLOCK_COND(pa);
877         PMAP_UNLOCK(pm);
878         return (m);
879 }
880
881 /*
882  * Extract the physical page address associated with the given kernel virtual
883  * address.
884  */
885 vm_paddr_t
886 pmap_kextract(vm_offset_t va)
887 {
888         struct tte *tp;
889
890         if (va >= VM_MIN_DIRECT_ADDRESS)
891                 return (TLB_DIRECT_TO_PHYS(va));
892         tp = tsb_kvtotte(va);
893         if ((tp->tte_data & TD_V) == 0)
894                 return (0);
895         return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
896 }
897
898 int
899 pmap_cache_enter(vm_page_t m, vm_offset_t va)
900 {
901         struct tte *tp;
902         int color;
903
904         rw_assert(&tte_list_global_lock, RA_WLOCKED);
905         KASSERT((m->flags & PG_FICTITIOUS) == 0,
906             ("pmap_cache_enter: fake page"));
907         PMAP_STATS_INC(pmap_ncache_enter);
908
909         if (dcache_color_ignore != 0)
910                 return (1);
911
912         /*
913          * Find the color for this virtual address and note the added mapping.
914          */
915         color = DCACHE_COLOR(va);
916         m->md.colors[color]++;
917
918         /*
919          * If all existing mappings have the same color, the mapping is
920          * cacheable.
921          */
922         if (m->md.color == color) {
923                 KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
924                     ("pmap_cache_enter: cacheable, mappings of other color"));
925                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
926                         PMAP_STATS_INC(pmap_ncache_enter_c);
927                 else
928                         PMAP_STATS_INC(pmap_ncache_enter_oc);
929                 return (1);
930         }
931
932         /*
933          * If there are no mappings of the other color, and the page still has
934          * the wrong color, this must be a new mapping.  Change the color to
935          * match the new mapping, which is cacheable.  We must flush the page
936          * from the cache now.
937          */
938         if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) {
939                 KASSERT(m->md.colors[color] == 1,
940                     ("pmap_cache_enter: changing color, not new mapping"));
941                 dcache_page_inval(VM_PAGE_TO_PHYS(m));
942                 m->md.color = color;
943                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
944                         PMAP_STATS_INC(pmap_ncache_enter_cc);
945                 else
946                         PMAP_STATS_INC(pmap_ncache_enter_coc);
947                 return (1);
948         }
949
950         /*
951          * If the mapping is already non-cacheable, just return.
952          */
953         if (m->md.color == -1) {
954                 PMAP_STATS_INC(pmap_ncache_enter_nc);
955                 return (0);
956         }
957
958         PMAP_STATS_INC(pmap_ncache_enter_cnc);
959
960         /*
961          * Mark all mappings as uncacheable, flush any lines with the other
962          * color out of the dcache, and set the color to none (-1).
963          */
964         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
965                 atomic_clear_long(&tp->tte_data, TD_CV);
966                 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
967         }
968         dcache_page_inval(VM_PAGE_TO_PHYS(m));
969         m->md.color = -1;
970         return (0);
971 }
972
973 static void
974 pmap_cache_remove(vm_page_t m, vm_offset_t va)
975 {
976         struct tte *tp;
977         int color;
978
979         rw_assert(&tte_list_global_lock, RA_WLOCKED);
980         CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
981             m->md.colors[DCACHE_COLOR(va)]);
982         KASSERT((m->flags & PG_FICTITIOUS) == 0,
983             ("pmap_cache_remove: fake page"));
984         PMAP_STATS_INC(pmap_ncache_remove);
985
986         if (dcache_color_ignore != 0)
987                 return;
988
989         KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
990             ("pmap_cache_remove: no mappings %d <= 0",
991             m->md.colors[DCACHE_COLOR(va)]));
992
993         /*
994          * Find the color for this virtual address and note the removal of
995          * the mapping.
996          */
997         color = DCACHE_COLOR(va);
998         m->md.colors[color]--;
999
1000         /*
1001          * If the page is cacheable, just return and keep the same color, even
1002          * if there are no longer any mappings.
1003          */
1004         if (m->md.color != -1) {
1005                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
1006                         PMAP_STATS_INC(pmap_ncache_remove_c);
1007                 else
1008                         PMAP_STATS_INC(pmap_ncache_remove_oc);
1009                 return;
1010         }
1011
1012         KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0,
1013             ("pmap_cache_remove: uncacheable, no mappings of other color"));
1014
1015         /*
1016          * If the page is not cacheable (color is -1), and the number of
1017          * mappings for this color is not zero, just return.  There are
1018          * mappings of the other color still, so remain non-cacheable.
1019          */
1020         if (m->md.colors[color] != 0) {
1021                 PMAP_STATS_INC(pmap_ncache_remove_nc);
1022                 return;
1023         }
1024
1025         /*
1026          * The number of mappings for this color is now zero.  Recache the
1027          * other colored mappings, and change the page color to the other
1028          * color.  There should be no lines in the data cache for this page,
1029          * so flushing should not be needed.
1030          */
1031         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1032                 atomic_set_long(&tp->tte_data, TD_CV);
1033                 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1034         }
1035         m->md.color = DCACHE_OTHER_COLOR(color);
1036
1037         if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
1038                 PMAP_STATS_INC(pmap_ncache_remove_cc);
1039         else
1040                 PMAP_STATS_INC(pmap_ncache_remove_coc);
1041 }
1042
1043 /*
1044  * Map a wired page into kernel virtual address space.
1045  */
1046 void
1047 pmap_kenter(vm_offset_t va, vm_page_t m)
1048 {
1049         vm_offset_t ova;
1050         struct tte *tp;
1051         vm_page_t om;
1052         u_long data;
1053
1054         rw_assert(&tte_list_global_lock, RA_WLOCKED);
1055         PMAP_STATS_INC(pmap_nkenter);
1056         tp = tsb_kvtotte(va);
1057         CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
1058             va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data);
1059         if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) {
1060                 CTR5(KTR_SPARE2,
1061         "pmap_kenter: off color va=%#lx pa=%#lx o=%p ot=%d pi=%#lx",
1062                     va, VM_PAGE_TO_PHYS(m), m->object,
1063                     m->object ? m->object->type : -1,
1064                     m->pindex);
1065                 PMAP_STATS_INC(pmap_nkenter_oc);
1066         }
1067         if ((tp->tte_data & TD_V) != 0) {
1068                 om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1069                 ova = TTE_GET_VA(tp);
1070                 if (m == om && va == ova) {
1071                         PMAP_STATS_INC(pmap_nkenter_stupid);
1072                         return;
1073                 }
1074                 TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
1075                 pmap_cache_remove(om, ova);
1076                 if (va != ova)
1077                         tlb_page_demap(kernel_pmap, ova);
1078         }
1079         data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP |
1080             TD_P | TD_W;
1081         if (pmap_cache_enter(m, va) != 0)
1082                 data |= TD_CV;
1083         tp->tte_vpn = TV_VPN(va, TS_8K);
1084         tp->tte_data = data;
1085         TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
1086 }
1087
1088 /*
1089  * Map a wired page into kernel virtual address space.  This additionally
1090  * takes a flag argument which is or'ed to the TTE data.  This is used by
1091  * sparc64_bus_mem_map().
1092  * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
1093  * to flush entries that might still be in the cache, if applicable.
1094  */
1095 void
1096 pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags)
1097 {
1098         struct tte *tp;
1099
1100         tp = tsb_kvtotte(va);
1101         CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
1102             va, pa, tp, tp->tte_data);
1103         tp->tte_vpn = TV_VPN(va, TS_8K);
1104         tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
1105 }
1106
1107 /*
1108  * Remove a wired page from kernel virtual address space.
1109  */
1110 void
1111 pmap_kremove(vm_offset_t va)
1112 {
1113         struct tte *tp;
1114         vm_page_t m;
1115
1116         rw_assert(&tte_list_global_lock, RA_WLOCKED);
1117         PMAP_STATS_INC(pmap_nkremove);
1118         tp = tsb_kvtotte(va);
1119         CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
1120             tp->tte_data);
1121         if ((tp->tte_data & TD_V) == 0)
1122                 return;
1123         m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1124         TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1125         pmap_cache_remove(m, va);
1126         TTE_ZERO(tp);
1127 }
1128
1129 /*
1130  * Inverse of pmap_kenter_flags, used by bus_space_unmap().
1131  */
1132 void
1133 pmap_kremove_flags(vm_offset_t va)
1134 {
1135         struct tte *tp;
1136
1137         tp = tsb_kvtotte(va);
1138         CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp,
1139             tp->tte_data);
1140         TTE_ZERO(tp);
1141 }
1142
1143 /*
1144  * Map a range of physical addresses into kernel virtual address space.
1145  *
1146  * The value passed in *virt is a suggested virtual address for the mapping.
1147  * Architectures which can support a direct-mapped physical to virtual region
1148  * can return the appropriate address within that region, leaving '*virt'
1149  * unchanged.
1150  */
1151 vm_offset_t
1152 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1153 {
1154
1155         return (TLB_PHYS_TO_DIRECT(start));
1156 }
1157
1158 /*
1159  * Map a list of wired pages into kernel virtual address space.  This is
1160  * intended for temporary mappings which do not need page modification or
1161  * references recorded.  Existing mappings in the region are overwritten.
1162  */
1163 void
1164 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
1165 {
1166         vm_offset_t va;
1167
1168         PMAP_STATS_INC(pmap_nqenter);
1169         va = sva;
1170         rw_wlock(&tte_list_global_lock);
1171         while (count-- > 0) {
1172                 pmap_kenter(va, *m);
1173                 va += PAGE_SIZE;
1174                 m++;
1175         }
1176         rw_wunlock(&tte_list_global_lock);
1177         tlb_range_demap(kernel_pmap, sva, va);
1178 }
1179
1180 /*
1181  * Remove page mappings from kernel virtual address space.  Intended for
1182  * temporary mappings entered by pmap_qenter.
1183  */
1184 void
1185 pmap_qremove(vm_offset_t sva, int count)
1186 {
1187         vm_offset_t va;
1188
1189         PMAP_STATS_INC(pmap_nqremove);
1190         va = sva;
1191         rw_wlock(&tte_list_global_lock);
1192         while (count-- > 0) {
1193                 pmap_kremove(va);
1194                 va += PAGE_SIZE;
1195         }
1196         rw_wunlock(&tte_list_global_lock);
1197         tlb_range_demap(kernel_pmap, sva, va);
1198 }
1199
1200 /*
1201  * Initialize the pmap associated with process 0.
1202  */
1203 void
1204 pmap_pinit0(pmap_t pm)
1205 {
1206         int i;
1207
1208         PMAP_LOCK_INIT(pm);
1209         for (i = 0; i < MAXCPU; i++)
1210                 pm->pm_context[i] = TLB_CTX_KERNEL;
1211         CPU_ZERO(&pm->pm_active);
1212         pm->pm_tsb = NULL;
1213         pm->pm_tsb_obj = NULL;
1214         bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1215 }
1216
1217 /*
1218  * Initialize a preallocated and zeroed pmap structure, such as one in a
1219  * vmspace structure.
1220  */
1221 int
1222 pmap_pinit(pmap_t pm)
1223 {
1224         vm_page_t ma[TSB_PAGES];
1225         int i;
1226
1227         /*
1228          * Allocate KVA space for the TSB.
1229          */
1230         if (pm->pm_tsb == NULL) {
1231                 pm->pm_tsb = (struct tte *)kva_alloc(TSB_BSIZE);
1232                 if (pm->pm_tsb == NULL)
1233                         return (0);
1234         }
1235
1236         /*
1237          * Allocate an object for it.
1238          */
1239         if (pm->pm_tsb_obj == NULL)
1240                 pm->pm_tsb_obj = vm_object_allocate(OBJT_PHYS, TSB_PAGES);
1241
1242         for (i = 0; i < MAXCPU; i++)
1243                 pm->pm_context[i] = -1;
1244         CPU_ZERO(&pm->pm_active);
1245
1246         VM_OBJECT_WLOCK(pm->pm_tsb_obj);
1247         (void)vm_page_grab_pages(pm->pm_tsb_obj, 0, VM_ALLOC_NORMAL |
1248             VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO, ma, TSB_PAGES);
1249         VM_OBJECT_WUNLOCK(pm->pm_tsb_obj);
1250         for (i = 0; i < TSB_PAGES; i++)
1251                 ma[i]->md.pmap = pm;
1252         pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
1253
1254         bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1255         return (1);
1256 }
1257
1258 /*
1259  * Release any resources held by the given physical map.
1260  * Called when a pmap initialized by pmap_pinit is being released.
1261  * Should only be called if the map contains no valid mappings.
1262  */
1263 void
1264 pmap_release(pmap_t pm)
1265 {
1266         vm_object_t obj;
1267         vm_page_t m;
1268 #ifdef SMP
1269         struct pcpu *pc;
1270 #endif
1271
1272         CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
1273             pm->pm_context[curcpu], pm->pm_tsb);
1274         KASSERT(pmap_resident_count(pm) == 0,
1275             ("pmap_release: resident pages %ld != 0",
1276             pmap_resident_count(pm)));
1277
1278         /*
1279          * After the pmap was freed, it might be reallocated to a new process.
1280          * When switching, this might lead us to wrongly assume that we need
1281          * not switch contexts because old and new pmap pointer are equal.
1282          * Therefore, make sure that this pmap is not referenced by any PCPU
1283          * pointer any more.  This could happen in two cases:
1284          * - A process that referenced the pmap is currently exiting on a CPU.
1285          *   However, it is guaranteed to not switch in any more after setting
1286          *   its state to PRS_ZOMBIE.
1287          * - A process that referenced this pmap ran on a CPU, but we switched
1288          *   to a kernel thread, leaving the pmap pointer unchanged.
1289          */
1290 #ifdef SMP
1291         sched_pin();
1292         STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
1293                 atomic_cmpset_rel_ptr((uintptr_t *)&pc->pc_pmap,
1294                     (uintptr_t)pm, (uintptr_t)NULL);
1295         sched_unpin();
1296 #else
1297         critical_enter();
1298         if (PCPU_GET(pmap) == pm)
1299                 PCPU_SET(pmap, NULL);
1300         critical_exit();
1301 #endif
1302
1303         pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
1304         obj = pm->pm_tsb_obj;
1305         VM_OBJECT_WLOCK(obj);
1306         KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
1307         while (!TAILQ_EMPTY(&obj->memq)) {
1308                 m = TAILQ_FIRST(&obj->memq);
1309                 m->md.pmap = NULL;
1310                 vm_page_unwire_noq(m);
1311                 vm_page_free_zero(m);
1312         }
1313         VM_OBJECT_WUNLOCK(obj);
1314 }
1315
1316 /*
1317  * Grow the number of kernel page table entries.  Unneeded.
1318  */
1319 void
1320 pmap_growkernel(vm_offset_t addr)
1321 {
1322
1323         panic("pmap_growkernel: can't grow kernel");
1324 }
1325
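/*
 * Remove a single TTE; called directly from pmap_remove() or indirectly via
 * tsb_foreach().  Returns 0 as soon as a user pmap has no resident pages
 * left so that the caller can stop early.
 */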
1326 int
1327 pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1328     vm_offset_t va)
1329 {
1330         vm_page_t m;
1331         u_long data;
1332
1333         rw_assert(&tte_list_global_lock, RA_WLOCKED);
1334         data = atomic_readandclear_long(&tp->tte_data);
1335         if ((data & TD_FAKE) == 0) {
1336                 m = PHYS_TO_VM_PAGE(TD_PA(data));
1337                 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1338                 if ((data & TD_WIRED) != 0)
1339                         pm->pm_stats.wired_count--;
1340                 if ((data & TD_PV) != 0) {
1341                         if ((data & TD_W) != 0)
1342                                 vm_page_dirty(m);
1343                         if ((data & TD_REF) != 0)
1344                                 vm_page_aflag_set(m, PGA_REFERENCED);
1345                         if (TAILQ_EMPTY(&m->md.tte_list))
1346                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
1347                         pm->pm_stats.resident_count--;
1348                 }
1349                 pmap_cache_remove(m, va);
1350         }
1351         TTE_ZERO(tp);
1352         if (PMAP_REMOVE_DONE(pm))
1353                 return (0);
1354         return (1);
1355 }
1356
1357 /*
1358  * Remove the given range of addresses from the specified map.
1359  */
1360 void
1361 pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
1362 {
1363         struct tte *tp;
1364         vm_offset_t va;
1365
1366         CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
1367             pm->pm_context[curcpu], start, end);
1368         if (PMAP_REMOVE_DONE(pm))
1369                 return;
1370         rw_wlock(&tte_list_global_lock);
1371         PMAP_LOCK(pm);
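        /*
         * For ranges above the threshold it is cheaper to walk the TSB once
         * and flush the whole context than to look up and demap each page
         * individually.
         */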
1372         if (end - start > PMAP_TSB_THRESH) {
1373                 tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
1374                 tlb_context_demap(pm);
1375         } else {
1376                 for (va = start; va < end; va += PAGE_SIZE)
1377                         if ((tp = tsb_tte_lookup(pm, va)) != NULL &&
1378                             !pmap_remove_tte(pm, NULL, tp, va))
1379                                 break;
1380                 tlb_range_demap(pm, start, end - 1);
1381         }
1382         PMAP_UNLOCK(pm);
1383         rw_wunlock(&tte_list_global_lock);
1384 }
1385
1386 void
1387 pmap_remove_all(vm_page_t m)
1388 {
1389         struct pmap *pm;
1390         struct tte *tpn;
1391         struct tte *tp;
1392         vm_offset_t va;
1393
1394         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1395             ("pmap_remove_all: page %p is not managed", m));
1396         rw_wlock(&tte_list_global_lock);
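        /*
         * The next list element is fetched up front because each iteration
         * unlinks and zeroes the current TTE.
         */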
1397         for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
1398                 tpn = TAILQ_NEXT(tp, tte_link);
1399                 if ((tp->tte_data & TD_PV) == 0)
1400                         continue;
1401                 pm = TTE_GET_PMAP(tp);
1402                 va = TTE_GET_VA(tp);
1403                 PMAP_LOCK(pm);
1404                 if ((tp->tte_data & TD_WIRED) != 0)
1405                         pm->pm_stats.wired_count--;
1406                 if ((tp->tte_data & TD_REF) != 0)
1407                         vm_page_aflag_set(m, PGA_REFERENCED);
1408                 if ((tp->tte_data & TD_W) != 0)
1409                         vm_page_dirty(m);
1410                 tp->tte_data &= ~TD_V;
1411                 tlb_page_demap(pm, va);
1412                 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1413                 pm->pm_stats.resident_count--;
1414                 pmap_cache_remove(m, va);
1415                 TTE_ZERO(tp);
1416                 PMAP_UNLOCK(pm);
1417         }
1418         vm_page_aflag_clear(m, PGA_WRITEABLE);
1419         rw_wunlock(&tte_list_global_lock);
1420 }
1421
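/*
 * Write-protect a single mapping and transfer any modified state recorded
 * in it to the page.
 */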
1422 static int
1423 pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1424     vm_offset_t va)
1425 {
1426         u_long data;
1427         vm_page_t m;
1428
1429         PMAP_LOCK_ASSERT(pm, MA_OWNED);
1430         data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
1431         if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) {
1432                 m = PHYS_TO_VM_PAGE(TD_PA(data));
1433                 vm_page_dirty(m);
1434         }
1435         return (1);
1436 }
1437
1438 /*
1439  * Set the physical protection on the specified range of this map as requested.
1440  */
1441 void
1442 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1443 {
1444         vm_offset_t va;
1445         struct tte *tp;
1446
1447         CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
1448             pm->pm_context[curcpu], sva, eva, prot);
1449
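        /*
         * Dropping read access removes the mappings altogether; if write
         * access is retained there is nothing to do, since only the write
         * bits are ever revoked here.
         */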
1450         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1451                 pmap_remove(pm, sva, eva);
1452                 return;
1453         }
1454
1455         if (prot & VM_PROT_WRITE)
1456                 return;
1457
1458         PMAP_LOCK(pm);
1459         if (eva - sva > PMAP_TSB_THRESH) {
1460                 tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
1461                 tlb_context_demap(pm);
1462         } else {
1463                 for (va = sva; va < eva; va += PAGE_SIZE)
1464                         if ((tp = tsb_tte_lookup(pm, va)) != NULL)
1465                                 pmap_protect_tte(pm, NULL, tp, va);
1466                 tlb_range_demap(pm, sva, eva - 1);
1467         }
1468         PMAP_UNLOCK(pm);
1469 }
1470
1471 /*
1472  * Map the given physical page at the specified virtual address in the
1473  * target pmap with the protection requested.  If specified, the page
1474  * will be wired down.
1475  */
1476 int
1477 pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1478     u_int flags, int8_t psind)
1479 {
1480         int rv;
1481
1482         rw_wlock(&tte_list_global_lock);
1483         PMAP_LOCK(pm);
1484         rv = pmap_enter_locked(pm, va, m, prot, flags, psind);
1485         rw_wunlock(&tte_list_global_lock);
1486         PMAP_UNLOCK(pm);
1487         return (rv);
1488 }
1489
1490 /*
1491  * Map the given physical page at the specified virtual address in the
1492  * target pmap with the protection requested.  If specified, the page
1493  * will be wired down.
1494  *
1495  * The page queues and pmap must be locked.
1496  * The tte list lock and the pmap must be locked.
1497 static int
1498 pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1499     u_int flags, int8_t psind __unused)
1500 {
1501         struct tte *tp;
1502         vm_paddr_t pa;
1503         vm_page_t real;
1504         u_long data;
1505         boolean_t wired;
1506
1507         rw_assert(&tte_list_global_lock, RA_WLOCKED);
1508         PMAP_LOCK_ASSERT(pm, MA_OWNED);
1509         if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1510                 VM_OBJECT_ASSERT_LOCKED(m->object);
1511         PMAP_STATS_INC(pmap_nenter);
1512         pa = VM_PAGE_TO_PHYS(m);
1513         wired = (flags & PMAP_ENTER_WIRED) != 0;
1514
1515         /*
1516          * If this is a fake page from the device_pager, but it covers actual
1517          * physical memory, convert to the real backing page.
1518          */
1519         if ((m->flags & PG_FICTITIOUS) != 0) {
1520                 real = vm_phys_paddr_to_vm_page(pa);
1521                 if (real != NULL)
1522                         m = real;
1523         }
1524
1525         CTR6(KTR_PMAP,
1526             "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
1527             pm->pm_context[curcpu], m, va, pa, prot, wired);
1528
1529         /*
1530          * If there is an existing mapping and the physical address has not
1531          * changed, this must be a protection or wiring change.
1532          */
1533         if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
1534                 CTR0(KTR_PMAP, "pmap_enter_locked: update");
1535                 PMAP_STATS_INC(pmap_nenter_update);
1536
1537                 /*
1538                  * Wiring change, just update stats.
1539                  */
1540                 if (wired) {
1541                         if ((tp->tte_data & TD_WIRED) == 0) {
1542                                 tp->tte_data |= TD_WIRED;
1543                                 pm->pm_stats.wired_count++;
1544                         }
1545                 } else {
1546                         if ((tp->tte_data & TD_WIRED) != 0) {
1547                                 tp->tte_data &= ~TD_WIRED;
1548                                 pm->pm_stats.wired_count--;
1549                         }
1550                 }
1551
1552                 /*
1553                  * Save the old bits and clear the ones we're interested in.
1554                  */
1555                 data = tp->tte_data;
1556                 tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W);
1557
1558                 /*
1559                  * If write access is not being granted, sense modify status.
1560                  */
1561                 if ((prot & VM_PROT_WRITE) != 0) {
1562                         tp->tte_data |= TD_SW;
1563                         if (wired)
1564                                 tp->tte_data |= TD_W;
1565                         if ((m->oflags & VPO_UNMANAGED) == 0)
1566                                 vm_page_aflag_set(m, PGA_WRITEABLE);
1567                 } else if ((data & TD_W) != 0)
1568                         vm_page_dirty(m);
1569
1570                 /*
1571                  * If we're turning on execute permissions, flush the icache.
1572                  */
1573                 if ((prot & VM_PROT_EXECUTE) != 0) {
1574                         if ((data & TD_EXEC) == 0)
1575                                 icache_page_inval(pa);
1576                         tp->tte_data |= TD_EXEC;
1577                 }
1578
1579                 /*
1580                  * Delete the old mapping.
1581                  */
1582                 tlb_page_demap(pm, TTE_GET_VA(tp));
1583         } else {
1584                 /*
1585                  * If there is an existing mapping, but it's for a different
1586                  * physical address, delete the old mapping.
1587                  */
1588                 if (tp != NULL) {
1589                         CTR0(KTR_PMAP, "pmap_enter_locked: replace");
1590                         PMAP_STATS_INC(pmap_nenter_replace);
1591                         pmap_remove_tte(pm, NULL, tp, va);
1592                         tlb_page_demap(pm, va);
1593                 } else {
1594                         CTR0(KTR_PMAP, "pmap_enter_locked: new");
1595                         PMAP_STATS_INC(pmap_nenter_new);
1596                 }
1597
1598                 /*
1599                  * Now set up the data and install the new mapping.
1600                  */
1601                 data = TD_V | TD_8K | TD_PA(pa);
1602                 if (pm == kernel_pmap)
1603                         data |= TD_P;
1604                 if ((prot & VM_PROT_WRITE) != 0) {
1605                         data |= TD_SW;
1606                         if ((m->oflags & VPO_UNMANAGED) == 0)
1607                                 vm_page_aflag_set(m, PGA_WRITEABLE);
1608                 }
1609                 if (prot & VM_PROT_EXECUTE) {
1610                         data |= TD_EXEC;
1611                         icache_page_inval(pa);
1612                 }
1613
1614                 /*
1615                  * If it's wired, update stats.  We don't need reference or
1616                  * modify tracking for wired mappings, so set the bits now.
1617                  */
1618                 if (wired) {
1619                         pm->pm_stats.wired_count++;
1620                         data |= TD_REF | TD_WIRED;
1621                         if ((prot & VM_PROT_WRITE) != 0)
1622                                 data |= TD_W;
1623                 }
1624
1625                 tsb_tte_enter(pm, m, va, TS_8K, data);
1626         }
1627
1628         return (KERN_SUCCESS);
1629 }
1630
1631 /*
1632  * Maps a sequence of resident pages belonging to the same object.
1633  * The sequence begins with the given page m_start.  This page is
1634  * mapped at the given virtual address start.  Each subsequent page is
1635  * mapped at a virtual address that is offset from start by the same
1636  * amount as the page is offset from m_start within the object.  The
1637  * last page in the sequence is the page with the largest offset from
1638  * m_start that can be mapped at a virtual address less than the given
1639  * virtual address end.  Not every virtual page between start and end
1640  * is mapped; only those for which a resident page exists with the
1641  * corresponding offset from m_start are mapped.
1642  */
1643 void
1644 pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
1645     vm_page_t m_start, vm_prot_t prot)
1646 {
1647         vm_page_t m;
1648         vm_pindex_t diff, psize;
1649
1650         VM_OBJECT_ASSERT_LOCKED(m_start->object);
1651
1652         psize = atop(end - start);
1653         m = m_start;
1654         rw_wlock(&tte_list_global_lock);
1655         PMAP_LOCK(pm);
1656         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1657                 pmap_enter_locked(pm, start + ptoa(diff), m, prot &
1658                     (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
1659                 m = TAILQ_NEXT(m, listq);
1660         }
1661         rw_wunlock(&tte_list_global_lock);
1662         PMAP_UNLOCK(pm);
1663 }
1664
1665 void
1666 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
1667 {
1668
1669         rw_wlock(&tte_list_global_lock);
1670         PMAP_LOCK(pm);
1671         pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1672             0, 0);
1673         rw_wunlock(&tte_list_global_lock);
1674         PMAP_UNLOCK(pm);
1675 }
1676
1677 void
1678 pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1679     vm_pindex_t pindex, vm_size_t size)
1680 {
1681
1682         VM_OBJECT_ASSERT_WLOCKED(object);
1683         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
1684             ("pmap_object_init_pt: non-device object"));
1685 }
1686
1687 static int
1688 pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp, vm_offset_t va)
1689 {
1690
1691         PMAP_LOCK_ASSERT(pm, MA_OWNED);
1692         if ((tp->tte_data & TD_WIRED) == 0)
1693                 panic("pmap_unwire_tte: tp %p is missing TD_WIRED", tp);
1694         atomic_clear_long(&tp->tte_data, TD_WIRED);
1695         pm->pm_stats.wired_count--;
1696         return (1);
1697 }
1698
1699 /*
1700  * Clear the wired attribute from the mappings for the specified range of
1701  * addresses in the given pmap.  Every valid mapping within that range must
1702  * have the wired attribute set.  In contrast, invalid mappings cannot have
1703  * the wired attribute set, so they are ignored.
1704  *
1705  * The wired attribute of the translation table entry is not a hardware
1706  * feature, so there is no need to invalidate any TLB entries.
1707  */
1708 void
1709 pmap_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1710 {
1711         vm_offset_t va;
1712         struct tte *tp;
1713
1714         PMAP_LOCK(pm);
1715         if (eva - sva > PMAP_TSB_THRESH)
1716                 tsb_foreach(pm, NULL, sva, eva, pmap_unwire_tte);
1717         else {
1718                 for (va = sva; va < eva; va += PAGE_SIZE)
1719                         if ((tp = tsb_tte_lookup(pm, va)) != NULL)
1720                                 pmap_unwire_tte(pm, NULL, tp, va);
1721         }
1722         PMAP_UNLOCK(pm);
1723 }
1724
1725 static int
1726 pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp,
1727     vm_offset_t va)
1728 {
1729         vm_page_t m;
1730         u_long data;
1731
1732         if ((tp->tte_data & TD_FAKE) != 0)
1733                 return (1);
1734         if (tsb_tte_lookup(dst_pmap, va) == NULL) {
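                /*
                 * Copy the mapping with the reference, modify and writable
                 * bits cleared; tsb_tte_enter() presumably re-establishes
                 * the PV linkage and cacheability state for the destination
                 * pmap.
                 */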
1735                 data = tp->tte_data &
1736                     ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
1737                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1738                 tsb_tte_enter(dst_pmap, m, va, TS_8K, data);
1739         }
1740         return (1);
1741 }
1742
1743 void
1744 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
1745     vm_size_t len, vm_offset_t src_addr)
1746 {
1747         struct tte *tp;
1748         vm_offset_t va;
1749
1750         if (dst_addr != src_addr)
1751                 return;
1752         rw_wlock(&tte_list_global_lock);
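        /*
         * Lock the two pmaps in a consistent order, by address, to avoid
         * deadlocking against a concurrent copy in the opposite direction.
         */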
1753         if (dst_pmap < src_pmap) {
1754                 PMAP_LOCK(dst_pmap);
1755                 PMAP_LOCK(src_pmap);
1756         } else {
1757                 PMAP_LOCK(src_pmap);
1758                 PMAP_LOCK(dst_pmap);
1759         }
1760         if (len > PMAP_TSB_THRESH) {
1761                 tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len,
1762                     pmap_copy_tte);
1763                 tlb_context_demap(dst_pmap);
1764         } else {
1765                 for (va = src_addr; va < src_addr + len; va += PAGE_SIZE)
1766                         if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
1767                                 pmap_copy_tte(src_pmap, dst_pmap, tp, va);
1768                 tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
1769         }
1770         rw_wunlock(&tte_list_global_lock);
1771         PMAP_UNLOCK(src_pmap);
1772         PMAP_UNLOCK(dst_pmap);
1773 }
1774
1775 void
1776 pmap_zero_page(vm_page_t m)
1777 {
1778         struct tte *tp;
1779         vm_offset_t va;
1780         vm_paddr_t pa;
1781
1782         KASSERT((m->flags & PG_FICTITIOUS) == 0,
1783             ("pmap_zero_page: fake page"));
1784         PMAP_STATS_INC(pmap_nzero_page);
1785         pa = VM_PAGE_TO_PHYS(m);
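        /*
         * Three cases, based on the page's data cache color: if the page
         * is already colored like its direct-mapped address (or coloring
         * is ignored), zero it through the direct map; if its color is
         * unknown (-1), zero the physical memory through a physical
         * address ASI, bypassing the virtually indexed data cache;
         * otherwise set up a temporary kernel mapping with the matching
         * color and zero through that.
         */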
1786         if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
1787                 PMAP_STATS_INC(pmap_nzero_page_c);
1788                 va = TLB_PHYS_TO_DIRECT(pa);
1789                 cpu_block_zero((void *)va, PAGE_SIZE);
1790         } else if (m->md.color == -1) {
1791                 PMAP_STATS_INC(pmap_nzero_page_nc);
1792                 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1793         } else {
1794                 PMAP_STATS_INC(pmap_nzero_page_oc);
1795                 PMAP_LOCK(kernel_pmap);
1796                 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1797                 tp = tsb_kvtotte(va);
1798                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1799                 tp->tte_vpn = TV_VPN(va, TS_8K);
1800                 cpu_block_zero((void *)va, PAGE_SIZE);
1801                 tlb_page_demap(kernel_pmap, va);
1802                 PMAP_UNLOCK(kernel_pmap);
1803         }
1804 }
1805
1806 void
1807 pmap_zero_page_area(vm_page_t m, int off, int size)
1808 {
1809         struct tte *tp;
1810         vm_offset_t va;
1811         vm_paddr_t pa;
1812
1813         KASSERT((m->flags & PG_FICTITIOUS) == 0,
1814             ("pmap_zero_page_area: fake page"));
1815         KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
1816         PMAP_STATS_INC(pmap_nzero_page_area);
1817         pa = VM_PAGE_TO_PHYS(m);
1818         if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
1819                 PMAP_STATS_INC(pmap_nzero_page_area_c);
1820                 va = TLB_PHYS_TO_DIRECT(pa);
1821                 bzero((void *)(va + off), size);
1822         } else if (m->md.color == -1) {
1823                 PMAP_STATS_INC(pmap_nzero_page_area_nc);
1824                 aszero(ASI_PHYS_USE_EC, pa + off, size);
1825         } else {
1826                 PMAP_STATS_INC(pmap_nzero_page_area_oc);
1827                 PMAP_LOCK(kernel_pmap);
1828                 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1829                 tp = tsb_kvtotte(va);
1830                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1831                 tp->tte_vpn = TV_VPN(va, TS_8K);
1832                 bzero((void *)(va + off), size);
1833                 tlb_page_demap(kernel_pmap, va);
1834                 PMAP_UNLOCK(kernel_pmap);
1835         }
1836 }
1837
1838 void
1839 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
1840 {
1841         vm_offset_t vdst;
1842         vm_offset_t vsrc;
1843         vm_paddr_t pdst;
1844         vm_paddr_t psrc;
1845         struct tte *tp;
1846
1847         KASSERT((mdst->flags & PG_FICTITIOUS) == 0,
1848             ("pmap_copy_page: fake dst page"));
1849         KASSERT((msrc->flags & PG_FICTITIOUS) == 0,
1850             ("pmap_copy_page: fake src page"));
1851         PMAP_STATS_INC(pmap_ncopy_page);
1852         pdst = VM_PAGE_TO_PHYS(mdst);
1853         psrc = VM_PAGE_TO_PHYS(msrc);
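        /*
         * As in pmap_zero_page(), each page is accessed through the direct
         * map when its color matches, through physical address ASI copies
         * when its color is unknown, and through a temporary correctly
         * colored kernel mapping otherwise; the cases below cover the
         * possible combinations for the source and destination pages.
         */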
1854         if (dcache_color_ignore != 0 ||
1855             (msrc->md.color == DCACHE_COLOR(psrc) &&
1856             mdst->md.color == DCACHE_COLOR(pdst))) {
1857                 PMAP_STATS_INC(pmap_ncopy_page_c);
1858                 vdst = TLB_PHYS_TO_DIRECT(pdst);
1859                 vsrc = TLB_PHYS_TO_DIRECT(psrc);
1860                 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1861         } else if (msrc->md.color == -1 && mdst->md.color == -1) {
1862                 PMAP_STATS_INC(pmap_ncopy_page_nc);
1863                 ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
1864         } else if (msrc->md.color == -1) {
1865                 if (mdst->md.color == DCACHE_COLOR(pdst)) {
1866                         PMAP_STATS_INC(pmap_ncopy_page_dc);
1867                         vdst = TLB_PHYS_TO_DIRECT(pdst);
1868                         ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1869                             PAGE_SIZE);
1870                 } else {
1871                         PMAP_STATS_INC(pmap_ncopy_page_doc);
1872                         PMAP_LOCK(kernel_pmap);
1873                         vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1874                         tp = tsb_kvtotte(vdst);
1875                         tp->tte_data =
1876                             TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1877                         tp->tte_vpn = TV_VPN(vdst, TS_8K);
1878                         ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1879                             PAGE_SIZE);
1880                         tlb_page_demap(kernel_pmap, vdst);
1881                         PMAP_UNLOCK(kernel_pmap);
1882                 }
1883         } else if (mdst->md.color == -1) {
1884                 if (msrc->md.color == DCACHE_COLOR(psrc)) {
1885                         PMAP_STATS_INC(pmap_ncopy_page_sc);
1886                         vsrc = TLB_PHYS_TO_DIRECT(psrc);
1887                         ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1888                             PAGE_SIZE);
1889                 } else {
1890                         PMAP_STATS_INC(pmap_ncopy_page_soc);
1891                         PMAP_LOCK(kernel_pmap);
1892                         vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
1893                         tp = tsb_kvtotte(vsrc);
1894                         tp->tte_data =
1895                             TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1896                         tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1897                         ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1898                             PAGE_SIZE);
1899                         tlb_page_demap(kernel_pmap, vsrc);
1900                         PMAP_UNLOCK(kernel_pmap);
1901                 }
1902         } else {
1903                 PMAP_STATS_INC(pmap_ncopy_page_oc);
1904                 PMAP_LOCK(kernel_pmap);
1905                 vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1906                 tp = tsb_kvtotte(vdst);
1907                 tp->tte_data =
1908                     TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1909                 tp->tte_vpn = TV_VPN(vdst, TS_8K);
1910                 vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
1911                 tp = tsb_kvtotte(vsrc);
1912                 tp->tte_data =
1913                     TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1914                 tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1915                 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1916                 tlb_page_demap(kernel_pmap, vdst);
1917                 tlb_page_demap(kernel_pmap, vsrc);
1918                 PMAP_UNLOCK(kernel_pmap);
1919         }
1920 }
1921
1922 vm_offset_t
1923 pmap_quick_enter_page(vm_page_t m)
1924 {
1925         vm_paddr_t pa;
1926         vm_offset_t qaddr;
1927         struct tte *tp;
1928
1929         pa = VM_PAGE_TO_PHYS(m);
1930         if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa))
1931                 return (TLB_PHYS_TO_DIRECT(pa));
1932
1933         critical_enter();
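        /*
         * Select the per-CPU quick-map slot whose virtual color matches
         * the page's physical color so the temporary mapping is cacheable
         * without creating an illegal alias.  As an illustrative example,
         * assuming DCACHE_COLORS == 2, a page of color 1 with qmap_addr of
         * color 0 gives an offset of (2 + 1 - 0) % 2 == 1 page, i.e. the
         * second slot, whose virtual color is again 1.
         */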
1934         qaddr = PCPU_GET(qmap_addr);
1935         qaddr += (PAGE_SIZE * ((DCACHE_COLORS + DCACHE_COLOR(pa) -
1936             DCACHE_COLOR(qaddr)) % DCACHE_COLORS));
1937         tp = tsb_kvtotte(qaddr);
1938
1939         KASSERT(tp->tte_data == 0, ("pmap_quick_enter_page: PTE busy"));
1940
1941         tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1942         tp->tte_vpn = TV_VPN(qaddr, TS_8K);
1943
1944         return (qaddr);
1945 }
1946
1947 void
1948 pmap_quick_remove_page(vm_offset_t addr)
1949 {
1950         vm_offset_t qaddr;
1951         struct tte *tp;
1952
1953         if (addr >= VM_MIN_DIRECT_ADDRESS)
1954                 return;
1955
1956         tp = tsb_kvtotte(addr);
1957         qaddr = PCPU_GET(qmap_addr);
1958
1959         KASSERT((addr >= qaddr) && (addr < (qaddr + (PAGE_SIZE * DCACHE_COLORS))),
1960             ("pmap_quick_remove_page: invalid address"));
1961         KASSERT(tp->tte_data != 0, ("pmap_quick_remove_page: PTE not in use"));
1962
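        /*
         * Demap the page from the local CPU's D-MMU and I-MMU only; the
         * quick mapping is private to this CPU's qmap_addr window and is
         * torn down within the same critical section, so no cross-CPU TLB
         * shootdown should be needed.
         */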
1963         stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_DMMU_DEMAP, 0);
1964         stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
1965         flush(KERNBASE);
1966         TTE_ZERO(tp);
1967         critical_exit();
1968 }
1969
1970 int unmapped_buf_allowed;
1971
1972 void
1973 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
1974     vm_offset_t b_offset, int xfersize)
1975 {
1976
1977         panic("pmap_copy_pages: not implemented");
1978 }
1979
1980 /*
1981  * Returns true if the pmap's pv is one of the first
1982  * 16 pvs linked to from this page.  This count may
1983  * be changed upwards or downwards in the future; it
1984  * is only necessary that true be returned for a small
1985  * subset of pmaps for proper page aging.
1986  */
1987 boolean_t
1988 pmap_page_exists_quick(pmap_t pm, vm_page_t m)
1989 {
1990         struct tte *tp;
1991         int loops;
1992         boolean_t rv;
1993
1994         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1995             ("pmap_page_exists_quick: page %p is not managed", m));
1996         loops = 0;
1997         rv = FALSE;
1998         rw_wlock(&tte_list_global_lock);
1999         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2000                 if ((tp->tte_data & TD_PV) == 0)
2001                         continue;
2002                 if (TTE_GET_PMAP(tp) == pm) {
2003                         rv = TRUE;
2004                         break;
2005                 }
2006                 if (++loops >= 16)
2007                         break;
2008         }
2009         rw_wunlock(&tte_list_global_lock);
2010         return (rv);
2011 }
2012
2013 /*
2014  * Return the number of managed mappings to the given physical page
2015  * that are wired.
2016  */
2017 int
2018 pmap_page_wired_mappings(vm_page_t m)
2019 {
2020         struct tte *tp;
2021         int count;
2022
2023         count = 0;
2024         if ((m->oflags & VPO_UNMANAGED) != 0)
2025                 return (count);
2026         rw_wlock(&tte_list_global_lock);
2027         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
2028                 if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
2029                         count++;
2030         rw_wunlock(&tte_list_global_lock);
2031         return (count);
2032 }
2033
2034 /*
2035  * Remove all pages from the specified address space; this aids process exit
2036  * speed.  This is much faster than pmap_remove in the case of running down
2037  * an entire address space.  Only works for the current pmap.
2038  */
2039 void
2040 pmap_remove_pages(pmap_t pm)
2041 {
2042
2043 }
2044
2045 /*
2046  * Returns TRUE if the given page has a managed mapping.
2047  */
2048 boolean_t
2049 pmap_page_is_mapped(vm_page_t m)
2050 {
2051         struct tte *tp;
2052         boolean_t rv;
2053
2054         rv = FALSE;
2055         if ((m->oflags & VPO_UNMANAGED) != 0)
2056                 return (rv);
2057         rw_wlock(&tte_list_global_lock);
2058         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
2059                 if ((tp->tte_data & TD_PV) != 0) {
2060                         rv = TRUE;
2061                         break;
2062                 }
2063         rw_wunlock(&tte_list_global_lock);
2064         return (rv);
2065 }
2066
2067 /*
2068  * Return a count of reference bits for a page, clearing those bits.
2069  * It is not necessary for every reference bit to be cleared, but it
2070  * is necessary that 0 only be returned when there are truly no
2071  * reference bits set.
2072  *
2073  * As an optimization, update the page's dirty field if a modified bit is
2074  * found while counting reference bits.  This opportunistic update can be
2075  * performed at low cost and can eliminate the need for some future calls
2076  * to pmap_is_modified().  However, since this function stops after
2077  * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
2078  * dirty pages.  Those dirty pages will only be detected by a future call
2079  * to pmap_is_modified().
2080  */
2081 int
2082 pmap_ts_referenced(vm_page_t m)
2083 {
2084         struct tte *tpf;
2085         struct tte *tpn;
2086         struct tte *tp;
2087         u_long data;
2088         int count;
2089
2090         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2091             ("pmap_ts_referenced: page %p is not managed", m));
2092         count = 0;
2093         rw_wlock(&tte_list_global_lock);
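        /*
         * Scan the page's mappings, moving each examined TTE to the tail
         * of the list so that successive calls start with the mappings
         * checked least recently; stop when the first element comes around
         * again or enough reference bits have been found.
         */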
2094         if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
2095                 tpf = tp;
2096                 do {
2097                         tpn = TAILQ_NEXT(tp, tte_link);
2098                         TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
2099                         TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
2100                         if ((tp->tte_data & TD_PV) == 0)
2101                                 continue;
2102                         data = atomic_clear_long(&tp->tte_data, TD_REF);
2103                         if ((data & TD_W) != 0)
2104                                 vm_page_dirty(m);
2105                         if ((data & TD_REF) != 0 && ++count >=
2106                             PMAP_TS_REFERENCED_MAX)
2107                                 break;
2108                 } while ((tp = tpn) != NULL && tp != tpf);
2109         }
2110         rw_wunlock(&tte_list_global_lock);
2111         return (count);
2112 }
2113
2114 boolean_t
2115 pmap_is_modified(vm_page_t m)
2116 {
2117         struct tte *tp;
2118         boolean_t rv;
2119
2120         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2121             ("pmap_is_modified: page %p is not managed", m));
2122         rv = FALSE;
2123
2124         /*
2125          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2126          * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
2127          * is clear, no TTEs can have TD_W set.
2128          */
2129         VM_OBJECT_ASSERT_WLOCKED(m->object);
2130         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2131                 return (rv);
2132         rw_wlock(&tte_list_global_lock);
2133         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2134                 if ((tp->tte_data & TD_PV) == 0)
2135                         continue;
2136                 if ((tp->tte_data & TD_W) != 0) {
2137                         rv = TRUE;
2138                         break;
2139                 }
2140         }
2141         rw_wunlock(&tte_list_global_lock);
2142         return (rv);
2143 }
2144
2145 /*
2146  *      pmap_is_prefaultable:
2147  *
2148  *      Return whether or not the specified virtual address is eligible
2149  *      for prefault.
2150  */
2151 boolean_t
2152 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2153 {
2154         boolean_t rv;
2155
2156         PMAP_LOCK(pmap);
2157         rv = tsb_tte_lookup(pmap, addr) == NULL;
2158         PMAP_UNLOCK(pmap);
2159         return (rv);
2160 }
2161
2162 /*
2163  * Return whether or not the specified physical page was referenced
2164  * in any physical maps.
2165  */
2166 boolean_t
2167 pmap_is_referenced(vm_page_t m)
2168 {
2169         struct tte *tp;
2170         boolean_t rv;
2171
2172         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2173             ("pmap_is_referenced: page %p is not managed", m));
2174         rv = FALSE;
2175         rw_wlock(&tte_list_global_lock);
2176         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2177                 if ((tp->tte_data & TD_PV) == 0)
2178                         continue;
2179                 if ((tp->tte_data & TD_REF) != 0) {
2180                         rv = TRUE;
2181                         break;
2182                 }
2183         }
2184         rw_wunlock(&tte_list_global_lock);
2185         return (rv);
2186 }
2187
2188 /*
2189  * This function is advisory.
2190  */
2191 void
2192 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
2193 {
2194 }
2195
2196 void
2197 pmap_clear_modify(vm_page_t m)
2198 {
2199         struct tte *tp;
2200         u_long data;
2201
2202         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2203             ("pmap_clear_modify: page %p is not managed", m));
2204         VM_OBJECT_ASSERT_WLOCKED(m->object);
2205         KASSERT(!vm_page_xbusied(m),
2206             ("pmap_clear_modify: page %p is exclusive busied", m));
2207
2208         /*
2209          * If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set.
2210          * If the object containing the page is locked and the page is not
2211          * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2212          */
2213         if ((m->aflags & PGA_WRITEABLE) == 0)
2214                 return;
2215         rw_wlock(&tte_list_global_lock);
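        /*
         * Clear the modified bit in each writable mapping and demap it so
         * that the next write faults and sets TD_W again, keeping modify
         * tracking accurate.
         */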
2216         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2217                 if ((tp->tte_data & TD_PV) == 0)
2218                         continue;
2219                 data = atomic_clear_long(&tp->tte_data, TD_W);
2220                 if ((data & TD_W) != 0)
2221                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
2222         }
2223         rw_wunlock(&tte_list_global_lock);
2224 }
2225
2226 void
2227 pmap_remove_write(vm_page_t m)
2228 {
2229         struct tte *tp;
2230         u_long data;
2231
2232         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2233             ("pmap_remove_write: page %p is not managed", m));
2234
2235         /*
2236          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2237          * set by another thread while the object is locked.  Thus,
2238          * if PGA_WRITEABLE is clear, no page table entries need updating.
2239          */
2240         VM_OBJECT_ASSERT_WLOCKED(m->object);
2241         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2242                 return;
2243         rw_wlock(&tte_list_global_lock);
2244         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
2245                 if ((tp->tte_data & TD_PV) == 0)
2246                         continue;
2247                 data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
2248                 if ((data & TD_W) != 0) {
2249                         vm_page_dirty(m);
2250                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
2251                 }
2252         }
2253         vm_page_aflag_clear(m, PGA_WRITEABLE);
2254         rw_wunlock(&tte_list_global_lock);
2255 }
2256
2257 int
2258 pmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *locked_pa)
2259 {
2260
2261         /* TODO */
2262         return (0);
2263 }
2264
2265 /*
2266  * Activate a user pmap.  The pmap must be activated before its address space
2267  * can be accessed in any way.
2268  */
2269 void
2270 pmap_activate(struct thread *td)
2271 {
2272         struct vmspace *vm;
2273         struct pmap *pm;
2274         int context;
2275
2276         critical_enter();
2277         vm = td->td_proc->p_vmspace;
2278         pm = vmspace_pmap(vm);
2279
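        /*
         * Allocate a fresh TLB context number from this CPU's range; once
         * the range is exhausted, flush all user TLB entries and start
         * over from the minimum context number.
         */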
2280         context = PCPU_GET(tlb_ctx);
2281         if (context == PCPU_GET(tlb_ctx_max)) {
2282                 tlb_flush_user();
2283                 context = PCPU_GET(tlb_ctx_min);
2284         }
2285         PCPU_SET(tlb_ctx, context + 1);
2286
2287         pm->pm_context[curcpu] = context;
2288 #ifdef SMP
2289         CPU_SET_ATOMIC(PCPU_GET(cpuid), &pm->pm_active);
2290         atomic_store_acq_ptr((uintptr_t *)PCPU_PTR(pmap), (uintptr_t)pm);
2291 #else
2292         CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
2293         PCPU_SET(pmap, pm);
2294 #endif
2295
2296         stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
2297         stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
2298         stxa(AA_DMMU_PCXR, ASI_DMMU, (ldxa(AA_DMMU_PCXR, ASI_DMMU) &
2299             TLB_CXR_PGSZ_MASK) | context);
2300         flush(KERNBASE);
2301         critical_exit();
2302 }
2303
2304 void
2305 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
2306 {
2307
2308 }
2309
2310 /*
2311  * Increase the starting virtual address of the given mapping if a
2312  * different alignment might result in more superpage mappings.
2313  */
2314 void
2315 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
2316     vm_offset_t *addr, vm_size_t size)
2317 {
2318
2319 }
2320
2321 boolean_t
2322 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
2323 {
2324
2325         return (mode == VM_MEMATTR_DEFAULT);
2326 }