1 /*-
2  * Copyright (c) 2006 Kip Macy <kmacy@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_kstack_pages.h"
32 #include "opt_msgbuf.h"
33 #include "opt_pmap.h"
34 #include "opt_trap_trace.h"
35
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/msgbuf.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/smp.h>
45 #include <sys/sched.h>
46 #include <sys/sysctl.h>
47 #include <sys/systm.h>
48 #include <sys/vmmeter.h>
49
50 #include <dev/ofw/openfirm.h>
51
52 #include <vm/vm.h> 
53 #include <vm/vm_page.h>
54 #include <vm/vm_param.h>
55 #include <vm/vm_kern.h>
56 #include <vm/vm_map.h>
57 #include <vm/vm_object.h>
58 #include <vm/vm_extern.h>
59 #include <vm/vm_pageout.h>
60 #include <vm/vm_pager.h>
61 #include <vm/vm_phys.h>
62 #include <vm/uma.h>
63
64 #include <machine/cpu.h>
65 #include <machine/frame.h>
66 #include <machine/instr.h>
67 #include <machine/md_var.h>
68 #include <machine/metadata.h>
69 #include <machine/ofw_mem.h>
70 #include <machine/mmu.h>
71 #include <machine/smp.h>
72 #include <machine/tlb.h>
73 #include <machine/tte.h>
74 #include <machine/tte_hash.h>
75 #include <machine/pcb.h>
76 #include <machine/pstate.h>
77 #include <machine/tsb.h>
78
79 #include <machine/hypervisorvar.h>
80 #include <machine/hv_api.h>
81
82 #ifdef TRAP_TRACING
83 void trap_trace_report(int);
84 #endif
85
86 #if 1
87 #define PMAP_DEBUG
88 #endif
89 #ifndef PMAP_SHPGPERPROC
90 #define PMAP_SHPGPERPROC        200
91 #endif
92
93 /*
94  * Virtual and physical address of message buffer.
95  */
96 struct msgbuf *msgbufp;
97 vm_paddr_t msgbuf_phys;
98
99 /*
100  * Map of physical memory regions.
101  */
102 vm_paddr_t phys_avail[128];
103 vm_paddr_t phys_avail_tmp[128];
104 static struct ofw_mem_region mra[128];
105 static struct ofw_map translations[128];
106 static int translations_size;
107
108
109 struct ofw_mem_region sparc64_memreg[128];
110 int sparc64_nmemreg;
111
112 extern vm_paddr_t mmu_fault_status_area;
113
114 /*
115  * First and last available kernel virtual addresses.
116  */
117 vm_offset_t virtual_avail;
118 vm_offset_t virtual_end;
119 vm_offset_t kernel_vm_end;
120 vm_offset_t vm_max_kernel_address;
121
125 /*
126  * Data for the pv entry allocation mechanism
127  */
128 static uma_zone_t pvzone;
129 static struct vm_object pvzone_obj;
130 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
131 int pmap_debug = 0;
132 static int pmap_debug_range = 1;
133 static int use_256M_pages = 1;
134
135 static struct mtx pmap_ctx_lock;
136 static uint16_t ctx_stack[PMAP_CONTEXT_MAX];
137 static int ctx_stack_top; 
138
139 static int permanent_mappings = 0;
140 static uint64_t nucleus_memory;
141 static uint64_t nucleus_mappings[4];
142 /*
143  * Kernel pmap.
144  */
145 struct pmap kernel_pmap_store;
146
147 hv_tsb_info_t kernel_td[MAX_TSB_INFO];
148
149 /*
150  * This should be determined at boot time.
151  * With tiny TLBs it doesn't make sense to try to selectively
152  * invalidate more than this many entries.
153  */
154 #define MAX_INVALIDATES   32
155 #define MAX_TSB_CLEARS   128
156
157 /*
158  * Allocate physical memory for use in pmap_bootstrap.
159  */
160 static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size);
161
162 /*
163  * If a user pmap is processed with pmap_remove and the
164  * resident count drops to 0, there are no more pages to remove, so we
165  * need not continue.
166  */
167 #define PMAP_REMOVE_DONE(pm) \
168         ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
169
170 /*
171  * Kernel MMU interface
172  */
173 #define curthread_pmap vmspace_pmap(curthread->td_proc->p_vmspace) 
174
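/*
 * Debug printf wrappers: KDPRINTF prints only when pmap_debug is set, while
 * DPRINTF additionally sanity-checks that the current user pmap is marked
 * active on this CPU (panicking if it is not) before printing.
 */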
175 #ifdef PMAP_DEBUG
176 #define KDPRINTF if (pmap_debug) printf
177 #define DPRINTF \
178         if (curthread_pmap && (curthread_pmap->pm_context != 0) && ((PCPU_GET(cpumask) & curthread_pmap->pm_active) == 0)) \
179         panic("cpumask(0x%x) & active (0x%x) == 0 pid == %d\n",  \
180               PCPU_GET(cpumask), curthread_pmap->pm_active, curthread->td_proc->p_pid); \
181 if (pmap_debug) printf
182
183
184 #else
185 #define DPRINTF(...)
186 #define KDPRINTF(...)
187 #endif
188
189
190 static void free_pv_entry(pv_entry_t pv);
191 static pv_entry_t get_pv_entry(pmap_t locked_pmap);
192
193 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
194 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
195 static void pmap_remove_tte(pmap_t pmap, tte_t tte_data, vm_offset_t va);
196 static void pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot);
197 static void pmap_tsb_reset(pmap_t pmap);
198 static void pmap_tsb_resize(pmap_t pmap);
199 static void pmap_tte_hash_resize(pmap_t pmap);
200
201 void pmap_set_ctx_panic(uint64_t error, vm_paddr_t tsb_ra, pmap_t pmap);
202
203 struct tsb_resize_info {
204         uint64_t tri_tsbscratch;
205         uint64_t tri_tsb_ra;
206 };
207
208 /*
209  * Quick sort callout for comparing memory regions.
210  */
211 static int mr_cmp(const void *a, const void *b);
212 static int om_cmp(const void *a, const void *b);
213 static int
214 mr_cmp(const void *a, const void *b)
215 {
216         const struct ofw_mem_region *mra;
217         const struct ofw_mem_region *mrb;
218
219         mra = a;
220         mrb = b;
221         if (mra->mr_start < mrb->mr_start)
222                 return (-1);
223         else if (mra->mr_start > mrb->mr_start)
224                 return (1);
225         else
226                 return (0);
227 }
228 static int
229 om_cmp(const void *a, const void *b)
230 {
231         const struct ofw_map *oma;
232         const struct ofw_map *omb;
233
234         oma = a;
235         omb = b;
236         if (oma->om_start < omb->om_start)
237                 return (-1);
238         else if (oma->om_start > omb->om_start)
239                 return (1);
240         else
241                 return (0);
242 }
243
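/*
 * MMU context number allocation: free context IDs are kept on a simple
 * stack protected by a spin mutex; get_context() pops an ID for use by a
 * user pmap and free_context() returns it to the pool.
 */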
244 static __inline void
245 free_context(uint16_t ctx)
246 {
247         mtx_lock_spin(&pmap_ctx_lock);
248         ctx_stack[ctx_stack_top++] = ctx;
249         mtx_unlock_spin(&pmap_ctx_lock);
250
251         KASSERT(ctx_stack_top < PMAP_CONTEXT_MAX, 
252                 ("context stack overrun - system error"));
253 }
254
255 static __inline uint16_t
256 get_context(void)
257 {
258         uint16_t ctx;
259
260         mtx_lock_spin(&pmap_ctx_lock);
261         ctx = ctx_stack[--ctx_stack_top];
262         mtx_unlock_spin(&pmap_ctx_lock);
263
264         KASSERT(ctx_stack_top > 0,
265                 ("context stack underrun - need to implement context stealing"));
266
267         return ctx;
268 }
269
270 static __inline void
271 free_pv_entry(pv_entry_t pv)
272 {
273         pv_entry_count--;
274         uma_zfree(pvzone, pv);
275 }
276
277 /*
278  * get a new pv_entry, allocating a block from the system
279  * when needed.
280  */
281 static pv_entry_t
282 get_pv_entry(pmap_t locked_pmap)
283 {
284         static const struct timeval printinterval = { 60, 0 };
285         static struct timeval lastprint;
286         struct vpgqueues *vpq;
287         uint64_t tte_data;
288         pmap_t pmap;
289         pv_entry_t allocated_pv, next_pv, pv;
290         vm_offset_t va;
291         vm_page_t m;
292
293         PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
294         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
295         allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
296         if (allocated_pv != NULL) {
297                 pv_entry_count++;
298                 if (pv_entry_count > pv_entry_high_water)
299                         pagedaemon_wakeup();
300                 else
301                         return (allocated_pv);
302         }
303
304         /*
305          * Reclaim pv entries: At first, destroy mappings to inactive
306          * pages.  After that, if a pv entry is still needed, destroy
307          * mappings to active pages.
308          */
309         if (ratecheck(&lastprint, &printinterval))
310                 printf("Approaching the limit on PV entries, "
311                     "increase the vm.pmap.shpgperproc tunable.\n");
312
313         vpq = &vm_page_queues[PQ_INACTIVE];
314 retry:
315         TAILQ_FOREACH(m, &vpq->pl, pageq) {
316                 if (m->hold_count || m->busy)
317                         continue;
318                 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
319                         va = pv->pv_va;
320                         pmap = pv->pv_pmap;
321                         /* Avoid deadlock and lock recursion. */
322                         if (pmap > locked_pmap)
323                                 PMAP_LOCK(pmap);
324                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
325                                 continue;
326                         pmap->pm_stats.resident_count--;
327
328                         tte_data = tte_hash_delete(pmap->pm_hash, va);
329
330                         KASSERT((tte_data & VTD_WIRED) == 0,
331                             ("get_pv_entry: wired pte %#jx", (uintmax_t)tte_data));
332                         if (tte_data & VTD_REF)
333                                 vm_page_flag_set(m, PG_REFERENCED);
334                         if (tte_data & VTD_W) {
335                                 KASSERT((tte_data & VTD_SW_W),
336                                 ("get_pv_entry: modified page not writable: va: %lx, tte: %lx",
337                                     va, tte_data));
338                                 vm_page_dirty(m);
339                         }
340
341                         pmap_invalidate_page(pmap, va, TRUE);
342                         TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
343                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
344                         if (TAILQ_EMPTY(&m->md.pv_list))
345                                 vm_page_flag_clear(m, PG_WRITEABLE);
346                         m->md.pv_list_count--;
347
348                         if (pmap != locked_pmap)
349                                 PMAP_UNLOCK(pmap);
350                         if (allocated_pv == NULL)
351                                 allocated_pv = pv;
352                         else
353                                 free_pv_entry(pv);
354                 }
355         }
356         if (allocated_pv == NULL) {
357                 if (vpq == &vm_page_queues[PQ_INACTIVE]) {
358                         vpq = &vm_page_queues[PQ_ACTIVE];
359                         goto retry;
360                 }
361                 panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
362         }
363         return (allocated_pv);
364 }
365
366 /*
367  * Allocate a physical page of memory directly from the phys_avail map.
368  * Can only be called from pmap_bootstrap before avail start and end are
369  * calculated.
370  */
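/*
 * For example, the message buffer is carved out later in pmap_bootstrap()
 * with:
 *
 *	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
 */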
371 static vm_paddr_t
372 pmap_bootstrap_alloc(vm_size_t size)
373 {
374         vm_paddr_t pa;
375         int i;
376
377         size = round_page(size);
378
379         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
380                 if (phys_avail[i + 1] - phys_avail[i] < size)
381                         continue;
382                 pa = phys_avail[i];
383                 phys_avail[i] += size;
384                 pmap_scrub_pages(pa, size);
385                 return (pa);
386         }
387         panic("pmap_bootstrap_alloc");
388 }
389
390 /*
391  * Activate a user pmap.  The pmap must be activated before its address space
392  * can be accessed in any way.
393  */
394 void
395 pmap_activate(struct thread *td)
396 {
397         pmap_t pmap, oldpmap;
398         int err;
399         
400         critical_enter();
401         pmap = vmspace_pmap(td->td_proc->p_vmspace);
402         oldpmap = PCPU_GET(curpmap);
403 #if defined(SMP)
404         atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
405         atomic_set_int(&pmap->pm_tlbactive, PCPU_GET(cpumask));
406         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
407 #else
408         oldpmap->pm_active &= ~1;
409         pmap->pm_active |= 1;
410         pmap->pm_tlbactive |= 1;
411 #endif
412
413         pmap->pm_hashscratch = tte_hash_set_scratchpad_user(pmap->pm_hash, pmap->pm_context);
414         pmap->pm_tsbscratch = tsb_set_scratchpad_user(&pmap->pm_tsb);
415         pmap->pm_tsb_miss_count = pmap->pm_tsb_cap_miss_count = 0;
416
417         PCPU_SET(curpmap, pmap);
418         if (pmap->pm_context != 0)
419                 if ((err = hv_mmu_tsb_ctxnon0(1, pmap->pm_tsb_ra)) != H_EOK)
420                         panic("failed to set TSB 0x%lx - context == %ld\n", 
421                               pmap->pm_tsb_ra, pmap->pm_context);
422         stxa(MMU_CID_S, ASI_MMU_CONTEXTID, pmap->pm_context);
423         membar(Sync);
424         critical_exit();
425 }
426
427 vm_offset_t 
428 pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
429 {
430         return (va);
431 }
432
433 /*
434  *      Increase the starting virtual address of the given mapping if a
435  *      different alignment might result in more superpage mappings.
436  */
437 void
438 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
439     vm_offset_t *addr, vm_size_t size)
440 {
441 }
442
443 /*
444  * Bootstrap the system enough to run with virtual memory.
445  */
446 void
447 pmap_bootstrap(vm_offset_t ekva)
448 {
449         struct pmap *pm;
450         vm_offset_t off, va;
451         vm_paddr_t pa, tsb_8k_pa, tsb_4m_pa, kernel_hash_pa, nucleus_memory_start;
452         vm_size_t physsz, virtsz, kernel_hash_shift;
453         ihandle_t pmem, vmem;
454         int i, j, k, sz;
455         uint64_t tsb_8k_size, tsb_4m_size, error, physmem_tunable, physmemstart_tunable;
456         vm_paddr_t real_phys_avail[128], tmp_phys_avail[128], bounds;
457         
458
459         if ((vmem = OF_finddevice("/virtual-memory")) == -1)
460                 panic("pmap_bootstrap: finddevice /virtual-memory");
461         if ((sz = OF_getproplen(vmem, "translations")) == -1)
462                 panic("pmap_bootstrap: getproplen translations");
463         if (sizeof(translations) < sz)
464                 panic("pmap_bootstrap: translations too small");
465         bzero(translations, sz);
466         if (OF_getprop(vmem, "translations", translations, sz) == -1)
467                 panic("pmap_bootstrap: getprop /virtual-memory/translations");
468         sz /= sizeof(*translations);
469         translations_size = sz;
470         nucleus_memory_start = 0;
471         CTR0(KTR_PMAP, "pmap_bootstrap: translations");
472         qsort(translations, sz, sizeof (*translations), om_cmp);
473
474         for (i = 0; i < sz; i++) {
475                 KDPRINTF("om_size=%ld om_start=%lx om_tte=%lx\n", 
476                         translations[i].om_size, translations[i].om_start, 
477                         translations[i].om_tte);
478                 if ((translations[i].om_start >= KERNBASE) && 
479                     (translations[i].om_start <= KERNBASE + 3*PAGE_SIZE_4M)) {
480                         for (j = 0; j < translations[i].om_size; j += PAGE_SIZE_4M) {
481                                 KDPRINTF("mapping permanent translation\n");
482                                 pa = TTE_GET_PA(translations[i].om_tte) + j;
483                                 va = translations[i].om_start + j;
484                                 error = hv_mmu_map_perm_addr(va, KCONTEXT, 
485                                                              pa | TTE_KERNEL | VTD_4M, MAP_ITLB | MAP_DTLB);
486                                 if (error != H_EOK)
487                                         panic("map_perm_addr returned error=%ld", error);
488                                 
489                                 if ((nucleus_memory_start == 0) || (pa < nucleus_memory_start))
490                                         nucleus_memory_start = pa;
491                                 printf("nucleus_mappings[%d] = 0x%lx\n", permanent_mappings, pa);
492                                 nucleus_mappings[permanent_mappings++] = pa;
493                                 nucleus_memory += PAGE_SIZE_4M;
494 #ifdef SMP
495                                 mp_add_nucleus_mapping(va, pa|TTE_KERNEL|VTD_4M);
496 #endif
497                         }
498                 }  
499         }
500
501         /*
502          * Find out what physical memory is available from the prom and
503          * initialize the phys_avail array.  This must be done before
504          * pmap_bootstrap_alloc is called.
505          */
506         if ((pmem = OF_finddevice("/memory")) == -1)
507                 panic("pmap_bootstrap: finddevice /memory");
508         if ((sz = OF_getproplen(pmem, "available")) == -1)
509                 panic("pmap_bootstrap: getproplen /memory/available");
510         if (sizeof(vm_paddr_t)*128 < sz) /* FIXME */
511                 panic("pmap_bootstrap: phys_avail too small");
512         if (sizeof(mra) < sz)
513                 panic("pmap_bootstrap: mra too small");
514         bzero(mra, sz);
515         if (OF_getprop(pmem, "available", mra, sz) == -1)
516                 panic("pmap_bootstrap: getprop /memory/available");
517
518         sz /= sizeof(*mra);
519         CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
520
521         qsort(mra, sz, sizeof (*mra), mr_cmp);
522         physmemstart_tunable = physmem_tunable = physmem = physsz = 0;
523         
524         if (TUNABLE_ULONG_FETCH("hw.physmemstart", &physmemstart_tunable)) {
525                 KDPRINTF("desired physmemstart=0x%lx\n", physmemstart_tunable);
526         }
527         if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) {
528                 physmem = atop(physmem_tunable);
529                 KDPRINTF("desired physmem=0x%lx\n", physmem_tunable);
530         }
531         if ((physmem_tunable != 0) && (physmemstart_tunable != 0))
532                 physmem_tunable += physmemstart_tunable;
533         
534         bzero(real_phys_avail, sizeof(real_phys_avail));
535         bzero(tmp_phys_avail, sizeof(tmp_phys_avail));
536
537         for (i = 0, j = 0; i < sz; i++) {
538                 uint64_t size;
539                 KDPRINTF("start=%#lx size=%#lx\n", mra[i].mr_start, mra[i].mr_size);
540                 if (mra[i].mr_size < PAGE_SIZE_4M)
541                         continue;
542
543                 if ((mra[i].mr_start & PAGE_MASK_4M) || (mra[i].mr_size & PAGE_MASK_4M)) {
544                         uint64_t newstart, roundup;
545                         newstart = ((mra[i].mr_start + (PAGE_MASK_4M)) & ~PAGE_MASK_4M);
546                         roundup = newstart - mra[i].mr_start;
547                         size = (mra[i].mr_size - roundup) & ~PAGE_MASK_4M;
548                         mra[i].mr_start = newstart;
549                         if (size < PAGE_SIZE_4M)
550                                 continue;
551                         mra[i].mr_size = size;
552                 }
553                 real_phys_avail[j] = mra[i].mr_start;
554                 if (physmem_tunable != 0 && ((physsz + mra[i].mr_size) >= physmem_tunable)) {
555                         mra[i].mr_size = physmem_tunable - physsz;
556                         physsz = physmem_tunable;
557                         real_phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
558                         break;
559                 }
560                 physsz += mra[i].mr_size;
561                 real_phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
562                 j += 2;
563         }
564         physmem = btoc(physsz - physmemstart_tunable);
565
566         /*
567          * This is needed for versions of OFW that would allocate us memory
568          * and then forget to remove it from the available ranges, as well
569          * as to compensate for the nucleus pages claimed above.
570          */
571         for (i = 0, j = 0, bounds = (1UL<<32); real_phys_avail[i] != 0; i += 2) {
572                 vm_paddr_t start = real_phys_avail[i];
573                 uint64_t end = real_phys_avail[i + 1];
574                 CTR2(KTR_PMAP, "start=%#lx size=%#lx\n", start, end);
575                 KDPRINTF("real_phys start=%#lx end=%#lx\n", start, end);
576                 /* 
577                  * Is kernel memory at the beginning of range?
578                  */
579                 if (nucleus_memory_start == start) {
580                         start += nucleus_memory;
581                 }
582                 /* 
583                  * Is kernel memory at the end of range?
584                  */
585                 if (nucleus_memory_start == (end - nucleus_memory)) 
586                         end -= nucleus_memory;
587
588                 if (physmemstart_tunable != 0 && 
589                     (end < physmemstart_tunable))
590                         continue;
591
592                 if (physmemstart_tunable != 0 && 
593                     ((start < physmemstart_tunable))) {
594                         start = physmemstart_tunable;
595                 }
596
597                 /* 
598                  * Is kernel memory in the middle somewhere?             
599                  */
600                 if ((nucleus_memory_start > start) && 
601                     (nucleus_memory_start < end)) {
602                         phys_avail[j] = start;
603                         phys_avail[j+1] = nucleus_memory_start;
604                         start =  nucleus_memory_start + nucleus_memory;
605                         j += 2;
606                 }
607                 /*
608                  * Break phys_avail up on 4GB boundaries to try to work
609                  * around a PCI-e allocation bug.  We rely on the fact that
610                  * kernel memory is allocated from the first 4GB of
611                  * physical memory.
612                  */
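                /*
                 * For example, a range spanning a 4GB boundary, such as
                 * [0x180000000, 0x280000000), ends up as two phys_avail
                 * chunks: [0x180000000, 0x200000000) and
                 * [0x200000000, 0x280000000).
                 */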
613                 while (bounds < start)
614                         bounds += (1UL<<32);
615
616                 while (bounds < end) {
617                         phys_avail[j] = start;
618                         phys_avail[j + 1] = bounds;
619                         start = bounds;
620                         bounds += (1UL<<32);
621                         j += 2;
622                 }
623                 phys_avail[j] = start; 
624                 phys_avail[j + 1] = end;
625                 j += 2;
626         }
627
628         /*
629          * Merge nucleus memory back into real_phys_avail and coalesce
630          * ranges that become adjacent as a result.
631          */
632         for (i = 0; real_phys_avail[i] != 0; i += 2) {
633                 if (real_phys_avail[i] == nucleus_memory_start + nucleus_memory)
634                         real_phys_avail[i] -= nucleus_memory;
635                 
636                 if (real_phys_avail[i + 1] == nucleus_memory_start)
637                         real_phys_avail[i + 1] += nucleus_memory;
638                 
639                 if (real_phys_avail[i + 1] == real_phys_avail[i + 2]) {
640                         real_phys_avail[i + 1] = real_phys_avail[i + 3];
641                         for (k = i + 2; real_phys_avail[k] != 0; k += 2) {
642                                 real_phys_avail[k] = real_phys_avail[k + 2];
643                                 real_phys_avail[k + 1] = real_phys_avail[k + 3];
644                         }
645                 }
646         }
647         for (i = 0; phys_avail[i] != 0; i += 2)
648                 if (pmap_debug_range || pmap_debug)
649                         printf("phys_avail[%d]=0x%lx phys_avail[%d]=0x%lx\n",
650                         i, phys_avail[i], i+1, phys_avail[i+1]);
651
652         /*
653          * Shuffle the memory range containing the 256MB page with
654          * nucleus_memory to the beginning of the phys_avail array
655          * so that physical memory from that page is allocated
656          * first.
657          */
658         for (j = 0; phys_avail[j] != 0; j += 2) 
659                 if (nucleus_memory_start < phys_avail[j])
660                         break;
661         /*
662          * Don't shuffle unless we have a full 256M page in the range;
663          * our kernel malloc appears to be horribly brittle otherwise.
664          */
665         if ((phys_avail[j + 1] - phys_avail[j]) < 
666             (PAGE_SIZE_256M - nucleus_memory))
667                 goto skipshuffle;
668
669         for (i = j, k = 0; phys_avail[i] != 0; k++, i++)
670                 tmp_phys_avail[k] = phys_avail[i];
671         for (i = 0; i < j; i++)
672                 tmp_phys_avail[k + i] = phys_avail[i];
673         for (i = 0; i < 128; i++)
674                 phys_avail[i] = tmp_phys_avail[i];
675
676 skipshuffle:
677         for (i = 0; real_phys_avail[i] != 0; i += 2)
678                 if (pmap_debug_range || pmap_debug)
679                         printf("real_phys_avail[%d]=0x%lx real_phys_avail[%d]=0x%lx\n",
680                         i, real_phys_avail[i], i+1, real_phys_avail[i+1]);
681
682         for (i = 0; phys_avail[i] != 0; i += 2)
683                 if (pmap_debug_range || pmap_debug)
684                         printf("phys_avail[%d]=0x%lx phys_avail[%d]=0x%lx\n",
685                         i, phys_avail[i], i+1, phys_avail[i+1]);
686         /*
687          * Calculate the size of kernel virtual memory, and the size and mask
688          * for the kernel tsb.
689          */
690         virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
691         vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
692
693         /*
694          * Set the start and end of kva.  The kernel is loaded at the first
695          * available 4 meg super page, so round up to the end of the page.
696          */
697         virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
698         virtual_end = vm_max_kernel_address;
699         kernel_vm_end = vm_max_kernel_address;
700
701         /*
702          * Allocate and map memory for the kernel TTE hash table
703          * (two 4M pages normally, or 64 8K pages on the simulator).
704          */
705 #ifndef SIMULATOR
706         kernel_hash_shift = 10; /* PAGE_SIZE_4M*2 */
707 #else
708         kernel_hash_shift = 6; /* PAGE_SIZE_8K*64 */
709 #endif
710
711         kernel_hash_pa = pmap_bootstrap_alloc((1<<(kernel_hash_shift + PAGE_SHIFT)));
712         if (kernel_hash_pa & PAGE_MASK_4M)
713                 panic("pmap_bootstrap: hashtable pa unaligned\n");
714         /*
715          * Set up TSB descriptors for the hypervisor
716          *
717          */
718 #ifdef notyet
719         tsb_8k_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
720 #else
721         /* avoid alignment complaints from the hypervisor */
722         tsb_8k_size = PAGE_SIZE_4M;
723 #endif
724
725         tsb_8k_pa = pmap_bootstrap_alloc(tsb_8k_size);
726         if (tsb_8k_pa & PAGE_MASK_4M)
727                 panic("pmap_bootstrap: tsb unaligned\n");
728         KDPRINTF("tsb_8k_size is 0x%lx, tsb_8k_pa is 0x%lx\n", tsb_8k_size, tsb_8k_pa);
729
730         tsb_4m_size = (virtsz >> (PAGE_SHIFT_4M - TTE_SHIFT)) << 3;
731         tsb_4m_pa = pmap_bootstrap_alloc(tsb_4m_size);
732
733         kernel_td[TSB8K_INDEX].hti_idxpgsz = TTE8K;
734         kernel_td[TSB8K_INDEX].hti_assoc = 1;
735         kernel_td[TSB8K_INDEX].hti_ntte = (tsb_8k_size >> TTE_SHIFT);
736         kernel_td[TSB8K_INDEX].hti_ctx_index = 0;
737         kernel_td[TSB8K_INDEX].hti_pgszs = TSB8K;
738         kernel_td[TSB8K_INDEX].hti_rsvd = 0;
739         kernel_td[TSB8K_INDEX].hti_ra = tsb_8k_pa;
740
741         /*
742          * Initialize kernel's private TSB from 8K page TSB
743          *
744          */
745         kernel_pmap->pm_tsb.hti_idxpgsz = TTE8K;
746         kernel_pmap->pm_tsb.hti_assoc = 1;
747         kernel_pmap->pm_tsb.hti_ntte = (tsb_8k_size >> TTE_SHIFT);
748         kernel_pmap->pm_tsb.hti_ctx_index = 0;
749         kernel_pmap->pm_tsb.hti_pgszs = TSB8K;
750         kernel_pmap->pm_tsb.hti_rsvd = 0;
751         kernel_pmap->pm_tsb.hti_ra = tsb_8k_pa;
752         
753         kernel_pmap->pm_tsb_ra = vtophys((vm_offset_t)&kernel_pmap->pm_tsb);
754         tsb_set_scratchpad_kernel(&kernel_pmap->pm_tsb);
755         
756         /*
757          * Initialize the kernel TSB for 4M pages; it is currently
758          * (though not by design) also used for permanent mappings.
759          */
760         
761
762         KDPRINTF("tsb_4m_pa is 0x%lx tsb_4m_size is 0x%lx\n", tsb_4m_pa, tsb_4m_size);
763         kernel_td[TSB4M_INDEX].hti_idxpgsz = TTE4M;
764         kernel_td[TSB4M_INDEX].hti_assoc = 1;
765         kernel_td[TSB4M_INDEX].hti_ntte = (tsb_4m_size >> TTE_SHIFT);
766         kernel_td[TSB4M_INDEX].hti_ctx_index = 0;
767         kernel_td[TSB4M_INDEX].hti_pgszs = TSB4M|TSB256M;
768         kernel_td[TSB4M_INDEX].hti_rsvd = 0;
769         kernel_td[TSB4M_INDEX].hti_ra = tsb_4m_pa;
770         /*
771          * Allocate MMU fault status areas for all CPUs.
772          */
773         mmu_fault_status_area = pmap_bootstrap_alloc(MMFSA_SIZE*MAXCPU);
774
775         /*
776          * Allocate and map the message buffer.
777          */
778         msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
779         msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
780
781         /*
782          * Allocate a kernel stack with guard page for thread0 and map it into
783          * the kernel tsb.  
784          */
785         pa = pmap_bootstrap_alloc(KSTACK_PAGES*PAGE_SIZE);
786         kstack0_phys = pa;
787         virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE;
788         kstack0 = virtual_avail;
789         virtual_avail += KSTACK_PAGES * PAGE_SIZE;
790         for (i = 0; i < KSTACK_PAGES; i++) {
791                 pa = kstack0_phys + i * PAGE_SIZE;
792                 va = kstack0 + i * PAGE_SIZE;
793                 tsb_set_tte_real(&kernel_td[TSB8K_INDEX], va, va,
794                             pa | TTE_KERNEL | VTD_8K, 0);
795         }
796         /*
797          * Calculate the last available physical address.
798          */
799         for (i = 0; phys_avail[i + 2] != 0; i += 2)
800                 KDPRINTF("phys_avail[%d]=0x%lx phys_avail[%d]=0x%lx\n",
801                         i, phys_avail[i], i+1, phys_avail[i+1]);
802         KDPRINTF("phys_avail[%d]=0x%lx phys_avail[%d]=0x%lx\n",
803                         i, phys_avail[i], i+1, phys_avail[i+1]);
804
805         Maxmem = sparc64_btop(phys_avail[i + 1]);
806         
807         /*
808          * Add the prom mappings to the kernel tsb.
809          */
810         for (i = 0; i < sz; i++) {
811                 CTR3(KTR_PMAP,
812                     "translation: start=%#lx size=%#lx tte=%#lx",
813                     translations[i].om_start, translations[i].om_size,
814                     translations[i].om_tte);
815                 KDPRINTF("om_size=%ld om_start=%lx om_tte=%lx\n", 
816                        translations[i].om_size, translations[i].om_start, 
817                        translations[i].om_tte);
818
819                 if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
820                     translations[i].om_start > VM_MAX_PROM_ADDRESS) 
821                         continue;
822
823                 for (off = 0; off < translations[i].om_size;
824                      off += PAGE_SIZE) {
825                         va = translations[i].om_start + off;
826                         pa = TTE_GET_PA(translations[i].om_tte) + off;
827                         tsb_assert_invalid(&kernel_td[TSB8K_INDEX], va);
828                         tsb_set_tte_real(&kernel_td[TSB8K_INDEX], va, va, pa | 
829                                     TTE_KERNEL | VTD_8K, 0);
830                 }
831         }
832
833         if ((error = hv_mmu_tsb_ctx0(MAX_TSB_INFO, 
834                                      vtophys((vm_offset_t)kernel_td))) != H_EOK)
835                 panic("failed to set ctx0 TSBs error: %ld", error);
836
837 #ifdef SMP
838         mp_set_tsb_desc_ra(vtophys((vm_offset_t)&kernel_td));
839 #endif
840         /*
841          * Set up the direct mappings for physical memory, using 256M pages
842          * where the range is suitably aligned and 4M pages otherwise.
843          */
844         for (i = 0, pa = real_phys_avail[i]; pa != 0; i += 2, pa = real_phys_avail[i]) {
845                 vm_paddr_t tag_pa = 0, next_pa = 0;
846                 uint64_t size_bits = VTD_4M;
847                 while (pa < real_phys_avail[i + 1]) {
848                         if (use_256M_pages &&
849                             (pa & PAGE_MASK_256M) == 0 && 
850                             ((pa + PAGE_SIZE_256M) <= real_phys_avail[i + 1])) {
851                                 tag_pa = pa;
852                                 size_bits = VTD_256M;
853                                 next_pa = pa + PAGE_SIZE_256M;
854                         } else if (next_pa <= pa) {
855                                 tag_pa = pa;
856                                 size_bits = VTD_4M;
857                         }
858                         tsb_assert_invalid(&kernel_td[TSB4M_INDEX], TLB_PHYS_TO_DIRECT(pa));
859                         tsb_set_tte_real(&kernel_td[TSB4M_INDEX], TLB_PHYS_TO_DIRECT(pa), 
860                                          TLB_PHYS_TO_DIRECT(pa), 
861                                          tag_pa | TTE_KERNEL | size_bits, 0);
862                         pa += PAGE_SIZE_4M;
863                 }
864         }
865
866         /*
867          * Get the available physical memory ranges from /memory/reg. These
868          * are only used for kernel dumps, but it may not be wise to do prom
869          * calls in that situation.
870          */
871         if ((sz = OF_getproplen(pmem, "reg")) == -1)
872                 panic("pmap_bootstrap: getproplen /memory/reg");
873         if (sizeof(sparc64_memreg) < sz)
874                 panic("pmap_bootstrap: sparc64_memreg too small");
875         if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
876                 panic("pmap_bootstrap: getprop /memory/reg");
877         sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
878
879         pm = kernel_pmap;
880         pm->pm_active = ~0;
881         pm->pm_tlbactive = ~0;
882
883         PMAP_LOCK_INIT(kernel_pmap);
884
885         TAILQ_INIT(&kernel_pmap->pm_pvlist);
886
887         /* 
888          * This could happen earlier - but I put it here to avoid 
889          * attempts to do updates until they're legal
890          */
891         pm->pm_hash = tte_hash_kernel_create(TLB_PHYS_TO_DIRECT(kernel_hash_pa), kernel_hash_shift, 
892                                              pmap_bootstrap_alloc(PAGE_SIZE));
893         pm->pm_hashscratch = tte_hash_set_scratchpad_kernel(pm->pm_hash);
894
895         for (i = 0; i < translations_size; i++) {
896                 KDPRINTF("om_size=%ld om_start=%lx om_tte=%lx\n", 
897                        translations[i].om_size, translations[i].om_start, 
898                        translations[i].om_tte);
899
900                 if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
901                     translations[i].om_start > VM_MAX_PROM_ADDRESS) {
902                         KDPRINTF("skipping\n");
903                         continue;
904                 }
905                 for (off = 0; off < translations[i].om_size; off += PAGE_SIZE) {
906                         va = translations[i].om_start + off;
907                         pa = TTE_GET_PA(translations[i].om_tte) + off;
908                         tte_hash_insert(pm->pm_hash, va, pa | TTE_KERNEL | VTD_8K);
909                 }
910                 KDPRINTF("set om_size=%ld om_start=%lx om_tte=%lx\n", 
911                        translations[i].om_size, translations[i].om_start, 
912                        translations[i].om_tte);
913         }
914         for (i = 0; i < KSTACK_PAGES; i++) {
915                 pa = kstack0_phys + i * PAGE_SIZE;
916                 va = kstack0 + i * PAGE_SIZE;
917                 tte_hash_insert(pm->pm_hash, va, pa | TTE_KERNEL | VTD_8K);
918         }
919         /*
920          * Add direct mappings to hash
921          *
922          */
923 #ifdef notyet
924         /* hash only supports 8k pages */
925         for (pa = PAGE_SIZE_4M; pa < phys_avail[2]; pa += PAGE_SIZE_4M)
926                 tte_hash_insert(pm->pm_hash, TLB_PHYS_TO_DIRECT(pa), 
927                                 pa | TTE_KERNEL | VTD_4M);
928 #endif
929
930
931         if (bootverbose)
932                 printf("pmap_bootstrap done\n");
933 }
934
935
936
937 /*
938  *      Routine:        pmap_change_wiring
939  *      Function:       Change the wiring attribute for a map/virtual-address
940  *                      pair.
941  *      In/out conditions:
942  *                      The mapping must already exist in the pmap.
943  */
944 void
945 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
946 {
947         boolean_t iswired;
948         PMAP_LOCK(pmap);
949         iswired = tte_get_virt_bit(pmap, va, VTD_WIRED);
950
951         if (wired && !iswired) {
952                 pmap->pm_stats.wired_count++;
953                 tte_set_virt_bit(pmap, va, VTD_WIRED);
954         } else if (!wired && iswired) {
955                 pmap->pm_stats.wired_count--;
956                 tte_clear_virt_bit(pmap, va, VTD_WIRED);
957         }
958         PMAP_UNLOCK(pmap);
959 }
960
961 void
962 pmap_clear_modify(vm_page_t m)
963 {
964         KDPRINTF("pmap_clear_modify(0x%lx)\n", VM_PAGE_TO_PHYS(m));
965         tte_clear_phys_bit(m, VTD_W);
966 }
967
968 void
969 pmap_clear_reference(vm_page_t m)
970 {
971         KDPRINTF("pmap_clear_reference(0x%lx)\n", VM_PAGE_TO_PHYS(m));
972         tte_clear_phys_bit(m, VTD_REF);
973 }
974
975 void
976 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
977           vm_size_t len, vm_offset_t src_addr)
978 {
979         vm_offset_t addr, end_addr;
980
981         end_addr = src_addr + len;
982         /*
983          * Don't let optional prefaulting of pages make us go
984          * way below the low water mark of free pages or way
985          * above high water mark of used pv entries.
986          */
987         if (cnt.v_free_count < cnt.v_free_reserved ||
988             pv_entry_count > pv_entry_high_water)
989                 return;
990         
991
992         vm_page_lock_queues();
993         if (dst_pmap < src_pmap) {
994                 PMAP_LOCK(dst_pmap);
995                 PMAP_LOCK(src_pmap);
996         } else {
997                 PMAP_LOCK(src_pmap);
998                 PMAP_LOCK(dst_pmap);
999         }
1000         for (addr = src_addr; addr < end_addr; addr += PAGE_SIZE) {
1001                 tte_t tte_data;
1002                 vm_page_t m;
1003
1004                 tte_data = tte_hash_lookup(src_pmap->pm_hash, addr);
1005
1006                 if ((tte_data & VTD_MANAGED) != 0) {
1007                         if (tte_hash_lookup(dst_pmap->pm_hash, addr) == 0) {
1008                                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
1009
1010                                 tte_hash_insert(dst_pmap->pm_hash, addr, tte_data & ~(VTD_W|VTD_REF|VTD_WIRED));
1011                                 dst_pmap->pm_stats.resident_count++;
1012                                 pmap_insert_entry(dst_pmap, addr, m);
1013                         } 
1014                 }               
1015         }
1016         vm_page_unlock_queues();
1017         PMAP_UNLOCK(src_pmap);
1018         PMAP_UNLOCK(dst_pmap);
1019 }
1020
1021 void
1022 pmap_copy_page(vm_page_t src, vm_page_t dst)
1023 {
1024         vm_paddr_t srcpa, dstpa;
1025         srcpa = VM_PAGE_TO_PHYS(src);
1026         dstpa = VM_PAGE_TO_PHYS(dst);
1027
1028         novbcopy((char *)TLB_PHYS_TO_DIRECT(srcpa), (char *)TLB_PHYS_TO_DIRECT(dstpa), PAGE_SIZE);
1029
1030
1031 }
1032
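/*
 * Common bookkeeping for a new mapping: bump the wired count if requested
 * and, for managed pages, add a pv entry and flag the TTE as managed.
 */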
1033 static __inline void
1034 pmap_add_tte(pmap_t pmap, vm_offset_t va, vm_page_t m, tte_t *tte_data, int wired)
1035 {
1036
1037         if (wired)
1038                 pmap->pm_stats.wired_count++;
1039         
1040         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
1041                 pmap_insert_entry(pmap, va, m);
1042                 *tte_data |= VTD_MANAGED;
1043         }
1044 }
1045
1046 /*
1047  * Map the given physical page at the specified virtual address in the
1048  * target pmap with the protection requested.  If specified the page
1049  * will be wired down.
1050  */
1051 void
1052 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
1053     vm_prot_t prot, boolean_t wired)
1054 {
1055         vm_paddr_t pa, opa;
1056         uint64_t tte_data, otte_data;
1057         vm_page_t om;
1058         int invlva;
1059
1060         if (pmap->pm_context)
1061                 DPRINTF("pmap_enter(va=%lx, pa=0x%lx, prot=%x)\n", va, 
1062                         VM_PAGE_TO_PHYS(m), prot);
1063
1064         om = NULL;
1065         
1066         vm_page_lock_queues();
1067         PMAP_LOCK(pmap);
1068
1069         tte_data = pa = VM_PAGE_TO_PHYS(m);
1070         otte_data = tte_hash_delete(pmap->pm_hash, va);
1071         opa = TTE_GET_PA(otte_data);
1072
1073         if (opa == 0) {
1074                 /*
1075                  * This is a new mapping
1076                  */
1077                 pmap->pm_stats.resident_count++;
1078                 pmap_add_tte(pmap, va, m, &tte_data, wired);
1079
1080         } else if (pa != opa) {
1081                 /*
1082                  * Mapping has changed, handle validating new mapping.
1083                  * 
1084                  */
1085                 if (otte_data & VTD_WIRED)
1086                         pmap->pm_stats.wired_count--;
1087
1088                 if (otte_data & VTD_MANAGED) {
1089                         om = PHYS_TO_VM_PAGE(opa);
1090                         pmap_remove_entry(pmap, om, va);
1091                 }
1092
1093                 pmap_add_tte(pmap, va, m, &tte_data, wired);
1094
1095         } else /* (pa == opa) */ {
1096                 /*
1097                  * Mapping has not changed, must be protection or wiring change.
1098                  */
1099
1100                 /*
1101                  * Wiring change, just update stats. We don't worry about
1102                  * wiring PT pages as they remain resident as long as there
1103                  * are valid mappings in them. Hence, if a user page is wired,
1104                  * the PT page will be also.
1105                  */
1106                 if (wired && ((otte_data & VTD_WIRED) == 0))
1107                         pmap->pm_stats.wired_count++;
1108                 else if (!wired && (otte_data & VTD_WIRED))
1109                         pmap->pm_stats.wired_count--;
1110
1111                 /*
1112                  * We might be turning off write access to the page,
1113                  * so we go ahead and sense modify status.
1114                  */
1115                 if (otte_data & VTD_MANAGED) {
1116                         om = m;
1117                         tte_data |= VTD_MANAGED;
1118                 }
1119         } 
1120
1121         /*
1122          * Now validate mapping with desired protection/wiring.
1123          */
1124         if ((prot & VM_PROT_WRITE) != 0) {
1125                 tte_data |= VTD_SW_W; 
1126                 vm_page_flag_set(m, PG_WRITEABLE);
1127         }
1128         if ((prot & VM_PROT_EXECUTE) != 0)
1129                 tte_data |= VTD_X;
1130         if (wired)
1131                 tte_data |= VTD_WIRED;
1132         if (pmap == kernel_pmap)
1133                 tte_data |= VTD_P;
1134         
1135         invlva = FALSE;
1136         if ((otte_data & ~(VTD_W|VTD_REF)) != tte_data) {
1137                 if (otte_data & VTD_V) {
1138                         if (otte_data & VTD_REF) {
1139                                 if (otte_data & VTD_MANAGED) 
1140                                         vm_page_flag_set(om, PG_REFERENCED);
1141                                 if ((opa != pa) || ((opa & VTD_X) != (pa & VTD_X)))
1142                                         invlva = TRUE;
1143                         }
1144                         if (otte_data & VTD_W) {
1145                                 if (otte_data & VTD_MANAGED) 
1146                                         vm_page_dirty(om);
1147                                 if ((pa & VTD_SW_W) != 0) 
1148                                         invlva = TRUE;
1149                         }
1150                         if (invlva)
1151                                 pmap_invalidate_page(pmap, va, TRUE);
1152                 }
1153         } 
1154
1155
1156         tte_hash_insert(pmap->pm_hash, va, tte_data|TTE_MINFLAGS|VTD_REF);
1157         /*
1158          * XXX this needs to be locked for the threaded / kernel case 
1159          */
1160         tsb_set_tte(&pmap->pm_tsb, va, tte_data|TTE_MINFLAGS|VTD_REF, 
1161                     pmap->pm_context);
1162
1163         if (tte_hash_needs_resize(pmap->pm_hash))
1164                 pmap_tte_hash_resize(pmap);
1165
1166         /*
1167          * 512 is an arbitrary number of tsb misses
1168          */
1169         if (0 && pmap->pm_context != 0 && pmap->pm_tsb_miss_count > 512)
1170                 pmap_tsb_resize(pmap);
1171
1172         vm_page_unlock_queues();
1173
1174         PMAP_UNLOCK(pmap);
1175 }
1176
1177 /*
1178  * Maps a sequence of resident pages belonging to the same object.
1179  * The sequence begins with the given page m_start.  This page is
1180  * mapped at the given virtual address start.  Each subsequent page is
1181  * mapped at a virtual address that is offset from start by the same
1182  * amount as the page is offset from m_start within the object.  The
1183  * last page in the sequence is the page with the largest offset from
1184  * m_start that can be mapped at a virtual address less than the given
1185  * virtual address end.  Not every virtual page between start and end
1186  * is mapped; only those for which a resident page exists with the
1187  * corresponding offset from m_start are mapped.
1188  */
1189 void
1190 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 
1191                   vm_page_t m_start, vm_prot_t prot)
1192 {
1193         vm_page_t m;
1194         vm_pindex_t diff, psize;
1195
1196         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
1197         psize = atop(end - start);
1198         m = m_start;
1199         PMAP_LOCK(pmap);
1200         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1201                 pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
1202                 m = TAILQ_NEXT(m, listq);
1203         }
1204         PMAP_UNLOCK(pmap);
1205 }
1206
1207 void
1208 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
1209 {
1210         PMAP_LOCK(pmap);
1211         pmap_enter_quick_locked(pmap, va, m, prot);
1212         PMAP_UNLOCK(pmap);
1213 }
1214
1215 static void
1216 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
1217 {
1218
1219         tte_t tte_data;
1220
1221         if (pmap->pm_context)
1222                 KDPRINTF("pmap_enter_quick(ctx=0x%lx va=%lx, pa=0x%lx prot=%x)\n", 
1223                         pmap->pm_context, va, VM_PAGE_TO_PHYS(m), prot);
1224
1225         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1226         if (tte_hash_lookup(pmap->pm_hash, va))
1227                 return;
1228                 
1229         tte_data = VM_PAGE_TO_PHYS(m);
1230         /*
1231          * Enter on the PV list if part of our managed memory. Note that we
1232          * raise IPL while manipulating pv_table since pmap_enter can be
1233          * called at interrupt time.
1234          */
1235         if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
1236                 pmap_insert_entry(pmap, va, m);
1237                 tte_data |= VTD_MANAGED;
1238         }
1239
1240         pmap->pm_stats.resident_count++;
1241
1242         if ((prot & VM_PROT_EXECUTE) != 0)
1243                 tte_data |= VTD_X;
1244
1245         tte_hash_insert(pmap->pm_hash, va, tte_data | TTE_MINFLAGS);
1246 }
1247
1248 /*
1249  * Extract the physical page address associated with the given
1250  * map/virtual_address pair.
1251  */
1252 vm_paddr_t
1253 pmap_extract(pmap_t pmap, vm_offset_t va)
1254 {
1255         vm_paddr_t pa;
1256         tte_t tte_data;
1257
1258         tte_data = tte_hash_lookup(pmap->pm_hash, va);
1259         pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
1260
1261         return (pa);
1262 }
1263
1264 /*
1265  * Atomically extract and hold the physical page with the given
1266  * pmap and virtual address pair if that mapping permits the given
1267  * protection.
1268  */
1269 vm_page_t
1270 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1271 {
1272         tte_t tte_data;
1273         vm_page_t m;
1274
1275         m = NULL;
1276         vm_page_lock_queues();
1277         PMAP_LOCK(pmap);
1278         tte_data = tte_hash_lookup(pmap->pm_hash, va);
1279         if (tte_data != 0 && 
1280             ((tte_data & VTD_SW_W) || (prot & VM_PROT_WRITE) == 0)) {
1281                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
1282                 vm_page_hold(m);
1283         }
1284         vm_page_unlock_queues();
1285         PMAP_UNLOCK(pmap);
1286
1287         return (m);
1288 }
1289
1290 void *
1291 pmap_alloc_zeroed_contig_pages(int npages, uint64_t alignment)
1292 {
1293         vm_page_t m, tm;
1294         int i;
1295         void *ptr;
1296         
1297         m = NULL;
1298         while (m == NULL) {     
1299                 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1300                         m = vm_phys_alloc_contig(npages, phys_avail[i], 
1301                                                  phys_avail[i + 1], alignment, (1UL<<34));
1302                         if (m)
1303                                 goto found;
1304                 }
1305                 if (m == NULL) {
1306                         printf("vm_phys_alloc_contig failed - waiting to retry\n");
1307                         VM_WAIT;
1308                 }
1309         }
1310 found:
1311         for (i = 0, tm = m; i < npages; i++, tm++) {
1312                 tm->wire_count++;
1313                 if ((tm->flags & PG_ZERO) == 0)
1314                         pmap_zero_page(tm);
1315         }
1316         ptr = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1317         
1318         return (ptr);
1319 }
1320
1321 void
1322 pmap_free_contig_pages(void *ptr, int npages)
1323 {
1324         int i;
1325         vm_page_t m;
1326
1327         m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)ptr));
1328         for (i = 0; i < npages; i++, m++) {
1329                 m->wire_count--;
1330                 atomic_subtract_int(&cnt.v_wire_count, 1);
1331                 vm_page_free(m);
1332         }
1333 }
1334
1335 void 
1336 pmap_growkernel(vm_offset_t addr)
1337 {
1338         return;
1339 }
1340
1341 void 
1342 pmap_init(void)
1343 {
1344
1345         /* allocate pv_entry zones */
1346         int shpgperproc = PMAP_SHPGPERPROC;
1347
1348         for (ctx_stack_top = 1; ctx_stack_top < PMAP_CONTEXT_MAX; ctx_stack_top++) 
1349                 ctx_stack[ctx_stack_top] = ctx_stack_top;
1350
1351         mtx_init(&pmap_ctx_lock, "ctx lock", NULL, MTX_SPIN);
1352
1353         /*
1354          * Initialize the address space (zone) for the pv entries.  Set a
1355          * high water mark so that the system can recover from excessive
1356          * numbers of pv entries.
1357          */
1358         pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 
1359             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1360         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1361         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1362         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1363         pv_entry_high_water = 9 * (pv_entry_max / 10);
1364         uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
1365
1366         tte_hash_init();
1367
1368 }
1369
1370 /*
1371  * Create a pv entry for page at pa for
1372  * (pmap, va).
1373  */
1374 static void
1375 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
1376 {
1377         pv_entry_t pv;
1378
1379         KDPRINTF("pmap_insert_entry(va=0x%lx, pa=0x%lx)\n", va, VM_PAGE_TO_PHYS(m));
1380         pv = get_pv_entry(pmap);
1381         pv->pv_va = va;
1382         pv->pv_pmap = pmap;
1383
1384         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1385         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1386         TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1387         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1388         m->md.pv_list_count++;
1389 }
1390
1391 #ifdef TRAP_TRACING
1392 static int trap_trace_report_done;
1393 #endif
1394
1395 #ifdef SMP
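/*
 * Deliver the given TLB demap handler and its two arguments to the other
 * active CPUs and spin until every targeted CPU has acknowledged in
 * ackmask; if a CPU fails to respond the IPI is retried once before
 * panicking.  Returns the mask of CPUs that were signalled.
 */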
1396 static cpumask_t
1397 pmap_ipi(pmap_t pmap, char *func, uint64_t arg1, uint64_t arg2)
1398 {
1399
1400         int i, cpu_count, retried;
1401         u_int cpus;
1402         cpumask_t cpumask, active, curactive;
1403         cpumask_t active_total, ackmask;
1404         uint16_t *cpulist;
1405
1406         retried = 0;
1407
1408         if (!smp_started)
1409                 return (0);
1410
1411         cpumask = PCPU_GET(cpumask);
1412         cpulist = PCPU_GET(cpulist);
1413         curactive = 0;
1414
1415         if (rdpr(pil) != 14)
1416                 panic("pil %ld != 14", rdpr(pil));
1417
1418 #ifndef CPUMASK_NOT_BEING_ERRONEOUSLY_CHANGED
1419         /* by definition cpumask should have curcpu's bit set */
1420         if (cpumask != (1 << curcpu)) 
1421                 panic("cpumask(0x%x) != (1 << curcpu) (0x%x)\n", 
1422                       cpumask, (1 << curcpu));
1423
1424 #endif
1425 #ifdef notyet
1426         if ((active_total = (pmap->pm_tlbactive & ~cpumask)) == 0)
1427                 goto done;
1428
1429         if (pmap->pm_context != 0)
1430                 active_total = active = (pmap->pm_tlbactive & ~cpumask);
1431         else 
1432 #endif
1433                 active_total = active = PCPU_GET(other_cpus);
1434
1435         if (active == 0)
1436                 goto done;
1437         
1438  retry:
1439         
1440         for (i = curactive = cpu_count = 0, cpus = active; i < mp_ncpus && cpus; i++, cpus = (cpus>>1)) {
1441                 if ((cpus & 0x1) == 0)
1442                         continue;
1443                 
1444                 curactive |= (1 << i);
1445                 cpulist[cpu_count] = (uint16_t)i;
1446                 cpu_count++;
1447         }
1448
1449         ackmask = 0;
1450         cpu_ipi_selected(cpu_count, cpulist, (uint64_t)func, (uint64_t)arg1, 
1451                          (uint64_t)arg2, (uint64_t *)&ackmask);
1452
1453         while (ackmask != curactive) {
1454                 membar(Sync);
1455                 i++;
1456                 if (i > 10000000) {
1457 #ifdef TRAP_TRACING
1458                         int j;
1459 #endif
1460                         uint64_t cpu_state;
1461                         printf("cpu with cpumask=0x%x appears not to be responding to IPIs\n",
1462                                curactive & ~ackmask);
1463
1464 #ifdef TRAP_TRACING
1465                         if (!trap_trace_report_done) {
1466                                 trap_trace_report_done = 1;
1467                                 for (j = 0; j < MAXCPU; j++)
1468                                         if (((1 << j) & curactive & ~ackmask) != 0) {
1469                                                 struct pcpu *pc = pcpu_find(j);
1470                                                 printf("pcpu pad 0x%jx 0x%jx 0x%jx 0x%jx 0x%jx 0x%jx 0x%jx\n",
1471                                                     pc->pad[0], pc->pad[1], pc->pad[2], pc->pad[3],
1472                                                     pc->pad[4], pc->pad[5], pc->pad[6]);
1473                                                 trap_trace_report(j);
1474                                         }
1475                         }
1476 #endif
1477
1478                         hv_cpu_state((uint64_t)ffs64(curactive & ~ackmask), &cpu_state);
1479                         printf("cpu_state of %ld is %ld\n", ffs64(curactive & ~ackmask), cpu_state);
1480                         if (!retried) {
1481                                 printf("retrying the ipi to rule out a memory barrier problem "
1482                                        "before panicking\n");
1483
1484                                 retried = 1;
1485                                 goto retry;
1486                         }
1487
1488                         panic(" ackmask=0x%x active=0x%x\n", ackmask, curactive);
1489                 }
1490         }
1491
1492         active_total |= curactive;
1493         if ((active = ((pmap->pm_tlbactive & all_cpus) & ~(active_total|cpumask))) != 0) {
1494                 printf("pmap_ipi: retrying");
1495                 goto retry;
1496         }
1497  done:
1498         return (active_total);
1499 }
1500 #endif
1501
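/*
 * Invalidate a single page mapping in the given pmap, optionally clearing
 * its TSB entry first, on the local CPU and (on SMP) on all other CPUs.
 */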
1502 void
1503 pmap_invalidate_page(pmap_t pmap, vm_offset_t va, int cleartsb)
1504 {
1505
1506         if (cleartsb == TRUE)
1507                 tsb_clear_tte(&pmap->pm_tsb, va);
1508
1509         DPRINTF("pmap_invalidate_page(va=0x%lx)\n", va);
1510         spinlock_enter();
1511         invlpg(va, pmap->pm_context);
1512 #ifdef SMP
1513         pmap_ipi(pmap, (void *)tl_invlpg, (uint64_t)va, (uint64_t)pmap->pm_context);
1514 #endif
1515         spinlock_exit();
1516 }
1517
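/*
 * Invalidate a range of mappings.  Single-page ranges are handed to
 * pmap_invalidate_page(); small ranges are demapped page by page, while
 * larger ranges flush the whole context (user pmaps) or the entire TLB
 * (kernel pmap).  On SMP the matching handler is run on the other CPUs.
 */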
1518 void
1519 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int cleartsb)
1520 {
1521         vm_offset_t tva, invlrngva;
1522         char *func;
1523 #ifdef SMP
1524         cpumask_t active;
1525 #endif
1526         if ((eva - sva) == PAGE_SIZE) {
1527                 pmap_invalidate_page(pmap, sva, cleartsb);
1528                 return;
1529         }
1530         
1531
1532         KASSERT(sva < eva, ("invalidating negative or zero range sva=0x%lx eva=0x%lx", sva, eva));
1533
1534         if (cleartsb == TRUE) 
1535                 tsb_clear_range(&pmap->pm_tsb, sva, eva);
1536
1537         spinlock_enter();
1538         if ((eva - sva) < PAGE_SIZE*64) {
1539                 for (tva = sva; tva < eva; tva += PAGE_SIZE_8K)
1540                         invlpg(tva, pmap->pm_context);
1541                 func = tl_invlrng;
1542         } else if (pmap->pm_context) {
1543                 func = tl_invlctx;
1544                 invlctx(pmap->pm_context);
1545
1546         } else {
1547                 func = tl_invltlb;
1548                 invltlb();
1549         }
1550 #ifdef SMP
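        /*
         * Encode the range for the remote handler: the page count is packed
         * into the low bits of the start address.
         */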
1551         invlrngva = sva | ((eva - sva) >> PAGE_SHIFT);
1552         active = pmap_ipi(pmap, (void *)func, pmap->pm_context, invlrngva);
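        /*
         * CPUs that were notified but no longer have this pmap active can be
         * dropped from pm_tlbactive.
         */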
1553         active &= ~pmap->pm_active;
1554         atomic_clear_int(&pmap->pm_tlbactive, active);
1555 #endif
1556         spinlock_exit();
1557 }
1558
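/*
 * Invalidate all mappings belonging to a user pmap: clear its TSB and flush
 * its context from the local TLB and, on SMP, from all other CPUs.
 */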
1559 void
1560 pmap_invalidate_all(pmap_t pmap)
1561 {
1562
1563         KASSERT(pmap != kernel_pmap, ("invalidate_all called on kernel_pmap"));
1564
1565         tsb_clear(&pmap->pm_tsb);
1566
1567         spinlock_enter();
1568         invlctx(pmap->pm_context);
1569 #ifdef SMP
1570         pmap_ipi(pmap, tl_invlctx, pmap->pm_context, 0);
1571         pmap->pm_tlbactive = pmap->pm_active;
1572 #endif
1573         spinlock_exit();
1574 }
1575
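/*
 * Return whether any mapping of the page has the modified (VTD_W) bit set.
 */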
1576 boolean_t
1577 pmap_is_modified(vm_page_t m)
1578 {
1579
1580         return (tte_get_phys_bit(m, VTD_W));
1581 }
1582
1583
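/*
 * Return TRUE if no mapping currently exists for va in the pmap's TTE hash,
 * i.e. the address may be prefaulted.
 */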
1584 boolean_t 
1585 pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
1586 {
1587         return (tte_hash_lookup(pmap->pm_hash, va) == 0);
1588 }
1589
1590 /*
1591  * Extract the physical page address associated with the given kernel virtual
1592  * address.
1593  */
1594
1595 vm_paddr_t
1596 pmap_kextract(vm_offset_t va)
1597 {
1598         tte_t tte_data;
1599         vm_paddr_t pa;
1600
1601         pa = 0;
1602         if (va > KERNBASE && va < KERNBASE + nucleus_memory) {
1603                 uint64_t offset;
1604                 offset = va - KERNBASE; 
1605                 pa = nucleus_mappings[offset >> 22] | (va & PAGE_MASK_4M);
1606         }
1607         if ((pa == 0) && (tte_data = tsb_lookup_tte(va, 0)) != 0)
1608                 pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
1609
1610         if ((pa == 0) && (tte_data = tte_hash_lookup(kernel_pmap->pm_hash, va)) != 0)
1611                 pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
1612
1613         return (pa);
1614 }
1615
1616 /*
1617  * Map a range of physical addresses into kernel virtual address space.
1618  *
1619  * The value passed in *virt is a suggested virtual address for the mapping.
1620  * Architectures which can support a direct-mapped physical to virtual region
1621  * can return the appropriate address within that region, leaving '*virt'
1622  * unchanged.
1623  */
1624 vm_offset_t
1625 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1626 {
1627         return (TLB_PHYS_TO_DIRECT(start));
1628 }
1629
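/*
 * mincore(2) support is not implemented on sun4v; no information is
 * reported for any address.
 */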
1630 int 
1631 pmap_mincore(pmap_t pmap, vm_offset_t addr)
1632 {
1633         return (0);
1634 }
1635
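/*
 * Pre-populate the page table for a mapped object.  This is a stub on
 * sun4v; it only logs the call.
 */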
1636 void 
1637 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 
1638                     vm_pindex_t index, vm_size_t size)
1639 {
1640         printf("pmap_object_init_pt\n");
1641         return;
1642 }
1643
1644 /*
1645  * Returns true if the pmap's pv is one of the first
1646  * 16 pvs linked to from this page.  This count may
1647  * be changed upwards or downwards in the future; it
1648  * is only necessary that true be returned for a small
1649  * subset of pmaps for proper page aging.
1650  */
1651 boolean_t
1652 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
1653 {
1654         pv_entry_t pv;
1655         int loops = 0;
1656
1657         if (m->flags & PG_FICTITIOUS)
1658                 return (FALSE);
1659
1660         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1661         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1662                 if (pv->pv_pmap == pmap) {
1663                         return (TRUE);
1664                 }
1665                 loops++;
1666                 if (loops >= 16)
1667                         break;
1668         }       
1669         return (FALSE);
1670 }
1671
1672 /*
1673  * Initialize a vm_page's machine-dependent fields.
1674  */
1675 void
1676 pmap_page_init(vm_page_t m)
1677 {
1678
1679         TAILQ_INIT(&m->md.pv_list);
1680         m->md.pv_list_count = 0;
1681 }
1682 /*
1683  * Lower the permission for all mappings to a given page.
1684  */
1685 void
1686 pmap_remove_write(vm_page_t m)
1687 {
1688         if ((m->flags & PG_WRITEABLE) == 0)
1689                 return;
1690         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1691         tte_clear_phys_bit(m, VTD_SW_W|VTD_W);
1692         vm_page_flag_clear(m, PG_WRITEABLE);
1693 }
1694 /*
1695  * Initialize the pmap associated with process 0.
1696  */
1697 void
1698 pmap_pinit0(pmap_t pmap)
1699 {
1700         PMAP_LOCK_INIT(pmap);
1701         pmap->pm_active = pmap->pm_tlbactive = ~0;
1702         pmap->pm_context = 0;
1703         pmap->pm_tsb_ra = kernel_pmap->pm_tsb_ra;
1704         pmap->pm_hash = kernel_pmap->pm_hash;
1705         critical_enter();
1706         PCPU_SET(curpmap, pmap);
1707         critical_exit();
1708         TAILQ_INIT(&pmap->pm_pvlist);
1709         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1710 }
1711
1712 /*
1713  * Initialize a preallocated and zeroed pmap structure, such as one in a
1714  * vmspace structure.
1715  */
1716 int
1717 pmap_pinit(pmap_t pmap)
1718 {
1719         int i;
1720
1721         pmap->pm_context = get_context();
1722         pmap->pm_tsb_ra = vtophys(&pmap->pm_tsb);
1723
1724         vm_page_lock_queues();
1725         pmap->pm_hash = tte_hash_create(pmap->pm_context, &pmap->pm_hashscratch);
1726         tsb_init(&pmap->pm_tsb, &pmap->pm_tsbscratch, TSB_INIT_SHIFT);
1727         vm_page_unlock_queues();
1728         pmap->pm_tsb_miss_count = pmap->pm_tsb_cap_miss_count = 0;
1729         pmap->pm_active = pmap->pm_tlbactive = 0;
1730         for (i = 0; i < TSB_MAX_RESIZE; i++)
1731                 pmap->pm_old_tsb_ra[i] = 0;
1732
1733         TAILQ_INIT(&pmap->pm_pvlist);
1734         PMAP_LOCK_INIT(pmap);
1735         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1736         return (1);
1737 }
1738
1739 /*
1740  * Set the physical protection on the specified range of this map as requested.
1741  */
1742 void
1743 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1744 {
1745
1746         int anychanged;
1747         vm_offset_t tva;
1748         uint64_t clearbits;
1749
1750         DPRINTF("pmap_protect(0x%lx, 0x%lx, %d)\n", sva, eva, prot);
1751         
1752         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1753                 pmap_remove(pmap, sva, eva);
1754                 return;
1755         }
1756         
1757         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 
1758             (VM_PROT_WRITE|VM_PROT_EXECUTE))
1759                 return;
1760
1761         clearbits = anychanged = 0;
1762         
1763         if ((prot & VM_PROT_WRITE) == 0)
1764                 clearbits |= (VTD_W|VTD_SW_W);
1765         if ((prot & VM_PROT_EXECUTE) == 0)
1766                 clearbits |= VTD_X;
1767
1768         vm_page_lock_queues();
1769         PMAP_LOCK(pmap);
1770         for (tva = sva; tva < eva; tva += PAGE_SIZE) {
1771                 uint64_t otte_data;
1772                 vm_page_t m;
1773
1774                 if ((otte_data = tte_hash_clear_bits(pmap->pm_hash, tva, 
1775                                                      clearbits)) == 0)
1776                         continue;
1777                 /*
1778                  * XXX technically we should do a shootdown if it 
1779                  * was referenced and was executable - but is not now
1780                  */
1781                 if (!anychanged && (otte_data & VTD_W))
1782                         anychanged = 1;
1783                 
1784                 if (otte_data & VTD_MANAGED) {
1785                         m = NULL;
1786
1787                         if (otte_data & VTD_REF) {
1788                                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(otte_data));
1789                                 vm_page_flag_set(m, PG_REFERENCED);
1790                         }
1791                         if (otte_data & VTD_W) {
1792                                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(otte_data));
1793                                 vm_page_dirty(m);
1794                         }
1795                 } 
1796         }
1797
1798         vm_page_unlock_queues();
1799         if (anychanged)
1800                 pmap_invalidate_range(pmap, sva, eva, TRUE);
1801         PMAP_UNLOCK(pmap);
1802 }
1803
1804 /*
1805  * Map a list of wired pages into kernel virtual address space.  This is
1806  * intended for temporary mappings which do not need page modification or
1807  * references recorded.  Existing mappings in the region are overwritten.
1808  */
1809 void
1810 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
1811 {
1812         vm_offset_t va;
1813         tte_t otte;
1814         
1815         otte = 0;
1816         va = sva;
1817         while (count-- > 0) {
1818                 otte |= tte_hash_update(kernel_pmap->pm_hash, va,  
1819                                         VM_PAGE_TO_PHYS(*m) | TTE_KERNEL | VTD_8K);
1820                 va += PAGE_SIZE;
1821                 m++;
1822         }
1823         if ((otte & VTD_REF) != 0)
1824                 pmap_invalidate_range(kernel_pmap, sva, va, FALSE);
1825 }
1826
1827 /*
1828  * Remove page mappings from kernel virtual address space.  Intended for
1829  * temporary mappings entered by pmap_qenter.
1830  */
1831 void
1832 pmap_qremove(vm_offset_t sva, int count)
1833 {
1834         vm_offset_t va;
1835         tte_t otte;
1836
1837         va = sva;
1838
1839         otte = 0;
1840         while (count-- > 0) {
1841                 otte |= tte_hash_delete(kernel_pmap->pm_hash, va);
1842                 va += PAGE_SIZE;
1843         }
1844         if ((otte & VTD_REF) != 0)
1845                 pmap_invalidate_range(kernel_pmap, sva, va, TRUE);
1846 }
1847
1848 /*
1849  * Release any resources held by the given physical map.
1850  * Called when a pmap initialized by pmap_pinit is being released.
1851  * Should only be called if the map contains no valid mappings.
1852  */
1853 void
1854 pmap_release(pmap_t pmap)
1855 {
1856         KASSERT(pmap->pm_stats.resident_count == 0,
1857             ("pmap_release: pmap resident count %ld != 0",
1858             pmap->pm_stats.resident_count));
1859
1860         tsb_deinit(&pmap->pm_tsb);
1861         tte_hash_destroy(pmap->pm_hash);
1862         free_context(pmap->pm_context);
1863         PMAP_LOCK_DESTROY(pmap);
1864 }
1865
1866 /*
1867  * Remove the given range of addresses from the specified map.
1868  */
1869 void
1870 pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
1871 {
1872         int invlva;
1873         vm_offset_t tva;
1874         uint64_t tte_data;
1875         /*
1876          * Perform an unsynchronized read.  This is, however, safe.
1877          */
1878         if (pmap->pm_stats.resident_count == 0)
1879                 return;
1880         
1881         DPRINTF("pmap_remove(start=0x%lx, end=0x%lx)\n", 
1882                 start, end);
1883         invlva = 0;
1884         vm_page_lock_queues();
1885         PMAP_LOCK(pmap);
1886         for (tva = start; tva < end; tva += PAGE_SIZE) {
1887                 if ((tte_data = tte_hash_delete(pmap->pm_hash, tva)) == 0)
1888                         continue;
1889                 pmap_remove_tte(pmap, tte_data, tva);
1890                 if (tte_data & (VTD_REF|VTD_W))
1891                         invlva = 1;
1892         }
1893         vm_page_unlock_queues();
1894         if (invlva)
1895                 pmap_invalidate_range(pmap, start, end, TRUE);
1896         PMAP_UNLOCK(pmap);
1897 }
1898
1899 /*
1900  *      Routine:        pmap_remove_all
1901  *      Function:
1902  *              Removes this physical page from
1903  *              all physical maps in which it resides.
1904  *              Reflects back modify bits to the pager.
1905  *
1906  *      Notes:
1907  *              Original versions of this routine were very
1908  *              inefficient because they iteratively called
1909  *              pmap_remove (slow...)
1910  */
1911
1912 void
1913 pmap_remove_all(vm_page_t m)
1914 {
1915         pv_entry_t pv;
1916         uint64_t tte_data;
1917         DPRINTF("pmap_remove_all 0x%lx\n", VM_PAGE_TO_PHYS(m));
1918
1919         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1920         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1921                 PMAP_LOCK(pv->pv_pmap);
1922                 pv->pv_pmap->pm_stats.resident_count--;
1923
1924                 tte_data = tte_hash_delete(pv->pv_pmap->pm_hash, pv->pv_va);
1925
1926                 if (tte_data & VTD_WIRED)
1927                         pv->pv_pmap->pm_stats.wired_count--;
1928                 if (tte_data & VTD_REF)
1929                         vm_page_flag_set(m, PG_REFERENCED);
1930                 
1931                 /*
1932                  * Update the vm_page_t clean and reference bits.
1933                  */
1934                 if (tte_data & VTD_W) {
1935                         KASSERT((tte_data & VTD_SW_W),
1936         ("pmap_remove_all: modified page not writable: va: %lx, tte: %lx",
1937                             pv->pv_va, tte_data));
1938                         vm_page_dirty(m);
1939                 }
1940         
1941                 pmap_invalidate_page(pv->pv_pmap, pv->pv_va, TRUE);
1942                 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1943                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1944                 m->md.pv_list_count--;
1945                 PMAP_UNLOCK(pv->pv_pmap);
1946                 free_pv_entry(pv);
1947         }
1948         vm_page_flag_clear(m, PG_WRITEABLE);
1949 }
1950
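/*
 * Remove the pv entry for (pmap, va) from both the page's pv list and the
 * pmap's pv list, clearing PG_WRITEABLE once the page has no mappings left.
 */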
1951 static void
1952 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1953 {
1954         pv_entry_t pv;
1955         if (pmap != kernel_pmap)
1956                 DPRINTF("pmap_remove_entry(va=0x%lx, pa=0x%lx)\n", va, VM_PAGE_TO_PHYS(m));
1957         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1958         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
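        /*
         * Search whichever list is expected to be shorter: the page's pv
         * list or the pmap's pv list.
         */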
1959         if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1960                 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1961                         if (pmap == pv->pv_pmap && va == pv->pv_va) 
1962                                 break;
1963                 }
1964         } else {
1965                 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1966                         if (va == pv->pv_va) 
1967                                 break;
1968                 }
1969         }
1970         KASSERT(pv != NULL, ("pmap_remove_entry: pv not found va=0x%lx pa=0x%lx", va, VM_PAGE_TO_PHYS(m)));
1971         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1972         m->md.pv_list_count--;
1973         if (TAILQ_EMPTY(&m->md.pv_list))
1974                 vm_page_flag_clear(m, PG_WRITEABLE);
1975         TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1976         free_pv_entry(pv);
1977 }
1978
1979
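/*
 * Remove all user mappings from the pmap in a single pass over its pv list,
 * then reset the TTE hash and invalidate the whole context.  Typically used
 * when the address space is being torn down.
 */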
1980 void
1981 pmap_remove_pages(pmap_t pmap)
1982 {
1983         
1984         vm_page_t m;
1985         pv_entry_t pv, npv;
1986         tte_t tte_data;
1987         
1988         DPRINTF("pmap_remove_pages(ctx=0x%lx)\n", pmap->pm_context);
1989         vm_page_lock_queues();
1990         PMAP_LOCK(pmap);
1991         for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
1992                 tte_data = tte_hash_delete(pmap->pm_hash, pv->pv_va);
1993
1994                 if (tte_data == 0) {
1995                         printf("TTE IS ZERO @ VA %016lx\n", pv->pv_va);
1996                         panic("bad tte");
1997                 }
1998                 if (tte_data & VTD_WIRED) {
1999                         panic("wired page in process not handled correctly");
2000                         pmap->pm_stats.wired_count--;
2001                 }
2002                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
2003
2004                 pmap->pm_stats.resident_count--;
2005                 
2006                 if (tte_data & VTD_W) {
2007                         vm_page_dirty(m);
2008                 }
2009                 
2010                 npv = TAILQ_NEXT(pv, pv_plist);
2011                 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
2012                 
2013                 m->md.pv_list_count--;
2014                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2015                 if (TAILQ_EMPTY(&m->md.pv_list))
2016                         vm_page_flag_clear(m, PG_WRITEABLE);
2017
2018                 free_pv_entry(pv);
2019         }
2020         pmap->pm_hash = tte_hash_reset(pmap->pm_hash, &pmap->pm_hashscratch);
2021         if (0)
2022                 pmap_tsb_reset(pmap);
2023
2024         vm_page_unlock_queues();
2025         pmap_invalidate_all(pmap);
2026         PMAP_UNLOCK(pmap);
2027 }
2028
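/*
 * Free the TSBs left over from earlier resizes and restore the pmap's TSB
 * to its initial size.  Currently disabled in pmap_remove_pages().
 */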
2029 static void
2030 pmap_tsb_reset(pmap_t pmap)
2031 {
2032         int i;
2033
2034         for (i = 1; i < TSB_MAX_RESIZE && pmap->pm_old_tsb_ra[i]; i++) {
2035                 pmap_free_contig_pages((void *)TLB_PHYS_TO_DIRECT(pmap->pm_old_tsb_ra[i]), 
2036                                        (1 << (TSB_INIT_SHIFT + i)));
2037                 pmap->pm_old_tsb_ra[i] = 0;
2038         }
2039         if (pmap->pm_old_tsb_ra[0] != 0) {
2040                 vm_paddr_t tsb_pa = pmap->pm_tsb.hti_ra;
2041                 int size = tsb_size(&pmap->pm_tsb);
2042                 pmap->pm_tsb.hti_ntte = (1 << (TSB_INIT_SHIFT + PAGE_SHIFT - TTE_SHIFT));
2043                 pmap->pm_tsb.hti_ra = pmap->pm_old_tsb_ra[0];
2044                 pmap_free_contig_pages((void *)TLB_PHYS_TO_DIRECT(tsb_pa), size);
2045                 pmap->pm_tsbscratch = pmap->pm_tsb.hti_ra | (uint64_t)TSB_INIT_SHIFT;
2046                 pmap->pm_old_tsb_ra[0] = 0;
2047         }
2048 }
2049
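/*
 * Zero a range of physical memory using the hypervisor's memory scrub
 * service.  hv_mem_scrub() may only process part of the request, so loop
 * until the entire range has been scrubbed.
 */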
2050 void
2051 pmap_scrub_pages(vm_paddr_t pa, int64_t size)
2052 {
2053         uint64_t bytes_zeroed;
2054         while (size > 0) {
2055                 hv_mem_scrub(pa, size, &bytes_zeroed);
2056                 pa += bytes_zeroed;
2057                 size -= bytes_zeroed;
2058         }
2059 }
2060
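/*
 * Per-mapping bookkeeping for pmap_remove(): update the wired and resident
 * counts and, for managed pages, transfer the modified and referenced bits
 * to the vm_page and remove the pv entry.
 */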
2061 static void
2062 pmap_remove_tte(pmap_t pmap, tte_t tte_data, vm_offset_t va)
2063 {
2064         
2065         vm_page_t m;
2066
2067         if (pmap != kernel_pmap)
2068                 DPRINTF("pmap_remove_tte(va=0x%lx, pa=0x%lx)\n", va, TTE_GET_PA(tte_data));
2069
2070         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2071         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2072         if (tte_data & VTD_WIRED)
2073                 pmap->pm_stats.wired_count--;
2074
2075         pmap->pm_stats.resident_count--;
2076         
2077         if (tte_data & VTD_MANAGED) {
2078                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tte_data));
2079                 if (tte_data & VTD_W) {
2080                         vm_page_dirty(m);       
2081                 }
2082                 if (tte_data & VTD_REF) 
2083                         vm_page_flag_set(m, PG_REFERENCED);
2084                 pmap_remove_entry(pmap, m, va);
2085         }
2086 }
2087
2088 /* Double the TSB if capacity misses exceed half of all TSB misses seen
2089  * since the last check and the TSB is not yet at its maximum size.
2090  */
2091 static void
2092 pmap_tsb_resize(pmap_t pmap)
2093 {
2094         uint32_t miss_count, cap_miss_count;
2095         int npages_shift;
2096         struct tsb_resize_info info;
2097         hv_tsb_info_t hvtsb;
2098         uint64_t tsbscratch;
2099
2100         KASSERT(pmap == curthread_pmap, ("operating on non-current pmap"));
2101         miss_count = pmap->pm_tsb_miss_count;
2102         cap_miss_count = pmap->pm_tsb_cap_miss_count;
2103         npages_shift = tsb_page_shift(pmap);
2104
2105         if (npages_shift < (TSB_INIT_SHIFT + TSB_MAX_RESIZE) && 
2106             cap_miss_count > (miss_count >> 1)) {
2107                 DPRINTF("resizing tsb for proc=%s pid=%d\n", 
2108                         curthread->td_proc->p_comm, curthread->td_proc->p_pid);
2109                 pmap->pm_old_tsb_ra[npages_shift - TSB_INIT_SHIFT] = pmap->pm_tsb.hti_ra;
2110
2111                 /* double TSB size */
2112                 tsb_init(&hvtsb, &tsbscratch, npages_shift + 1);
2113 #ifdef SMP
2114                 spinlock_enter();
2115                 /* reset tsb */
2116                 bcopy(&hvtsb, &pmap->pm_tsb, sizeof(hv_tsb_info_t));
2117                 pmap->pm_tsbscratch = tsb_set_scratchpad_user(&pmap->pm_tsb);
2118
2119                 if (hv_mmu_tsb_ctxnon0(1, pmap->pm_tsb_ra) != H_EOK)
2120                         panic("failed to set TSB 0x%lx - context == %ld\n", 
2121                               pmap->pm_tsb_ra, pmap->pm_context);
2122                 info.tri_tsbscratch = pmap->pm_tsbscratch;
2123                 info.tri_tsb_ra = pmap->pm_tsb_ra;
2124                 pmap_ipi(pmap, tl_tsbupdate, pmap->pm_context, vtophys(&info));
2125                 pmap->pm_tlbactive = pmap->pm_active;
2126                 spinlock_exit();
2127 #else 
2128                 bcopy(&hvtsb, &pmap->pm_tsb, sizeof(hvtsb));
2129                 if (hv_mmu_tsb_ctxnon0(1, pmap->pm_tsb_ra) != H_EOK)
2130                         panic("failed to set TSB 0x%lx - context == %ld\n", 
2131                               pmap->pm_tsb_ra, pmap->pm_context);
2132                 pmap->pm_tsbscratch = tsb_set_scratchpad_user(&pmap->pm_tsb);
2133 #endif          
2134         }
2135         pmap->pm_tsb_miss_count = 0;
2136         pmap->pm_tsb_cap_miss_count = 0;
2137 }
2138
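/*
 * Replace the pmap's TTE hash with a larger one.  Other CPUs running
 * threads of this process are told to switch to the new hash before the
 * old one is destroyed.
 */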
2139 static void
2140 pmap_tte_hash_resize(pmap_t pmap)
2141 {
2142         tte_hash_t old_th = pmap->pm_hash;
2143         
2144         pmap->pm_hash = tte_hash_resize(pmap->pm_hash);
2145         spinlock_enter();
2146         if (curthread->td_proc->p_numthreads != 1) 
2147                 pmap_ipi(pmap, tl_ttehashupdate, pmap->pm_context, pmap->pm_hashscratch);
2148
2149         pmap->pm_hashscratch = tte_hash_set_scratchpad_user(pmap->pm_hash, pmap->pm_context);   
2150         spinlock_exit();
2151         tte_hash_destroy(old_th);
2152 }
2153
2154 /*
2155  *      pmap_ts_referenced:
2156  *
2157  *      Return a count of reference bits for a page, clearing those bits.
2158  *      It is not necessary for every reference bit to be cleared, but it
2159  *      is necessary that 0 only be returned when there are truly no
2160  *      reference bits set.
2161  *
2162  *      XXX: The exact number of bits to check and clear is a matter that
2163  *      should be tested and standardized at some point in the future for
2164  *      optimal aging of shared pages.
2165  */
2166
2167 int
2168 pmap_ts_referenced(vm_page_t m)
2169 {
2170         
2171         int rv;
2172         pv_entry_t pv, pvf, pvn;
2173         pmap_t pmap;
2174         tte_t otte_data;
2175
2176         rv = 0;
2177         if (m->flags & PG_FICTITIOUS)
2178                 return (rv);
2179
2180         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2181         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2182                 
2183                 pvf = pv;
2184
2185                 do {
2186                         pvn = TAILQ_NEXT(pv, pv_list);
2187                         
2188                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2189                         
2190                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2191                         
2192                         pmap = pv->pv_pmap;
2193                         PMAP_LOCK(pmap);
2194                         otte_data = tte_hash_clear_bits(pmap->pm_hash, pv->pv_va, VTD_REF);
2195                         if ((otte_data & VTD_REF) != 0) {
2196                                 pmap_invalidate_page(pmap, pv->pv_va, TRUE);
2197                                 
2198                                 rv++;
2199                                 if (rv > 4) {
2200                                         PMAP_UNLOCK(pmap);
2201                                         break;
2202                                 }
2203                         }
2204                 
2205                         PMAP_UNLOCK(pmap);
2206                 } while ((pv = pvn) != NULL && pv != pvf);
2207         }
2208         return (rv);
2209 }
2210
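/*
 * Zero a page through the direct map using the block-clear routine.
 */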
2211 void
2212 pmap_zero_page(vm_page_t m)
2213 {
2214         hwblkclr((void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
2215 }
2216
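/*
 * Zero a sub-range of a page; whole-page requests use the block-clear
 * routine, partial ranges fall back to bzero().
 */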
2217 void
2218 pmap_zero_page_area(vm_page_t m, int off, int size)
2219 {
2220         vm_paddr_t pa;
2221         vm_offset_t va;
2222                 
2223         pa = VM_PAGE_TO_PHYS(m);
2224         va = TLB_PHYS_TO_DIRECT(pa);
2225         if (off == 0 && size == PAGE_SIZE)
2226                 hwblkclr((void *)va, PAGE_SIZE);
2227         else
2228                 bzero((char *)(va + off), size);
2229
2230 }
2231
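/*
 * Zero a page on behalf of the page-zeroing idle thread; on sun4v this is
 * identical to pmap_zero_page().
 */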
2232 void
2233 pmap_zero_page_idle(vm_page_t m)
2234 {
2235         hwblkclr((void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)), PAGE_SIZE);
2236 }
2237
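/*
 * Report a fatal error when installing a user (non-context-0) TSB fails;
 * the hypervisor error code and TSB real address are passed in for the
 * panic message.
 */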
2238 void
2239 pmap_set_ctx_panic(uint64_t error, vm_paddr_t tsb_ra, pmap_t pmap)
2240 {
2241         panic("setting ctxnon0 failed ctx=0x%lx hvtsb_ra=0x%lx tsbscratch=0x%lx error=0x%lx",
2242               pmap->pm_context, tsb_ra, pmap->pm_tsbscratch, error);
2243         
2244 }