/*
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */
#include "opt_user_ldt.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/pcb_ext.h>
#include <machine/vm86.h>

#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <pc98/pc98/pc98.h>
#include <i386/isa/isa.h>
static void	cpu_reset_real __P((void));
static void	cpu_reset_proxy __P((void));
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
/*
 * quick version of vm_fault
 */
int
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		return (subyte(v, fubyte(v)));
	return (fubyte(v));
}
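
/*
 * Hypothetical usage sketch (not from this file): faulting in a user
 * page before an operation that must not fault, e.g.
 *
 *	if (vm_fault_quick((caddr_t)uaddr, VM_PROT_READ) < 0)
 *		return (EFAULT);
 *
 * For VM_PROT_WRITE the fubyte()/subyte() pair reads a byte and writes
 * it straight back, forcing any copy-on-write processing that a plain
 * read fault would not perform.
 */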
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct pcb *pcb2 = &p2->p_addr->u_pcb;

	/* Ensure that p1's pcb is up to date. */
	if (npxproc == p1)
		npxsave(&p1->p_addr->u_pcb.pcb_savefpu);

	/* Copy p1's pcb. */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies the user mode register values.
	 */
	p2->p_md.md_regs = (struct trapframe *)
	    ((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
	*p2->p_md.md_regs = *p1->p_md.md_regs;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
	pcb2->pcb_edi = p2->p_md.md_regs->tf_edi;
	pcb2->pcb_esi = (int)fork_return;
	pcb2->pcb_ebp = p2->p_md.md_regs->tf_ebp;
	pcb2->pcb_esp = (int)p2->p_md.md_regs - sizeof(void *);
	pcb2->pcb_ebx = (int)p2;
	pcb2->pcb_eip = (int)fork_trampoline;
	/*
	 * pcb2->pcb_ldt:	duplicated below, if necessary.
	 * pcb2->pcb_ldt_len:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above (always 0 here?).
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 */

	pcb2->pcb_mpnest = 1;

	/*
	 * XXX don't copy the i/o pages.  this should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	if (pcb2->pcb_ldt != 0) {
		union descriptor *new_ldt;
		size_t len = pcb2->pcb_ldt_len * sizeof(union descriptor);

		new_ldt = (union descriptor *)kmem_alloc(kernel_map, len);
		bcopy(pcb2->pcb_ldt, new_ldt, len);
		pcb2->pcb_ldt = (caddr_t)new_ldt;
	}

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch().
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user mode.
	 */
}
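
/*
 * A sketch of the result (derived from the code above, not a normative
 * layout).  At the top of p2's UPAGES kernel stack:
 *
 *	p2->p_addr + UPAGES * PAGE_SIZE
 *	    16 bytes of slop (the "- 16" above)
 *	    struct trapframe	<- p2->p_md.md_regs (user registers)
 *	    return-address slot	<- pcb_esp
 *
 * When cpu_switch() first selects p2 it loads %esp from pcb_esp and
 * "returns" to pcb_eip, i.e. fork_trampoline, with %ebx = p2 and
 * %esi = fork_return, so the child completes the fork via
 * fork_return(p2, frame).
 */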
/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_esi = (int) func;	/* function */
	p->p_addr->u_pcb.pcb_ebx = (int) arg;	/* first arg */
}
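
/*
 * Hypothetical usage sketch (not from this file): a subsystem creating
 * a kernel thread forks and then redirects the child before it is
 * first scheduled, along the lines of
 *
 *	fork the child (exact fork interface assumed);
 *	cpu_set_fork_handler(child, mydaemon_main, softc);
 *
 * so the child enters mydaemon_main(softc, frame) via fork_trampoline()
 * and stays in kernel mode.  mydaemon_main and softc are invented names
 * for the example.
 */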
void
cpu_exit(p)
	register struct proc *p;
{
	struct pcb *pcb = &p->p_addr->u_pcb;

	if (pcb->pcb_ext != 0) {
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
		    ctob(IOPAGES + 1));
		pcb->pcb_ext = 0;
	}
	if (pcb->pcb_ldt != 0) {
		if (pcb == curpcb) {
			lldt(_default_ldt);
			currentldt = _default_ldt;
		}
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
		    pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
}
/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	int error;
	caddr_t tempuser;

	tempuser = malloc(ctob(UPAGES), M_TEMP, M_WAITOK);

	bzero(tempuser, ctob(UPAGES));
	bcopy(p->p_addr, tempuser, sizeof(struct user));
	bcopy(p->p_md.md_regs,
	    tempuser + ((caddr_t) p->p_md.md_regs - (caddr_t) p->p_addr),
	    sizeof(struct trapframe));

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t) tempuser, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
	    cred, (int *)NULL, p);

	free(tempuser, M_TEMP);

	return (error);
}
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}
/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
		    (bp->b_flags & B_READ) ? (VM_PROT_READ|VM_PROT_WRITE) : VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}
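
/*
 * Hypothetical usage sketch (not from this file): raw-I/O style code
 * brackets a transfer with these two calls:
 *
 *	bp->b_flags |= B_PHYS;
 *	bp->b_saveaddr = kva;	(page-aligned KVA covering b_bufsize)
 *	vmapbuf(bp);		(fault in, hold and map the pages)
 *	... run the transfer on bp->b_data ...
 *	vunmapbuf(bp);		(unmap, unhold, restore b_data)
 *
 * Here kva stands for kernel virtual space the caller has already
 * reserved; the name is invented for the example.
 */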
/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}
/*
 * Force reset the processor by invalidating the entire address space!
 */

static void
cpu_reset_proxy()
{
	u_int saved_mp_lock;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to disable interrupts */
	saved_mp_lock = mp_lock;
	mp_lock = 1;
	printf("cpu_reset_proxy: Grabbed mp lock for BSP\n");
	cpu_reset_proxy_active = 3;
	while (cpu_reset_proxy_active == 3)
		;	/* Wait for other cpu to enable interrupts */
	stop_cpus((1<<cpu_reset_proxyid));
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	cpu_reset_real();
}
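
/*
 * A sketch of the handshake above, reconstructed from the code (the
 * state numbers are values of cpu_reset_proxy_active):
 *
 *	0 -> 1	BSP restarted, announces itself and spins
 *	1 -> 2	requesting CPU has disabled interrupts
 *	2 -> 3	BSP has grabbed the mp lock
 *	3 -> 4	requesting CPU re-enables interrupts and spins forever
 *
 * after which the BSP stops the requesting CPU and performs the real
 * reset itself.
 */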
void
cpu_reset()
{
	if (smp_active == 0) {
		cpu_reset_real();
		/* NOTREACHED */
	} else {
		u_int map;
		int cnt;

		printf("cpu_reset called on cpu#%d\n", cpuid);

		map = other_cpus & ~stopped_cpus;

		if (map != 0) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);		/* Stop all other CPUs */
		}

		if (cpuid == 0) {
			DELAY(1000000);
			cpu_reset_real();
			/* NOTREACHED */
		} else {
			/* We are not BSP (CPU #0) */
			cpu_reset_proxyid = cpuid;
			cpustop_restartfunc = cpu_reset_proxy;
			printf("cpu_reset: Restarting BSP\n");
			started_cpus = (1<<0);		/* Restart CPU #0 */

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000)
				cnt++;	/* Wait for BSP to announce restart */
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			__asm __volatile("cli" : : : "memory");
			cpu_reset_proxy_active = 2;
			cnt = 0;
			while (cpu_reset_proxy_active == 2 && cnt < 10000000)
				cnt++;	/* Do nothing */
			if (cpu_reset_proxy_active == 2) {
				printf("cpu_reset: BSP did not grab mp lock\n");
				cpu_reset_real();	/* XXX: Bogus ? */
			}
			cpu_reset_proxy_active = 4;
			__asm __volatile("sti" : : : "memory");
			while (1)
				;	/* NOTREACHED */
		}
	}
}
static void
cpu_reset_real()
{
#ifdef PC98
	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);	/* Reset. */
#else
	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off the GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
#if !defined(BROKEN_KEYBOARD_RESET)
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif
#endif /* PC98 */

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, PAGE_SIZE);

	/* "good night, sweet prince .... <THUNK!>" */
	invltlb();
	/* NOTREACHED */
	while (1)
		;
}
int
grow_stack(p, sp)
	struct proc *p;
	u_int sp;
{
	int rv;

	rv = vm_map_growstack(p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}
SYSCTL_DECL(_vm_stats_misc);

static int cnt_prezero;

SYSCTL_INT(_vm_stats_misc, OID_AUTO,
	cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");
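
/*
 * The count of pages pre-zeroed so far is exported read-only under
 * vm.stats.misc, so it can be inspected at runtime with e.g.
 *
 *	sysctl vm.stats.misc.cnt_prezero
 */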
/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)
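
/*
 * Worked example: with cnt.v_free_count == 3000 free pages,
 * ZIDLE_HI(3000) == 2400 and ZIDLE_LO(3000) == 2000.  Zeroing runs
 * until 4/5 of the free pages are pre-zeroed, then stays off until
 * the zeroed count falls back below 2/3, so the idle loop does not
 * flutter around a single threshold.
 */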
int
vm_page_zero_idle()
{
	static int free_rover;
	static int zero_state;
	vm_page_t m;
	int s;

	/*
	 * Attempt to maintain approximately 1/2 of our free pages in a
	 * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
	 * generally zeroing a page when the system is near steady-state.
	 * Otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
	 */

	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return (0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return (0);

	s = splvm();
	__asm __volatile("sti" : : : "memory");
	zero_state = 0;
	m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
	if (m != NULL && (m->flags & PG_ZERO) == 0) {
		vm_page_queues[m->queue].lcnt--;
		TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
		m->queue = PQ_NONE;
		splx(s);
		pmap_zero_page(VM_PAGE_TO_PHYS(m));
		(void)splvm();
		vm_page_flag_set(m, PG_ZERO);
		m->queue = PQ_FREE + m->pc;
		vm_page_queues[m->queue].lcnt++;
		TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m,
		    pageq);
		++vm_page_zero_count;
		++cnt_prezero;
		if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
			zero_state = 1;
	}
	free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
	splx(s);
	__asm __volatile("cli" : : : "memory");
	return (1);
}
/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
	if (busdma_swi_pending != 0)
		busdma_swi();
}
/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(addr)
	vm_offset_t addr;
{
	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return 0;

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}