/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_reset.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/sysent.h>
#include <sys/sf_buf.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/smp.h>
#include <machine/vm86.h>
#include <machine/elan_mmcr.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <xen/hypervisor.h>

#include <pc98/cbus/cbus.h>
#include <x86/isa/isa.h>

#include <machine/xbox.h>
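
/*
 * Default size of the sendfile(2) buffer pool, scaled by maxusers; the
 * value actually used can be overridden at boot with the kern.ipc.nsfbufs
 * tunable fetched in sf_buf_init() below.
 */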
#define	NSFBUFS		(512 + maxusers * 16)

_Static_assert(OFFSETOF_CURTHREAD == offsetof(struct pcpu, pc_curthread),
    "OFFSETOF_CURTHREAD does not correspond with offset of pc_curthread.");
_Static_assert(OFFSETOF_CURPCB == offsetof(struct pcpu, pc_curpcb),
    "OFFSETOF_CURPCB does not correspond with offset of pc_curpcb.");

static void	cpu_reset_real(void);
static void	cpu_reset_proxy(void);
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)
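
/*
 * SF_BUF_HASH() keys on the page's index within vm_page_array, masked down
 * to the hash table size; buffers whose pages collide chain together on the
 * per-bucket list.
 */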

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int	sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

extern int	_ucodesel, _udatasel;

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(td1, p2, td2, flags)
	register struct thread *td1;
	register struct proc *p2;
	register struct thread *td2;
	int flags;
{
	register struct proc *p1;
	struct pcb *pcb2;
	struct mdproc *mdp2;

	p1 = td1->td_proc;
	if ((flags & RFPROC) == 0) {
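		/*
		 * The caller is an rfork(2)-style request without RFPROC:
		 * no child process is being created, so the only work left
		 * is to unshare the user LDT when the address space itself
		 * is being unshared (RFMEM not set).
		 */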
		if ((flags & RFMEM) == 0) {
			/* unshare user LDT */
			struct mdproc *mdp1 = &p1->p_md;
			struct proc_ldt *pldt, *pldt1;

			mtx_lock_spin(&dt_lock);
			if ((pldt1 = mdp1->md_ldt) != NULL &&
			    pldt1->ldt_refcnt > 1) {
				pldt = user_ldt_alloc(mdp1, pldt1->ldt_len);
				if (pldt == NULL)
					panic("could not copy LDT");
				mdp1->md_ldt = pldt;
				user_ldt_deref(pldt1);
			}
			mtx_unlock_spin(&dt_lock);
		}
		return;
	}

	/* Ensure that td1's pcb is up to date. */
	if (td1 == curthread)
		td1->td_pcb->pcb_gs = rgs();
	if (PCPU_GET(fpcurthread) == td1)
		npxsave(td1->td_pcb->pcb_save);

	/* Point the pcb to the top of the stack */
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;
	td2->td_pcb = pcb2;

	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Properly initialize pcb_save */
	pcb2->pcb_save = &pcb2->pcb_user_save;

	/* Point mdproc and then copy over td1's contents. */
	mdp2 = &p2->p_md;
	bcopy(&p1->p_md, mdp2, sizeof(*mdp2));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The -16 is so we can expand the trapframe if we go to vm86.
	 */
	td2->td_frame = (struct trapframe *)((caddr_t)td2->td_pcb - 16) - 1;
	bcopy(td1->td_frame, td2->td_frame, sizeof(struct trapframe));

	td2->td_frame->tf_eax = 0;		/* Child returns zero */
	td2->td_frame->tf_eflags &= ~PSL_C;	/* success */
	td2->td_frame->tf_edx = 1;
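
	/*
	 * The second return value in %edx follows the historical fork(2)
	 * convention in which a nonzero %edx marks the child side; the
	 * child's %eax is set to 0 as well.
	 */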

	/*
	 * If the parent process has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame unless the debugger had set PF_FORK
	 * on the parent.  Otherwise, the child will receive a (likely
	 * unexpected) SIGTRAP when it executes the first instruction after
	 * returning to userland.
	 */
	if ((p1->p_pfsflags & PF_FORK) == 0)
		td2->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
#ifdef PAE
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdpt);
#else
	pcb2->pcb_cr3 = vtophys(vmspace_pmap(p2->p_vmspace)->pm_pdir);
#endif
	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
	pcb2->pcb_esp = (int)td2->td_frame - sizeof(void *);
	pcb2->pcb_ebx = (int)td2;		/* fork_trampoline argument */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl = PSL_KERNEL;		/* ints disabled */
	/*-
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */

	/*
	 * XXX don't copy the i/o pages.  This should probably be fixed.
	 */
	pcb2->pcb_ext = 0;

	/* Copy the LDT, if necessary. */
	mtx_lock_spin(&dt_lock);
	if (mdp2->md_ldt != NULL) {
		if (flags & RFMEM) {
			mdp2->md_ldt->ldt_refcnt++;
		} else {
			mdp2->md_ldt = user_ldt_alloc(mdp2,
			    mdp2->md_ldt->ldt_len);
			if (mdp2->md_ldt == NULL)
				panic("could not copy LDT");
		}
	}
	mtx_unlock_spin(&dt_lock);

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	/*
	 * XXX XEN: need to check how PSL_USER is handled.
	 */
	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
	 * containing the return address when exiting cpu_switch.
	 * This will normally be to fork_trampoline(), which will have
	 * %ebx loaded with the new proc's pointer.  fork_trampoline()
	 * will set up a stack to call fork_return(p, frame); to complete
	 * the return to user-mode.
	 */
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(td, func, arg)
	struct thread *td;
	void (*func)(void *);
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	td->td_pcb->pcb_esi = (int) func;	/* function */
	td->td_pcb->pcb_ebx = (int) arg;	/* first arg */
}

void
cpu_exit(struct thread *td)
{
	/*
	 * If this process has a custom LDT, release it.  Reset pcb_gs
	 * and %gs before we free it in case they refer to an LDT entry.
	 */
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt) {
		td->td_pcb->pcb_gs = _udatasel;
		load_gs(_udatasel);
		user_ldt_free(td);
	} else
		mtx_unlock_spin(&dt_lock);
}

void
cpu_thread_exit(struct thread *td)
{

	if (td == PCPU_GET(fpcurthread))
		npxdrop();

	/* Disable any hardware breakpoints. */
	if (td->td_pcb->pcb_flags & PCB_DBREGS) {
		reset_dbregs();
		td->td_pcb->pcb_flags &= ~PCB_DBREGS;
	}
}

void
cpu_thread_clean(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (pcb->pcb_ext != NULL) {
		/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
		/*
		 * XXX do we need to move the TSS off the allocated pages
		 * before freeing them?  (not done here)
		 */
		kva_free((vm_offset_t)pcb->pcb_ext, ctob(IOPAGES + 1));
		pcb->pcb_ext = NULL;
	}
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{

	td->td_pcb = (struct pcb *)(td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb - 16) - 1;
	td->td_pcb->pcb_ext = NULL;
	td->td_pcb->pcb_save = &td->td_pcb->pcb_user_save;
}

void
cpu_thread_free(struct thread *td)
{

	cpu_thread_clean(td);
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{

	switch (error) {
	case 0:
		td->td_frame->tf_eax = td->td_retval[0];
		td->td_frame->tf_edx = td->td_retval[1];
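		/*
		 * On i386 the carry flag is the syscall error indicator:
		 * the userland syscall stub treats a set PSL_C as "errno
		 * is in %eax", so a successful return must leave it clear.
		 */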
		td->td_frame->tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes, int
		 * 0x80 is 2 bytes.  We saved this in tf_err.
		 */
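		/*
		 * For example, after "int $0x80" tf_err is 2, so backing
		 * %eip up by tf_err makes the thread re-execute the
		 * trapping syscall instruction on its return to user mode.
		 */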
		td->td_frame->tf_eip -= td->td_frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		if (td->td_proc->p_sysent->sv_errsize) {
			if (error >= td->td_proc->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = td->td_proc->p_sysent->sv_errtbl[error];
		}
		td->td_frame->tf_eax = error;
		td->td_frame->tf_eflags |= PSL_C;
		break;
	}
}

/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall.  Put enough state in the new thread's PCB to get it to go back
 * to userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that come from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;

	/* Point the pcb to the top of the stack. */
	pcb2 = td->td_pcb;

	/*
	 * Copy the upcall pcb.  This loads kernel regs.
	 * Those not loaded individually below get their default
	 * values here.
	 */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
	pcb2->pcb_flags &= ~(PCB_NPXINITDONE | PCB_NPXUSERINITDONE);
	pcb2->pcb_save = &pcb2->pcb_user_save;

	/*
	 * Create a new fresh stack for the new thread.
	 */
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

	/*
	 * If the current thread has the trap bit set (i.e. a debugger had
	 * single stepped the process to the system call), we need to clear
	 * the trap flag from the new frame.  Otherwise, the new thread will
	 * receive a (likely unexpected) SIGTRAP when it executes the first
	 * instruction after returning to userland.
	 */
	td->td_frame->tf_eflags &= ~PSL_T;

	/*
	 * Set registers for trampoline to user mode.  Leave space for the
	 * return address on stack.  These are the kernel mode register values.
	 */
	pcb2->pcb_esi = (int)fork_return;		    /* trampoline arg */
	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
	pcb2->pcb_ebx = (int)td;			    /* trampoline arg */
	pcb2->pcb_eip = (int)fork_trampoline;
	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
	pcb2->pcb_gs = rgs();
	/*
	 * If we didn't copy the pcb, we'd need to do the following registers:
	 * pcb2->pcb_cr3:	cloned above.
	 * pcb2->pcb_dr*:	cloned above.
	 * pcb2->pcb_savefpu:	cloned above.
	 * pcb2->pcb_flags:	cloned above.
	 * pcb2->pcb_onfault:	cloned above (always NULL here?).
	 * pcb2->pcb_gs:	cloned above.
	 * pcb2->pcb_ext:	cleared below.
	 */
	pcb2->pcb_ext = NULL;

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{

	/*
	 * Do any extra cleaning that needs to be done.
	 * The thread may have optional components
	 * that are not present in a fresh thread.
	 * This may be a recycled thread so make it look
	 * as though it's newly allocated.
	 */
	cpu_thread_clean(td);

	/*
	 * Set the trap frame to point at the beginning of the uts
	 * function.
	 */
	td->td_frame->tf_ebp = 0;
	td->td_frame->tf_esp =
	    (((int)stack->ss_sp + stack->ss_size - 4) & ~0x0f) - 4;
	td->td_frame->tf_eip = (int)entry;
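
	/*
	 * The resulting frame mimics an ordinary cdecl call into the uts
	 * entry point: %esp is 16-byte aligned less one slot for a (dummy)
	 * return address, and the suword() below places the argument in
	 * the slot just above it, at 4(%esp).
	 */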

	/*
	 * Pass the address of the mailbox for this kse to the uts
	 * function as a parameter on the stack.
	 */
	suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
	    (int)arg);
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct segment_descriptor sd;
	uint32_t base;

	/*
	 * Construct a descriptor and store it in the pcb for
	 * the next context switch.  Also store it in the gdt
	 * so that the load of %gs will activate it
	 * at return to userland.
	 */
	base = (uint32_t)tls_base;
	sd.sd_lobase = base & 0xffffff;
	sd.sd_hibase = (base >> 24) & 0xff;
	sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sd.sd_type = SDT_MEMRWA;
	critical_enter();
	td->td_pcb->pcb_gsd = sd;
	if (td == curthread) {
		PCPU_GET(fsgs_gdt)[1] = sd;
		load_gs(GSEL(GUGS_SEL, SEL_UPL));
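		/*
		 * Slot 1 of the per-CPU fsgs_gdt pair is the user %gs
		 * descriptor; reloading %gs here makes the new TLS base
		 * take effect immediately for the current thread rather
		 * than waiting for the next context switch.
		 */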
	}
	critical_exit();

	return (0);
}

/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}

static void
cpu_reset_proxy(void)
{
	cpuset_t tcrp;

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		;	/* Wait for other cpu to see that we've started */
	CPU_SETOF(cpu_reset_proxyid, &tcrp);
	stop_cpus(tcrp);
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}

void
cpu_reset()
{
	cpuset_t map;
	u_int cnt;

#ifdef XBOX
	if (arch_i386_is_xbox) {
		/* Kick the PIC16L, it can reboot the box */
		pic16l_reboot();
	}
#endif

	CPU_CLR(PCPU_GET(cpuid), &map);
	CPU_NAND(&map, &stopped_cpus);
	if (!CPU_EMPTY(&map)) {
		printf("cpu_reset: Stopping other CPUs\n");
		stop_cpus(map);
	}

	if (PCPU_GET(cpuid) != 0) {
		cpu_reset_proxyid = PCPU_GET(cpuid);
		cpustop_restartfunc = cpu_reset_proxy;
		cpu_reset_proxy_active = 0;
		printf("cpu_reset: Restarting BSP\n");

		/* Restart CPU #0. */
		/* XXX: restart_cpus(1 << 0); */
		CPU_SETOF(0, &started_cpus);

		cnt = 0;
		while (cpu_reset_proxy_active == 0 && cnt < 10000000)
			cnt++;	/* Wait for BSP to announce restart */
		if (cpu_reset_proxy_active == 0)
			printf("cpu_reset: Failed to restart BSP\n");
		cpu_reset_proxy_active = 2;
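		/*
		 * Setting the flag to 2 releases cpu_reset_proxy() from its
		 * spin loop above; the proxy CPU then stops this CPU and
		 * performs the actual reset on our behalf.
		 */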
	}
	cpu_reset_real();
}

static void
cpu_reset_real()
{
	struct region_descriptor null_idt;
	int b;

	if (smp_processor_id() == 0)
		HYPERVISOR_shutdown(SHUTDOWN_reboot);
	else
		HYPERVISOR_shutdown(SHUTDOWN_poweroff);

	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
	}

	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);	/* Reset. */

#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller.
	 * Do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif

	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * the reset.
	 */
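	/*
	 * A minimal sketch of that write sequence (illustrative, assuming
	 * the bit layout described above):
	 *
	 *	outb(0xcf9, 0x2);	select "hard" reset, leave bit 2 clear
	 *	outb(0xcf9, 0x6);	0 -> 1 transition of bit 2 triggers reset
	 */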
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	outb(0x92, b & 0xfe);
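	/*
	 * The follow-up write implied by the comment above would set bit 0
	 * now that it is known to be clear, asserting INIT#, e.g.:
	 *
	 *	outb(0x92, b | 0x1);
	 */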
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
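
	/*
	 * sf_base is a contiguous stretch of kernel virtual address space,
	 * one page per sf_buf; each buffer below gets a fixed page-sized
	 * kva slot out of this region and is remapped on demand by
	 * sf_buf_alloc().
	 */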
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Invalidate the cache lines that may belong to the page, if a
 * (possibly old) mapping of the page by an sf buffer exists.  Returns
 * TRUE when a mapping was found and the cache was invalidated.
 */
boolean_t
sf_buf_invalidate_cache(vm_page_t m)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	boolean_t ret;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	ret = FALSE;
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			/*
			 * Use pmap_qenter to update the pte for
			 * existing mapping, in particular, the PAT
			 * settings are recalculated.
			 */
			pmap_qenter(sf->kva, &m, 1);
			pmap_invalidate_cache_range(sf->kva, sf->kva +
			    PAGE_SIZE);
			ret = TRUE;
			break;
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (ret);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	pt_entry_t opte, *ptep;
	struct sf_head *hash_list;
	struct sf_buf *sf;
	cpuset_t other_cpus;
	u_int cpuid;
	int error;

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}

	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}

	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);

	/*
	 * Update the sf_buf's virtual-to-physical mapping, flushing the
	 * virtual address from the TLB.  Since the reference count for
	 * the sf_buf's old mapping was zero, that mapping is not
	 * currently in use.  Consequently, there is no need to exchange
	 * the old and new PTEs atomically, even under PAE.
	 */
	ptep = vtopte(sf->kva);
	opte = *ptep;
#ifdef XEN
	PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag
	    | PG_RW | PG_V | pmap_cache_bits(m->md.pat_mode, 0));
#else
	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V |
	    pmap_cache_bits(m->md.pat_mode, 0);
#endif

	/*
	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
	 * virtual-to-physical mapping was not used, then any processor
	 * that has invalidated the sf_buf's virtual address from its TLB
	 * since the last used mapping need not invalidate again.
	 */
#ifdef SMP
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		CPU_ZERO(&sf->cpumask);
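
	/*
	 * sf->cpumask tracks the CPUs known to hold no stale TLB entry for
	 * this kva.  When the previous mapping was actually used (PG_V and
	 * PG_A both set), no CPU can be trusted, so the mask is cleared and
	 * every CPU that touches the buffer must invalidate first.
	 */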
	sched_pin();
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sf->cpumask)) {
		CPU_SET(cpuid, &sf->cpumask);
		invlpg(sf->kva);
	}
	if ((flags & SFB_CPUPRIVATE) == 0) {
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		CPU_NAND(&other_cpus, &sf->cpumask);
		if (!CPU_EMPTY(&other_cpus)) {
			CPU_OR(&sf->cpumask, &other_cpus);
			smp_masked_invlpg(other_cpus, sf->kva);
		}
	}
	sched_unpin();
#else
	if ((opte & (PG_V | PG_A)) == (PG_V | PG_A))
		pmap_invalidate_page(kernel_pmap, sf->kva);
#endif
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  A freed sf_buf still,
 * however, retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		/*
		 * Xen doesn't like having dangling R/W mappings
		 */
		pmap_qremove(sf->kva, 1);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{

	if (busdma_swi_pending != 0)
		busdma_swi();
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_paddr_t addr)
{

	/* The ISA ``memory hole''. */
	if (addr >= 0xa0000 && addr < 0x100000)
		return (0);

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */
	return (1);
}