 * Copyright (c) 2009 Alex Keda <admin@lissyara.su>
 * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include "opt_x86bios.h"
33 #include <sys/param.h>
35 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/sysctl.h>
42 #include <contrib/x86emu/x86emu.h>
43 #include <contrib/x86emu/x86emu_regs.h>
44 #include <compat/x86bios/x86bios.h>
46 #include <dev/pci/pcireg.h>
47 #include <dev/pci/pcivar.h>
/*
 * Build-time knobs and debugging state shared by both implementations of
 * the x86 BIOS call interface (native vm86 and x86emu-based).
 * NOTE(review): upstream wraps these #defines in architecture #ifdefs that
 * are not visible in this listing — confirm against the repository.
 */
53 #define X86BIOS_NATIVE_ARCH
56 #define X86BIOS_NATIVE_VM86
/* The BIOS-visible address space covers the first 1MB of physical memory. */
59 #define X86BIOS_MEM_SIZE 0x00100000 /* 1M */
/* Serializes BIOS calls/interrupts and updates of the page mappings. */
61 static struct mtx x86bios_lock;
/* debug.x86bios sysctl tree exposing two run-time tracing knobs. */
63 SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL, "x86bios debugging");
/* Non-zero: log register state around every far call (x86bios_call). */
64 static int x86bios_trace_call;
65 TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
66 SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
67 "Trace far function calls");
/* Non-zero: log register state around every software interrupt (x86bios_intr). */
68 static int x86bios_trace_int;
69 TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
70 SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
71 "Trace software interrupt handlers");
/*
 * Native vm86 implementation: on hardware that supports it, real-mode BIOS
 * code is executed directly in vm86 mode instead of being emulated.
 */
73 #ifdef X86BIOS_NATIVE_VM86
75 #include <machine/vm86.h>
76 #include <machine/vmparam.h>
77 #include <machine/pc/bios.h>
/* vm86 context holding the page mappings handed to real-mode code. */
79 struct vm86context x86bios_vmc;
82 x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
85 vmf->vmf_ds = regs->R_DS;
86 vmf->vmf_es = regs->R_ES;
87 vmf->vmf_ss = regs->R_SS;
88 vmf->vmf_flags = regs->R_FLG;
89 vmf->vmf_ax = regs->R_AX;
90 vmf->vmf_bx = regs->R_BX;
91 vmf->vmf_cx = regs->R_CX;
92 vmf->vmf_dx = regs->R_DX;
93 vmf->vmf_sp = regs->R_SP;
94 vmf->vmf_bp = regs->R_BP;
95 vmf->vmf_si = regs->R_SI;
96 vmf->vmf_di = regs->R_DI;
100 x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
103 regs->R_DS = vmf->vmf_ds;
104 regs->R_ES = vmf->vmf_es;
105 regs->R_SS = vmf->vmf_ss;
106 regs->R_FLG = vmf->vmf_flags;
107 regs->R_AX = vmf->vmf_ax;
108 regs->R_BX = vmf->vmf_bx;
109 regs->R_CX = vmf->vmf_cx;
110 regs->R_DX = vmf->vmf_dx;
111 regs->R_SP = vmf->vmf_sp;
112 regs->R_BP = vmf->vmf_bp;
113 regs->R_SI = vmf->vmf_si;
114 regs->R_DI = vmf->vmf_di;
/*
 * vm86 variant of x86bios_alloc(): allocate wired, physically contiguous,
 * page-aligned memory below 1MB and register its pages with the vm86
 * context so real-mode code can address it.  The physical address is
 * returned through *offset.
 * NOTE(review): this listing is missing several original lines (locals,
 * argument validation, allocation-failure handling); the surviving
 * statements are kept byte-for-byte.
 */
118 x86bios_alloc(uint32_t *offset, size_t size, int flags)
/* Contiguous allocation constrained to physical range [0, 1MB). */
123 addr = (vm_offset_t)contigmalloc(size, M_DEVBUF, flags, 0,
124 X86BIOS_MEM_SIZE, PAGE_SIZE, 0);
126 *offset = vtophys(addr);
127 mtx_lock(&x86bios_lock);
/*
 * Register each backing page with the vm86 context.
 * NOTE(review): upstream passes "atop(*offset) + i" as the page index;
 * the "+ i" may have been lost from this listing — confirm.
 */
128 for (i = 0; i < howmany(size, PAGE_SIZE); i++)
129 vm86_addpage(&x86bios_vmc, atop(*offset),
130 addr + i * PAGE_SIZE);
131 mtx_unlock(&x86bios_lock);
134 return ((void *)addr);
/*
 * vm86 variant of x86bios_free(): unregister the pages backing [addr,
 * addr + size) from the vm86 context, shrink the page table if the freed
 * range was at its tail, and release the contiguous allocation.
 * NOTE(review): lines recording the last matching index ("last = i;") and
 * the declarator/locals are missing from this listing; the surviving
 * statements are kept byte-for-byte.
 */
138 x86bios_free(void *addr, size_t size)
142 mtx_lock(&x86bios_lock);
/* Clear every pmap slot whose kernel VA falls inside the freed range. */
143 for (i = 0, last = -1; i < x86bios_vmc.npages; i++)
144 if (x86bios_vmc.pmap[i].kva >= (vm_offset_t)addr &&
145 x86bios_vmc.pmap[i].kva < (vm_offset_t)addr + size) {
146 bzero(&x86bios_vmc.pmap[i],
147 sizeof(x86bios_vmc.pmap[i]));
/* If the freed range ended the table, trim trailing empty slots. */
150 if (last == x86bios_vmc.npages - 1) {
151 x86bios_vmc.npages -= howmany(size, PAGE_SIZE);
152 for (i = x86bios_vmc.npages - 1;
153 i >= 0 && x86bios_vmc.pmap[i].kva == 0; i--)
154 x86bios_vmc.npages--;
156 mtx_unlock(&x86bios_lock);
157 contigfree(addr, size, M_DEVBUF);
161 x86bios_init_regs(struct x86regs *regs)
164 bzero(regs, sizeof(*regs));
168 x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
170 struct vm86frame vmf;
172 if (x86bios_trace_call)
173 printf("Calling 0x%05x (ax=0x%04x bx=0x%04x "
174 "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
175 (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
176 regs->R_DX, regs->R_ES, regs->R_DI);
178 bzero(&vmf, sizeof(vmf));
179 x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
182 mtx_lock(&x86bios_lock);
183 vm86_datacall(-1, &vmf, &x86bios_vmc);
184 mtx_unlock(&x86bios_lock);
185 x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);
187 if (x86bios_trace_call)
188 printf("Exiting 0x%05x (ax=0x%04x bx=0x%04x "
189 "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
190 (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
191 regs->R_DX, regs->R_ES, regs->R_DI);
195 x86bios_get_intr(int intno)
198 return (readl(x86bios_offset(intno * 4)));
202 x86bios_intr(struct x86regs *regs, int intno)
204 struct vm86frame vmf;
206 if (x86bios_trace_int)
207 printf("Calling int 0x%x (ax=0x%04x bx=0x%04x "
208 "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
209 intno, regs->R_AX, regs->R_BX, regs->R_CX,
210 regs->R_DX, regs->R_ES, regs->R_DI);
212 bzero(&vmf, sizeof(vmf));
213 x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
214 mtx_lock(&x86bios_lock);
215 vm86_datacall(intno, &vmf, &x86bios_vmc);
216 mtx_unlock(&x86bios_lock);
217 x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);
219 if (x86bios_trace_int)
220 printf("Exiting int 0x%x (ax=0x%04x bx=0x%04x "
221 "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
222 intno, regs->R_AX, regs->R_BX, regs->R_CX,
223 regs->R_DX, regs->R_ES, regs->R_DI);
227 x86bios_offset(uint32_t offset)
231 addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
232 X86BIOS_PHYSTOOFF(offset));
234 addr = BIOS_PADDRTOVADDR(offset);
236 return ((void *)addr);
/*
 * Fragments of the vm86-path init/uninit routines; their declarators and
 * surrounding lines are missing from this listing.
 */
/* x86bios_init (vm86): create the lock and clear the vm86 context. */
243 mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
244 bzero(&x86bios_vmc, sizeof(x86bios_vmc));
/* x86bios_uninit (vm86): tear the lock back down. */
253 mtx_destroy(&x86bios_lock);
/* x86emu-based implementation (used when native vm86 is unavailable). */
260 #include <machine/iodev.h>
/* Page granularity of the emulator's flat 1MB address map. */
262 #define X86BIOS_PAGE_SIZE 0x00001000 /* 4K */
/* Interrupt vector table plus BIOS data area: physical 0x000-0x4ff. */
264 #define X86BIOS_IVT_SIZE 0x00000500 /* 1K + 256 (BDA) */
266 #define X86BIOS_IVT_BASE 0x00000000
/* Low RAM window usable for x86bios_alloc() allocations. */
267 #define X86BIOS_RAM_BASE 0x00001000
268 #define X86BIOS_ROM_BASE 0x000a0000
/* ROM window size depends on where the (possibly lower) EBDA starts. */
270 #define X86BIOS_ROM_SIZE (X86BIOS_MEM_SIZE - (uint32_t)x86bios_rom_phys)
/* One page of private real-mode stack segment. */
271 #define X86BIOS_SEG_SIZE X86BIOS_PAGE_SIZE
273 #define X86BIOS_PAGES (X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)
/* x86emu keeps SS/SP in padding fields of its register file; name them. */
275 #define X86BIOS_R_SS _pad2
276 #define X86BIOS_R_SP _pad3.I16_reg.x_reg
/* The emulator instance; serialized by x86bios_lock (spin mutex here). */
278 static struct x86emu x86bios_emu;
/* Kernel mappings of the IVT/BDA, option-ROM window and stack segment. */
280 static void *x86bios_ivt;
281 static void *x86bios_rom;
282 static void *x86bios_seg;
/* Per-4K-page table translating emulated physical addresses to kernel VAs. */
284 static vm_offset_t *x86bios_map;
/* Physical bases of the ROM window (or EBDA) and of the stack segment. */
286 static vm_paddr_t x86bios_rom_phys;
287 static vm_paddr_t x86bios_seg_phys;
/* Fault bookkeeping filled in by x86bios_set_fault(). */
289 static int x86bios_fault;
290 static uint32_t x86bios_fault_addr;
291 static uint16_t x86bios_fault_cs;
292 static uint16_t x86bios_fault_ip;
295 x86bios_set_fault(struct x86emu *emu, uint32_t addr)
299 x86bios_fault_addr = addr;
300 x86bios_fault_cs = emu->x86.R_CS;
301 x86bios_fault_ip = emu->x86.R_IP;
302 x86emu_halt_sys(emu);
306 x86bios_get_pages(uint32_t offset, size_t size)
310 if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
313 if (offset >= X86BIOS_MEM_SIZE)
314 offset -= X86BIOS_MEM_SIZE;
315 addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
317 addr += offset % X86BIOS_PAGE_SIZE;
319 return ((void *)addr);
323 x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
327 for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
328 j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
329 x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
333 x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
337 va = x86bios_get_pages(addr, sizeof(*va));
339 x86bios_set_fault(emu, addr);
345 x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
349 va = x86bios_get_pages(addr, sizeof(*va));
351 x86bios_set_fault(emu, addr);
353 #ifndef __NO_STRICT_ALIGNMENT
355 return (le16dec(va));
358 return (le16toh(*va));
362 x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
366 va = x86bios_get_pages(addr, sizeof(*va));
368 x86bios_set_fault(emu, addr);
370 #ifndef __NO_STRICT_ALIGNMENT
372 return (le32dec(va));
375 return (le32toh(*va));
379 x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
383 va = x86bios_get_pages(addr, sizeof(*va));
385 x86bios_set_fault(emu, addr);
391 x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
395 va = x86bios_get_pages(addr, sizeof(*va));
397 x86bios_set_fault(emu, addr);
399 #ifndef __NO_STRICT_ALIGNMENT
408 x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
412 va = x86bios_get_pages(addr, sizeof(*va));
414 x86bios_set_fault(emu, addr);
416 #ifndef __NO_STRICT_ALIGNMENT
425 x86bios_emu_inb(struct x86emu *emu, uint16_t port)
428 if (port == 0xb2) /* APM scratch register */
430 if (port >= 0x80 && port < 0x88) /* POST status register */
433 return (iodev_read_1(port));
437 x86bios_emu_inw(struct x86emu *emu, uint16_t port)
441 if (port >= 0x80 && port < 0x88) /* POST status register */
444 #ifndef X86BIOS_NATIVE_ARCH
445 if ((port & 1) != 0) {
446 val = iodev_read_1(port);
447 val |= iodev_read_1(port + 1) << 8;
450 val = iodev_read_2(port);
456 x86bios_emu_inl(struct x86emu *emu, uint16_t port)
460 if (port >= 0x80 && port < 0x88) /* POST status register */
463 #ifndef X86BIOS_NATIVE_ARCH
464 if ((port & 1) != 0) {
465 val = iodev_read_1(port);
466 val |= iodev_read_2(port + 1) << 8;
467 val |= iodev_read_1(port + 3) << 24;
468 } else if ((port & 2) != 0) {
469 val = iodev_read_2(port);
470 val |= iodev_read_2(port + 2) << 16;
473 val = iodev_read_4(port);
479 x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
482 if (port == 0xb2) /* APM scratch register */
484 if (port >= 0x80 && port < 0x88) /* POST status register */
487 iodev_write_1(port, val);
491 x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
494 if (port >= 0x80 && port < 0x88) /* POST status register */
497 #ifndef X86BIOS_NATIVE_ARCH
498 if ((port & 1) != 0) {
499 iodev_write_1(port, val);
500 iodev_write_1(port + 1, val >> 8);
503 iodev_write_2(port, val);
507 x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
510 if (port >= 0x80 && port < 0x88) /* POST status register */
513 #ifndef X86BIOS_NATIVE_ARCH
514 if ((port & 1) != 0) {
515 iodev_write_1(port, val);
516 iodev_write_2(port + 1, val >> 8);
517 iodev_write_1(port + 3, val >> 24);
518 } else if ((port & 2) != 0) {
519 iodev_write_2(port, val);
520 iodev_write_2(port + 2, val >> 16);
523 iodev_write_4(port, val);
527 x86bios_emu_get_intr(struct x86emu *emu, int intno)
534 sp = (uint16_t *)((vm_offset_t)x86bios_seg + emu->x86.R_SP);
535 sp[0] = htole16(emu->x86.R_IP);
536 sp[1] = htole16(emu->x86.R_CS);
537 sp[2] = htole16(emu->x86.R_FLG);
539 iv = x86bios_get_intr(intno);
540 emu->x86.R_IP = iv & 0xffff;
541 emu->x86.R_CS = (iv >> 16) & 0xffff;
542 emu->x86.R_FLG &= ~(F_IF | F_TF);
546 x86bios_alloc(uint32_t *offset, size_t size, int flags)
550 if (offset == NULL || size == 0)
553 vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
554 x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
556 *offset = vtophys(vaddr);
557 x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
564 x86bios_free(void *addr, size_t size)
568 if (addr == NULL || size == 0)
571 paddr = vtophys(addr);
572 if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
573 paddr % X86BIOS_PAGE_SIZE != 0)
576 bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
577 sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
578 contigfree(addr, size, M_DEVBUF);
582 x86bios_init_regs(struct x86regs *regs)
585 bzero(regs, sizeof(*regs));
586 regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
587 regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
591 x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
594 if (x86bios_map == NULL)
597 if (x86bios_trace_call)
598 printf("Calling 0x%05x (ax=0x%04x bx=0x%04x "
599 "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
600 (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
601 regs->R_DX, regs->R_ES, regs->R_DI);
603 mtx_lock_spin(&x86bios_lock);
604 memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
606 x86emu_exec_call(&x86bios_emu, seg, off);
607 memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
608 mtx_unlock_spin(&x86bios_lock);
610 if (x86bios_trace_call) {
611 printf("Exiting 0x%05x (ax=0x%04x bx=0x%04x "
612 "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
613 (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
614 regs->R_DX, regs->R_ES, regs->R_DI);
616 printf("Page fault at 0x%05x from 0x%04x:0x%04x.\n",
617 x86bios_fault_addr, x86bios_fault_cs,
623 x86bios_get_intr(int intno)
627 iv = (uint32_t *)((vm_offset_t)x86bios_ivt + intno * 4);
629 return (le32toh(*iv));
633 x86bios_intr(struct x86regs *regs, int intno)
636 if (intno < 0 || intno > 255)
639 if (x86bios_map == NULL)
642 if (x86bios_trace_int)
643 printf("Calling int 0x%x (ax=0x%04x bx=0x%04x "
644 "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
645 intno, regs->R_AX, regs->R_BX, regs->R_CX,
646 regs->R_DX, regs->R_ES, regs->R_DI);
648 mtx_lock_spin(&x86bios_lock);
649 memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
651 x86emu_exec_intr(&x86bios_emu, intno);
652 memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
653 mtx_unlock_spin(&x86bios_lock);
655 if (x86bios_trace_int) {
656 printf("Exiting int 0x%x (ax=0x%04x bx=0x%04x "
657 "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
658 intno, regs->R_AX, regs->R_BX, regs->R_CX,
659 regs->R_DX, regs->R_ES, regs->R_DI);
661 printf("Page fault at 0x%05x from 0x%04x:0x%04x.\n",
662 x86bios_fault_addr, x86bios_fault_cs,
668 x86bios_offset(uint32_t offset)
671 return (x86bios_get_pages(offset, 1));
675 x86bios_unmap_mem(void)
678 if (x86bios_ivt != NULL)
679 #ifdef X86BIOS_NATIVE_ARCH
680 pmap_unmapdev((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
682 free(x86bios_ivt, M_DEVBUF);
684 if (x86bios_rom != NULL)
685 pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
686 if (x86bios_seg != NULL)
687 contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
/*
 * Map the pieces of low physical memory the emulator needs: the IVT/BDA
 * (real mapping on native hardware, a zeroed copy otherwise), the
 * EBDA/ROM window, and a private page for the real-mode stack segment.
 * NOTE(review): several original lines (error returns, #else/#endif
 * markers, bootverbose guard, printf arguments) are missing from this
 * listing; the surviving statements are kept byte-for-byte.
 */
691 x86bios_map_mem(void)
694 #ifdef X86BIOS_NATIVE_ARCH
/* Map the real interrupt vector table and BIOS data area. */
695 x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE)
697 /* Probe EBDA via BDA. */
/* BDA word at 0x40e holds the EBDA segment; convert it to a physical address. */
698 x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
699 x86bios_rom_phys = x86bios_rom_phys << 4;
/* Use the EBDA as the bottom of the ROM window if it is plausible (<=128K below 0xa0000). */
700 if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
701 X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
703 rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
/* Non-native: fabricate a zeroed IVT/BDA copy instead. */
706 x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
709 x86bios_rom_phys = X86BIOS_ROM_BASE;
/* Map the (EBDA+)ROM window uncacheably; fail init if that fails. */
710 x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
711 if (x86bios_rom == NULL)
713 #ifdef X86BIOS_NATIVE_ARCH
714 /* Change attribute for EBDA. */
715 if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
716 pmap_change_attr((vm_offset_t)x86bios_rom,
717 X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
/* One contiguous page below the ROM window for the real-mode stack. */
721 x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
722 X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
723 x86bios_seg_phys = vtophys(x86bios_seg);
/* Diagnostic dump of the resulting layout. */
726 printf("x86bios: IVT 0x%06x-0x%06x at %p\n",
727 X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
729 printf("x86bios: SSEG 0x%06x-0x%06x at %p\n",
730 (uint32_t)x86bios_seg_phys,
731 X86BIOS_SEG_SIZE + (uint32_t)x86bios_seg_phys - 1,
733 if (x86bios_rom_phys < X86BIOS_ROM_BASE)
734 printf("x86bios: EBDA 0x%06x-0x%06x at %p\n",
735 (uint32_t)x86bios_rom_phys, X86BIOS_ROM_BASE - 1,
737 printf("x86bios: ROM 0x%06x-0x%06x at %p\n",
738 X86BIOS_ROM_BASE, X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
739 (void *)((vm_offset_t)x86bios_rom + X86BIOS_ROM_BASE -
740 (vm_offset_t)x86bios_rom_phys));
/*
 * Body of the emulator-path x86bios_init(): map memory, build the page
 * map and wire up all x86emu callbacks.
 * NOTE(review): the declarator, error returns and some allocation flags
 * are missing from this listing; statements are kept byte-for-byte.
 */
756 if (x86bios_map_mem() != 0)
/* Spin lock: BIOS calls may happen in contexts that cannot sleep. */
759 mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_SPIN);
/* One VA slot per 4K page of the emulated 1MB space. */
761 x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
/* Enter the IVT/BDA, ROM window and stack segment into the page map. */
763 x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
765 x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
767 x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
/* Reset the emulator and install the memory and port-I/O callbacks. */
770 bzero(&x86bios_emu, sizeof(x86bios_emu));
772 x86bios_emu.emu_rdb = x86bios_emu_rdb;
773 x86bios_emu.emu_rdw = x86bios_emu_rdw;
774 x86bios_emu.emu_rdl = x86bios_emu_rdl;
775 x86bios_emu.emu_wrb = x86bios_emu_wrb;
776 x86bios_emu.emu_wrw = x86bios_emu_wrw;
777 x86bios_emu.emu_wrl = x86bios_emu_wrl;
779 x86bios_emu.emu_inb = x86bios_emu_inb;
780 x86bios_emu.emu_inw = x86bios_emu_inw;
781 x86bios_emu.emu_inl = x86bios_emu_inl;
782 x86bios_emu.emu_outb = x86bios_emu_outb;
783 x86bios_emu.emu_outw = x86bios_emu_outw;
784 x86bios_emu.emu_outl = x86bios_emu_outl;
/* Every software interrupt vector dispatches via x86bios_emu_get_intr. */
786 for (i = 0; i < 256; i++)
787 x86bios_emu._x86emu_intrTab[i] = x86bios_emu_get_intr;
/*
 * Body of the emulator-path x86bios_uninit(): detach the page map under
 * the spin lock, then release resources and destroy the lock.
 * NOTE(review): the declarator, the unmap call and surrounding lines are
 * missing from this listing; statements are kept byte-for-byte.
 */
795 vm_offset_t *map = x86bios_map;
797 mtx_lock_spin(&x86bios_lock);
/* Clear the global pointer first so concurrent callers bail out early. */
798 if (x86bios_map != NULL) {
799 free(x86bios_map, M_DEVBUF);
802 mtx_unlock_spin(&x86bios_lock);
807 mtx_destroy(&x86bios_lock);
815 x86bios_get_orm(uint32_t offset)
819 /* Does the shadow ROM contain BIOS POST code for x86? */
820 p = x86bios_offset(offset);
821 if (p == NULL || p[0] != 0x55 || p[1] != 0xaa || p[3] != 0xe9)
828 x86bios_match_device(uint32_t offset, device_t dev)
831 uint16_t device, vendor;
832 uint8_t class, progif, subclass;
834 /* Does the shadow ROM contain BIOS POST code for x86? */
835 p = x86bios_get_orm(offset);
839 /* Does it contain PCI data structure? */
840 p += le16toh(*(uint16_t *)(p + 0x18));
841 if (bcmp(p, "PCIR", 4) != 0 ||
842 le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
845 /* Does it match the vendor, device, and classcode? */
846 vendor = le16toh(*(uint16_t *)(p + 0x04));
847 device = le16toh(*(uint16_t *)(p + 0x06));
848 progif = *(p + 0x0d);
849 subclass = *(p + 0x0e);
851 if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
852 class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
853 progif != pci_get_progif(dev))
860 x86bios_modevent(module_t mod __unused, int type, void *data __unused)
865 return (x86bios_init());
867 return (x86bios_uninit());
873 static moduledata_t x86bios_mod = {
879 DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
880 MODULE_VERSION(x86bios, 1);