2 * Copyright (c) 2009 Alex Keda <admin@lissyara.su>
3 * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include "opt_x86bios.h"
33 #include <sys/param.h>
35 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/sysctl.h>
42 #include <contrib/x86emu/x86emu.h>
43 #include <contrib/x86emu/x86emu_regs.h>
44 #include <compat/x86bios/x86bios.h>
46 #include <dev/pci/pcireg.h>
47 #include <dev/pci/pcivar.h>
53 #define X86BIOS_NATIVE_ARCH
56 #define X86BIOS_NATIVE_VM86
59 #define X86BIOS_MEM_SIZE 0x00100000 /* 1M */
/*
 * Print a trace line with the register snapshot most BIOS services use for
 * arguments/results.  'h' is a bare token pasted into the format string via
 * __STRING(); 'n' is the call/interrupt identifier; 'r' is a struct x86regs *.
 */
#define	X86BIOS_TRACE(h, n, r)	do {					\
	printf(__STRING(h)						\
	    " (ax=0x%04x bx=0x%04x cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",\
	    (n), (r)->R_AX, (r)->R_BX, (r)->R_CX, (r)->R_DX,		\
	    (r)->R_ES, (r)->R_DI);					\
} while (0)
68 static struct mtx x86bios_lock;
70 static SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL,
72 static int x86bios_trace_call;
73 TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
74 SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
75 "Trace far function calls");
76 static int x86bios_trace_int;
77 TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
78 SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
79 "Trace software interrupt handlers");
81 #ifdef X86BIOS_NATIVE_VM86
83 #include <machine/vm86.h>
84 #include <machine/vmparam.h>
85 #include <machine/pc/bios.h>
87 struct vm86context x86bios_vmc;
90 x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
93 vmf->vmf_ds = regs->R_DS;
94 vmf->vmf_es = regs->R_ES;
95 vmf->vmf_ax = regs->R_AX;
96 vmf->vmf_bx = regs->R_BX;
97 vmf->vmf_cx = regs->R_CX;
98 vmf->vmf_dx = regs->R_DX;
99 vmf->vmf_bp = regs->R_BP;
100 vmf->vmf_si = regs->R_SI;
101 vmf->vmf_di = regs->R_DI;
105 x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
108 regs->R_DS = vmf->vmf_ds;
109 regs->R_ES = vmf->vmf_es;
110 regs->R_FLG = vmf->vmf_flags;
111 regs->R_AX = vmf->vmf_ax;
112 regs->R_BX = vmf->vmf_bx;
113 regs->R_CX = vmf->vmf_cx;
114 regs->R_DX = vmf->vmf_dx;
115 regs->R_BP = vmf->vmf_bp;
116 regs->R_SI = vmf->vmf_si;
117 regs->R_DI = vmf->vmf_di;
121 x86bios_alloc(uint32_t *offset, size_t size, int flags)
126 if (offset == NULL || size == 0)
128 vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
131 *offset = vtophys(vaddr);
132 mtx_lock(&x86bios_lock);
133 for (i = 0; i < atop(round_page(size)); i++)
134 vm86_addpage(&x86bios_vmc, atop(*offset) + i,
135 (vm_offset_t)vaddr + ptoa(i));
136 mtx_unlock(&x86bios_lock);
143 x86bios_free(void *addr, size_t size)
148 if (addr == NULL || size == 0)
150 paddr = vtophys(addr);
151 if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
153 mtx_lock(&x86bios_lock);
154 for (i = 0; i < x86bios_vmc.npages; i++)
155 if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
157 if (i >= x86bios_vmc.npages) {
158 mtx_unlock(&x86bios_lock);
161 nfree = atop(round_page(size));
162 bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
163 if (i + nfree == x86bios_vmc.npages) {
164 x86bios_vmc.npages -= nfree;
165 while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
166 x86bios_vmc.npages--;
168 mtx_unlock(&x86bios_lock);
169 contigfree(addr, size, M_DEVBUF);
173 x86bios_init_regs(struct x86regs *regs)
176 bzero(regs, sizeof(*regs));
180 x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
182 struct vm86frame vmf;
184 if (x86bios_trace_call)
185 X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);
187 bzero(&vmf, sizeof(vmf));
188 x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
191 mtx_lock(&x86bios_lock);
192 vm86_datacall(-1, &vmf, &x86bios_vmc);
193 mtx_unlock(&x86bios_lock);
194 x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);
196 if (x86bios_trace_call)
197 X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
201 x86bios_get_intr(int intno)
204 return (readl(BIOS_PADDRTOVADDR(intno * 4)));
208 x86bios_set_intr(int intno, uint32_t saddr)
211 writel(BIOS_PADDRTOVADDR(intno * 4), saddr);
215 x86bios_intr(struct x86regs *regs, int intno)
217 struct vm86frame vmf;
219 if (x86bios_trace_int)
220 X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);
222 bzero(&vmf, sizeof(vmf));
223 x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
224 mtx_lock(&x86bios_lock);
225 vm86_datacall(intno, &vmf, &x86bios_vmc);
226 mtx_unlock(&x86bios_lock);
227 x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);
229 if (x86bios_trace_int)
230 X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
234 x86bios_offset(uint32_t offset)
238 addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
239 X86BIOS_PHYSTOOFF(offset));
241 addr = BIOS_PADDRTOVADDR(offset);
243 return ((void *)addr);
250 mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
251 bzero(&x86bios_vmc, sizeof(x86bios_vmc));
260 mtx_destroy(&x86bios_lock);
267 #include <machine/iodev.h>
269 #define X86BIOS_PAGE_SIZE 0x00001000 /* 4K */
271 #define X86BIOS_IVT_SIZE 0x00000500 /* 1K + 256 (BDA) */
273 #define X86BIOS_IVT_BASE 0x00000000
274 #define X86BIOS_RAM_BASE 0x00001000
275 #define X86BIOS_ROM_BASE 0x000a0000
277 #define X86BIOS_ROM_SIZE (X86BIOS_MEM_SIZE - x86bios_rom_phys)
278 #define X86BIOS_SEG_SIZE X86BIOS_PAGE_SIZE
280 #define X86BIOS_PAGES (X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)
282 #define X86BIOS_R_SS _pad2
283 #define X86BIOS_R_SP _pad3.I16_reg.x_reg
285 static struct x86emu x86bios_emu;
287 static void *x86bios_ivt;
288 static void *x86bios_rom;
289 static void *x86bios_seg;
291 static vm_offset_t *x86bios_map;
293 static vm_paddr_t x86bios_rom_phys;
294 static vm_paddr_t x86bios_seg_phys;
296 static int x86bios_fault;
297 static uint32_t x86bios_fault_addr;
298 static uint16_t x86bios_fault_cs;
299 static uint16_t x86bios_fault_ip;
302 x86bios_set_fault(struct x86emu *emu, uint32_t addr)
306 x86bios_fault_addr = addr;
307 x86bios_fault_cs = emu->x86.R_CS;
308 x86bios_fault_ip = emu->x86.R_IP;
309 x86emu_halt_sys(emu);
313 x86bios_get_pages(uint32_t offset, size_t size)
317 if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
320 if (offset >= X86BIOS_MEM_SIZE)
321 offset -= X86BIOS_MEM_SIZE;
322 addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
324 addr += offset % X86BIOS_PAGE_SIZE;
326 return ((void *)addr);
330 x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
334 for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
335 j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
336 x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
340 x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
344 va = x86bios_get_pages(addr, sizeof(*va));
346 x86bios_set_fault(emu, addr);
352 x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
356 va = x86bios_get_pages(addr, sizeof(*va));
358 x86bios_set_fault(emu, addr);
360 #ifndef __NO_STRICT_ALIGNMENT
362 return (le16dec(va));
365 return (le16toh(*va));
369 x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
373 va = x86bios_get_pages(addr, sizeof(*va));
375 x86bios_set_fault(emu, addr);
377 #ifndef __NO_STRICT_ALIGNMENT
379 return (le32dec(va));
382 return (le32toh(*va));
386 x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
390 va = x86bios_get_pages(addr, sizeof(*va));
392 x86bios_set_fault(emu, addr);
398 x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
402 va = x86bios_get_pages(addr, sizeof(*va));
404 x86bios_set_fault(emu, addr);
406 #ifndef __NO_STRICT_ALIGNMENT
415 x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
419 va = x86bios_get_pages(addr, sizeof(*va));
421 x86bios_set_fault(emu, addr);
423 #ifndef __NO_STRICT_ALIGNMENT
432 x86bios_emu_inb(struct x86emu *emu, uint16_t port)
435 #ifndef X86BIOS_NATIVE_ARCH
436 if (port == 0xb2) /* APM scratch register */
438 if (port >= 0x80 && port < 0x88) /* POST status register */
442 return (iodev_read_1(port));
446 x86bios_emu_inw(struct x86emu *emu, uint16_t port)
450 #ifndef X86BIOS_NATIVE_ARCH
451 if (port >= 0x80 && port < 0x88) /* POST status register */
454 if ((port & 1) != 0) {
455 val = iodev_read_1(port);
456 val |= iodev_read_1(port + 1) << 8;
459 val = iodev_read_2(port);
465 x86bios_emu_inl(struct x86emu *emu, uint16_t port)
469 #ifndef X86BIOS_NATIVE_ARCH
470 if (port >= 0x80 && port < 0x88) /* POST status register */
473 if ((port & 1) != 0) {
474 val = iodev_read_1(port);
475 val |= iodev_read_2(port + 1) << 8;
476 val |= iodev_read_1(port + 3) << 24;
477 } else if ((port & 2) != 0) {
478 val = iodev_read_2(port);
479 val |= iodev_read_2(port + 2) << 16;
482 val = iodev_read_4(port);
488 x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
491 #ifndef X86BIOS_NATIVE_ARCH
492 if (port == 0xb2) /* APM scratch register */
494 if (port >= 0x80 && port < 0x88) /* POST status register */
498 iodev_write_1(port, val);
502 x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
505 #ifndef X86BIOS_NATIVE_ARCH
506 if (port >= 0x80 && port < 0x88) /* POST status register */
509 if ((port & 1) != 0) {
510 iodev_write_1(port, val);
511 iodev_write_1(port + 1, val >> 8);
514 iodev_write_2(port, val);
518 x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
521 #ifndef X86BIOS_NATIVE_ARCH
522 if (port >= 0x80 && port < 0x88) /* POST status register */
525 if ((port & 1) != 0) {
526 iodev_write_1(port, val);
527 iodev_write_2(port + 1, val >> 8);
528 iodev_write_1(port + 3, val >> 24);
529 } else if ((port & 2) != 0) {
530 iodev_write_2(port, val);
531 iodev_write_2(port + 2, val >> 16);
534 iodev_write_4(port, val);
538 x86bios_alloc(uint32_t *offset, size_t size, int flags)
542 if (offset == NULL || size == 0)
544 vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
545 x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
547 *offset = vtophys(vaddr);
548 mtx_lock(&x86bios_lock);
549 x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
550 mtx_unlock(&x86bios_lock);
557 x86bios_free(void *addr, size_t size)
561 if (addr == NULL || size == 0)
563 paddr = vtophys(addr);
564 if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
565 paddr % X86BIOS_PAGE_SIZE != 0)
567 mtx_lock(&x86bios_lock);
568 bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
569 sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
570 mtx_unlock(&x86bios_lock);
571 contigfree(addr, size, M_DEVBUF);
575 x86bios_init_regs(struct x86regs *regs)
578 bzero(regs, sizeof(*regs));
579 regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
580 regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
584 x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
587 if (x86bios_trace_call)
588 X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);
590 mtx_lock(&x86bios_lock);
591 memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
594 x86emu_exec_call(&x86bios_emu, seg, off);
596 memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
597 mtx_unlock(&x86bios_lock);
599 if (x86bios_trace_call) {
600 X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
602 printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
603 x86bios_fault_addr, x86bios_fault_cs,
609 x86bios_get_intr(int intno)
612 return (le32toh(*((uint32_t *)x86bios_ivt + intno)));
616 x86bios_set_intr(int intno, uint32_t saddr)
619 *((uint32_t *)x86bios_ivt + intno) = htole32(saddr);
623 x86bios_intr(struct x86regs *regs, int intno)
626 if (intno < 0 || intno > 255)
629 if (x86bios_trace_int)
630 X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);
632 mtx_lock(&x86bios_lock);
633 memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
636 x86emu_exec_intr(&x86bios_emu, intno);
638 memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
639 mtx_unlock(&x86bios_lock);
641 if (x86bios_trace_int) {
642 X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
644 printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
645 x86bios_fault_addr, x86bios_fault_cs,
651 x86bios_offset(uint32_t offset)
654 return (x86bios_get_pages(offset, 1));
658 x86bios_unmap_mem(void)
661 free(x86bios_map, M_DEVBUF);
662 if (x86bios_ivt != NULL)
663 #ifdef X86BIOS_NATIVE_ARCH
664 pmap_unmapbios((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
666 free(x86bios_ivt, M_DEVBUF);
668 if (x86bios_rom != NULL)
669 pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
670 if (x86bios_seg != NULL)
671 contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
675 x86bios_map_mem(void)
678 x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
681 #ifdef X86BIOS_NATIVE_ARCH
682 x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);
684 /* Probe EBDA via BDA. */
685 x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
686 x86bios_rom_phys = x86bios_rom_phys << 4;
687 if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
688 X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
690 rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
693 x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
696 x86bios_rom_phys = X86BIOS_ROM_BASE;
697 x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
698 if (x86bios_rom == NULL)
700 #ifdef X86BIOS_NATIVE_ARCH
701 /* Change attribute for EBDA. */
702 if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
703 pmap_change_attr((vm_offset_t)x86bios_rom,
704 X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
708 x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
709 X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
710 x86bios_seg_phys = vtophys(x86bios_seg);
712 x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
714 x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
716 x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
720 printf("x86bios: IVT 0x%06jx-0x%06jx at %p\n",
721 (vm_paddr_t)X86BIOS_IVT_BASE,
722 (vm_paddr_t)X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
724 printf("x86bios: SSEG 0x%06jx-0x%06jx at %p\n",
726 (vm_paddr_t)X86BIOS_SEG_SIZE + x86bios_seg_phys - 1,
728 if (x86bios_rom_phys < X86BIOS_ROM_BASE)
729 printf("x86bios: EBDA 0x%06jx-0x%06jx at %p\n",
730 x86bios_rom_phys, (vm_paddr_t)X86BIOS_ROM_BASE - 1,
732 printf("x86bios: ROM 0x%06jx-0x%06jx at %p\n",
733 (vm_paddr_t)X86BIOS_ROM_BASE,
734 (vm_paddr_t)X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
735 (caddr_t)x86bios_rom + X86BIOS_ROM_BASE - x86bios_rom_phys);
750 mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
752 if (x86bios_map_mem() != 0)
755 bzero(&x86bios_emu, sizeof(x86bios_emu));
757 x86bios_emu.emu_rdb = x86bios_emu_rdb;
758 x86bios_emu.emu_rdw = x86bios_emu_rdw;
759 x86bios_emu.emu_rdl = x86bios_emu_rdl;
760 x86bios_emu.emu_wrb = x86bios_emu_wrb;
761 x86bios_emu.emu_wrw = x86bios_emu_wrw;
762 x86bios_emu.emu_wrl = x86bios_emu_wrl;
764 x86bios_emu.emu_inb = x86bios_emu_inb;
765 x86bios_emu.emu_inw = x86bios_emu_inw;
766 x86bios_emu.emu_inl = x86bios_emu_inl;
767 x86bios_emu.emu_outb = x86bios_emu_outb;
768 x86bios_emu.emu_outw = x86bios_emu_outw;
769 x86bios_emu.emu_outl = x86bios_emu_outl;
779 mtx_destroy(&x86bios_lock);
787 x86bios_get_orm(uint32_t offset)
791 /* Does the shadow ROM contain BIOS POST code for x86? */
792 p = x86bios_offset(offset);
793 if (p == NULL || p[0] != 0x55 || p[1] != 0xaa ||
794 (p[3] != 0xe9 && p[3] != 0xeb))
801 x86bios_match_device(uint32_t offset, device_t dev)
804 uint16_t device, vendor;
805 uint8_t class, progif, subclass;
807 /* Does the shadow ROM contain BIOS POST code for x86? */
808 p = x86bios_get_orm(offset);
812 /* Does it contain PCI data structure? */
813 p += le16toh(*(uint16_t *)(p + 0x18));
814 if (bcmp(p, "PCIR", 4) != 0 ||
815 le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
818 /* Does it match the vendor, device, and classcode? */
819 vendor = le16toh(*(uint16_t *)(p + 0x04));
820 device = le16toh(*(uint16_t *)(p + 0x06));
821 progif = *(p + 0x0d);
822 subclass = *(p + 0x0e);
824 if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
825 class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
826 progif != pci_get_progif(dev))
833 x86bios_modevent(module_t mod __unused, int type, void *data __unused)
838 return (x86bios_init());
840 return (x86bios_uninit());
846 static moduledata_t x86bios_mod = {
852 DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
853 MODULE_VERSION(x86bios, 1);