2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/ehcireg.h>
66 #include <dev/usb/controller/ohcireg.h>
67 #include <dev/usb/controller/uhcireg.h>
73 #include <contrib/dev/acpica/include/acpi.h>
76 #define ACPI_PWR_FOR_SLEEP(x, y, z)
/*
 * XXX: Due to a limitation of the bus_dma_tag_create() API, we cannot
 * specify a 4GB boundary on 32-bit targets.  Usually this does not
 * matter as it is ok to use a boundary of 0 on these systems.
 * However, in the case of PAE, DMA addresses can cross a 4GB
 * boundary, so as a workaround use a 2GB boundary.
 */
#if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
#ifdef PAE
#define	PCI_DMA_BOUNDARY	0x80000000
#else
#define	PCI_DMA_BOUNDARY	0x100000000
#endif
#endif

/*
 * True iff config register 'reg' is the expansion-ROM BAR for the
 * given header type.  Both parameters are fully parenthesized so the
 * macro is safe when callers pass expressions (the original left
 * 'reg' bare, which would mis-bind for e.g. 'base + off').
 */
#define	PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && (reg) == PCIR_BIOS) || \
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && (reg) == PCIR_BIOS_1))
/*
 * Forward declarations for the static helpers defined later in this
 * file (BAR decoding, config-space probing, VPD parsing, MSI/MSI-X
 * plumbing).
 */
98 static pci_addr_t pci_mapbase(uint64_t mapreg);
99 static const char *pci_maptype(uint64_t mapreg);
100 static int pci_mapsize(uint64_t testval);
101 static int pci_maprange(uint64_t mapreg);
102 static pci_addr_t pci_rombase(uint64_t mapreg);
103 static int pci_romsize(uint64_t testval);
104 static void pci_fixancient(pcicfgregs *cfg);
105 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
107 static int pci_porten(device_t dev);
108 static int pci_memen(device_t dev);
109 static void pci_assign_interrupt(device_t bus, device_t dev,
111 static int pci_add_map(device_t bus, device_t dev, int reg,
112 struct resource_list *rl, int force, int prefetch);
113 static int pci_probe(device_t dev);
114 static int pci_attach(device_t dev);
115 static void pci_load_vendor_data(void);
116 static int pci_describe_parse_line(char **ptr, int *vendor,
117 int *device, char **desc);
118 static char *pci_describe_device(device_t dev);
119 static bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev);
120 static int pci_modevent(module_t mod, int what, void *arg);
121 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
123 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
124 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
125 int reg, uint32_t *data);
127 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
128 int reg, uint32_t data);
130 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
131 static void pci_disable_msi(device_t dev);
132 static void pci_enable_msi(device_t dev, uint64_t address,
134 static void pci_enable_msix(device_t dev, u_int index,
135 uint64_t address, uint32_t data);
136 static void pci_mask_msix(device_t dev, u_int index);
137 static void pci_unmask_msix(device_t dev, u_int index);
138 static int pci_msi_blacklisted(void);
139 static void pci_resume_msi(device_t dev);
140 static void pci_resume_msix(device_t dev);
141 static int pci_remap_intr_method(device_t bus, device_t dev,
/*
 * Kernel object method table for the "pci" driver: maps the generic
 * device/bus interfaces and the pci kobj interface onto the
 * implementations in this file (or bus_generic_* passthroughs).
 */
144 static device_method_t pci_methods[] = {
145 /* Device interface */
146 DEVMETHOD(device_probe, pci_probe),
147 DEVMETHOD(device_attach, pci_attach),
148 DEVMETHOD(device_detach, bus_generic_detach),
149 DEVMETHOD(device_shutdown, bus_generic_shutdown),
150 DEVMETHOD(device_suspend, pci_suspend),
151 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
154 DEVMETHOD(bus_print_child, pci_print_child),
155 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
156 DEVMETHOD(bus_read_ivar, pci_read_ivar),
157 DEVMETHOD(bus_write_ivar, pci_write_ivar),
158 DEVMETHOD(bus_driver_added, pci_driver_added),
159 DEVMETHOD(bus_setup_intr, pci_setup_intr),
160 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
162 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
163 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
164 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
165 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
166 DEVMETHOD(bus_delete_resource, pci_delete_resource),
167 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
168 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
169 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
170 DEVMETHOD(bus_activate_resource, pci_activate_resource),
171 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
172 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
173 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
174 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
/* PCI interface */
177 DEVMETHOD(pci_read_config, pci_read_config_method),
178 DEVMETHOD(pci_write_config, pci_write_config_method),
179 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
180 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
181 DEVMETHOD(pci_enable_io, pci_enable_io_method),
182 DEVMETHOD(pci_disable_io, pci_disable_io_method),
183 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
184 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
185 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
186 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
187 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
188 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
189 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
190 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
191 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
192 DEVMETHOD(pci_release_msi, pci_release_msi_method),
193 DEVMETHOD(pci_msi_count, pci_msi_count_method),
194 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Register the class and hook the driver onto pcib parents. */
199 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
201 static devclass_t pci_devclass;
202 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
203 MODULE_VERSION(pci, 1);
/* In-memory copy of the pci vendor database (loaded lazily at attach). */
205 static char *pci_vendordata;
206 static size_t pci_vendordata_size;
/* First field of struct pci_quirk: combined device-id/vendor-id key. */
209 	uint32_t	devid;		/* Vendor/device of the card */
/* Quirk type codes used in the pci_quirks[] table below. */
211 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
212 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
213 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
214 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
219 static const struct pci_quirk const pci_quirks[] = {
220 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
221 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
222 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
223 /* As does the Serverworks OSB4 (the SMBus mapping register) */
224 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
227 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
228 * or the CMIC-SL (AKA ServerWorks GC_LE).
230 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
231 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
234 * MSI doesn't work on earlier Intel chipsets including
235 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
237 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
246 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
249 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
252 * MSI-X doesn't work with at least LSI SAS1068E passed through by
255 { 0x079015ad, PCI_QUIRK_DISABLE_MSI, 0, 0 },
258 * Some virtualization environments emulate an older chipset
259 * but support MSI just fine. QEMU uses the Intel 82440.
261 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
264 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
265 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
266 * It prevents us from attaching hpet(4) when the bit is unset.
267 * Note this quirk only affects SB600 revision A13 and earlier.
268 * For SB600 A21 and later, firmware must set the bit to hide it.
269 * For SB700 and later, it is unused and hardcoded to zero.
271 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
276 /* map register information */
/* Flag bits describing what kind of decoder a BAR implements. */
277 #define PCI_MAPMEM 0x01 /* memory map */
278 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
279 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of all discovered PCI devices plus a change generation. */
281 struct devlist pci_devq;
282 uint32_t pci_generation;
283 uint32_t pci_numdevs = 0;
/* Set nonzero once a PCIe / PCI-X capable bridge or device is seen. */
284 static int pcie_chipset, pcix_chipset;
/* Tunables / sysctls controlling bus-wide PCI behavior (hw.pci.*). */
287 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
289 static int pci_enable_io_modes = 1;
290 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
291 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
292 &pci_enable_io_modes, 1,
293 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
294 enable these bits correctly. We'd like to do this all the time, but there\n\
295 are some peripherals that this causes problems with.");
297 static int pci_do_power_nodriver = 0;
298 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
299 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
300 &pci_do_power_nodriver, 0,
/*
 * NOTE(review): "agressively" below is misspelled, but it is part of a
 * user-visible sysctl description string; left untouched here.
 */
301 "Place a function into D3 state when no driver attaches to it. 0 means\n\
302 disable. 1 means conservatively place devices into D3 state. 2 means\n\
303 agressively place devices into D3 state. 3 means put absolutely everything\n\
306 static int pci_do_power_resume = 1;
307 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
308 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
309 &pci_do_power_resume, 1,
310 "Transition from D3 -> D0 on resume.");
/* Master switches for MSI / MSI-X allocation. */
312 static int pci_do_msi = 1;
313 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
314 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
315 "Enable support for MSI interrupts");
317 static int pci_do_msix = 1;
318 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
319 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
320 "Enable support for MSI-X interrupts");
322 static int pci_honor_msi_blacklist = 1;
323 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
324 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
325 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
/* USB early takeover defaults on only where BIOS legacy emulation exists. */
327 #if defined(__i386__) || defined(__amd64__)
328 static int pci_usb_takeover = 1;
330 static int pci_usb_takeover = 0;
332 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
333 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
334 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
335 Disable this if you depend on BIOS emulation of USB devices, that is\n\
336 you use USB devices (like keyboard or mouse) but do not load USB drivers");
338 /* Find a device_t by bus/slot/function in domain 0 */
341 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
344 return (pci_find_dbsf(0, bus, slot, func));
347 /* Find a device_t by domain/bus/slot/function */
350 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
352 struct pci_devinfo *dinfo;
354 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
355 if ((dinfo->cfg.domain == domain) &&
356 (dinfo->cfg.bus == bus) &&
357 (dinfo->cfg.slot == slot) &&
358 (dinfo->cfg.func == func)) {
359 return (dinfo->cfg.dev);
366 /* Find a device_t by vendor/device ID */
369 pci_find_device(uint16_t vendor, uint16_t device)
371 struct pci_devinfo *dinfo;
373 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
374 if ((dinfo->cfg.vendor == vendor) &&
375 (dinfo->cfg.device == device)) {
376 return (dinfo->cfg.dev);
384 pci_find_class(uint8_t class, uint8_t subclass)
386 struct pci_devinfo *dinfo;
388 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
389 if (dinfo->cfg.baseclass == class &&
390 dinfo->cfg.subclass == subclass) {
391 return (dinfo->cfg.dev);
399 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
404 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
407 retval += vprintf(fmt, ap);
412 /* return base address of memory or port map */
415 pci_mapbase(uint64_t mapreg)
418 if (PCI_BAR_MEM(mapreg))
419 return (mapreg & PCIM_BAR_MEM_BASE);
421 return (mapreg & PCIM_BAR_IO_BASE);
424 /* return map type of memory or port map */
427 pci_maptype(uint64_t mapreg)
430 if (PCI_BAR_IO(mapreg))
432 if (mapreg & PCIM_BAR_MEM_PREFETCH)
433 return ("Prefetchable Memory");
437 /* return log2 of map size decoded for memory or port map */
440 pci_mapsize(uint64_t testval)
444 testval = pci_mapbase(testval);
447 while ((testval & 1) == 0)
456 /* return base address of device ROM */
459 pci_rombase(uint64_t mapreg)
462 return (mapreg & PCIM_BIOS_ADDR_MASK);
465 /* return log2 of map size decided for device ROM */
468 pci_romsize(uint64_t testval)
472 testval = pci_rombase(testval);
475 while ((testval & 1) == 0)
484 /* return log2 of address range supported by map register */
487 pci_maprange(uint64_t mapreg)
491 if (PCI_BAR_IO(mapreg))
494 switch (mapreg & PCIM_BAR_MEM_TYPE) {
495 case PCIM_BAR_MEM_32:
498 case PCIM_BAR_MEM_1MB:
501 case PCIM_BAR_MEM_64:
508 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
511 pci_fixancient(pcicfgregs *cfg)
513 if (cfg->hdrtype != 0)
516 /* PCI to PCI bridges use header type 1 */
517 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
521 /* extract header type specific config data */
524 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
526 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
527 switch (cfg->hdrtype) {
529 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
530 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
531 cfg->nummaps = PCI_MAXMAPS_0;
534 cfg->nummaps = PCI_MAXMAPS_1;
537 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
538 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
539 cfg->nummaps = PCI_MAXMAPS_2;
545 /* read configuration header into pcicfgregs structure */
/*
 * Allocates a pci_devinfo of 'size' bytes (zeroed), fills its cfg from
 * the device's config header, parses capabilities when present, links
 * it onto pci_devq and mirrors the data into the pciconf ioctl record.
 * Returns NULL when no device responds at the given address.
 */
547 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
549 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
550 pcicfgregs *cfg = NULL;
551 struct pci_devinfo *devlist_entry;
552 struct devlist *devlist_head;
554 devlist_head = &pci_devq;
556 devlist_entry = NULL;
/* All-ones vendor/device means nothing decodes this address. */
558 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
559 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
560 if (devlist_entry == NULL)
563 cfg = &devlist_entry->cfg;
569 cfg->vendor = REG(PCIR_VENDOR, 2);
570 cfg->device = REG(PCIR_DEVICE, 2);
571 cfg->cmdreg = REG(PCIR_COMMAND, 2);
572 cfg->statreg = REG(PCIR_STATUS, 2);
573 cfg->baseclass = REG(PCIR_CLASS, 1);
574 cfg->subclass = REG(PCIR_SUBCLASS, 1);
575 cfg->progif = REG(PCIR_PROGIF, 1);
576 cfg->revid = REG(PCIR_REVID, 1);
577 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
578 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
579 cfg->lattimer = REG(PCIR_LATTIMER, 1);
580 cfg->intpin = REG(PCIR_INTPIN, 1);
581 cfg->intline = REG(PCIR_INTLINE, 1);
583 cfg->mingnt = REG(PCIR_MINGNT, 1);
584 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split the multifunction bit out of hdrtype, keep type alone. */
586 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
587 cfg->hdrtype &= ~PCIM_MFDEV;
588 STAILQ_INIT(&cfg->maps);
591 pci_hdrtypedata(pcib, b, s, f, cfg);
/* Walk the capability list only if the status register says one exists. */
593 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
594 pci_read_cap(pcib, cfg);
596 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror into the pciconf record used by the pci(4) ioctl interface. */
598 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
599 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
600 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
601 devlist_entry->conf.pc_sel.pc_func = cfg->func;
602 devlist_entry->conf.pc_hdr = cfg->hdrtype;
604 devlist_entry->conf.pc_subvendor = cfg->subvendor;
605 devlist_entry->conf.pc_subdevice = cfg->subdevice;
606 devlist_entry->conf.pc_vendor = cfg->vendor;
607 devlist_entry->conf.pc_device = cfg->device;
609 devlist_entry->conf.pc_class = cfg->baseclass;
610 devlist_entry->conf.pc_subclass = cfg->subclass;
611 devlist_entry->conf.pc_progif = cfg->progif;
612 devlist_entry->conf.pc_revid = cfg->revid;
617 return (devlist_entry);
/*
 * Walk the device's capability list and record the location/contents
 * of the capabilities this driver cares about (power management,
 * HyperTransport, MSI, MSI-X, VPD, bridge subvendor, PCI-X, PCIe).
 */
622 pci_read_cap(device_t pcib, pcicfgregs *cfg)
624 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
625 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
626 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
630 int ptr, nextptr, ptrptr;
/* The capability-pointer register location depends on the header type. */
632 switch (cfg->hdrtype & PCIM_HDRTYPE) {
635 ptrptr = PCIR_CAP_PTR;
638 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
641 return; /* no extended capabilities support */
643 nextptr = REG(ptrptr, 1); /* sanity check? */
646 * Read capability entries.
648 while (nextptr != 0) {
651 printf("illegal PCI extended capability offset %d\n",
655 /* Find the next entry */
657 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
659 /* Process this entry */
660 switch (REG(ptr + PCICAP_ID, 1)) {
661 case PCIY_PMG: /* PCI power management */
662 if (cfg->pp.pp_cap == 0) {
663 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
664 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
665 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
666 if ((nextptr - ptr) > PCIR_POWER_DATA)
667 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
670 case PCIY_HT: /* HyperTransport */
671 /* Determine HT-specific capability type. */
672 val = REG(ptr + PCIR_HT_COMMAND, 2);
674 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
675 cfg->ht.ht_slave = ptr;
677 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
678 switch (val & PCIM_HTCMD_CAP_MASK) {
679 case PCIM_HTCAP_MSI_MAPPING:
680 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
681 /* Sanity check the mapping window. */
682 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
685 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
687 if (addr != MSI_INTEL_ADDR_BASE)
689 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
690 cfg->domain, cfg->bus,
691 cfg->slot, cfg->func,
694 addr = MSI_INTEL_ADDR_BASE;
696 cfg->ht.ht_msimap = ptr;
697 cfg->ht.ht_msictrl = val;
698 cfg->ht.ht_msiaddr = addr;
703 case PCIY_MSI: /* PCI MSI */
704 cfg->msi.msi_location = ptr;
705 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* Multiple Message Capable field is log2-encoded in the control reg. */
706 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
707 PCIM_MSICTRL_MMC_MASK)>>1);
709 case PCIY_MSIX: /* PCI MSI-X */
710 cfg->msix.msix_location = ptr;
711 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
712 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
713 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table/PBA dwords encode the BAR index in the low bits, offset above. */
714 val = REG(ptr + PCIR_MSIX_TABLE, 4);
715 cfg->msix.msix_table_bar = PCIR_BAR(val &
717 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
718 val = REG(ptr + PCIR_MSIX_PBA, 4);
719 cfg->msix.msix_pba_bar = PCIR_BAR(val &
721 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
723 case PCIY_VPD: /* PCI Vital Product Data */
724 cfg->vpd.vpd_reg = ptr;
727 /* Should always be true. */
728 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
729 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
730 cfg->subvendor = val & 0xffff;
731 cfg->subdevice = val >> 16;
734 case PCIY_PCIX: /* PCI-X */
736 * Assume we have a PCI-X chipset if we have
737 * at least one PCI-PCI bridge with a PCI-X
738 * capability. Note that some systems with
739 * PCI-express or HT chipsets might match on
740 * this check as well.
742 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
745 case PCIY_EXPRESS: /* PCI-express */
747 * Assume we have a PCI-express chipset if we have
748 * at least one PCI-express device.
757 #if defined(__powerpc__)
759 * Enable the MSI mapping window for all HyperTransport
760 * slaves. PCI-PCI bridges have their windows enabled via
763 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
764 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
766 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
767 cfg->domain, cfg->bus, cfg->slot, cfg->func);
768 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
769 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
773 /* REG and WREG use carry through to next functions */
777 * PCI Vital Product Data
780 #define PCI_VPD_TIMEOUT 1000000
783 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
785 int count = PCI_VPD_TIMEOUT;
787 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
789 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
791 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
794 DELAY(1); /* limit looping */
796 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
803 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
805 int count = PCI_VPD_TIMEOUT;
807 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
809 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
810 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
811 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
814 DELAY(1); /* limit looping */
821 #undef PCI_VPD_TIMEOUT
/*
 * Cursor state for streaming VPD bytes: caches one 32-bit dword read
 * via pci_read_vpd_reg() and hands it out a byte at a time.
 */
823 struct vpd_readstate {
/*
 * Return the next VPD byte in *data, refilling the dword cache when it
 * runs dry.  Nonzero return indicates a VPD read failure.
 */
833 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
/* Refill: fetch the next little-endian dword from the device. */
838 if (vrs->bytesinval == 0) {
839 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
841 vrs->val = le32toh(reg);
843 byte = vrs->val & 0xff;
/* Otherwise shift the cached dword down and consume its low byte. */
846 vrs->val = vrs->val >> 8;
847 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array, and the writable
 * (VPD-W) keyword array.  Implemented as a byte-at-a-time state
 * machine; 'state' < 0 flags an I/O or format error, cksumvalid
 * tracks the mandatory "RV" checksum over the read-only section.
 */
857 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
859 struct vpd_readstate vrs;
864 int alloc, off; /* alloc/off for RO/W arrays */
870 /* init vpd reader */
878 name = remain = i = 0; /* shut up stupid gcc */
879 alloc = off = 0; /* shut up stupid gcc */
880 dflen = 0; /* shut up stupid gcc */
883 if (vpd_nextbyte(&vrs, &byte)) {
888 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
889 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
890 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
893 case 0: /* item name */
/* Large-resource tag: 16-bit little-endian length follows. */
895 if (vpd_nextbyte(&vrs, &byte2)) {
900 if (vpd_nextbyte(&vrs, &byte2)) {
904 remain |= byte2 << 8;
/* Reject lengths that would run past the 0x7f-dword VPD window. */
905 if (remain > (0x7f*4 - vrs.off)) {
908 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
909 cfg->domain, cfg->bus, cfg->slot,
/* Small-resource tag: name in bits 3-6. */
915 name = (byte >> 3) & 0xf;
918 case 0x2: /* String */
919 cfg->vpd.vpd_ident = malloc(remain + 1,
927 case 0x10: /* VPD-R */
930 cfg->vpd.vpd_ros = malloc(alloc *
931 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
935 case 0x11: /* VPD-W */
938 cfg->vpd.vpd_w = malloc(alloc *
939 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
943 default: /* Invalid data, abort */
949 case 1: /* Identifier String */
950 cfg->vpd.vpd_ident[i++] = byte;
953 cfg->vpd.vpd_ident[i] = '\0';
958 case 2: /* VPD-R Keyword Header */
/* Grow the read-only keyword array geometrically as needed. */
960 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
961 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
962 M_DEVBUF, M_WAITOK | M_ZERO);
964 cfg->vpd.vpd_ros[off].keyword[0] = byte;
965 if (vpd_nextbyte(&vrs, &byte2)) {
969 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
970 if (vpd_nextbyte(&vrs, &byte2)) {
/* The "RV" checksum keyword must fit inside the remaining bytes. */
976 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
979 * if this happens, we can't trust the rest
983 "pci%d:%d:%d:%d: bad keyword length: %d\n",
984 cfg->domain, cfg->bus, cfg->slot,
989 } else if (dflen == 0) {
990 cfg->vpd.vpd_ros[off].value = malloc(1 *
991 sizeof(*cfg->vpd.vpd_ros[off].value),
993 cfg->vpd.vpd_ros[off].value[0] = '\x00';
995 cfg->vpd.vpd_ros[off].value = malloc(
997 sizeof(*cfg->vpd.vpd_ros[off].value),
1001 /* keep in sync w/ state 3's transistions */
1002 if (dflen == 0 && remain == 0)
1004 else if (dflen == 0)
1010 case 3: /* VPD-R Keyword Value */
1011 cfg->vpd.vpd_ros[off].value[i++] = byte;
/* The "RV" keyword's first byte completes the section checksum. */
1012 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1013 "RV", 2) == 0 && cksumvalid == -1) {
1019 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1020 cfg->domain, cfg->bus,
1021 cfg->slot, cfg->func,
1030 /* keep in sync w/ state 2's transistions */
1032 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1033 if (dflen == 0 && remain == 0) {
/* Section done: trim the array to its final element count. */
1034 cfg->vpd.vpd_rocnt = off;
1035 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1036 off * sizeof(*cfg->vpd.vpd_ros),
1037 M_DEVBUF, M_WAITOK | M_ZERO);
1039 } else if (dflen == 0)
1049 case 5: /* VPD-W Keyword Header */
1051 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1052 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1053 M_DEVBUF, M_WAITOK | M_ZERO);
1055 cfg->vpd.vpd_w[off].keyword[0] = byte;
1056 if (vpd_nextbyte(&vrs, &byte2)) {
1060 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1061 if (vpd_nextbyte(&vrs, &byte2)) {
1065 cfg->vpd.vpd_w[off].len = dflen = byte2;
/* Remember where this writable field lives for later updates. */
1066 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1067 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1068 sizeof(*cfg->vpd.vpd_w[off].value),
1069 M_DEVBUF, M_WAITOK);
1072 /* keep in sync w/ state 6's transistions */
1073 if (dflen == 0 && remain == 0)
1075 else if (dflen == 0)
1081 case 6: /* VPD-W Keyword Value */
1082 cfg->vpd.vpd_w[off].value[i++] = byte;
1085 /* keep in sync w/ state 5's transistions */
1087 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1088 if (dflen == 0 && remain == 0) {
1089 cfg->vpd.vpd_wcnt = off;
1090 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1091 off * sizeof(*cfg->vpd.vpd_w),
1092 M_DEVBUF, M_WAITOK | M_ZERO);
1094 } else if (dflen == 0)
1099 printf("pci%d:%d:%d:%d: invalid state: %d\n",
1100 cfg->domain, cfg->bus, cfg->slot, cfg->func,
/* Bad checksum or parse error: discard the read-only section. */
1107 if (cksumvalid == 0 || state < -1) {
1108 /* read-only data bad, clean up */
1109 if (cfg->vpd.vpd_ros != NULL) {
1110 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1111 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1112 free(cfg->vpd.vpd_ros, M_DEVBUF);
1113 cfg->vpd.vpd_ros = NULL;
1117 /* I/O error, clean up */
1118 printf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1119 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1120 if (cfg->vpd.vpd_ident != NULL) {
1121 free(cfg->vpd.vpd_ident, M_DEVBUF);
1122 cfg->vpd.vpd_ident = NULL;
1124 if (cfg->vpd.vpd_w != NULL) {
1125 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1126 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1127 free(cfg->vpd.vpd_w, M_DEVBUF);
1128 cfg->vpd.vpd_w = NULL;
/* Mark parsed (even on failure) so we never re-read this device. */
1131 cfg->vpd.vpd_cached = 1;
1137 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1139 struct pci_devinfo *dinfo = device_get_ivars(child);
1140 pcicfgregs *cfg = &dinfo->cfg;
1142 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1143 pci_read_vpd(device_get_parent(dev), cfg);
1145 *identptr = cfg->vpd.vpd_ident;
1147 if (*identptr == NULL)
1154 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1157 struct pci_devinfo *dinfo = device_get_ivars(child);
1158 pcicfgregs *cfg = &dinfo->cfg;
1161 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1162 pci_read_vpd(device_get_parent(dev), cfg);
1164 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1165 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1166 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1167 *vptr = cfg->vpd.vpd_ros[i].value;
1176 * Find the requested extended capability and return the offset in
1177 * configuration space via the pointer provided. The function returns
1178 * 0 on success and error code otherwise.
1181 pci_find_extcap_method(device_t dev, device_t child, int capability,
1184 struct pci_devinfo *dinfo = device_get_ivars(child);
1185 pcicfgregs *cfg = &dinfo->cfg;
1190 * Check the CAP_LIST bit of the PCI status register first.
1192 status = pci_read_config(child, PCIR_STATUS, 2);
1193 if (!(status & PCIM_STATUS_CAPPRESENT))
1197 * Determine the start pointer of the capabilities list.
1199 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1205 ptr = PCIR_CAP_PTR_2;
1209 return (ENXIO); /* no extended capabilities support */
1211 ptr = pci_read_config(child, ptr, 1);
1214 * Traverse the capabilities list.
1217 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1222 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1229 * Support for MSI-X message interrupts.
1232 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1234 struct pci_devinfo *dinfo = device_get_ivars(dev);
1235 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1238 KASSERT(msix->msix_table_len > index, ("bogus index"));
1239 offset = msix->msix_table_offset + index * 16;
1240 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1241 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1242 bus_write_4(msix->msix_table_res, offset + 8, data);
1244 /* Enable MSI -> HT mapping. */
1245 pci_ht_map_msi(dev, address);
1249 pci_mask_msix(device_t dev, u_int index)
1251 struct pci_devinfo *dinfo = device_get_ivars(dev);
1252 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1253 uint32_t offset, val;
1255 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1256 offset = msix->msix_table_offset + index * 16 + 12;
1257 val = bus_read_4(msix->msix_table_res, offset);
1258 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1259 val |= PCIM_MSIX_VCTRL_MASK;
1260 bus_write_4(msix->msix_table_res, offset, val);
1265 pci_unmask_msix(device_t dev, u_int index)
1267 struct pci_devinfo *dinfo = device_get_ivars(dev);
1268 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1269 uint32_t offset, val;
1271 KASSERT(msix->msix_table_len > index, ("bogus index"));
1272 offset = msix->msix_table_offset + index * 16 + 12;
1273 val = bus_read_4(msix->msix_table_res, offset);
1274 if (val & PCIM_MSIX_VCTRL_MASK) {
1275 val &= ~PCIM_MSIX_VCTRL_MASK;
1276 bus_write_4(msix->msix_table_res, offset, val);
1281 pci_pending_msix(device_t dev, u_int index)
1283 struct pci_devinfo *dinfo = device_get_ivars(dev);
1284 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1285 uint32_t offset, bit;
1287 KASSERT(msix->msix_table_len > index, ("bogus index"));
1288 offset = msix->msix_pba_offset + (index / 32) * 4;
1289 bit = 1 << index % 32;
1290 return (bus_read_4(msix->msix_pba_res, offset) & bit);
/*
 * Restore MSI-X state on resume: mask everything, reprogram only the
 * table slots that have an assigned vector AND an active handler, then
 * restore the saved capability control word.
 */
1294 * Restore MSI-X registers and table during resume. If MSI-X is
1295 * enabled then walk the virtual table to restore the actual MSI-X
1299 pci_resume_msix(device_t dev)
1301 struct pci_devinfo *dinfo = device_get_ivars(dev);
1302 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1303 struct msix_table_entry *mte;
1304 struct msix_vector *mv;
1307 if (msix->msix_alloc > 0) {
1308 /* First, mask all vectors. */
1309 for (i = 0; i < msix->msix_msgnum; i++)
1310 pci_mask_msix(dev, i);
1312 /* Second, program any messages with at least one handler. */
1313 for (i = 0; i < msix->msix_table_len; i++) {
1314 mte = &msix->msix_table[i];
1315 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is 1-based; msix_vectors[] is 0-based. */
1317 mv = &msix->msix_vectors[mte->mte_vector - 1];
1318 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1319 pci_unmask_msix(dev, i);
1322 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1323 msix->msix_ctrl, 2);
1327 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1328 * returned in *count. After this function returns, each message will be
1329 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * Preconditions enforced below: rid 0 (legacy INTx) not in use, no
 * MSI/MSI-X already allocated, MSI not blacklisted, the MSI-X
 * capability present, and the table/PBA BARs already mapped and
 * active.  NOTE(review): extract is line-lossy; error returns between
 * the visible checks are not shown.  Code kept byte-identical.
 */
1332 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1334 struct pci_devinfo *dinfo = device_get_ivars(child);
1335 pcicfgregs *cfg = &dinfo->cfg;
1336 struct resource_list_entry *rle;
1337 int actual, error, i, irq, max;
1339 /* Don't let count == 0 get us into trouble. */
1343 /* If rid 0 is allocated, then fail. */
1344 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1345 if (rle != NULL && rle->res != NULL)
1348 /* Already have allocated messages? */
1349 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1352 /* If MSI is blacklisted for this system, fail. */
1353 if (pci_msi_blacklisted())
1356 /* MSI-X capability present? */
1357 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1360 /* Make sure the appropriate BARs are mapped. */
1361 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1362 cfg->msix.msix_table_bar);
1363 if (rle == NULL || rle->res == NULL ||
1364 !(rman_get_flags(rle->res) & RF_ACTIVE))
1366 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1367 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1368 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1369 cfg->msix.msix_pba_bar);
1370 if (rle == NULL || rle->res == NULL ||
1371 !(rman_get_flags(rle->res) & RF_ACTIVE))
1374 cfg->msix.msix_pba_res = rle->res;
1377 device_printf(child,
1378 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1379 *count, cfg->msix.msix_msgnum);
1380 max = min(*count, cfg->msix.msix_msgnum);
1381 for (i = 0; i < max; i++) {
1382 /* Allocate a message. */
1383 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
/* Each allocated IRQ is exposed as SYS_RES_IRQ rid i + 1. */
1389 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1395 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1397 device_printf(child, "using IRQ %lu for MSI-X\n",
1403 * Be fancy and try to print contiguous runs of
1404 * IRQ values as ranges. 'irq' is the previous IRQ.
1405 * 'run' is true if we are in a range.
1407 device_printf(child, "using IRQs %lu", rle->start);
1410 for (i = 1; i < actual; i++) {
1411 rle = resource_list_find(&dinfo->resources,
1412 SYS_RES_IRQ, i + 1);
1414 /* Still in a run? */
1415 if (rle->start == irq + 1) {
1421 /* Finish previous range. */
1427 /* Start new range. */
1428 printf(",%lu", rle->start);
1432 /* Unfinished range? */
1435 printf(" for MSI-X\n");
1439 /* Mask all vectors. */
1440 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1441 pci_mask_msix(child, i);
1443 /* Allocate and initialize vector data and virtual table. */
1444 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1445 M_DEVBUF, M_WAITOK | M_ZERO);
1446 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1447 M_DEVBUF, M_WAITOK | M_ZERO);
1448 for (i = 0; i < actual; i++) {
1449 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1450 cfg->msix.msix_vectors[i].mv_irq = rle->start;
/* Identity mapping: table slot i uses (1-based) vector i + 1. */
1451 cfg->msix.msix_table[i].mte_vector = i + 1;
1454 /* Update control register to enable MSI-X. */
1455 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1456 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1457 cfg->msix.msix_ctrl, 2);
1459 /* Update counts of alloc'd messages. */
1460 cfg->msix.msix_alloc = actual;
1461 cfg->msix.msix_table_len = actual;
1467 * By default, pci_alloc_msix() will assign the allocated IRQ
1468 * resources consecutively to the first N messages in the MSI-X table.
1469 * However, device drivers may want to use different layouts if they
1470 * either receive fewer messages than they asked for, or they wish to
1471 * populate the MSI-X table sparsely. This method allows the driver
1472 * to specify what layout it wants. It must be called after a
1473 * successful pci_alloc_msix() but before any of the associated
1474 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1476 * The 'vectors' array contains 'count' message vectors. The array
1477 * maps directly to the MSI-X table in that index 0 in the array
1478 * specifies the vector for the first message in the MSI-X table, etc.
1479 * The vector value in each array index can either be 0 to indicate
1480 * that no vector should be assigned to a message slot, or it can be a
1481 * number from 1 to N (where N is the count returned from a
1482 * successful call to pci_alloc_msix()) to indicate which message
1483 * vector (IRQ) to be used for the corresponding message.
1485 * On successful return, each message with a non-zero vector will have
1486 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1487 * 1. Additionally, if any of the IRQs allocated via the previous
1488 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1489 * will be freed back to the system automatically.
1491 * For example, suppose a driver has a MSI-X table with 6 messages and
1492 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1493 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1494 * C. After the call to pci_alloc_msix(), the device will be setup to
1495 * have an MSI-X table of ABC--- (where - means no vector assigned).
1496 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1497 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1498 * be freed back to the system. This device will also have valid
1499 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1501 * In any case, the SYS_RES_IRQ rid X will always map to the message
1502 * at MSI-X table index X - 1 and will only be valid if a vector is
1503 * assigned to that table entry.
/*
 * Re-lay-out the MSI-X virtual table according to 'vectors' (see the
 * big comment above for the contract).  Values in vectors[] are
 * 1-based vector numbers (0 = slot unused); msix_vectors[] is a
 * 0-based array of msix_alloc entries.
 *
 * FIX(review): the IRQ-mapping loop and the bootverbose printf below
 * indexed msix_vectors[] with the raw 1-based vectors[i], reading the
 * wrong element and overrunning the array when
 * vectors[i] == msix_alloc.  Both now use vectors[i] - 1, matching the
 * 'used[vectors[i] - 1]' and 'mte_vector - 1' idiom used elsewhere in
 * this file.
 */
1506 pci_remap_msix_method(device_t dev, device_t child, int count,
1507 const u_int *vectors)
1509 struct pci_devinfo *dinfo = device_get_ivars(child);
1510 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1511 struct resource_list_entry *rle;
1512 int i, irq, j, *used;
1515 * Have to have at least one message in the table but the
1516 * table can't be bigger than the actual MSI-X table in the
1519 if (count == 0 || count > msix->msix_msgnum)
1522 /* Sanity check the vectors. */
1523 for (i = 0; i < count; i++)
1524 if (vectors[i] > msix->msix_alloc)
1528 * Make sure there aren't any holes in the vectors to be used.
1529 * It's a big pain to support it, and it doesn't really make
1530 * sense anyway. Also, at least one vector must be used.
1532 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1534 for (i = 0; i < count; i++)
1535 if (vectors[i] != 0)
1536 used[vectors[i] - 1] = 1;
1537 for (i = 0; i < msix->msix_alloc - 1; i++)
1538 if (used[i] == 0 && used[i + 1] == 1) {
1539 free(used, M_DEVBUF);
1543 free(used, M_DEVBUF);
1547 /* Make sure none of the resources are allocated. */
1548 for (i = 0; i < msix->msix_table_len; i++) {
1549 if (msix->msix_table[i].mte_vector == 0)
1551 if (msix->msix_table[i].mte_handlers > 0)
1553 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1554 KASSERT(rle != NULL, ("missing resource"));
1555 if (rle->res != NULL)
1559 /* Free the existing resource list entries. */
1560 for (i = 0; i < msix->msix_table_len; i++) {
1561 if (msix->msix_table[i].mte_vector == 0)
1563 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1567 * Build the new virtual table keeping track of which vectors are
1570 free(msix->msix_table, M_DEVBUF);
1571 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1572 M_DEVBUF, M_WAITOK | M_ZERO);
1573 for (i = 0; i < count; i++)
1574 msix->msix_table[i].mte_vector = vectors[i];
1575 msix->msix_table_len = count;
1577 /* Free any unused IRQs and resize the vectors array if necessary. */
1578 j = msix->msix_alloc - 1;
1580 struct msix_vector *vec;
1582 while (used[j] == 0) {
1583 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1584 msix->msix_vectors[j].mv_irq);
1587 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1589 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1591 free(msix->msix_vectors, M_DEVBUF);
1592 msix->msix_vectors = vec;
1593 msix->msix_alloc = j + 1;
1595 free(used, M_DEVBUF);
1597 /* Map the IRQs onto the rids. */
1598 for (i = 0; i < count; i++) {
1599 if (vectors[i] == 0)
/* vectors[i] is 1-based: index the 0-based msix_vectors[] with -1. */
1601 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1602 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1607 device_printf(child, "Remapped MSI-X IRQs as: ");
1608 for (i = 0; i < count; i++) {
1611 if (vectors[i] == 0)
/* Same 1-based -> 0-based adjustment as the mapping loop above. */
1615 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X state for 'child': refuse if any message still has
 * a handler or an allocated SYS_RES_IRQ resource, otherwise disable
 * MSI-X in the control register, tear down the virtual table and hand
 * every IRQ back to the parent bridge.
 */
1624 pci_release_msix(device_t dev, device_t child)
1626 struct pci_devinfo *dinfo = device_get_ivars(child);
1627 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1628 struct resource_list_entry *rle;
1631 /* Do we have any messages to release? */
1632 if (msix->msix_alloc == 0)
1635 /* Make sure none of the resources are allocated. */
1636 for (i = 0; i < msix->msix_table_len; i++) {
1637 if (msix->msix_table[i].mte_vector == 0)
1639 if (msix->msix_table[i].mte_handlers > 0)
1641 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1642 KASSERT(rle != NULL, ("missing resource"));
1643 if (rle->res != NULL)
1647 /* Update control register to disable MSI-X. */
1648 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1649 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1650 msix->msix_ctrl, 2);
1652 /* Free the resource list entries. */
1653 for (i = 0; i < msix->msix_table_len; i++) {
1654 if (msix->msix_table[i].mte_vector == 0)
1656 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1658 free(msix->msix_table, M_DEVBUF);
1659 msix->msix_table_len = 0;
1661 /* Release the IRQs. */
1662 for (i = 0; i < msix->msix_alloc; i++)
1663 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1664 msix->msix_vectors[i].mv_irq);
1665 free(msix->msix_vectors, M_DEVBUF);
1666 msix->msix_alloc = 0;
1671 * Return the max supported MSI-X messages this device supports.
1672 * Basically, assuming the MD code can alloc messages, this function
1673 * should return the maximum value that pci_alloc_msix() can return.
1674 * Thus, it is subject to the tunables, etc.
1677 pci_msix_count_method(device_t dev, device_t child)
1679 struct pci_devinfo *dinfo = device_get_ivars(child);
1680 struct pcicfg_msix *msix = &dinfo->cfg.msix;
/* Only report a count when MSI-X is enabled and the cap exists. */
1682 if (pci_do_msix && msix->msix_location != 0)
1683 return (msix->msix_msgnum);
1688 * HyperTransport MSI mapping control
/*
 * Enable the HT MSI mapping window when a non-zero MSI address that
 * falls in the mapping's 1MB-aligned window (top bits >> 20 compared)
 * is programmed, and disable it when MSI is torn down (addr == 0).
 */
1691 pci_ht_map_msi(device_t dev, uint64_t addr)
1693 struct pci_devinfo *dinfo = device_get_ivars(dev);
1694 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1699 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1700 ht->ht_msiaddr >> 20 == addr >> 20) {
1701 /* Enable MSI -> HT mapping. */
1702 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1703 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1707 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1708 /* Disable MSI -> HT mapping. */
1709 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1710 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCIe Max_Read_Request_Size in bytes (128 << encoded
 * value), or 0 if the device has no PCI Express capability.
 * NOTE(review): the shift of 'val' down from bits 14:12 appears to be
 * on a line missing from this extract — confirm against the full file.
 */
1716 pci_get_max_read_req(device_t dev)
1721 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) != 0)
1723 val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
1724 val &= PCIM_EXP_CTL_MAX_READ_REQUEST;
1726 return (1 << (val + 7));
/*
 * Program the PCIe Max_Read_Request_Size.  'size' is rounded down to a
 * power of two (fls), encoded as fls(size) - 8 and written into bits
 * 14:12 of the Device Control register.  No-op for non-PCIe devices.
 */
1730 pci_set_max_read_req(device_t dev, int size)
1735 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) != 0)
1741 size = (1 << (fls(size) - 1));
1742 val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
1743 val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
1744 val |= (fls(size) - 8) << 12;
1745 pci_write_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, val, 2);
1750 * Support for MSI message signalled interrupts.
/*
 * Program the MSI address/data registers (using the 64-bit register
 * layout when the capability advertises it), set the MSI enable bit,
 * and update any HyperTransport MSI mapping window.
 */
1753 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1755 struct pci_devinfo *dinfo = device_get_ivars(dev);
1756 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1758 /* Write data and address values. */
1759 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1760 address & 0xffffffff, 4);
/* The data register sits at a different offset for 64-bit MSI. */
1761 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1762 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1764 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1767 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1770 /* Enable MSI in the control register. */
1771 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1772 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1775 /* Enable MSI -> HT mapping. */
1776 pci_ht_map_msi(dev, address);
/*
 * Disable MSI: tear down any HT mapping first (addr == 0), then clear
 * the MSI enable bit in the capability's control register.
 */
1780 pci_disable_msi(device_t dev)
1782 struct pci_devinfo *dinfo = device_get_ivars(dev);
1783 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1785 /* Disable MSI -> HT mapping. */
1786 pci_ht_map_msi(dev, 0);
1788 /* Disable MSI in the control register. */
1789 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1790 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1795 * Restore MSI registers during resume. If MSI is enabled then
1796 * restore the data and address registers in addition to the control
/*
 * Rewrites the saved msi_addr/msi_data (and the high address dword for
 * 64-bit MSI) before restoring the saved control word.
 */
1800 pci_resume_msi(device_t dev)
1802 struct pci_devinfo *dinfo = device_get_ivars(dev);
1803 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1807 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1808 address = msi->msi_addr;
1809 data = msi->msi_data;
1810 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1811 address & 0xffffffff, 4);
1812 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1813 pci_write_config(dev, msi->msi_location +
1814 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1815 pci_write_config(dev, msi->msi_location +
1816 PCIR_MSI_DATA_64BIT, data, 2);
1818 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1821 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * An IRQ 'irq' owned by 'dev' has moved (e.g. to another CPU): ask the
 * parent bridge for the new address/data pair and reprogram whichever
 * MSI or MSI-X registers currently carry it.
 */
1826 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1828 struct pci_devinfo *dinfo = device_get_ivars(dev);
1829 pcicfgregs *cfg = &dinfo->cfg;
1830 struct resource_list_entry *rle;
1831 struct msix_table_entry *mte;
1832 struct msix_vector *mv;
1838 * Handle MSI first. We try to find this IRQ among our list
1839 * of MSI IRQs. If we find it, we request updated address and
1840 * data registers and apply the results.
1842 if (cfg->msi.msi_alloc > 0) {
1844 /* If we don't have any active handlers, nothing to do. */
1845 if (cfg->msi.msi_handlers == 0)
1847 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1848 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1850 if (rle->start == irq) {
1851 error = PCIB_MAP_MSI(device_get_parent(bus),
1852 dev, irq, &addr, &data);
/* Reprogram by a full disable/enable cycle with the new values. */
1855 pci_disable_msi(dev);
1856 dinfo->cfg.msi.msi_addr = addr;
1857 dinfo->cfg.msi.msi_data = data;
1858 pci_enable_msi(dev, addr, data);
1866 * For MSI-X, we check to see if we have this IRQ. If we do,
1867 * we request the updated mapping info. If that works, we go
1868 * through all the slots that use this IRQ and update them.
1870 if (cfg->msix.msix_alloc > 0) {
1871 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1872 mv = &cfg->msix.msix_vectors[i];
1873 if (mv->mv_irq == irq) {
1874 error = PCIB_MAP_MSI(device_get_parent(bus),
1875 dev, irq, &addr, &data);
1878 mv->mv_address = addr;
/* Update every table slot bound to this (1-based) vector i + 1. */
1880 for (j = 0; j < cfg->msix.msix_table_len; j++) {
1881 mte = &cfg->msix.msix_table[j];
1882 if (mte->mte_vector != i + 1)
1884 if (mte->mte_handlers == 0)
/* Mask while rewriting the entry, then unmask. */
1886 pci_mask_msix(dev, j);
1887 pci_enable_msix(dev, j, addr, data);
1888 pci_unmask_msix(dev, j);
1899 * Returns true if the specified device is blacklisted because MSI
/*
 * Scans the static pci_quirks[] table for a PCI_QUIRK_DISABLE_MSI
 * entry matching this device's vendor/device ID.  Short-circuits to
 * "not blacklisted" when the blacklist tunable is off.
 */
1903 pci_msi_device_blacklisted(device_t dev)
1905 const struct pci_quirk *q;
1907 if (!pci_honor_msi_blacklist)
1910 for (q = &pci_quirks[0]; q->devid; q++) {
1911 if (q->devid == pci_get_devid(dev) &&
1912 q->type == PCI_QUIRK_DISABLE_MSI)
1919 * Returns true if a specified chipset supports MSI when it is
1920 * emulated hardware in a virtual machine.
/* Positive quirk lookup: PCI_QUIRK_ENABLE_MSI_VM by vendor/device ID. */
1923 pci_msi_vm_chipset(device_t dev)
1925 const struct pci_quirk *q;
1927 for (q = &pci_quirks[0]; q->devid; q++) {
1928 if (q->devid == pci_get_devid(dev) &&
1929 q->type == PCI_QUIRK_ENABLE_MSI_VM)
1936 * Determine if MSI is blacklisted globally on this system. Currently,
1937 * we just check for blacklisted chipsets as represented by the
1938 * host-PCI bridge at device 0:0:0. In the future, it may become
1939 * necessary to check other system attributes, such as the kenv values
1940 * that give the motherboard manufacturer and model number.
1943 pci_msi_blacklisted(void)
1947 if (!pci_honor_msi_blacklist)
1950 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
/*
 * In a VM guest, MSI is allowed only if the emulated chipset at
 * 0:0:0 carries the ENABLE_MSI_VM quirk.
 */
1951 if (!(pcie_chipset || pcix_chipset)) {
1952 if (vm_guest != VM_GUEST_NO) {
1953 dev = pci_find_bsf(0, 0, 0);
1955 return (pci_msi_vm_chipset(dev) == 0);
1960 dev = pci_find_bsf(0, 0, 0);
1962 return (pci_msi_device_blacklisted(dev));
1967 * Attempt to allocate *count MSI messages. The actual number allocated is
1968 * returned in *count. After this function returns, each message will be
1969 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * Same precondition checks as pci_alloc_msix_method: rid 0 free, no
 * prior MSI/MSI-X allocation, not blacklisted, MSI capability present.
 * MSI itself caps the count at 32 and requires a power-of-2 count.
 */
1972 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1974 struct pci_devinfo *dinfo = device_get_ivars(child);
1975 pcicfgregs *cfg = &dinfo->cfg;
1976 struct resource_list_entry *rle;
1977 int actual, error, i, irqs[32];
1980 /* Don't let count == 0 get us into trouble. */
1984 /* If rid 0 is allocated, then fail. */
1985 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1986 if (rle != NULL && rle->res != NULL)
1989 /* Already have allocated messages? */
1990 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1993 /* If MSI is blacklisted for this system, fail. */
1994 if (pci_msi_blacklisted())
1997 /* MSI capability present? */
1998 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2002 device_printf(child,
2003 "attempting to allocate %d MSI vectors (%d supported)\n",
2004 *count, cfg->msi.msi_msgnum);
2006 /* Don't ask for more than the device supports. */
2007 actual = min(*count, cfg->msi.msi_msgnum);
2009 /* Don't ask for more than 32 messages. */
2010 actual = min(actual, 32);
2012 /* MSI requires power of 2 number of messages. */
2013 if (!powerof2(actual))
2017 /* Try to allocate N messages. */
2018 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2030 * We now have N actual messages mapped onto SYS_RES_IRQ
2031 * resources in the irqs[] array, so add new resources
2032 * starting at rid 1.
2034 for (i = 0; i < actual; i++)
2035 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2036 irqs[i], irqs[i], 1);
2040 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2045 * Be fancy and try to print contiguous runs
2046 * of IRQ values as ranges. 'run' is true if
2047 * we are in a range.
2049 device_printf(child, "using IRQs %d", irqs[0]);
2051 for (i = 1; i < actual; i++) {
2053 /* Still in a run? */
2054 if (irqs[i] == irqs[i - 1] + 1) {
2059 /* Finish previous range. */
2061 printf("-%d", irqs[i - 1]);
2065 /* Start new range. */
2066 printf(",%d", irqs[i]);
2069 /* Unfinished range? */
2071 printf("-%d", irqs[actual - 1]);
2072 printf(" for MSI\n");
2076 /* Update control register with actual count. */
2077 ctrl = cfg->msi.msi_ctrl;
2078 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* Multiple Message Enable field encodes log2(actual) at bits 6:4. */
2079 ctrl |= (ffs(actual) - 1) << 4;
2080 cfg->msi.msi_ctrl = ctrl;
2081 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2083 /* Update counts of alloc'd messages. */
2084 cfg->msi.msi_alloc = actual;
2085 cfg->msi.msi_handlers = 0;
2090 /* Release the MSI messages associated with this device. */
/*
 * Tries MSI-X release first (returns its result unless it reports
 * ENODEV); otherwise verifies no handlers or allocated resources
 * remain, clears the MME field, and returns the IRQs to the bridge.
 */
2092 pci_release_msi_method(device_t dev, device_t child)
2094 struct pci_devinfo *dinfo = device_get_ivars(child);
2095 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2096 struct resource_list_entry *rle;
2097 int error, i, irqs[32];
2099 /* Try MSI-X first. */
2100 error = pci_release_msix(dev, child);
2101 if (error != ENODEV)
2104 /* Do we have any messages to release? */
2105 if (msi->msi_alloc == 0)
2107 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"))
2109 /* Make sure none of the resources are allocated. */
2110 if (msi->msi_handlers > 0)
2112 for (i = 0; i < msi->msi_alloc; i++) {
2113 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2114 KASSERT(rle != NULL, ("missing MSI resource"));
2115 if (rle->res != NULL)
2117 irqs[i] = rle->start;
2120 /* Update control register with 0 count. */
2121 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2122 ("%s: MSI still enabled", __func__));
2123 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2124 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2127 /* Release the messages. */
2128 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2129 for (i = 0; i < msi->msi_alloc; i++)
2130 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2132 /* Update alloc count. */
2140 * Return the max supported MSI messages this device supports.
2141 * Basically, assuming the MD code can alloc messages, this function
2142 * should return the maximum value that pci_alloc_msi() can return.
2143 * Thus, it is subject to the tunables, etc.
2146 pci_msi_count_method(device_t dev, device_t child)
2148 struct pci_devinfo *dinfo = device_get_ivars(child);
2149 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Only report a count when MSI is enabled and the cap exists. */
2151 if (pci_do_msi && msi->msi_location != 0)
2152 return (msi->msi_msgnum);
2156 /* free pcicfgregs structure and all depending data structures */
/*
 * Frees the VPD strings (identifier, read-only and writable keyword
 * values), every BAR record on the maps list, then unlinks the devinfo
 * from the global device queue and frees it.
 */
2159 pci_freecfg(struct pci_devinfo *dinfo)
2161 struct devlist *devlist_head;
2162 struct pci_map *pm, *next;
2165 devlist_head = &pci_devq;
2167 if (dinfo->cfg.vpd.vpd_reg) {
2168 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2169 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2170 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2171 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2172 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2173 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2174 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* _SAFE variant: entries are freed while walking the list. */
2176 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2179 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2180 free(dinfo, M_DEVBUF);
2182 /* increment the generation count */
2185 /* we're losing one device */
2191 * PCI power management
/*
 * Move 'child' to power state D0-D3.  Returns EOPNOTSUPP when the
 * device has no PM capability or the requested optional state (D1/D2)
 * is not supported.  No-ops on a same-state transition.
 */
2194 pci_set_powerstate_method(device_t dev, device_t child, int state)
2196 struct pci_devinfo *dinfo = device_get_ivars(child);
2197 pcicfgregs *cfg = &dinfo->cfg;
2199 int result, oldstate, highest, delay;
2201 if (cfg->pp.pp_cap == 0)
2202 return (EOPNOTSUPP);
2205 * Optimize a no state change request away. While it would be OK to
2206 * write to the hardware in theory, some devices have shown odd
2207 * behavior when going from D3 -> D3.
2209 oldstate = pci_get_powerstate(child);
2210 if (oldstate == state)
2214 * The PCI power management specification states that after a state
2215 * transition between PCI power states, system software must
2216 * guarantee a minimal delay before the function accesses the device.
2217 * Compute the worst case delay that we need to guarantee before we
2218 * access the device. Many devices will be responsive much more
2219 * quickly than this delay, but there are some that don't respond
2220 * instantly to state changes. Transitions to/from D3 state require
2221 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2222 * is done below with DELAY rather than a sleeper function because
2223 * this function can be called from contexts where we cannot sleep.
2225 highest = (oldstate > state) ? oldstate : state;
2226 if (highest == PCI_POWERSTATE_D3)
2228 else if (highest == PCI_POWERSTATE_D2)
2232 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2233 & ~PCIM_PSTAT_DMASK;
2236 case PCI_POWERSTATE_D0:
2237 status |= PCIM_PSTAT_D0;
2239 case PCI_POWERSTATE_D1:
/* D1 and D2 are optional; honor the capability bits. */
2240 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2241 return (EOPNOTSUPP);
2242 status |= PCIM_PSTAT_D1;
2244 case PCI_POWERSTATE_D2:
2245 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2246 return (EOPNOTSUPP);
2247 status |= PCIM_PSTAT_D2;
2249 case PCI_POWERSTATE_D3:
2250 status |= PCIM_PSTAT_D3;
2257 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2260 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the current power state from the PM status register; devices
 * with no PM capability are reported as always being in D0.
 */
2267 pci_get_powerstate_method(device_t dev, device_t child)
2269 struct pci_devinfo *dinfo = device_get_ivars(child);
2270 pcicfgregs *cfg = &dinfo->cfg;
2274 if (cfg->pp.pp_cap != 0) {
2275 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2276 switch (status & PCIM_PSTAT_DMASK) {
2278 result = PCI_POWERSTATE_D0;
2281 result = PCI_POWERSTATE_D1;
2284 result = PCI_POWERSTATE_D2;
2287 result = PCI_POWERSTATE_D3;
2290 result = PCI_POWERSTATE_UNKNOWN;
2294 /* No support, device is always at D0 */
2295 result = PCI_POWERSTATE_D0;
2301 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the PCI command register. */
2304 static __inline void
2305 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2309 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2311 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the PCI command register. */
2314 static __inline void
2315 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2319 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2321 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable bus mastering (DMA) for 'child' via the command register. */
2325 pci_enable_busmaster_method(device_t dev, device_t child)
2327 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable bus mastering (DMA) for 'child' via the command register. */
2332 pci_disable_busmaster_method(device_t dev, device_t child)
2334 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable decoding of I/O port or memory space (per 'space') in the
 * command register.
 */
2339 pci_enable_io_method(device_t dev, device_t child, int space)
2344 case SYS_RES_IOPORT:
2345 bit = PCIM_CMD_PORTEN;
2347 case SYS_RES_MEMORY:
2348 bit = PCIM_CMD_MEMEN;
2353 pci_set_command_bit(dev, child, bit);
/*
 * Disable decoding of I/O port or memory space (per 'space') in the
 * command register.
 */
2358 pci_disable_io_method(device_t dev, device_t child, int space)
2363 case SYS_RES_IOPORT:
2364 bit = PCIM_CMD_PORTEN;
2366 case SYS_RES_MEMORY:
2367 bit = PCIM_CMD_MEMEN;
2372 pci_clear_command_bit(dev, child, bit);
2377 * New style pci driver. Parent device is either a pci-host-bridge or a
2378 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a device's config-space summary (IDs, location, class, command/
 * status, timing, interrupt, power, MSI and MSI-X capabilities) to the
 * console.  Diagnostic output only; no state is modified.
 */
2382 pci_print_verbose(struct pci_devinfo *dinfo)
2386 pcicfgregs *cfg = &dinfo->cfg;
2388 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2389 cfg->vendor, cfg->device, cfg->revid);
2390 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2391 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2392 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2393 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2395 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2396 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2397 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2398 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2399 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2400 if (cfg->intpin > 0)
2401 printf("\tintpin=%c, irq=%d\n",
2402 cfg->intpin +'a' -1, cfg->intline);
2403 if (cfg->pp.pp_cap) {
2406 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2407 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2408 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2409 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2410 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2411 status & PCIM_PSTAT_DMASK);
2413 if (cfg->msi.msi_location) {
2416 ctrl = cfg->msi.msi_ctrl;
2417 printf("\tMSI supports %d message%s%s%s\n",
2418 cfg->msi.msi_msgnum,
2419 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2420 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2421 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2423 if (cfg->msix.msix_location) {
2424 printf("\tMSI-X supports %d message%s ",
2425 cfg->msix.msix_msgnum,
2426 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2427 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2428 printf("in map 0x%x\n",
2429 cfg->msix.msix_table_bar);
2431 printf("in maps 0x%x and 0x%x\n",
2432 cfg->msix.msix_table_bar,
2433 cfg->msix.msix_pba_bar);
/* Return non-zero if I/O port decoding is enabled in the command reg. */
2439 pci_porten(device_t dev)
2441 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return non-zero if memory decoding is enabled in the command reg. */
2445 pci_memen(device_t dev)
2447 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Probe BAR 'reg': return its current value in *mapp and the sizing
 * test value (all-ones write-back) in *testvalp.  Decoding is disabled
 * around the probe and the original BAR value is always restored.
 */
2451 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2453 struct pci_devinfo *dinfo;
2454 pci_addr_t map, testval;
2459 * The device ROM BAR is special. It is always a 32-bit
2460 * memory BAR. Bit 0 is special and should not be set when
2463 dinfo = device_get_ivars(dev);
2464 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2465 map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps the ROM-enable bit (bit 0) clear while sizing. */
2466 pci_write_config(dev, reg, 0xfffffffe, 4);
2467 testval = pci_read_config(dev, reg, 4);
2468 pci_write_config(dev, reg, map, 4);
2470 *testvalp = testval;
2474 map = pci_read_config(dev, reg, 4);
2475 ln2range = pci_maprange(map);
/* 64-bit BARs occupy two consecutive dwords. */
2477 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2480 * Disable decoding via the command register before
2481 * determining the BAR's length since we will be placing it in
2484 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2485 pci_write_config(dev, PCIR_COMMAND,
2486 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2489 * Determine the BAR's length by writing all 1's. The bottom
2490 * log_2(size) bits of the BAR will stick as 0 when we read
2493 pci_write_config(dev, reg, 0xffffffff, 4);
2494 testval = pci_read_config(dev, reg, 4);
2495 if (ln2range == 64) {
2496 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2497 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2501 * Restore the original value of the BAR. We may have reprogrammed
2502 * the BAR of the low-level console device and when booting verbose,
2503 * we need the console device addressable.
2505 pci_write_config(dev, reg, map, 4);
2507 pci_write_config(dev, reg + 4, map >> 32, 4);
2508 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2511 *testvalp = testval;
/*
 * Program BAR 'pm' with 'base' (both dwords for 64-bit BARs) and
 * refresh pm_value from the hardware.
 */
2515 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2517 struct pci_devinfo *dinfo;
2520 /* The device ROM BAR is always a 32-bit memory BAR. */
2521 dinfo = device_get_ivars(dev);
2522 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2525 ln2range = pci_maprange(pm->pm_value);
2526 pci_write_config(dev, pm->pm_reg, base, 4);
2528 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2529 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2531 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2532 pm->pm_reg + 4, 4) << 32;
/*
 * Look up the pci_map record for BAR register offset 'reg' on the
 * device's maps list.
 */
2536 pci_find_bar(device_t dev, int reg)
2538 struct pci_devinfo *dinfo;
2541 dinfo = device_get_ivars(dev);
2542 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2543 if (pm->pm_reg == reg)
/*
 * Return whether BAR 'pm' is currently decoded: a ROM BAR needs its
 * enable bit plus memory decoding; other BARs need the matching
 * memory/port decode bit in the command register.
 */
2550 pci_bar_enabled(device_t dev, struct pci_map *pm)
2552 struct pci_devinfo *dinfo;
2555 dinfo = device_get_ivars(dev);
2556 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2557 !(pm->pm_value & PCIM_BIOS_ENABLE))
2559 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2560 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2561 return ((cmd & PCIM_CMD_MEMEN) != 0);
2563 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Record a BAR (register offset, raw value, log2 size) on the device's
 * maps list, kept sorted by register offset.  Returns the new record.
 */
2566 static struct pci_map *
2567 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2569 struct pci_devinfo *dinfo;
2570 struct pci_map *pm, *prev;
2572 dinfo = device_get_ivars(dev);
2573 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2575 pm->pm_value = value;
/* Find the insertion point that keeps the list ordered by pm_reg. */
2577 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2578 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2580 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2581 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2585 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2587 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every recorded BAR value to the hardware (e.g. after resume
 * or a bus reset).  ROM BARs are skipped per the visible check.
 */
2592 pci_restore_bars(device_t dev)
2594 struct pci_devinfo *dinfo;
2598 dinfo = device_get_ivars(dev);
2599 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2600 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2603 ln2range = pci_maprange(pm->pm_value);
2604 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2606 pci_write_config(dev, pm->pm_reg + 4,
2607 pm->pm_value >> 32, 4);
2612 * Add a resource based on a pci map register. Return 1 if the map
2613 * register is a 32bit map register or 2 if it is a 64bit register.
2616 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2617 int force, int prefetch)
2620 pci_addr_t base, map, testval;
2621 pci_addr_t start, end, count;
2622 int barlen, basezero, maprange, mapsize, type;
2624 struct resource *res;
2627 * The BAR may already exist if the device is a CardBus card
2628 * whose CIS is stored in this BAR.
2630 pm = pci_find_bar(dev, reg);
2632 maprange = pci_maprange(pm->pm_value);
2633 barlen = maprange == 64 ? 2 : 1;
/* Size the BAR by writing all-ones and reading back (pci_read_bar). */
2637 pci_read_bar(dev, reg, &map, &testval);
2638 if (PCI_BAR_MEM(map)) {
2639 type = SYS_RES_MEMORY;
2640 if (map & PCIM_BAR_MEM_PREFETCH)
2643 type = SYS_RES_IOPORT;
2644 mapsize = pci_mapsize(testval);
2645 base = pci_mapbase(map);
2646 #ifdef __PCI_BAR_ZERO_VALID
2649 basezero = base == 0;
2651 maprange = pci_maprange(map);
2652 barlen = maprange == 64 ? 2 : 1;
2655 * For I/O registers, if bottom bit is set, and the next bit up
2656 * isn't clear, we know we have a BAR that doesn't conform to the
2657 * spec, so ignore it. Also, sanity check the size of the data
2658 * areas to the type of memory involved. Memory must be at least
2659 * 16 bytes in size, while I/O ranges must be at least 4.
2661 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2663 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2664 (type == SYS_RES_IOPORT && mapsize < 2))
2667 /* Save a record of this BAR. */
2668 pm = pci_add_bar(dev, reg, map, mapsize);
2670 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2671 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2672 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2673 printf(", port disabled\n");
2674 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2675 printf(", memory disabled\n");
2677 printf(", enabled\n");
2681 * If base is 0, then we have problems if this architecture does
2682 * not allow that. It is best to ignore such entries for the
2683 * moment. These will be allocated later if the driver specifically
2684 * requests them. However, some removable busses look better when
2685 * all resources are allocated, so allow '0' to be overridden.
2687 * Similarly treat maps whose values is the same as the test value
2688 * read back. These maps have had all f's written to them by the
2689 * BIOS in an attempt to disable the resources.
2691 if (!force && (basezero || map == testval))
2693 if ((u_long)base != base) {
2695 "pci%d:%d:%d:%d bar %#x too many address bits",
2696 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2697 pci_get_function(dev), reg);
2702 * This code theoretically does the right thing, but has
2703 * undesirable side effects in some cases where peripherals
2704 * respond oddly to having these bits enabled. Let the user
2705 * be able to turn them off (since pci_enable_io_modes is 1 by
2708 if (pci_enable_io_modes) {
2709 /* Turn on resources that have been left off by a lazy BIOS */
2710 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2711 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2712 cmd |= PCIM_CMD_PORTEN;
2713 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2715 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2716 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2717 cmd |= PCIM_CMD_MEMEN;
2718 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2721 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2723 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2727 count = (pci_addr_t)1 << mapsize;
2728 if (basezero || base == pci_mapbase(testval)) {
2729 start = 0; /* Let the parent decide. */
2733 end = base + count - 1;
2735 resource_list_add(rl, type, reg, start, end, count);
2738 * Try to allocate the resource for this BAR from our parent
2739 * so that this resource range is already reserved. The
2740 * driver for this device will later inherit this resource in
2741 * pci_alloc_resource().
2743 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
2744 prefetch ? RF_PREFETCHABLE : 0);
2747 * If the allocation fails, clear the BAR and delete
2748 * the resource list entry to force
2749 * pci_alloc_resource() to allocate resources from the
2752 resource_list_delete(rl, type, reg);
2755 start = rman_get_start(res);
2756 rman_set_device(res, bus);
2758 pci_write_bar(dev, pm, start);
2763 * For ATA devices we need to decide early what addressing mode to use.
2764 * Legacy demands that the primary and secondary ATA ports sit on the
2765 * same addresses that old ISA hardware did. This dictates that we use
2766 * those addresses and ignore the BAR's if we cannot set PCI native
2770 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2771 uint32_t prefetchmask)
2774 int rid, type, progif;
2776 /* if this device supports PCI native addressing use it */
2777 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2778 if ((progif & 0x8a) == 0x8a) {
2779 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2780 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2781 printf("Trying ATA native PCI addressing mode\n");
2782 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2786 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2787 type = SYS_RES_IOPORT;
/*
 * Primary channel: native mode uses BAR(0)/BAR(1); legacy mode
 * hard-wires the ISA-compatible 0x1f0-0x1f7 and 0x3f6 ranges.
 */
2788 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2789 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2790 prefetchmask & (1 << 0));
2791 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2792 prefetchmask & (1 << 1));
2795 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2796 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7,
2798 rman_set_device(r, bus);
2800 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2801 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6,
2803 rman_set_device(r, bus);
/* Secondary channel: BAR(2)/BAR(3) or legacy 0x170-0x177 and 0x376. */
2805 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2806 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2807 prefetchmask & (1 << 2));
2808 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2809 prefetchmask & (1 << 3));
2812 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2813 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177,
2815 rman_set_device(r, bus);
2817 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2818 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376,
2820 rman_set_device(r, bus);
/* BAR(4)/BAR(5) (bus-master DMA etc.) are mapped in either mode. */
2822 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
2823 prefetchmask & (1 << 4));
2824 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
2825 prefetchmask & (1 << 5));
/*
 * Determine the IRQ for a device's INTx pin and record it as the rid-0
 * SYS_RES_IRQ resource.  Precedence: user tunable hw.pciD.B.S.INTx.irq,
 * then (depending on force_route) the intline register or a routing
 * request to the parent bridge.
 */
2829 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2831 struct pci_devinfo *dinfo = device_get_ivars(dev);
2832 pcicfgregs *cfg = &dinfo->cfg;
2833 char tunable_name[64];
2836 /* Has to have an intpin to have an interrupt. */
2837 if (cfg->intpin == 0)
2840 /* Let the user override the IRQ with a tunable. */
2841 irq = PCI_INVALID_IRQ;
2842 snprintf(tunable_name, sizeof(tunable_name),
2843 "hw.pci%d.%d.%d.INT%c.irq",
2844 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject out-of-range tunable values (valid IRQs are 1..254 here). */
2845 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2846 irq = PCI_INVALID_IRQ;
2849 * If we didn't get an IRQ via the tunable, then we either use the
2850 * IRQ value in the intline register or we ask the bus to route an
2851 * interrupt for us. If force_route is true, then we only use the
2852 * value in the intline register if the bus was unable to assign an
2855 if (!PCI_INTERRUPT_VALID(irq)) {
2856 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2857 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2858 if (!PCI_INTERRUPT_VALID(irq))
2862 /* If after all that we don't have an IRQ, just bail. */
2863 if (!PCI_INTERRUPT_VALID(irq))
2866 /* Update the config register if it changed. */
2867 if (irq != cfg->intline) {
2869 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2872 /* Add this IRQ as rid 0 interrupt resource. */
2873 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
2876 /* Perform early OHCI takeover from SMM. */
2878 ohci_early_takeover(device_t self)
2880 struct resource *res;
2886 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
2890 ctl = bus_read_4(res, OHCI_CONTROL);
/* OHCI_IR set means the BIOS/SMM still owns the controller. */
2891 if (ctl & OHCI_IR) {
2893 printf("ohci early: "
2894 "SMM active, request owner change\n");
2895 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
/* Poll up to 100 iterations for SMM to release ownership. */
2896 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
2898 ctl = bus_read_4(res, OHCI_CONTROL);
2900 if (ctl & OHCI_IR) {
2902 printf("ohci early: "
2903 "SMM does not respond, resetting\n");
/* SMM never answered: force the host controller into reset. */
2904 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
2906 /* Disable interrupts */
2907 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
2910 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
2913 /* Perform early UHCI takeover from SMM. */
2915 uhci_early_takeover(device_t self)
2917 struct resource *res;
2921 * Set the PIRQD enable bit and switch off all the others. We don't
2922 * want legacy support to interfere with us XXX Does this also mean
2923 * that the BIOS won't touch the keyboard anymore if it is connected
2924 * to the ports of the root hub?
2926 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
2928 /* Disable interrupts */
2929 rid = PCI_UHCI_BASE_REG;
2930 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
2932 bus_write_2(res, UHCI_INTR, 0);
2933 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
2937 /* Perform early EHCI takeover from SMM. */
2939 ehci_early_takeover(device_t self)
2941 struct resource *res;
2951 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
2955 cparams = bus_read_4(res, EHCI_HCCPARAMS);
2957 /* Synchronise with the BIOS if it owns the controller. */
/* Walk the extended-capability list looking for the LEGSUP entry. */
2958 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
2959 eecp = EHCI_EECP_NEXT(eec)) {
2960 eec = pci_read_config(self, eecp, 4);
2961 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
2964 bios_sem = pci_read_config(self, eecp +
2965 EHCI_LEGSUP_BIOS_SEM, 1);
2966 if (bios_sem == 0) {
2970 printf("ehci early: "
2971 "SMM active, request owner change\n");
/* Claim the OS-owned semaphore and wait for BIOS to drop its own. */
2973 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
2975 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
2977 bios_sem = pci_read_config(self, eecp +
2978 EHCI_LEGSUP_BIOS_SEM, 1);
2981 if (bios_sem != 0) {
2983 printf("ehci early: "
2984 "SMM does not respond\n");
2986 /* Disable interrupts */
2987 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
2988 bus_write_4(res, offs + EHCI_USBINTR, 0);
2990 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/*
 * Populate a new child's resource list: map its BARs (with special
 * handling for legacy-mode ATA), skip/add quirked registers, assign the
 * INTx interrupt, and perform early USB controller takeover from SMM.
 */
2994 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
2996 struct pci_devinfo *dinfo;
2998 struct resource_list *rl;
2999 const struct pci_quirk *q;
3003 dinfo = device_get_ivars(dev);
3005 rl = &dinfo->resources;
3006 devid = (cfg->device << 16) | cfg->vendor;
3008 /* ATA devices needs special map treatment */
3009 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3010 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3011 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3012 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3013 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3014 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* Note: pci_add_map() advances i by 1 or 2 (64-bit BARs use two regs). */
3016 for (i = 0; i < cfg->nummaps;) {
3018 * Skip quirked resources.
3020 for (q = &pci_quirks[0]; q->devid != 0; q++)
3021 if (q->devid == devid &&
3022 q->type == PCI_QUIRK_UNMAP_REG &&
3023 q->arg1 == PCIR_BAR(i))
3025 if (q->devid != 0) {
3029 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3030 prefetchmask & (1 << i));
3034 * Add additional, quirked resources.
3036 for (q = &pci_quirks[0]; q->devid != 0; q++)
3037 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3038 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3040 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3041 #ifdef __PCI_REROUTE_INTERRUPT
3043 * Try to re-route interrupts. Sometimes the BIOS or
3044 * firmware may leave bogus values in these registers.
3045 * If the re-route fails, then just stick with what we
3048 pci_assign_interrupt(bus, dev, 1);
3050 pci_assign_interrupt(bus, dev, 0);
/* Wrest USB controllers away from BIOS/SMM before drivers attach. */
3054 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3055 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3056 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3057 ehci_early_takeover(dev);
3058 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3059 ohci_early_takeover(dev);
3060 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3061 uhci_early_takeover(dev);
/*
 * Enumerate all slots/functions on bus 'busno' in 'domain' via the parent
 * bridge's config accessors and add a child device for each function found.
 * Non-multifunction devices only get function 0 scanned.
 */
3066 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3068 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3069 device_t pcib = device_get_parent(dev);
3070 struct pci_devinfo *dinfo;
3072 int s, f, pcifunchigh;
3075 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3076 ("dinfo_size too small"));
3077 maxslots = PCIB_MAXSLOTS(pcib);
3078 for (s = 0; s <= maxslots; s++) {
3082 hdrtype = REG(PCIR_HDRTYPE, 1);
/* Skip slots with an unrecognized header type. */
3083 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3085 if (hdrtype & PCIM_MFDEV)
3086 pcifunchigh = PCI_FUNCMAX;
3087 for (f = 0; f <= pcifunchigh; f++) {
3088 dinfo = pci_read_device(pcib, domain, busno, s, f,
3090 if (dinfo != NULL) {
3091 pci_add_child(dev, dinfo);
/*
 * Create the newbus child for one PCI function: attach the devinfo as
 * ivars, snapshot then restore its config space (saves power-state
 * defaults and re-applies config), print it verbosely, and reserve its
 * resources without forcing disabled BARs on.
 */
3099 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3101 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3102 device_set_ivars(dinfo->cfg.dev, dinfo);
3103 resource_list_init(&dinfo->resources);
3104 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3105 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3106 pci_print_verbose(dinfo);
3107 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/*
 * Generic PCI bus probe: claim the bus at low priority so more specific
 * subclassed drivers can outbid us.
 */
3111 pci_probe(device_t dev)
3114 device_set_desc(dev, "PCI bus");
3116 /* Allow other subclasses to override this driver. */
3117 return (BUS_PROBE_GENERIC);
/*
 * Attach work shared by all PCI bus drivers: report our domain and
 * physical bus number and set up the bus DMA tag.  With
 * PCI_DMA_BOUNDARY, the top-level PCI bus gets a tag that bounds
 * allocations at that boundary; nested buses inherit the parent's tag.
 */
3121 pci_attach_common(device_t dev)
3123 struct pci_softc *sc;
3125 #ifdef PCI_DMA_BOUNDARY
3126 int error, tag_valid;
3129 sc = device_get_softc(dev);
3130 domain = pcib_get_domain(dev);
3131 busno = pcib_get_bus(dev);
3133 device_printf(dev, "domain=%d, physical bus=%d\n",
3135 #ifdef PCI_DMA_BOUNDARY
/* Only create the bounded tag when our grandparent isn't itself pci. */
3137 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3138 devclass_find("pci")) {
3139 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3140 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3141 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3142 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3144 device_printf(dev, "Failed to create DMA tag: %d\n",
3151 sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Standard PCI bus attach: do the common setup, then enumerate and
 * attach our children.
 */
3156 pci_attach(device_t dev)
3158 int busno, domain, error;
3160 error = pci_attach_common(dev);
3165 * Since there can be multiple independently numbered PCI
3166 * busses on systems with multiple PCI domains, we can't use
3167 * the unit number to decide which bus we are probing. We ask
3168 * the parent pcib what our domain and bus numbers are.
3170 domain = pcib_get_domain(dev);
3171 busno = pcib_get_bus(dev);
3172 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3173 return (bus_generic_attach(dev));
/*
 * Bus suspend method: save each child's config space, suspend the
 * children, then (when ACPI is present and power management is enabled)
 * place attached type-0 children into D3 or the ACPI-suggested state.
 */
3177 pci_suspend(device_t dev)
3179 int dstate, error, i, numdevs;
3180 device_t acpi_dev, child, *devlist;
3181 struct pci_devinfo *dinfo;
3184 * Save the PCI configuration space for each child and set the
3185 * device in the appropriate power state for this sleep state.
3188 if (pci_do_power_resume)
3189 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
3190 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3192 for (i = 0; i < numdevs; i++) {
3194 dinfo = device_get_ivars(child);
3195 pci_cfg_save(child, dinfo, 0);
3198 /* Suspend devices before potentially powering them down. */
3199 error = bus_generic_suspend(dev);
3201 free(devlist, M_TEMP);
3206 * Always set the device to D3. If ACPI suggests a different
3207 * power state, use it instead. If ACPI is not present, the
3208 * firmware is responsible for managing device power. Skip
3209 * children who aren't attached since they are powered down
3210 * separately. Only manage type 0 devices for now.
3212 for (i = 0; acpi_dev && i < numdevs; i++) {
3214 dinfo = (struct pci_devinfo *) device_get_ivars(child);
3215 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
3216 dstate = PCI_POWERSTATE_D3;
3217 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
3218 pci_set_powerstate(child, dstate);
3221 free(devlist, M_TEMP);
/*
 * Bus resume method: power each attached type-0 child back to D0 (with
 * ACPI notification when available), restore its saved config space,
 * then resume the children generically.
 */
3226 pci_resume(device_t dev)
3228 int i, numdevs, error;
3229 device_t acpi_dev, child, *devlist;
3230 struct pci_devinfo *dinfo;
3233 * Set each child to D0 and restore its PCI configuration space.
3236 if (pci_do_power_resume)
3237 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
3238 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3240 for (i = 0; i < numdevs; i++) {
3242 * Notify ACPI we're going to D0 but ignore the result. If
3243 * ACPI is not present, the firmware is responsible for
3244 * managing device power. Only manage type 0 devices for now.
3247 dinfo = (struct pci_devinfo *) device_get_ivars(child);
3248 if (acpi_dev && device_is_attached(child) &&
3249 dinfo->cfg.hdrtype == 0) {
3250 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
3251 pci_set_powerstate(child, PCI_POWERSTATE_D0);
3254 /* Now the device is powered up, restore its config space. */
3255 pci_cfg_restore(child, dinfo);
3257 free(devlist, M_TEMP);
3258 return (bus_generic_resume(dev));
/*
 * Locate the preloaded "pci_vendor_data" module (the flat-text vendor/
 * device name database) and record its address and size, terminating it
 * with a newline so the parser can't run off the end.
 */
3262 pci_load_vendor_data(void)
3264 caddr_t vendordata, info;
3266 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
3267 info = preload_search_info(vendordata, MODINFO_ADDR);
3268 pci_vendordata = *(char **)info;
3269 info = preload_search_info(vendordata, MODINFO_SIZE);
3270 pci_vendordata_size = *(size_t *)info;
3271 /* terminate the database */
3272 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Bus driver_added method: when a new PCI driver loads, give it a chance
 * to identify children, then reprobe every unattached child.  Config
 * space is restored before the probe and re-saved if attach fails.
 */
3277 pci_driver_added(device_t dev, driver_t *driver)
3282 struct pci_devinfo *dinfo;
3286 device_printf(dev, "driver added\n");
3287 DEVICE_IDENTIFY(driver, dev);
3288 if (device_get_children(dev, &devlist, &numdevs) != 0)
3290 for (i = 0; i < numdevs; i++) {
/* Only devices that currently lack a driver are reprobed. */
3292 if (device_get_state(child) != DS_NOTPRESENT)
3294 dinfo = device_get_ivars(child);
3295 pci_print_verbose(dinfo);
3297 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3298 pci_cfg_restore(child, dinfo);
3299 if (device_probe_and_attach(child) != 0)
3300 pci_cfg_save(child, dinfo, 1);
3302 free(devlist, M_TEMP);
/*
 * Bus setup_intr method.  After the generic setup succeeds, direct
 * children get interrupt-type handling: INTx has the command-register
 * disable bit cleared, while MSI/MSI-X vectors are mapped through the
 * parent bridge (PCIB_MAP_MSI) and enabled on first handler, with INTx
 * disabled.  On mapping failure the handler is torn down again.
 */
3306 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3307 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3309 struct pci_devinfo *dinfo;
3310 struct msix_table_entry *mte;
3311 struct msix_vector *mv;
3317 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3322 /* If this is not a direct child, just bail out. */
3323 if (device_get_parent(child) != dev) {
3328 rid = rman_get_rid(irq);
3330 /* Make sure that INTx is enabled */
3331 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3334 * Check to see if the interrupt is MSI or MSI-X.
3335 * Ask our parent to map the MSI and give
3336 * us the address and data register values.
3337 * If we fail for some reason, teardown the
3338 * interrupt handler.
3340 dinfo = device_get_ivars(child);
3341 if (dinfo->cfg.msi.msi_alloc > 0) {
3342 if (dinfo->cfg.msi.msi_addr == 0) {
3343 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3344 ("MSI has handlers, but vectors not mapped"));
3345 error = PCIB_MAP_MSI(device_get_parent(dev),
3346 child, rman_get_start(irq), &addr, &data);
3349 dinfo->cfg.msi.msi_addr = addr;
3350 dinfo->cfg.msi.msi_data = data;
/* First handler enables MSI in the device's capability registers. */
3352 if (dinfo->cfg.msi.msi_handlers == 0)
3353 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3354 dinfo->cfg.msi.msi_data);
3355 dinfo->cfg.msi.msi_handlers++;
3357 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3358 ("No MSI or MSI-X interrupts allocated"));
3359 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3360 ("MSI-X index too high"));
/* MSI-X: rid N maps to table entry N-1 and its assigned vector. */
3361 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3362 KASSERT(mte->mte_vector != 0, ("no message vector"));
3363 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3364 KASSERT(mv->mv_irq == rman_get_start(irq),
3366 if (mv->mv_address == 0) {
3367 KASSERT(mte->mte_handlers == 0,
3368 ("MSI-X table entry has handlers, but vector not mapped"));
3369 error = PCIB_MAP_MSI(device_get_parent(dev),
3370 child, rman_get_start(irq), &addr, &data);
3373 mv->mv_address = addr;
3376 if (mte->mte_handlers == 0) {
3377 pci_enable_msix(child, rid - 1, mv->mv_address,
3379 pci_unmask_msix(child, rid - 1);
3381 mte->mte_handlers++;
3384 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3385 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3388 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus teardown_intr method: undo pci_setup_intr().  For direct children,
 * decrement the MSI/MSI-X handler count and disable MSI or mask the
 * MSI-X entry when it reaches zero, then do the generic teardown.
 */
3398 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3401 struct msix_table_entry *mte;
3402 struct resource_list_entry *rle;
3403 struct pci_devinfo *dinfo;
3406 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3409 /* If this isn't a direct child, just bail out */
3410 if (device_get_parent(child) != dev)
3411 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3413 rid = rman_get_rid(irq);
3416 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3419 * Check to see if the interrupt is MSI or MSI-X. If so,
3420 * decrement the appropriate handlers count and mask the
3421 * MSI-X message, or disable MSI messages if the count
3424 dinfo = device_get_ivars(child);
3425 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3426 if (rle->res != irq)
3428 if (dinfo->cfg.msi.msi_alloc > 0) {
/* NOTE(review): message says "MSI-X" but this is the MSI branch. */
3429 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3430 ("MSI-X index too high"));
3431 if (dinfo->cfg.msi.msi_handlers == 0)
3433 dinfo->cfg.msi.msi_handlers--;
3434 if (dinfo->cfg.msi.msi_handlers == 0)
3435 pci_disable_msi(child);
3437 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3438 ("No MSI or MSI-X interrupts allocated"));
3439 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3440 ("MSI-X index too high"));
3441 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3442 if (mte->mte_handlers == 0)
3444 mte->mte_handlers--;
3445 if (mte->mte_handlers == 0)
3446 pci_mask_msix(child, rid - 1);
3449 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3452 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus print_child method: print a one-line summary of a child device —
 * its port/memory/IRQ resources, flags, and slot.function location.
 * Returns the number of characters printed.
 */
3457 pci_print_child(device_t dev, device_t child)
3459 struct pci_devinfo *dinfo;
3460 struct resource_list *rl;
3463 dinfo = device_get_ivars(child);
3464 rl = &dinfo->resources;
3466 retval += bus_print_child_header(dev, child);
3468 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3469 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3470 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3471 if (device_get_flags(dev))
3472 retval += printf(" flags %#x", device_get_flags(dev));
3474 retval += printf(" at device %d.%d", pci_get_slot(child),
3475 pci_get_function(child));
3477 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable description table used by
 * pci_probe_nomatch() to announce devices with no attached driver.
 * A subclass value of -1 names the class as a whole and must precede
 * that class's subclass entries.
 */
3487 } pci_nomatch_tab[] = {
3488 {PCIC_OLD, -1, "old"},
3489 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3490 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3491 {PCIC_STORAGE, -1, "mass storage"},
3492 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3493 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3494 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3495 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3496 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3497 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3498 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3499 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3500 {PCIC_STORAGE, PCIS_STORAGE_NVM, "NVM"},
3501 {PCIC_NETWORK, -1, "network"},
3502 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3503 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3504 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3505 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3506 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3507 {PCIC_DISPLAY, -1, "display"},
3508 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3509 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3510 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3511 {PCIC_MULTIMEDIA, -1, "multimedia"},
3512 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3513 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3514 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3515 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3516 {PCIC_MEMORY, -1, "memory"},
3517 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3518 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3519 {PCIC_BRIDGE, -1, "bridge"},
3520 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3521 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3522 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3523 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3524 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3525 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3526 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3527 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3528 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3529 {PCIC_SIMPLECOMM, -1, "simple comms"},
3530 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3531 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3532 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3533 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3534 {PCIC_BASEPERIPH, -1, "base peripheral"},
3535 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3536 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3537 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3538 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3539 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3540 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3541 {PCIC_INPUTDEV, -1, "input device"},
3542 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3543 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3544 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3545 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3546 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3547 {PCIC_DOCKING, -1, "docking station"},
3548 {PCIC_PROCESSOR, -1, "processor"},
3549 {PCIC_SERIALBUS, -1, "serial bus"},
3550 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3551 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3552 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3553 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3554 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3555 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3556 {PCIC_WIRELESS, -1, "wireless controller"},
3557 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3558 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3559 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3560 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3561 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3562 {PCIC_SATCOM, -1, "satellite communication"},
3563 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3564 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3565 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3566 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3567 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3568 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3569 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3570 {PCIC_DASP, -1, "dasp"},
3571 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Bus probe_nomatch method: describe a child no driver claimed, using the
 * loaded vendor database if available, otherwise the class/subclass table
 * above, then save its config space and power-manage it (flag 1).
 */
3576 pci_probe_nomatch(device_t dev, device_t child)
3579 char *cp, *scp, *device;
3582 * Look for a listing for this device in a loaded device database.
3584 if ((device = pci_describe_device(child)) != NULL) {
3585 device_printf(dev, "<%s>", device);
3586 free(device, M_DEVBUF);
3589 * Scan the class/subclass descriptions for a general
3594 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3595 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* -1 matches the class; an exact subclass refines the text. */
3596 if (pci_nomatch_tab[i].subclass == -1) {
3597 cp = pci_nomatch_tab[i].desc;
3598 } else if (pci_nomatch_tab[i].subclass ==
3599 pci_get_subclass(child)) {
3600 scp = pci_nomatch_tab[i].desc;
3604 device_printf(dev, "<%s%s%s>",
3606 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3609 printf(" at device %d.%d (no driver attached)\n",
3610 pci_get_slot(child), pci_get_function(child));
3611 pci_cfg_save(child, device_get_ivars(child), 1);
3611 pci_cfg_save(child, device_get_ivars(child), 1);
3616 * Parse the PCI device database, if loaded, and return a pointer to a
3617 * description of the device.
3619 * The database is flat text formatted as follows:
3621 * Any line not in a valid format is ignored.
3622 * Lines are terminated with newline '\n' characters.
3624 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3627 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3628 * - devices cannot be listed without a corresponding VENDOR line.
3629 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3630 * another TAB, then the device name.
3634 * Assuming (ptr) points to the beginning of a line in the database,
3635 * return the vendor or device and description of the next entry.
3636 * The value of (vendor) or (device) inappropriate for the entry type
3637 * is set to -1. Returns nonzero at the end of the database.
3639 * Note that this is slightly unrobust in the face of corrupt data;
3640 * we attempt to safeguard against this by spamming the end of the
3641 * database with a newline when we initialise.
/*
 * Parse one line of the vendor database at *ptr: fill in either *vendor
 * or *device (the other is left for the caller; see the format comment
 * above) plus *desc, and advance *ptr past the line.  Bounds all scans
 * by the remaining database size.
 */
3644 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3653 left = pci_vendordata_size - (cp - pci_vendordata);
/* A line starting at column 0 is a vendor entry... */
3661 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* ...while a TAB-indented line is a device entry. */
3665 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3668 /* skip to next line */
3669 while (*cp != '\n' && left > 0) {
3678 /* skip to next line */
3679 while (*cp != '\n' && left > 0) {
3683 if (*cp == '\n' && left > 0)
/*
 * Build a malloc'd "vendor, device" description string for 'dev' from the
 * loaded vendor database, or NULL if the database is absent or the vendor
 * is unknown.  When the device id is not listed, its hex id is used.
 * Caller frees the returned string (M_DEVBUF).
 */
3690 pci_describe_device(device_t dev)
3693 char *desc, *vp, *dp, *line;
3695 desc = vp = dp = NULL;
3698 * If we have no vendor data, we can't do anything.
3700 if (pci_vendordata == NULL)
3704 * Scan the vendor data looking for this device
3706 line = pci_vendordata;
3707 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3710 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3712 if (vendor == pci_get_vendor(dev))
3715 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3718 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3726 if (device == pci_get_device(dev))
/* Device id not in the database: fall back to its hex value. */
3730 snprintf(dp, 80, "0x%x", pci_get_device(dev));
3731 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3733 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus read_ivar method: return cached PCI config values (vendor, device,
 * class, interrupt pin/line, geographic address, timing registers, ...)
 * from the child's pci_devinfo.
 */
3743 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3745 struct pci_devinfo *dinfo;
3748 dinfo = device_get_ivars(child);
3752 case PCI_IVAR_ETHADDR:
3754 * The generic accessor doesn't deal with failure, so
3755 * we set the return value, then return an error.
3757 *((uint8_t **) result) = NULL;
3759 case PCI_IVAR_SUBVENDOR:
3760 *result = cfg->subvendor;
3762 case PCI_IVAR_SUBDEVICE:
3763 *result = cfg->subdevice;
3765 case PCI_IVAR_VENDOR:
3766 *result = cfg->vendor;
3768 case PCI_IVAR_DEVICE:
3769 *result = cfg->device;
3771 case PCI_IVAR_DEVID:
3772 *result = (cfg->device << 16) | cfg->vendor;
3774 case PCI_IVAR_CLASS:
3775 *result = cfg->baseclass;
3777 case PCI_IVAR_SUBCLASS:
3778 *result = cfg->subclass;
3780 case PCI_IVAR_PROGIF:
3781 *result = cfg->progif;
3783 case PCI_IVAR_REVID:
3784 *result = cfg->revid;
3786 case PCI_IVAR_INTPIN:
3787 *result = cfg->intpin;
3790 *result = cfg->intline;
3792 case PCI_IVAR_DOMAIN:
3793 *result = cfg->domain;
3799 *result = cfg->slot;
3801 case PCI_IVAR_FUNCTION:
3802 *result = cfg->func;
3804 case PCI_IVAR_CMDREG:
3805 *result = cfg->cmdreg;
3807 case PCI_IVAR_CACHELNSZ:
3808 *result = cfg->cachelnsz;
3810 case PCI_IVAR_MINGNT:
3811 *result = cfg->mingnt;
3813 case PCI_IVAR_MAXLAT:
3814 *result = cfg->maxlat;
3816 case PCI_IVAR_LATTIMER:
3817 *result = cfg->lattimer;
/*
 * Bus write_ivar method: only the interrupt pin is writable; all other
 * identification/location ivars are read-only and rejected with EINVAL.
 */
3826 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3828 struct pci_devinfo *dinfo;
3830 dinfo = device_get_ivars(child);
3833 case PCI_IVAR_INTPIN:
3834 dinfo->cfg.intpin = value;
3836 case PCI_IVAR_ETHADDR:
3837 case PCI_IVAR_SUBVENDOR:
3838 case PCI_IVAR_SUBDEVICE:
3839 case PCI_IVAR_VENDOR:
3840 case PCI_IVAR_DEVICE:
3841 case PCI_IVAR_DEVID:
3842 case PCI_IVAR_CLASS:
3843 case PCI_IVAR_SUBCLASS:
3844 case PCI_IVAR_PROGIF:
3845 case PCI_IVAR_REVID:
3847 case PCI_IVAR_DOMAIN:
3850 case PCI_IVAR_FUNCTION:
3851 return (EINVAL); /* disallow for now */
3858 #include "opt_ddb.h"
3860 #include <ddb/ddb.h>
3861 #include <sys/cons.h>
3864 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (selector, class, card/chip IDs,
 * revision, header type).
 *
 * NOTE(review): sampled excerpt — declarations of 'name' and 'p'
 * (presumably a struct pci_conf * aliasing dinfo->conf) are not visible.
 */
3867 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3869 struct pci_devinfo *dinfo;
3870 struct devlist *devlist_head;
3873 int i, error, none_count;
3876 /* get the head of the device queue */
3877 devlist_head = &pci_devq;
3880 * Go through the list of devices and print out devices
/* Bounded by pci_numdevs and db_pager_quit so the pager can abort early. */
3882 for (error = 0, i = 0,
3883 dinfo = STAILQ_FIRST(devlist_head);
3884 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3885 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3887 /* Populate pd_name and pd_unit */
3890 name = device_get_name(dinfo->cfg.dev);
3893 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3894 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
/* Devices with no attached driver are printed as "none". */
3895 (name && *name) ? name : "none",
3896 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3898 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3899 p->pc_sel.pc_func, (p->pc_class << 16) |
3900 (p->pc_subclass << 8) | p->pc_progif,
3901 (p->pc_subdevice << 16) | p->pc_subvendor,
3902 (p->pc_device << 16) | p->pc_vendor,
3903 p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate the resource backing a PCI BAR.  Sizes the BAR (or
 * reuses the cached size for a previously failed BAR), validates that
 * the requested resource type matches the BAR type, allocates from the
 * parent bus, records the result in the child's resource list, and
 * programs the BAR with the assigned address.
 *
 * NOTE(review): sampled excerpt — several declarations (pm, mapsize),
 * if/else scaffolding, error-path returns and closing braces are not
 * visible here.
 */
3908 static struct resource *
3909 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3910 u_long start, u_long end, u_long count, u_int flags)
3912 struct pci_devinfo *dinfo = device_get_ivars(child);
3913 struct resource_list *rl = &dinfo->resources;
3914 struct resource_list_entry *rle;
3915 struct resource *res;
3917 pci_addr_t map, testval;
3921 pm = pci_find_bar(child, *rid);
3923 /* This is a BAR that we failed to allocate earlier. */
/* Reuse the size probed on the earlier attempt instead of re-probing. */
3924 mapsize = pm->pm_size;
3928 * Weed out the bogons, and figure out how large the
3929 * BAR/map is. BARs that read back 0 here are bogus
3930 * and unimplemented. Note: atapci in legacy mode are
3931 * special and handled elsewhere in the code. If you
3932 * have a atapci device in legacy mode and it fails
3933 * here, that other code is broken.
3935 pci_read_bar(child, *rid, &map, &testval);
3938 * Determine the size of the BAR and ignore BARs with a size
3939 * of 0. Device ROM BARs use a different mask value.
3941 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
3942 mapsize = pci_romsize(testval);
3944 mapsize = pci_mapsize(testval);
/* First time seeing this BAR: cache its address/size for later reuse. */
3947 pm = pci_add_bar(child, *rid, map, mapsize);
3950 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
3951 if (type != SYS_RES_MEMORY) {
3954 "child %s requested type %d for rid %#x,"
3955 " but the BAR says it is an memio\n",
3956 device_get_nameunit(child), type, *rid);
3960 if (type != SYS_RES_IOPORT) {
3963 "child %s requested type %d for rid %#x,"
3964 " but the BAR says it is an ioport\n",
3965 device_get_nameunit(child), type, *rid);
3971 * For real BARs, we need to override the size that
3972 * the driver requests, because that's what the BAR
3973 * actually uses and we would otherwise have a
3974 * situation where we might allocate the excess to
3975 * another driver, which won't work.
/* mapsize is log2 of the BAR size, hence the shift. */
3977 count = (pci_addr_t)1 << mapsize;
3978 if (RF_ALIGNMENT(flags) < mapsize)
3979 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3980 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
3981 flags |= RF_PREFETCHABLE;
3984 * Allocate enough resource, and then write back the
3985 * appropriate BAR for that resource.
/* Allocate inactive; the child activates it explicitly if it needs to. */
3987 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
3988 start, end, count, flags & ~RF_ACTIVE);
3990 device_printf(child,
3991 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3992 count, *rid, type, start, end);
/* The bus owns the resource until the child claims it. */
3995 rman_set_device(res, dev);
3996 resource_list_add(rl, type, *rid, start, end, count);
3997 rle = resource_list_find(rl, type, *rid);
3999 panic("pci_alloc_map: unexpectedly can't find resource.");
4001 rle->start = rman_get_start(res);
4002 rle->end = rman_get_end(res);
4005 device_printf(child,
4006 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4007 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address the rman actually handed out. */
4008 map = rman_get_start(res);
4009 pci_write_bar(child, pm, map);
/*
 * Newbus BUS_ALLOC_RESOURCE method for the PCI bus.  Forwards requests
 * from grandchildren upward, lazily routes interrupts, treats bridge
 * window registers as pass-through, lazily sizes/allocates BARs via
 * pci_alloc_map(), and hands bus-owned BAR resources to the child.
 *
 * NOTE(review): sampled excerpt — the switch(type) header, SYS_RES_IRQ
 * case label, several condition tails and closing braces are not visible.
 */
4015 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4016 u_long start, u_long end, u_long count, u_int flags)
4018 struct pci_devinfo *dinfo = device_get_ivars(child);
4019 struct resource_list *rl = &dinfo->resources;
4020 struct resource_list_entry *rle;
4021 struct resource *res;
4022 pcicfgregs *cfg = &dinfo->cfg;
/* Requests from grandchildren are simply passed up the tree. */
4024 if (device_get_parent(child) != dev)
4025 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4026 type, rid, start, end, count, flags));
4029 * Perform lazy resource allocation
4034 * Can't alloc legacy interrupt once MSI messages have
/* rid 0 is the legacy INTx interrupt; forbidden while MSI/MSI-X active. */
4037 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4038 cfg->msix.msix_alloc > 0))
4042 * If the child device doesn't have an interrupt
4043 * routed and is deserving of an interrupt, try to
/* Route the interrupt on first allocation if the BIOS didn't. */
4046 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4048 pci_assign_interrupt(dev, child, 0);
4050 case SYS_RES_IOPORT:
4051 case SYS_RES_MEMORY:
4054 * PCI-PCI bridge I/O window resources are not BARs.
4055 * For those allocations just pass the request up the
4058 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4060 case PCIR_IOBASEL_1:
4061 case PCIR_MEMBASE_1:
4062 case PCIR_PMBASEL_1:
4064 * XXX: Should we bother creating a resource
4067 return (bus_generic_alloc_resource(dev, child,
4068 type, rid, start, end, count, flags));
4072 /* Allocate resources for this BAR if needed. */
4073 rle = resource_list_find(rl, type, *rid);
4075 res = pci_alloc_map(dev, child, type, rid, start, end,
/* Re-fetch: pci_alloc_map() added the entry to the list. */
4079 rle = resource_list_find(rl, type, *rid);
4083 * If the resource belongs to the bus, then give it to
4084 * the child. We need to activate it if requested
4085 * since the bus always allocates inactive resources.
4087 if (rle != NULL && rle->res != NULL &&
4088 rman_get_device(rle->res) == dev) {
4090 device_printf(child,
4091 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
4092 rman_get_size(rle->res), *rid, type,
4093 rman_get_start(rle->res));
/* Transfer ownership of the reserved BAR resource to the child. */
4094 rman_set_device(rle->res, child);
4095 if ((flags & RF_ACTIVE) &&
4096 bus_activate_resource(child, type, *rid,
/* Fallthrough path: ordinary resource-list allocation. */
4102 return (resource_list_alloc(rl, dev, child, type, rid,
4103 start, end, count, flags));
/*
 * Newbus BUS_RELEASE_RESOURCE method.  BAR resources are not truly
 * released: they are deactivated if needed and ownership is returned to
 * the bus so the BAR stays reserved for this device.
 *
 * NOTE(review): sampled excerpt — the switch(type) header, the struct
 * resource *r parameter line, error handling after deactivate, and the
 * early return for bus-owned BARs are not visible here.
 */
4107 pci_release_resource(device_t dev, device_t child, int type, int rid,
4112 if (device_get_parent(child) != dev)
4113 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4117 * For BARs we don't actually want to release the resource.
4118 * Instead, we deactivate the resource if needed and then give
4119 * ownership of the BAR back to the bus.
4122 case SYS_RES_IOPORT:
4123 case SYS_RES_MEMORY:
4124 if (rman_get_device(r) != child)
4126 if (rman_get_flags(r) & RF_ACTIVE) {
4127 error = bus_deactivate_resource(child, type, rid, r);
/* Give the BAR back to the bus instead of freeing it. */
4131 rman_set_device(r, dev);
4134 return (bus_generic_rl_release_resource(dev, child, type, rid, r));
/*
 * Newbus BUS_ACTIVATE_RESOURCE method.  After generic activation,
 * enables decoding for the resource: ROM BARs get PCIM_BIOS_ENABLE set
 * in the BAR itself, and I/O/memory resources get the corresponding
 * command-register enable via PCI_ENABLE_IO().
 *
 * NOTE(review): sampled excerpt — the error-check after the generic
 * call, the switch(type) header and closing braces are not visible.
 */
4138 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4141 struct pci_devinfo *dinfo;
4144 error = bus_generic_activate_resource(dev, child, type, rid, r);
4148 /* Enable decoding in the command register when activating BARs. */
4149 if (device_get_parent(child) == dev) {
4150 /* Device ROMs need their decoding explicitly enabled. */
4151 dinfo = device_get_ivars(child);
4152 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4153 pci_write_bar(child, pci_find_bar(child, rid),
4154 rman_get_start(r) | PCIM_BIOS_ENABLE)
4156 case SYS_RES_IOPORT:
4157 case SYS_RES_MEMORY:
4158 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Newbus BUS_DEACTIVATE_RESOURCE method.  Mirror of
 * pci_activate_resource(): after generic deactivation, ROM BARs have
 * their decode enable cleared by rewriting the BAR.
 *
 * NOTE(review): sampled excerpt — the error check and the argument that
 * clears PCIM_BIOS_ENABLE in the pci_write_bar() call are not visible.
 */
4166 pci_deactivate_resource(device_t dev, device_t child, int type,
4167 int rid, struct resource *r)
4169 struct pci_devinfo *dinfo;
4172 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4176 /* Disable decoding for device ROMs. */
4177 if (device_get_parent(child) == dev) {
4178 dinfo = device_get_ivars(child);
4179 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4180 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Remove a resource from a child's resource list.  Refuses (with a
 * diagnostic) if the child still owns or has activated the resource;
 * otherwise clears the BAR so the device stops decoding, releases the
 * resource and deletes the list entry.
 *
 * NOTE(review): sampled excerpt — the NULL check on rle, the rle->res
 * check, the switch(type) header and closing braces are not visible.
 */
4187 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4189 struct pci_devinfo *dinfo;
4190 struct resource_list *rl;
4191 struct resource_list_entry *rle;
/* Only handle direct children; others have no entry in our list. */
4193 if (device_get_parent(child) != dev)
4196 dinfo = device_get_ivars(child);
4197 rl = &dinfo->resources;
4198 rle = resource_list_find(rl, type, rid);
/* Can't delete a resource the child still owns or has active. */
4203 if (rman_get_device(rle->res) != dev ||
4204 rman_get_flags(rle->res) & RF_ACTIVE) {
4205 device_printf(dev, "delete_resource: "
4206 "Resource still owned by child, oops. "
4207 "(type=%d, rid=%d, addr=%lx)\n",
4208 rle->type, rle->rid,
4209 rman_get_start(rle->res));
4213 #ifndef __PCI_BAR_ZERO_VALID
4215 * If this is a BAR, clear the BAR so it stops
4216 * decoding before releasing the resource.
4219 case SYS_RES_IOPORT:
4220 case SYS_RES_MEMORY:
/* Writing 0 disables decode on platforms where BAR 0 is invalid. */
4221 pci_write_bar(child, pci_find_bar(child, rid), 0);
4225 bus_release_resource(dev, type, rid, rle->res);
4227 resource_list_delete(rl, type, rid);
/*
 * Newbus BUS_GET_RESOURCE_LIST method: return the per-device resource
 * list stored in the child's ivars.
 */
4230 struct resource_list *
4231 pci_get_resource_list (device_t dev, device_t child)
4233 struct pci_devinfo *dinfo = device_get_ivars(child);
4235 return (&dinfo->resources);
/*
 * Newbus BUS_GET_DMA_TAG method: all children share the bus softc's
 * DMA tag.  (Return type bus_dma_tag_t is on a line not visible here.)
 */
4239 pci_get_dma_tag(device_t bus, device_t dev)
4241 struct pci_softc *sc = device_get_softc(bus);
4243 return (sc->sc_dma_tag);
/*
 * PCI_READ_CONFIG method: read 'width' bytes at config-space offset
 * 'reg' of the child, delegating to the parent bridge's PCIB method
 * using the cached bus/slot/function selector.
 */
4247 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4249 struct pci_devinfo *dinfo = device_get_ivars(child);
4250 pcicfgregs *cfg = &dinfo->cfg;
4252 return (PCIB_READ_CONFIG(device_get_parent(dev),
4253 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * PCI_WRITE_CONFIG method: write 'val' ('width' bytes) at config-space
 * offset 'reg' of the child via the parent bridge's PCIB method.
 */
4257 pci_write_config_method(device_t dev, device_t child, int reg,
4258 uint32_t val, int width)
4260 struct pci_devinfo *dinfo = device_get_ivars(child);
4261 pcicfgregs *cfg = &dinfo->cfg;
4263 PCIB_WRITE_CONFIG(device_get_parent(dev),
4264 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * BUS_CHILD_LOCATION_STR method: format "slot=%d function=%d" into buf
 * for devctl/devinfo consumers.
 */
4268 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4272 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4273 pci_get_function(child));
/*
 * BUS_CHILD_PNPINFO_STR method: format the child's PnP identity
 * (vendor/device/subvendor/subdevice/class triple) into buf, used for
 * driver matching by devd and friends.
 *
 * NOTE(review): sampled excerpt — the cfg assignment and the final
 * cfg->progif argument of the snprintf are not visible here.
 */
4278 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4281 struct pci_devinfo *dinfo;
4284 dinfo = device_get_ivars(child);
4286 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4287 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4288 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * PCI_ASSIGN_INTERRUPT method: ask the parent bridge to route the
 * child's INTx pin to an IRQ.  (The cfg->intpin argument is presumably
 * on the continuation line not visible here.)
 */
4294 pci_assign_interrupt_method(device_t dev, device_t child)
4296 struct pci_devinfo *dinfo = device_get_ivars(child);
4297 pcicfgregs *cfg = &dinfo->cfg;
4299 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev pci control node and load the vendor-description
 * data; on unload, destroy the node.
 *
 * NOTE(review): sampled excerpt — the switch(what) header, the
 * MOD_LOAD/MOD_UNLOAD case labels and the return are not visible here.
 */
4304 pci_modevent(module_t mod, int what, void *arg)
4306 static struct cdev *pci_cdev;
4310 STAILQ_INIT(&pci_devq);
4312 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4314 pci_load_vendor_data();
4318 destroy_dev(pci_cdev);
/*
 * Restore a type-0 device's config header from the cached copy saved by
 * pci_cfg_save(), typically on resume: power up to D0 first (so BARs
 * survive the write), then rewrite BARs and the writable header fields,
 * and finally re-arm MSI/MSI-X if the device uses them.
 */
4326 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
4330 * Only do header type 0 devices. Type 1 devices are bridges,
4331 * which we know need special treatment. Type 2 devices are
4332 * cardbus bridges which also require special treatment.
4333 * Other types are unknown, and we err on the side of safety
4336 if (dinfo->cfg.hdrtype != 0)
4340 * Restore the device to full power mode. We must do this
4341 * before we restore the registers because moving from D3 to
4342 * D0 will cause the chip's BARs and some other registers to
4343 * be reset to some unknown power on reset values. Cut down
4344 * the noise on boot by doing nothing if we are already in
4347 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
4348 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4349 pci_restore_bars(dev);
/* Rewrite the normally-writable type-0 header fields from the cache. */
4350 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
4351 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
4352 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
4353 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
4354 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
4355 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
4356 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
4357 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
4358 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
4360 /* Restore MSI and MSI-X configurations if they are present. */
4361 if (dinfo->cfg.msi.msi_location != 0)
4362 pci_resume_msi(dev);
4363 if (dinfo->cfg.msix.msix_location != 0)
4364 pci_resume_msix(dev);
/*
 * Snapshot a type-0 device's writable config-header fields into the
 * cached pcicfgregs (so pci_cfg_restore() can replay them), and
 * optionally power the device down to D3 according to the
 * pci_do_power_nodriver policy when 'setstate' requests it.
 *
 * NOTE(review): sampled excerpt — declarations of cls/ps, the
 * setstate/device_is_attached gate before the power-down logic, break
 * statements between policy cases, and the closing brace are not
 * visible here.
 */
4368 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
4374 * Only do header type 0 devices. Type 1 devices are bridges, which
4375 * we know need special treatment. Type 2 devices are cardbus bridges
4376 * which also require special treatment. Other types are unknown, and
4377 * we err on the side of safety by ignoring them. Powering down
4378 * bridges should not be undertaken lightly.
4380 if (dinfo->cfg.hdrtype != 0)
4384 * Some drivers apparently write to these registers w/o updating our
4385 * cached copy. No harm happens if we update the copy, so do so here
4386 * so we can restore them. The COMMAND register is modified by the
4387 * bus w/o updating the cache. This should represent the normally
4388 * writable portion of the 'defined' part of type 0 headers. In
4389 * theory we also need to save/restore the PCI capability structures
4390 * we know about, but apart from power we don't know any that are
4393 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
4394 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4395 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
4396 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
4397 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
4398 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
4399 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
4400 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
4401 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
4402 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
4403 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
4404 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
4405 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
4406 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
4407 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
4410 * don't set the state for display devices, base peripherals and
4411 * memory devices since bad things happen when they are powered down.
4412 * We should (a) have drivers that can easily detach and (b) use
4413 * generic drivers for these devices so that some device actually
4414 * attaches. We need to make sure that when we implement (a) we don't
4415 * power the device down on a reattach.
4417 cls = pci_get_class(dev);
/* Policy knob: higher values power down progressively more classes. */
4420 switch (pci_do_power_nodriver)
4422 case 0: /* NO powerdown at all */
4424 case 1: /* Conservative about what to power down */
4425 if (cls == PCIC_STORAGE)
4428 case 2: /* Agressive about what to power down */
4429 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4430 cls == PCIC_BASEPERIPH)
4433 case 3: /* Power down everything */
4437 * PCI spec says we can only go into D3 state from D0 state.
4438 * Transition from D[12] into D0 before going to D3 state.
4440 ps = pci_get_powerstate(dev);
4441 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4442 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4443 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4444 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
4447 /* Wrapper APIs suitable for device driver use. */
/*
 * Driver-facing wrapper around pci_cfg_save(): snapshot the device's
 * config header without changing its power state (setstate == 0).
 */
4449 pci_save_state(device_t dev)
4451 struct pci_devinfo *dinfo;
4453 dinfo = device_get_ivars(dev);
4454 pci_cfg_save(dev, dinfo, 0);
4458 pci_restore_state(device_t dev)
4460 struct pci_devinfo *dinfo;
4462 dinfo = device_get_ivars(dev);
4463 pci_cfg_restore(dev, dinfo);