2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
/*
 * Is 'reg' the expansion-ROM BAR for this header type?  Type 0 devices
 * keep it at PCIR_BIOS; type 1 (PCI-PCI bridge) headers at PCIR_BIOS_1.
 * 'reg' is parenthesized so expression arguments expand safely.
 */
#define	PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && (reg) == PCIR_BIOS) || \
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && (reg) == PCIR_BIOS_1))
/*
 * Forward declarations for the static helpers below.
 * NOTE(review): several prototype continuation lines (second parameter
 * lines) were dropped from this copy -- confirm against upstream before
 * relying on these signatures.
 */
77 static int pci_has_quirk(uint32_t devid, int quirk);
78 static pci_addr_t pci_mapbase(uint64_t mapreg);
79 static const char *pci_maptype(uint64_t mapreg);
80 static int pci_mapsize(uint64_t testval);
81 static int pci_maprange(uint64_t mapreg);
82 static pci_addr_t pci_rombase(uint64_t mapreg);
83 static int pci_romsize(uint64_t testval);
84 static void pci_fixancient(pcicfgregs *cfg);
85 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
87 static int pci_porten(device_t dev);
88 static int pci_memen(device_t dev);
89 static void pci_assign_interrupt(device_t bus, device_t dev,
91 static int pci_add_map(device_t bus, device_t dev, int reg,
92 struct resource_list *rl, int force, int prefetch);
93 static int pci_probe(device_t dev);
94 static int pci_attach(device_t dev);
96 static int pci_detach(device_t dev);
98 static void pci_load_vendor_data(void);
99 static int pci_describe_parse_line(char **ptr, int *vendor,
100 int *device, char **desc);
101 static char *pci_describe_device(device_t dev);
102 static int pci_modevent(module_t mod, int what, void *arg);
103 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
105 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
106 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
107 int reg, uint32_t *data);
109 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
110 int reg, uint32_t data);
112 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
113 static void pci_disable_msi(device_t dev);
114 static void pci_enable_msi(device_t dev, uint64_t address,
116 static void pci_enable_msix(device_t dev, u_int index,
117 uint64_t address, uint32_t data);
118 static void pci_mask_msix(device_t dev, u_int index);
119 static void pci_unmask_msix(device_t dev, u_int index);
120 static int pci_msi_blacklisted(void);
121 static int pci_msix_blacklisted(void);
122 static void pci_resume_msi(device_t dev);
123 static void pci_resume_msix(device_t dev);
124 static int pci_remap_intr_method(device_t bus, device_t dev,
127 static uint16_t pci_get_rid_method(device_t dev, device_t child);
/*
 * kobj method table binding the newbus device/bus/pci interfaces to the
 * implementations in this file, followed by the driver/devclass/module
 * registration glue.
 */
129 static device_method_t pci_methods[] = {
130 /* Device interface */
131 DEVMETHOD(device_probe, pci_probe),
132 DEVMETHOD(device_attach, pci_attach),
/*
 * NOTE(review): two device_detach entries appear back to back.  Upstream
 * wraps these in an #ifdef PCI_RES_BUS / #else / #endif pair; the
 * preprocessor lines look to have been dropped from this copy -- confirm
 * against upstream before building.
 */
134 DEVMETHOD(device_detach, pci_detach),
136 DEVMETHOD(device_detach, bus_generic_detach),
138 DEVMETHOD(device_shutdown, bus_generic_shutdown),
139 DEVMETHOD(device_suspend, pci_suspend),
140 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
143 DEVMETHOD(bus_print_child, pci_print_child),
144 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
145 DEVMETHOD(bus_read_ivar, pci_read_ivar),
146 DEVMETHOD(bus_write_ivar, pci_write_ivar),
147 DEVMETHOD(bus_driver_added, pci_driver_added),
148 DEVMETHOD(bus_setup_intr, pci_setup_intr),
149 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
151 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
152 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
153 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
154 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
155 DEVMETHOD(bus_delete_resource, pci_delete_resource),
156 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
157 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
158 DEVMETHOD(bus_release_resource, pci_release_resource),
159 DEVMETHOD(bus_activate_resource, pci_activate_resource),
160 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
161 DEVMETHOD(bus_child_detached, pci_child_detached),
162 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
163 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
164 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
/* PCI interface */
167 DEVMETHOD(pci_read_config, pci_read_config_method),
168 DEVMETHOD(pci_write_config, pci_write_config_method),
169 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
170 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
171 DEVMETHOD(pci_enable_io, pci_enable_io_method),
172 DEVMETHOD(pci_disable_io, pci_disable_io_method),
173 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
174 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
175 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
176 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
177 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
178 DEVMETHOD(pci_find_cap, pci_find_cap_method),
179 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
180 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
181 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
182 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
183 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
184 DEVMETHOD(pci_release_msi, pci_release_msi_method),
185 DEVMETHOD(pci_msi_count, pci_msi_count_method),
186 DEVMETHOD(pci_msix_count, pci_msix_count_method),
187 DEVMETHOD(pci_get_rid, pci_get_rid_method),
/* NOTE(review): the DEVMETHOD_END terminator and closing brace are not
 * visible in this copy -- confirm they survive in the real file. */
192 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
194 static devclass_t pci_devclass;
195 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
196 MODULE_VERSION(pci, 1);
/* Cached copy of the pci vendor-description database (loaded on demand). */
198 static char *pci_vendordata;
199 static size_t pci_vendordata_size;
/*
 * NOTE(review): the opening "struct pci_quirk {" line and the remaining
 * field declarations are missing from this copy; only the devid member
 * is visible.  Confirm the struct layout against upstream.
 */
202 uint32_t devid; /* Vendor/device of the card */
204 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
205 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
206 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
207 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
208 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
209 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
/*
 * Table of known-broken devices, scanned linearly by pci_has_quirk().
 * NOTE(review): the all-zero terminator entry and closing brace are not
 * visible here; the table must end with a zero devid for the scan loop
 * to stop.
 */
214 static const struct pci_quirk pci_quirks[] = {
215 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
216 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
217 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
218 /* As does the Serverworks OSB4 (the SMBus mapping register) */
219 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
222 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
223 * or the CMIC-SL (AKA ServerWorks GC_LE).
225 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
226 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
229 * MSI doesn't work on earlier Intel chipsets including
230 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
232 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
233 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
234 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
244 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
247 * MSI-X allocation doesn't work properly for devices passed through
248 * by VMware up to at least ESXi 5.1.
250 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
251 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
254 * Some virtualization environments emulate an older chipset
255 * but support MSI just fine. QEMU uses the Intel 82440.
257 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
260 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
261 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
262 * It prevents us from attaching hpet(4) when the bit is unset.
263 * Note this quirk only affects SB600 revision A13 and earlier.
264 * For SB600 A21 and later, firmware must set the bit to hide it.
265 * For SB700 and later, it is unused and hardcoded to zero.
267 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
270 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
271 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
272 * command register is set.
274 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
275 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
276 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
279 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
280 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
282 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
283 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
284 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
285 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
286 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
287 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
/*
 * Global bus state (device list, generation counters) and loader
 * tunables / sysctls under hw.pci controlling MSI, power management,
 * BAR and bus-number allocation policy.
 */
292 /* map register information */
293 #define PCI_MAPMEM 0x01 /* memory map */
294 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
295 #define PCI_MAPPORT 0x04 /* port map */
297 struct devlist pci_devq;
298 uint32_t pci_generation;
299 uint32_t pci_numdevs = 0;
300 static int pcie_chipset, pcix_chipset;
303 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
305 static int pci_enable_io_modes = 1;
306 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
307 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
308 &pci_enable_io_modes, 1,
309 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
310 enable these bits correctly. We'd like to do this all the time, but there\n\
311 are some peripherals that this causes problems with.");
313 static int pci_do_realloc_bars = 0;
314 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
315 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
316 &pci_do_realloc_bars, 0,
317 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
319 static int pci_do_power_nodriver = 0;
320 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
321 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
322 &pci_do_power_nodriver, 0,
/* NOTE(review): the final line of this description string (the "3" case
 * and closing quote) is missing from this copy. */
323 "Place a function into D3 state when no driver attaches to it. 0 means\n\
324 disable. 1 means conservatively place devices into D3 state. 2 means\n\
325 agressively place devices into D3 state. 3 means put absolutely everything\n\
328 int pci_do_power_resume = 1;
329 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
330 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
331 &pci_do_power_resume, 1,
332 "Transition from D3 -> D0 on resume.");
334 int pci_do_power_suspend = 1;
335 TUNABLE_INT("hw.pci.do_power_suspend", &pci_do_power_suspend);
336 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RW,
337 &pci_do_power_suspend, 1,
338 "Transition from D0 -> D3 on suspend.");
340 static int pci_do_msi = 1;
341 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
342 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
343 "Enable support for MSI interrupts");
345 static int pci_do_msix = 1;
346 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
347 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
348 "Enable support for MSI-X interrupts");
350 static int pci_honor_msi_blacklist = 1;
351 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
352 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
353 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
/* NOTE(review): the #else and #endif lines of this conditional default
 * appear to have been dropped from this copy -- confirm upstream. */
355 #if defined(__i386__) || defined(__amd64__)
356 static int pci_usb_takeover = 1;
358 static int pci_usb_takeover = 0;
360 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
361 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
362 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
363 Disable this if you depend on BIOS emulation of USB devices, that is\n\
364 you use USB devices (like keyboard or mouse) but do not load USB drivers");
366 static int pci_clear_bars;
367 TUNABLE_INT("hw.pci.clear_bars", &pci_clear_bars);
368 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
369 "Ignore firmware-assigned resources for BARs.");
371 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
372 static int pci_clear_buses;
373 TUNABLE_INT("hw.pci.clear_buses", &pci_clear_buses);
374 SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
375 "Ignore firmware-assigned bus numbers.");
378 static int pci_enable_ari = 1;
379 TUNABLE_INT("hw.pci.enable_ari", &pci_enable_ari);
380 SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
381 0, "Enable support for PCIe Alternative RID Interpretation");
384 pci_has_quirk(uint32_t devid, int quirk)
386 const struct pci_quirk *q;
388 for (q = &pci_quirks[0]; q->devid; q++) {
389 if (q->devid == devid && q->type == quirk)
395 /* Find a device_t by bus/slot/function in domain 0 */
398 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
401 return (pci_find_dbsf(0, bus, slot, func));
404 /* Find a device_t by domain/bus/slot/function */
407 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
409 struct pci_devinfo *dinfo;
411 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
412 if ((dinfo->cfg.domain == domain) &&
413 (dinfo->cfg.bus == bus) &&
414 (dinfo->cfg.slot == slot) &&
415 (dinfo->cfg.func == func)) {
416 return (dinfo->cfg.dev);
423 /* Find a device_t by vendor/device ID */
426 pci_find_device(uint16_t vendor, uint16_t device)
428 struct pci_devinfo *dinfo;
430 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
431 if ((dinfo->cfg.vendor == vendor) &&
432 (dinfo->cfg.device == device)) {
433 return (dinfo->cfg.dev);
441 pci_find_class(uint8_t class, uint8_t subclass)
443 struct pci_devinfo *dinfo;
445 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
446 if (dinfo->cfg.baseclass == class &&
447 dinfo->cfg.subclass == subclass) {
448 return (dinfo->cfg.dev);
456 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
461 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
464 retval += vprintf(fmt, ap);
469 /* return base address of memory or port map */
472 pci_mapbase(uint64_t mapreg)
475 if (PCI_BAR_MEM(mapreg))
476 return (mapreg & PCIM_BAR_MEM_BASE);
478 return (mapreg & PCIM_BAR_IO_BASE);
481 /* return map type of memory or port map */
484 pci_maptype(uint64_t mapreg)
487 if (PCI_BAR_IO(mapreg))
489 if (mapreg & PCIM_BAR_MEM_PREFETCH)
490 return ("Prefetchable Memory");
494 /* return log2 of map size decoded for memory or port map */
497 pci_mapsize(uint64_t testval)
501 testval = pci_mapbase(testval);
504 while ((testval & 1) == 0)
513 /* return base address of device ROM */
516 pci_rombase(uint64_t mapreg)
519 return (mapreg & PCIM_BIOS_ADDR_MASK);
522 /* return log2 of map size decided for device ROM */
525 pci_romsize(uint64_t testval)
529 testval = pci_rombase(testval);
532 while ((testval & 1) == 0)
541 /* return log2 of address range supported by map register */
544 pci_maprange(uint64_t mapreg)
548 if (PCI_BAR_IO(mapreg))
551 switch (mapreg & PCIM_BAR_MEM_TYPE) {
552 case PCIM_BAR_MEM_32:
555 case PCIM_BAR_MEM_1MB:
558 case PCIM_BAR_MEM_64:
565 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
568 pci_fixancient(pcicfgregs *cfg)
570 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
573 /* PCI to PCI bridges use header type 1 */
574 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
575 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
578 /* extract header type specific config data */
581 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
583 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
584 switch (cfg->hdrtype & PCIM_HDRTYPE) {
585 case PCIM_HDRTYPE_NORMAL:
586 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
587 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
588 cfg->mingnt = REG(PCIR_MINGNT, 1);
589 cfg->maxlat = REG(PCIR_MAXLAT, 1);
590 cfg->nummaps = PCI_MAXMAPS_0;
592 case PCIM_HDRTYPE_BRIDGE:
593 cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1);
594 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1);
595 cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1);
596 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1);
597 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2);
598 cfg->nummaps = PCI_MAXMAPS_1;
600 case PCIM_HDRTYPE_CARDBUS:
601 cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1);
602 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1);
603 cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1);
604 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1);
605 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2);
606 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
607 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
608 cfg->nummaps = PCI_MAXMAPS_2;
614 /* read configuration header into pcicfgregs structure */
616 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
618 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
619 pcicfgregs *cfg = NULL;
620 struct pci_devinfo *devlist_entry;
621 struct devlist *devlist_head;
623 devlist_head = &pci_devq;
625 devlist_entry = NULL;
627 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
628 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
629 if (devlist_entry == NULL)
632 cfg = &devlist_entry->cfg;
638 cfg->vendor = REG(PCIR_VENDOR, 2);
639 cfg->device = REG(PCIR_DEVICE, 2);
640 cfg->cmdreg = REG(PCIR_COMMAND, 2);
641 cfg->statreg = REG(PCIR_STATUS, 2);
642 cfg->baseclass = REG(PCIR_CLASS, 1);
643 cfg->subclass = REG(PCIR_SUBCLASS, 1);
644 cfg->progif = REG(PCIR_PROGIF, 1);
645 cfg->revid = REG(PCIR_REVID, 1);
646 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
647 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
648 cfg->lattimer = REG(PCIR_LATTIMER, 1);
649 cfg->intpin = REG(PCIR_INTPIN, 1);
650 cfg->intline = REG(PCIR_INTLINE, 1);
652 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
653 cfg->hdrtype &= ~PCIM_MFDEV;
654 STAILQ_INIT(&cfg->maps);
657 pci_hdrtypedata(pcib, b, s, f, cfg);
659 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
660 pci_read_cap(pcib, cfg);
662 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
664 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
665 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
666 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
667 devlist_entry->conf.pc_sel.pc_func = cfg->func;
668 devlist_entry->conf.pc_hdr = cfg->hdrtype;
670 devlist_entry->conf.pc_subvendor = cfg->subvendor;
671 devlist_entry->conf.pc_subdevice = cfg->subdevice;
672 devlist_entry->conf.pc_vendor = cfg->vendor;
673 devlist_entry->conf.pc_device = cfg->device;
675 devlist_entry->conf.pc_class = cfg->baseclass;
676 devlist_entry->conf.pc_subclass = cfg->subclass;
677 devlist_entry->conf.pc_progif = cfg->progif;
678 devlist_entry->conf.pc_revid = cfg->revid;
683 return (devlist_entry);
/*
 * Walk the legacy (config-space) capability list of 'cfg' and record the
 * locations of power-management, HyperTransport, MSI, MSI-X, VPD, PCI-X
 * and PCI-express capabilities into the cfg sub-structures.
 * NOTE(review): many interleaved lines (break statements, #endif's,
 * closing braces, the ptr/nextptr sanity loop body) are missing from
 * this copy; the surviving text is preserved verbatim below -- diff
 * against upstream before modifying.
 */
688 pci_read_cap(device_t pcib, pcicfgregs *cfg)
690 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
691 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
692 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
696 int ptr, nextptr, ptrptr;
698 switch (cfg->hdrtype & PCIM_HDRTYPE) {
699 case PCIM_HDRTYPE_NORMAL:
700 case PCIM_HDRTYPE_BRIDGE:
701 ptrptr = PCIR_CAP_PTR;
703 case PCIM_HDRTYPE_CARDBUS:
704 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
707 return; /* no extended capabilities support */
709 nextptr = REG(ptrptr, 1); /* sanity check? */
712 * Read capability entries.
714 while (nextptr != 0) {
717 printf("illegal PCI extended capability offset %d\n",
721 /* Find the next entry */
723 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
725 /* Process this entry */
726 switch (REG(ptr + PCICAP_ID, 1)) {
727 case PCIY_PMG: /* PCI power management */
728 if (cfg->pp.pp_cap == 0) {
729 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
730 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
731 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
732 if ((nextptr - ptr) > PCIR_POWER_DATA)
733 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
736 case PCIY_HT: /* HyperTransport */
737 /* Determine HT-specific capability type. */
738 val = REG(ptr + PCIR_HT_COMMAND, 2);
740 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
741 cfg->ht.ht_slave = ptr;
743 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
744 switch (val & PCIM_HTCMD_CAP_MASK) {
745 case PCIM_HTCAP_MSI_MAPPING:
746 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
747 /* Sanity check the mapping window. */
748 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
751 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
753 if (addr != MSI_INTEL_ADDR_BASE)
755 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
756 cfg->domain, cfg->bus,
757 cfg->slot, cfg->func,
760 addr = MSI_INTEL_ADDR_BASE;
762 cfg->ht.ht_msimap = ptr;
763 cfg->ht.ht_msictrl = val;
764 cfg->ht.ht_msiaddr = addr;
769 case PCIY_MSI: /* PCI MSI */
770 cfg->msi.msi_location = ptr;
771 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
772 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
773 PCIM_MSICTRL_MMC_MASK)>>1);
775 case PCIY_MSIX: /* PCI MSI-X */
776 cfg->msix.msix_location = ptr;
777 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
778 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
779 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
780 val = REG(ptr + PCIR_MSIX_TABLE, 4);
781 cfg->msix.msix_table_bar = PCIR_BAR(val &
783 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
784 val = REG(ptr + PCIR_MSIX_PBA, 4);
785 cfg->msix.msix_pba_bar = PCIR_BAR(val &
787 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
789 case PCIY_VPD: /* PCI Vital Product Data */
790 cfg->vpd.vpd_reg = ptr;
793 /* Should always be true. */
794 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
795 PCIM_HDRTYPE_BRIDGE) {
796 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
797 cfg->subvendor = val & 0xffff;
798 cfg->subdevice = val >> 16;
801 case PCIY_PCIX: /* PCI-X */
803 * Assume we have a PCI-X chipset if we have
804 * at least one PCI-PCI bridge with a PCI-X
805 * capability. Note that some systems with
806 * PCI-express or HT chipsets might match on
807 * this check as well.
809 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
812 cfg->pcix.pcix_location = ptr;
814 case PCIY_EXPRESS: /* PCI-express */
816 * Assume we have a PCI-express chipset if we have
817 * at least one PCI-express device.
820 cfg->pcie.pcie_location = ptr;
821 val = REG(ptr + PCIER_FLAGS, 2);
822 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
829 #if defined(__powerpc__)
831 * Enable the MSI mapping window for all HyperTransport
832 * slaves. PCI-PCI bridges have their windows enabled via
835 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
836 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
838 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
839 cfg->domain, cfg->bus, cfg->slot, cfg->func);
840 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
841 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
845 /* REG and WREG use carry through to next functions */
849 * PCI Vital Product Data
852 #define PCI_VPD_TIMEOUT 1000000
855 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
857 int count = PCI_VPD_TIMEOUT;
859 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
861 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
863 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
866 DELAY(1); /* limit looping */
868 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
875 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
877 int count = PCI_VPD_TIMEOUT;
879 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
881 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
882 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
883 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
886 DELAY(1); /* limit looping */
893 #undef PCI_VPD_TIMEOUT
/*
 * Cursor state for sequential VPD byte reads.
 * NOTE(review): the member declarations are missing from this copy;
 * vpd_nextbyte() below uses pcib, cfg, val, off and bytesinval --
 * confirm the full layout against upstream.
 */
895 struct vpd_readstate {
905 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
910 if (vrs->bytesinval == 0) {
911 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
913 vrs->val = le32toh(reg);
915 byte = vrs->val & 0xff;
918 vrs->val = vrs->val >> 8;
919 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array and the writable (VPD-W)
 * keyword array, validating the RV checksum as it goes.  Implemented as
 * a byte-at-a-time state machine over vpd_nextbyte().
 * NOTE(review): many interleaved lines (state assignments, break
 * statements, closing braces, the checksum accumulation) are missing
 * from this copy; the surviving text is preserved verbatim -- diff
 * against upstream before modifying this function.
 */
929 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
931 struct vpd_readstate vrs;
936 int alloc, off; /* alloc/off for RO/W arrays */
942 /* init vpd reader */
950 name = remain = i = 0; /* shut up stupid gcc */
951 alloc = off = 0; /* shut up stupid gcc */
952 dflen = 0; /* shut up stupid gcc */
955 if (vpd_nextbyte(&vrs, &byte)) {
960 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
961 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
962 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
965 case 0: /* item name */
967 if (vpd_nextbyte(&vrs, &byte2)) {
972 if (vpd_nextbyte(&vrs, &byte2)) {
976 remain |= byte2 << 8;
977 if (remain > (0x7f*4 - vrs.off)) {
980 "invalid VPD data, remain %#x\n",
986 name = (byte >> 3) & 0xf;
989 case 0x2: /* String */
990 cfg->vpd.vpd_ident = malloc(remain + 1,
998 case 0x10: /* VPD-R */
1001 cfg->vpd.vpd_ros = malloc(alloc *
1002 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1006 case 0x11: /* VPD-W */
1009 cfg->vpd.vpd_w = malloc(alloc *
1010 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1014 default: /* Invalid data, abort */
1020 case 1: /* Identifier String */
1021 cfg->vpd.vpd_ident[i++] = byte;
1024 cfg->vpd.vpd_ident[i] = '\0';
1029 case 2: /* VPD-R Keyword Header */
1031 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1032 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1033 M_DEVBUF, M_WAITOK | M_ZERO);
1035 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1036 if (vpd_nextbyte(&vrs, &byte2)) {
1040 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1041 if (vpd_nextbyte(&vrs, &byte2)) {
1045 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1047 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1050 * if this happens, we can't trust the rest
1053 pci_printf(cfg, "bad keyword length: %d\n",
1058 } else if (dflen == 0) {
1059 cfg->vpd.vpd_ros[off].value = malloc(1 *
1060 sizeof(*cfg->vpd.vpd_ros[off].value),
1061 M_DEVBUF, M_WAITOK);
1062 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1064 cfg->vpd.vpd_ros[off].value = malloc(
1066 sizeof(*cfg->vpd.vpd_ros[off].value),
1067 M_DEVBUF, M_WAITOK);
1070 /* keep in sync w/ state 3's transistions */
1071 if (dflen == 0 && remain == 0)
1073 else if (dflen == 0)
1079 case 3: /* VPD-R Keyword Value */
1080 cfg->vpd.vpd_ros[off].value[i++] = byte;
1081 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1082 "RV", 2) == 0 && cksumvalid == -1) {
1088 "bad VPD cksum, remain %hhu\n",
1097 /* keep in sync w/ state 2's transistions */
1099 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1100 if (dflen == 0 && remain == 0) {
1101 cfg->vpd.vpd_rocnt = off;
1102 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1103 off * sizeof(*cfg->vpd.vpd_ros),
1104 M_DEVBUF, M_WAITOK | M_ZERO);
1106 } else if (dflen == 0)
1116 case 5: /* VPD-W Keyword Header */
1118 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1119 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1120 M_DEVBUF, M_WAITOK | M_ZERO);
1122 cfg->vpd.vpd_w[off].keyword[0] = byte;
1123 if (vpd_nextbyte(&vrs, &byte2)) {
1127 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1128 if (vpd_nextbyte(&vrs, &byte2)) {
1132 cfg->vpd.vpd_w[off].len = dflen = byte2;
1133 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1134 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1135 sizeof(*cfg->vpd.vpd_w[off].value),
1136 M_DEVBUF, M_WAITOK);
1139 /* keep in sync w/ state 6's transistions */
1140 if (dflen == 0 && remain == 0)
1142 else if (dflen == 0)
1148 case 6: /* VPD-W Keyword Value */
1149 cfg->vpd.vpd_w[off].value[i++] = byte;
1152 /* keep in sync w/ state 5's transistions */
1154 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1155 if (dflen == 0 && remain == 0) {
1156 cfg->vpd.vpd_wcnt = off;
1157 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1158 off * sizeof(*cfg->vpd.vpd_w),
1159 M_DEVBUF, M_WAITOK | M_ZERO);
1161 } else if (dflen == 0)
1166 pci_printf(cfg, "invalid state: %d\n", state);
1172 if (cksumvalid == 0 || state < -1) {
1173 /* read-only data bad, clean up */
1174 if (cfg->vpd.vpd_ros != NULL) {
1175 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1176 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1177 free(cfg->vpd.vpd_ros, M_DEVBUF);
1178 cfg->vpd.vpd_ros = NULL;
1182 /* I/O error, clean up */
1183 pci_printf(cfg, "failed to read VPD data.\n");
1184 if (cfg->vpd.vpd_ident != NULL) {
1185 free(cfg->vpd.vpd_ident, M_DEVBUF);
1186 cfg->vpd.vpd_ident = NULL;
1188 if (cfg->vpd.vpd_w != NULL) {
1189 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1190 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1191 free(cfg->vpd.vpd_w, M_DEVBUF);
1192 cfg->vpd.vpd_w = NULL;
1195 cfg->vpd.vpd_cached = 1;
1201 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1203 struct pci_devinfo *dinfo = device_get_ivars(child);
1204 pcicfgregs *cfg = &dinfo->cfg;
1206 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1207 pci_read_vpd(device_get_parent(dev), cfg);
1209 *identptr = cfg->vpd.vpd_ident;
1211 if (*identptr == NULL)
1218 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1221 struct pci_devinfo *dinfo = device_get_ivars(child);
1222 pcicfgregs *cfg = &dinfo->cfg;
1225 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1226 pci_read_vpd(device_get_parent(dev), cfg);
1228 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1229 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1230 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1231 *vptr = cfg->vpd.vpd_ros[i].value;
1240 pci_fetch_vpd_list(device_t dev)
1242 struct pci_devinfo *dinfo = device_get_ivars(dev);
1243 pcicfgregs *cfg = &dinfo->cfg;
1245 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1246 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1251  * Find the requested HyperTransport capability and return the offset
1252  * in configuration space via the pointer provided.  The function
1253  * returns 0 on success and an error code otherwise.
1256 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
	/* Start at the first HT capability in the standard cap list. */
1261 	error = pci_find_cap(child, PCIY_HT, &ptr);
1266 	 * Traverse the capabilities list checking each HT capability
1267 	 * to see if it matches the requested HT capability.
1270 		val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
	/*
	 * Slave/host capabilities are distinguished by fewer bits of the
	 * command register than the other HT capability types, hence the
	 * two different masks (slave/host mask applied in elided branch).
	 */
1271 		if (capability == PCIM_HTCAP_SLAVE ||
1272 		    capability == PCIM_HTCAP_HOST)
1275 			val &= PCIM_HTCMD_CAP_MASK;
1276 		if (val == capability) {
1282 		/* Skip to the next HT capability. */
1284 			ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1285 			if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1294  * Find the requested capability and return the offset in
1295  * configuration space via the pointer provided.  The function returns
1296  * 0 on success and an error code otherwise.
1299 pci_find_cap_method(device_t dev, device_t child, int capability,
1302 	struct pci_devinfo *dinfo = device_get_ivars(child);
1303 	pcicfgregs *cfg = &dinfo->cfg;
1308 	 * Check the CAP_LIST bit of the PCI status register first.
1310 	status = pci_read_config(child, PCIR_STATUS, 2);
1311 	if (!(status & PCIM_STATUS_CAPPRESENT))
1315 	 * Determine the start pointer of the capabilities list.
	 * The capability pointer register location depends on header type:
	 * type 0/1 devices use PCIR_CAP_PTR, CardBus bridges PCIR_CAP_PTR_2.
1317 	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1318 	case PCIM_HDRTYPE_NORMAL:
1319 	case PCIM_HDRTYPE_BRIDGE:
1322 	case PCIM_HDRTYPE_CARDBUS:
1323 		ptr = PCIR_CAP_PTR_2;
1327 		return (ENXIO);		/* no extended capabilities support */
1329 	ptr = pci_read_config(child, ptr, 1);
1332 	 * Traverse the capabilities list.
1335 		if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1340 		ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1347  * Find the requested extended capability and return the offset in
1348  * configuration space via the pointer provided.  The function returns
1349  * 0 on success and an error code otherwise.
1352 pci_find_extcap_method(device_t dev, device_t child, int capability,
1355 	struct pci_devinfo *dinfo = device_get_ivars(child);
1356 	pcicfgregs *cfg = &dinfo->cfg;
1360 	/* Only supported for PCI-express devices. */
1361 	if (cfg->pcie.pcie_location == 0)
	/*
	 * Walk the extended capability list.  An all-ones or all-zeroes
	 * header means no (further) extended capabilities are present.
	 */
1365 	ecap = pci_read_config(child, ptr, 4);
1366 	if (ecap == 0xffffffff || ecap == 0)
1369 		if (PCI_EXTCAP_ID(ecap) == capability) {
1374 		ptr = PCI_EXTCAP_NEXTPTR(ecap);
1377 		ecap = pci_read_config(child, ptr, 4);
1384  * Support for MSI-X message interrupts.
/*
 * Program MSI-X table entry 'index' with the given message address and
 * data.  Each table entry is 16 bytes: address low, address high, data,
 * and vector control (the latter written by pci_mask/unmask_msix).
 */
1387 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1389 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1390 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1393 	KASSERT(msix->msix_table_len > index, ("bogus index"));
1394 	offset = msix->msix_table_offset + index * 16;
1395 	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1396 	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1397 	bus_write_4(msix->msix_table_res, offset + 8, data);
1399 	/* Enable MSI -> HT mapping. */
1400 	pci_ht_map_msi(dev, address);
/*
 * Set the mask bit in the vector control word of MSI-X table entry
 * 'index'.  Note the assertion is against msix_msgnum (not
 * msix_table_len): pci_resume_msix() masks every vector the device
 * supports, including ones without a virtual table entry.
 */
1404 pci_mask_msix(device_t dev, u_int index)
1406 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1407 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1408 	uint32_t offset, val;
1410 	KASSERT(msix->msix_msgnum > index, ("bogus index"));
1411 	offset = msix->msix_table_offset + index * 16 + 12;
1412 	val = bus_read_4(msix->msix_table_res, offset);
	/* Avoid the MMIO write if the vector is already masked. */
1413 	if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1414 		val |= PCIM_MSIX_VCTRL_MASK;
1415 		bus_write_4(msix->msix_table_res, offset, val);
/*
 * Clear the mask bit in the vector control word of MSI-X table entry
 * 'index', allowing the message to be delivered.
 */
1420 pci_unmask_msix(device_t dev, u_int index)
1422 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1423 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1424 	uint32_t offset, val;
1426 	KASSERT(msix->msix_table_len > index, ("bogus index"));
1427 	offset = msix->msix_table_offset + index * 16 + 12;
1428 	val = bus_read_4(msix->msix_table_res, offset);
	/* Avoid the MMIO write if the vector is already unmasked. */
1429 	if (val & PCIM_MSIX_VCTRL_MASK) {
1430 		val &= ~PCIM_MSIX_VCTRL_MASK;
1431 		bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return non-zero if MSI-X message 'index' is pending delivery, by
 * testing its bit in the Pending Bit Array (one bit per message,
 * packed 32 per dword).
 */
1436 pci_pending_msix(device_t dev, u_int index)
1438 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1439 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1440 	uint32_t offset, bit;
1442 	KASSERT(msix->msix_table_len > index, ("bogus index"));
1443 	offset = msix->msix_pba_offset + (index / 32) * 4;
1444 	bit = 1 << index % 32;
1445 	return (bus_read_4(msix->msix_pba_res, offset) & bit);
1449  * Restore MSI-X registers and table during resume.  If MSI-X is
1450  * enabled then walk the virtual table to restore the actual MSI-X
1454 pci_resume_msix(device_t dev)
1456 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1457 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1458 	struct msix_table_entry *mte;
1459 	struct msix_vector *mv;
1462 	if (msix->msix_alloc > 0) {
		/*
		 * First, mask all vectors.  This covers every message the
		 * device supports, not just those in our virtual table.
		 */
1464 		for (i = 0; i < msix->msix_msgnum; i++)
1465 			pci_mask_msix(dev, i);
1467 		/* Second, program any messages with at least one handler. */
1468 		for (i = 0; i < msix->msix_table_len; i++) {
1469 			mte = &msix->msix_table[i];
1470 			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
			/* mte_vector is 1-based; msix_vectors[] is 0-based. */
1472 			mv = &msix->msix_vectors[mte->mte_vector - 1];
1473 			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1474 			pci_unmask_msix(dev, i);
	/* Re-write the saved control register (restores the enable bit). */
1477 	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1478 	    msix->msix_ctrl, 2);
1482  * Attempt to allocate *count MSI-X messages.  The actual number allocated is
1483  * returned in *count.  After this function returns, each message will be
1484  * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1487 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1489 	struct pci_devinfo *dinfo = device_get_ivars(child);
1490 	pcicfgregs *cfg = &dinfo->cfg;
1491 	struct resource_list_entry *rle;
1492 	int actual, error, i, irq, max;
1494 	/* Don't let count == 0 get us into trouble. */
1498 	/* If rid 0 is allocated, then fail. */
1499 	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1500 	if (rle != NULL && rle->res != NULL)
1503 	/* Already have allocated messages? */
1504 	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1507 	/* If MSI-X is blacklisted for this system, fail. */
1508 	if (pci_msix_blacklisted())
1511 	/* MSI-X capability present? */
1512 	if (cfg->msix.msix_location == 0 || !pci_do_msix)
	/*
	 * Make sure the appropriate BARs are mapped.  The driver must
	 * have activated the BAR(s) holding the MSI-X table and PBA
	 * before asking for vectors.
	 */
1516 	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1517 	    cfg->msix.msix_table_bar);
1518 	if (rle == NULL || rle->res == NULL ||
1519 	    !(rman_get_flags(rle->res) & RF_ACTIVE))
1521 	cfg->msix.msix_table_res = rle->res;
	/* The PBA may live in a different BAR than the table. */
1522 	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1523 		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1524 		    cfg->msix.msix_pba_bar);
1525 		if (rle == NULL || rle->res == NULL ||
1526 		    !(rman_get_flags(rle->res) & RF_ACTIVE))
1529 		cfg->msix.msix_pba_res = rle->res;
1532 	device_printf(child,
1533 	    "attempting to allocate %d MSI-X vectors (%d supported)\n",
1534 	    *count, cfg->msix.msix_msgnum);
1535 	max = min(*count, cfg->msix.msix_msgnum);
1536 	for (i = 0; i < max; i++) {
1537 		/* Allocate a message. */
1538 		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
		/* Expose each message as a SYS_RES_IRQ resource at rid i+1. */
1544 		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1550 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1552 			device_printf(child, "using IRQ %lu for MSI-X\n",
1558 			 * Be fancy and try to print contiguous runs of
1559 			 * IRQ values as ranges.  'irq' is the previous IRQ.
1560 			 * 'run' is true if we are in a range.
1562 			device_printf(child, "using IRQs %lu", rle->start);
1565 			for (i = 1; i < actual; i++) {
1566 				rle = resource_list_find(&dinfo->resources,
1567 				    SYS_RES_IRQ, i + 1);
1569 				/* Still in a run? */
1570 				if (rle->start == irq + 1) {
1576 				/* Finish previous range. */
1582 				/* Start new range. */
1583 				printf(",%lu", rle->start);
1587 			/* Unfinished range? */
1590 			printf(" for MSI-X\n");
1594 	/* Mask all vectors. */
1595 	for (i = 0; i < cfg->msix.msix_msgnum; i++)
1596 		pci_mask_msix(child, i);
1598 	/* Allocate and initialize vector data and virtual table. */
1599 	cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1600 	    M_DEVBUF, M_WAITOK | M_ZERO);
1601 	cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1602 	    M_DEVBUF, M_WAITOK | M_ZERO);
	/* Initially messages map 1:1 onto vectors (mte_vector is 1-based). */
1603 	for (i = 0; i < actual; i++) {
1604 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1605 		cfg->msix.msix_vectors[i].mv_irq = rle->start;
1606 		cfg->msix.msix_table[i].mte_vector = i + 1;
1609 	/* Update control register to enable MSI-X. */
1610 	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1611 	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1612 	    cfg->msix.msix_ctrl, 2);
1614 	/* Update counts of alloc'd messages. */
1615 	cfg->msix.msix_alloc = actual;
1616 	cfg->msix.msix_table_len = actual;
1622 * By default, pci_alloc_msix() will assign the allocated IRQ
1623 * resources consecutively to the first N messages in the MSI-X table.
1624 * However, device drivers may want to use different layouts if they
1625 * either receive fewer messages than they asked for, or they wish to
1626 * populate the MSI-X table sparsely. This method allows the driver
1627 * to specify what layout it wants. It must be called after a
1628 * successful pci_alloc_msix() but before any of the associated
1629 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1631 * The 'vectors' array contains 'count' message vectors. The array
1632 * maps directly to the MSI-X table in that index 0 in the array
1633 * specifies the vector for the first message in the MSI-X table, etc.
1634 * The vector value in each array index can either be 0 to indicate
1635 * that no vector should be assigned to a message slot, or it can be a
1636 * number from 1 to N (where N is the count returned from a
1637  * successful call to pci_alloc_msix()) to indicate which message
1638 * vector (IRQ) to be used for the corresponding message.
1640 * On successful return, each message with a non-zero vector will have
1641 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1642 * 1. Additionally, if any of the IRQs allocated via the previous
1643 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1644 * will be freed back to the system automatically.
1646 * For example, suppose a driver has a MSI-X table with 6 messages and
1647 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1648 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1649 * C. After the call to pci_alloc_msix(), the device will be setup to
1650 * have an MSI-X table of ABC--- (where - means no vector assigned).
1651 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1652 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1653 * be freed back to the system. This device will also have valid
1654 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1656 * In any case, the SYS_RES_IRQ rid X will always map to the message
1657 * at MSI-X table index X - 1 and will only be valid if a vector is
1658 * assigned to that table entry.
/*
 * Remap allocated MSI-X vectors onto message slots per the caller's
 * 'vectors' array (see the block comment above for the full contract).
 * 'vectors' entries are 1-based vector numbers (0 = slot unused), while
 * the msix_vectors[] array is 0-based -- hence the "- 1" when indexing.
 */
1661 pci_remap_msix_method(device_t dev, device_t child, int count,
1662     const u_int *vectors)
1664 	struct pci_devinfo *dinfo = device_get_ivars(child);
1665 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1666 	struct resource_list_entry *rle;
1667 	int i, irq, j, *used;
1670 	 * Have to have at least one message in the table but the
1671 	 * table can't be bigger than the actual MSI-X table in the
1674 	if (count == 0 || count > msix->msix_msgnum)
1677 	/* Sanity check the vectors. */
1678 	for (i = 0; i < count; i++)
1679 		if (vectors[i] > msix->msix_alloc)
1683 	 * Make sure there aren't any holes in the vectors to be used.
1684 	 * It's a big pain to support it, and it doesn't really make
1685 	 * sense anyway.  Also, at least one vector must be used.
1687 	used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1689 	for (i = 0; i < count; i++)
1690 		if (vectors[i] != 0)
1691 			used[vectors[i] - 1] = 1;
1692 	for (i = 0; i < msix->msix_alloc - 1; i++)
1693 		if (used[i] == 0 && used[i + 1] == 1) {
1694 			free(used, M_DEVBUF);
1698 	free(used, M_DEVBUF);
	/*
	 * Make sure none of the resources are allocated.
	 * NOTE(review): the EBUSY returns below should free 'used' as
	 * well -- confirm against the elided lines that they do.
	 */
1703 	for (i = 0; i < msix->msix_table_len; i++) {
1704 		if (msix->msix_table[i].mte_vector == 0)
1706 		if (msix->msix_table[i].mte_handlers > 0)
1708 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1709 		KASSERT(rle != NULL, ("missing resource"));
1710 		if (rle->res != NULL)
1714 	/* Free the existing resource list entries. */
1715 	for (i = 0; i < msix->msix_table_len; i++) {
1716 		if (msix->msix_table[i].mte_vector == 0)
1718 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1722 	 * Build the new virtual table keeping track of which vectors are
1725 	free(msix->msix_table, M_DEVBUF);
1726 	msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1727 	    M_DEVBUF, M_WAITOK | M_ZERO);
1728 	for (i = 0; i < count; i++)
1729 		msix->msix_table[i].mte_vector = vectors[i];
1730 	msix->msix_table_len = count;
1732 	/* Free any unused IRQs and resize the vectors array if necessary. */
1733 	j = msix->msix_alloc - 1;
1735 		struct msix_vector *vec;
		/* Release trailing unused vectors back to the parent pcib. */
1737 		while (used[j] == 0) {
1738 			PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1739 			    msix->msix_vectors[j].mv_irq);
1742 		vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1744 		bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1746 		free(msix->msix_vectors, M_DEVBUF);
1747 		msix->msix_vectors = vec;
1748 		msix->msix_alloc = j + 1;
1750 	free(used, M_DEVBUF);
1752 	/* Map the IRQs onto the rids. */
1753 	for (i = 0; i < count; i++) {
1754 		if (vectors[i] == 0)
		/* vectors[i] is 1-based; msix_vectors[] is 0-based. */
1756 		irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1757 		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1762 		device_printf(child, "Remapped MSI-X IRQs as: ");
1763 		for (i = 0; i < count; i++) {
1766 			if (vectors[i] == 0)
1770 			    msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child': disable MSI-X in the
 * control register, delete the SYS_RES_IRQ entries, return the IRQs to
 * the parent pcib, and free the virtual table/vector arrays.  Fails
 * with EBUSY if any message still has handlers or allocated resources.
 * Returns ENODEV if no MSI-X messages are allocated (implied by the
 * elided return after the msix_alloc check).
 */
1779 pci_release_msix(device_t dev, device_t child)
1781 	struct pci_devinfo *dinfo = device_get_ivars(child);
1782 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1783 	struct resource_list_entry *rle;
1786 	/* Do we have any messages to release? */
1787 	if (msix->msix_alloc == 0)
1790 	/* Make sure none of the resources are allocated. */
1791 	for (i = 0; i < msix->msix_table_len; i++) {
1792 		if (msix->msix_table[i].mte_vector == 0)
1794 		if (msix->msix_table[i].mte_handlers > 0)
1796 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1797 		KASSERT(rle != NULL, ("missing resource"));
1798 		if (rle->res != NULL)
1802 	/* Update control register to disable MSI-X. */
1803 	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1804 	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1805 	    msix->msix_ctrl, 2);
1807 	/* Free the resource list entries. */
1808 	for (i = 0; i < msix->msix_table_len; i++) {
1809 		if (msix->msix_table[i].mte_vector == 0)
1811 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1813 	free(msix->msix_table, M_DEVBUF);
1814 	msix->msix_table_len = 0;
1816 	/* Release the IRQs. */
1817 	for (i = 0; i < msix->msix_alloc; i++)
1818 		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1819 		    msix->msix_vectors[i].mv_irq);
1820 	free(msix->msix_vectors, M_DEVBUF);
1821 	msix->msix_alloc = 0;
1826  * Return the max supported MSI-X messages this device supports.
1827  * Basically, assuming the MD code can alloc messages, this function
1828  * should return the maximum value that pci_alloc_msix() can return.
1829  * Thus, it is subject to the tunables, etc.
1832 pci_msix_count_method(device_t dev, device_t child)
1834 	struct pci_devinfo *dinfo = device_get_ivars(child);
1835 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	/* Only report a count if MSI-X is enabled and the cap exists. */
1837 	if (pci_do_msix && msix->msix_location != 0)
1838 		return (msix->msix_msgnum);
1843  * HyperTransport MSI mapping control
/*
 * Enable or disable the HT MSI address window for 'dev'.  A non-zero
 * 'addr' enables the mapping if the message address falls within the
 * window (compared at 1MB granularity via the >> 20 shifts); addr == 0
 * disables it.
 */
1846 pci_ht_map_msi(device_t dev, uint64_t addr)
1848 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1849 	struct pcicfg_ht *ht = &dinfo->cfg.ht;
1854 	if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1855 	    ht->ht_msiaddr >> 20 == addr >> 20) {
1856 		/* Enable MSI -> HT mapping. */
1857 		ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1858 		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1862 	if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1863 		/* Disable MSI -> HT mapping. */
1864 		ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1865 		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCIe maximum read request size in bytes, decoded from the
 * device control register (the encoded field value maps to 128 << code,
 * i.e. 1 << (code + 7)).  Returns 0 for non-PCIe devices (elided check
 * on pcie_location).
 */
1871 pci_get_max_read_req(device_t dev)
1873 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1877 	cap = dinfo->cfg.pcie.pcie_location;
1880 	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1881 	val &= PCIEM_CTL_MAX_READ_REQUEST;
1883 	return (1 << (val + 7));
/*
 * Set the PCIe maximum read request size.  'size' is rounded down to a
 * power of two via fls(), encoded (fls(size) - 8 yields the field code
 * for 128..4096), and written to the device control register.
 */
1887 pci_set_max_read_req(device_t dev, int size)
1889 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1893 	cap = dinfo->cfg.pcie.pcie_location;
1900 	size = (1 << (fls(size) - 1));
1901 	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1902 	val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1903 	val |= (fls(size) - 8) << 12;
1904 	pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1909  * Support for MSI message signalled interrupts.
/*
 * Program the MSI address/data registers and set the enable bit in the
 * MSI control register.  64-bit capable functions use the high address
 * register and the 64-bit data register offset.
 */
1912 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1914 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1915 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1917 	/* Write data and address values. */
1918 	pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1919 	    address & 0xffffffff, 4);
1920 	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1921 		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1923 		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1926 		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1929 	/* Enable MSI in the control register. */
1930 	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1931 	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1934 	/* Enable MSI -> HT mapping. */
1935 	pci_ht_map_msi(dev, address);
/*
 * Disable MSI for 'dev': tear down any HT MSI mapping (addr == 0) and
 * clear the enable bit in the MSI control register.
 */
1939 pci_disable_msi(device_t dev)
1941 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1942 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1944 	/* Disable MSI -> HT mapping. */
1945 	pci_ht_map_msi(dev, 0);
1947 	/* Disable MSI in the control register. */
1948 	msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1949 	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1954  * Restore MSI registers during resume.  If MSI is enabled then
1955  * restore the data and address registers in addition to the control
1959 pci_resume_msi(device_t dev)
1961 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1962 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	/* Only reprogram address/data if MSI was enabled before suspend. */
1966 	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1967 		address = msi->msi_addr;
1968 		data = msi->msi_data;
1969 		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1970 		    address & 0xffffffff, 4);
1971 		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1972 			pci_write_config(dev, msi->msi_location +
1973 			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1974 			pci_write_config(dev, msi->msi_location +
1975 			    PCIR_MSI_DATA_64BIT, data, 2);
1977 			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
	/* The control register is always restored, enabled or not. */
1980 	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Re-program the address/data of an MSI or MSI-X message after the
 * interrupt 'irq' has been moved (e.g. to another CPU).  The updated
 * routing is fetched from the parent pcib via PCIB_MAP_MSI and written
 * back to the device.
 */
1985 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1987 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1988 	pcicfgregs *cfg = &dinfo->cfg;
1989 	struct resource_list_entry *rle;
1990 	struct msix_table_entry *mte;
1991 	struct msix_vector *mv;
1997 	 * Handle MSI first.  We try to find this IRQ among our list
1998 	 * of MSI IRQs.  If we find it, we request updated address and
1999 	 * data registers and apply the results.
2001 	if (cfg->msi.msi_alloc > 0) {
2003 		/* If we don't have any active handlers, nothing to do. */
2004 		if (cfg->msi.msi_handlers == 0)
2006 		for (i = 0; i < cfg->msi.msi_alloc; i++) {
2007 			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2009 			if (rle->start == irq) {
2010 				error = PCIB_MAP_MSI(device_get_parent(bus),
2011 				    dev, irq, &addr, &data);
				/*
				 * MSI uses one address/data pair for all
				 * messages, so disable, update the cached
				 * values, and re-enable.
				 */
2014 				pci_disable_msi(dev);
2015 				dinfo->cfg.msi.msi_addr = addr;
2016 				dinfo->cfg.msi.msi_data = data;
2017 				pci_enable_msi(dev, addr, data);
2025 	 * For MSI-X, we check to see if we have this IRQ.  If we do,
2026 	 * we request the updated mapping info.  If that works, we go
2027 	 * through all the slots that use this IRQ and update them.
2029 	if (cfg->msix.msix_alloc > 0) {
2030 		for (i = 0; i < cfg->msix.msix_alloc; i++) {
2031 			mv = &cfg->msix.msix_vectors[i];
2032 			if (mv->mv_irq == irq) {
2033 				error = PCIB_MAP_MSI(device_get_parent(bus),
2034 				    dev, irq, &addr, &data);
2037 				mv->mv_address = addr;
				/* Update every table slot using this vector. */
2039 				for (j = 0; j < cfg->msix.msix_table_len; j++) {
2040 					mte = &cfg->msix.msix_table[j];
2041 					if (mte->mte_vector != i + 1)
2043 					if (mte->mte_handlers == 0)
					/* Mask while rewriting the entry. */
2045 					pci_mask_msix(dev, j);
2046 					pci_enable_msix(dev, j, addr, data);
2047 					pci_unmask_msix(dev, j);
2058 * Returns true if the specified device is blacklisted because MSI
2062 pci_msi_device_blacklisted(device_t dev)
2065 if (!pci_honor_msi_blacklist)
2068 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2072  * Determine if MSI is blacklisted globally on this system.  Currently,
2073  * we just check for blacklisted chipsets as represented by the
2074  * host-PCI bridge at device 0:0:0.  In the future, it may become
2075  * necessary to check other system attributes, such as the kenv values
2076  * that give the motherboard manufacturer and model number.
2079 pci_msi_blacklisted(void)
2083 	if (!pci_honor_msi_blacklist)
2086 	/* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2087 	if (!(pcie_chipset || pcix_chipset)) {
2088 		if (vm_guest != VM_GUEST_NO) {
2090 			 * Whitelist older chipsets in virtual
2091 			 * machines known to support MSI.
2093 			dev = pci_find_bsf(0, 0, 0);
2095 				return (!pci_has_quirk(pci_get_devid(dev),
2096 				    PCI_QUIRK_ENABLE_MSI_VM));
	/* Check the host bridge at 0:0:0 for a per-device MSI quirk. */
2101 	dev = pci_find_bsf(0, 0, 0);
2103 		return (pci_msi_device_blacklisted(dev));
2108  * Returns true if the specified device is blacklisted because MSI-X
2109  * doesn't work.  Note that this assumes that if MSI doesn't work,
2110  * MSI-X doesn't either.
2113 pci_msix_device_blacklisted(device_t dev)
2116 	if (!pci_honor_msi_blacklist)
	/* MSI-X specific quirk first, then fall back to the MSI quirk. */
2119 	if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2122 	return (pci_msi_device_blacklisted(dev));
2126  * Determine if MSI-X is blacklisted globally on this system.  If MSI
2127  * is blacklisted, assume that MSI-X is as well.  Check for additional
2128  * chipsets where MSI works but MSI-X does not.
2131 pci_msix_blacklisted(void)
2135 	if (!pci_honor_msi_blacklist)
	/* Check the host bridge at 0:0:0 for an MSI-X specific quirk. */
2138 	dev = pci_find_bsf(0, 0, 0);
2139 	if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2140 	    PCI_QUIRK_DISABLE_MSIX))
2143 	return (pci_msi_blacklisted());
2147  * Attempt to allocate *count MSI messages.  The actual number allocated is
2148  * returned in *count.  After this function returns, each message will be
2149  * available to the driver as SYS_RES_IRQ resources starting at rid 1.
2152 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2154 	struct pci_devinfo *dinfo = device_get_ivars(child);
2155 	pcicfgregs *cfg = &dinfo->cfg;
2156 	struct resource_list_entry *rle;
2157 	int actual, error, i, irqs[32];
2160 	/* Don't let count == 0 get us into trouble. */
2164 	/* If rid 0 is allocated, then fail. */
2165 	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2166 	if (rle != NULL && rle->res != NULL)
2169 	/* Already have allocated messages? */
2170 	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2173 	/* If MSI is blacklisted for this system, fail. */
2174 	if (pci_msi_blacklisted())
2177 	/* MSI capability present? */
2178 	if (cfg->msi.msi_location == 0 || !pci_do_msi)
2182 	device_printf(child,
2183 	    "attempting to allocate %d MSI vectors (%d supported)\n",
2184 	    *count, cfg->msi.msi_msgnum);
2186 	/* Don't ask for more than the device supports. */
2187 	actual = min(*count, cfg->msi.msi_msgnum);
2189 	/* Don't ask for more than 32 messages. */
2190 	actual = min(actual, 32);
2192 	/* MSI requires power of 2 number of messages. */
2193 	if (!powerof2(actual))
2197 	/* Try to allocate N messages. */
2198 	error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2210 	 * We now have N actual messages mapped onto SYS_RES_IRQ
2211 	 * resources in the irqs[] array, so add new resources
2212 	 * starting at rid 1.
2214 	for (i = 0; i < actual; i++)
2215 		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2216 		    irqs[i], irqs[i], 1);
2220 			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2225 			 * Be fancy and try to print contiguous runs
2226 			 * of IRQ values as ranges.  'run' is true if
2227 			 * we are in a range.
2229 			device_printf(child, "using IRQs %d", irqs[0]);
2231 			for (i = 1; i < actual; i++) {
2233 				/* Still in a run? */
2234 				if (irqs[i] == irqs[i - 1] + 1) {
2239 				/* Finish previous range. */
2241 					printf("-%d", irqs[i - 1]);
2245 				/* Start new range. */
2246 				printf(",%d", irqs[i]);
2249 			/* Unfinished range? */
2251 				printf("-%d", irqs[actual - 1]);
2252 			printf(" for MSI\n");
	/*
	 * Update control register with actual count.  The Multiple
	 * Message Enable field is log2 of the message count, hence
	 * ffs(actual) - 1 (actual is a power of two here).
	 */
2257 	ctrl = cfg->msi.msi_ctrl;
2258 	ctrl &= ~PCIM_MSICTRL_MME_MASK;
2259 	ctrl |= (ffs(actual) - 1) << 4;
2260 	cfg->msi.msi_ctrl = ctrl;
2261 	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2263 	/* Update counts of alloc'd messages. */
2264 	cfg->msi.msi_alloc = actual;
2265 	cfg->msi.msi_handlers = 0;
2270 /* Release the MSI messages associated with this device. */
2272 pci_release_msi_method(device_t dev, device_t child)
2274 	struct pci_devinfo *dinfo = device_get_ivars(child);
2275 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
2276 	struct resource_list_entry *rle;
2277 	int error, i, irqs[32];
	/*
	 * Try MSI-X first.  Anything other than ENODEV means MSI-X was
	 * in use (or failed); only fall through to MSI on ENODEV.
	 */
2280 	error = pci_release_msix(dev, child);
2281 	if (error != ENODEV)
2284 	/* Do we have any messages to release? */
2285 	if (msi->msi_alloc == 0)
2287 	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2289 	/* Make sure none of the resources are allocated. */
2290 	if (msi->msi_handlers > 0)
2292 	for (i = 0; i < msi->msi_alloc; i++) {
2293 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2294 		KASSERT(rle != NULL, ("missing MSI resource"));
2295 		if (rle->res != NULL)
2297 		irqs[i] = rle->start;
2300 	/* Update control register with 0 count. */
2301 	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2302 	    ("%s: MSI still enabled", __func__));
2303 	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2304 	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2307 	/* Release the messages. */
2308 	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2309 	for (i = 0; i < msi->msi_alloc; i++)
2310 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2312 	/* Update alloc count. */
2320  * Return the max supported MSI messages this device supports.
2321  * Basically, assuming the MD code can alloc messages, this function
2322  * should return the maximum value that pci_alloc_msi() can return.
2323  * Thus, it is subject to the tunables, etc.
2326 pci_msi_count_method(device_t dev, device_t child)
2328 	struct pci_devinfo *dinfo = device_get_ivars(child);
2329 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	/* Only report a count if MSI is enabled and the cap exists. */
2331 	if (pci_do_msi && msi->msi_location != 0)
2332 		return (msi->msi_msgnum);
2336 /* free pcicfgregs structure and all depending data structures */
2339 pci_freecfg(struct pci_devinfo *dinfo)
2341 	struct devlist *devlist_head;
2342 	struct pci_map *pm, *next;
2345 	devlist_head = &pci_devq;
	/* Free the cached VPD strings, if any were read. */
2347 	if (dinfo->cfg.vpd.vpd_reg) {
2348 		free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2349 		for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2350 			free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2351 		free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2352 		for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2353 			free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2354 		free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
	/* Free the BAR map entries (safe iteration: entries are freed). */
2356 	STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
	/* Unlink from the global device list and free the devinfo itself. */
2359 	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2360 	free(dinfo, M_DEVBUF);
2362 	/* increment the generation count */
2365 	/* we're losing one device */
2371  * PCI power management
/*
 * Set the power state (D0-D3) of 'child' via its power management
 * capability.  Returns EOPNOTSUPP if the device lacks the capability
 * or doesn't support the requested optional state (D1/D2).
 */
2374 pci_set_powerstate_method(device_t dev, device_t child, int state)
2376 	struct pci_devinfo *dinfo = device_get_ivars(child);
2377 	pcicfgregs *cfg = &dinfo->cfg;
2379 	int oldstate, highest, delay;
2381 	if (cfg->pp.pp_cap == 0)
2382 		return (EOPNOTSUPP);
2385 	 * Optimize a no state change request away.  While it would be OK to
2386 	 * write to the hardware in theory, some devices have shown odd
2387 	 * behavior when going from D3 -> D3.
2389 	oldstate = pci_get_powerstate(child);
2390 	if (oldstate == state)
2394 	 * The PCI power management specification states that after a state
2395 	 * transition between PCI power states, system software must
2396 	 * guarantee a minimal delay before the function accesses the device.
2397 	 * Compute the worst case delay that we need to guarantee before we
2398 	 * access the device.  Many devices will be responsive much more
2399 	 * quickly than this delay, but there are some that don't respond
2400 	 * instantly to state changes.  Transitions to/from D3 state require
2401 	 * 10ms, while D2 requires 200us, and D0/1 require none.  The delay
2402 	 * is done below with DELAY rather than a sleeper function because
2403 	 * this function can be called from contexts where we cannot sleep.
2405 	highest = (oldstate > state) ? oldstate : state;
2406 	if (highest == PCI_POWERSTATE_D3)
2408 	else if (highest == PCI_POWERSTATE_D2)
	/* Preserve the other status bits; only replace the D-state field. */
2412 	status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2413 	    & ~PCIM_PSTAT_DMASK;
2415 	case PCI_POWERSTATE_D0:
2416 		status |= PCIM_PSTAT_D0;
2418 	case PCI_POWERSTATE_D1:
		/* D1 is optional; verify the capability advertises it. */
2419 		if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2420 			return (EOPNOTSUPP);
2421 		status |= PCIM_PSTAT_D1;
2423 	case PCI_POWERSTATE_D2:
		/* D2 is optional; verify the capability advertises it. */
2424 		if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2425 			return (EOPNOTSUPP);
2426 		status |= PCIM_PSTAT_D2;
2428 	case PCI_POWERSTATE_D3:
2429 		status |= PCIM_PSTAT_D3;
2436 		pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2439 	PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the current power state (D0-D3) of 'child' from its power
 * management status register.  Devices without the PM capability are
 * reported as always being in D0.
 */
2446 pci_get_powerstate_method(device_t dev, device_t child)
2448 	struct pci_devinfo *dinfo = device_get_ivars(child);
2449 	pcicfgregs *cfg = &dinfo->cfg;
2453 	if (cfg->pp.pp_cap != 0) {
2454 		status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2455 		switch (status & PCIM_PSTAT_DMASK) {
2457 			result = PCI_POWERSTATE_D0;
2460 			result = PCI_POWERSTATE_D1;
2463 			result = PCI_POWERSTATE_D2;
2466 			result = PCI_POWERSTATE_D3;
2469 			result = PCI_POWERSTATE_UNKNOWN;
2473 		/* No support, device is always at D0 */
2474 		result = PCI_POWERSTATE_D0;
2480 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the PCI command register. */
2483 static __inline void
2484 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2488 	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2490 	PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the PCI command register. */
2493 static __inline void
2494 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2498 	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2500 	PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable bus mastering for 'child' via the command register. */
2504 pci_enable_busmaster_method(device_t dev, device_t child)
2506 	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable bus mastering for 'child' via the command register. */
2511 pci_disable_busmaster_method(device_t dev, device_t child)
2513 	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable I/O port or memory space decoding for 'child', selected by
 * 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2518 pci_enable_io_method(device_t dev, device_t child, int space)
2523 	case SYS_RES_IOPORT:
2524 		bit = PCIM_CMD_PORTEN;
2526 	case SYS_RES_MEMORY:
2527 		bit = PCIM_CMD_MEMEN;
2532 	pci_set_command_bit(dev, child, bit);
/*
 * Disable I/O port or memory space decoding for 'child', selected by
 * 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2537 pci_disable_io_method(device_t dev, device_t child, int space)
2542 	case SYS_RES_IOPORT:
2543 		bit = PCIM_CMD_PORTEN;
2545 	case SYS_RES_MEMORY:
2546 		bit = PCIM_CMD_MEMEN;
2551 	pci_clear_command_bit(dev, child, bit);
2556 * New style pci driver. Parent device is either a pci-host-bridge or a
2557 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Print a verbose description of a discovered PCI device: IDs,
 * location, class, command/status state, timing parameters, interrupt
 * routing, and power-management/MSI/MSI-X capability summaries.
 */
2561 pci_print_verbose(struct pci_devinfo *dinfo)
2565 		pcicfgregs *cfg = &dinfo->cfg;
2567 		printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2568 		    cfg->vendor, cfg->device, cfg->revid);
2569 		printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2570 		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
2571 		printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2572 		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2574 		printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2575 		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2576 		printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2577 		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2578 		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		/* intpin is 1-based (1 = INTA), hence the 'a' - 1 offset. */
2579 		if (cfg->intpin > 0)
2580 			printf("\tintpin=%c, irq=%d\n",
2581 			    cfg->intpin +'a' -1, cfg->intline);
2582 		if (cfg->pp.pp_cap) {
2585 			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2586 			printf("\tpowerspec %d  supports D0%s%s D3  current D%d\n",
2587 			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2588 			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2589 			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2590 			    status & PCIM_PSTAT_DMASK);
2592 		if (cfg->msi.msi_location) {
2595 			ctrl = cfg->msi.msi_ctrl;
2596 			printf("\tMSI supports %d message%s%s%s\n",
2597 			    cfg->msi.msi_msgnum,
2598 			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
2599 			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2600 			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2602 		if (cfg->msix.msix_location) {
2603 			printf("\tMSI-X supports %d message%s ",
2604 			    cfg->msix.msix_msgnum,
2605 			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			/* The table and PBA may share a BAR or use two. */
2606 			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2607 				printf("in map 0x%x\n",
2608 				    cfg->msix.msix_table_bar);
2610 				printf("in maps 0x%x and 0x%x\n",
2611 				    cfg->msix.msix_table_bar,
2612 				    cfg->msix.msix_pba_bar);
/* Return non-zero iff I/O-port decoding is enabled in the command register. */
2618 pci_porten(device_t dev)
2620 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return non-zero iff memory decoding is enabled in the command register. */
2624 pci_memen(device_t dev)
2626 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Read a BAR's current value into *mapp and size it with the classic
 * write-all-ones probe into *testvalp, restoring the original value
 * (and command register) before returning.
 */
2630 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2632 struct pci_devinfo *dinfo;
2633 pci_addr_t map, testval;
2638 * The device ROM BAR is special.  It is always a 32-bit
2639 * memory BAR.  Bit 0 is special and should not be set when
2642 dinfo = device_get_ivars(dev);
2643 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2644 map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps the ROM-enable bit (bit 0) clear while sizing. */
2645 pci_write_config(dev, reg, 0xfffffffe, 4);
2646 testval = pci_read_config(dev, reg, 4);
2647 pci_write_config(dev, reg, map, 4);
2649 *testvalp = testval;
2653 map = pci_read_config(dev, reg, 4);
2654 ln2range = pci_maprange(map);
/* 64-bit BAR: the upper dword lives in the next register. */
2656 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2659 * Disable decoding via the command register before
2660 * determining the BAR's length since we will be placing it in
2663 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2664 pci_write_config(dev, PCIR_COMMAND,
2665 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2668 * Determine the BAR's length by writing all 1's.  The bottom
2669 * log_2(size) bits of the BAR will stick as 0 when we read
2672 pci_write_config(dev, reg, 0xffffffff, 4);
2673 testval = pci_read_config(dev, reg, 4);
2674 if (ln2range == 64) {
2675 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2676 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2680 * Restore the original value of the BAR.  We may have reprogrammed
2681 * the BAR of the low-level console device and when booting verbose,
2682 * we need the console device addressable.
2684 pci_write_config(dev, reg, map, 4);
2686 pci_write_config(dev, reg + 4, map >> 32, 4);
/* Re-enable the decoding we turned off above. */
2687 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2690 *testvalp = testval;
/*
 * Program a BAR with a new base address (both dwords for a 64-bit BAR)
 * and refresh the cached pm_value by reading the register(s) back.
 */
2694 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2696 struct pci_devinfo *dinfo;
2699 /* The device ROM BAR is always a 32-bit memory BAR. */
2700 dinfo = device_get_ivars(dev);
2701 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2704 ln2range = pci_maprange(pm->pm_value);
2705 pci_write_config(dev, pm->pm_reg, base, 4);
2707 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
/* Cache what the hardware actually latched, not what we asked for. */
2708 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2710 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2711 pm->pm_reg + 4, 4) << 32;
/*
 * Look up the pci_map entry for config register `reg` in the device's
 * BAR list; the visible scan returns the matching entry if present.
 */
2715 pci_find_bar(device_t dev, int reg)
2717 struct pci_devinfo *dinfo;
2720 dinfo = device_get_ivars(dev);
2721 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2722 if (pm->pm_reg == reg)
/*
 * Report whether decoding for this BAR is currently enabled: a ROM BAR
 * additionally requires its own PCIM_BIOS_ENABLE bit; otherwise the
 * answer comes from the MEMEN/PORTEN bit matching the BAR's type.
 */
2729 pci_bar_enabled(device_t dev, struct pci_map *pm)
2731 struct pci_devinfo *dinfo;
2734 dinfo = device_get_ivars(dev);
/* A ROM BAR with its enable bit clear is never decoded. */
2735 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2736 !(pm->pm_value & PCIM_BIOS_ENABLE))
2738 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2739 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2740 return ((cmd & PCIM_CMD_MEMEN) != 0);
2742 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Allocate a pci_map record for BAR register `reg` and insert it into
 * the device's map list, kept sorted by register offset.  Asserts that
 * no duplicate entry exists for the same register.
 */
2745 static struct pci_map *
2746 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2748 struct pci_devinfo *dinfo;
2749 struct pci_map *pm, *prev;
2751 dinfo = device_get_ivars(dev);
2752 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2754 pm->pm_value = value;
/* Walk the sorted list to find the insertion point. */
2756 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2757 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2759 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2760 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2764 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2766 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every tracked BAR from its cached pm_value (e.g. after a
 * power-state transition clobbered config space).  ROM BARs are
 * special-cased; 64-bit BARs get both dwords written.
 */
2771 pci_restore_bars(device_t dev)
2773 struct pci_devinfo *dinfo;
2777 dinfo = device_get_ivars(dev);
2778 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2779 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2782 ln2range = pci_maprange(pm->pm_value);
2783 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2785 pci_write_config(dev, pm->pm_reg + 4,
2786 pm->pm_value >> 32, 4);
2791 * Add a resource based on a pci map register. Return 1 if the map
2792 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * Size one BAR, record it, add it to the device's resource list and try
 * to reserve its range from the parent bridge.  Returns the number of
 * BAR registers consumed (1 for 32-bit, 2 for 64-bit).
 *
 * Fix: two resource_list_reserve() calls had their `&reg` argument
 * mangled into the mojibake character "(R)" by an HTML-entity round
 * trip; restored to `&reg`.
 */
2795 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2796 int force, int prefetch)
2799 pci_addr_t base, map, testval;
2800 pci_addr_t start, end, count;
2801 int barlen, basezero, flags, maprange, mapsize, type;
2803 struct resource *res;
2806 * The BAR may already exist if the device is a CardBus card
2807 * whose CIS is stored in this BAR.
2809 pm = pci_find_bar(dev, reg);
2811 maprange = pci_maprange(pm->pm_value);
2812 barlen = maprange == 64 ? 2 : 1;
2816 pci_read_bar(dev, reg, &map, &testval);
2817 if (PCI_BAR_MEM(map)) {
2818 type = SYS_RES_MEMORY;
2819 if (map & PCIM_BAR_MEM_PREFETCH)
2822 type = SYS_RES_IOPORT;
2823 mapsize = pci_mapsize(testval);
2824 base = pci_mapbase(map);
2825 #ifdef __PCI_BAR_ZERO_VALID
2828 basezero = base == 0;
2830 maprange = pci_maprange(map);
2831 barlen = maprange == 64 ? 2 : 1;
2834 * For I/O registers, if bottom bit is set, and the next bit up
2835 * isn't clear, we know we have a BAR that doesn't conform to the
2836 * spec, so ignore it.  Also, sanity check the size of the data
2837 * areas to the type of memory involved.  Memory must be at least
2838 * 16 bytes in size, while I/O ranges must be at least 4.
2840 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2842 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2843 (type == SYS_RES_IOPORT && mapsize < 2))
2846 /* Save a record of this BAR. */
2847 pm = pci_add_bar(dev, reg, map, mapsize)
2849 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2850 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2851 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2852 printf(", port disabled\n");
2853 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2854 printf(", memory disabled\n");
2856 printf(", enabled\n");
2860 * If base is 0, then we have problems if this architecture does
2861 * not allow that.  It is best to ignore such entries for the
2862 * moment.  These will be allocated later if the driver specifically
2863 * requests them.  However, some removable busses look better when
2864 * all resources are allocated, so allow '0' to be overridden.
2866 * Similarly treat maps whose value is the same as the test value
2867 * read back.  These maps have had all f's written to them by the
2868 * BIOS in an attempt to disable the resources.
2870 if (!force && (basezero || map == testval))
2872 if ((u_long)base != base) {
2874 "pci%d:%d:%d:%d bar %#x too many address bits",
2875 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2876 pci_get_function(dev), reg);
2881 * This code theoretically does the right thing, but has
2882 * undesirable side effects in some cases where peripherals
2883 * respond oddly to having these bits enabled.  Let the user
2884 * be able to turn them off (since pci_enable_io_modes is 1 by
2887 if (pci_enable_io_modes) {
2888 /* Turn on resources that have been left off by a lazy BIOS */
2889 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2890 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2891 cmd |= PCIM_CMD_PORTEN;
2892 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2894 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2895 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2896 cmd |= PCIM_CMD_MEMEN;
2897 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2900 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2902 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2906 count = (pci_addr_t)1 << mapsize;
2907 flags = RF_ALIGNMENT_LOG2(mapsize);
2909 flags |= RF_PREFETCHABLE;
2910 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
2911 start = 0;	/* Let the parent decide. */
2915 end = base + count - 1;
2917 resource_list_add(rl, type, reg, start, end, count);
2920 * Try to allocate the resource for this BAR from our parent
2921 * so that this resource range is already reserved.  The
2922 * driver for this device will later inherit this resource in
2923 * pci_alloc_resource().
2925 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
2927 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2929 * If the allocation fails, try to allocate a resource for
2930 * this BAR using any available range.  The firmware felt
2931 * it was important enough to assign a resource, so don't
2932 * disable decoding if we can help it.
2934 resource_list_delete(rl, type, reg);
2935 resource_list_add(rl, type, reg, 0, ~0ul, count);
2936 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0ul,
2941 * If the allocation fails, delete the resource list entry
2942 * and disable decoding for this device.
2944 * If the driver requests this resource in the future,
2945 * pci_reserve_map() will try to allocate a fresh
2948 resource_list_delete(rl, type, reg);
2949 pci_disable_io(dev, type);
2952 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2953 pci_get_domain(dev), pci_get_bus(dev),
2954 pci_get_slot(dev), pci_get_function(dev), reg);
2956 start = rman_get_start(res);
/* Program the BAR with the address the parent actually granted. */
2957 pci_write_bar(dev, pm, start);
2963 * For ATA devices we need to decide early what addressing mode to use.
2964 * Legacy demands that the primary and secondary ATA ports sits on the
2965 * same addresses that old ISA hardware did. This dictates that we use
2966 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Map resources for an ATA controller.  If the device reports (and
 * accepts) PCI native mode, its BARs are used; otherwise the legacy
 * ISA-compatible port ranges (0x1f0/0x3f6 primary, 0x170/0x376
 * secondary) are added directly.  BAR(4)/BAR(5) (busmaster DMA) are
 * always mapped.
 */
2970 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2971 uint32_t prefetchmask)
2973 int rid, type, progif;
2975 /* if this device supports PCI native addressing use it */
2976 progif = pci_read_config(dev, PCIR_PROGIF, 1);
/* 0x8a: both channels claim the mode is programmable. */
2977 if ((progif & 0x8a) == 0x8a) {
2978 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2979 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2980 printf("Trying ATA native PCI addressing mode\n");
/* 0x05 flips both channels into native mode. */
2981 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
/* Re-read: the write may not have stuck on all hardware. */
2985 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2986 type = SYS_RES_IOPORT;
2987 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2988 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2989 prefetchmask & (1 << 0));
2990 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2991 prefetchmask & (1 << 1));
/* Legacy primary channel: command block + control port. */
2994 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2995 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
2998 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2999 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
3002 if (progif & PCIP_STORAGE_IDE_MODESEC) {
3003 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3004 prefetchmask & (1 << 2));
3005 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3006 prefetchmask & (1 << 3));
/* Legacy secondary channel: command block + control port. */
3009 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3010 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3013 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3014 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
/* Busmaster DMA registers are mapped regardless of mode. */
3017 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3018 prefetchmask & (1 << 4));
3019 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3020 prefetchmask & (1 << 5));
/*
 * Determine the IRQ for a device with an interrupt pin: a user tunable
 * wins, then either the intline register or a route request to the
 * parent bus (forced re-route when force_route is set).  The result is
 * written back to PCIR_INTLINE if it changed and added as the rid 0
 * SYS_RES_IRQ resource.
 */
3024 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3026 struct pci_devinfo *dinfo = device_get_ivars(dev);
3027 pcicfgregs *cfg = &dinfo->cfg;
3028 char tunable_name[64];
3031 /* Has to have an intpin to have an interrupt. */
3032 if (cfg->intpin == 0)
3035 /* Let the user override the IRQ with a tunable. */
3036 irq = PCI_INVALID_IRQ;
3037 snprintf(tunable_name, sizeof(tunable_name),
3038 "hw.pci%d.%d.%d.INT%c.irq",
3039 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject tunable values outside the usable 1..254 range. */
3040 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3041 irq = PCI_INVALID_IRQ;
3044 * If we didn't get an IRQ via the tunable, then we either use the
3045 * IRQ value in the intline register or we ask the bus to route an
3046 * interrupt for us.  If force_route is true, then we only use the
3047 * value in the intline register if the bus was unable to assign an
3050 if (!PCI_INTERRUPT_VALID(irq)) {
3051 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3052 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3053 if (!PCI_INTERRUPT_VALID(irq))
3057 /* If after all that we don't have an IRQ, just bail. */
3058 if (!PCI_INTERRUPT_VALID(irq))
3061 /* Update the config register if it changed. */
3062 if (irq != cfg->intline) {
3064 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3067 /* Add this IRQ as rid 0 interrupt resource. */
3068 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3071 /* Perform early OHCI takeover from SMM. */
/*
 * Take OHCI controller ownership away from SMM/BIOS before the USB
 * stack attaches: request an ownership change via OHCI_OCR, poll up to
 * 100 iterations for OHCI_IR to clear, reset the controller if SMM
 * never responds, then mask all interrupts.
 */
3073 ohci_early_takeover(device_t self)
3075 struct resource *res;
3081 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3085 ctl = bus_read_4(res, OHCI_CONTROL);
/* OHCI_IR set means SMM currently owns the controller. */
3086 if (ctl & OHCI_IR) {
3088 printf("ohci early: "
3089 "SMM active, request owner change\n");
3090 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3091 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3093 ctl = bus_read_4(res, OHCI_CONTROL);
3095 if (ctl & OHCI_IR) {
3097 printf("ohci early: "
3098 "SMM does not respond, resetting\n");
/* Last resort: force a host-controller reset. */
3099 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3101 /* Disable interrupts */
3102 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3105 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3108 /* Perform early UHCI takeover from SMM. */
/*
 * Take UHCI controller ownership away from SMM/BIOS: keep only the
 * PIRQD enable bit in the legacy-support register, then mask the
 * controller's interrupts through its I/O window.
 */
3110 uhci_early_takeover(device_t self)
3112 struct resource *res;
3116 * Set the PIRQD enable bit and switch off all the others.  We don't
3117 * want legacy support to interfere with us XXX Does this also mean
3118 * that the BIOS won't touch the keyboard anymore if it is connected
3119 * to the ports of the root hub?
3121 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3123 /* Disable interrupts */
3124 rid = PCI_UHCI_BASE_REG;
3125 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3127 bus_write_2(res, UHCI_INTR, 0);
3128 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3132 /* Perform early EHCI takeover from SMM. */
/*
 * Take EHCI controller ownership away from SMM/BIOS: walk the extended
 * capability list for the legacy-support capability, set the OS
 * semaphore, poll up to 100 iterations for the BIOS semaphore to
 * clear, then mask the controller's interrupts.
 */
3134 ehci_early_takeover(device_t self)
3136 struct resource *res;
3146 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3150 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3152 /* Synchronise with the BIOS if it owns the controller. */
3153 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3154 eecp = EHCI_EECP_NEXT(eec)) {
/* EHCI extended capabilities live in config space, not MMIO. */
3155 eec = pci_read_config(self, eecp, 4);
3156 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3159 bios_sem = pci_read_config(self, eecp +
3160 EHCI_LEGSUP_BIOS_SEM, 1);
/* BIOS semaphore clear: nothing to take over. */
3161 if (bios_sem == 0) {
3165 printf("ehci early: "
3166 "SMM active, request owner change\n");
3168 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3170 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3172 bios_sem = pci_read_config(self, eecp +
3173 EHCI_LEGSUP_BIOS_SEM, 1);
3176 if (bios_sem != 0) {
3178 printf("ehci early: "
3179 "SMM does not respond\n");
3181 /* Disable interrupts */
3182 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3183 bus_write_4(res, offs + EHCI_USBINTR, 0);
3185 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3188 /* Perform early XHCI takeover from SMM. */
/*
 * Take XHCI controller ownership away from SMM/BIOS: walk the MMIO
 * extended capability list for the USB legacy capability, set the OS
 * semaphore, poll up to ~5 seconds for the BIOS semaphore to clear,
 * then clear USBCMD to disable the controller's interrupts.
 */
3190 xhci_early_takeover(device_t self)
3192 struct resource *res;
3202 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3206 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3210 /* Synchronise with the BIOS if it owns the controller. */
/* XECP offsets are in dwords, hence the << 2 scaling. */
3211 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3212 eecp += XHCI_XECP_NEXT(eec) << 2) {
3213 eec = bus_read_4(res, eecp);
3215 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3218 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3223 printf("xhci early: "
3224 "SMM active, request owner change\n");
3226 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3228 /* wait a maximum of 5 second */
3230 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3232 bios_sem = bus_read_1(res, eecp +
3233 XHCI_XECP_BIOS_SEM);
3236 if (bios_sem != 0) {
3238 printf("xhci early: "
3239 "SMM does not respond\n");
3242 /* Disable interrupts */
3243 offs = bus_read_1(res, XHCI_CAPLENGTH);
3244 bus_write_4(res, offs + XHCI_USBCMD, 0);
/* Read USBSTS back to flush the posted write. */
3245 bus_read_4(res, offs + XHCI_USBSTS);
3247 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3250 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
 * Reserve the secondary/subordinate bus range programmed into a
 * PCI-PCI or CardBus bridge as a PCI_RES_BUS resource.  Applies two
 * quirks (Intel Orion supplementary bus register, Compal/Compaq R3000
 * bad subordinate value) first.  On reservation failure, or when
 * pci_clear_buses is set, the secbus/subbus registers are cleared so
 * the bus range is renumbered later.
 */
3252 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3253 struct resource_list *rl)
3255 struct resource *res;
3257 u_long start, end, count;
3258 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
/* Bridge vs. CardBus headers keep these registers at different offsets. */
3260 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3261 case PCIM_HDRTYPE_BRIDGE:
3262 sec_reg = PCIR_SECBUS_1;
3263 sub_reg = PCIR_SUBBUS_1;
3265 case PCIM_HDRTYPE_CARDBUS:
3266 sec_reg = PCIR_SECBUS_2;
3267 sub_reg = PCIR_SUBBUS_2;
3274 * If the existing bus range is valid, attempt to reserve it
3275 * from our parent.  If this fails for any reason, clear the
3276 * secbus and subbus registers.
3278 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3279 * This would at least preserve the existing sec_bus if it is
3282 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3283 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3285 /* Quirk handling. */
3286 switch (pci_get_devid(dev)) {
3287 case 0x12258086:		/* Intel 82454KX/GX (Orion) */
3288 sup_bus = pci_read_config(dev, 0x41, 1);
3289 if (sup_bus != 0xff) {
3290 sec_bus = sup_bus + 1;
3291 sub_bus = sup_bus + 1;
3292 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3293 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3298 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
3299 if ((cp = getenv("smbios.planar.maker")) == NULL)
3301 if (strncmp(cp, "Compal", 6) != 0) {
3306 if ((cp = getenv("smbios.planar.product")) == NULL)
3308 if (strncmp(cp, "08A0", 4) != 0) {
3313 if (sub_bus < 0xa) {
3315 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3321 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
/* Only a non-empty, well-ordered range is worth reserving. */
3322 if (sec_bus > 0 && sub_bus >= sec_bus) {
3325 count = end - start + 1;
3327 resource_list_add(rl, PCI_RES_BUS, 0, 0ul, ~0ul, count);
3330 * If requested, clear secondary bus registers in
3331 * bridge devices to force a complete renumbering
3332 * rather than reserving the existing range.  However,
3333 * preserve the existing size.
3335 if (pci_clear_buses)
3339 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3340 start, end, count, 0);
3346 "pci%d:%d:%d:%d secbus failed to allocate\n",
3347 pci_get_domain(dev), pci_get_bus(dev),
3348 pci_get_slot(dev), pci_get_function(dev));
/* Reservation failed: clear the registers so renumbering happens. */
3352 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3353 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
/*
 * Allocate a PCI_RES_BUS range for a bridge child on demand ("lazy"
 * allocation).  If no resource-list entry exists yet one is added and
 * reserved, and the bridge's secbus/subbus registers are programmed
 * from the granted range before the normal resource_list_alloc().
 */
3356 static struct resource *
3357 pci_alloc_secbus(device_t dev, device_t child, int *rid, u_long start,
3358 u_long end, u_long count, u_int flags)
3360 struct pci_devinfo *dinfo;
3362 struct resource_list *rl;
3363 struct resource *res;
3364 int sec_reg, sub_reg;
3366 dinfo = device_get_ivars(child);
3368 rl = &dinfo->resources;
/* Register offsets differ between bridge and CardBus headers. */
3369 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3370 case PCIM_HDRTYPE_BRIDGE:
3371 sec_reg = PCIR_SECBUS_1;
3372 sub_reg = PCIR_SUBBUS_1;
3374 case PCIM_HDRTYPE_CARDBUS:
3375 sec_reg = PCIR_SECBUS_2;
3376 sub_reg = PCIR_SUBBUS_2;
3385 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3386 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
3387 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
/* Reserve without RF_ACTIVE; activation happens in the final alloc. */
3388 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3389 start, end, count, flags & ~RF_ACTIVE);
3391 resource_list_delete(rl, PCI_RES_BUS, *rid);
3392 device_printf(child, "allocating %lu bus%s failed\n",
3393 count, count == 1 ? "" : "es");
3397 device_printf(child,
3398 "Lazy allocation of %lu bus%s at %lu\n", count,
3399 count == 1 ? "" : "es", rman_get_start(res));
/* Program the bridge with the range we were granted. */
3400 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3401 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3403 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3404 end, count, flags));
/*
 * Populate a new child's resource list: BARs (with ATA legacy-mode and
 * per-device quirk handling), the INTx interrupt, and early USB
 * controller takeover from SMM for xhci/ehci/ohci/uhci class devices.
 */
3409 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3411 struct pci_devinfo *dinfo;
3413 struct resource_list *rl;
3414 const struct pci_quirk *q;
3418 dinfo = device_get_ivars(dev);
3420 rl = &dinfo->resources;
3421 devid = (cfg->device << 16) | cfg->vendor;
3423 /* ATA devices needs special map treatment */
3424 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3425 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3426 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3427 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3428 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3429 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* Note: the loop index advances by pci_add_map's return (1 or 2). */
3431 for (i = 0; i < cfg->nummaps;) {
3433 * Skip quirked resources.
3435 for (q = &pci_quirks[0]; q->devid != 0; q++)
3436 if (q->devid == devid &&
3437 q->type == PCI_QUIRK_UNMAP_REG &&
3438 q->arg1 == PCIR_BAR(i))
3440 if (q->devid != 0) {
3444 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3445 prefetchmask & (1 << i));
3449 * Add additional, quirked resources.
3451 for (q = &pci_quirks[0]; q->devid != 0; q++)
3452 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3453 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3455 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3456 #ifdef __PCI_REROUTE_INTERRUPT
3458 * Try to re-route interrupts.  Sometimes the BIOS or
3459 * firmware may leave bogus values in these registers.
3460 * If the re-route fails, then just stick with what we
3463 pci_assign_interrupt(bus, dev, 1);
3465 pci_assign_interrupt(bus, dev, 0);
3469 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3470 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3471 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3472 xhci_early_takeover(dev);
3473 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3474 ehci_early_takeover(dev);
3475 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3476 ohci_early_takeover(dev);
3477 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3478 uhci_early_takeover(dev);
3481 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3483 * Reserve resources for secondary bus ranges behind bridge
3486 pci_reserve_secbus(bus, dev, cfg, rl);
/*
 * Read the config space for one domain/bus/slot/function and, if a
 * device is present, add it as a child of `dev`.
 */
3490 static struct pci_devinfo *
3491 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3492 int slot, int func, size_t dinfo_size)
3494 struct pci_devinfo *dinfo;
3496 dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size);
3498 pci_add_child(dev, dinfo);
3504 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3506 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3507 device_t pcib = device_get_parent(dev);
3508 struct pci_devinfo *dinfo;
3510 int s, f, pcifunchigh;
3515 * Try to detect a device at slot 0, function 0. If it exists, try to
3516 * enable ARI. We must enable ARI before detecting the rest of the
3517 * functions on this bus as ARI changes the set of slots and functions
3518 * that are legal on this bus.
3520 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0,
3522 if (dinfo != NULL && pci_enable_ari)
3523 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);
3526 * Start looking for new devices on slot 0 at function 1 because we
3527 * just identified the device at slot 0, function 0.
3531 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3532 ("dinfo_size too small"));
3533 maxslots = PCIB_MAXSLOTS(pcib);
3534 for (s = 0; s <= maxslots; s++, first_func = 0) {
3538 hdrtype = REG(PCIR_HDRTYPE, 1);
3539 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3541 if (hdrtype & PCIM_MFDEV)
3542 pcifunchigh = PCIB_MAXFUNCS(pcib);
3543 for (f = first_func; f <= pcifunchigh; f++)
3544 pci_identify_function(pcib, dev, domain, busno, s, f,
/*
 * Create the newbus child for a discovered function: attach ivars, set
 * up its resource list, save then restore its config state, print the
 * verbose summary and populate its resources.
 */
3551 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3553 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3554 device_set_ivars(dinfo->cfg.dev, dinfo);
3555 resource_list_init(&dinfo->resources);
3556 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3557 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3558 pci_print_verbose(dinfo);
3559 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/* Generic PCI bus probe: always matches, at low priority. */
3563 pci_probe(device_t dev)
3566 device_set_desc(dev, "PCI bus");
3568 /* Allow other subclasses to override this driver. */
3569 return (BUS_PROBE_GENERIC);
/*
 * Attach work shared by pci and its subclasses: claim this bus number
 * from the parent bridge and, when PCI_DMA_BOUNDARY is configured,
 * create a boundary-constrained DMA tag for top-level PCI buses
 * (child pci buses inherit the parent's tag instead).
 */
3573 pci_attach_common(device_t dev)
3575 struct pci_softc *sc;
3577 #ifdef PCI_DMA_BOUNDARY
3578 int error, tag_valid;
3584 sc = device_get_softc(dev);
3585 domain = pcib_get_domain(dev);
3586 busno = pcib_get_bus(dev);
3589 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
3591 if (sc->sc_bus == NULL) {
3592 device_printf(dev, "failed to allocate bus number\n");
3597 device_printf(dev, "domain=%d, physical bus=%d\n",
3599 #ifdef PCI_DMA_BOUNDARY
/* Only top-level PCI buses (parent's parent is not pci) get a new tag. */
3601 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3602 devclass_find("pci")) {
3603 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3604 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3605 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3606 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3608 device_printf(dev, "Failed to create DMA tag: %d\n",
/* Fallback: inherit the parent's DMA tag. */
3615 sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Attach the PCI bus: common setup, then enumerate children using the
 * domain/bus numbers supplied by the parent bridge.
 */
3620 pci_attach(device_t dev)
3622 int busno, domain, error;
3624 error = pci_attach_common(dev);
3629 * Since there can be multiple independently numbered PCI
3630 * busses on systems with multiple PCI domains, we can't use
3631 * the unit number to decide which bus we are probing.  We ask
3632 * the parent pcib what our domain and bus numbers are.
3634 domain = pcib_get_domain(dev);
3635 busno = pcib_get_bus(dev);
3636 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3637 return (bus_generic_attach(dev));
/*
 * Detach the PCI bus: detach children generically, then release the
 * bus-number resource claimed in pci_attach_common().
 */
3642 pci_detach(device_t dev)
3644 struct pci_softc *sc;
3647 error = bus_generic_detach(dev);
3650 sc = device_get_softc(dev);
3651 return (bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus));
/*
 * Put each attached child in the requested power state, letting the
 * parent bridge's PCIB_POWER_FOR_SLEEP override the chosen state.
 */
3656 pci_set_power_children(device_t dev, device_t *devlist, int numdevs,
3659 device_t child, pcib;
3663 * Set the device to the given state.  If the firmware suggests
3664 * a different power state, use it instead.  If power management
3665 * is not present, the firmware is responsible for managing
3666 * device power.  Skip children who aren't attached since they
3667 * are handled separately.
3669 pcib = device_get_parent(dev);
3670 for (i = 0; i < numdevs; i++) {
3673 if (device_is_attached(child) &&
3674 PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
3675 pci_set_powerstate(child, dstate);
/*
 * Suspend the bus: save each child's config space, suspend the
 * children generically, then (if pci_do_power_suspend) move them to
 * their sleep power state.
 */
3680 pci_suspend(device_t dev)
3682 device_t child, *devlist;
3683 struct pci_devinfo *dinfo;
3684 int error, i, numdevs;
3687 * Save the PCI configuration space for each child and set the
3688 * device in the appropriate power state for this sleep state.
3690 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3692 for (i = 0; i < numdevs; i++) {
3694 dinfo = device_get_ivars(child);
3695 pci_cfg_save(child, dinfo, 0);
3698 /* Suspend devices before potentially powering them down. */
3699 error = bus_generic_suspend(dev);
/* Suspend failed: free the list and bail without powering down. */
3701 free(devlist, M_TEMP);
3704 if (pci_do_power_suspend)
3705 pci_set_power_children(dev, devlist, numdevs,
3707 free(devlist, M_TEMP);
/*
 * Resume the bus: power children back up (if pci_do_power_resume),
 * restore each child's config space, then resume display/memory/
 * bridge/base-peripheral class devices before everything else.
 */
3712 pci_resume(device_t dev)
3714 device_t child, *devlist;
3715 struct pci_devinfo *dinfo;
3716 int error, i, numdevs;
3719 * Set each child to D0 and restore its PCI configuration space.
3721 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3723 if (pci_do_power_resume)
3724 pci_set_power_children(dev, devlist, numdevs,
3727 /* Now the device is powered up, restore its config space. */
3728 for (i = 0; i < numdevs; i++) {
3730 dinfo = device_get_ivars(child);
3732 pci_cfg_restore(child, dinfo);
/* Re-save state for children with no driver attached. */
3733 if (!device_is_attached(child))
3734 pci_cfg_save(child, dinfo, 1);
3738 * Resume critical devices first, then everything else later.
3740 for (i = 0; i < numdevs; i++) {
3742 switch (pci_get_class(child)) {
3746 case PCIC_BASEPERIPH:
3747 DEVICE_RESUME(child);
/* Second pass: resume the classes skipped above. */
3751 for (i = 0; i < numdevs; i++) {
3753 switch (pci_get_class(child)) {
3757 case PCIC_BASEPERIPH:
3760 DEVICE_RESUME(child);
3763 free(devlist, M_TEMP);
/*
 * Locate the preloaded "pci_vendor_data" module and point the global
 * vendor database (pci_vendordata/pci_vendordata_size) at it,
 * terminating the buffer with a newline.
 */
3768 pci_load_vendor_data(void)
3774 data = preload_search_by_type("pci_vendor_data");
3776 ptr = preload_fetch_addr(data);
3777 sz = preload_fetch_size(data);
3778 if (ptr != NULL && sz != 0) {
3779 pci_vendordata = ptr;
3780 pci_vendordata_size = sz;
3781 /* terminate the database */
3782 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Newbus driver_added hook: when a new PCI driver registers, re-probe
 * every driverless child (restoring its config space first, since an
 * unattached device may have been powered down).
 */
3788 pci_driver_added(device_t dev, driver_t *driver)
3793 struct pci_devinfo *dinfo;
3797 device_printf(dev, "driver added\n");
3798 DEVICE_IDENTIFY(driver, dev);
3799 if (device_get_children(dev, &devlist, &numdevs) != 0)
3801 for (i = 0; i < numdevs; i++) {
/* Only children without an attached driver are candidates. */
3803 if (device_get_state(child) != DS_NOTPRESENT)
3805 dinfo = device_get_ivars(child);
3806 pci_print_verbose(dinfo);
3808 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3809 pci_cfg_restore(child, dinfo);
3810 if (device_probe_and_attach(child) != 0)
3811 pci_child_detached(dev, child);
3813 free(devlist, M_TEMP);
/*
 * Set up an interrupt handler for a child.  After the generic setup,
 * direct children get MSI/MSI-X bookkeeping: the parent bridge maps
 * the vector on first use, the message is enabled/unmasked, and INTx
 * is disabled (unless the device has PCI_QUIRK_MSI_INTX_BUG, which
 * needs INTx "enabled" for MSI to function).  On mapping failure the
 * just-installed handler is torn down again.
 */
3817 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3818 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3820 struct pci_devinfo *dinfo;
3821 struct msix_table_entry *mte;
3822 struct msix_vector *mv;
3828 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3833 /* If this is not a direct child, just bail out. */
3834 if (device_get_parent(child) != dev) {
3839 rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt; rid > 0 is MSI/MSI-X. */
3841 /* Make sure that INTx is enabled */
3842 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3845 * Check to see if the interrupt is MSI or MSI-X.
3846 * Ask our parent to map the MSI and give
3847 * us the address and data register values.
3848 * If we fail for some reason, teardown the
3849 * interrupt handler.
3851 dinfo = device_get_ivars(child);
3852 if (dinfo->cfg.msi.msi_alloc > 0) {
/* First handler for MSI: ask the bridge for address/data. */
3853 if (dinfo->cfg.msi.msi_addr == 0) {
3854 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3855 ("MSI has handlers, but vectors not mapped"));
3856 error = PCIB_MAP_MSI(device_get_parent(dev),
3857 child, rman_get_start(irq), &addr, &data);
3860 dinfo->cfg.msi.msi_addr = addr;
3861 dinfo->cfg.msi.msi_data = data;
3863 if (dinfo->cfg.msi.msi_handlers == 0)
3864 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3865 dinfo->cfg.msi.msi_data);
3866 dinfo->cfg.msi.msi_handlers++;
3868 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3869 ("No MSI or MSI-X interrupts allocated"));
3870 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3871 ("MSI-X index too high"));
/* MSI-X rids are 1-based; table entries are 0-based. */
3872 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3873 KASSERT(mte->mte_vector != 0, ("no message vector"));
3874 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3875 KASSERT(mv->mv_irq == rman_get_start(irq),
3877 if (mv->mv_address == 0) {
3878 KASSERT(mte->mte_handlers == 0,
3879 ("MSI-X table entry has handlers, but vector not mapped"));
3880 error = PCIB_MAP_MSI(device_get_parent(dev),
3881 child, rman_get_start(irq), &addr, &data);
3884 mv->mv_address = addr;
3887 if (mte->mte_handlers == 0) {
3888 pci_enable_msix(child, rid - 1, mv->mv_address,
3890 pci_unmask_msix(child, rid - 1);
3892 mte->mte_handlers++;
3896 * Make sure that INTx is disabled if we are using MSI/MSI-X,
3897 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
3898 * in which case we "enable" INTx so MSI/MSI-X actually works.
3900 if (!pci_has_quirk(pci_get_devid(child),
3901 PCI_QUIRK_MSI_INTX_BUG))
3902 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3904 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Mapping failed above: undo the generic setup. */
3907 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus method: remove a previously installed interrupt handler.  For
 * direct children this also decrements the MSI/MSI-X handler
 * reference count, disabling MSI or masking the MSI-X table entry
 * when the last handler for that message goes away.
 * NOTE(review): some original lines (returns, braces) are absent from
 * this excerpt.
 */
3917 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3920 struct msix_table_entry *mte;
3921 struct resource_list_entry *rle;
3922 struct pci_devinfo *dinfo;
/* Only an active IRQ resource can be torn down. */
3925 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3928 /* If this isn't a direct child, just bail out */
3929 if (device_get_parent(child) != dev)
3930 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3932 rid = rman_get_rid(irq);
/* Disable INTx while message state is being changed. */
3935 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3938 * Check to see if the interrupt is MSI or MSI-X. If so,
3939 * decrement the appropriate handlers count and mask the
3940 * MSI-X message, or disable MSI messages if the count
3943 dinfo = device_get_ivars(child);
/* Sanity check: the resource must match our resource-list entry. */
3944 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3945 if (rle->res != irq)
3947 if (dinfo->cfg.msi.msi_alloc > 0) {
3948 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3949 ("MSI-X index too high"))
3950 if (dinfo->cfg.msi.msi_handlers == 0)
3952 dinfo->cfg.msi.msi_handlers--;
/* Last MSI handler gone: turn MSI off in the device. */
3953 if (dinfo->cfg.msi.msi_handlers == 0)
3954 pci_disable_msi(child);
3956 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3957 ("No MSI or MSI-X interrupts allocated"));
3958 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3959 ("MSI-X index too high"));
3960 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3961 if (mte->mte_handlers == 0)
3963 mte->mte_handlers--;
/* Last MSI-X handler gone: mask this table entry. */
3964 if (mte->mte_handlers == 0)
3965 pci_mask_msix(child, rid - 1);
3968 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3971 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus method: print the one-line boot announcement for an attached
 * child, listing its port/memory/IRQ resource ranges and its
 * slot.function address.  Accumulates and (per bus-method convention)
 * returns the number of characters printed.
 */
3976 pci_print_child(device_t dev, device_t child)
3978 struct pci_devinfo *dinfo;
3979 struct resource_list *rl;
3982 dinfo = device_get_ivars(child);
3983 rl = &dinfo->resources;
3985 retval += bus_print_child_header(dev, child);
3987 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3988 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3989 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
/* Note: these are the parent bus's flags, not the child's. */
3990 if (device_get_flags(dev))
3991 retval += printf(" flags %#x", device_get_flags(dev));
3993 retval += printf(" at device %d.%d", pci_get_slot(child),
3994 pci_get_function(child));
3996 retval += bus_print_child_domain(dev, child);
3997 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass description table consulted by pci_probe_nomatch()
 * when no driver attaches to a device.  A subclass of -1 is the
 * catch-all entry for the whole class; "report" selects whether the
 * entry is announced always (1) or only under bootverbose (0).
 * NOTE(review): the struct declaration header precedes this excerpt.
 */
4006 int report; /* 0 = bootverbose, 1 = always */
4008 } pci_nomatch_tab[] = {
4009 {PCIC_OLD, -1, 1, "old"},
4010 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4011 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4012 {PCIC_STORAGE, -1, 1, "mass storage"},
4013 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
4014 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
4015 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
4016 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
4017 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
4018 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
4019 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
4020 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
4021 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
4022 {PCIC_NETWORK, -1, 1, "network"},
4023 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
4024 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
4025 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
4026 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
4027 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
4028 {PCIC_DISPLAY, -1, 1, "display"},
4029 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
4030 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
4031 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
4032 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4033 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
4034 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
4035 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
4036 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
4037 {PCIC_MEMORY, -1, 1, "memory"},
4038 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
4039 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
4040 {PCIC_BRIDGE, -1, 1, "bridge"},
4041 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4042 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4043 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4044 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4045 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4046 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4047 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4048 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4049 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4050 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4051 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
4052 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
4053 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
4054 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
4055 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4056 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
4057 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
4058 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
4059 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
4060 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4061 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
4062 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"},
4063 {PCIC_INPUTDEV, -1, 1, "input device"},
4064 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
4065 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4066 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
4067 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
4068 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
4069 {PCIC_DOCKING, -1, 1, "docking station"},
4070 {PCIC_PROCESSOR, -1, 1, "processor"},
4071 {PCIC_SERIALBUS, -1, 1, "serial bus"},
4072 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
4073 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
4074 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
4075 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
4076 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
4077 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
4078 {PCIC_WIRELESS, -1, 1, "wireless controller"},
4079 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
4080 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
4081 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
4082 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
4083 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
4084 {PCIC_SATCOM, -1, 1, "satellite communication"},
4085 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
4086 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
4087 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
4088 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
4089 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
4090 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
4091 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
4092 {PCIC_DASP, -1, 0, "dasp"},
4093 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
/*
 * Bus method: announce a device for which no driver attached.  Prefer
 * a name from the loaded vendor database (pci_describe_device);
 * otherwise fall back to the class/subclass table.  Finally the
 * config state is saved (and the device possibly powered down) via
 * pci_cfg_save(..., 1).
 */
4098 pci_probe_nomatch(device_t dev, device_t child)
4101 const char *cp, *scp;
4105 * Look for a listing for this device in a loaded device database.
4108 if ((device = pci_describe_device(child)) != NULL) {
4109 device_printf(dev, "<%s>", device);
4110 free(device, M_DEVBUF);
4113 * Scan the class/subclass descriptions for a general
4118 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4119 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
4120 if (pci_nomatch_tab[i].subclass == -1) {
/* Class-wide catch-all description. */
4121 cp = pci_nomatch_tab[i].desc;
4122 report = pci_nomatch_tab[i].report;
4123 } else if (pci_nomatch_tab[i].subclass ==
4124 pci_get_subclass(child)) {
/* Exact subclass match; refines the class description. */
4125 scp = pci_nomatch_tab[i].desc;
4126 report = pci_nomatch_tab[i].report;
4130 if (report || bootverbose) {
4131 device_printf(dev, "<%s%s%s>",
4133 ((cp != NULL) && (scp != NULL)) ? ", " : "",
4137 if (report || bootverbose) {
4138 printf(" at device %d.%d (no driver attached)\n",
4139 pci_get_slot(child), pci_get_function(child));
4141 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Bus method: clean up after a detached child, reclaiming anything
 * the driver leaked and warning about each leak.  Release order
 * matters: IRQs before MSI vectors, MSI vectors before memory (see
 * comment below).  Config state is saved at the end.
 */
4145 pci_child_detached(device_t dev, device_t child)
4147 struct pci_devinfo *dinfo;
4148 struct resource_list *rl;
4150 dinfo = device_get_ivars(child);
4151 rl = &dinfo->resources;
4154 * Have to deallocate IRQs before releasing any MSI messages and
4155 * have to release MSI messages before deallocating any memory
4158 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4159 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4160 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4161 pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
4162 (void)pci_release_msi(child);
4164 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4165 pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
4166 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4167 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
4169 if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4170 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
4173 pci_cfg_save(child, dinfo, 1);
4177 * Parse the PCI device database, if loaded, and return a pointer to a
4178 * description of the device.
4180 * The database is flat text formatted as follows:
4182 * Any line not in a valid format is ignored.
4183 * Lines are terminated with newline '\n' characters.
4185 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4188 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4189 * - devices cannot be listed without a corresponding VENDOR line.
4190 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4191 * another TAB, then the device name.
4195 * Assuming (ptr) points to the beginning of a line in the database,
4196 * return the vendor or device and description of the next entry.
4197 * The value of (vendor) or (device) inappropriate for the entry type
4198 * is set to -1. Returns nonzero at the end of the database.
4200 * Note that this is not fully robust in the face of corrupt data;
4201 * we attempt to safeguard against this by writing a newline at the
4202 * end of the database when we initialise it.
/*
 * Parse one line of the vendor database.  Per the format comment
 * above, a vendor line starts with a hex code and a device line is
 * TAB-indented; the description (up to 80 chars) is scanned into
 * *desc.  'left' bounds all scanning to the mapped database size.
 * NOTE(review): several original lines (pointer setup, returns,
 * braces) are absent from this excerpt.
 */
4205 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* Bytes remaining between the cursor and the end of the database. */
4214 left = pci_vendordata_size - (cp - pci_vendordata);
4222 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
4226 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4229 /* skip to next line */
4230 while (*cp != '\n' && left > 0) {
4239 /* skip to next line */
4240 while (*cp != '\n' && left > 0) {
4244 if (*cp == '\n' && left > 0)
/*
 * Look up a human-readable description for 'dev' in the loaded vendor
 * database.  Returns a malloc'd (M_DEVBUF) "vendor, device" string
 * the caller must free, or NULL when no data/match/memory is
 * available.  NOTE(review): loop braces and some returns are absent
 * from this excerpt.
 */
4251 pci_describe_device(device_t dev)
4254 char *desc, *vp, *dp, *line;
4256 desc = vp = dp = NULL;
4259 * If we have no vendor data, we can't do anything.
4261 if (pci_vendordata == NULL)
4265 * Scan the vendor data looking for this device
4267 line = pci_vendordata;
4268 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4271 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4273 if (vendor == pci_get_vendor(dev))
4276 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4279 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4287 if (device == pci_get_device(dev))
/* No device match: fall back to printing the raw hex device id. */
4291 snprintf(dp, 80, "0x%x", pci_get_device(dev));
/* +3 covers the ", " separator plus the trailing NUL. */
4292 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4294 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus method: read a PCI instance variable for a child device.  Most
 * cases copy a field cached in the child's pcicfgregs into *result.
 * MINGNT/MAXLAT are only valid for type-0 (normal) headers.
 */
4304 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4306 struct pci_devinfo *dinfo;
4309 dinfo = device_get_ivars(child);
4313 case PCI_IVAR_ETHADDR:
4315 * The generic accessor doesn't deal with failure, so
4316 * we set the return value, then return an error.
4318 *((uint8_t **) result) = NULL;
4320 case PCI_IVAR_SUBVENDOR:
4321 *result = cfg->subvendor;
4323 case PCI_IVAR_SUBDEVICE:
4324 *result = cfg->subdevice;
4326 case PCI_IVAR_VENDOR:
4327 *result = cfg->vendor;
4329 case PCI_IVAR_DEVICE:
4330 *result = cfg->device;
4332 case PCI_IVAR_DEVID:
/* devid packs device in the high 16 bits, vendor in the low. */
4333 *result = (cfg->device << 16) | cfg->vendor;
4335 case PCI_IVAR_CLASS:
4336 *result = cfg->baseclass;
4338 case PCI_IVAR_SUBCLASS:
4339 *result = cfg->subclass;
4341 case PCI_IVAR_PROGIF:
4342 *result = cfg->progif;
4344 case PCI_IVAR_REVID:
4345 *result = cfg->revid;
4347 case PCI_IVAR_INTPIN:
4348 *result = cfg->intpin;
4351 *result = cfg->intline;
4353 case PCI_IVAR_DOMAIN:
4354 *result = cfg->domain;
4360 *result = cfg->slot;
4362 case PCI_IVAR_FUNCTION:
4363 *result = cfg->func;
4365 case PCI_IVAR_CMDREG:
4366 *result = cfg->cmdreg;
4368 case PCI_IVAR_CACHELNSZ:
4369 *result = cfg->cachelnsz;
4371 case PCI_IVAR_MINGNT:
/* Only type-0 headers define MIN_GNT. */
4372 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4376 *result = cfg->mingnt;
4378 case PCI_IVAR_MAXLAT:
/* Only type-0 headers define MAX_LAT. */
4379 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4383 *result = cfg->maxlat;
4385 case PCI_IVAR_LATTIMER:
4386 *result = cfg->lattimer;
4395 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4397 struct pci_devinfo *dinfo;
4399 dinfo = device_get_ivars(child);
4402 case PCI_IVAR_INTPIN:
4403 dinfo->cfg.intpin = value;
4405 case PCI_IVAR_ETHADDR:
4406 case PCI_IVAR_SUBVENDOR:
4407 case PCI_IVAR_SUBDEVICE:
4408 case PCI_IVAR_VENDOR:
4409 case PCI_IVAR_DEVICE:
4410 case PCI_IVAR_DEVID:
4411 case PCI_IVAR_CLASS:
4412 case PCI_IVAR_SUBCLASS:
4413 case PCI_IVAR_PROGIF:
4414 case PCI_IVAR_REVID:
4416 case PCI_IVAR_DOMAIN:
4419 case PCI_IVAR_FUNCTION:
4420 return (EINVAL); /* disallow for now */
4427 #include "opt_ddb.h"
4429 #include <ddb/ddb.h>
4430 #include <sys/cons.h>
4433 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq list and print
 * each device's name/unit, bus address (domain:bus:dev:func), packed
 * class code, subsystem and chip ids, revision and header type.
 * Stops early when the debugger pager is quit.
 */
4436 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4438 struct pci_devinfo *dinfo;
4439 struct devlist *devlist_head;
4442 int i, error, none_count;
4445 /* get the head of the device queue */
4446 devlist_head = &pci_devq;
4449 * Go through the list of devices and print out devices
4451 for (error = 0, i = 0,
4452 dinfo = STAILQ_FIRST(devlist_head);
4453 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4454 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4456 /* Populate pd_name and pd_unit */
4459 name = device_get_name(dinfo->cfg.dev);
4462 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4463 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4464 (name && *name) ? name : "none",
4465 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4467 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4468 p->pc_sel.pc_func, (p->pc_class << 16) |
4469 (p->pc_subclass << 8) | p->pc_progif,
4470 (p->pc_subdevice << 16) | p->pc_subvendor,
4471 (p->pc_device << 16) | p->pc_vendor,
4472 p->pc_revid, p->pc_hdr);
/*
 * Lazily reserve the resource backing a BAR for a child.  Determines
 * the BAR's size (either from a previously recorded pci_map or by
 * sizing the register), validates the requested type against what the
 * BAR decodes, rounds the allocation up to the BAR's natural size and
 * alignment, reserves the range, and writes the assigned address back
 * into the BAR.
 * NOTE(review): several original lines (returns, braces) are absent
 * from this excerpt.
 */
4477 static struct resource *
4478 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4479 u_long start, u_long end, u_long count, u_int flags)
4481 struct pci_devinfo *dinfo = device_get_ivars(child);
4482 struct resource_list *rl = &dinfo->resources;
4483 struct resource *res;
4485 pci_addr_t map, testval;
4489 pm = pci_find_bar(child, *rid);
4491 /* This is a BAR that we failed to allocate earlier. */
4492 mapsize = pm->pm_size;
4496 * Weed out the bogons, and figure out how large the
4497 * BAR/map is. BARs that read back 0 here are bogus
4498 * and unimplemented. Note: atapci in legacy mode are
4499 * special and handled elsewhere in the code. If you
4500 * have a atapci device in legacy mode and it fails
4501 * here, that other code is broken.
4503 pci_read_bar(child, *rid, &map, &testval);
4506 * Determine the size of the BAR and ignore BARs with a size
4507 * of 0. Device ROM BARs use a different mask value.
4509 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4510 mapsize = pci_romsize(testval);
4512 mapsize = pci_mapsize(testval);
/* Record the newly sized BAR for future lookups. */
4515 pm = pci_add_bar(child, *rid, map, mapsize);
4518 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
/* Memory (or ROM) BAR but the caller asked for something else. */
4519 if (type != SYS_RES_MEMORY) {
4522 "child %s requested type %d for rid %#x,"
4523 " but the BAR says it is an memio\n",
4524 device_get_nameunit(child), type, *rid);
/* I/O BAR but the caller asked for something else. */
4528 if (type != SYS_RES_IOPORT) {
4531 "child %s requested type %d for rid %#x,"
4532 " but the BAR says it is an ioport\n",
4533 device_get_nameunit(child), type, *rid);
4539 * For real BARs, we need to override the size that
4540 * the driver requests, because that's what the BAR
4541 * actually uses and we would otherwise have a
4542 * situation where we might allocate the excess to
4543 * another driver, which won't work.
4545 count = (pci_addr_t)1 << mapsize;
/* BARs must be naturally aligned to their size. */
4546 if (RF_ALIGNMENT(flags) < mapsize)
4547 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4548 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4549 flags |= RF_PREFETCHABLE;
4552 * Allocate enough resource, and then write back the
4553 * appropriate BAR for that resource.
4555 resource_list_add(rl, type, *rid, start, end, count);
4556 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
4557 count, flags & ~RF_ACTIVE);
/* Reservation failed: remove the entry we just added. */
4559 resource_list_delete(rl, type, *rid);
4560 device_printf(child,
4561 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4562 count, *rid, type, start, end);
4566 device_printf(child,
4567 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4568 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address we were granted. */
4569 map = rman_get_start(res);
4570 pci_write_bar(child, pm, map);
/*
 * Bus method: allocate a resource for a child, performing lazy BAR
 * reservation and (for rid 0 IRQs) interrupt routing on first use.
 * Non-direct children and bridge window registers are passed up the
 * tree.
 * NOTE(review): some original lines (switch header, returns, braces)
 * are absent from this excerpt.
 */
4576 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4577 u_long start, u_long end, u_long count, u_int flags)
4579 struct pci_devinfo *dinfo;
4580 struct resource_list *rl;
4581 struct resource_list_entry *rle;
4582 struct resource *res;
/* Not our direct child: delegate to our own parent. */
4585 if (device_get_parent(child) != dev)
4586 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4587 type, rid, start, end, count, flags));
4590 * Perform lazy resource allocation
4592 dinfo = device_get_ivars(child);
4593 rl = &dinfo->resources;
4596 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
4598 return (pci_alloc_secbus(dev, child, rid, start, end, count,
4603 * Can't alloc legacy interrupt once MSI messages have
4606 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4607 cfg->msix.msix_alloc > 0))
4611 * If the child device doesn't have an interrupt
4612 * routed and is deserving of an interrupt, try to
4615 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4617 pci_assign_interrupt(dev, child, 0);
4619 case SYS_RES_IOPORT:
4620 case SYS_RES_MEMORY:
4623 * PCI-PCI bridge I/O window resources are not BARs.
4624 * For those allocations just pass the request up the
4627 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4629 case PCIR_IOBASEL_1:
4630 case PCIR_MEMBASE_1:
4631 case PCIR_PMBASEL_1:
4633 * XXX: Should we bother creating a resource
4636 return (bus_generic_alloc_resource(dev, child,
4637 type, rid, start, end, count, flags));
4641 /* Reserve resources for this BAR if needed. */
4642 rle = resource_list_find(rl, type, *rid);
4644 res = pci_reserve_map(dev, child, type, rid, start, end,
/* Hand the (possibly just-reserved) entry to the generic path. */
4650 return (resource_list_alloc(rl, dev, child, type, rid,
4651 start, end, count, flags));
/*
 * Bus method: release a resource for a child.  Bridge I/O window
 * registers (which are not BARs) are passed up the tree; everything
 * else goes through the child's resource list.
 */
4655 pci_release_resource(device_t dev, device_t child, int type, int rid,
4658 struct pci_devinfo *dinfo;
4659 struct resource_list *rl;
/* Not our direct child: delegate to our own parent. */
4662 if (device_get_parent(child) != dev)
4663 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4666 dinfo = device_get_ivars(child);
4670 * PCI-PCI bridge I/O window resources are not BARs. For
4671 * those allocations just pass the request up the tree.
4673 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
4674 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
4676 case PCIR_IOBASEL_1:
4677 case PCIR_MEMBASE_1:
4678 case PCIR_PMBASEL_1:
4679 return (bus_generic_release_resource(dev, child, type,
4685 rl = &dinfo->resources;
4686 return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * Bus method: activate a resource.  After the generic activation,
 * direct children get decoding enabled in the command register
 * (PCI_ENABLE_IO) and device ROM BARs get their explicit enable bit
 * set along with the assigned address.
 */
4690 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4693 struct pci_devinfo *dinfo;
4696 error = bus_generic_activate_resource(dev, child, type, rid, r);
4700 /* Enable decoding in the command register when activating BARs. */
4701 if (device_get_parent(child) == dev) {
4702 /* Device ROMs need their decoding explicitly enabled. */
4703 dinfo = device_get_ivars(child);
4704 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4705 pci_write_bar(child, pci_find_bar(child, rid),
4706 rman_get_start(r) | PCIM_BIOS_ENABLE);
4708 case SYS_RES_IOPORT:
4709 case SYS_RES_MEMORY:
4710 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus method: deactivate a resource.  After the generic
 * deactivation, direct children's device ROM BARs are rewritten
 * (clearing the enable bit) so the ROM stops decoding.
 */
4718 pci_deactivate_resource(device_t dev, device_t child, int type,
4719 int rid, struct resource *r)
4721 struct pci_devinfo *dinfo;
4724 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4728 /* Disable decoding for device ROMs. */
4729 if (device_get_parent(child) == dev) {
4730 dinfo = device_get_ivars(child);
4731 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4732 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Delete a child device: detach it if attached, turn off its memory
 * and I/O decoding, release every resource on its resource list
 * (complaining about any still-owned resource), then delete the
 * device itself.
 */
4739 pci_delete_child(device_t dev, device_t child)
4741 struct resource_list_entry *rle;
4742 struct resource_list *rl;
4743 struct pci_devinfo *dinfo;
4745 dinfo = device_get_ivars(child);
4746 rl = &dinfo->resources;
4748 if (device_is_attached(child))
4749 device_detach(child);
4751 /* Turn off access to resources we're about to free */
4752 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4753 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4755 /* Free all allocated resources */
4756 STAILQ_FOREACH(rle, rl, link) {
/* A resource still active or busy means the driver leaked it. */
4758 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4759 resource_list_busy(rl, rle->type, rle->rid)) {
4760 pci_printf(&dinfo->cfg,
4761 "Resource still owned, oops. "
4762 "(type=%d, rid=%d, addr=%lx)\n",
4763 rle->type, rle->rid,
4764 rman_get_start(rle->res));
4765 bus_release_resource(child, rle->type, rle->rid,
4768 resource_list_unreserve(rl, dev, child, rle->type,
4772 resource_list_free(rl);
4774 device_delete_child(dev, child);
/*
 * Bus method: delete a single resource-list entry for a direct
 * child, unreserving it first.  Refuses (with a diagnostic) if the
 * resource is still active or busy.
 */
4779 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4781 struct pci_devinfo *dinfo;
4782 struct resource_list *rl;
4783 struct resource_list_entry *rle;
/* Only direct children have entries on our resource lists. */
4785 if (device_get_parent(child) != dev)
4788 dinfo = device_get_ivars(child);
4789 rl = &dinfo->resources;
4790 rle = resource_list_find(rl, type, rid);
4795 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4796 resource_list_busy(rl, type, rid)) {
4797 device_printf(dev, "delete_resource: "
4798 "Resource still owned by child, oops. "
4799 "(type=%d, rid=%d, addr=%lx)\n",
4800 type, rid, rman_get_start(rle->res));
4803 resource_list_unreserve(rl, dev, child, type, rid);
4805 resource_list_delete(rl, type, rid);
/* Bus method: return the child's per-device resource list. */
4808 struct resource_list *
4809 pci_get_resource_list (device_t dev, device_t child)
4811 struct pci_devinfo *dinfo = device_get_ivars(child);
4813 return (&dinfo->resources);
/* Bus method: return the bus-wide DMA tag from the softc. */
4817 pci_get_dma_tag(device_t bus, device_t dev)
4819 struct pci_softc *sc = device_get_softc(bus);
4821 return (sc->sc_dma_tag);
/*
 * Bus method: read 'width' bytes of the child's config space at
 * 'reg', delegating to the parent bridge via PCIB_READ_CONFIG.
 */
4825 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4827 struct pci_devinfo *dinfo = device_get_ivars(child);
4828 pcicfgregs *cfg = &dinfo->cfg;
4830 return (PCIB_READ_CONFIG(device_get_parent(dev),
4831 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus method: write 'width' bytes of 'val' to the child's config
 * space at 'reg', delegating to the parent bridge.
 */
4835 pci_write_config_method(device_t dev, device_t child, int reg,
4836 uint32_t val, int width)
4838 struct pci_devinfo *dinfo = device_get_ivars(child);
4839 pcicfgregs *cfg = &dinfo->cfg;
4841 PCIB_WRITE_CONFIG(device_get_parent(dev),
4842 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Bus method: format the child's location ("slot=N function=M"). */
4846 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4850 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4851 pci_get_function(child));
/*
 * Bus method: format the child's plug-and-play identification string
 * (vendor/device/subsystem ids and packed class code) for devctl and
 * driver matching.
 */
4856 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4859 struct pci_devinfo *dinfo;
4862 dinfo = device_get_ivars(child);
4864 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4865 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4866 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus method: ask the parent bridge to route an interrupt line for
 * the child.
 */
4872 pci_assign_interrupt_method(device_t dev, device_t child)
4874 struct pci_devinfo *dinfo = device_get_ivars(child);
4875 pcicfgregs *cfg = &dinfo->cfg;
4877 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev control device and load the vendor database; on
 * unload, destroy the control device.
 * NOTE(review): the switch/case labels are absent from this excerpt.
 */
4882 pci_modevent(module_t mod, int what, void *arg)
4884 static struct cdev *pci_cdev;
4888 STAILQ_INIT(&pci_devq);
4890 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4892 pci_load_vendor_data();
4896 destroy_dev(pci_cdev);
/*
 * Restore the saved PCI Express capability control registers.  Which
 * registers exist depends on the capability version and port type,
 * mirroring the checks in pci_cfg_save_pcie() below.
 */
4904 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
/* 16-bit write into the PCIe capability at offset n. */
4906 #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
4907 struct pcicfg_pcie *cfg;
4910 cfg = &dinfo->cfg.pcie;
4911 pos = cfg->pcie_location;
4913 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
4915 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
/* Link control exists for v2+, or v1 ports/endpoints with a link. */
4917 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4918 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
4919 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
4920 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
/* Slot control exists for v2+, or v1 ports implementing a slot. */
4922 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4923 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
4924 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
4925 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
/* Root control exists for v2+, root ports and root complex ECs. */
4927 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4928 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
4929 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
/* The "2" register set only exists in v2+ capabilities. */
4932 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
4933 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
4934 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
/* Restore the saved PCI-X command register. */
4940 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
4942 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
4943 dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a device's saved configuration registers, e.g. after a
 * power-state transition or resume.  Power is raised to D0 first
 * (see comment below), then the common header registers, the
 * type-specific (bridge/cardbus) registers, BARs, and any PCIe,
 * PCI-X, MSI and MSI-X capability state are written back.
 */
4947 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
4951 * Restore the device to full power mode. We must do this
4952 * before we restore the registers because moving from D3 to
4953 * D0 will cause the chip's BARs and some other registers to
4954 * be reset to some unknown power on reset values. Cut down
4955 * the noise on boot by doing nothing if we are already in
4958 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
4959 pci_set_powerstate(dev, PCI_POWERSTATE_D0)
4960 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
4961 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
4962 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
4963 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
4964 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
4965 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
4966 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
/* Header-type-specific registers (type 0 / type 1 / type 2). */
4967 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
4968 case PCIM_HDRTYPE_NORMAL:
4969 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
4970 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
4972 case PCIM_HDRTYPE_BRIDGE:
4973 pci_write_config(dev, PCIR_SECLAT_1,
4974 dinfo->cfg.bridge.br_seclat, 1);
4975 pci_write_config(dev, PCIR_SUBBUS_1,
4976 dinfo->cfg.bridge.br_subbus, 1);
4977 pci_write_config(dev, PCIR_SECBUS_1,
4978 dinfo->cfg.bridge.br_secbus, 1);
4979 pci_write_config(dev, PCIR_PRIBUS_1,
4980 dinfo->cfg.bridge.br_pribus, 1);
4981 pci_write_config(dev, PCIR_BRIDGECTL_1,
4982 dinfo->cfg.bridge.br_control, 2);
4984 case PCIM_HDRTYPE_CARDBUS:
4985 pci_write_config(dev, PCIR_SECLAT_2,
4986 dinfo->cfg.bridge.br_seclat, 1);
4987 pci_write_config(dev, PCIR_SUBBUS_2,
4988 dinfo->cfg.bridge.br_subbus, 1);
4989 pci_write_config(dev, PCIR_SECBUS_2,
4990 dinfo->cfg.bridge.br_secbus, 1);
4991 pci_write_config(dev, PCIR_PRIBUS_2,
4992 dinfo->cfg.bridge.br_pribus, 1);
4993 pci_write_config(dev, PCIR_BRIDGECTL_2,
4994 dinfo->cfg.bridge.br_control, 2);
4997 pci_restore_bars(dev);
5000 * Restore extended capabilities for PCI-Express and PCI-X
5002 if (dinfo->cfg.pcie.pcie_location != 0)
5003 pci_cfg_restore_pcie(dev, dinfo);
5004 if (dinfo->cfg.pcix.pcix_location != 0)
5005 pci_cfg_restore_pcix(dev, dinfo);
5007 /* Restore MSI and MSI-X configurations if they are present. */
5008 if (dinfo->cfg.msi.msi_location != 0)
5009 pci_resume_msi(dev);
5010 if (dinfo->cfg.msix.msix_location != 0)
5011 pci_resume_msix(dev);
/*
 * Save the PCI Express capability control registers so they can be
 * rewritten by pci_cfg_restore_pcie().  Which registers exist depends
 * on the capability version and port type.
 */
5015 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
/* 16-bit read from the PCIe capability at offset n. */
5017 #define RREG(n) pci_read_config(dev, pos + (n), 2)
5018 struct pcicfg_pcie *cfg;
5021 cfg = &dinfo->cfg.pcie;
5022 pos = cfg->pcie_location;
5024 cfg->pcie_flags = RREG(PCIER_FLAGS);
5026 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5028 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
/* Link control exists for v2+, or v1 ports/endpoints with a link. */
5030 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5031 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5032 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5033 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
/* Slot control exists for v2+, or v1 ports implementing a slot. */
5035 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5036 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5037 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5038 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
/* Root control exists for v2+, root ports and root complex ECs. */
5040 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5041 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5042 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
/* The "2" register set only exists in v2+ capabilities. */
5045 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5046 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5047 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
/* Save the PCI-X command register for later restore. */
5053 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5055 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5056 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * Snapshot a device's writable configuration registers into the
 * cached pcicfgregs so pci_cfg_restore() can rewrite them later.
 * If 'setstate' is set and the pci_do_power_nodriver policy allows
 * it for this device class, the device is then powered down to D3
 * (transitioning through D0 first, as the spec requires).
 * NOTE(review): the function's tail (closing brace and any remaining
 * statements) lies beyond this excerpt.
 */
5060 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5066 * Some drivers apparently write to these registers w/o updating our
5067 * cached copy. No harm happens if we update the copy, so do so here
5068 * so we can restore them. The COMMAND register is modified by the
5069 * bus w/o updating the cache. This should represent the normally
5070 * writable portion of the 'defined' part of type 0/1/2 headers.
5072 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5073 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5074 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5075 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5076 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5077 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5078 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5079 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5080 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5081 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5082 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
/* Header-type-specific registers (type 0 / type 1 / type 2). */
5083 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
5084 case PCIM_HDRTYPE_NORMAL:
5085 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5086 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5087 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5088 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5090 case PCIM_HDRTYPE_BRIDGE:
5091 dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
5093 dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
5095 dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
5097 dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
5099 dinfo->cfg.bridge.br_control = pci_read_config(dev,
5100 PCIR_BRIDGECTL_1, 2);
5102 case PCIM_HDRTYPE_CARDBUS:
5103 dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
5105 dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
5107 dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
5109 dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
5111 dinfo->cfg.bridge.br_control = pci_read_config(dev,
5112 PCIR_BRIDGECTL_2, 2);
5113 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2);
5114 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2);
/* Capability state, saved only when the capability is present. */
5118 if (dinfo->cfg.pcie.pcie_location != 0)
5119 pci_cfg_save_pcie(dev, dinfo);
5121 if (dinfo->cfg.pcix.pcix_location != 0)
5122 pci_cfg_save_pcix(dev, dinfo);
5125 * don't set the state for display devices, base peripherals and
5126 * memory devices since bad things happen when they are powered down.
5127 * We should (a) have drivers that can easily detach and (b) use
5128 * generic drivers for these devices so that some device actually
5129 * attaches. We need to make sure that when we implement (a) we don't
5130 * power the device down on a reattach.
5132 cls = pci_get_class(dev);
5135 switch (pci_do_power_nodriver)
5137 case 0: /* NO powerdown at all */
5139 case 1: /* Conservative about what to power down */
5140 if (cls == PCIC_STORAGE)
5143 case 2: /* Aggressive about what to power down */
5144 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5145 cls == PCIC_BASEPERIPH)
5148 case 3: /* Power down everything */
5152 * PCI spec says we can only go into D3 state from D0 state.
5153 * Transition from D[12] into D0 before going to D3 state.
5155 ps = pci_get_powerstate(dev);
5156 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5157 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5158 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5159 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5162 /* Wrapper APIs suitable for device driver use. */
5164 pci_save_state(device_t dev)
5166 struct pci_devinfo *dinfo;
5168 dinfo = device_get_ivars(dev);
5169 pci_cfg_save(dev, dinfo, 0);
5173 pci_restore_state(device_t dev)
5175 struct pci_devinfo *dinfo;
5177 dinfo = device_get_ivars(dev);
5178 pci_cfg_restore(dev, dinfo);
5182 pci_get_rid_method(device_t dev, device_t child)
5185 return (PCIB_GET_RID(device_get_parent(dev), child));