/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2000, BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/limits.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
49 #include <vm/vm_extern.h>
52 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <machine/stdarg.h>
57 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
58 #include <machine/intr_machdep.h>
61 #include <sys/pciio.h>
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pci_private.h>
66 #include <dev/usb/controller/xhcireg.h>
67 #include <dev/usb/controller/ehcireg.h>
68 #include <dev/usb/controller/ohcireg.h>
69 #include <dev/usb/controller/uhcireg.h>
/*
 * True if config register "reg" is the expansion-ROM BAR for the header
 * type recorded in "cfg": type-0 (normal) devices keep it at PCIR_BIOS,
 * type-1 (bridge) headers at PCIR_BIOS_1.  The "reg" argument is
 * parenthesized so the expansion stays safe for expression arguments.
 */
#define	PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && (reg) == PCIR_BIOS) || \
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && (reg) == PCIR_BIOS_1))
78 static int pci_has_quirk(uint32_t devid, int quirk);
79 static pci_addr_t pci_mapbase(uint64_t mapreg);
80 static const char *pci_maptype(uint64_t mapreg);
81 static int pci_maprange(uint64_t mapreg);
82 static pci_addr_t pci_rombase(uint64_t mapreg);
83 static int pci_romsize(uint64_t testval);
84 static void pci_fixancient(pcicfgregs *cfg);
85 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
87 static int pci_porten(device_t dev);
88 static int pci_memen(device_t dev);
89 static void pci_assign_interrupt(device_t bus, device_t dev,
91 static int pci_add_map(device_t bus, device_t dev, int reg,
92 struct resource_list *rl, int force, int prefetch);
93 static int pci_probe(device_t dev);
94 static int pci_attach(device_t dev);
96 static int pci_detach(device_t dev);
98 static void pci_load_vendor_data(void);
99 static int pci_describe_parse_line(char **ptr, int *vendor,
100 int *device, char **desc);
101 static char *pci_describe_device(device_t dev);
102 static int pci_modevent(module_t mod, int what, void *arg);
103 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
105 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
106 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
107 int reg, uint32_t *data);
109 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
110 int reg, uint32_t data);
112 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
113 static void pci_mask_msix(device_t dev, u_int index);
114 static void pci_unmask_msix(device_t dev, u_int index);
115 static int pci_msi_blacklisted(void);
116 static int pci_msix_blacklisted(void);
117 static void pci_resume_msi(device_t dev);
118 static void pci_resume_msix(device_t dev);
119 static int pci_remap_intr_method(device_t bus, device_t dev,
122 static uint16_t pci_get_rid_method(device_t dev, device_t child);
124 static struct pci_devinfo * pci_fill_devinfo(device_t pcib, int d, int b, int s,
125 int f, uint16_t vid, uint16_t did, size_t size);
/*
 * Method table for the PCI bus driver: maps device/bus/pci kobj
 * interface methods onto this file's implementations (pci_*) or onto
 * generic bus helpers (bus_generic_*).
 * NOTE(review): this extracted text shows two device_detach entries
 * (pci_detach and bus_generic_detach); in the upstream source these are
 * alternatives selected by preprocessor conditionals that were dropped
 * from this extraction -- confirm against the original file.  The
 * table's DEVMETHOD_END terminator and closing brace also appear to be
 * elided here.
 */
127 static device_method_t pci_methods[] = {
128 /* Device interface */
129 DEVMETHOD(device_probe, pci_probe),
130 DEVMETHOD(device_attach, pci_attach),
132 DEVMETHOD(device_detach, pci_detach),
134 DEVMETHOD(device_detach, bus_generic_detach),
136 DEVMETHOD(device_shutdown, bus_generic_shutdown),
137 DEVMETHOD(device_suspend, bus_generic_suspend),
138 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
141 DEVMETHOD(bus_print_child, pci_print_child),
142 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
143 DEVMETHOD(bus_read_ivar, pci_read_ivar),
144 DEVMETHOD(bus_write_ivar, pci_write_ivar),
145 DEVMETHOD(bus_driver_added, pci_driver_added),
146 DEVMETHOD(bus_setup_intr, pci_setup_intr),
147 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
149 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
150 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
151 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
152 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
153 DEVMETHOD(bus_delete_resource, pci_delete_resource),
154 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
155 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
156 DEVMETHOD(bus_release_resource, pci_release_resource),
157 DEVMETHOD(bus_activate_resource, pci_activate_resource),
158 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
159 DEVMETHOD(bus_child_detached, pci_child_detached),
160 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
161 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
162 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
163 DEVMETHOD(bus_suspend_child, pci_suspend_child),
164 DEVMETHOD(bus_resume_child, pci_resume_child),
/* PCI interface */
167 DEVMETHOD(pci_read_config, pci_read_config_method),
168 DEVMETHOD(pci_write_config, pci_write_config_method),
169 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
170 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
171 DEVMETHOD(pci_enable_io, pci_enable_io_method),
172 DEVMETHOD(pci_disable_io, pci_disable_io_method),
173 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
174 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
175 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
176 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
177 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
178 DEVMETHOD(pci_find_cap, pci_find_cap_method),
179 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
180 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
181 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
182 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
183 DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
184 DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
185 DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
186 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
187 DEVMETHOD(pci_release_msi, pci_release_msi_method),
188 DEVMETHOD(pci_msi_count, pci_msi_count_method),
189 DEVMETHOD(pci_msix_count, pci_msix_count_method),
190 DEVMETHOD(pci_get_rid, pci_get_rid_method),
191 DEVMETHOD(pci_child_added, pci_child_added_method),
/* SR-IOV methods (upstream these are conditional on PCI_IOV). */
193 DEVMETHOD(pci_iov_attach, pci_iov_attach_method),
194 DEVMETHOD(pci_iov_detach, pci_iov_detach_method),
195 DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method),
201 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
/* Register the pci driver under the pcib (PCI bridge) bus. */
203 static devclass_t pci_devclass;
204 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
205 MODULE_VERSION(pci, 1);
207 static char *pci_vendordata;
208 static size_t pci_vendordata_size;
211 uint32_t devid; /* Vendor/device of the card */
213 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
214 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
215 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
216 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
217 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
218 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
/*
 * Table of known-broken (or known-good-in-VM) devices, matched by the
 * 32-bit device/vendor ID and consulted via pci_has_quirk().
 * NOTE(review): the terminating { 0 } sentinel entry and closing brace
 * that pci_has_quirk()'s loop relies on appear to have been elided from
 * this extraction -- confirm against the original file.
 */
223 static const struct pci_quirk pci_quirks[] = {
224 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
225 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
226 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
227 /* As does the Serverworks OSB4 (the SMBus mapping register) */
228 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
231 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
232 * or the CMIC-SL (AKA ServerWorks GC_LE).
234 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 * MSI doesn't work on earlier Intel chipsets including
239 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
241 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
245 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
246 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
247 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
250 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
253 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
256 * MSI-X allocation doesn't work properly for devices passed through
257 * by VMware up to at least ESXi 5.1.
259 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
260 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
263 * Some virtualization environments emulate an older chipset
264 * but support MSI just fine. QEMU uses the Intel 82440.
266 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
269 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
270 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
271 * It prevents us from attaching hpet(4) when the bit is unset.
272 * Note this quirk only affects SB600 revision A13 and earlier.
273 * For SB600 A21 and later, firmware must set the bit to hide it.
274 * For SB700 and later, it is unused and hardcoded to zero.
276 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
279 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
280 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
281 * command register is set.
283 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
284 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
285 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
288 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
289 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
291 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
292 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
293 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
294 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
295 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
296 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
301 /* map register information */
302 #define PCI_MAPMEM 0x01 /* memory map */
303 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
304 #define PCI_MAPPORT 0x04 /* port map */
306 struct devlist pci_devq;
307 uint32_t pci_generation;
308 uint32_t pci_numdevs = 0;
309 static int pcie_chipset, pcix_chipset;
312 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
314 static int pci_enable_io_modes = 1;
315 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
316 &pci_enable_io_modes, 1,
317 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
318 enable these bits correctly. We'd like to do this all the time, but there\n\
319 are some peripherals that this causes problems with.");
321 static int pci_do_realloc_bars = 0;
322 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
323 &pci_do_realloc_bars, 0,
324 "Attempt to allocate a new range for any BARs whose original "
325 "firmware-assigned ranges fail to allocate during the initial device scan.");
327 static int pci_do_power_nodriver = 0;
328 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
329 &pci_do_power_nodriver, 0,
330 "Place a function into D3 state when no driver attaches to it. 0 means\n\
331 disable. 1 means conservatively place devices into D3 state. 2 means\n\
332 agressively place devices into D3 state. 3 means put absolutely everything\n\
335 int pci_do_power_resume = 1;
336 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
337 &pci_do_power_resume, 1,
338 "Transition from D3 -> D0 on resume.");
340 int pci_do_power_suspend = 1;
341 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
342 &pci_do_power_suspend, 1,
343 "Transition from D0 -> D3 on suspend.");
345 static int pci_do_msi = 1;
346 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
347 "Enable support for MSI interrupts");
349 static int pci_do_msix = 1;
350 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
351 "Enable support for MSI-X interrupts");
353 static int pci_honor_msi_blacklist = 1;
354 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
355 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
357 #if defined(__i386__) || defined(__amd64__)
358 static int pci_usb_takeover = 1;
360 static int pci_usb_takeover = 0;
362 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
363 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
364 Disable this if you depend on BIOS emulation of USB devices, that is\n\
365 you use USB devices (like keyboard or mouse) but do not load USB drivers");
367 static int pci_clear_bars;
368 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
369 "Ignore firmware-assigned resources for BARs.");
371 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
372 static int pci_clear_buses;
373 SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
374 "Ignore firmware-assigned bus numbers.");
377 static int pci_enable_ari = 1;
378 SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
379 0, "Enable support for PCIe Alternative RID Interpretation");
/*
 * Return whether the given 32-bit device/vendor ID has the given quirk
 * type in pci_quirks[].  The table scan stops at the entry whose devid
 * is 0.
 * NOTE(review): the function's braces and return statements were lost
 * in extraction; compare against the upstream file.
 */
382 pci_has_quirk(uint32_t devid, int quirk)
384 const struct pci_quirk *q;
386 for (q = &pci_quirks[0]; q->devid; q++) {
387 if (q->devid == devid && q->type == quirk)
393 /* Find a device_t by bus/slot/function in domain 0 */
396 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
399 return (pci_find_dbsf(0, bus, slot, func));
402 /* Find a device_t by domain/bus/slot/function */
/*
 * Linear scan of the global pci_devq device list; returns the cached
 * device_t of the first entry whose domain/bus/slot/function all match.
 * NOTE(review): braces and the not-found return path were lost in
 * extraction; compare against the upstream file.
 */
405 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
407 struct pci_devinfo *dinfo;
409 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
410 if ((dinfo->cfg.domain == domain) &&
411 (dinfo->cfg.bus == bus) &&
412 (dinfo->cfg.slot == slot) &&
413 (dinfo->cfg.func == func)) {
414 return (dinfo->cfg.dev);
421 /* Find a device_t by vendor/device ID */
/*
 * Linear scan of pci_devq; returns the device_t of the first entry
 * whose vendor and device IDs match.
 * NOTE(review): braces and the not-found return path were lost in
 * extraction; compare against the upstream file.
 */
424 pci_find_device(uint16_t vendor, uint16_t device)
426 struct pci_devinfo *dinfo;
428 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
429 if ((dinfo->cfg.vendor == vendor) &&
430 (dinfo->cfg.device == device)) {
431 return (dinfo->cfg.dev);
/*
 * Find a device_t by PCI base class / subclass.  Same list-scan pattern
 * as pci_find_device() above.
 * NOTE(review): braces and the not-found return path were lost in
 * extraction; compare against the upstream file.
 */
439 pci_find_class(uint8_t class, uint8_t subclass)
441 struct pci_devinfo *dinfo;
443 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
444 if (dinfo->cfg.baseclass == class &&
445 dinfo->cfg.subclass == subclass) {
446 return (dinfo->cfg.dev);
/*
 * printf-style helper that prefixes the message with the device's
 * "pci<domain>:<bus>:<slot>:<func>: " location and returns the number
 * of characters emitted.
 * NOTE(review): the va_start/va_end lines were lost in extraction;
 * compare against the upstream file.
 */
454 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
459 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
462 retval += vprintf(fmt, ap);
467 /* return base address of memory or port map */
470 pci_mapbase(uint64_t mapreg)
473 if (PCI_BAR_MEM(mapreg))
474 return (mapreg & PCIM_BAR_MEM_BASE);
476 return (mapreg & PCIM_BAR_IO_BASE);
479 /* return map type of memory or port map */
/*
 * Returns a human-readable name for a BAR's mapping type.
 * NOTE(review): the I/O-port and plain-memory return statements were
 * lost in extraction; only the prefetchable-memory branch is visible.
 * Compare against the upstream file.
 */
482 pci_maptype(uint64_t mapreg)
485 if (PCI_BAR_IO(mapreg))
487 if (mapreg & PCIM_BAR_MEM_PREFETCH)
488 return ("Prefetchable Memory");
492 /* return log2 of map size decoded for memory or port map */
/*
 * Derives log2 of the BAR size from the value read back after writing
 * all-ones: the decoded base's low zero bits give the size.
 * NOTE(review): the loop body and return were lost in extraction;
 * compare against the upstream file.
 */
495 pci_mapsize(uint64_t testval)
499 testval = pci_mapbase(testval);
502 while ((testval & 1) == 0)
511 /* return base address of device ROM */
514 pci_rombase(uint64_t mapreg)
517 return (mapreg & PCIM_BIOS_ADDR_MASK);
520 /* return log2 of map size decided for device ROM */
/*
 * Same size-probing idiom as pci_mapsize(), applied to the expansion
 * ROM BAR.
 * NOTE(review): the loop body and return were lost in extraction;
 * compare against the upstream file.
 */
523 pci_romsize(uint64_t testval)
527 testval = pci_rombase(testval);
530 while ((testval & 1) == 0)
539 /* return log2 of address range supported by map register */
/*
 * Classifies a BAR by decodable address range: I/O BARs and the three
 * memory BAR types (32-bit, below-1MB, 64-bit) are distinguished via
 * PCIM_BAR_MEM_TYPE.
 * NOTE(review): the return values of each case were lost in extraction;
 * compare against the upstream file.
 */
542 pci_maprange(uint64_t mapreg)
546 if (PCI_BAR_IO(mapreg))
549 switch (mapreg & PCIM_BAR_MEM_TYPE) {
550 case PCIM_BAR_MEM_32:
553 case PCIM_BAR_MEM_1MB:
556 case PCIM_BAR_MEM_64:
563 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/*
 * Pre-2.0 devices may report a normal header type while actually being
 * PCI-PCI bridges; patch hdrtype from the class/subclass codes.  Only
 * applies to devices currently claiming a normal header.
 * NOTE(review): braces and the early-return line were lost in
 * extraction; compare against the upstream file.
 */
566 pci_fixancient(pcicfgregs *cfg)
568 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
571 /* PCI to PCI bridges use header type 1 */
572 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
573 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
576 /* extract header type specific config data */
/*
 * Reads the fields whose offsets depend on the config header type:
 * subvendor/subdevice IDs (normal and CardBus headers only; they live
 * at different offsets in each) and the number of BARs (nummaps).
 * NOTE(review): the "break" statements between cases were lost in
 * extraction; compare against the upstream file.
 */
579 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
581 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
582 switch (cfg->hdrtype & PCIM_HDRTYPE) {
583 case PCIM_HDRTYPE_NORMAL:
584 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
585 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
586 cfg->nummaps = PCI_MAXMAPS_0;
588 case PCIM_HDRTYPE_BRIDGE:
589 cfg->nummaps = PCI_MAXMAPS_1;
591 case PCIM_HDRTYPE_CARDBUS:
592 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
593 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
594 cfg->nummaps = PCI_MAXMAPS_2;
600 /* read configuration header into pcicfgregs structure */
602 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
604 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
607 vid = REG(PCIR_VENDOR, 2);
608 did = REG(PCIR_DEVICE, 2);
610 return (pci_fill_devinfo(pcib, d, b, s, f, vid, did, size));
615 static struct pci_devinfo *
616 pci_fill_devinfo(device_t pcib, int d, int b, int s, int f, uint16_t vid,
617 uint16_t did, size_t size)
619 struct pci_devinfo *devlist_entry;
622 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
624 cfg = &devlist_entry->cfg;
632 cfg->cmdreg = REG(PCIR_COMMAND, 2);
633 cfg->statreg = REG(PCIR_STATUS, 2);
634 cfg->baseclass = REG(PCIR_CLASS, 1);
635 cfg->subclass = REG(PCIR_SUBCLASS, 1);
636 cfg->progif = REG(PCIR_PROGIF, 1);
637 cfg->revid = REG(PCIR_REVID, 1);
638 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
639 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
640 cfg->lattimer = REG(PCIR_LATTIMER, 1);
641 cfg->intpin = REG(PCIR_INTPIN, 1);
642 cfg->intline = REG(PCIR_INTLINE, 1);
644 cfg->mingnt = REG(PCIR_MINGNT, 1);
645 cfg->maxlat = REG(PCIR_MAXLAT, 1);
647 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
648 cfg->hdrtype &= ~PCIM_MFDEV;
649 STAILQ_INIT(&cfg->maps);
651 cfg->devinfo_size = size;
655 pci_hdrtypedata(pcib, b, s, f, cfg);
657 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
658 pci_read_cap(pcib, cfg);
660 STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links);
662 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
663 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
664 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
665 devlist_entry->conf.pc_sel.pc_func = cfg->func;
666 devlist_entry->conf.pc_hdr = cfg->hdrtype;
668 devlist_entry->conf.pc_subvendor = cfg->subvendor;
669 devlist_entry->conf.pc_subdevice = cfg->subdevice;
670 devlist_entry->conf.pc_vendor = cfg->vendor;
671 devlist_entry->conf.pc_device = cfg->device;
673 devlist_entry->conf.pc_class = cfg->baseclass;
674 devlist_entry->conf.pc_subclass = cfg->subclass;
675 devlist_entry->conf.pc_progif = cfg->progif;
676 devlist_entry->conf.pc_revid = cfg->revid;
681 return (devlist_entry);
686 pci_read_cap(device_t pcib, pcicfgregs *cfg)
688 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
689 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
690 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
694 int ptr, nextptr, ptrptr;
696 switch (cfg->hdrtype & PCIM_HDRTYPE) {
697 case PCIM_HDRTYPE_NORMAL:
698 case PCIM_HDRTYPE_BRIDGE:
699 ptrptr = PCIR_CAP_PTR;
701 case PCIM_HDRTYPE_CARDBUS:
702 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
705 return; /* no extended capabilities support */
707 nextptr = REG(ptrptr, 1); /* sanity check? */
710 * Read capability entries.
712 while (nextptr != 0) {
715 printf("illegal PCI extended capability offset %d\n",
719 /* Find the next entry */
721 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
723 /* Process this entry */
724 switch (REG(ptr + PCICAP_ID, 1)) {
725 case PCIY_PMG: /* PCI power management */
726 if (cfg->pp.pp_cap == 0) {
727 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
728 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
729 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
730 if ((nextptr - ptr) > PCIR_POWER_DATA)
731 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
734 case PCIY_HT: /* HyperTransport */
735 /* Determine HT-specific capability type. */
736 val = REG(ptr + PCIR_HT_COMMAND, 2);
738 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
739 cfg->ht.ht_slave = ptr;
741 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
742 switch (val & PCIM_HTCMD_CAP_MASK) {
743 case PCIM_HTCAP_MSI_MAPPING:
744 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
745 /* Sanity check the mapping window. */
746 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
749 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
751 if (addr != MSI_INTEL_ADDR_BASE)
753 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
754 cfg->domain, cfg->bus,
755 cfg->slot, cfg->func,
758 addr = MSI_INTEL_ADDR_BASE;
760 cfg->ht.ht_msimap = ptr;
761 cfg->ht.ht_msictrl = val;
762 cfg->ht.ht_msiaddr = addr;
767 case PCIY_MSI: /* PCI MSI */
768 cfg->msi.msi_location = ptr;
769 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
770 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
771 PCIM_MSICTRL_MMC_MASK)>>1);
773 case PCIY_MSIX: /* PCI MSI-X */
774 cfg->msix.msix_location = ptr;
775 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
776 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
777 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
778 val = REG(ptr + PCIR_MSIX_TABLE, 4);
779 cfg->msix.msix_table_bar = PCIR_BAR(val &
781 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
782 val = REG(ptr + PCIR_MSIX_PBA, 4);
783 cfg->msix.msix_pba_bar = PCIR_BAR(val &
785 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
787 case PCIY_VPD: /* PCI Vital Product Data */
788 cfg->vpd.vpd_reg = ptr;
791 /* Should always be true. */
792 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
793 PCIM_HDRTYPE_BRIDGE) {
794 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
795 cfg->subvendor = val & 0xffff;
796 cfg->subdevice = val >> 16;
799 case PCIY_PCIX: /* PCI-X */
801 * Assume we have a PCI-X chipset if we have
802 * at least one PCI-PCI bridge with a PCI-X
803 * capability. Note that some systems with
804 * PCI-express or HT chipsets might match on
805 * this check as well.
807 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
810 cfg->pcix.pcix_location = ptr;
812 case PCIY_EXPRESS: /* PCI-express */
814 * Assume we have a PCI-express chipset if we have
815 * at least one PCI-express device.
818 cfg->pcie.pcie_location = ptr;
819 val = REG(ptr + PCIER_FLAGS, 2);
820 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
827 #if defined(__powerpc__)
829 * Enable the MSI mapping window for all HyperTransport
830 * slaves. PCI-PCI bridges have their windows enabled via
833 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
834 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
836 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
837 cfg->domain, cfg->bus, cfg->slot, cfg->func);
838 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
839 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
843 /* REG and WREG use carry through to next functions */
847 * PCI Vital Product Data
/* Spin limit (iterations of DELAY(1)) for VPD register handshakes. */
850 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one 32-bit word of VPD at 4-byte-aligned offset "reg": write the
 * offset to VPD_ADDR, poll until the device sets bit 15 (data ready),
 * then fetch VPD_DATA into *data.
 * NOTE(review): braces, the timeout-decrement/bailout lines and return
 * statements were lost in extraction; compare against the upstream
 * file.
 */
853 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
855 int count = PCI_VPD_TIMEOUT;
857 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
859 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
861 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
864 DELAY(1); /* limit looping */
866 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one 32-bit word of VPD: load VPD_DATA, then write the offset
 * with bit 15 set and poll until the device clears it (write done).
 * NOTE(review): same extraction losses as the read path above.
 */
873 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
875 int count = PCI_VPD_TIMEOUT;
877 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
879 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
880 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
881 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
884 DELAY(1); /* limit looping */
891 #undef PCI_VPD_TIMEOUT
893 struct vpd_readstate {
903 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
908 if (vrs->bytesinval == 0) {
909 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
911 vrs->val = le32toh(reg);
913 byte = vrs->val & 0xff;
916 vrs->val = vrs->val >> 8;
917 byte = vrs->val & 0xff;
927 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
929 struct vpd_readstate vrs;
934 int alloc, off; /* alloc/off for RO/W arrays */
940 /* init vpd reader */
948 name = remain = i = 0; /* shut up stupid gcc */
949 alloc = off = 0; /* shut up stupid gcc */
950 dflen = 0; /* shut up stupid gcc */
953 if (vpd_nextbyte(&vrs, &byte)) {
958 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
959 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
960 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
963 case 0: /* item name */
965 if (vpd_nextbyte(&vrs, &byte2)) {
970 if (vpd_nextbyte(&vrs, &byte2)) {
974 remain |= byte2 << 8;
975 if (remain > (0x7f*4 - vrs.off)) {
978 "invalid VPD data, remain %#x\n",
984 name = (byte >> 3) & 0xf;
987 case 0x2: /* String */
988 cfg->vpd.vpd_ident = malloc(remain + 1,
996 case 0x10: /* VPD-R */
999 cfg->vpd.vpd_ros = malloc(alloc *
1000 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1004 case 0x11: /* VPD-W */
1007 cfg->vpd.vpd_w = malloc(alloc *
1008 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1012 default: /* Invalid data, abort */
1018 case 1: /* Identifier String */
1019 cfg->vpd.vpd_ident[i++] = byte;
1022 cfg->vpd.vpd_ident[i] = '\0';
1027 case 2: /* VPD-R Keyword Header */
1029 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1030 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1031 M_DEVBUF, M_WAITOK | M_ZERO);
1033 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1034 if (vpd_nextbyte(&vrs, &byte2)) {
1038 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1039 if (vpd_nextbyte(&vrs, &byte2)) {
1043 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1045 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1048 * if this happens, we can't trust the rest
1051 pci_printf(cfg, "bad keyword length: %d\n",
1056 } else if (dflen == 0) {
1057 cfg->vpd.vpd_ros[off].value = malloc(1 *
1058 sizeof(*cfg->vpd.vpd_ros[off].value),
1059 M_DEVBUF, M_WAITOK);
1060 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1062 cfg->vpd.vpd_ros[off].value = malloc(
1064 sizeof(*cfg->vpd.vpd_ros[off].value),
1065 M_DEVBUF, M_WAITOK);
1068 /* keep in sync w/ state 3's transistions */
1069 if (dflen == 0 && remain == 0)
1071 else if (dflen == 0)
1077 case 3: /* VPD-R Keyword Value */
1078 cfg->vpd.vpd_ros[off].value[i++] = byte;
1079 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1080 "RV", 2) == 0 && cksumvalid == -1) {
1086 "bad VPD cksum, remain %hhu\n",
1095 /* keep in sync w/ state 2's transistions */
1097 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1098 if (dflen == 0 && remain == 0) {
1099 cfg->vpd.vpd_rocnt = off;
1100 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1101 off * sizeof(*cfg->vpd.vpd_ros),
1102 M_DEVBUF, M_WAITOK | M_ZERO);
1104 } else if (dflen == 0)
1114 case 5: /* VPD-W Keyword Header */
1116 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1117 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1118 M_DEVBUF, M_WAITOK | M_ZERO);
1120 cfg->vpd.vpd_w[off].keyword[0] = byte;
1121 if (vpd_nextbyte(&vrs, &byte2)) {
1125 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1126 if (vpd_nextbyte(&vrs, &byte2)) {
1130 cfg->vpd.vpd_w[off].len = dflen = byte2;
1131 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1132 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1133 sizeof(*cfg->vpd.vpd_w[off].value),
1134 M_DEVBUF, M_WAITOK);
1137 /* keep in sync w/ state 6's transistions */
1138 if (dflen == 0 && remain == 0)
1140 else if (dflen == 0)
1146 case 6: /* VPD-W Keyword Value */
1147 cfg->vpd.vpd_w[off].value[i++] = byte;
1150 /* keep in sync w/ state 5's transistions */
1152 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1153 if (dflen == 0 && remain == 0) {
1154 cfg->vpd.vpd_wcnt = off;
1155 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1156 off * sizeof(*cfg->vpd.vpd_w),
1157 M_DEVBUF, M_WAITOK | M_ZERO);
1159 } else if (dflen == 0)
1164 pci_printf(cfg, "invalid state: %d\n", state);
1170 if (cksumvalid == 0 || state < -1) {
1171 /* read-only data bad, clean up */
1172 if (cfg->vpd.vpd_ros != NULL) {
1173 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1174 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1175 free(cfg->vpd.vpd_ros, M_DEVBUF);
1176 cfg->vpd.vpd_ros = NULL;
1180 /* I/O error, clean up */
1181 pci_printf(cfg, "failed to read VPD data.\n");
1182 if (cfg->vpd.vpd_ident != NULL) {
1183 free(cfg->vpd.vpd_ident, M_DEVBUF);
1184 cfg->vpd.vpd_ident = NULL;
1186 if (cfg->vpd.vpd_w != NULL) {
1187 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1188 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1189 free(cfg->vpd.vpd_w, M_DEVBUF);
1190 cfg->vpd.vpd_w = NULL;
1193 cfg->vpd.vpd_cached = 1;
/*
 * Bus method: return the cached VPD identifier string for "child",
 * reading and caching the VPD on first use (only if the device exposes
 * a VPD capability, vpd_reg != 0).
 * NOTE(review): braces and the final return statements were lost in
 * extraction; compare against the upstream file.
 */
1199 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1201 struct pci_devinfo *dinfo = device_get_ivars(child);
1202 pcicfgregs *cfg = &dinfo->cfg;
1204 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1205 pci_read_vpd(device_get_parent(dev), cfg);
1207 *identptr = cfg->vpd.vpd_ident;
1209 if (*identptr == NULL)
/*
 * Bus method: look up a VPD read-only keyword (2-byte code, compared
 * with memcmp against each cached entry) and return its value string.
 * Reads and caches the VPD on first use.
 * NOTE(review): the trailing parameter of the signature, braces and the
 * found/not-found return statements were lost in extraction; compare
 * against the upstream file.
 */
1216 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1219 struct pci_devinfo *dinfo = device_get_ivars(child);
1220 pcicfgregs *cfg = &dinfo->cfg;
1223 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1224 pci_read_vpd(device_get_parent(dev), cfg);
1226 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1227 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1228 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1229 *vptr = cfg->vpd.vpd_ros[i].value;
/*
 * Ensure the VPD for "dev" is read and cached.  Unlike the *_method
 * variants above, "dev" here is the PCI device itself, so the bridge is
 * two parents up (parent of the pci bus device).
 * NOTE(review): braces and any return were lost in extraction; compare
 * against the upstream file.
 */
1238 pci_fetch_vpd_list(device_t dev)
1240 struct pci_devinfo *dinfo = device_get_ivars(dev);
1241 pcicfgregs *cfg = &dinfo->cfg;
1243 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1244 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1249 * Find the requested HyperTransport capability and return the offset
1250 * in configuration space via the pointer provided. The function
1251 * returns 0 on success and an error code otherwise.
/*
 * Starts from the first PCIY_HT capability found by pci_find_cap(),
 * then walks the capability chain comparing each HT capability's
 * command-register type field.  Slave/host capabilities are matched on
 * the wider 0xe000 field; everything else on PCIM_HTCMD_CAP_MASK.
 * NOTE(review): several lines (braces, the match/no-match returns, the
 * loop construct) were lost in extraction; compare against the upstream
 * file.
 */
1254 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1259 error = pci_find_cap(child, PCIY_HT, &ptr);
1264 * Traverse the capabilities list checking each HT capability
1265 * to see if it matches the requested HT capability.
1268 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1269 if (capability == PCIM_HTCAP_SLAVE ||
1270 capability == PCIM_HTCAP_HOST)
1273 val &= PCIM_HTCMD_CAP_MASK;
1274 if (val == capability) {
1280 /* Skip to the next HT capability. */
1282 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1283 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1292 * Find the requested capability and return the offset in
1293 * configuration space via the pointer provided. The function returns
1294 * 0 on success and an error code otherwise.
/*
 * Classic capability-list walk: verify PCIM_STATUS_CAPPRESENT, pick the
 * list head register by header type (normal/bridge vs. CardBus), then
 * follow PCICAP_NEXTPTR links comparing PCICAP_ID bytes.
 * NOTE(review): the trailing parameter of the signature, braces, the
 * found/not-found returns and the loop construct were lost in
 * extraction; compare against the upstream file.
 */
1297 pci_find_cap_method(device_t dev, device_t child, int capability,
1300 struct pci_devinfo *dinfo = device_get_ivars(child);
1301 pcicfgregs *cfg = &dinfo->cfg;
1306 * Check the CAP_LIST bit of the PCI status register first.
1308 status = pci_read_config(child, PCIR_STATUS, 2);
1309 if (!(status & PCIM_STATUS_CAPPRESENT))
1313 * Determine the start pointer of the capabilities list.
1315 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1316 case PCIM_HDRTYPE_NORMAL:
1317 case PCIM_HDRTYPE_BRIDGE:
1320 case PCIM_HDRTYPE_CARDBUS:
1321 ptr = PCIR_CAP_PTR_2;
1325 return (ENXIO); /* no extended capabilities support */
1327 ptr = pci_read_config(child, ptr, 1);
1330 * Traverse the capabilities list.
1333 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1338 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
/*
1345 * Find the requested extended capability and return the offset in
1346 * configuration space via the pointer provided. The function returns
1347 * 0 on success and an error code otherwise.
 */
1350 pci_find_extcap_method(device_t dev, device_t child, int capability,
1353 struct pci_devinfo *dinfo = device_get_ivars(child);
1354 pcicfgregs *cfg = &dinfo->cfg;
1358 /* Only supported for PCI-express devices. */
1359 if (cfg->pcie.pcie_location == 0)
/*
 * An all-ones or all-zero header means the extended config space is
 * absent or empty.
 */
1363 ecap = pci_read_config(child, ptr, 4);
1364 if (ecap == 0xffffffff || ecap == 0)
/* Walk the extended capability chain until a match or the list ends. */
1367 if (PCI_EXTCAP_ID(ecap) == capability) {
1372 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1375 ecap = pci_read_config(child, ptr, 4);
/*
1382 * Support for MSI-X message interrupts.
 *
 * Program MSI-X table entry 'index' with the given message address and
 * data.  Each table entry is 16 bytes: address low, address high,
 * message data, vector control.
 */
1385 pci_enable_msix_method(device_t dev, device_t child, u_int index,
1386 uint64_t address, uint32_t data)
1388 struct pci_devinfo *dinfo = device_get_ivars(child);
1389 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1392 KASSERT(msix->msix_table_len > index, ("bogus index"));
1393 offset = msix->msix_table_offset + index * 16;
1394 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1395 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1396 bus_write_4(msix->msix_table_res, offset + 8, data);
1398 /* Enable MSI -> HT mapping. */
1399 pci_ht_map_msi(child, address);
/*
 * Mask MSI-X table entry 'index' by setting the Mask bit in the
 * entry's Vector Control dword (offset 12 within the 16-byte entry).
 * Only writes the register if the bit is not already set.
 */
1403 pci_mask_msix(device_t dev, u_int index)
1405 struct pci_devinfo *dinfo = device_get_ivars(dev);
1406 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1407 uint32_t offset, val;
1409 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1410 offset = msix->msix_table_offset + index * 16 + 12;
1411 val = bus_read_4(msix->msix_table_res, offset);
1412 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1413 val |= PCIM_MSIX_VCTRL_MASK;
1414 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Unmask MSI-X table entry 'index' by clearing the Mask bit in the
 * entry's Vector Control dword.  Only writes the register if the bit
 * is currently set.
 */
1419 pci_unmask_msix(device_t dev, u_int index)
1421 struct pci_devinfo *dinfo = device_get_ivars(dev);
1422 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1423 uint32_t offset, val;
1425 KASSERT(msix->msix_table_len > index, ("bogus index"));
1426 offset = msix->msix_table_offset + index * 16 + 12;
1427 val = bus_read_4(msix->msix_table_res, offset);
1428 if (val & PCIM_MSIX_VCTRL_MASK) {
1429 val &= ~PCIM_MSIX_VCTRL_MASK;
1430 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return non-zero if MSI-X message 'index' has its bit set in the
 * Pending Bit Array.  The PBA is a packed bit vector read in 32-bit
 * words from the PBA BAR mapping.
 */
1435 pci_pending_msix(device_t dev, u_int index)
1437 struct pci_devinfo *dinfo = device_get_ivars(dev);
1438 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1439 uint32_t offset, bit;
1441 KASSERT(msix->msix_table_len > index, ("bogus index"));
1442 offset = msix->msix_pba_offset + (index / 32) * 4;
1443 bit = 1 << index % 32;
1444 return (bus_read_4(msix->msix_pba_res, offset) & bit);
/*
1448 * Restore MSI-X registers and table during resume. If MSI-X is
1449 * enabled then walk the virtual table to restore the actual MSI-X
 * table entries in hardware, then rewrite the capability's control
 * register.
 */
1453 pci_resume_msix(device_t dev)
1455 struct pci_devinfo *dinfo = device_get_ivars(dev);
1456 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1457 struct msix_table_entry *mte;
1458 struct msix_vector *mv;
1461 if (msix->msix_alloc > 0) {
1462 /* First, mask all vectors. */
1463 for (i = 0; i < msix->msix_msgnum; i++)
1464 pci_mask_msix(dev, i);
1466 /* Second, program any messages with at least one handler. */
1467 for (i = 0; i < msix->msix_table_len; i++) {
1468 mte = &msix->msix_table[i];
/* Slots with no vector or no handlers stay masked. */
1469 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1471 mv = &msix->msix_vectors[mte->mte_vector - 1];
1472 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1473 pci_unmask_msix(dev, i);
/* Finally, restore the saved control register value. */
1476 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1477 msix->msix_ctrl, 2);
/*
1481 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1482 * returned in *count. After this function returns, each message will be
1483 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
 *
 * Preconditions checked below: rid 0 (INTx) must not be in use, no MSI
 * or MSI-X messages may already be allocated, MSI-X must not be
 * blacklisted, the capability must exist, and the BAR(s) holding the
 * MSI-X table and PBA must already be mapped and active.
 */
1486 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1488 struct pci_devinfo *dinfo = device_get_ivars(child);
1489 pcicfgregs *cfg = &dinfo->cfg;
1490 struct resource_list_entry *rle;
1491 int actual, error, i, irq, max;
1493 /* Don't let count == 0 get us into trouble. */
1497 /* If rid 0 is allocated, then fail. */
1498 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1499 if (rle != NULL && rle->res != NULL)
1502 /* Already have allocated messages? */
1503 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1506 /* If MSI-X is blacklisted for this system, fail. */
1507 if (pci_msix_blacklisted())
1510 /* MSI-X capability present? */
1511 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1514 /* Make sure the appropriate BARs are mapped. */
1515 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1516 cfg->msix.msix_table_bar);
1517 if (rle == NULL || rle->res == NULL ||
1518 !(rman_get_flags(rle->res) & RF_ACTIVE))
1520 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1521 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1522 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1523 cfg->msix.msix_pba_bar);
1524 if (rle == NULL || rle->res == NULL ||
1525 !(rman_get_flags(rle->res) & RF_ACTIVE))
1528 cfg->msix.msix_pba_res = rle->res;
1531 device_printf(child,
1532 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1533 *count, cfg->msix.msix_msgnum);
/* Clamp the request to what the device advertises. */
1534 max = min(*count, cfg->msix.msix_msgnum);
1535 for (i = 0; i < max; i++) {
1536 /* Allocate a message. */
1537 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1543 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Report the allocated IRQ(s), bootverbose-style. */
1549 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1551 device_printf(child, "using IRQ %lu for MSI-X\n",
/*
1557 * Be fancy and try to print contiguous runs of
1558 * IRQ values as ranges. 'irq' is the previous IRQ.
1559 * 'run' is true if we are in a range.
 */
1561 device_printf(child, "using IRQs %lu", rle->start);
1564 for (i = 1; i < actual; i++) {
1565 rle = resource_list_find(&dinfo->resources,
1566 SYS_RES_IRQ, i + 1);
1568 /* Still in a run? */
1569 if (rle->start == irq + 1) {
1575 /* Finish previous range. */
1581 /* Start new range. */
1582 printf(",%lu", rle->start);
1586 /* Unfinished range? */
1589 printf(" for MSI-X\n");
1593 /* Mask all vectors. */
1594 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1595 pci_mask_msix(child, i);
1597 /* Allocate and initialize vector data and virtual table. */
1598 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1599 M_DEVBUF, M_WAITOK | M_ZERO);
1600 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1601 M_DEVBUF, M_WAITOK | M_ZERO);
1602 for (i = 0; i < actual; i++) {
1603 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1604 cfg->msix.msix_vectors[i].mv_irq = rle->start;
/* Initially map message i to vector i + 1 (1-based). */
1605 cfg->msix.msix_table[i].mte_vector = i + 1;
1608 /* Update control register to enable MSI-X. */
1609 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1610 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1611 cfg->msix.msix_ctrl, 2);
1613 /* Update counts of alloc'd messages. */
1614 cfg->msix.msix_alloc = actual;
1615 cfg->msix.msix_table_len = actual;
/*
1621 * By default, pci_alloc_msix() will assign the allocated IRQ
1622 * resources consecutively to the first N messages in the MSI-X table.
1623 * However, device drivers may want to use different layouts if they
1624 * either receive fewer messages than they asked for, or they wish to
1625 * populate the MSI-X table sparsely. This method allows the driver
1626 * to specify what layout it wants. It must be called after a
1627 * successful pci_alloc_msix() but before any of the associated
1628 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
 *
1630 * The 'vectors' array contains 'count' message vectors. The array
1631 * maps directly to the MSI-X table in that index 0 in the array
1632 * specifies the vector for the first message in the MSI-X table, etc.
1633 * The vector value in each array index can either be 0 to indicate
1634 * that no vector should be assigned to a message slot, or it can be a
1635 * number from 1 to N (where N is the count returned from a
1636 * succcessful call to pci_alloc_msix()) to indicate which message
1637 * vector (IRQ) to be used for the corresponding message.
 *
1639 * On successful return, each message with a non-zero vector will have
1640 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1641 * 1. Additionally, if any of the IRQs allocated via the previous
1642 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1643 * will be freed back to the system automatically.
 *
1645 * For example, suppose a driver has a MSI-X table with 6 messages and
1646 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1647 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1648 * C. After the call to pci_alloc_msix(), the device will be setup to
1649 * have an MSI-X table of ABC--- (where - means no vector assigned).
1650 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1651 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1652 * be freed back to the system. This device will also have valid
1653 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
 *
1655 * In any case, the SYS_RES_IRQ rid X will always map to the message
1656 * at MSI-X table index X - 1 and will only be valid if a vector is
1657 * assigned to that table entry.
 */
1660 pci_remap_msix_method(device_t dev, device_t child, int count,
1661 const u_int *vectors)
1663 struct pci_devinfo *dinfo = device_get_ivars(child);
1664 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1665 struct resource_list_entry *rle;
1666 int i, irq, j, *used;
/*
1669 * Have to have at least one message in the table but the
1670 * table can't be bigger than the actual MSI-X table in the
 * device.
 */
1673 if (count == 0 || count > msix->msix_msgnum)
1676 /* Sanity check the vectors. */
1677 for (i = 0; i < count; i++)
1678 if (vectors[i] > msix->msix_alloc)
/*
1682 * Make sure there aren't any holes in the vectors to be used.
1683 * It's a big pain to support it, and it doesn't really make
1684 * sense anyway. Also, at least one vector must be used.
 */
1686 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1688 for (i = 0; i < count; i++)
1689 if (vectors[i] != 0)
1690 used[vectors[i] - 1] = 1;
/* A hole exists if an unused vector precedes a used one. */
1691 for (i = 0; i < msix->msix_alloc - 1; i++)
1692 if (used[i] == 0 && used[i + 1] == 1) {
1693 free(used, M_DEVBUF);
1697 free(used, M_DEVBUF);
1701 /* Make sure none of the resources are allocated. */
1702 for (i = 0; i < msix->msix_table_len; i++) {
1703 if (msix->msix_table[i].mte_vector == 0)
1705 if (msix->msix_table[i].mte_handlers > 0)
1707 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1708 KASSERT(rle != NULL, ("missing resource"));
1709 if (rle->res != NULL)
1713 /* Free the existing resource list entries. */
1714 for (i = 0; i < msix->msix_table_len; i++) {
1715 if (msix->msix_table[i].mte_vector == 0)
1717 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
/*
1721 * Build the new virtual table keeping track of which vectors are
 * used.
 */
1724 free(msix->msix_table, M_DEVBUF);
1725 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1726 M_DEVBUF, M_WAITOK | M_ZERO);
1727 for (i = 0; i < count; i++)
1728 msix->msix_table[i].mte_vector = vectors[i];
1729 msix->msix_table_len = count;
1731 /* Free any unused IRQs and resize the vectors array if necessary. */
1732 j = msix->msix_alloc - 1;
1734 struct msix_vector *vec;
/* Release trailing unused vectors back to the parent bridge. */
1736 while (used[j] == 0) {
1737 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1738 msix->msix_vectors[j].mv_irq);
/* Shrink the vectors array to the surviving j + 1 entries. */
1741 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1743 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1745 free(msix->msix_vectors, M_DEVBUF);
1746 msix->msix_vectors = vec;
1747 msix->msix_alloc = j + 1;
1749 free(used, M_DEVBUF);
1751 /* Map the IRQs onto the rids. */
1752 for (i = 0; i < count; i++) {
1753 if (vectors[i] == 0)
1755 irq = msix->msix_vectors[vectors[i]].mv_irq;
1756 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Report the final mapping, bootverbose-style. */
1761 device_printf(child, "Remapped MSI-X IRQs as: ");
1762 for (i = 0; i < count; i++) {
1765 if (vectors[i] == 0)
1769 msix->msix_vectors[vectors[i]].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child'.  Fails if any
 * message still has handlers attached or if any of the associated
 * SYS_RES_IRQ resources are still allocated.  On success, MSI-X is
 * disabled in the capability's control register and all IRQs are
 * handed back to the parent bridge.
 */
1778 pci_release_msix(device_t dev, device_t child)
1780 struct pci_devinfo *dinfo = device_get_ivars(child);
1781 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1782 struct resource_list_entry *rle;
1785 /* Do we have any messages to release? */
1786 if (msix->msix_alloc == 0)
1789 /* Make sure none of the resources are allocated. */
1790 for (i = 0; i < msix->msix_table_len; i++) {
1791 if (msix->msix_table[i].mte_vector == 0)
1793 if (msix->msix_table[i].mte_handlers > 0)
1795 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1796 KASSERT(rle != NULL, ("missing resource"));
1797 if (rle->res != NULL)
1801 /* Update control register to disable MSI-X. */
1802 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1803 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1804 msix->msix_ctrl, 2);
1806 /* Free the resource list entries. */
1807 for (i = 0; i < msix->msix_table_len; i++) {
1808 if (msix->msix_table[i].mte_vector == 0)
1810 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1812 free(msix->msix_table, M_DEVBUF);
1813 msix->msix_table_len = 0;
1815 /* Release the IRQs. */
1816 for (i = 0; i < msix->msix_alloc; i++)
1817 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1818 msix->msix_vectors[i].mv_irq);
1819 free(msix->msix_vectors, M_DEVBUF);
1820 msix->msix_alloc = 0;
/*
1825 * Return the max supported MSI-X messages this device supports.
1826 * Basically, assuming the MD code can alloc messages, this function
1827 * should return the maximum value that pci_alloc_msix() can return.
1828 * Thus, it is subject to the tunables, etc.
 */
1831 pci_msix_count_method(device_t dev, device_t child)
1833 struct pci_devinfo *dinfo = device_get_ivars(child);
1834 struct pcicfg_msix *msix = &dinfo->cfg.msix;
/* Zero when MSI-X is disabled by tunable or the capability is absent. */
1836 if (pci_do_msix && msix->msix_location != 0)
1837 return (msix->msix_msgnum);
/*
1842 * HyperTransport MSI mapping control
 *
 * Enable the HT MSI mapping window when a non-zero MSI address within
 * the window is programmed, and disable it when the address is cleared.
 */
1845 pci_ht_map_msi(device_t dev, uint64_t addr)
1847 struct pci_devinfo *dinfo = device_get_ivars(dev);
1848 struct pcicfg_ht *ht = &dinfo->cfg.ht;
/* Only enable if the address falls within the 1MB-aligned window. */
1853 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1854 ht->ht_msiaddr >> 20 == addr >> 20) {
1855 /* Enable MSI -> HT mapping. */
1856 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1857 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1861 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1862 /* Disable MSI -> HT mapping. */
1863 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1864 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCI Express Max Read Request Size of 'dev' in bytes, or
 * (per the elided early-return) presumably 0 for non-PCIe devices.
 * The encoded field value maps to 128 << encoding, i.e. 1 << (enc + 7).
 */
1870 pci_get_max_read_req(device_t dev)
1872 struct pci_devinfo *dinfo = device_get_ivars(dev);
1876 cap = dinfo->cfg.pcie.pcie_location;
1879 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1880 val &= PCIEM_CTL_MAX_READ_REQUEST;
/* NOTE(review): the elided line between likely shifts 'val' right by 12. */
1882 return (1 << (val + 7));
/*
 * Set the PCI Express Max Read Request Size of 'dev'.  The requested
 * size is rounded down to a power of two (and, per the elided lines,
 * presumably clamped to the 128..4096 byte range the spec allows)
 * before being encoded into bits 14:12 of the Device Control register.
 */
1886 pci_set_max_read_req(device_t dev, int size)
1888 struct pci_devinfo *dinfo = device_get_ivars(dev);
1892 cap = dinfo->cfg.pcie.pcie_location;
/* Round down to the nearest power of two. */
1899 size = (1 << (fls(size) - 1));
1900 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1901 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
/* Encode: 128 bytes -> 0, 256 -> 1, ... shifted into bits 14:12. */
1902 val |= (fls(size) - 8) << 12;
1903 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
/*
1908 * Support for MSI message signalled interrupts.
 *
 * Program the MSI capability of 'child' with the given message address
 * and data, then set the MSI enable bit.  The data register offset
 * differs depending on whether the capability is the 64-bit variant.
 */
1911 pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
1914 struct pci_devinfo *dinfo = device_get_ivars(child);
1915 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1917 /* Write data and address values. */
1918 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
1919 address & 0xffffffff, 4);
1920 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1921 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1923 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
1926 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
1929 /* Enable MSI in the control register. */
1930 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1931 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
1934 /* Enable MSI -> HT mapping. */
1935 pci_ht_map_msi(child, address);
/*
 * Disable MSI for 'child': tear down any HT MSI mapping first (by
 * passing a zero address), then clear the MSI enable bit in the
 * capability's control register.
 */
1939 pci_disable_msi_method(device_t dev, device_t child)
1941 struct pci_devinfo *dinfo = device_get_ivars(child);
1942 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1944 /* Disable MSI -> HT mapping. */
1945 pci_ht_map_msi(child, 0);
1947 /* Disable MSI in the control register. */
1948 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1949 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
/*
1954 * Restore MSI registers during resume. If MSI is enabled then
1955 * restore the data and address registers in addition to the control
 * register.
 */
1959 pci_resume_msi(device_t dev)
1961 struct pci_devinfo *dinfo = device_get_ivars(dev);
1962 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1966 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
/* Replay the cached address/data into the capability registers. */
1967 address = msi->msi_addr;
1968 data = msi->msi_data;
1969 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1970 address & 0xffffffff, 4);
1971 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1972 pci_write_config(dev, msi->msi_location +
1973 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1974 pci_write_config(dev, msi->msi_location +
1975 PCIR_MSI_DATA_64BIT, data, 2);
1977 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* The control register is restored unconditionally. */
1980 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Reprogram the message address/data for IRQ 'irq' on 'dev' after the
 * platform has moved the interrupt (e.g. CPU rebalancing).  Handles
 * both the MSI and MSI-X cases by querying the parent bridge for the
 * updated mapping and rewriting the affected registers/table slots.
 */
1985 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1987 struct pci_devinfo *dinfo = device_get_ivars(dev);
1988 pcicfgregs *cfg = &dinfo->cfg;
1989 struct resource_list_entry *rle;
1990 struct msix_table_entry *mte;
1991 struct msix_vector *mv;
/*
1997 * Handle MSI first. We try to find this IRQ among our list
1998 * of MSI IRQs. If we find it, we request updated address and
1999 * data registers and apply the results.
 */
2001 if (cfg->msi.msi_alloc > 0) {
2003 /* If we don't have any active handlers, nothing to do. */
2004 if (cfg->msi.msi_handlers == 0)
2006 for (i = 0; i < cfg->msi.msi_alloc; i++) {
2007 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2009 if (rle->start == irq) {
2010 error = PCIB_MAP_MSI(device_get_parent(bus),
2011 dev, irq, &addr, &data);
/* Disable, update the cached address/data, then re-enable. */
2014 pci_disable_msi(dev);
2015 dinfo->cfg.msi.msi_addr = addr;
2016 dinfo->cfg.msi.msi_data = data;
2017 pci_enable_msi(dev, addr, data);
/*
2025 * For MSI-X, we check to see if we have this IRQ. If we do,
2026 * we request the updated mapping info. If that works, we go
2027 * through all the slots that use this IRQ and update them.
 */
2029 if (cfg->msix.msix_alloc > 0) {
2030 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2031 mv = &cfg->msix.msix_vectors[i];
2032 if (mv->mv_irq == irq) {
2033 error = PCIB_MAP_MSI(device_get_parent(bus),
2034 dev, irq, &addr, &data);
2037 mv->mv_address = addr;
/* Rewrite every table slot bound to this vector. */
2039 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2040 mte = &cfg->msix.msix_table[j];
2041 if (mte->mte_vector != i + 1)
2043 if (mte->mte_handlers == 0)
/* Mask while updating, then unmask. */
2045 pci_mask_msix(dev, j);
2046 pci_enable_msix(dev, j, addr, data);
2047 pci_unmask_msix(dev, j);
/*
2058 * Returns true if the specified device is blacklisted because MSI
 * doesn't work.  Honoring the blacklist can be disabled globally via
 * the pci_honor_msi_blacklist tunable.
 */
2062 pci_msi_device_blacklisted(device_t dev)
2065 if (!pci_honor_msi_blacklist)
2068 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
/*
2072 * Determine if MSI is blacklisted globally on this system. Currently,
2073 * we just check for blacklisted chipsets as represented by the
2074 * host-PCI bridge at device 0:0:0. In the future, it may become
2075 * necessary to check other system attributes, such as the kenv values
2076 * that give the motherboard manufacturer and model number.
 */
2079 pci_msi_blacklisted(void)
2083 if (!pci_honor_msi_blacklist)
2086 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2087 if (!(pcie_chipset || pcix_chipset)) {
2088 if (vm_guest != VM_GUEST_NO) {
/*
2090 * Whitelist older chipsets in virtual
2091 * machines known to support MSI.
 */
2093 dev = pci_find_bsf(0, 0, 0);
2095 return (!pci_has_quirk(pci_get_devid(dev),
2096 PCI_QUIRK_ENABLE_MSI_VM));
/* Otherwise, consult the per-device quirk list for the host bridge. */
2101 dev = pci_find_bsf(0, 0, 0);
2103 return (pci_msi_device_blacklisted(dev));
/*
2108 * Returns true if the specified device is blacklisted because MSI-X
2109 * doesn't work. Note that this assumes that if MSI doesn't work,
2110 * MSI-X doesn't either.
 */
2113 pci_msix_device_blacklisted(device_t dev)
2116 if (!pci_honor_msi_blacklist)
2119 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
/* Fall back to the MSI blacklist: no MSI implies no MSI-X. */
2122 return (pci_msi_device_blacklisted(dev));
/*
2126 * Determine if MSI-X is blacklisted globally on this system. If MSI
2127 * is blacklisted, assume that MSI-X is as well. Check for additional
2128 * chipsets where MSI works but MSI-X does not.
 */
2131 pci_msix_blacklisted(void)
2135 if (!pci_honor_msi_blacklist)
/* Check the host bridge at 0:0:0 for an MSI-X-specific quirk. */
2138 dev = pci_find_bsf(0, 0, 0);
2139 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2140 PCI_QUIRK_DISABLE_MSIX))
2143 return (pci_msi_blacklisted());
/*
2147 * Attempt to allocate *count MSI messages. The actual number allocated is
2148 * returned in *count. After this function returns, each message will be
2149 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
 *
 * Unlike MSI-X, MSI message counts must be a power of two and are
 * capped at 32, and all messages are allocated in a single request to
 * the parent bridge.
 */
2152 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2154 struct pci_devinfo *dinfo = device_get_ivars(child);
2155 pcicfgregs *cfg = &dinfo->cfg;
2156 struct resource_list_entry *rle;
2157 int actual, error, i, irqs[32];
2160 /* Don't let count == 0 get us into trouble. */
2164 /* If rid 0 is allocated, then fail. */
2165 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2166 if (rle != NULL && rle->res != NULL)
2169 /* Already have allocated messages? */
2170 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2173 /* If MSI is blacklisted for this system, fail. */
2174 if (pci_msi_blacklisted())
2177 /* MSI capability present? */
2178 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2182 device_printf(child,
2183 "attempting to allocate %d MSI vectors (%d supported)\n",
2184 *count, cfg->msi.msi_msgnum);
2186 /* Don't ask for more than the device supports. */
2187 actual = min(*count, cfg->msi.msi_msgnum);
2189 /* Don't ask for more than 32 messages. */
2190 actual = min(actual, 32);
2192 /* MSI requires power of 2 number of messages. */
2193 if (!powerof2(actual))
2197 /* Try to allocate N messages. */
2198 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
/*
2210 * We now have N actual messages mapped onto SYS_RES_IRQ
2211 * resources in the irqs[] array, so add new resources
2212 * starting at rid 1.
 */
2214 for (i = 0; i < actual; i++)
2215 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2216 irqs[i], irqs[i], 1);
/* Report the allocated IRQ(s), bootverbose-style. */
2220 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
/*
2225 * Be fancy and try to print contiguous runs
2226 * of IRQ values as ranges. 'run' is true if
2227 * we are in a range.
 */
2229 device_printf(child, "using IRQs %d", irqs[0]);
2231 for (i = 1; i < actual; i++) {
2233 /* Still in a run? */
2234 if (irqs[i] == irqs[i - 1] + 1) {
2239 /* Finish previous range. */
2241 printf("-%d", irqs[i - 1]);
2245 /* Start new range. */
2246 printf(",%d", irqs[i]);
2249 /* Unfinished range? */
2251 printf("-%d", irqs[actual - 1]);
2252 printf(" for MSI\n");
2256 /* Update control register with actual count. */
2257 ctrl = cfg->msi.msi_ctrl;
2258 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* Encode log2(actual) into the Multiple Message Enable field. */
2259 ctrl |= (ffs(actual) - 1) << 4;
2260 cfg->msi.msi_ctrl = ctrl;
2261 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2263 /* Update counts of alloc'd messages. */
2264 cfg->msi.msi_alloc = actual;
2265 cfg->msi.msi_handlers = 0;
2270 /* Release the MSI messages associated with this device. */
/*
 * MSI-X is tried first; pci_release_msix() returns ENODEV when no
 * MSI-X messages are allocated, in which case we fall through to the
 * plain-MSI teardown below.
 */
2272 pci_release_msi_method(device_t dev, device_t child)
2274 struct pci_devinfo *dinfo = device_get_ivars(child);
2275 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2276 struct resource_list_entry *rle;
2277 int error, i, irqs[32];
2279 /* Try MSI-X first. */
2280 error = pci_release_msix(dev, child);
2281 if (error != ENODEV)
2284 /* Do we have any messages to release? */
2285 if (msi->msi_alloc == 0)
2287 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2289 /* Make sure none of the resources are allocated. */
2290 if (msi->msi_handlers > 0)
2292 for (i = 0; i < msi->msi_alloc; i++) {
2293 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2294 KASSERT(rle != NULL, ("missing MSI resource"));
2295 if (rle->res != NULL)
2297 irqs[i] = rle->start;
2300 /* Update control register with 0 count. */
2301 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2302 ("%s: MSI still enabled", __func__));
2303 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2304 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2307 /* Release the messages. */
2308 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2309 for (i = 0; i < msi->msi_alloc; i++)
2310 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2312 /* Update alloc count. */
/*
2320 * Return the max supported MSI messages this device supports.
2321 * Basically, assuming the MD code can alloc messages, this function
2322 * should return the maximum value that pci_alloc_msi() can return.
2323 * Thus, it is subject to the tunables, etc.
 */
2326 pci_msi_count_method(device_t dev, device_t child)
2328 struct pci_devinfo *dinfo = device_get_ivars(child);
2329 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Zero when MSI is disabled by tunable or the capability is absent. */
2331 if (pci_do_msi && msi->msi_location != 0)
2332 return (msi->msi_msgnum);
2336 /* free pcicfgregs structure and all depending data structures */
/*
 * Tear down a pci_devinfo: free cached VPD strings, the BAR map list,
 * and the devinfo itself, then unlink it from the global device queue
 * and (per the elided lines) bump the generation count and decrement
 * the device count.
 */
2339 pci_freecfg(struct pci_devinfo *dinfo)
2341 struct devlist *devlist_head;
2342 struct pci_map *pm, *next;
2345 devlist_head = &pci_devq;
/* VPD strings only exist if a VPD capability was found. */
2347 if (dinfo->cfg.vpd.vpd_reg) {
2348 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2349 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2350 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2351 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2352 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2353 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2354 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* Free each tracked BAR mapping; _SAFE allows removal while iterating. */
2356 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2359 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2360 free(dinfo, M_DEVBUF);
2362 /* increment the generation count */
2365 /* we're losing one device */
/*
2371 * PCI power manangement
 *
 * Transition 'child' to the requested D-state via its power management
 * capability.  Returns EOPNOTSUPP if the device has no PM capability
 * or does not support the requested state.
 */
2374 pci_set_powerstate_method(device_t dev, device_t child, int state)
2376 struct pci_devinfo *dinfo = device_get_ivars(child);
2377 pcicfgregs *cfg = &dinfo->cfg;
2379 int result, oldstate, highest, delay;
2381 if (cfg->pp.pp_cap == 0)
2382 return (EOPNOTSUPP);
/*
2385 * Optimize a no state change request away. While it would be OK to
2386 * write to the hardware in theory, some devices have shown odd
2387 * behavior when going from D3 -> D3.
 */
2389 oldstate = pci_get_powerstate(child);
2390 if (oldstate == state)
/*
2394 * The PCI power management specification states that after a state
2395 * transition between PCI power states, system software must
2396 * guarantee a minimal delay before the function accesses the device.
2397 * Compute the worst case delay that we need to guarantee before we
2398 * access the device. Many devices will be responsive much more
2399 * quickly than this delay, but there are some that don't respond
2400 * instantly to state changes. Transitions to/from D3 state require
2401 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2402 * is done below with DELAY rather than a sleeper function because
2403 * this function can be called from contexts where we cannot sleep.
 */
2405 highest = (oldstate > state) ? oldstate : state;
2406 if (highest == PCI_POWERSTATE_D3)
2408 else if (highest == PCI_POWERSTATE_D2)
/* Preserve everything in PMCSR except the power-state field. */
2412 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2413 & ~PCIM_PSTAT_DMASK;
2416 case PCI_POWERSTATE_D0:
2417 status |= PCIM_PSTAT_D0;
2419 case PCI_POWERSTATE_D1:
/* D1 support is optional; reject if the capability lacks it. */
2420 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2421 return (EOPNOTSUPP);
2422 status |= PCIM_PSTAT_D1;
2424 case PCI_POWERSTATE_D2:
/* D2 support is likewise optional. */
2425 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2426 return (EOPNOTSUPP);
2427 status |= PCIM_PSTAT_D2;
2429 case PCI_POWERSTATE_D3:
2430 status |= PCIM_PSTAT_D3;
2437 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2440 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the current D-state of 'child' from its power management status
 * register.  Devices without a PM capability are always reported as D0.
 */
2447 pci_get_powerstate_method(device_t dev, device_t child)
2449 struct pci_devinfo *dinfo = device_get_ivars(child);
2450 pcicfgregs *cfg = &dinfo->cfg;
2454 if (cfg->pp.pp_cap != 0) {
2455 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2456 switch (status & PCIM_PSTAT_DMASK) {
2458 result = PCI_POWERSTATE_D0;
2461 result = PCI_POWERSTATE_D1;
2464 result = PCI_POWERSTATE_D2;
2467 result = PCI_POWERSTATE_D3;
2470 result = PCI_POWERSTATE_UNKNOWN;
2474 /* No support, device is always at D0 */
2475 result = PCI_POWERSTATE_D0;
2481 * Some convenience functions for PCI device drivers.
2484 static __inline void
2485 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2489 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2491 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/*
 * Read-modify-write helper: clear 'bit' in the child's PCI command
 * register.
 */
2494 static __inline void
2495 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2499 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2501 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable bus mastering by setting PCIM_CMD_BUSMASTEREN in the command register. */
2505 pci_enable_busmaster_method(device_t dev, device_t child)
2507 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable bus mastering by clearing PCIM_CMD_BUSMASTEREN in the command register. */
2512 pci_disable_busmaster_method(device_t dev, device_t child)
2514 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable I/O port or memory decoding for 'child' by setting the
 * matching enable bit in the PCI command register.  'space' selects
 * which: SYS_RES_IOPORT or SYS_RES_MEMORY.
 */
2519 pci_enable_io_method(device_t dev, device_t child, int space)
2524 case SYS_RES_IOPORT:
2525 bit = PCIM_CMD_PORTEN;
2527 case SYS_RES_MEMORY:
2528 bit = PCIM_CMD_MEMEN;
2533 pci_set_command_bit(dev, child, bit);
/*
 * Disable I/O port or memory decoding for 'child' by clearing the
 * matching enable bit in the PCI command register.  'space' selects
 * which: SYS_RES_IOPORT or SYS_RES_MEMORY.
 */
2538 pci_disable_io_method(device_t dev, device_t child, int space)
2543 case SYS_RES_IOPORT:
2544 bit = PCIM_CMD_PORTEN;
2546 case SYS_RES_MEMORY:
2547 bit = PCIM_CMD_MEMEN;
2552 pci_clear_command_bit(dev, child, bit);
/*
2557 * New style pci driver. Parent device is either a pci-host-bridge or a
2558 * pci-pci-bridge. Both kinds are represented by instances of pcib.
 *
 * Dump the interesting fields of a device's config header plus its
 * power-management, MSI, and MSI-X capabilities to the console.
 */
2562 pci_print_verbose(struct pci_devinfo *dinfo)
2566 pcicfgregs *cfg = &dinfo->cfg;
2568 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2569 cfg->vendor, cfg->device, cfg->revid);
2570 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2571 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2572 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2573 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2575 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2576 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2577 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2578 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2579 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
/* Interrupt pin 1..4 maps to 'a'..'d'; 0 means no INTx pin. */
2580 if (cfg->intpin > 0)
2581 printf("\tintpin=%c, irq=%d\n",
2582 cfg->intpin +'a' -1, cfg->intline);
2583 if (cfg->pp.pp_cap) {
2586 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2587 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2588 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2589 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2590 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2591 status & PCIM_PSTAT_DMASK);
2593 if (cfg->msi.msi_location) {
2596 ctrl = cfg->msi.msi_ctrl;
2597 printf("\tMSI supports %d message%s%s%s\n",
2598 cfg->msi.msi_msgnum,
2599 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2600 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2601 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2603 if (cfg->msix.msix_location) {
2604 printf("\tMSI-X supports %d message%s ",
2605 cfg->msix.msix_msgnum,
2606 (cfg->msix.msix_msgnum == 1) ? "" : "s");
/* Table and PBA may share a BAR or use two distinct BARs. */
2607 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2608 printf("in map 0x%x\n",
2609 cfg->msix.msix_table_bar);
2611 printf("in maps 0x%x and 0x%x\n",
2612 cfg->msix.msix_table_bar,
2613 cfg->msix.msix_pba_bar);
/* Return non-zero if I/O port decoding is enabled in dev's command register. */
2619 pci_porten(device_t dev)
2621 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return non-zero if memory decoding is enabled in dev's command register. */
2625 pci_memen(device_t dev)
2627 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Probe a BAR: return its current value via *mapp and its size-probe
 * value (all-ones written, then read back) via *testvalp; *bar64 is
 * set when the BAR is a 64-bit memory BAR.  Decoding is disabled
 * around the probe so transient all-ones values are never decoded.
 */
2631 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp,
2634 struct pci_devinfo *dinfo;
2635 pci_addr_t map, testval;
2640 * The device ROM BAR is special. It is always a 32-bit
2641 * memory BAR. Bit 0 is special and should not be set when
2644 dinfo = device_get_ivars(dev);
2645 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2646 map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps the ROM-enable bit (bit 0) clear during sizing. */
2647 pci_write_config(dev, reg, 0xfffffffe, 4);
2648 testval = pci_read_config(dev, reg, 4);
2649 pci_write_config(dev, reg, map, 4);
2651 *testvalp = testval;
2657 map = pci_read_config(dev, reg, 4);
2658 ln2range = pci_maprange(map);
/* For a 64-bit BAR, fold the upper dword from the next register. */
2660 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2663 * Disable decoding via the command register before
2664 * determining the BAR's length since we will be placing it in
2667 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2668 pci_write_config(dev, PCIR_COMMAND,
2669 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2672 * Determine the BAR's length by writing all 1's. The bottom
2673 * log_2(size) bits of the BAR will stick as 0 when we read
2676 pci_write_config(dev, reg, 0xffffffff, 4);
2677 testval = pci_read_config(dev, reg, 4);
2678 if (ln2range == 64) {
2679 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2680 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2684 * Restore the original value of the BAR. We may have reprogrammed
2685 * the BAR of the low-level console device and when booting verbose,
2686 * we need the console device addressable.
2688 pci_write_config(dev, reg, map, 4);
2690 pci_write_config(dev, reg + 4, map >> 32, 4);
/* Re-enable decoding only after the original BAR value is back. */
2691 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2694 *testvalp = testval;
2696 *bar64 = (ln2range == 64);
/*
 * Program a BAR with 'base' and refresh the cached pm_value from the
 * hardware (both dwords for a 64-bit BAR).  The ROM BAR is handled as
 * a plain 32-bit write.
 */
2700 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2702 struct pci_devinfo *dinfo;
2705 /* The device ROM BAR is always a 32-bit memory BAR. */
2706 dinfo = device_get_ivars(dev);
2707 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2710 ln2range = pci_maprange(pm->pm_value);
2711 pci_write_config(dev, pm->pm_reg, base, 4);
2713 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
/* Re-read so pm_value reflects what the device actually latched. */
2714 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2716 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2717 pm->pm_reg + 4, 4) << 32;
/*
 * Look up the struct pci_map tracking config register 'reg' in the
 * device's BAR list; walks dinfo->cfg.maps linearly.
 */
2721 pci_find_bar(device_t dev, int reg)
2723 struct pci_devinfo *dinfo;
2726 dinfo = device_get_ivars(dev);
2727 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2728 if (pm->pm_reg == reg)
/*
 * Report whether decoding for this BAR is currently active: a ROM BAR
 * additionally needs its own enable bit (PCIM_BIOS_ENABLE); otherwise
 * the answer is the MEMEN/PORTEN bit matching the BAR's type.
 */
2735 pci_bar_enabled(device_t dev, struct pci_map *pm)
2737 struct pci_devinfo *dinfo;
2740 dinfo = device_get_ivars(dev);
2741 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2742 !(pm->pm_value & PCIM_BIOS_ENABLE))
2744 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
/* ROM BARs decode through memory space, like memory BARs. */
2745 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2746 return ((cmd & PCIM_CMD_MEMEN) != 0);
2748 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Record a newly discovered BAR in the device's map list, keeping the
 * list sorted by config register offset.  Allocation uses M_WAITOK so
 * this cannot fail; duplicate registers are a programming error
 * (KASSERT).
 */
2752 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2754 struct pci_devinfo *dinfo;
2755 struct pci_map *pm, *prev;
2757 dinfo = device_get_ivars(dev);
2758 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2760 pm->pm_value = value;
/* Find the insertion point that preserves ascending pm_reg order. */
2762 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2763 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2765 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2766 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2770 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2772 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every tracked BAR from its cached pm_value — used after a
 * power-state transition or reset wipes config space.  ROM BARs are
 * skipped here (handled elsewhere); 64-bit BARs get both dwords.
 */
2777 pci_restore_bars(device_t dev)
2779 struct pci_devinfo *dinfo;
2783 dinfo = device_get_ivars(dev);
2784 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2785 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2788 ln2range = pci_maprange(pm->pm_value);
2789 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2791 pci_write_config(dev, pm->pm_reg + 4,
2792 pm->pm_value >> 32, 4);
2797 * Add a resource based on a pci map register. Return 1 if the map
2798 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * Probe one BAR and, when valid, add it to the device's resource list
 * and try to reserve the range from the parent bus.  Returns the
 * register width in dwords (1 for 32-bit, 2 for 64-bit) so the caller
 * can advance its BAR index correctly.
 *
 * 'force' causes zero-based / BIOS-disabled BARs to be added anyway;
 * 'prefetch' is a hint that the range is prefetchable.
 */
2801 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2802 int force, int prefetch)
2805 pci_addr_t base, map, testval;
2806 pci_addr_t start, end, count;
2807 int barlen, basezero, flags, maprange, mapsize, type;
2809 struct resource *res;
2812 * The BAR may already exist if the device is a CardBus card
2813 * whose CIS is stored in this BAR.
2815 pm = pci_find_bar(dev, reg);
2817 maprange = pci_maprange(pm->pm_value);
2818 barlen = maprange == 64 ? 2 : 1;
2822 pci_read_bar(dev, reg, &map, &testval, NULL);
2823 if (PCI_BAR_MEM(map)) {
2824 type = SYS_RES_MEMORY;
2825 if (map & PCIM_BAR_MEM_PREFETCH)
2828 type = SYS_RES_IOPORT;
/* mapsize is log2 of the BAR length, derived from the sticky-zero bits. */
2829 mapsize = pci_mapsize(testval);
2830 base = pci_mapbase(map);
2831 #ifdef __PCI_BAR_ZERO_VALID
2834 basezero = base == 0;
2836 maprange = pci_maprange(map);
2837 barlen = maprange == 64 ? 2 : 1;
2840 * For I/O registers, if bottom bit is set, and the next bit up
2841 * isn't clear, we know we have a BAR that doesn't conform to the
2842 * spec, so ignore it. Also, sanity check the size of the data
2843 * areas to the type of memory involved. Memory must be at least
2844 * 16 bytes in size, while I/O ranges must be at least 4.
2846 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2848 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2849 (type == SYS_RES_IOPORT && mapsize < 2))
2852 /* Save a record of this BAR. */
2853 pm = pci_add_bar(dev, reg, map, mapsize)
2855 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2856 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2857 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2858 printf(", port disabled\n");
2859 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2860 printf(", memory disabled\n");
2862 printf(", enabled\n");
2866 * If base is 0, then we have problems if this architecture does
2867 * not allow that. It is best to ignore such entries for the
2868 * moment. These will be allocated later if the driver specifically
2869 * requests them. However, some removable busses look better when
2870 * all resources are allocated, so allow '0' to be overriden.
2872 * Similarly treat maps whose values is the same as the test value
2873 * read back. These maps have had all f's written to them by the
2874 * BIOS in an attempt to disable the resources.
2876 if (!force && (basezero || map == testval))
/* Bail if the BAR address exceeds what a u_long can represent. */
2878 if ((u_long)base != base) {
2880 "pci%d:%d:%d:%d bar %#x too many address bits",
2881 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2882 pci_get_function(dev), reg);
2887 * This code theoretically does the right thing, but has
2888 * undesirable side effects in some cases where peripherals
2889 * respond oddly to having these bits enabled. Let the user
2890 * be able to turn them off (since pci_enable_io_modes is 1 by
2893 if (pci_enable_io_modes) {
2894 /* Turn on resources that have been left off by a lazy BIOS */
2895 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2896 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2897 cmd |= PCIM_CMD_PORTEN;
2898 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2900 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2901 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2902 cmd |= PCIM_CMD_MEMEN;
2903 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2906 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2908 if (type == SYS_RES_MEMORY && !pci_memen(dev))
/* count is the BAR length in bytes; alignment equals the size. */
2912 count = (pci_addr_t)1 << mapsize;
2913 flags = RF_ALIGNMENT_LOG2(mapsize);
2915 flags |= RF_PREFETCHABLE;
2916 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
2917 start = 0; /* Let the parent decide. */
2921 end = base + count - 1;
2923 resource_list_add(rl, type, reg, start, end, count);
2926 * Try to allocate the resource for this BAR from our parent
2927 * so that this resource range is already reserved. The
2928 * driver for this device will later inherit this resource in
2929 * pci_alloc_resource().
/* NOTE(review): the '®' below appears to be mojibake for '&reg' — verify against upstream and fix the encoding. */
2931 res = resource_list_reserve(rl, bus, dev, type, ®, start, end, count,
2933 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2935 * If the allocation fails, try to allocate a resource for
2936 * this BAR using any available range. The firmware felt
2937 * it was important enough to assign a resource, so don't
2938 * disable decoding if we can help it.
2940 resource_list_delete(rl, type, reg);
2941 resource_list_add(rl, type, reg, 0, ~0ul, count);
/* NOTE(review): same '®' mojibake for '&reg' here. */
2942 res = resource_list_reserve(rl, bus, dev, type, ®, 0, ~0ul,
2947 * If the allocation fails, delete the resource list entry
2948 * and disable decoding for this device.
2950 * If the driver requests this resource in the future,
2951 * pci_reserve_map() will try to allocate a fresh
2954 resource_list_delete(rl, type, reg);
2955 pci_disable_io(dev, type);
2958 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2959 pci_get_domain(dev), pci_get_bus(dev),
2960 pci_get_slot(dev), pci_get_function(dev), reg);
/* Reservation succeeded: program the BAR with the assigned address. */
2962 start = rman_get_start(res);
2963 pci_write_bar(dev, pm, start);
2969 * For ATA devices we need to decide early what addressing mode to use.
2970 * Legacy demands that the primary and secondary ATA ports sits on the
2971 * same addresses that old ISA hardware did. This dictates that we use
2972 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Set up resources for an ATA controller.  If the controller supports
 * (and can be switched to) PCI native mode, use normal BAR probing;
 * otherwise reserve the fixed legacy ISA ranges (0x1f0/0x3f6 primary,
 * 0x170/0x376 secondary) that compatibility mode decodes.  BAR(4)/(5)
 * (bus-master DMA, etc.) are probed either way.
 */
2976 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2977 uint32_t prefetchmask)
2980 int rid, type, progif;
2982 /* if this device supports PCI native addressing use it */
2983 progif = pci_read_config(dev, PCIR_PROGIF, 1);
/* 0x8a == both channels claim their mode is switchable to native. */
2984 if ((progif & 0x8a) == 0x8a) {
2985 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2986 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2987 printf("Trying ATA native PCI addressing mode\n");
2988 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
/* Re-read progif: the write above may or may not have stuck. */
2992 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2993 type = SYS_RES_IOPORT;
2994 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2995 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2996 prefetchmask & (1 << 0));
2997 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2998 prefetchmask & (1 << 1));
/* Primary channel in compatibility mode: fixed legacy ports. */
3001 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
3002 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
3005 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
3006 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
3009 if (progif & PCIP_STORAGE_IDE_MODESEC) {
3010 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3011 prefetchmask & (1 << 2));
3012 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3013 prefetchmask & (1 << 3));
/* Secondary channel in compatibility mode: fixed legacy ports. */
3016 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3017 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3020 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3021 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
3024 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3025 prefetchmask & (1 << 4));
3026 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3027 prefetchmask & (1 << 5));
/*
 * Determine the IRQ for a device and register it as rid-0 SYS_RES_IRQ.
 * Priority: user tunable hw.pci<D>.<B>.<S>.INT<pin>.irq, then (per
 * force_route) either the routed interrupt from the parent bridge or
 * the firmware-written intline register.  Writes intline back if the
 * chosen IRQ differs.
 */
3031 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3033 struct pci_devinfo *dinfo = device_get_ivars(dev);
3034 pcicfgregs *cfg = &dinfo->cfg;
3035 char tunable_name[64];
3038 /* Has to have an intpin to have an interrupt. */
3039 if (cfg->intpin == 0)
3042 /* Let the user override the IRQ with a tunable. */
3043 irq = PCI_INVALID_IRQ;
3044 snprintf(tunable_name, sizeof(tunable_name),
3045 "hw.pci%d.%d.%d.INT%c.irq",
3046 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject tunable values outside the usable 1..254 range. */
3047 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3048 irq = PCI_INVALID_IRQ;
3051 * If we didn't get an IRQ via the tunable, then we either use the
3052 * IRQ value in the intline register or we ask the bus to route an
3053 * interrupt for us. If force_route is true, then we only use the
3054 * value in the intline register if the bus was unable to assign an
3057 if (!PCI_INTERRUPT_VALID(irq)) {
3058 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3059 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3060 if (!PCI_INTERRUPT_VALID(irq))
3064 /* If after all that we don't have an IRQ, just bail. */
3065 if (!PCI_INTERRUPT_VALID(irq))
3068 /* Update the config register if it changed. */
3069 if (irq != cfg->intline) {
3071 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3074 /* Add this IRQ as rid 0 interrupt resource. */
3075 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3078 /* Perform early OHCI takeover from SMM. */
/*
 * Take OHCI controller ownership away from SMM/BIOS before the real
 * driver attaches: request an ownership change via OHCI_OCR, poll up
 * to ~100 iterations, hard-reset the controller if SMM never yields,
 * and finally mask all controller interrupts.
 */
3080 ohci_early_takeover(device_t self)
3082 struct resource *res;
3088 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3092 ctl = bus_read_4(res, OHCI_CONTROL);
/* OHCI_IR set means SMM currently owns the controller. */
3093 if (ctl & OHCI_IR) {
3095 printf("ohci early: "
3096 "SMM active, request owner change\n");
3097 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3098 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3100 ctl = bus_read_4(res, OHCI_CONTROL);
3102 if (ctl & OHCI_IR) {
3104 printf("ohci early: "
3105 "SMM does not respond, resetting\n");
3106 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3108 /* Disable interrupts */
3109 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3112 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3115 /* Perform early UHCI takeover from SMM. */
/*
 * Take UHCI controller ownership away from the BIOS: switch legacy
 * support to PIRQD-only via the PCI LEGSUP register, then clear the
 * controller's interrupt enable register through its I/O BAR.
 */
3117 uhci_early_takeover(device_t self)
3119 struct resource *res;
3123 * Set the PIRQD enable bit and switch off all the others. We don't
3124 * want legacy support to interfere with us XXX Does this also mean
3125 * that the BIOS won't touch the keyboard anymore if it is connected
3126 * to the ports of the root hub?
3128 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3130 /* Disable interrupts */
3131 rid = PCI_UHCI_BASE_REG;
3132 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3134 bus_write_2(res, UHCI_INTR, 0);
3135 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3139 /* Perform early EHCI takeover from SMM. */
/*
 * Take EHCI controller ownership away from the BIOS: walk the
 * extended-capability list from HCCPARAMS, find the USB legacy-support
 * capability, set the OS-ownership semaphore, poll up to ~100
 * iterations for the BIOS semaphore to clear, then mask USB
 * interrupts via the operational registers.
 */
3141 ehci_early_takeover(device_t self)
3143 struct resource *res;
3153 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3157 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3159 /* Synchronise with the BIOS if it owns the controller. */
/* eecp walks the capability chain in PCI config space. */
3160 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3161 eecp = EHCI_EECP_NEXT(eec)) {
3162 eec = pci_read_config(self, eecp, 4);
3163 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3166 bios_sem = pci_read_config(self, eecp +
3167 EHCI_LEGSUP_BIOS_SEM, 1);
3168 if (bios_sem == 0) {
3172 printf("ehci early: "
3173 "SMM active, request owner change\n");
3175 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3177 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3179 bios_sem = pci_read_config(self, eecp +
3180 EHCI_LEGSUP_BIOS_SEM, 1);
3183 if (bios_sem != 0) {
3185 printf("ehci early: "
3186 "SMM does not respond\n");
3188 /* Disable interrupts */
/* Operational registers start CAPLENGTH bytes past the capability regs. */
3189 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3190 bus_write_4(res, offs + EHCI_USBINTR, 0);
3192 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3195 /* Perform early XHCI takeover from SMM. */
/*
 * Take XHCI controller ownership away from the BIOS: walk the
 * MMIO extended-capability list from HCSPARAMS0, find the USB-legacy
 * capability, set the OS semaphore, poll up to 5 seconds for the BIOS
 * semaphore to clear, then stop the controller by zeroing USBCMD.
 */
3197 xhci_early_takeover(device_t self)
3199 struct resource *res;
3209 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3213 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3217 /* Synchronise with the BIOS if it owns the controller. */
/* Capability pointers are in 32-bit units, hence the << 2 scaling. */
3218 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3219 eecp += XHCI_XECP_NEXT(eec) << 2) {
3220 eec = bus_read_4(res, eecp);
3222 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3225 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3230 printf("xhci early: "
3231 "SMM active, request owner change\n");
3233 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3235 /* wait a maximum of 5 second */
3237 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3239 bios_sem = bus_read_1(res, eecp +
3240 XHCI_XECP_BIOS_SEM);
3243 if (bios_sem != 0) {
3245 printf("xhci early: "
3246 "SMM does not respond\n");
3249 /* Disable interrupts */
3250 offs = bus_read_1(res, XHCI_CAPLENGTH);
3251 bus_write_4(res, offs + XHCI_USBCMD, 0);
/* Read USBSTS to flush the preceding write before releasing the BAR. */
3252 bus_read_4(res, offs + XHCI_USBSTS);
3254 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3257 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
 * For a PCI-PCI or CardBus bridge, try to reserve the secondary bus
 * number range [secbus, subbus] from the parent as a PCI_RES_BUS
 * resource.  Applies device-specific quirks first (Orion supbus
 * register, Compal/Compaq R3000 bogus subbus), and clears the
 * secbus/subbus registers if reservation fails or pci_clear_buses is
 * set, forcing renumbering later.
 */
3259 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3260 struct resource_list *rl)
3262 struct resource *res;
3264 u_long start, end, count;
3265 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
/* Register offsets differ between PCI-PCI and CardBus bridge headers. */
3267 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3268 case PCIM_HDRTYPE_BRIDGE:
3269 sec_reg = PCIR_SECBUS_1;
3270 sub_reg = PCIR_SUBBUS_1;
3272 case PCIM_HDRTYPE_CARDBUS:
3273 sec_reg = PCIR_SECBUS_2;
3274 sub_reg = PCIR_SUBBUS_2;
3281 * If the existing bus range is valid, attempt to reserve it
3282 * from our parent. If this fails for any reason, clear the
3283 * secbus and subbus registers.
3285 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3286 * This would at least preserve the existing sec_bus if it is
3289 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3290 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3292 /* Quirk handling. */
3293 switch (pci_get_devid(dev)) {
3294 case 0x12258086: /* Intel 82454KX/GX (Orion) */
3295 sup_bus = pci_read_config(dev, 0x41, 1);
3296 if (sup_bus != 0xff) {
3297 sec_bus = sup_bus + 1;
3298 sub_bus = sup_bus + 1;
3299 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3300 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3305 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
/* Only apply this quirk on matching Compal/08A0 SMBIOS planar strings. */
3306 if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
3308 if (strncmp(cp, "Compal", 6) != 0) {
3313 if ((cp = kern_getenv("smbios.planar.product")) == NULL)
3315 if (strncmp(cp, "08A0", 4) != 0) {
3320 if (sub_bus < 0xa) {
3322 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3328 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
3329 if (sec_bus > 0 && sub_bus >= sec_bus) {
3332 count = end - start + 1;
3334 resource_list_add(rl, PCI_RES_BUS, 0, 0ul, ~0ul, count);
3337 * If requested, clear secondary bus registers in
3338 * bridge devices to force a complete renumbering
3339 * rather than reserving the existing range. However,
3340 * preserve the existing size.
3342 if (pci_clear_buses)
3346 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3347 start, end, count, 0);
3353 "pci%d:%d:%d:%d secbus failed to allocate\n",
3354 pci_get_domain(dev), pci_get_bus(dev),
3355 pci_get_slot(dev), pci_get_function(dev));
/* Reservation failed: zero the registers so renumbering happens later. */
3359 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3360 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
/*
 * Allocate a PCI_RES_BUS range for a bridge child, lazily reserving it
 * from the parent if pci_reserve_secbus() did not already do so, and
 * programming the child's secbus/subbus registers from the reserved
 * range before handing the resource to the caller.
 */
static struct resource *
3364 pci_alloc_secbus(device_t dev, device_t child, int *rid, u_long start,
3365 u_long end, u_long count, u_int flags)
3367 struct pci_devinfo *dinfo;
3369 struct resource_list *rl;
3370 struct resource *res;
3371 int sec_reg, sub_reg;
3373 dinfo = device_get_ivars(child);
3375 rl = &dinfo->resources;
/* Same bridge/CardBus register-offset selection as pci_reserve_secbus(). */
3376 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3377 case PCIM_HDRTYPE_BRIDGE:
3378 sec_reg = PCIR_SECBUS_1;
3379 sub_reg = PCIR_SUBBUS_1;
3381 case PCIM_HDRTYPE_CARDBUS:
3382 sec_reg = PCIR_SECBUS_2;
3383 sub_reg = PCIR_SUBBUS_2;
3392 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3393 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
3394 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
/* Reserve without RF_ACTIVE; activation happens in the final alloc. */
3395 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3396 start, end, count, flags & ~RF_ACTIVE);
3398 resource_list_delete(rl, PCI_RES_BUS, *rid);
3399 device_printf(child, "allocating %lu bus%s failed\n",
3400 count, count == 1 ? "" : "es");
3404 device_printf(child,
3405 "Lazy allocation of %lu bus%s at %lu\n", count,
3406 count == 1 ? "" : "es", rman_get_start(res));
3407 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3408 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3410 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3411 end, count, flags));
/*
 * Populate a newly found device's resource list: handle legacy ATA
 * mapping, probe each BAR (honoring UNMAP_REG/MAP_REG quirks), assign
 * the INTx interrupt, perform early USB-controller takeover from SMM,
 * and reserve secondary bus numbers for bridges.
 */
3416 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3418 struct pci_devinfo *dinfo;
3420 struct resource_list *rl;
3421 const struct pci_quirk *q;
3425 dinfo = device_get_ivars(dev);
3427 rl = &dinfo->resources;
3428 devid = (cfg->device << 16) | cfg->vendor;
3430 /* ATA devices needs special map treatment */
3431 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3432 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3433 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3434 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3435 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3436 pci_ata_maps(bus, dev, rl, force, prefetchmask);
3438 for (i = 0; i < cfg->nummaps;) {
3440 * Skip quirked resources.
3442 for (q = &pci_quirks[0]; q->devid != 0; q++)
3443 if (q->devid == devid &&
3444 q->type == PCI_QUIRK_UNMAP_REG &&
3445 q->arg1 == PCIR_BAR(i))
3447 if (q->devid != 0) {
/* pci_add_map() returns 1 or 2 dwords; advance i by that amount. */
3451 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3452 prefetchmask & (1 << i));
3456 * Add additional, quirked resources.
3458 for (q = &pci_quirks[0]; q->devid != 0; q++)
3459 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3460 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3462 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3463 #ifdef __PCI_REROUTE_INTERRUPT
3465 * Try to re-route interrupts. Sometimes the BIOS or
3466 * firmware may leave bogus values in these registers.
3467 * If the re-route fails, then just stick with what we
3470 pci_assign_interrupt(bus, dev, 1);
3472 pci_assign_interrupt(bus, dev, 0);
3476 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3477 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3478 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3479 xhci_early_takeover(dev);
3480 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3481 ehci_early_takeover(dev);
3482 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3483 ohci_early_takeover(dev);
3484 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3485 uhci_early_takeover(dev);
3488 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3490 * Reserve resources for secondary bus ranges behind bridge
3493 pci_reserve_secbus(bus, dev, cfg, rl);
/*
 * Read the config space of one (domain, bus, slot, func) and, if a
 * device is present, add it as a child of 'dev'.  Returns the devinfo
 * from pci_read_device() (NULL when nothing is there).
 */
static struct pci_devinfo *
3498 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3499 int slot, int func, size_t dinfo_size)
3501 struct pci_devinfo *dinfo;
3503 dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size);
3505 pci_add_child(dev, dinfo);
/*
 * Enumerate every function on this bus.  Slot 0 function 0 is probed
 * first so ARI can be enabled before the rest of the scan, since ARI
 * changes which slot/function combinations are legal.  Per-slot, the
 * header type's multi-function bit decides how many functions to scan.
 */
3511 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3513 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3514 device_t pcib = device_get_parent(dev);
3515 struct pci_devinfo *dinfo;
3517 int s, f, pcifunchigh;
3522 * Try to detect a device at slot 0, function 0. If it exists, try to
3523 * enable ARI. We must enable ARI before detecting the rest of the
3524 * functions on this bus as ARI changes the set of slots and functions
3525 * that are legal on this bus.
3527 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0,
3529 if (dinfo != NULL && pci_enable_ari)
3530 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev)
3533 * Start looking for new devices on slot 0 at function 1 because we
3534 * just identified the device at slot 0, function 0.
3538 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3539 ("dinfo_size too small"));
3540 maxslots = PCIB_MAXSLOTS(pcib);
3541 for (s = 0; s <= maxslots; s++, first_func = 0) {
3545 hdrtype = REG(PCIR_HDRTYPE, 1);
3546 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Multi-function device: scan all functions, not just function 0. */
3548 if (hdrtype & PCIM_MFDEV)
3549 pcifunchigh = PCIB_MAXFUNCS(pcib);
3550 for (f = first_func; f <= pcifunchigh; f++)
3551 pci_identify_function(pcib, dev, domain, busno, s, f,
/*
 * Create an SR-IOV virtual function as a child of 'bus'.  The RID is
 * decoded into bus/slot/function by the parent bridge; the resulting
 * devinfo is flagged PCICFG_VF.  Returns the new child device_t.
 */
3559 pci_add_iov_child(device_t bus, device_t pf, size_t size, uint16_t rid,
3560 uint16_t vid, uint16_t did)
3562 struct pci_devinfo *pf_dinfo, *vf_dinfo;
3564 int busno, slot, func;
3566 pf_dinfo = device_get_ivars(pf);
3569 * Do a sanity check that we have been passed the correct size. If this
3570 * test fails then likely the pci subclass hasn't implemented the
3571 * pci_create_iov_child method like it's supposed it.
3573 if (size != pf_dinfo->cfg.devinfo_size) {
3575 "PCI subclass does not properly implement PCI_IOV\n");
3579 pcib = device_get_parent(bus);
3581 PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func);
3583 vf_dinfo = pci_fill_devinfo(pcib, pci_get_domain(pcib), busno, slot, func,
3586 vf_dinfo->cfg.flags |= PCICFG_VF;
3587 pci_add_child(bus, vf_dinfo);
3589 return (vf_dinfo->cfg.dev);
/*
 * Default PCI_CREATE_IOV_CHILD method: delegate to pci_add_iov_child()
 * with the base struct pci_devinfo size.
 */
3593 pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid,
3594 uint16_t vid, uint16_t did)
3597 return (pci_add_iov_child(bus, pf, sizeof(struct pci_devinfo), rid, vid,
/*
 * Attach a discovered function to the bus device tree: create the
 * child device, hook up its devinfo as ivars, snapshot then restore
 * config space, print verbose info, add resources, and notify the bus
 * via the child-added method.
 */
3603 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3605 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3606 device_set_ivars(dinfo->cfg.dev, dinfo);
3607 resource_list_init(&dinfo->resources);
/* Save first (setstate=0), then restore, so a clean snapshot exists. */
3608 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3609 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3610 pci_print_verbose(dinfo);
3611 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
3612 pci_child_added(dinfo->cfg.dev);
/* Default (empty) bus method invoked after a child device is added. */
3616 pci_child_added_method(device_t dev, device_t child)
/*
 * Generic PCI bus probe: always matches, at BUS_PROBE_GENERIC so
 * platform-specific subclasses can outbid this driver.
 */
3622 pci_probe(device_t dev)
3625 device_set_desc(dev, "PCI bus");
3627 /* Allow other subclasses to override this driver. */
3628 return (BUS_PROBE_GENERIC);
/*
 * Shared attach work for pci and its subclasses: claim this bus
 * number as a PCI_RES_BUS resource from the parent bridge, report
 * domain/bus, and set up the softc's DMA tag — with an optional
 * PCI_DMA_BOUNDARY-restricted tag when this is a host (not nested)
 * PCI bus.
 */
3632 pci_attach_common(device_t dev)
3634 struct pci_softc *sc;
3636 #ifdef PCI_DMA_BOUNDARY
3637 int error, tag_valid;
3643 sc = device_get_softc(dev);
3644 domain = pcib_get_domain(dev);
3645 busno = pcib_get_bus(dev);
3648 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
3650 if (sc->sc_bus == NULL) {
3651 device_printf(dev, "failed to allocate bus number\n");
3656 device_printf(dev, "domain=%d, physical bus=%d\n",
3658 #ifdef PCI_DMA_BOUNDARY
/* Only a top-level PCI bus (grandparent not "pci") gets its own tag. */
3660 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3661 devclass_find("pci")) {
3662 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3663 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3664 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3665 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3667 device_printf(dev, "Failed to create DMA tag: %d\n",
/* Fallback: inherit the parent's DMA tag. */
3674 sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Attach the PCI bus: run the common attach path, then enumerate
 * children using the domain/bus numbers obtained from the parent
 * bridge, and finish with the generic bus attach.
 */
3679 pci_attach(device_t dev)
3681 int busno, domain, error;
3683 error = pci_attach_common(dev);
3688 * Since there can be multiple independently numbered PCI
3689 * busses on systems with multiple PCI domains, we can't use
3690 * the unit number to decide which bus we are probing. We ask
3691 * the parent pcib what our domain and bus numbers are.
3693 domain = pcib_get_domain(dev);
3694 busno = pcib_get_bus(dev);
3695 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3696 return (bus_generic_attach(dev));
/*
 * Detach the PCI bus: detach children generically, then release the
 * bus-number resource claimed in pci_attach_common().
 */
3701 pci_detach(device_t dev)
3703 struct pci_softc *sc;
3706 error = bus_generic_detach(dev);
3709 sc = device_get_softc(dev);
3710 return (bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus));
/*
 * Put 'child' into power state 'state', deferring to the firmware's
 * suggested state (PCIB_POWER_FOR_SLEEP) when it offers one.  Only
 * attached children are touched; others are handled separately.
 */
3715 pci_set_power_child(device_t dev, device_t child, int state)
3717 struct pci_devinfo *dinfo;
3722 * Set the device to the given state. If the firmware suggests
3723 * a different power state, use it instead. If power management
3724 * is not present, the firmware is responsible for managing
3725 * device power. Skip children who aren't attached since they
3726 * are handled separately.
3728 pcib = device_get_parent(dev);
3729 dinfo = device_get_ivars(child);
3731 if (device_is_attached(child) &&
3732 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)
3733 pci_set_powerstate(child, dstate);
/*
 * Suspend one child: save its config space, run the generic suspend,
 * and then (if pci_do_power_suspend) drop it to D3.  Order matters:
 * the driver suspends before the device potentially loses power.
 */
3737 pci_suspend_child(device_t dev, device_t child)
3739 struct pci_devinfo *dinfo;
3742 dinfo = device_get_ivars(child);
3745 * Save the PCI configuration space for the child and set the
3746 * device in the appropriate power state for this sleep state.
3748 pci_cfg_save(child, dinfo, 0);
3750 /* Suspend devices before potentially powering them down. */
3751 error = bus_generic_suspend_child(dev, child);
3756 if (pci_do_power_suspend)
3757 pci_set_power_child(dev, child, PCI_POWERSTATE_D3);
/*
 * Resume one child: restore power to D0 (if pci_do_power_resume),
 * restore its saved config space, re-save state for unattached
 * children, and run the generic resume.
 */
3763 pci_resume_child(device_t dev, device_t child)
3765 struct pci_devinfo *dinfo;
3767 if (pci_do_power_resume)
3768 pci_set_power_child(dev, child, PCI_POWERSTATE_D0);
3770 dinfo = device_get_ivars(child);
3771 pci_cfg_restore(child, dinfo);
/* Unattached children: refresh the saved snapshot (setstate=1). */
3772 if (!device_is_attached(child))
3773 pci_cfg_save(child, dinfo, 1);
3775 bus_generic_resume_child(dev, child);
/*
 * Resume the whole bus in two passes: base peripherals (and other
 * critical classes) first, then everything else, so infrastructure
 * devices are back before the devices that depend on them.
 */
3781 pci_resume(device_t dev)
3783 device_t child, *devlist;
3784 int error, i, numdevs;
3786 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3790 * Resume critical devices first, then everything else later.
3792 for (i = 0; i < numdevs; i++) {
3794 switch (pci_get_class(child)) {
3798 case PCIC_BASEPERIPH:
3799 BUS_RESUME_CHILD(dev, child);
/* Second pass: resume the classes skipped above. */
3803 for (i = 0; i < numdevs; i++) {
3805 switch (pci_get_class(child)) {
3809 case PCIC_BASEPERIPH:
3812 BUS_RESUME_CHILD(dev, child);
3815 free(devlist, M_TEMP);
/*
 * Locate the preloaded "pci_vendor_data" module (vendor/device name
 * database) and publish its address/size in pci_vendordata /
 * pci_vendordata_size, terminating the text with a newline.
 */
3820 pci_load_vendor_data(void)
3826 data = preload_search_by_type("pci_vendor_data");
3828 ptr = preload_fetch_addr(data);
3829 sz = preload_fetch_size(data);
3830 if (ptr != NULL && sz != 0) {
3831 pci_vendordata = ptr;
3832 pci_vendordata_size = sz;
3833 /* terminate the database */
3834 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Bus callback when a new driver registers: re-run identify, then
 * reprobe every child still in DS_NOTPRESENT, restoring its config
 * space first.  Children that still fail to attach are reported via
 * pci_child_detached().
 */
3840 pci_driver_added(device_t dev, driver_t *driver)
3845 struct pci_devinfo *dinfo;
3849 device_printf(dev, "driver added\n");
3850 DEVICE_IDENTIFY(driver, dev);
3851 if (device_get_children(dev, &devlist, &numdevs) != 0)
3853 for (i = 0; i < numdevs; i++) {
/* Skip children that already have an attached driver. */
3855 if (device_get_state(child) != DS_NOTPRESENT)
3857 dinfo = device_get_ivars(child);
3858 pci_print_verbose(dinfo);
3860 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3861 pci_cfg_restore(child, dinfo);
3862 if (device_probe_and_attach(child) != 0)
3863 pci_child_detached(dev, child);
3865 free(devlist, M_TEMP);
/*
 * Bus method to install an interrupt handler for a child.  After the
 * generic setup succeeds, direct children get MSI/MSI-X bookkeeping:
 * the vector is mapped via the parent bridge (PCIB_MAP_MSI) on first
 * use, the per-device handler counts are maintained, and INTx is
 * disabled while MSI/MSI-X is active (unless the device carries
 * PCI_QUIRK_MSI_INTX_BUG, which needs INTx nominally enabled).  On
 * mapping failure the just-installed handler is torn down again.
 */
3869 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3870 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3872 struct pci_devinfo *dinfo;
3873 struct msix_table_entry *mte;
3874 struct msix_vector *mv;
3880 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3885 /* If this is not a direct child, just bail out. */
3886 if (device_get_parent(child) != dev) {
3891 rid = rman_get_rid(irq);
3893 /* Make sure that INTx is enabled */
3894 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3897 * Check to see if the interrupt is MSI or MSI-X.
3898 * Ask our parent to map the MSI and give
3899 * us the address and data register values.
3900 * If we fail for some reason, teardown the
3901 * interrupt handler.
3903 dinfo = device_get_ivars(child);
3904 if (dinfo->cfg.msi.msi_alloc > 0) {
/* First handler for MSI: map the vector through the parent bridge. */
3905 if (dinfo->cfg.msi.msi_addr == 0) {
3906 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3907 ("MSI has handlers, but vectors not mapped"));
3908 error = PCIB_MAP_MSI(device_get_parent(dev),
3909 child, rman_get_start(irq), &addr, &data);
3912 dinfo->cfg.msi.msi_addr = addr;
3913 dinfo->cfg.msi.msi_data = data;
3915 if (dinfo->cfg.msi.msi_handlers == 0)
3916 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3917 dinfo->cfg.msi.msi_data);
3918 dinfo->cfg.msi.msi_handlers++;
3920 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3921 ("No MSI or MSI-X interrupts allocated"));
3922 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3923 ("MSI-X index too high"));
/* MSI-X rids are 1-based indexes into the table. */
3924 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3925 KASSERT(mte->mte_vector != 0, ("no message vector"));
3926 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3927 KASSERT(mv->mv_irq == rman_get_start(irq),
3929 if (mv->mv_address == 0) {
3930 KASSERT(mte->mte_handlers == 0,
3931 ("MSI-X table entry has handlers, but vector not mapped"));
3932 error = PCIB_MAP_MSI(device_get_parent(dev),
3933 child, rman_get_start(irq), &addr, &data);
3936 mv->mv_address = addr;
3939 if (mte->mte_handlers == 0) {
3940 pci_enable_msix(child, rid - 1, mv->mv_address,
3942 pci_unmask_msix(child, rid - 1);
3944 mte->mte_handlers++;
3948 * Make sure that INTx is disabled if we are using MSI/MSI-X,
3949 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
3950 * in which case we "enable" INTx so MSI/MSI-X actually works.
3952 if (!pci_has_quirk(pci_get_devid(child),
3953 PCI_QUIRK_MSI_INTX_BUG))
3954 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3956 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Mapping failed: undo the handler installed by the generic setup. */
3959 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * BUS_TEARDOWN_INTR method: remove a handler from a child's IRQ,
 * decrementing the MSI/MSI-X handler refcount and disabling/masking
 * the message when the count drops to zero.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
3969 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3972 	struct msix_table_entry *mte;
3973 	struct resource_list_entry *rle;
3974 	struct pci_devinfo *dinfo;
	/* Nothing to tear down on an inactive or absent IRQ resource. */
3977 	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3980 	/* If this isn't a direct child, just bail out */
3981 	if (device_get_parent(child) != dev)
3982 		return(bus_generic_teardown_intr(dev, child, irq, cookie));
3984 	rid = rman_get_rid(irq);
	/* rid 0 == legacy INTx: mask it by setting the disable bit. */
3987 		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3990 	 * Check to see if the interrupt is MSI or MSI-X.  If so,
3991 	 * decrement the appropriate handlers count and mask the
3992 	 * MSI-X message, or disable MSI messages if the count
3995 	dinfo = device_get_ivars(child);
3996 	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
	/* The resource being torn down must be the one we handed out. */
3997 	if (rle->res != irq)
3999 	if (dinfo->cfg.msi.msi_alloc > 0) {
4000 		KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4001 		    ("MSI-X index too high"));
4002 		if (dinfo->cfg.msi.msi_handlers == 0)
4004 		dinfo->cfg.msi.msi_handlers--;
4005 		if (dinfo->cfg.msi.msi_handlers == 0)
4006 			pci_disable_msi(child);
4008 		KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4009 		    ("No MSI or MSI-X interrupts allocated"));
4010 		KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4011 		    ("MSI-X index too high"));
4012 		mte = &dinfo->cfg.msix.msix_table[rid - 1];
4013 		if (mte->mte_handlers == 0)
4015 		mte->mte_handlers--;
4016 		if (mte->mte_handlers == 0)
4017 			pci_mask_msix(child, rid - 1);
4020 	error = bus_generic_teardown_intr(dev, child, irq, cookie);
4023 	    ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * BUS_PRINT_CHILD method: print the standard one-line description of a
 * child device (resources, flags, slot/function, domain, footer).
 * Returns the number of characters printed (accumulated in retval).
 */
4028 pci_print_child(device_t dev, device_t child)
4030 	struct pci_devinfo *dinfo;
4031 	struct resource_list *rl;
4034 	dinfo = device_get_ivars(child);
4035 	rl = &dinfo->resources;
4037 	retval += bus_print_child_header(dev, child);
4039 	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
4040 	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
4041 	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
4042 	if (device_get_flags(dev))
4043 		retval += printf(" flags %#x", device_get_flags(dev));
4045 	retval += printf(" at device %d.%d", pci_get_slot(child),
4046 	    pci_get_function(child));
4048 	retval += bus_print_child_domain(dev, child);
4049 	retval += bus_print_child_footer(dev, child);
/*
 * Table mapping PCI class/subclass codes to human-readable names, used
 * by pci_probe_nomatch() to describe devices that no driver claimed.
 * A subclass of -1 is the per-class fallback entry.  The "report" field
 * selects when to print: 0 = only with bootverbose, 1 = always.
 */
4058 	int	report; /* 0 = bootverbose, 1 = always */
4060 } pci_nomatch_tab[] = {
4061 	{PCIC_OLD,		-1,			1, "old"},
4062 	{PCIC_OLD,		PCIS_OLD_NONVGA,	1, "non-VGA display device"},
4063 	{PCIC_OLD,		PCIS_OLD_VGA,		1, "VGA-compatible display device"},
4064 	{PCIC_STORAGE,		-1,			1, "mass storage"},
4065 	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	1, "SCSI"},
4066 	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	1, "ATA"},
4067 	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	1, "floppy disk"},
4068 	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	1, "IPI"},
4069 	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	1, "RAID"},
4070 	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	1, "ATA (ADMA)"},
4071 	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	1, "SATA"},
4072 	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	1, "SAS"},
4073 	{PCIC_STORAGE,		PCIS_STORAGE_NVM,	1, "NVM"},
4074 	{PCIC_NETWORK,		-1,			1, "network"},
4075 	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	1, "ethernet"},
4076 	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	1, "token ring"},
4077 	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	1, "fddi"},
4078 	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	1, "ATM"},
4079 	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	1, "ISDN"},
4080 	{PCIC_DISPLAY,		-1,			1, "display"},
4081 	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	1, "VGA"},
4082 	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	1, "XGA"},
4083 	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	1, "3D"},
4084 	{PCIC_MULTIMEDIA,	-1,			1, "multimedia"},
4085 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	1, "video"},
4086 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	1, "audio"},
4087 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	1, "telephony"},
4088 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	1, "HDA"},
4089 	{PCIC_MEMORY,		-1,			1, "memory"},
4090 	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	1, "RAM"},
4091 	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	1, "flash"},
4092 	{PCIC_BRIDGE,		-1,			1, "bridge"},
4093 	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	1, "HOST-PCI"},
4094 	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	1, "PCI-ISA"},
4095 	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	1, "PCI-EISA"},
4096 	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	1, "PCI-MCA"},
4097 	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	1, "PCI-PCI"},
4098 	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	1, "PCI-PCMCIA"},
4099 	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	1, "PCI-NuBus"},
4100 	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	1, "PCI-CardBus"},
4101 	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	1, "PCI-RACEway"},
4102 	{PCIC_SIMPLECOMM,	-1,			1, "simple comms"},
4103 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	1, "UART"},	/* could detect 16550 */
4104 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	1, "parallel port"},
4105 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	1, "multiport serial"},
4106 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	1, "generic modem"},
4107 	{PCIC_BASEPERIPH,	-1,			0, "base peripheral"},
4108 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	1, "interrupt controller"},
4109 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	1, "DMA controller"},
4110 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	1, "timer"},
4111 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	1, "realtime clock"},
4112 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	1, "PCI hot-plug controller"},
4113 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	1, "SD host controller"},
4114 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_IOMMU,	1, "IOMMU"},
4115 	{PCIC_INPUTDEV,		-1,			1, "input device"},
4116 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	1, "keyboard"},
4117 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4118 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	1, "mouse"},
4119 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	1, "scanner"},
4120 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	1, "gameport"},
4121 	{PCIC_DOCKING,		-1,			1, "docking station"},
4122 	{PCIC_PROCESSOR,	-1,			1, "processor"},
4123 	{PCIC_SERIALBUS,	-1,			1, "serial bus"},
4124 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	1, "FireWire"},
4125 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	1, "AccessBus"},
4126 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	1, "SSA"},
4127 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	1, "USB"},
4128 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	1, "Fibre Channel"},
4129 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	0, "SMBus"},
4130 	{PCIC_WIRELESS,		-1,			1, "wireless controller"},
4131 	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	1, "iRDA"},
4132 	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	1, "IR"},
4133 	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	1, "RF"},
4134 	{PCIC_INTELLIIO,	-1,			1, "intelligent I/O controller"},
4135 	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	1, "I2O"},
4136 	{PCIC_SATCOM,		-1,			1, "satellite communication"},
4137 	{PCIC_SATCOM,		PCIS_SATCOM_TV,		1, "sat TV"},
4138 	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	1, "sat audio"},
4139 	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	1, "sat voice"},
4140 	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	1, "sat data"},
4141 	{PCIC_CRYPTO,		-1,			1, "encrypt/decrypt"},
4142 	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	1, "network/computer crypto"},
4143 	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	1, "entertainment crypto"},
4144 	{PCIC_DASP,		-1,			0, "dasp"},
4145 	{PCIC_DASP,		PCIS_DASP_DPIO,		1, "DPIO module"},
/*
 * BUS_PROBE_NOMATCH method: announce a child device no driver claimed.
 * Prefers a description from the loaded vendor database; otherwise
 * scans pci_nomatch_tab for class (cp) and subclass (scp) names.
 * Finally saves config state (possibly powering the device down).
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4150 pci_probe_nomatch(device_t dev, device_t child)
4153 	const char *cp, *scp;
4157 	 * Look for a listing for this device in a loaded device database.
4160 	if ((device = pci_describe_device(child)) != NULL) {
4161 		device_printf(dev, "<%s>", device);
4162 		free(device, M_DEVBUF);
4165 		 * Scan the class/subclass descriptions for a general
4170 		for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4171 			if (pci_nomatch_tab[i].class == pci_get_class(child)) {
4172 				if (pci_nomatch_tab[i].subclass == -1) {
4173 					cp = pci_nomatch_tab[i].desc;
4174 					report = pci_nomatch_tab[i].report;
4175 				} else if (pci_nomatch_tab[i].subclass ==
4176 				    pci_get_subclass(child)) {
4177 					scp = pci_nomatch_tab[i].desc;
4178 					report = pci_nomatch_tab[i].report;
4182 		if (report || bootverbose) {
4183 			device_printf(dev, "<%s%s%s>",
4185 			    ((cp != NULL) && (scp != NULL)) ? ", " : "",
4189 	if (report || bootverbose) {
4190 		printf(" at device %d.%d (no driver attached)\n",
4191 		    pci_get_slot(child), pci_get_function(child));
4193 	pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * BUS_CHILD_DETACHED method: reclaim resources a detached child driver
 * leaked, warning about each kind.  Order matters: IRQs before MSI
 * release, MSI release before memory deallocation (per comment below).
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4197 pci_child_detached(device_t dev, device_t child)
4199 	struct pci_devinfo *dinfo;
4200 	struct resource_list *rl;
4202 	dinfo = device_get_ivars(child);
4203 	rl = &dinfo->resources;
4206 	 * Have to deallocate IRQs before releasing any MSI messages and
4207 	 * have to release MSI messages before deallocating any memory
4210 	if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4211 		pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4212 	if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4213 		pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
4214 		(void)pci_release_msi(child);
4216 	if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4217 		pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
4218 	if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4219 		pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
4221 	if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4222 		pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
4225 	pci_cfg_save(child, dinfo, 1);
4229 * Parse the PCI device database, if loaded, and return a pointer to a
4230 * description of the device.
4232 * The database is flat text formatted as follows:
4234 * Any line not in a valid format is ignored.
4235 * Lines are terminated with newline '\n' characters.
4237 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4240 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4241 * - devices cannot be listed without a corresponding VENDOR line.
4242 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4243 * another TAB, then the device name.
4247 * Assuming (ptr) points to the beginning of a line in the database,
4248 * return the vendor or device and description of the next entry.
4249 * The value of (vendor) or (device) inappropriate for the entry type
4250 * is set to -1. Returns nonzero at the end of the database.
4252 * Note that this is not fully robust in the face of corrupt data;
4253 * we attempt to safeguard against this by spamming the end of the
4254 * database with a newline when we initialise.
/*
 * Parse one line of the flat-text vendor database at *ptr, filling in
 * either *vendor or *device plus *desc, and advance *ptr past the line.
 * "left" bounds the scan so we never run off the end of the mapped
 * database (which pci_load_vendor_data() terminates with '\n').
 * NOTE(review): extract with elided lines (vendor/device line
 * discrimination logic not fully visible); code left byte-identical.
 */
4257 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
4266 	left = pci_vendordata_size - (cp - pci_vendordata);
	/* Vendor line: hex code, TAB, up to 80 chars of description. */
4274 	    sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
	/* Device line: same format, indented under its vendor. */
4278 	    sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4281 		/* skip to next line */
4282 		while (*cp != '\n' && left > 0) {
4291 	/* skip to next line */
4292 	while (*cp != '\n' && left > 0) {
4296 	if (*cp == '\n' && left > 0)
/*
 * Look up a device in the loaded vendor database and return a
 * malloc(M_DEVBUF)-allocated "vendor, device" description string, or
 * NULL.  Caller frees the result.  If the device ID has no entry, the
 * numeric ID is substituted (snprintf below).
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4303 pci_describe_device(device_t dev)
4306 	char *desc, *vp, *dp, *line;
4308 	desc = vp = dp = NULL;
4311 	 * If we have no vendor data, we can't do anything.
4313 	if (pci_vendordata == NULL)
4317 	 * Scan the vendor data looking for this device
4319 	line = pci_vendordata;
4320 	if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4323 		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4325 		if (vendor == pci_get_vendor(dev))
4328 	if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4331 		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4339 		if (device == pci_get_device(dev))
	/* No device entry found: fall back to the raw hex device ID. */
4343 		snprintf(dp, 80, "0x%x", pci_get_device(dev));
4344 	if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4346 		sprintf(desc, "%s, %s", vp, dp);
/*
 * BUS_READ_IVAR method: return the requested instance variable for a
 * child from its cached config registers (dinfo->cfg).
 * NOTE(review): extract with elided lines (break/return statements and
 * some cases missing); code left byte-identical.
 */
4356 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4358 	struct pci_devinfo *dinfo;
4361 	dinfo = device_get_ivars(child);
4365 	case PCI_IVAR_ETHADDR:
4367 		 * The generic accessor doesn't deal with failure, so
4368 		 * we set the return value, then return an error.
4370 		*((uint8_t **) result) = NULL;
4372 	case PCI_IVAR_SUBVENDOR:
4373 		*result = cfg->subvendor;
4375 	case PCI_IVAR_SUBDEVICE:
4376 		*result = cfg->subdevice;
4378 	case PCI_IVAR_VENDOR:
4379 		*result = cfg->vendor;
4381 	case PCI_IVAR_DEVICE:
4382 		*result = cfg->device;
4384 	case PCI_IVAR_DEVID:
	/* devid packs device ID in the high 16 bits, vendor in the low. */
4385 		*result = (cfg->device << 16) | cfg->vendor;
4387 	case PCI_IVAR_CLASS:
4388 		*result = cfg->baseclass;
4390 	case PCI_IVAR_SUBCLASS:
4391 		*result = cfg->subclass;
4393 	case PCI_IVAR_PROGIF:
4394 		*result = cfg->progif;
4396 	case PCI_IVAR_REVID:
4397 		*result = cfg->revid;
4399 	case PCI_IVAR_INTPIN:
4400 		*result = cfg->intpin;
4403 		*result = cfg->intline;
4405 	case PCI_IVAR_DOMAIN:
4406 		*result = cfg->domain;
4412 		*result = cfg->slot;
4414 	case PCI_IVAR_FUNCTION:
4415 		*result = cfg->func;
4417 	case PCI_IVAR_CMDREG:
4418 		*result = cfg->cmdreg;
4420 	case PCI_IVAR_CACHELNSZ:
4421 		*result = cfg->cachelnsz;
4423 	case PCI_IVAR_MINGNT:
4424 		*result = cfg->mingnt;
4426 	case PCI_IVAR_MAXLAT:
4427 		*result = cfg->maxlat;
4429 	case PCI_IVAR_LATTIMER:
4430 		*result = cfg->lattimer;
/*
 * BUS_WRITE_IVAR method: only PCI_IVAR_INTPIN is writable; all the
 * identity/topology ivars are read-only and return EINVAL.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4439 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4441 	struct pci_devinfo *dinfo;
4443 	dinfo = device_get_ivars(child);
4446 	case PCI_IVAR_INTPIN:
4447 		dinfo->cfg.intpin = value;
	/* Everything below identifies the device and must stay fixed. */
4449 	case PCI_IVAR_ETHADDR:
4450 	case PCI_IVAR_SUBVENDOR:
4451 	case PCI_IVAR_SUBDEVICE:
4452 	case PCI_IVAR_VENDOR:
4453 	case PCI_IVAR_DEVICE:
4454 	case PCI_IVAR_DEVID:
4455 	case PCI_IVAR_CLASS:
4456 	case PCI_IVAR_SUBCLASS:
4457 	case PCI_IVAR_PROGIF:
4458 	case PCI_IVAR_REVID:
4460 	case PCI_IVAR_DOMAIN:
4463 	case PCI_IVAR_FUNCTION:
4464 		return (EINVAL);	/* disallow for now */
4471 #include "opt_ddb.h"
4473 #include <ddb/ddb.h>
4474 #include <sys/cons.h>
4477 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (name@pci selector, class, card,
 * chip, revision, header type).  Stops on db_pager_quit.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4480 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4482 	struct pci_devinfo *dinfo;
4483 	struct devlist *devlist_head;
4486 	int i, error, none_count;
4489 	/* get the head of the device queue */
4490 	devlist_head = &pci_devq;
4493 	 * Go through the list of devices and print out devices
4495 	for (error = 0, i = 0,
4496 	    dinfo = STAILQ_FIRST(devlist_head);
4497 	    (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4498 	    dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4500 		/* Populate pd_name and pd_unit */
4503 		name = device_get_name(dinfo->cfg.dev);
4506 		db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4507 		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4508 		    (name && *name) ? name : "none",
4509 		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4511 		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4512 		    p->pc_sel.pc_func, (p->pc_class << 16) |
4513 		    (p->pc_subclass << 8) | p->pc_progif,
4514 		    (p->pc_subdevice << 16) | p->pc_subvendor,
4515 		    (p->pc_device << 16) | p->pc_vendor,
4516 		    p->pc_revid, p->pc_hdr);
/*
 * Lazily size and reserve the resource backing a BAR that was not
 * allocated at attach time: probe the BAR to learn its size and type,
 * validate the requested type against it, reserve a suitably sized and
 * aligned range from the resource list, then program the BAR with the
 * assigned start address.  Returns the reserved resource or NULL.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4521 static struct resource *
4522 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4523     u_long start, u_long end, u_long count, u_int num, u_int flags)
4525 	struct pci_devinfo *dinfo = device_get_ivars(child);
4526 	struct resource_list *rl = &dinfo->resources;
4527 	struct resource *res;
4529 	pci_addr_t map, testval;
4533 	pm = pci_find_bar(child, *rid);
4535 		/* This is a BAR that we failed to allocate earlier. */
4536 		mapsize = pm->pm_size;
4540 	 * Weed out the bogons, and figure out how large the
4541 	 * BAR/map is.  BARs that read back 0 here are bogus
4542 	 * and unimplemented.  Note: atapci in legacy mode are
4543 	 * special and handled elsewhere in the code.  If you
4544 	 * have a atapci device in legacy mode and it fails
4545 	 * here, that other code is broken.
4547 	pci_read_bar(child, *rid, &map, &testval, NULL);
4550 	 * Determine the size of the BAR and ignore BARs with a size
4551 	 * of 0.  Device ROM BARs use a different mask value.
4553 	if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4554 		mapsize = pci_romsize(testval);
4556 		mapsize = pci_mapsize(testval);
4559 	pm = pci_add_bar(child, *rid, map, mapsize);
	/* Reject requests whose type contradicts what the BAR decodes. */
4562 	if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4563 		if (type != SYS_RES_MEMORY) {
4566 				"child %s requested type %d for rid %#x,"
4567 				" but the BAR says it is an memio\n",
4568 				device_get_nameunit(child), type, *rid);
4572 		if (type != SYS_RES_IOPORT) {
4575 				"child %s requested type %d for rid %#x,"
4576 				" but the BAR says it is an ioport\n",
4577 				device_get_nameunit(child), type, *rid);
4583 	 * For real BARs, we need to override the size that
4584 	 * the driver requests, because that's what the BAR
4585 	 * actually uses and we would otherwise have a
4586 	 * situation where we might allocate the excess to
4587 	 * another driver, which won't work.
4589 	count = ((pci_addr_t)1 << mapsize) * num;
4590 	if (RF_ALIGNMENT(flags) < mapsize)
4591 		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4592 	if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4593 		flags |= RF_PREFETCHABLE;
4596 	 * Allocate enough resource, and then write back the
4597 	 * appropriate BAR for that resource.
4599 	resource_list_add(rl, type, *rid, start, end, count);
4600 	res = resource_list_reserve(rl, dev, child, type, rid, start, end,
4601 	    count, flags & ~RF_ACTIVE);
	/* On failure, undo the resource-list entry we just added. */
4603 		resource_list_delete(rl, type, *rid);
4604 		device_printf(child,
4605 		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4606 		    count, *rid, type, start, end);
4610 		device_printf(child,
4611 		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4612 		    count, *rid, type, rman_get_start(res));
4613 	map = rman_get_start(res);
4614 	pci_write_bar(child, pm, map);
/*
 * Core of BUS_ALLOC_RESOURCE for direct children: lazily assign legacy
 * interrupts, pass bridge-window and secondary-bus requests up the
 * tree, and reserve BAR-backed I/O-port/memory ranges on demand via
 * pci_reserve_map() before handing out from the resource list.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4620 pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
4621     u_long start, u_long end, u_long count, u_long num, u_int flags)
4623 	struct pci_devinfo *dinfo;
4624 	struct resource_list *rl;
4625 	struct resource_list_entry *rle;
4626 	struct resource *res;
4630 	 * Perform lazy resource allocation
4632 	dinfo = device_get_ivars(child);
4633 	rl = &dinfo->resources;
4636 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
4638 		return (pci_alloc_secbus(dev, child, rid, start, end, count,
4643 		 * Can't alloc legacy interrupt once MSI messages have
4646 		if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4647 		    cfg->msix.msix_alloc > 0))
4651 		 * If the child device doesn't have an interrupt
4652 		 * routed and is deserving of an interrupt, try to
4655 		if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4657 			pci_assign_interrupt(dev, child, 0);
4659 	case SYS_RES_IOPORT:
4660 	case SYS_RES_MEMORY:
4663 		 * PCI-PCI bridge I/O window resources are not BARs.
4664 		 * For those allocations just pass the request up the
4667 		if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4669 			case PCIR_IOBASEL_1:
4670 			case PCIR_MEMBASE_1:
4671 			case PCIR_PMBASEL_1:
4673 				 * XXX: Should we bother creating a resource
4676 				return (bus_generic_alloc_resource(dev, child,
4677 				    type, rid, start, end, count, flags));
4681 		/* Reserve resources for this BAR if needed. */
4682 		rle = resource_list_find(rl, type, *rid);
4684 			res = pci_reserve_map(dev, child, type, rid, start, end,
4690 	return (resource_list_alloc(rl, dev, child, type, rid,
4691 	    start, end, count, flags));
/*
 * BUS_ALLOC_RESOURCE method: forward requests for grandchildren to the
 * parent bus; handle direct children via pci_alloc_multi_resource().
 */
4695 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4696     u_long start, u_long end, u_long count, u_int flags)
4699 	if (device_get_parent(child) != dev)
4700 		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4701 		    type, rid, start, end, count, flags));
4703 	return (pci_alloc_multi_resource(dev, child, type, rid, start, end,
/*
 * BUS_RELEASE_RESOURCE method: forward non-direct children upward;
 * bridge window registers (not BARs) also go up the tree; everything
 * else is released through the child's resource list.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4708 pci_release_resource(device_t dev, device_t child, int type, int rid,
4711 	struct pci_devinfo *dinfo;
4712 	struct resource_list *rl;
4715 	if (device_get_parent(child) != dev)
4716 		return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4719 	dinfo = device_get_ivars(child);
4723 	 * PCI-PCI bridge I/O window resources are not BARs.  For
4724 	 * those allocations just pass the request up the tree.
4726 	if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
4727 	    (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
4729 		case PCIR_IOBASEL_1:
4730 		case PCIR_MEMBASE_1:
4731 		case PCIR_PMBASEL_1:
4732 			return (bus_generic_release_resource(dev, child, type,
4738 	rl = &dinfo->resources;
4739 	return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * BUS_ACTIVATE_RESOURCE method: after generic activation, enable the
 * relevant decode (I/O or memory) in the child's command register, and
 * explicitly enable decoding for device ROM BARs.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4743 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4746 	struct pci_devinfo *dinfo;
4749 	error = bus_generic_activate_resource(dev, child, type, rid, r);
4753 	/* Enable decoding in the command register when activating BARs. */
4754 	if (device_get_parent(child) == dev) {
4755 		/* Device ROMs need their decoding explicitly enabled. */
4756 		dinfo = device_get_ivars(child);
4757 		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4758 			pci_write_bar(child, pci_find_bar(child, rid),
4759 			    rman_get_start(r) | PCIM_BIOS_ENABLE);
4761 		case SYS_RES_IOPORT:
4762 		case SYS_RES_MEMORY:
4763 			error = PCI_ENABLE_IO(dev, child, type);
/*
 * BUS_DEACTIVATE_RESOURCE method: generic deactivation, then clear the
 * enable bit in a device ROM BAR so it stops decoding.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4771 pci_deactivate_resource(device_t dev, device_t child, int type,
4772     int rid, struct resource *r)
4774 	struct pci_devinfo *dinfo;
4777 	error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4781 	/* Disable decoding for device ROMs. */
4782 	if (device_get_parent(child) == dev) {
4783 		dinfo = device_get_ivars(child);
4784 		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4785 			pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Detach and destroy a child device: disable its memory/port decoding,
 * release (or forcibly reclaim) every resource on its list, then delete
 * the device from the tree.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4792 pci_delete_child(device_t dev, device_t child)
4794 	struct resource_list_entry *rle;
4795 	struct resource_list *rl;
4796 	struct pci_devinfo *dinfo;
4798 	dinfo = device_get_ivars(child);
4799 	rl = &dinfo->resources;
4801 	if (device_is_attached(child))
4802 		device_detach(child);
4804 	/* Turn off access to resources we're about to free */
4805 	pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4806 	    PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4808 	/* Free all allocated resources */
4809 	STAILQ_FOREACH(rle, rl, link) {
	/* Resources the driver failed to release are reclaimed loudly. */
4811 			if (rman_get_flags(rle->res) & RF_ACTIVE ||
4812 			    resource_list_busy(rl, rle->type, rle->rid)) {
4813 				pci_printf(&dinfo->cfg,
4814 				    "Resource still owned, oops. "
4815 				    "(type=%d, rid=%d, addr=%lx)\n",
4816 				    rle->type, rle->rid,
4817 				    rman_get_start(rle->res));
4818 				bus_release_resource(child, rle->type, rle->rid,
4821 			resource_list_unreserve(rl, dev, child, rle->type,
4825 	resource_list_free(rl);
4827 	device_delete_child(dev, child);
/*
 * BUS_DELETE_RESOURCE method: remove a resource-list entry for a direct
 * child, refusing (with a warning) if the resource is still active or
 * busy; otherwise unreserve it first.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4832 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4834 	struct pci_devinfo *dinfo;
4835 	struct resource_list *rl;
4836 	struct resource_list_entry *rle;
4838 	if (device_get_parent(child) != dev)
4841 	dinfo = device_get_ivars(child);
4842 	rl = &dinfo->resources;
4843 	rle = resource_list_find(rl, type, rid);
4848 		if (rman_get_flags(rle->res) & RF_ACTIVE ||
4849 		    resource_list_busy(rl, type, rid)) {
4850 			device_printf(dev, "delete_resource: "
4851 			    "Resource still owned by child, oops. "
4852 			    "(type=%d, rid=%d, addr=%lx)\n",
4853 			    type, rid, rman_get_start(rle->res));
4856 		resource_list_unreserve(rl, dev, child, type, rid);
4858 	resource_list_delete(rl, type, rid);
/*
 * BUS_GET_RESOURCE_LIST method: return the child's per-device resource
 * list stored in its ivars.
 */
4861 struct resource_list *
4862 pci_get_resource_list (device_t dev, device_t child)
4864 	struct pci_devinfo *dinfo = device_get_ivars(child);
4866 	return (&dinfo->resources);
/*
 * BUS_GET_DMA_TAG method: all children share the bus softc's DMA tag.
 */
4870 pci_get_dma_tag(device_t bus, device_t dev)
4872 	struct pci_softc *sc = device_get_softc(bus);
4874 	return (sc->sc_dma_tag);
/*
 * PCI_READ_CONFIG method: delegate the config-space read to the parent
 * bridge using the child's cached bus/slot/function selector.
 */
4878 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4880 	struct pci_devinfo *dinfo = device_get_ivars(child);
4881 	pcicfgregs *cfg = &dinfo->cfg;
4883 	return (PCIB_READ_CONFIG(device_get_parent(dev),
4884 	    cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * PCI_WRITE_CONFIG method: delegate the config-space write to the
 * parent bridge using the child's cached bus/slot/function selector.
 */
4888 pci_write_config_method(device_t dev, device_t child, int reg,
4889     uint32_t val, int width)
4891 	struct pci_devinfo *dinfo = device_get_ivars(child);
4892 	pcicfgregs *cfg = &dinfo->cfg;
4894 	PCIB_WRITE_CONFIG(device_get_parent(dev),
4895 	    cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * BUS_CHILD_LOCATION_STR method: format the child's location as
 * "pci<domain>:<bus>:<slot>:<function>" into buf.
 */
4899 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4903 	snprintf(buf, buflen, "pci%d:%d:%d:%d", pci_get_domain(child),
4904 	    pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
/*
 * BUS_CHILD_PNPINFO_STR method: format the child's plug-and-play
 * identity (vendor/device/subvendor/subdevice/class) into buf for
 * devd/driver matching.
 */
4909 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4912 	struct pci_devinfo *dinfo;
4915 	dinfo = device_get_ivars(child);
4917 	snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4918 	    "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4919 	    cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * PCI_ASSIGN_INTERRUPT method: ask the parent bridge to route the
 * child's legacy INTx interrupt.
 */
4925 pci_assign_interrupt_method(device_t dev, device_t child)
4927 	struct pci_devinfo *dinfo = device_get_ivars(child);
4928 	pcicfgregs *cfg = &dinfo->cfg;
4930 	return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * dev_lookup eventhandler: translate a pciconf-style device name
 * ("pciD:B:S:F" or "pciB:S:F", domain defaulting in the latter form)
 * into a device_t via pci_find_dbsf().  Each component is parsed with
 * strtol and range-checked against the PCI maxima.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4935 pci_lookup(void *arg, const char *name, device_t *dev)
4939 	int domain, bus, slot, func;
4945 	 * Accept pciconf-style selectors of either pciD:B:S:F or
4946 	 * pciB:S:F.  In the latter case, the domain is assumed to
4949 	if (strncmp(name, "pci", 3) != 0)
4951 	val = strtol(name + 3, &end, 10);
4952 	if (val < 0 || val > INT_MAX || *end != ':')
4955 	val = strtol(end + 1, &end, 10);
4956 	if (val < 0 || val > INT_MAX || *end != ':')
4959 	val = strtol(end + 1, &end, 10);
4960 	if (val < 0 || val > INT_MAX)
	/* A third ':' means the four-component pciD:B:S:F form. */
4964 		val = strtol(end + 1, &end, 10);
4965 		if (val < 0 || val > INT_MAX || *end != '\0')
4968 	} else if (*end == '\0') {
4976 	if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
4977 	    func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
4980 	*dev = pci_find_dbsf(domain, bus, slot, func);
/*
 * Module event handler: on load, initialize the global device queue,
 * create /dev/pci, load the vendor database, and register the
 * dev_lookup hook; on unload, deregister and destroy them.
 * NOTE(review): extract with elided lines; code left byte-identical.
 */
4984 pci_modevent(module_t mod, int what, void *arg)
4986 	static struct cdev *pci_cdev;
4987 	static eventhandler_tag tag;
4991 		STAILQ_INIT(&pci_devq);
4993 		pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4995 		pci_load_vendor_data();
4996 		tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
5002 		EVENTHANDLER_DEREGISTER(dev_lookup, tag);
5003 		destroy_dev(pci_cdev);
/*
 * Restore the writable PCI Express capability registers saved by
 * pci_cfg_save_pcie().  Which control registers exist depends on the
 * capability version and the port type flags, mirroring the checks in
 * the save path.
 */
5011 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
5013 #define	WREG(n, v)	pci_write_config(dev, pos + (n), (v), 2)
5014 	struct pcicfg_pcie *cfg;
5017 	cfg = &dinfo->cfg.pcie;
5018 	pos = cfg->pcie_location;
5020 	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5022 	WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
5024 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5025 	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5026 	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5027 		WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
5029 	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5030 	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5031 	    (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5032 		WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
5034 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5035 	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5036 		WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
	/* Version 2+ adds the "_2" control registers. */
5039 		WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
5040 		WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
5041 		WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
/*
 * Restore the saved PCI-X command register.
 */
5047 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
5049 	pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
5050 	    dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a (type-0 header) device's config space from the cached copy
 * in dinfo: power it up to D0 first (BARs reset on D3->D0), then write
 * back BARs, command/interrupt/latency registers, the PCIe and PCI-X
 * capabilities, and re-program MSI/MSI-X state.
 */
5054 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
5058 	 * Only do header type 0 devices.  Type 1 devices are bridges,
5059 	 * which we know need special treatment.  Type 2 devices are
5060 	 * cardbus bridges which also require special treatment.
5061 	 * Other types are unknown, and we err on the side of safety
5064 	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5068 	 * Restore the device to full power mode.  We must do this
5069 	 * before we restore the registers because moving from D3 to
5070 	 * D0 will cause the chip's BARs and some other registers to
5071 	 * be reset to some unknown power on reset values.  Cut down
5072 	 * the noise on boot by doing nothing if we are already in
5075 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
5076 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5077 	pci_restore_bars(dev);
5078 	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
5079 	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
5080 	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
5081 	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
5082 	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
5083 	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
5084 	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
5085 	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
5086 	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
5089 	 * Restore extended capabilities for PCI-Express and PCI-X
5091 	if (dinfo->cfg.pcie.pcie_location != 0)
5092 		pci_cfg_restore_pcie(dev, dinfo);
5093 	if (dinfo->cfg.pcix.pcix_location != 0)
5094 		pci_cfg_restore_pcix(dev, dinfo);
5096 	/* Restore MSI and MSI-X configurations if they are present. */
5097 	if (dinfo->cfg.msi.msi_location != 0)
5098 		pci_resume_msi(dev);
5099 	if (dinfo->cfg.msix.msix_location != 0)
5100 		pci_resume_msix(dev);
/*
 * Save the writable PCI Express capability registers so that
 * pci_cfg_restore_pcie() can re-program them after a power transition.
 * The register set saved depends on the capability version and port
 * type, matching the restore path's conditions exactly.
 */
5104 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
5106 #define	RREG(n)	pci_read_config(dev, pos + (n), 2)
5107 	struct pcicfg_pcie *cfg;
5110 	cfg = &dinfo->cfg.pcie;
5111 	pos = cfg->pcie_location;
5113 	cfg->pcie_flags = RREG(PCIER_FLAGS);
5115 	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5117 	cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
5119 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5120 	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5121 	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5122 		cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
5124 	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5125 	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5126 	    (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5127 		cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
5129 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5130 	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5131 		cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
	/* Version 2+ adds the "_2" control registers. */
5134 		cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5135 		cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5136 		cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
/*
 * Save the PCI-X command register for later restore.
 */
5142 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5144 	dinfo->cfg.pcix.pcix_command  = pci_read_config(dev,
5145 	    dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
5149 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5155 * Only do header type 0 devices. Type 1 devices are bridges, which
5156 * we know need special treatment. Type 2 devices are cardbus bridges
5157 * which also require special treatment. Other types are unknown, and
5158 * we err on the side of safety by ignoring them. Powering down
5159 * bridges should not be undertaken lightly.
5161 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5165 * Some drivers apparently write to these registers w/o updating our
5166 * cached copy. No harm happens if we update the copy, so do so here
5167 * so we can restore them. The COMMAND register is modified by the
5168 * bus w/o updating the cache. This should represent the normally
5169 * writable portion of the 'defined' part of type 0 headers. In
5170 * theory we also need to save/restore the PCI capability structures
5171 * we know about, but apart from power we don't know any that are
5174 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5175 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5176 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5177 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5178 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5179 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5180 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5181 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5182 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5183 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5184 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5185 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5186 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5187 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5188 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
5190 if (dinfo->cfg.pcie.pcie_location != 0)
5191 pci_cfg_save_pcie(dev, dinfo);
5193 if (dinfo->cfg.pcix.pcix_location != 0)
5194 pci_cfg_save_pcix(dev, dinfo);
5197 * don't set the state for display devices, base peripherals and
5198 * memory devices since bad things happen when they are powered down.
5199 * We should (a) have drivers that can easily detach and (b) use
5200 * generic drivers for these devices so that some device actually
5201 * attaches. We need to make sure that when we implement (a) we don't
5202 * power the device down on a reattach.
5204 cls = pci_get_class(dev);
5207 switch (pci_do_power_nodriver)
5209 case 0: /* NO powerdown at all */
5211 case 1: /* Conservative about what to power down */
5212 if (cls == PCIC_STORAGE)
5215 case 2: /* Agressive about what to power down */
5216 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5217 cls == PCIC_BASEPERIPH)
5220 case 3: /* Power down everything */
5224 * PCI spec says we can only go into D3 state from D0 state.
5225 * Transition from D[12] into D0 before going to D3 state.
5227 ps = pci_get_powerstate(dev);
5228 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5229 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5230 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5231 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5234 /* Wrapper APIs suitable for device driver use. */
5236 pci_save_state(device_t dev)
5238 struct pci_devinfo *dinfo;
5240 dinfo = device_get_ivars(dev);
5241 pci_cfg_save(dev, dinfo, 0);
5245 pci_restore_state(device_t dev)
5247 struct pci_devinfo *dinfo;
5249 dinfo = device_get_ivars(dev);
5250 pci_cfg_restore(dev, dinfo);
5254 pci_get_rid_method(device_t dev, device_t child)
5257 return (PCIB_GET_RID(device_get_parent(dev), child));