2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
/*
 * True iff config register 'reg' is the expansion-ROM BAR for this header
 * type: PCIR_BIOS on type-0 (normal) functions, PCIR_BIOS_1 on type-1
 * (PCI-PCI bridge) functions.
 */
#define PCIR_IS_BIOS(cfg, reg) \
    (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
    ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
77 static int pci_has_quirk(uint32_t devid, int quirk);
78 static pci_addr_t pci_mapbase(uint64_t mapreg);
79 static const char *pci_maptype(uint64_t mapreg);
80 static int pci_mapsize(uint64_t testval);
81 static int pci_maprange(uint64_t mapreg);
82 static pci_addr_t pci_rombase(uint64_t mapreg);
83 static int pci_romsize(uint64_t testval);
84 static void pci_fixancient(pcicfgregs *cfg);
85 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
87 static int pci_porten(device_t dev);
88 static int pci_memen(device_t dev);
89 static void pci_assign_interrupt(device_t bus, device_t dev,
91 static int pci_add_map(device_t bus, device_t dev, int reg,
92 struct resource_list *rl, int force, int prefetch);
93 static int pci_probe(device_t dev);
94 static int pci_attach(device_t dev);
96 static int pci_detach(device_t dev);
98 static void pci_load_vendor_data(void);
99 static int pci_describe_parse_line(char **ptr, int *vendor,
100 int *device, char **desc);
101 static char *pci_describe_device(device_t dev);
102 static int pci_modevent(module_t mod, int what, void *arg);
103 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
105 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
106 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
107 int reg, uint32_t *data);
109 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
110 int reg, uint32_t data);
112 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
113 static void pci_mask_msix(device_t dev, u_int index);
114 static void pci_unmask_msix(device_t dev, u_int index);
115 static int pci_msi_blacklisted(void);
116 static int pci_msix_blacklisted(void);
117 static void pci_resume_msi(device_t dev);
118 static void pci_resume_msix(device_t dev);
119 static int pci_remap_intr_method(device_t bus, device_t dev,
122 static uint16_t pci_get_rid_method(device_t dev, device_t child);
124 static device_method_t pci_methods[] = {
125 /* Device interface */
126 DEVMETHOD(device_probe, pci_probe),
127 DEVMETHOD(device_attach, pci_attach),
129 DEVMETHOD(device_detach, pci_detach),
131 DEVMETHOD(device_detach, bus_generic_detach),
133 DEVMETHOD(device_shutdown, bus_generic_shutdown),
134 DEVMETHOD(device_suspend, pci_suspend),
135 DEVMETHOD(device_resume, pci_resume),
138 DEVMETHOD(bus_print_child, pci_print_child),
139 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
140 DEVMETHOD(bus_read_ivar, pci_read_ivar),
141 DEVMETHOD(bus_write_ivar, pci_write_ivar),
142 DEVMETHOD(bus_driver_added, pci_driver_added),
143 DEVMETHOD(bus_setup_intr, pci_setup_intr),
144 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
146 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
147 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
148 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
149 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
150 DEVMETHOD(bus_delete_resource, pci_delete_resource),
151 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
152 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
153 DEVMETHOD(bus_release_resource, pci_release_resource),
154 DEVMETHOD(bus_activate_resource, pci_activate_resource),
155 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
156 DEVMETHOD(bus_child_detached, pci_child_detached),
157 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
158 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
159 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
162 DEVMETHOD(pci_read_config, pci_read_config_method),
163 DEVMETHOD(pci_write_config, pci_write_config_method),
164 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
165 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
166 DEVMETHOD(pci_enable_io, pci_enable_io_method),
167 DEVMETHOD(pci_disable_io, pci_disable_io_method),
168 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
169 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
170 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
171 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
172 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
173 DEVMETHOD(pci_find_cap, pci_find_cap_method),
174 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
175 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
176 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
177 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
178 DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
179 DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
180 DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
181 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
182 DEVMETHOD(pci_release_msi, pci_release_msi_method),
183 DEVMETHOD(pci_msi_count, pci_msi_count_method),
184 DEVMETHOD(pci_msix_count, pci_msix_count_method),
185 DEVMETHOD(pci_get_rid, pci_get_rid_method),
186 DEVMETHOD(pci_child_added, pci_child_added_method),
/* Declare the pci driver class and register it under the pcib (bridge) bus. */
DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));

static devclass_t pci_devclass;
DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
MODULE_VERSION(pci, 1);
/*
 * Buffer holding the vendor/device description database loaded by
 * pci_load_vendor_data(), and its size in bytes.
 */
static char *pci_vendordata;
static size_t pci_vendordata_size;
201 uint32_t devid; /* Vendor/device of the card */
203 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
204 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
205 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
206 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
207 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
208 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
213 static const struct pci_quirk pci_quirks[] = {
214 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
215 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
216 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
217 /* As does the Serverworks OSB4 (the SMBus mapping register) */
218 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
221 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
222 * or the CMIC-SL (AKA ServerWorks GC_LE).
224 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
225 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
228 * MSI doesn't work on earlier Intel chipsets including
229 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
231 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
233 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
234 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
243 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
246 * MSI-X allocation doesn't work properly for devices passed through
247 * by VMware up to at least ESXi 5.1.
249 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
250 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
253 * Some virtualization environments emulate an older chipset
254 * but support MSI just fine. QEMU uses the Intel 82440.
256 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
259 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
260 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
261 * It prevents us from attaching hpet(4) when the bit is unset.
262 * Note this quirk only affects SB600 revision A13 and earlier.
263 * For SB600 A21 and later, firmware must set the bit to hide it.
264 * For SB700 and later, it is unused and hardcoded to zero.
266 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
269 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
270 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
271 * command register is set.
273 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
274 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
275 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
278 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
279 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
281 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
282 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
283 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
284 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
285 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
286 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
/* map register information */
#define PCI_MAPMEM 0x01 /* memory map */
#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
#define PCI_MAPPORT 0x04 /* port map */

/* Global list of all discovered PCI functions, plus bookkeeping counters. */
struct devlist pci_devq;
uint32_t pci_generation;    /* bumped when pci_devq changes */
uint32_t pci_numdevs = 0;
/* Set when a PCIe / PCI-X capability is seen anywhere in the system. */
static int pcie_chipset, pcix_chipset;
/* Root sysctl node for the tunables below (hw.pci.*). */
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");

/* Turn on I/O-space / memory-space decoding bits that firmware left off. */
static int pci_enable_io_modes = 1;
TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
    &pci_enable_io_modes, 1,
    "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
enable these bits correctly. We'd like to do this all the time, but there\n\
are some peripherals that this causes problems with.");

/* Retry BAR allocation with a fresh range when firmware's range fails. */
static int pci_do_realloc_bars = 0;
TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
    &pci_do_realloc_bars, 0,
    "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
318 static int pci_do_power_nodriver = 0;
319 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
320 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
321 &pci_do_power_nodriver, 0,
322 "Place a function into D3 state when no driver attaches to it. 0 means\n\
323 disable. 1 means conservatively place devices into D3 state. 2 means\n\
324 agressively place devices into D3 state. 3 means put absolutely everything\n\
/* Bring devices back to D0 on system resume. */
int pci_do_power_resume = 1;
TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
    &pci_do_power_resume, 1,
    "Transition from D3 -> D0 on resume.");

/* Put devices into D3 on system suspend. */
int pci_do_power_suspend = 1;
TUNABLE_INT("hw.pci.do_power_suspend", &pci_do_power_suspend);
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RW,
    &pci_do_power_suspend, 1,
    "Transition from D0 -> D3 on suspend.");

/* Global switches for MSI and MSI-X interrupt support. */
static int pci_do_msi = 1;
TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
    "Enable support for MSI interrupts");

static int pci_do_msix = 1;
TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
    "Enable support for MSI-X interrupts");

/* Respect the chipset quirk table when deciding whether to use MSI/MSI-X. */
static int pci_honor_msi_blacklist = 1;
TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
    &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
354 #if defined(__i386__) || defined(__amd64__)
355 static int pci_usb_takeover = 1;
357 static int pci_usb_takeover = 0;
359 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
360 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
361 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
362 Disable this if you depend on BIOS emulation of USB devices, that is\n\
363 you use USB devices (like keyboard or mouse) but do not load USB drivers");
/* When set, discard firmware-programmed BAR values and re-allocate. */
static int pci_clear_bars;
TUNABLE_INT("hw.pci.clear_bars", &pci_clear_bars);
SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
    "Ignore firmware-assigned resources for BARs.");
/*
 * When set, renumber secondary buses instead of trusting firmware.
 * Only meaningful with NEW_PCIB bus-number resource management; the
 * conditional's #endif was lost to truncation and is restored.
 */
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static int pci_clear_buses;
TUNABLE_INT("hw.pci.clear_buses", &pci_clear_buses);
SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
    "Ignore firmware-assigned bus numbers.");
#endif
/* Enable PCIe Alternative RID Interpretation when the hardware supports it. */
static int pci_enable_ari = 1;
TUNABLE_INT("hw.pci.enable_ari", &pci_enable_ari);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
    0, "Enable support for PCIe Alternative RID Interpretation");
383 pci_has_quirk(uint32_t devid, int quirk)
385 const struct pci_quirk *q;
387 for (q = &pci_quirks[0]; q->devid; q++) {
388 if (q->devid == devid && q->type == quirk)
394 /* Find a device_t by bus/slot/function in domain 0 */
397 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
400 return (pci_find_dbsf(0, bus, slot, func));
403 /* Find a device_t by domain/bus/slot/function */
406 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
408 struct pci_devinfo *dinfo;
410 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
411 if ((dinfo->cfg.domain == domain) &&
412 (dinfo->cfg.bus == bus) &&
413 (dinfo->cfg.slot == slot) &&
414 (dinfo->cfg.func == func)) {
415 return (dinfo->cfg.dev);
422 /* Find a device_t by vendor/device ID */
425 pci_find_device(uint16_t vendor, uint16_t device)
427 struct pci_devinfo *dinfo;
429 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
430 if ((dinfo->cfg.vendor == vendor) &&
431 (dinfo->cfg.device == device)) {
432 return (dinfo->cfg.dev);
440 pci_find_class(uint8_t class, uint8_t subclass)
442 struct pci_devinfo *dinfo;
444 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
445 if (dinfo->cfg.baseclass == class &&
446 dinfo->cfg.subclass == subclass) {
447 return (dinfo->cfg.dev);
455 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
460 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
463 retval += vprintf(fmt, ap);
468 /* return base address of memory or port map */
471 pci_mapbase(uint64_t mapreg)
474 if (PCI_BAR_MEM(mapreg))
475 return (mapreg & PCIM_BAR_MEM_BASE);
477 return (mapreg & PCIM_BAR_IO_BASE);
480 /* return map type of memory or port map */
483 pci_maptype(uint64_t mapreg)
486 if (PCI_BAR_IO(mapreg))
488 if (mapreg & PCIM_BAR_MEM_PREFETCH)
489 return ("Prefetchable Memory");
493 /* return log2 of map size decoded for memory or port map */
496 pci_mapsize(uint64_t testval)
500 testval = pci_mapbase(testval);
503 while ((testval & 1) == 0)
512 /* return base address of device ROM */
515 pci_rombase(uint64_t mapreg)
518 return (mapreg & PCIM_BIOS_ADDR_MASK);
521 /* return log2 of map size decided for device ROM */
524 pci_romsize(uint64_t testval)
528 testval = pci_rombase(testval);
531 while ((testval & 1) == 0)
540 /* return log2 of address range supported by map register */
543 pci_maprange(uint64_t mapreg)
547 if (PCI_BAR_IO(mapreg))
550 switch (mapreg & PCIM_BAR_MEM_TYPE) {
551 case PCIM_BAR_MEM_32:
554 case PCIM_BAR_MEM_1MB:
557 case PCIM_BAR_MEM_64:
564 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
567 pci_fixancient(pcicfgregs *cfg)
569 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
572 /* PCI to PCI bridges use header type 1 */
573 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
574 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
577 /* extract header type specific config data */
580 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
582 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
583 switch (cfg->hdrtype & PCIM_HDRTYPE) {
584 case PCIM_HDRTYPE_NORMAL:
585 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
586 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
587 cfg->mingnt = REG(PCIR_MINGNT, 1);
588 cfg->maxlat = REG(PCIR_MAXLAT, 1);
589 cfg->nummaps = PCI_MAXMAPS_0;
591 case PCIM_HDRTYPE_BRIDGE:
592 cfg->nummaps = PCI_MAXMAPS_1;
594 case PCIM_HDRTYPE_CARDBUS:
595 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
596 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
597 cfg->nummaps = PCI_MAXMAPS_2;
/* read configuration header into pcicfgregs structure */
/*
 * NOTE(review): this chunk is a partial view of the file -- the
 * "struct pci_devinfo *" return-type line, the cfg->domain/bus/slot/func
 * assignments and several closing braces of this function are not
 * visible here.  Code lines below are kept exactly as seen.
 */
pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
    pcicfgregs *cfg = NULL;
    struct pci_devinfo *devlist_entry;
    struct devlist *devlist_head;
    devlist_head = &pci_devq;
    devlist_entry = NULL;
    /* All-ones vendor/device means no function responds at this address. */
    if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
        /* 'size' lets callers embed pci_devinfo inside a larger record. */
        devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
        cfg = &devlist_entry->cfg;
        /* Snapshot the standard configuration header fields. */
        cfg->vendor = REG(PCIR_VENDOR, 2);
        cfg->device = REG(PCIR_DEVICE, 2);
        cfg->cmdreg = REG(PCIR_COMMAND, 2);
        cfg->statreg = REG(PCIR_STATUS, 2);
        cfg->baseclass = REG(PCIR_CLASS, 1);
        cfg->subclass = REG(PCIR_SUBCLASS, 1);
        cfg->progif = REG(PCIR_PROGIF, 1);
        cfg->revid = REG(PCIR_REVID, 1);
        cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
        cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
        cfg->lattimer = REG(PCIR_LATTIMER, 1);
        cfg->intpin = REG(PCIR_INTPIN, 1);
        cfg->intline = REG(PCIR_INTLINE, 1);
        /* Split the multi-function flag out of the header-type byte. */
        cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
        cfg->hdrtype &= ~PCIM_MFDEV;
        STAILQ_INIT(&cfg->maps);
        /* Header-type specific fields (subvendor IDs, BAR count, ...). */
        pci_hdrtypedata(pcib, b, s, f, cfg);
        if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
            pci_read_cap(pcib, cfg);
        STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
        /* Fill in the userland-visible pci_conf snapshot. */
        devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
        devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
        devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
        devlist_entry->conf.pc_sel.pc_func = cfg->func;
        devlist_entry->conf.pc_hdr = cfg->hdrtype;
        devlist_entry->conf.pc_subvendor = cfg->subvendor;
        devlist_entry->conf.pc_subdevice = cfg->subdevice;
        devlist_entry->conf.pc_vendor = cfg->vendor;
        devlist_entry->conf.pc_device = cfg->device;
        devlist_entry->conf.pc_class = cfg->baseclass;
        devlist_entry->conf.pc_subclass = cfg->subclass;
        devlist_entry->conf.pc_progif = cfg->progif;
        devlist_entry->conf.pc_revid = cfg->revid;
    return (devlist_entry);
/*
 * Walk the PCI capability list of the given function and record the
 * location (and selected register contents) of each capability this
 * driver cares about: power management, HyperTransport, MSI, MSI-X,
 * VPD, bridge subvendor, PCI-X and PCI-express.
 *
 * NOTE(review): partial chunk -- the return type, several braces,
 * 'break' statements and #endif lines of this function are missing
 * from the visible text.  Code lines are kept exactly as seen; only
 * comment delimiters lost to truncation have been restored.
 */
pci_read_cap(device_t pcib, pcicfgregs *cfg)
#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
    int ptr, nextptr, ptrptr;

    /* The capability pointer lives at a header-type dependent offset. */
    switch (cfg->hdrtype & PCIM_HDRTYPE) {
    case PCIM_HDRTYPE_NORMAL:
    case PCIM_HDRTYPE_BRIDGE:
        ptrptr = PCIR_CAP_PTR;
    case PCIM_HDRTYPE_CARDBUS:
        ptrptr = PCIR_CAP_PTR_2;    /* cardbus capabilities ptr */
        return; /* no extended capabilities support */
    nextptr = REG(ptrptr, 1);   /* sanity check? */

    /*
     * Read capability entries.
     */
    while (nextptr != 0) {
            printf("illegal PCI extended capability offset %d\n",
        /* Find the next entry */
        nextptr = REG(ptr + PCICAP_NEXTPTR, 1);

        /* Process this entry */
        switch (REG(ptr + PCICAP_ID, 1)) {
        case PCIY_PMG:      /* PCI power management */
            if (cfg->pp.pp_cap == 0) {
                cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
                cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
                cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
                if ((nextptr - ptr) > PCIR_POWER_DATA)
                    cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
        case PCIY_HT:       /* HyperTransport */
            /* Determine HT-specific capability type. */
            val = REG(ptr + PCIR_HT_COMMAND, 2);

            if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
                cfg->ht.ht_slave = ptr;

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
            switch (val & PCIM_HTCMD_CAP_MASK) {
            case PCIM_HTCAP_MSI_MAPPING:
                if (!(val & PCIM_HTCMD_MSI_FIXED)) {
                    /* Sanity check the mapping window. */
                    addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
                    addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
                    if (addr != MSI_INTEL_ADDR_BASE)
"HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
                            cfg->domain, cfg->bus,
                            cfg->slot, cfg->func,
                    addr = MSI_INTEL_ADDR_BASE;

                cfg->ht.ht_msimap = ptr;
                cfg->ht.ht_msictrl = val;
                cfg->ht.ht_msiaddr = addr;
        case PCIY_MSI:      /* PCI MSI */
            cfg->msi.msi_location = ptr;
            cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
            /* The MMC field encodes log2 of the message count. */
            cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
                PCIM_MSICTRL_MMC_MASK)>>1);
        case PCIY_MSIX:     /* PCI MSI-X */
            cfg->msix.msix_location = ptr;
            cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
            cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
                PCIM_MSIXCTRL_TABLE_SIZE) + 1;
            /* Table/PBA registers hold a BAR index plus an offset. */
            val = REG(ptr + PCIR_MSIX_TABLE, 4);
            cfg->msix.msix_table_bar = PCIR_BAR(val &
            cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
            val = REG(ptr + PCIR_MSIX_PBA, 4);
            cfg->msix.msix_pba_bar = PCIR_BAR(val &
            cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
        case PCIY_VPD:      /* PCI Vital Product Data */
            cfg->vpd.vpd_reg = ptr;
            /* Should always be true. */
            if ((cfg->hdrtype & PCIM_HDRTYPE) ==
                PCIM_HDRTYPE_BRIDGE) {
                val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
                cfg->subvendor = val & 0xffff;
                cfg->subdevice = val >> 16;
        case PCIY_PCIX:     /* PCI-X */
            /*
             * Assume we have a PCI-X chipset if we have
             * at least one PCI-PCI bridge with a PCI-X
             * capability.  Note that some systems with
             * PCI-express or HT chipsets might match on
             * this check as well.
             */
            if ((cfg->hdrtype & PCIM_HDRTYPE) ==
            cfg->pcix.pcix_location = ptr;
        case PCIY_EXPRESS:  /* PCI-express */
            /*
             * Assume we have a PCI-express chipset if we have
             * at least one PCI-express device.
             */
            cfg->pcie.pcie_location = ptr;
            val = REG(ptr + PCIER_FLAGS, 2);
            cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;

#if defined(__powerpc__)
    /*
     * Enable the MSI mapping window for all HyperTransport
     * slaves.  PCI-PCI bridges have their windows enabled via
     * the bridge driver.  (assumption from visible fragment -- confirm)
     */
    if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
        !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
    "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
            cfg->domain, cfg->bus, cfg->slot, cfg->func);
        cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
        WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
/* REG and WREG use carry through to next functions */
836 * PCI Vital Product Data
839 #define PCI_VPD_TIMEOUT 1000000
842 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
844 int count = PCI_VPD_TIMEOUT;
846 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
848 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
850 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
853 DELAY(1); /* limit looping */
855 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
862 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
864 int count = PCI_VPD_TIMEOUT;
866 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
868 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
869 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
870 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
873 DELAY(1); /* limit looping */
880 #undef PCI_VPD_TIMEOUT
882 struct vpd_readstate {
892 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
897 if (vrs->bytesinval == 0) {
898 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
900 vrs->val = le32toh(reg);
902 byte = vrs->val & 0xff;
905 vrs->val = vrs->val >> 8;
906 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string (vpd_ident), the read-only keyword array (vpd_ros) and the
 * writable keyword array (vpd_w).  Implemented as a byte-at-a-time
 * state machine driven by vpd_nextbyte().
 *
 * NOTE(review): partial chunk -- the return type, local declarations
 * (state/remain/name/i/byte/..., cksumvalid), many braces, 'break'
 * statements and whole branches of this function are missing from the
 * visible text.  Code lines are kept exactly as seen; only comment
 * delimiters lost to truncation have been restored.
 */
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
    struct vpd_readstate vrs;
    int alloc, off;     /* alloc/off for RO/W arrays */

    /* init vpd reader */
    name = remain = i = 0;  /* shut up stupid gcc */
    alloc = off = 0;    /* shut up stupid gcc */
    dflen = 0;      /* shut up stupid gcc */
        if (vpd_nextbyte(&vrs, &byte)) {
        /* debug trace of the state machine (visible fragment) */
        printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
            "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
            vrs.off, vrs.bytesinval, byte, state, remain, name, i);
        case 0:     /* item name */
                if (vpd_nextbyte(&vrs, &byte2)) {
                if (vpd_nextbyte(&vrs, &byte2)) {
                remain |= byte2 << 8;
                if (remain > (0x7f*4 - vrs.off)) {
                        "invalid VPD data, remain %#x\n",
                name = (byte >> 3) & 0xf;
            switch (name) {
            case 0x2:   /* String */
                cfg->vpd.vpd_ident = malloc(remain + 1,
            case 0x10:  /* VPD-R */
                cfg->vpd.vpd_ros = malloc(alloc *
                    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
            case 0x11:  /* VPD-W */
                cfg->vpd.vpd_w = malloc(alloc *
                    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
            default:    /* Invalid data, abort */
        case 1: /* Identifier String */
            cfg->vpd.vpd_ident[i++] = byte;
                cfg->vpd.vpd_ident[i] = '\0';
        case 2: /* VPD-R Keyword Header */
                cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
                    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
                    M_DEVBUF, M_WAITOK | M_ZERO);
            cfg->vpd.vpd_ros[off].keyword[0] = byte;
            if (vpd_nextbyte(&vrs, &byte2)) {
            cfg->vpd.vpd_ros[off].keyword[1] = byte2;
            if (vpd_nextbyte(&vrs, &byte2)) {
            cfg->vpd.vpd_ros[off].len = dflen = byte2;
                strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
                /*
                 * if this happens, we can't trust the rest
                 * of the VPD data (visible fragment).
                 */
                pci_printf(cfg, "bad keyword length: %d\n",
            } else if (dflen == 0) {
                /* zero-length value still gets a 1-byte buffer */
                cfg->vpd.vpd_ros[off].value = malloc(1 *
                    sizeof(*cfg->vpd.vpd_ros[off].value),
                    M_DEVBUF, M_WAITOK);
                cfg->vpd.vpd_ros[off].value[0] = '\x00';
                cfg->vpd.vpd_ros[off].value = malloc(
                    sizeof(*cfg->vpd.vpd_ros[off].value),
                    M_DEVBUF, M_WAITOK);
            /* keep in sync w/ state 3's transistions */
            if (dflen == 0 && remain == 0)
            else if (dflen == 0)
        case 3: /* VPD-R Keyword Value */
            cfg->vpd.vpd_ros[off].value[i++] = byte;
            if (strncmp(cfg->vpd.vpd_ros[off].keyword,
                "RV", 2) == 0 && cksumvalid == -1) {
                        "bad VPD cksum, remain %hhu\n",
            /* keep in sync w/ state 2's transistions */
                cfg->vpd.vpd_ros[off++].value[i++] = '\0';
                if (dflen == 0 && remain == 0) {
                    cfg->vpd.vpd_rocnt = off;
                    /* shrink the array to its final size */
                    cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
                        off * sizeof(*cfg->vpd.vpd_ros),
                        M_DEVBUF, M_WAITOK | M_ZERO);
                } else if (dflen == 0)
        case 5: /* VPD-W Keyword Header */
                cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
                    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
                    M_DEVBUF, M_WAITOK | M_ZERO);
            cfg->vpd.vpd_w[off].keyword[0] = byte;
            if (vpd_nextbyte(&vrs, &byte2)) {
            cfg->vpd.vpd_w[off].keyword[1] = byte2;
            if (vpd_nextbyte(&vrs, &byte2)) {
            cfg->vpd.vpd_w[off].len = dflen = byte2;
            cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
            cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
                sizeof(*cfg->vpd.vpd_w[off].value),
                M_DEVBUF, M_WAITOK);
            /* keep in sync w/ state 6's transistions */
            if (dflen == 0 && remain == 0)
            else if (dflen == 0)
        case 6: /* VPD-W Keyword Value */
            cfg->vpd.vpd_w[off].value[i++] = byte;
            /* keep in sync w/ state 5's transistions */
                cfg->vpd.vpd_w[off++].value[i++] = '\0';
                if (dflen == 0 && remain == 0) {
                    cfg->vpd.vpd_wcnt = off;
                    cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
                        off * sizeof(*cfg->vpd.vpd_w),
                        M_DEVBUF, M_WAITOK | M_ZERO);
                } else if (dflen == 0)
            pci_printf(cfg, "invalid state: %d\n", state);

    if (cksumvalid == 0 || state < -1) {
        /* read-only data bad, clean up */
        if (cfg->vpd.vpd_ros != NULL) {
            for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
                free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
            free(cfg->vpd.vpd_ros, M_DEVBUF);
            cfg->vpd.vpd_ros = NULL;
        /* I/O error, clean up */
        pci_printf(cfg, "failed to read VPD data.\n");
        if (cfg->vpd.vpd_ident != NULL) {
            free(cfg->vpd.vpd_ident, M_DEVBUF);
            cfg->vpd.vpd_ident = NULL;
        if (cfg->vpd.vpd_w != NULL) {
            for (off = 0; cfg->vpd.vpd_w[off].value; off++)
                free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
            free(cfg->vpd.vpd_w, M_DEVBUF);
            cfg->vpd.vpd_w = NULL;
    /* mark the (possibly empty) result cached so we don't re-read */
    cfg->vpd.vpd_cached = 1;
1188 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1190 struct pci_devinfo *dinfo = device_get_ivars(child);
1191 pcicfgregs *cfg = &dinfo->cfg;
1193 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1194 pci_read_vpd(device_get_parent(dev), cfg);
1196 *identptr = cfg->vpd.vpd_ident;
1198 if (*identptr == NULL)
1205 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1208 struct pci_devinfo *dinfo = device_get_ivars(child);
1209 pcicfgregs *cfg = &dinfo->cfg;
1212 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1213 pci_read_vpd(device_get_parent(dev), cfg);
1215 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1216 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1217 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1218 *vptr = cfg->vpd.vpd_ros[i].value;
1227 pci_fetch_vpd_list(device_t dev)
1229 struct pci_devinfo *dinfo = device_get_ivars(dev);
1230 pcicfgregs *cfg = &dinfo->cfg;
1232 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1233 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1238 * Find the requested HyperTransport capability and return the offset
1239 * in configuration space via the pointer provided. The function
1240 * returns 0 on success and an error code otherwise.
1243 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1248 error = pci_find_cap(child, PCIY_HT, &ptr);
1253 * Traverse the capabilities list checking each HT capability
1254 * to see if it matches the requested HT capability.
1257 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1258 if (capability == PCIM_HTCAP_SLAVE ||
1259 capability == PCIM_HTCAP_HOST)
1262 val &= PCIM_HTCMD_CAP_MASK;
1263 if (val == capability) {
1269 /* Skip to the next HT capability. */
1271 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1272 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1281 * Find the requested capability and return the offset in
1282 * configuration space via the pointer provided. The function returns
1283 * 0 on success and an error code otherwise.
1286 pci_find_cap_method(device_t dev, device_t child, int capability,
1289 struct pci_devinfo *dinfo = device_get_ivars(child);
1290 pcicfgregs *cfg = &dinfo->cfg;
1295 * Check the CAP_LIST bit of the PCI status register first.
1297 status = pci_read_config(child, PCIR_STATUS, 2);
1298 if (!(status & PCIM_STATUS_CAPPRESENT))
1302 * Determine the start pointer of the capabilities list.
/* The capability pointer register location depends on header type. */
1304 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1305 case PCIM_HDRTYPE_NORMAL:
1306 case PCIM_HDRTYPE_BRIDGE:
1309 case PCIM_HDRTYPE_CARDBUS:
1310 ptr = PCIR_CAP_PTR_2;
1314 return (ENXIO); /* no extended capabilities support */
1316 ptr = pci_read_config(child, ptr, 1);
1319 * Traverse the capabilities list.
1322 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1327 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1334 * Find the requested extended capability and return the offset in
1335 * configuration space via the pointer provided. The function returns
1336 * 0 on success and an error code otherwise.
1339 pci_find_extcap_method(device_t dev, device_t child, int capability,
1342 struct pci_devinfo *dinfo = device_get_ivars(child);
1343 pcicfgregs *cfg = &dinfo->cfg;
1347 /* Only supported for PCI-express devices. */
1348 if (cfg->pcie.pcie_location == 0)
/* All-ones or zero means no extended capability list is present. */
1352 ecap = pci_read_config(child, ptr, 4);
1353 if (ecap == 0xffffffff || ecap == 0)
1356 if (PCI_EXTCAP_ID(ecap) == capability) {
/* Walk to the next extended capability header. */
1361 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1364 ecap = pci_read_config(child, ptr, 4);
1371 * Support for MSI-X message interrupts.
/*
 * Program MSI-X table entry 'index' with the given message address and
 * data.  Each MSI-X table entry is 16 bytes: address low, address
 * high, data, then vector control.
 */
1374 pci_enable_msix_method(device_t dev, device_t child, u_int index,
1375 uint64_t address, uint32_t data)
1377 struct pci_devinfo *dinfo = device_get_ivars(child);
1378 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1381 KASSERT(msix->msix_table_len > index, ("bogus index"));
1382 offset = msix->msix_table_offset + index * 16;
1383 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1384 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1385 bus_write_4(msix->msix_table_res, offset + 8, data);
1387 /* Enable MSI -> HT mapping. */
1388 pci_ht_map_msi(child, address);
/*
 * Mask MSI-X message 'index' by setting the Mask bit in its vector
 * control word (offset 12 within the 16-byte table entry).  The write
 * is skipped when the entry is already masked.
 */
1392 pci_mask_msix(device_t dev, u_int index)
1394 struct pci_devinfo *dinfo = device_get_ivars(dev);
1395 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1396 uint32_t offset, val;
1398 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1399 offset = msix->msix_table_offset + index * 16 + 12;
1400 val = bus_read_4(msix->msix_table_res, offset);
1401 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1402 val |= PCIM_MSIX_VCTRL_MASK;
1403 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Unmask MSI-X message 'index' by clearing the Mask bit in its vector
 * control word.  The write is skipped when the entry is already
 * unmasked.
 */
1408 pci_unmask_msix(device_t dev, u_int index)
1410 struct pci_devinfo *dinfo = device_get_ivars(dev);
1411 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1412 uint32_t offset, val;
1414 KASSERT(msix->msix_table_len > index, ("bogus index"));
1415 offset = msix->msix_table_offset + index * 16 + 12;
1416 val = bus_read_4(msix->msix_table_res, offset);
1417 if (val & PCIM_MSIX_VCTRL_MASK) {
1418 val &= ~PCIM_MSIX_VCTRL_MASK;
1419 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return non-zero if MSI-X message 'index' has its bit set in the
 * Pending Bit Array (one bit per message, packed 32 per dword).
 */
1424 pci_pending_msix(device_t dev, u_int index)
1426 struct pci_devinfo *dinfo = device_get_ivars(dev);
1427 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1428 uint32_t offset, bit;
1430 KASSERT(msix->msix_table_len > index, ("bogus index"))
1431 offset = msix->msix_pba_offset + (index / 32) * 4;
1432 bit = 1 << index % 32;
1433 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1437 * Restore MSI-X registers and table during resume. If MSI-X is
1438 * enabled then walk the virtual table to restore the actual MSI-X
1442 pci_resume_msix(device_t dev)
1444 struct pci_devinfo *dinfo = device_get_ivars(dev);
1445 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1446 struct msix_table_entry *mte;
1447 struct msix_vector *mv;
1450 if (msix->msix_alloc > 0) {
1451 /* First, mask all vectors. */
1452 for (i = 0; i < msix->msix_msgnum; i++)
1453 pci_mask_msix(dev, i);
1455 /* Second, program any messages with at least one handler. */
1456 for (i = 0; i < msix->msix_table_len; i++) {
1457 mte = &msix->msix_table[i];
1458 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is 1-based; convert to a 0-based msix_vectors index. */
1460 mv = &msix->msix_vectors[mte->mte_vector - 1];
1461 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1462 pci_unmask_msix(dev, i);
/* Finally restore the saved MSI-X control register value. */
1465 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1466 msix->msix_ctrl, 2);
1470 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1471 * returned in *count. After this function returns, each message will be
1472 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1475 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1477 struct pci_devinfo *dinfo = device_get_ivars(child);
1478 pcicfgregs *cfg = &dinfo->cfg;
1479 struct resource_list_entry *rle;
1480 int actual, error, i, irq, max;
1482 /* Don't let count == 0 get us into trouble. */
1486 /* If rid 0 is allocated, then fail. */
1487 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1488 if (rle != NULL && rle->res != NULL)
1491 /* Already have allocated messages? */
1492 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1495 /* If MSI-X is blacklisted for this system, fail. */
1496 if (pci_msix_blacklisted())
1499 /* MSI-X capability present? */
1500 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1503 /* Make sure the appropriate BARs are mapped. */
1504 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1505 cfg->msix.msix_table_bar);
1506 if (rle == NULL || rle->res == NULL ||
1507 !(rman_get_flags(rle->res) & RF_ACTIVE))
1509 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the vector table. */
1510 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1511 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1512 cfg->msix.msix_pba_bar);
1513 if (rle == NULL || rle->res == NULL ||
1514 !(rman_get_flags(rle->res) & RF_ACTIVE))
1517 cfg->msix.msix_pba_res = rle->res;
1520 device_printf(child,
1521 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1522 *count, cfg->msix.msix_msgnum);
1523 max = min(*count, cfg->msix.msix_msgnum);
1524 for (i = 0; i < max; i++) {
1525 /* Allocate a message. */
1526 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1532 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1538 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1540 device_printf(child, "using IRQ %lu for MSI-X\n",
1546 * Be fancy and try to print contiguous runs of
1547 * IRQ values as ranges. 'irq' is the previous IRQ.
1548 * 'run' is true if we are in a range.
1550 device_printf(child, "using IRQs %lu", rle->start);
1553 for (i = 1; i < actual; i++) {
1554 rle = resource_list_find(&dinfo->resources,
1555 SYS_RES_IRQ, i + 1);
1557 /* Still in a run? */
1558 if (rle->start == irq + 1) {
1564 /* Finish previous range. */
1570 /* Start new range. */
1571 printf(",%lu", rle->start);
1575 /* Unfinished range? */
1578 printf(" for MSI-X\n");
1582 /* Mask all vectors. */
1583 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1584 pci_mask_msix(child, i);
1586 /* Allocate and initialize vector data and virtual table. */
1587 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1588 M_DEVBUF, M_WAITOK | M_ZERO);
1589 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1590 M_DEVBUF, M_WAITOK | M_ZERO);
/* Initially the mapping is the identity: message i <- vector i + 1. */
1591 for (i = 0; i < actual; i++) {
1592 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1593 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1594 cfg->msix.msix_table[i].mte_vector = i + 1;
1597 /* Update control register to enable MSI-X. */
1598 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1599 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1600 cfg->msix.msix_ctrl, 2);
1602 /* Update counts of alloc'd messages. */
1603 cfg->msix.msix_alloc = actual;
1604 cfg->msix.msix_table_len = actual;
1610 * By default, pci_alloc_msix() will assign the allocated IRQ
1611 * resources consecutively to the first N messages in the MSI-X table.
1612 * However, device drivers may want to use different layouts if they
1613 * either receive fewer messages than they asked for, or they wish to
1614 * populate the MSI-X table sparsely. This method allows the driver
1615 * to specify what layout it wants. It must be called after a
1616 * successful pci_alloc_msix() but before any of the associated
1617 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1619 * The 'vectors' array contains 'count' message vectors. The array
1620 * maps directly to the MSI-X table in that index 0 in the array
1621 * specifies the vector for the first message in the MSI-X table, etc.
1622 * The vector value in each array index can either be 0 to indicate
1623 * that no vector should be assigned to a message slot, or it can be a
1624 * number from 1 to N (where N is the count returned from a
1625 * successful call to pci_alloc_msix()) to indicate which message
1626 * vector (IRQ) to be used for the corresponding message.
1628 * On successful return, each message with a non-zero vector will have
1629 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1630 * 1. Additionally, if any of the IRQs allocated via the previous
1631 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1632 * will be freed back to the system automatically.
1634 * For example, suppose a driver has a MSI-X table with 6 messages and
1635 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1636 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1637 * C. After the call to pci_alloc_msix(), the device will be setup to
1638 * have an MSI-X table of ABC--- (where - means no vector assigned).
1639 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1640 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1641 * be freed back to the system. This device will also have valid
1642 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1644 * In any case, the SYS_RES_IRQ rid X will always map to the message
1645 * at MSI-X table index X - 1 and will only be valid if a vector is
1646 * assigned to that table entry.
/*
 * Remap the MSI-X table per the caller-supplied 'vectors' layout (see
 * the contract described in the comment above): vectors[i] is the
 * 1-based message vector for table slot i, or 0 for an unused slot.
 * Unused IRQs are released back to the parent bridge.
 */
1649 pci_remap_msix_method(device_t dev, device_t child, int count,
1650 const u_int *vectors)
1652 struct pci_devinfo *dinfo = device_get_ivars(child);
1653 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1654 struct resource_list_entry *rle;
1655 int i, irq, j, *used;
1658 * Have to have at least one message in the table but the
1659 * table can't be bigger than the actual MSI-X table in the
1662 if (count == 0 || count > msix->msix_msgnum)
1665 /* Sanity check the vectors. */
1666 for (i = 0; i < count; i++)
1667 if (vectors[i] > msix->msix_alloc)
1671 * Make sure there aren't any holes in the vectors to be used.
1672 * It's a big pain to support it, and it doesn't really make
1673 * sense anyway. Also, at least one vector must be used.
1675 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1677 for (i = 0; i < count; i++)
1678 if (vectors[i] != 0)
1679 used[vectors[i] - 1] = 1;
1680 for (i = 0; i < msix->msix_alloc - 1; i++)
1681 if (used[i] == 0 && used[i + 1] == 1) {
1682 free(used, M_DEVBUF);
1686 free(used, M_DEVBUF);
1690 /* Make sure none of the resources are allocated. */
1691 for (i = 0; i < msix->msix_table_len; i++) {
1692 if (msix->msix_table[i].mte_vector == 0)
1694 if (msix->msix_table[i].mte_handlers > 0)
1696 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1697 KASSERT(rle != NULL, ("missing resource"));
1698 if (rle->res != NULL)
1702 /* Free the existing resource list entries. */
1703 for (i = 0; i < msix->msix_table_len; i++) {
1704 if (msix->msix_table[i].mte_vector == 0)
1706 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1710 * Build the new virtual table keeping track of which vectors are
1713 free(msix->msix_table, M_DEVBUF);
1714 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1715 M_DEVBUF, M_WAITOK | M_ZERO);
1716 for (i = 0; i < count; i++)
1717 msix->msix_table[i].mte_vector = vectors[i];
1718 msix->msix_table_len = count;
1720 /* Free any unused IRQs and resize the vectors array if necessary. */
1721 j = msix->msix_alloc - 1;
1723 struct msix_vector *vec;
1725 while (used[j] == 0) {
1726 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1727 msix->msix_vectors[j].mv_irq);
1730 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1732 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1734 free(msix->msix_vectors, M_DEVBUF);
1735 msix->msix_vectors = vec;
1736 msix->msix_alloc = j + 1;
1738 free(used, M_DEVBUF);
1740 /* Map the IRQs onto the rids. */
1741 for (i = 0; i < count; i++) {
1742 if (vectors[i] == 0)
/*
 * vectors[] entries are 1-based (0 means "unused slot", and the
 * used[] bookkeeping above indexes with vectors[i] - 1), while
 * msix_vectors[] is a 0-based array.  Indexing with vectors[i]
 * directly read the wrong entry and ran one element past the end
 * of the array when vectors[i] == msix_alloc; subtract 1.
 */
1744 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1745 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1750 device_printf(child, "Remapped MSI-X IRQs as: ");
1751 for (i = 0; i < count; i++) {
1754 if (vectors[i] == 0)
/* Same 1-based -> 0-based conversion as above. */
1758 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child': disable MSI-X in
 * the control register, delete the SYS_RES_IRQ resource list entries,
 * and hand the IRQs back to the parent bridge.  Fails if any message
 * still has handlers or an allocated resource.
 */
1767 pci_release_msix(device_t dev, device_t child)
1769 struct pci_devinfo *dinfo = device_get_ivars(child);
1770 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1771 struct resource_list_entry *rle;
1774 /* Do we have any messages to release? */
1775 if (msix->msix_alloc == 0)
1778 /* Make sure none of the resources are allocated. */
1779 for (i = 0; i < msix->msix_table_len; i++) {
1780 if (msix->msix_table[i].mte_vector == 0)
1782 if (msix->msix_table[i].mte_handlers > 0)
1784 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1785 KASSERT(rle != NULL, ("missing resource"));
1786 if (rle->res != NULL)
1790 /* Update control register to disable MSI-X. */
1791 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1792 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1793 msix->msix_ctrl, 2);
1795 /* Free the resource list entries. */
1796 for (i = 0; i < msix->msix_table_len; i++) {
1797 if (msix->msix_table[i].mte_vector == 0)
1799 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1801 free(msix->msix_table, M_DEVBUF);
1802 msix->msix_table_len = 0;
1804 /* Release the IRQs. */
1805 for (i = 0; i < msix->msix_alloc; i++)
1806 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1807 msix->msix_vectors[i].mv_irq);
1808 free(msix->msix_vectors, M_DEVBUF);
1809 msix->msix_alloc = 0;
1814 * Return the max supported MSI-X messages this device supports.
1815 * Basically, assuming the MD code can alloc messages, this function
1816 * should return the maximum value that pci_alloc_msix() can return.
1817 * Thus, it is subject to the tunables, etc.
1820 pci_msix_count_method(device_t dev, device_t child)
1822 struct pci_devinfo *dinfo = device_get_ivars(child);
1823 struct pcicfg_msix *msix = &dinfo->cfg.msix;
/* MSI-X must be both enabled by tunable and present on the device. */
1825 if (pci_do_msix && msix->msix_location != 0)
1826 return (msix->msix_msgnum);
1831 * HyperTransport MSI mapping control
/*
 * Enable or disable the HT MSI address window for 'dev'.  A non-zero
 * 'addr' enables the mapping when the message address falls inside the
 * window (compared at 1 MB granularity); addr == 0 disables it.
 */
1834 pci_ht_map_msi(device_t dev, uint64_t addr)
1836 struct pci_devinfo *dinfo = device_get_ivars(dev);
1837 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1842 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1843 ht->ht_msiaddr >> 20 == addr >> 20) {
1844 /* Enable MSI -> HT mapping. */
1845 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1846 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1850 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1851 /* Disable MSI -> HT mapping. */
1852 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1853 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCI-e Max Read Request Size currently programmed in the
 * Device Control register, in bytes (128 << encoded field value).
 */
1859 pci_get_max_read_req(device_t dev)
1861 struct pci_devinfo *dinfo = device_get_ivars(dev);
1865 cap = dinfo->cfg.pcie.pcie_location;
1868 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1869 val &= PCIEM_CTL_MAX_READ_REQUEST;
/* NOTE(review): the MRRS field sits at bits 14:12 of the register;
 * this assumes val is shifted down to bit 0 before use — confirm the
 * elided statement between these lines. */
1871 return (1 << (val + 7));
/*
 * Program the PCI-e Max Read Request Size.  'size' in bytes is rounded
 * down to the nearest power of two before being encoded into bits
 * 14:12 of the Device Control register.
 */
1875 pci_set_max_read_req(device_t dev, int size)
1877 struct pci_devinfo *dinfo = device_get_ivars(dev);
1881 cap = dinfo->cfg.pcie.pcie_location;
/* Round down to a power of two (fls() gives the highest set bit). */
1888 size = (1 << (fls(size) - 1));
1889 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1890 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
/* Encode: 128 bytes -> 0, 256 -> 1, ... placed at bits 14:12. */
1891 val |= (fls(size) - 8) << 12;
1892 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
/*
 * Read a register from the device's PCI-e capability block.  'reg' is
 * relative to the capability; returns all ones when the device has no
 * PCI-e capability.
 */
1897 pcie_read_config(device_t dev, int reg, int width)
1899 struct pci_devinfo *dinfo = device_get_ivars(dev);
1902 cap = dinfo->cfg.pcie.pcie_location;
1906 return (0xffffffff);
1909 return (pci_read_config(dev, cap + reg, width));
/*
 * Write a register in the device's PCI-e capability block.  'reg' is
 * relative to the capability; silently does nothing when the device
 * has no PCI-e capability.
 */
1913 pcie_write_config(device_t dev, int reg, uint32_t value, int width)
1915 struct pci_devinfo *dinfo = device_get_ivars(dev);
1918 cap = dinfo->cfg.pcie.pcie_location;
1921 pci_write_config(dev, cap + reg, value, width);
1925 * Adjusts a PCI-e capability register by clearing the bits in mask
1926 * and setting the bits in (value & mask). Bits not set in mask are
1929 * Returns the old value on success or all ones on failure.
1932 pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
1935 struct pci_devinfo *dinfo = device_get_ivars(dev);
1939 cap = dinfo->cfg.pcie.pcie_location;
/* No PCI-e capability: signal failure with all ones. */
1943 return (0xffffffff);
/* Read-modify-write: clear masked bits, then merge in the new bits. */
1946 old = pci_read_config(dev, cap + reg, width);
1948 new |= (value & mask);
1949 pci_write_config(dev, cap + reg, new, width);
1954 * Support for MSI message signalled interrupts.
/*
 * Program the MSI address/data registers for 'child' and set the MSI
 * enable bit.  64-bit capable functions use the high address register
 * and the shifted data register offset.
 */
1957 pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
1960 struct pci_devinfo *dinfo = device_get_ivars(child);
1961 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1963 /* Write data and address values. */
1964 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
1965 address & 0xffffffff, 4);
1966 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1967 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1969 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
1972 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
1975 /* Enable MSI in the control register. */
1976 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1977 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
1980 /* Enable MSI -> HT mapping. */
1981 pci_ht_map_msi(child, address);
/*
 * Disable MSI for 'child': tear down any HT mapping window first, then
 * clear the MSI enable bit in the control register.
 */
1985 pci_disable_msi_method(device_t dev, device_t child)
1987 struct pci_devinfo *dinfo = device_get_ivars(child);
1988 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1990 /* Disable MSI -> HT mapping. */
1991 pci_ht_map_msi(child, 0);
1993 /* Disable MSI in the control register. */
1994 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1995 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2000 * Restore MSI registers during resume. If MSI is enabled then
2001 * restore the data and address registers in addition to the control
2005 pci_resume_msi(device_t dev)
2007 struct pci_devinfo *dinfo = device_get_ivars(dev);
2008 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Replay the saved address/data only if MSI was enabled at suspend. */
2012 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
2013 address = msi->msi_addr;
2014 data = msi->msi_data;
2015 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
2016 address & 0xffffffff, 4);
2017 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2018 pci_write_config(dev, msi->msi_location +
2019 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
2020 pci_write_config(dev, msi->msi_location +
2021 PCIR_MSI_DATA_64BIT, data, 2);
2023 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* Always restore the saved control register value. */
2026 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Re-program the message address/data for an MSI or MSI-X IRQ after
 * the parent bridge has moved it (e.g. interrupt rebalancing).  Looks
 * the IRQ up among the device's MSI IRQs first, then its MSI-X
 * vectors, asking the pcib for fresh mapping info via PCIB_MAP_MSI().
 */
2031 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
2033 struct pci_devinfo *dinfo = device_get_ivars(dev);
2034 pcicfgregs *cfg = &dinfo->cfg;
2035 struct resource_list_entry *rle;
2036 struct msix_table_entry *mte;
2037 struct msix_vector *mv;
2043 * Handle MSI first. We try to find this IRQ among our list
2044 * of MSI IRQs. If we find it, we request updated address and
2045 * data registers and apply the results.
2047 if (cfg->msi.msi_alloc > 0) {
2049 /* If we don't have any active handlers, nothing to do. */
2050 if (cfg->msi.msi_handlers == 0)
2052 for (i = 0; i < cfg->msi.msi_alloc; i++) {
2053 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2055 if (rle->start == irq) {
2056 error = PCIB_MAP_MSI(device_get_parent(bus),
2057 dev, irq, &addr, &data);
/* Disable, update the cached values, then re-enable with them. */
2060 pci_disable_msi(dev);
2061 dinfo->cfg.msi.msi_addr = addr;
2062 dinfo->cfg.msi.msi_data = data;
2063 pci_enable_msi(dev, addr, data);
2071 * For MSI-X, we check to see if we have this IRQ. If we do,
2072 * we request the updated mapping info. If that works, we go
2073 * through all the slots that use this IRQ and update them.
2075 if (cfg->msix.msix_alloc > 0) {
2076 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2077 mv = &cfg->msix.msix_vectors[i];
2078 if (mv->mv_irq == irq) {
2079 error = PCIB_MAP_MSI(device_get_parent(bus),
2080 dev, irq, &addr, &data);
2083 mv->mv_address = addr;
/* Rewrite every table slot mapped to this vector (1-based i + 1). */
2085 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2086 mte = &cfg->msix.msix_table[j];
2087 if (mte->mte_vector != i + 1)
2089 if (mte->mte_handlers == 0)
/* Mask while updating so the device never sees a half-written entry. */
2091 pci_mask_msix(dev, j);
2092 pci_enable_msix(dev, j, addr, data);
2093 pci_unmask_msix(dev, j);
2104 * Returns true if the specified device is blacklisted because MSI
2108 pci_msi_device_blacklisted(device_t dev)
/* Tunable escape hatch: honor the blacklist only when configured. */
2111 if (!pci_honor_msi_blacklist)
2114 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2118 * Determine if MSI is blacklisted globally on this system. Currently,
2119 * we just check for blacklisted chipsets as represented by the
2120 * host-PCI bridge at device 0:0:0. In the future, it may become
2121 * necessary to check other system attributes, such as the kenv values
2122 * that give the motherboard manufacturer and model number.
2125 pci_msi_blacklisted(void)
2129 if (!pci_honor_msi_blacklist)
2132 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2133 if (!(pcie_chipset || pcix_chipset)) {
2134 if (vm_guest != VM_GUEST_NO) {
2136 * Whitelist older chipsets in virtual
2137 * machines known to support MSI.
2139 dev = pci_find_bsf(0, 0, 0);
2141 return (!pci_has_quirk(pci_get_devid(dev),
2142 PCI_QUIRK_ENABLE_MSI_VM));
/* Otherwise judge by the host-PCI bridge at bus 0, slot 0, func 0. */
2147 dev = pci_find_bsf(0, 0, 0);
2149 return (pci_msi_device_blacklisted(dev));
2154 * Returns true if the specified device is blacklisted because MSI-X
2155 * doesn't work. Note that this assumes that if MSI doesn't work,
2156 * MSI-X doesn't either.
2159 pci_msix_device_blacklisted(device_t dev)
2162 if (!pci_honor_msi_blacklist)
2165 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
/* Fall back to the plain-MSI blacklist (MSI broken implies MSI-X broken). */
2168 return (pci_msi_device_blacklisted(dev));
2172 * Determine if MSI-X is blacklisted globally on this system. If MSI
2173 * is blacklisted, assume that MSI-X is as well. Check for additional
2174 * chipsets where MSI works but MSI-X does not.
2177 pci_msix_blacklisted(void)
2181 if (!pci_honor_msi_blacklist)
/* Check the host-PCI bridge for an MSI-X-specific quirk first. */
2184 dev = pci_find_bsf(0, 0, 0);
2185 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2186 PCI_QUIRK_DISABLE_MSIX))
2189 return (pci_msi_blacklisted());
2193 * Attempt to allocate *count MSI messages. The actual number allocated is
2194 * returned in *count. After this function returns, each message will be
2195 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2198 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2200 struct pci_devinfo *dinfo = device_get_ivars(child);
2201 pcicfgregs *cfg = &dinfo->cfg;
2202 struct resource_list_entry *rle;
2203 int actual, error, i, irqs[32];
2206 /* Don't let count == 0 get us into trouble. */
2210 /* If rid 0 is allocated, then fail. */
2211 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2212 if (rle != NULL && rle->res != NULL)
2215 /* Already have allocated messages? */
2216 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2219 /* If MSI is blacklisted for this system, fail. */
2220 if (pci_msi_blacklisted())
2223 /* MSI capability present? */
2224 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2228 device_printf(child,
2229 "attempting to allocate %d MSI vectors (%d supported)\n",
2230 *count, cfg->msi.msi_msgnum);
2232 /* Don't ask for more than the device supports. */
2233 actual = min(*count, cfg->msi.msi_msgnum);
2235 /* Don't ask for more than 32 messages. */
2236 actual = min(actual, 32);
2238 /* MSI requires power of 2 number of messages. */
2239 if (!powerof2(actual))
2243 /* Try to allocate N messages. */
2244 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2256 * We now have N actual messages mapped onto SYS_RES_IRQ
2257 * resources in the irqs[] array, so add new resources
2258 * starting at rid 1.
2260 for (i = 0; i < actual; i++)
2261 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2262 irqs[i], irqs[i], 1);
2266 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2271 * Be fancy and try to print contiguous runs
2272 * of IRQ values as ranges. 'run' is true if
2273 * we are in a range.
2275 device_printf(child, "using IRQs %d", irqs[0]);
2277 for (i = 1; i < actual; i++) {
2279 /* Still in a run? */
2280 if (irqs[i] == irqs[i - 1] + 1) {
2285 /* Finish previous range. */
2287 printf("-%d", irqs[i - 1]);
2291 /* Start new range. */
2292 printf(",%d", irqs[i]);
2295 /* Unfinished range? */
2297 printf("-%d", irqs[actual - 1]);
2298 printf(" for MSI\n");
2302 /* Update control register with actual count. */
/* Multiple Message Enable field encodes log2(actual) at bits 6:4. */
2303 ctrl = cfg->msi.msi_ctrl;
2304 ctrl &= ~PCIM_MSICTRL_MME_MASK;
2305 ctrl |= (ffs(actual) - 1) << 4;
2306 cfg->msi.msi_ctrl = ctrl;
2307 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2309 /* Update counts of alloc'd messages. */
2310 cfg->msi.msi_alloc = actual;
2311 cfg->msi.msi_handlers = 0;
2316 /* Release the MSI messages associated with this device. */
2318 pci_release_msi_method(device_t dev, device_t child)
2320 struct pci_devinfo *dinfo = device_get_ivars(child);
2321 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2322 struct resource_list_entry *rle;
2323 int error, i, irqs[32];
2325 /* Try MSI-X first. */
/* If MSI-X was in use, pci_release_msix() handles everything. */
2326 error = pci_release_msix(dev, child);
2327 if (error != ENODEV)
2330 /* Do we have any messages to release? */
2331 if (msi->msi_alloc == 0)
2333 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2335 /* Make sure none of the resources are allocated. */
2336 if (msi->msi_handlers > 0)
2338 for (i = 0; i < msi->msi_alloc; i++) {
2339 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2340 KASSERT(rle != NULL, ("missing MSI resource"));
2341 if (rle->res != NULL)
2343 irqs[i] = rle->start;
2346 /* Update control register with 0 count. */
2347 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2348 ("%s: MSI still enabled", __func__));
2349 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2350 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2353 /* Release the messages. */
2354 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2355 for (i = 0; i < msi->msi_alloc; i++)
2356 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2358 /* Update alloc count. */
2366 * Return the max supported MSI messages this device supports.
2367 * Basically, assuming the MD code can alloc messages, this function
2368 * should return the maximum value that pci_alloc_msi() can return.
2369 * Thus, it is subject to the tunables, etc.
2372 pci_msi_count_method(device_t dev, device_t child)
2374 struct pci_devinfo *dinfo = device_get_ivars(child);
2375 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* MSI must be both enabled by tunable and present on the device. */
2377 if (pci_do_msi && msi->msi_location != 0)
2378 return (msi->msi_msgnum);
2382 /* free pcicfgregs structure and all depending data structures */
2385 pci_freecfg(struct pci_devinfo *dinfo)
2387 struct devlist *devlist_head;
2388 struct pci_map *pm, *next;
2391 devlist_head = &pci_devq;
/* Free cached VPD data: ident string, then each RO/RW field value. */
2393 if (dinfo->cfg.vpd.vpd_reg) {
2394 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2395 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2396 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2397 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2398 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2399 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2400 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* Free the BAR map list; _SAFE variant allows freeing while iterating. */
2402 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2405 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2406 free(dinfo, M_DEVBUF);
2408 /* increment the generation count */
2411 /* we're losing one device */
2417 * PCI power management
/*
 * Put 'child' into PCI power state 'state' (PCI_POWERSTATE_D0..D3) via
 * its power-management capability.  Returns EOPNOTSUPP when the device
 * lacks the capability or does not support the requested state.
 */
2420 pci_set_powerstate_method(device_t dev, device_t child, int state)
2422 struct pci_devinfo *dinfo = device_get_ivars(child);
2423 pcicfgregs *cfg = &dinfo->cfg;
2425 int oldstate, highest, delay;
2427 if (cfg->pp.pp_cap == 0)
2428 return (EOPNOTSUPP);
2431 * Optimize a no state change request away. While it would be OK to
2432 * write to the hardware in theory, some devices have shown odd
2433 * behavior when going from D3 -> D3.
2435 oldstate = pci_get_powerstate(child);
2436 if (oldstate == state)
2440 * The PCI power management specification states that after a state
2441 * transition between PCI power states, system software must
2442 * guarantee a minimal delay before the function accesses the device.
2443 * Compute the worst case delay that we need to guarantee before we
2444 * access the device. Many devices will be responsive much more
2445 * quickly than this delay, but there are some that don't respond
2446 * instantly to state changes. Transitions to/from D3 state require
2447 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2448 * is done below with DELAY rather than a sleeper function because
2449 * this function can be called from contexts where we cannot sleep.
2451 highest = (oldstate > state) ? oldstate : state;
2452 if (highest == PCI_POWERSTATE_D3)
2454 else if (highest == PCI_POWERSTATE_D2)
/* Preserve the status register bits other than the power-state field. */
2458 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2459 & ~PCIM_PSTAT_DMASK;
2461 case PCI_POWERSTATE_D0:
2462 status |= PCIM_PSTAT_D0;
2464 case PCI_POWERSTATE_D1:
/* D1 and D2 are optional states; verify the capability bits. */
2465 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2466 return (EOPNOTSUPP);
2467 status |= PCIM_PSTAT_D1;
2469 case PCI_POWERSTATE_D2:
2470 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2471 return (EOPNOTSUPP);
2472 status |= PCIM_PSTAT_D2;
2474 case PCI_POWERSTATE_D3:
2475 status |= PCIM_PSTAT_D3;
2482 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2485 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Return the current PCI power state of 'child' as one of the
 * PCI_POWERSTATE_* constants, decoded from the power-management status
 * register.  Devices without the capability are always reported as D0.
 */
2492 pci_get_powerstate_method(device_t dev, device_t child)
2494 struct pci_devinfo *dinfo = device_get_ivars(child);
2495 pcicfgregs *cfg = &dinfo->cfg;
2499 if (cfg->pp.pp_cap != 0) {
2500 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2501 switch (status & PCIM_PSTAT_DMASK) {
2503 result = PCI_POWERSTATE_D0;
2506 result = PCI_POWERSTATE_D1;
2509 result = PCI_POWERSTATE_D2;
2512 result = PCI_POWERSTATE_D3;
2515 result = PCI_POWERSTATE_UNKNOWN;
2519 /* No support, device is always at D0 */
2520 result = PCI_POWERSTATE_D0;
2526 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the child's command register. */
2529 static __inline void
2530 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2534 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2536 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the child's command register. */
2539 static __inline void
2540 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2544 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2546 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable bus mastering for 'child' via the command register. */
2550 pci_enable_busmaster_method(device_t dev, device_t child)
2552 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable bus mastering for 'child' via the command register. */
2557 pci_disable_busmaster_method(device_t dev, device_t child)
2559 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable I/O port or memory decoding for 'child'; 'space' selects
 * which (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2564 pci_enable_io_method(device_t dev, device_t child, int space)
2569 case SYS_RES_IOPORT:
2570 bit = PCIM_CMD_PORTEN;
2572 case SYS_RES_MEMORY:
2573 bit = PCIM_CMD_MEMEN;
2578 pci_set_command_bit(dev, child, bit);
/*
 * Disable I/O port or memory decoding for 'child'; 'space' selects
 * which (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2583 pci_disable_io_method(device_t dev, device_t child, int space)
2588 case SYS_RES_IOPORT:
2589 bit = PCIM_CMD_PORTEN;
2591 case SYS_RES_MEMORY:
2592 bit = PCIM_CMD_MEMEN;
2597 pci_clear_command_bit(dev, child, bit);
2602 * New style pci driver. Parent device is either a pci-host-bridge or a
2603 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Print a multi-line description of one device's config header:
 * vendor/device IDs, domain/bus/slot/function, class triple, command
 * and status registers, timing fields, interrupt pin/line, and any
 * power-management, MSI and MSI-X capabilities that were parsed.
 */
2607 pci_print_verbose(struct pci_devinfo *dinfo)
2611 pcicfgregs *cfg = &dinfo->cfg;
2613 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2614 cfg->vendor, cfg->device, cfg->revid);
2615 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2616 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2617 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2618 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2620 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2621 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2622 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2623 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2624 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2625 if (cfg->intpin > 0)
2626 printf("\tintpin=%c, irq=%d\n",
2627 cfg->intpin +'a' -1, cfg->intline);
2628 if (cfg->pp.pp_cap) {
/* Power-management capability: current D-state read live from hardware. */
2631 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2632 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2633 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2634 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2635 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2636 status & PCIM_PSTAT_DMASK);
2638 if (cfg->msi.msi_location) {
2641 ctrl = cfg->msi.msi_ctrl;
2642 printf("\tMSI supports %d message%s%s%s\n",
2643 cfg->msi.msi_msgnum,
2644 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2645 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2646 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2648 if (cfg->msix.msix_location) {
2649 printf("\tMSI-X supports %d message%s ",
2650 cfg->msix.msix_msgnum,
2651 (cfg->msix.msix_msgnum == 1) ? "" : "s");
/* Table and PBA may share one BAR or live in two distinct BARs. */
2652 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2653 printf("in map 0x%x\n",
2654 cfg->msix.msix_table_bar);
2656 printf("in maps 0x%x and 0x%x\n",
2657 cfg->msix.msix_table_bar,
2658 cfg->msix.msix_pba_bar);
/* Return nonzero iff I/O-port decoding is enabled in PCIR_COMMAND. */
2664 pci_porten(device_t dev)
2666 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return nonzero iff memory decoding is enabled in PCIR_COMMAND. */
2670 pci_memen(device_t dev)
2672 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Read a BAR's current value and its all-ones probe value (size mask),
 * returning both via *mapp and *testvalp.  Decoding is turned off in
 * the command register while sizing so the temporarily-reprogrammed
 * BAR cannot conflict, and the original BAR/command contents are
 * restored before returning.
 */
2676 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2678 struct pci_devinfo *dinfo;
2679 pci_addr_t map, testval;
2684 * The device ROM BAR is special. It is always a 32-bit
2685 * memory BAR. Bit 0 is special and should not be set when
2688 dinfo = device_get_ivars(dev);
2689 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
/* Probe with 0xfffffffe to keep the ROM-enable bit (bit 0) clear. */
2690 map = pci_read_config(dev, reg, 4);
2691 pci_write_config(dev, reg, 0xfffffffe, 4);
2692 testval = pci_read_config(dev, reg, 4);
2693 pci_write_config(dev, reg, map, 4);
2695 *testvalp = testval;
2699 map = pci_read_config(dev, reg, 4);
2700 ln2range = pci_maprange(map);
/* 64-bit BAR: the upper half lives in the next dword register. */
2702 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2705 * Disable decoding via the command register before
2706 * determining the BAR's length since we will be placing it in
2709 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2710 pci_write_config(dev, PCIR_COMMAND,
2711 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2714 * Determine the BAR's length by writing all 1's. The bottom
2715 * log_2(size) bits of the BAR will stick as 0 when we read
2718 pci_write_config(dev, reg, 0xffffffff, 4);
2719 testval = pci_read_config(dev, reg, 4);
2720 if (ln2range == 64) {
2721 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2722 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2726 * Restore the original value of the BAR. We may have reprogrammed
2727 * the BAR of the low-level console device and when booting verbose,
2728 * we need the console device addressable.
2730 pci_write_config(dev, reg, map, 4);
2732 pci_write_config(dev, reg + 4, map >> 32, 4);
2733 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2736 *testvalp = testval;
/*
 * Program the BAR described by `pm` with `base` (both halves for a
 * 64-bit BAR) and read back into pm->pm_value what the device actually
 * latched.  The device ROM BAR is 32-bit only and takes the early path.
 */
2740 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2742 struct pci_devinfo *dinfo;
2745 /* The device ROM BAR is always a 32-bit memory BAR. */
2746 dinfo = device_get_ivars(dev);
2747 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2750 ln2range = pci_maprange(pm->pm_value);
2751 pci_write_config(dev, pm->pm_reg, base, 4);
2753 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2754 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2756 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2757 pm->pm_reg + 4, 4) << 32;
/*
 * Find the saved BAR record (struct pci_map) whose pm_reg matches
 * config offset `reg`.  (The match/miss return lines are elided in
 * this listing.)
 */
2761 pci_find_bar(device_t dev, int reg)
2763 struct pci_devinfo *dinfo;
2766 dinfo = device_get_ivars(dev);
2767 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2768 if (pm->pm_reg == reg)
/*
 * Report whether decoding for this BAR is currently active: the ROM
 * BAR additionally requires its PCIM_BIOS_ENABLE bit; ROM/memory BARs
 * then check PCIM_CMD_MEMEN and I/O BARs check PCIM_CMD_PORTEN.
 */
2775 pci_bar_enabled(device_t dev, struct pci_map *pm)
2777 struct pci_devinfo *dinfo;
2780 dinfo = device_get_ivars(dev);
2781 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2782 !(pm->pm_value & PCIM_BIOS_ENABLE))
2784 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2785 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2786 return ((cmd & PCIM_CMD_MEMEN) != 0);
2788 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Allocate a new BAR record for config offset `reg` and insert it into
 * the device's map list, which is kept sorted by pm_reg; duplicate
 * registers are caught by the KASSERT.  Returns the new record.
 */
2791 static struct pci_map *
2792 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2794 struct pci_devinfo *dinfo;
2795 struct pci_map *pm, *prev;
2797 dinfo = device_get_ivars(dev);
2798 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2800 pm->pm_value = value;
2802 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2803 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
/* Stop at the insertion point: end of list or first larger pm_reg. */
2805 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2806 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2810 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2812 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every saved BAR value from the devinfo map list back into
 * config space (both halves for 64-bit BARs).  ROM BARs are skipped.
 */
2817 pci_restore_bars(device_t dev)
2819 struct pci_devinfo *dinfo;
2823 dinfo = device_get_ivars(dev);
2824 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2825 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2828 ln2range = pci_maprange(pm->pm_value);
2829 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2831 pci_write_config(dev, pm->pm_reg + 4,
2832 pm->pm_value >> 32, 4);
2837 * Add a resource based on a pci map register. Return 1 if the map
2838 * register is a 32bit map register or 2 if it is a 64bit register.
2841 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2842 int force, int prefetch)
2845 pci_addr_t base, map, testval;
2846 pci_addr_t start, end, count;
2847 int barlen, basezero, flags, maprange, mapsize, type;
2849 struct resource *res;
2852 * The BAR may already exist if the device is a CardBus card
2853 * whose CIS is stored in this BAR.
2855 pm = pci_find_bar(dev, reg);
2857 maprange = pci_maprange(pm->pm_value);
2858 barlen = maprange == 64 ? 2 : 1;
2862 pci_read_bar(dev, reg, &map, &testval);
2863 if (PCI_BAR_MEM(map)) {
2864 type = SYS_RES_MEMORY;
2865 if (map & PCIM_BAR_MEM_PREFETCH)
2868 type = SYS_RES_IOPORT;
2869 mapsize = pci_mapsize(testval);
2870 base = pci_mapbase(map);
2871 #ifdef __PCI_BAR_ZERO_VALID
2874 basezero = base == 0;
2876 maprange = pci_maprange(map);
2877 barlen = maprange == 64 ? 2 : 1;
2880 * For I/O registers, if bottom bit is set, and the next bit up
2881 * isn't clear, we know we have a BAR that doesn't conform to the
2882 * spec, so ignore it. Also, sanity check the size of the data
2883 * areas to the type of memory involved. Memory must be at least
2884 * 16 bytes in size, while I/O ranges must be at least 4.
2886 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2888 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2889 (type == SYS_RES_IOPORT && mapsize < 2))
2892 /* Save a record of this BAR. */
2893 pm = pci_add_bar(dev, reg, map, mapsize);
2895 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2896 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2897 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2898 printf(", port disabled\n");
2899 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2900 printf(", memory disabled\n");
2902 printf(", enabled\n");
2906 * If base is 0, then we have problems if this architecture does
2907 * not allow that. It is best to ignore such entries for the
2908 * moment. These will be allocated later if the driver specifically
2909 * requests them. However, some removable busses look better when
2910 * all resources are allocated, so allow '0' to be overriden.
2912 * Similarly treat maps whose values is the same as the test value
2913 * read back. These maps have had all f's written to them by the
2914 * BIOS in an attempt to disable the resources.
2916 if (!force && (basezero || map == testval))
2918 if ((u_long)base != base) {
2920 "pci%d:%d:%d:%d bar %#x too many address bits",
2921 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2922 pci_get_function(dev), reg);
2927 * This code theoretically does the right thing, but has
2928 * undesirable side effects in some cases where peripherals
2929 * respond oddly to having these bits enabled. Let the user
2930 * be able to turn them off (since pci_enable_io_modes is 1 by
2933 if (pci_enable_io_modes) {
2934 /* Turn on resources that have been left off by a lazy BIOS */
2935 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2936 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2937 cmd |= PCIM_CMD_PORTEN;
2938 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2940 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2941 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2942 cmd |= PCIM_CMD_MEMEN;
2943 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2946 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2948 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2952 count = (pci_addr_t)1 << mapsize;
2953 flags = RF_ALIGNMENT_LOG2(mapsize);
2955 flags |= RF_PREFETCHABLE;
2956 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
2957 start = 0; /* Let the parent decide. */
2961 end = base + count - 1;
2963 resource_list_add(rl, type, reg, start, end, count);
2966 * Try to allocate the resource for this BAR from our parent
2967 * so that this resource range is already reserved. The
2968 * driver for this device will later inherit this resource in
2969 * pci_alloc_resource().
2971 res = resource_list_reserve(rl, bus, dev, type, ®, start, end, count,
2973 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2975 * If the allocation fails, try to allocate a resource for
2976 * this BAR using any available range. The firmware felt
2977 * it was important enough to assign a resource, so don't
2978 * disable decoding if we can help it.
2980 resource_list_delete(rl, type, reg);
2981 resource_list_add(rl, type, reg, 0, ~0ul, count);
2982 res = resource_list_reserve(rl, bus, dev, type, ®, 0, ~0ul,
2987 * If the allocation fails, delete the resource list entry
2988 * and disable decoding for this device.
2990 * If the driver requests this resource in the future,
2991 * pci_reserve_map() will try to allocate a fresh
2994 resource_list_delete(rl, type, reg);
2995 pci_disable_io(dev, type);
2998 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2999 pci_get_domain(dev), pci_get_bus(dev),
3000 pci_get_slot(dev), pci_get_function(dev), reg);
3002 start = rman_get_start(res);
3003 pci_write_bar(dev, pm, start);
3009 * For ATA devices we need to decide early what addressing mode to use.
3010 * Legacy demands that the primary and secondary ATA ports sits on the
3011 * same addresses that old ISA hardware did. This dictates that we use
3012 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Map the BARs of an IDE/ATA controller.  If the device advertises
 * native-PCI mode capability (progif 0x8a pattern) and its BARs look
 * usable, switch it to native mode; otherwise reserve the legacy ISA
 * port ranges for each channel still in compatibility mode.
 */
3016 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
3017 uint32_t prefetchmask)
3019 int rid, type, progif;
3021 /* if this device supports PCI native addressing use it */
3022 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3023 if ((progif & 0x8a) == 0x8a) {
3024 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
3025 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
3026 printf("Trying ATA native PCI addressing mode\n");
3027 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
3031 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3032 type = SYS_RES_IOPORT;
3033 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
/* Primary channel in native mode: use BAR(0)/BAR(1). */
3034 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
3035 prefetchmask & (1 << 0));
3036 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
3037 prefetchmask & (1 << 1));
/* Primary channel in compat mode: legacy ports 0x1f0-0x1f7 / 0x3f6. */
3040 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
3041 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
3044 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
3045 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
3048 if (progif & PCIP_STORAGE_IDE_MODESEC) {
/* Secondary channel in native mode: use BAR(2)/BAR(3). */
3049 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3050 prefetchmask & (1 << 2));
3051 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3052 prefetchmask & (1 << 3));
/* Secondary channel in compat mode: legacy ports 0x170-0x177 / 0x376. */
3055 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3056 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3059 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3060 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
/* BAR(4)/BAR(5) are mapped unconditionally in either mode. */
3063 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3064 prefetchmask & (1 << 4));
3065 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3066 prefetchmask & (1 << 5));
/*
 * Determine the IRQ for a device's INTx pin and record it as the
 * rid-0 SYS_RES_IRQ resource.  Priority: a user tunable of the form
 * hw.pci<dom>.<bus>.<slot>.INT<pin>.irq, then either the intline
 * register or a routing request to the bus (order depends on
 * force_route).  Writes PCIR_INTLINE back if the IRQ changed.
 */
3070 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3072 struct pci_devinfo *dinfo = device_get_ivars(dev);
3073 pcicfgregs *cfg = &dinfo->cfg;
3074 char tunable_name[64];
3077 /* Has to have an intpin to have an interrupt. */
3078 if (cfg->intpin == 0)
3081 /* Let the user override the IRQ with a tunable. */
3082 irq = PCI_INVALID_IRQ;
3083 snprintf(tunable_name, sizeof(tunable_name),
3084 "hw.pci%d.%d.%d.INT%c.irq",
3085 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Tunable values outside 1..254 are rejected as invalid. */
3086 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3087 irq = PCI_INVALID_IRQ;
3090 * If we didn't get an IRQ via the tunable, then we either use the
3091 * IRQ value in the intline register or we ask the bus to route an
3092 * interrupt for us. If force_route is true, then we only use the
3093 * value in the intline register if the bus was unable to assign an
3096 if (!PCI_INTERRUPT_VALID(irq)) {
3097 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3098 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3099 if (!PCI_INTERRUPT_VALID(irq))
3103 /* If after all that we don't have an IRQ, just bail. */
3104 if (!PCI_INTERRUPT_VALID(irq))
3107 /* Update the config register if it changed. */
3108 if (irq != cfg->intline) {
3110 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3113 /* Add this IRQ as rid 0 interrupt resource. */
3114 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3117 /* Perform early OHCI takeover from SMM. */
/*
 * Take an OHCI controller away from BIOS/SMM ownership before attach:
 * if the interrupt-routing bit is set, request an ownership change via
 * OHCI_OCR and poll up to 100 iterations; reset the host controller if
 * SMM never responds, then mask all OHCI interrupts.
 */
3119 ohci_early_takeover(device_t self)
3121 struct resource *res;
3127 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3131 ctl = bus_read_4(res, OHCI_CONTROL);
3132 if (ctl & OHCI_IR) {
3134 printf("ohci early: "
3135 "SMM active, request owner change\n");
3136 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3137 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3139 ctl = bus_read_4(res, OHCI_CONTROL);
3141 if (ctl & OHCI_IR) {
3143 printf("ohci early: "
3144 "SMM does not respond, resetting\n");
3145 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3147 /* Disable interrupts */
3148 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3151 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3154 /* Perform early UHCI takeover from SMM. */
/*
 * Take a UHCI controller away from BIOS legacy emulation: keep only
 * the PIRQD enable bit in the legacy-support register, then clear the
 * controller's interrupt-enable register through its I/O BAR.
 */
3156 uhci_early_takeover(device_t self)
3158 struct resource *res;
3162 * Set the PIRQD enable bit and switch off all the others. We don't
3163 * want legacy support to interfere with us XXX Does this also mean
3164 * that the BIOS won't touch the keyboard anymore if it is connected
3165 * to the ports of the root hub?
3167 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3169 /* Disable interrupts */
3170 rid = PCI_UHCI_BASE_REG;
3171 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3173 bus_write_2(res, UHCI_INTR, 0);
3174 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3178 /* Perform early EHCI takeover from SMM. */
/*
 * Take an EHCI controller away from BIOS/SMM: walk the extended
 * capability list for the USB legacy-support capability, claim the OS
 * ownership semaphore, poll up to 100 iterations for the BIOS
 * semaphore to clear, then mask the controller's interrupts.
 */
3180 ehci_early_takeover(device_t self)
3182 struct resource *res;
3192 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3196 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3198 /* Synchronise with the BIOS if it owns the controller. */
3199 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3200 eecp = EHCI_EECP_NEXT(eec)) {
3201 eec = pci_read_config(self, eecp, 4);
3202 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3205 bios_sem = pci_read_config(self, eecp +
3206 EHCI_LEGSUP_BIOS_SEM, 1);
3207 if (bios_sem == 0) {
3211 printf("ehci early: "
3212 "SMM active, request owner change\n");
/* Request ownership by setting the OS semaphore. */
3214 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3216 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3218 bios_sem = pci_read_config(self, eecp +
3219 EHCI_LEGSUP_BIOS_SEM, 1);
3222 if (bios_sem != 0) {
3224 printf("ehci early: "
3225 "SMM does not respond\n");
3227 /* Disable interrupts */
3228 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3229 bus_write_4(res, offs + EHCI_USBINTR, 0);
3231 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3234 /* Perform early XHCI takeover from SMM. */
/*
 * Take an XHCI controller away from BIOS/SMM: walk the MMIO extended
 * capability list for the USB legacy capability, set the OS ownership
 * semaphore, wait up to 5 seconds for the BIOS semaphore to clear,
 * then clear USBCMD (and read USBSTS to flush) to stop interrupts.
 */
3236 xhci_early_takeover(device_t self)
3238 struct resource *res;
3248 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3252 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3256 /* Synchronise with the BIOS if it owns the controller. */
3257 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3258 eecp += XHCI_XECP_NEXT(eec) << 2) {
3259 eec = bus_read_4(res, eecp);
3261 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3264 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3269 printf("xhci early: "
3270 "SMM active, request owner change\n");
3272 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3274 /* wait a maximum of 5 second */
3276 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3278 bios_sem = bus_read_1(res, eecp +
3279 XHCI_XECP_BIOS_SEM);
3282 if (bios_sem != 0) {
3284 printf("xhci early: "
3285 "SMM does not respond\n");
3288 /* Disable interrupts */
3289 offs = bus_read_1(res, XHCI_CAPLENGTH);
3290 bus_write_4(res, offs + XHCI_USBCMD, 0);
3291 bus_read_4(res, offs + XHCI_USBSTS);
3293 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3296 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
 * For a PCI-PCI or CardBus bridge, read the secondary/subordinate bus
 * registers, apply device quirks, and try to reserve the bus-number
 * range from the parent.  On failure (or when pci_clear_buses is set)
 * the secbus/subbus registers are cleared so the range can be
 * renumbered later.
 */
3298 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3299 struct resource_list *rl)
3301 struct resource *res;
3303 u_long start, end, count;
3304 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
/* Secondary/subordinate registers live at different offsets for
 * bridge vs. CardBus header types. */
3306 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3307 case PCIM_HDRTYPE_BRIDGE:
3308 sec_reg = PCIR_SECBUS_1;
3309 sub_reg = PCIR_SUBBUS_1;
3311 case PCIM_HDRTYPE_CARDBUS:
3312 sec_reg = PCIR_SECBUS_2;
3313 sub_reg = PCIR_SUBBUS_2;
3320 * If the existing bus range is valid, attempt to reserve it
3321 * from our parent. If this fails for any reason, clear the
3322 * secbus and subbus registers.
3324 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3325 * This would at least preserve the existing sec_bus if it is
3328 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3329 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3331 /* Quirk handling. */
3332 switch (pci_get_devid(dev)) {
3333 case 0x12258086: /* Intel 82454KX/GX (Orion) */
3334 sup_bus = pci_read_config(dev, 0x41, 1);
3335 if (sup_bus != 0xff) {
3336 sec_bus = sup_bus + 1;
3337 sub_bus = sup_bus + 1;
3338 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3339 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3344 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
3345 if ((cp = getenv("smbios.planar.maker")) == NULL)
3347 if (strncmp(cp, "Compal", 6) != 0) {
3352 if ((cp = getenv("smbios.planar.product")) == NULL)
3354 if (strncmp(cp, "08A0", 4) != 0) {
3359 if (sub_bus < 0xa) {
3361 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3367 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
3368 if (sec_bus > 0 && sub_bus >= sec_bus) {
3371 count = end - start + 1;
3373 resource_list_add(rl, PCI_RES_BUS, 0, 0ul, ~0ul, count);
3376 * If requested, clear secondary bus registers in
3377 * bridge devices to force a complete renumbering
3378 * rather than reserving the existing range. However,
3379 * preserve the existing size.
3381 if (pci_clear_buses)
3385 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3386 start, end, count, 0);
3392 "pci%d:%d:%d:%d secbus failed to allocate\n",
3393 pci_get_domain(dev), pci_get_bus(dev),
3394 pci_get_slot(dev), pci_get_function(dev));
/* Reservation failed: zero both registers so the range is renumbered. */
3398 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3399 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
/*
 * Allocate a PCI_RES_BUS range for a child bridge, lazily reserving
 * one if the resource list has no entry yet, and program the bridge's
 * secbus/subbus registers to match the range finally granted.
 */
3402 static struct resource *
3403 pci_alloc_secbus(device_t dev, device_t child, int *rid, u_long start,
3404 u_long end, u_long count, u_int flags)
3406 struct pci_devinfo *dinfo;
3408 struct resource_list *rl;
3409 struct resource *res;
3410 int sec_reg, sub_reg;
3412 dinfo = device_get_ivars(child);
3414 rl = &dinfo->resources;
3415 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3416 case PCIM_HDRTYPE_BRIDGE:
3417 sec_reg = PCIR_SECBUS_1;
3418 sub_reg = PCIR_SUBBUS_1;
3420 case PCIM_HDRTYPE_CARDBUS:
3421 sec_reg = PCIR_SECBUS_2;
3422 sub_reg = PCIR_SUBBUS_2;
3431 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3432 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
3433 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
3434 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3435 start, end, count, flags & ~RF_ACTIVE);
3437 resource_list_delete(rl, PCI_RES_BUS, *rid);
3438 device_printf(child, "allocating %lu bus%s failed\n",
3439 count, count == 1 ? "" : "es");
3443 device_printf(child,
3444 "Lazy allocation of %lu bus%s at %lu\n", count,
3445 count == 1 ? "" : "es", rman_get_start(res));
/* Mirror the granted range into the bridge's config registers. */
3446 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3447 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3449 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3450 end, count, flags));
/*
 * Populate a new child's resource list: map its BARs (with special
 * handling for legacy ATA controllers and per-device quirks), assign
 * its INTx interrupt, perform early USB-controller takeover from
 * SMM/BIOS, and (with NEW_PCIB) reserve bridge secondary bus ranges.
 */
3455 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3457 struct pci_devinfo *dinfo;
3459 struct resource_list *rl;
3460 const struct pci_quirk *q;
3464 dinfo = device_get_ivars(dev);
3466 rl = &dinfo->resources;
3467 devid = (cfg->device << 16) | cfg->vendor;
3469 /* ATA devices needs special map treatment */
3470 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3471 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3472 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3473 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3474 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3475 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* Walk the BARs; pci_add_map() returns how many registers to skip. */
3477 for (i = 0; i < cfg->nummaps;) {
3479 * Skip quirked resources.
3481 for (q = &pci_quirks[0]; q->devid != 0; q++)
3482 if (q->devid == devid &&
3483 q->type == PCI_QUIRK_UNMAP_REG &&
3484 q->arg1 == PCIR_BAR(i))
3486 if (q->devid != 0) {
3490 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3491 prefetchmask & (1 << i));
3495 * Add additional, quirked resources.
3497 for (q = &pci_quirks[0]; q->devid != 0; q++)
3498 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3499 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3501 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3502 #ifdef __PCI_REROUTE_INTERRUPT
3504 * Try to re-route interrupts. Sometimes the BIOS or
3505 * firmware may leave bogus values in these registers.
3506 * If the re-route fails, then just stick with what we
3509 pci_assign_interrupt(bus, dev, 1);
3511 pci_assign_interrupt(bus, dev, 0);
3515 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3516 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3517 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3518 xhci_early_takeover(dev);
3519 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3520 ehci_early_takeover(dev);
3521 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3522 ohci_early_takeover(dev);
3523 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3524 uhci_early_takeover(dev);
3527 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3529 * Reserve resources for secondary bus ranges behind bridge
3532 pci_reserve_secbus(bus, dev, cfg, rl);
/*
 * Probe one domain/bus/slot/function.  If pci_read_device() finds a
 * device there, add it as a child of `dev`; the resulting devinfo
 * (or NULL when absent -- return elided in this listing) is returned.
 */
3536 static struct pci_devinfo *
3537 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3538 int slot, int func, size_t dinfo_size)
3540 struct pci_devinfo *dinfo;
3542 dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size);
3544 pci_add_child(dev, dinfo);
/*
 * Enumerate all devices on bus `busno`: probe slot 0 / function 0
 * first so ARI can be enabled before the rest of the scan (ARI changes
 * which slot/function combinations are legal), then walk every slot
 * and, for multi-function devices, every function.
 */
3550 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3552 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3553 device_t pcib = device_get_parent(dev);
3554 struct pci_devinfo *dinfo;
3556 int s, f, pcifunchigh;
3561 * Try to detect a device at slot 0, function 0. If it exists, try to
3562 * enable ARI. We must enable ARI before detecting the rest of the
3563 * functions on this bus as ARI changes the set of slots and functions
3564 * that are legal on this bus.
3566 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0,
3568 if (dinfo != NULL && pci_enable_ari)
3569 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev)
3572 * Start looking for new devices on slot 0 at function 1 because we
3573 * just identified the device at slot 0, function 0.
3577 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3578 ("dinfo_size too small"));
3579 maxslots = PCIB_MAXSLOTS(pcib);
3580 for (s = 0; s <= maxslots; s++, first_func = 0) {
3584 hdrtype = REG(PCIR_HDRTYPE, 1);
3585 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3587 if (hdrtype & PCIM_MFDEV)
3588 pcifunchigh = PCIB_MAXFUNCS(pcib);
3589 for (f = first_func; f <= pcifunchigh; f++)
3590 pci_identify_function(pcib, dev, domain, busno, s, f,
/*
 * Attach a discovered function as a newbus child: create the device,
 * hook up its devinfo as ivars, initialize its resource list, save
 * then restore config state, print verbose info, add resources, and
 * notify the bus via the child-added hook.
 */
3597 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3599 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3600 device_set_ivars(dinfo->cfg.dev, dinfo);
3601 resource_list_init(&dinfo->resources);
3602 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3603 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3604 pci_print_verbose(dinfo);
3605 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
3606 pci_child_added(dinfo->cfg.dev);
/* Default bus method invoked after a child device is added.
 * NOTE(review): only the signature is visible in this listing. */
3610 pci_child_added_method(device_t dev, device_t child)
/* Generic probe: always matches, at a priority subclasses can beat. */
3616 pci_probe(device_t dev)
3619 device_set_desc(dev, "PCI bus");
3621 /* Allow other subclasses to override this driver. */
3622 return (BUS_PROBE_GENERIC);
/*
 * Shared attach work for pci and its subclasses: allocate the bus
 * number resource, print the domain/bus when verbose, and (with
 * PCI_DMA_BOUNDARY) create a boundary-constrained DMA tag for the
 * top-level bus, otherwise inherit the parent's tag.
 */
3626 pci_attach_common(device_t dev)
3628 struct pci_softc *sc;
3630 #ifdef PCI_DMA_BOUNDARY
3631 int error, tag_valid;
3637 sc = device_get_softc(dev);
3638 domain = pcib_get_domain(dev);
3639 busno = pcib_get_bus(dev);
3642 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
3644 if (sc->sc_bus == NULL) {
3645 device_printf(dev, "failed to allocate bus number\n");
3650 device_printf(dev, "domain=%d, physical bus=%d\n",
3652 #ifdef PCI_DMA_BOUNDARY
/* Only the top-level PCI bus gets its own boundary-limited DMA tag;
 * nested buses fall through to the parent's tag below. */
3654 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3655 devclass_find("pci")) {
3656 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3657 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3658 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3659 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3661 device_printf(dev, "Failed to create DMA tag: %d\n",
3668 sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Attach the PCI bus: run the common attach work, then enumerate
 * children using the domain/bus numbers reported by the parent pcib.
 */
3673 pci_attach(device_t dev)
3675 int busno, domain, error;
3677 error = pci_attach_common(dev);
3682 * Since there can be multiple independently numbered PCI
3683 * busses on systems with multiple PCI domains, we can't use
3684 * the unit number to decide which bus we are probing. We ask
3685 * the parent pcib what our domain and bus numbers are.
3687 domain = pcib_get_domain(dev);
3688 busno = pcib_get_bus(dev);
3689 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3690 return (bus_generic_attach(dev));
/* Detach children generically, then release the bus number resource. */
3695 pci_detach(device_t dev)
3697 struct pci_softc *sc;
3700 error = bus_generic_detach(dev);
3703 sc = device_get_softc(dev);
3704 return (bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus));
/*
 * Put each attached child in `devlist` into the requested power state,
 * deferring to PCIB_POWER_FOR_SLEEP() which may substitute a state the
 * firmware prefers.  Unattached children are skipped (handled
 * elsewhere).
 */
3709 pci_set_power_children(device_t dev, device_t *devlist, int numdevs,
3712 device_t child, pcib;
3716 * Set the device to the given state. If the firmware suggests
3717 * a different power state, use it instead. If power management
3718 * is not present, the firmware is responsible for managing
3719 * device power. Skip children who aren't attached since they
3720 * are handled separately.
3722 pcib = device_get_parent(dev);
3723 for (i = 0; i < numdevs; i++) {
3726 if (device_is_attached(child) &&
3727 PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
3728 pci_set_powerstate(child, dstate);
/*
 * Suspend the bus: save every child's config space, run the generic
 * suspend (drivers first), then -- if pci_do_power_suspend is set --
 * power the children down via pci_set_power_children().
 */
3733 pci_suspend(device_t dev)
3735 device_t child, *devlist;
3736 struct pci_devinfo *dinfo;
3737 int error, i, numdevs;
3740 * Save the PCI configuration space for each child and set the
3741 * device in the appropriate power state for this sleep state.
3743 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3745 for (i = 0; i < numdevs; i++) {
3747 dinfo = device_get_ivars(child);
3748 pci_cfg_save(child, dinfo, 0);
3751 /* Suspend devices before potentially powering them down. */
3752 error = bus_generic_suspend(dev);
3754 free(devlist, M_TEMP);
3757 if (pci_do_power_suspend)
3758 pci_set_power_children(dev, devlist, numdevs,
3760 free(devlist, M_TEMP);
/*
 * Resume the bus: power children back up (when pci_do_power_resume),
 * restore each child's config space (re-saving state for unattached
 * ones), then resume drivers in two passes so critical device classes
 * (e.g. PCIC_BASEPERIPH per the visible case label) come up first.
 */
3765 pci_resume(device_t dev)
3767 device_t child, *devlist;
3768 struct pci_devinfo *dinfo;
3769 int error, i, numdevs;
3772 * Set each child to D0 and restore its PCI configuration space.
3774 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3776 if (pci_do_power_resume)
3777 pci_set_power_children(dev, devlist, numdevs,
3780 /* Now the device is powered up, restore its config space. */
3781 for (i = 0; i < numdevs; i++) {
3783 dinfo = device_get_ivars(child);
3785 pci_cfg_restore(child, dinfo);
3786 if (!device_is_attached(child))
3787 pci_cfg_save(child, dinfo, 1);
3791 * Resume critical devices first, then everything else later.
3793 for (i = 0; i < numdevs; i++) {
3795 switch (pci_get_class(child)) {
3799 case PCIC_BASEPERIPH:
3800 DEVICE_RESUME(child);
/* Second pass: resume every class not handled in the first pass. */
3804 for (i = 0; i < numdevs; i++) {
3806 switch (pci_get_class(child)) {
3810 case PCIC_BASEPERIPH:
3813 DEVICE_RESUME(child);
3816 free(devlist, M_TEMP);
/*
 * Locate a preloaded "pci_vendor_data" module and publish its address
 * and size via pci_vendordata/pci_vendordata_size, terminating the
 * database with a newline.
 */
3821 pci_load_vendor_data(void)
3827 data = preload_search_by_type("pci_vendor_data");
3829 ptr = preload_fetch_addr(data);
3830 sz = preload_fetch_size(data);
3831 if (ptr != NULL && sz != 0) {
3832 pci_vendordata = ptr;
3833 pci_vendordata_size = sz;
3834 /* terminate the database */
3835 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Bus hook called when a new driver is registered: re-run identify,
 * then for each child still unprobed (DS_NOTPRESENT) restore its
 * config state and retry probe/attach, detaching it again on failure.
 */
3841 pci_driver_added(device_t dev, driver_t *driver)
3846 struct pci_devinfo *dinfo;
3850 device_printf(dev, "driver added\n");
3851 DEVICE_IDENTIFY(driver, dev);
3852 if (device_get_children(dev, &devlist, &numdevs) != 0)
3854 for (i = 0; i < numdevs; i++) {
3856 if (device_get_state(child) != DS_NOTPRESENT)
3858 dinfo = device_get_ivars(child);
3859 pci_print_verbose(dinfo);
3861 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3862 pci_cfg_restore(child, dinfo);
3863 if (device_probe_and_attach(child) != 0)
3864 pci_child_detached(dev, child);
3866 free(devlist, M_TEMP);
/*
 * Set up an interrupt handler for a child.  After the generic setup,
 * direct children get MSI/MSI-X programming: the parent bridge maps
 * the vector to address/data values, the message is enabled on first
 * handler registration, and INTx is masked (PCIM_CMD_INTxDIS) unless
 * the device has the MSI_INTX_BUG quirk.  On mapping failure the
 * handler is torn down again.
 */
3870 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3871 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3873 struct pci_devinfo *dinfo;
3874 struct msix_table_entry *mte;
3875 struct msix_vector *mv;
3881 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3886 /* If this is not a direct child, just bail out. */
3887 if (device_get_parent(child) != dev) {
3892 rid = rman_get_rid(irq);
3894 /* Make sure that INTx is enabled */
3895 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3898 * Check to see if the interrupt is MSI or MSI-X.
3899 * Ask our parent to map the MSI and give
3900 * us the address and data register values.
3901 * If we fail for some reason, teardown the
3902 * interrupt handler.
3904 dinfo = device_get_ivars(child);
3905 if (dinfo->cfg.msi.msi_alloc > 0) {
/* Lazily map the MSI vector on the first handler for it. */
3906 if (dinfo->cfg.msi.msi_addr == 0) {
3907 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3908 ("MSI has handlers, but vectors not mapped"));
3909 error = PCIB_MAP_MSI(device_get_parent(dev),
3910 child, rman_get_start(irq), &addr, &data);
3913 dinfo->cfg.msi.msi_addr = addr;
3914 dinfo->cfg.msi.msi_data = data;
3916 if (dinfo->cfg.msi.msi_handlers == 0)
3917 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3918 dinfo->cfg.msi.msi_data);
3919 dinfo->cfg.msi.msi_handlers++;
3921 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3922 ("No MSI or MSI-X interrupts allocated"));
3923 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3924 ("MSI-X index too high"));
/* MSI-X rids are 1-based; table entries are 0-based. */
3925 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3926 KASSERT(mte->mte_vector != 0, ("no message vector"));
3927 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3928 KASSERT(mv->mv_irq == rman_get_start(irq),
3930 if (mv->mv_address == 0) {
3931 KASSERT(mte->mte_handlers == 0,
3932 ("MSI-X table entry has handlers, but vector not mapped"));
3933 error = PCIB_MAP_MSI(device_get_parent(dev),
3934 child, rman_get_start(irq), &addr, &data);
3937 mv->mv_address = addr;
3940 if (mte->mte_handlers == 0) {
3941 pci_enable_msix(child, rid - 1, mv->mv_address,
3943 pci_unmask_msix(child, rid - 1);
3945 mte->mte_handlers++;
3949 * Make sure that INTx is disabled if we are using MSI/MSI-X,
3950 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
3951 * in which case we "enable" INTx so MSI/MSI-X actually works.
3953 if (!pci_has_quirk(pci_get_devid(child),
3954 PCI_QUIRK_MSI_INTX_BUG))
3955 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3957 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Mapping failed: undo the generic setup before returning. */
3960 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus teardown_intr method: the inverse of pci_setup_intr.  For direct
 * children, drop the MSI/MSI-X handler refcount and disable MSI or mask
 * the MSI-X entry when the count reaches zero, then perform the generic
 * teardown.  NOTE(review): extract is elided; code left byte-identical.
 */
3970 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3973 	struct msix_table_entry *mte;
3974 	struct resource_list_entry *rle;
3975 	struct pci_devinfo *dinfo;
3978 	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3981 	/* If this isn't a direct child, just bail out */
3982 	if (device_get_parent(child) != dev)
3983 	return(bus_generic_teardown_intr(dev, child, irq, cookie));
3985 	rid = rman_get_rid(irq);
3988 	pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3991 	 * Check to see if the interrupt is MSI or MSI-X.  If so,
3992 	 * decrement the appropriate handlers count and mask the
3993 	 * MSI-X message, or disable MSI messages if the count
3996 	dinfo = device_get_ivars(child);
3997 	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
/* Sanity check: the resource being torn down must be the one we track. */
3998 	if (rle->res != irq)
4000 	if (dinfo->cfg.msi.msi_alloc > 0) {
/*
 * NOTE(review): this is the MSI branch but the assertion text says
 * "MSI-X index too high" — message looks copy/pasted from the MSI-X
 * branch below; worth fixing upstream.
 */
4001 	KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4002 	("MSI-X index too high"));
4003 	if (dinfo->cfg.msi.msi_handlers == 0)
4005 	dinfo->cfg.msi.msi_handlers--;
4006 	if (dinfo->cfg.msi.msi_handlers == 0)
4007 	pci_disable_msi(child);
4009 	KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4010 	("No MSI or MSI-X interrupts allocated"));
4011 	KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4012 	("MSI-X index too high"));
4013 	mte = &dinfo->cfg.msix.msix_table[rid - 1];
4014 	if (mte->mte_handlers == 0)
4016 	mte->mte_handlers--;
4017 	if (mte->mte_handlers == 0)
4018 	pci_mask_msix(child, rid - 1);
4021 	error = bus_generic_teardown_intr(dev, child, irq, cookie);
4024 	("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus print_child method: print the standard header, the child's I/O
 * port, memory, and IRQ resource ranges, any device flags, and its
 * slot.function location, followed by domain info and the footer.
 * Returns the accumulated character count, as bus print methods do.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4029 pci_print_child(device_t dev, device_t child)
4031 	struct pci_devinfo *dinfo;
4032 	struct resource_list *rl;
4035 	dinfo = device_get_ivars(child);
4036 	rl = &dinfo->resources;
4038 	retval += bus_print_child_header(dev, child);
4040 	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
4041 	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
4042 	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
4043 	if (device_get_flags(dev))
4044 	retval += printf(" flags %#x", device_get_flags(dev));
4046 	retval += printf(" at device %d.%d", pci_get_slot(child),
4047 	pci_get_function(child));
4049 	retval += bus_print_child_domain(dev, child);
4050 	retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass description table consulted by pci_probe_nomatch() when
 * no driver attaches.  Entries with subclass -1 describe the whole class;
 * "report" selects whether the device is announced always (1) or only
 * under bootverbose (0).  NOTE(review): the struct header lines are
 * elided from this extract; table body left byte-identical.
 */
4059 	int	report; /* 0 = bootverbose, 1 = always */
4061 } pci_nomatch_tab[] = {
4062 	{PCIC_OLD,		-1,			1, "old"},
4063 	{PCIC_OLD,		PCIS_OLD_NONVGA,	1, "non-VGA display device"},
4064 	{PCIC_OLD,		PCIS_OLD_VGA,		1, "VGA-compatible display device"},
4065 	{PCIC_STORAGE,		-1,			1, "mass storage"},
4066 	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	1, "SCSI"},
4067 	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	1, "ATA"},
4068 	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	1, "floppy disk"},
4069 	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	1, "IPI"},
4070 	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	1, "RAID"},
4071 	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	1, "ATA (ADMA)"},
4072 	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	1, "SATA"},
4073 	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	1, "SAS"},
4074 	{PCIC_STORAGE,		PCIS_STORAGE_NVM,	1, "NVM"},
4075 	{PCIC_NETWORK,		-1,			1, "network"},
4076 	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	1, "ethernet"},
4077 	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	1, "token ring"},
4078 	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	1, "fddi"},
4079 	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	1, "ATM"},
4080 	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	1, "ISDN"},
4081 	{PCIC_DISPLAY,		-1,			1, "display"},
4082 	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	1, "VGA"},
4083 	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	1, "XGA"},
4084 	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	1, "3D"},
4085 	{PCIC_MULTIMEDIA,	-1,			1, "multimedia"},
4086 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	1, "video"},
4087 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	1, "audio"},
4088 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	1, "telephony"},
4089 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	1, "HDA"},
4090 	{PCIC_MEMORY,		-1,			1, "memory"},
4091 	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	1, "RAM"},
4092 	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	1, "flash"},
4093 	{PCIC_BRIDGE,		-1,			1, "bridge"},
4094 	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	1, "HOST-PCI"},
4095 	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	1, "PCI-ISA"},
4096 	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	1, "PCI-EISA"},
4097 	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	1, "PCI-MCA"},
4098 	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	1, "PCI-PCI"},
4099 	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	1, "PCI-PCMCIA"},
4100 	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	1, "PCI-NuBus"},
4101 	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	1, "PCI-CardBus"},
4102 	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	1, "PCI-RACEway"},
4103 	{PCIC_SIMPLECOMM,	-1,			1, "simple comms"},
4104 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	1, "UART"},	/* could detect 16550 */
4105 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	1, "parallel port"},
4106 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	1, "multiport serial"},
4107 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	1, "generic modem"},
4108 	{PCIC_BASEPERIPH,	-1,			0, "base peripheral"},
4109 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	1, "interrupt controller"},
4110 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	1, "DMA controller"},
4111 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	1, "timer"},
4112 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	1, "realtime clock"},
4113 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	1, "PCI hot-plug controller"},
4114 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	1, "SD host controller"},
4115 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_IOMMU,	1, "IOMMU"},
4116 	{PCIC_INPUTDEV,		-1,			1, "input device"},
4117 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	1, "keyboard"},
4118 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4119 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	1, "mouse"},
4120 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	1, "scanner"},
4121 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	1, "gameport"},
4122 	{PCIC_DOCKING,		-1,			1, "docking station"},
4123 	{PCIC_PROCESSOR,	-1,			1, "processor"},
4124 	{PCIC_SERIALBUS,	-1,			1, "serial bus"},
4125 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	1, "FireWire"},
4126 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	1, "AccessBus"},
4127 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	1, "SSA"},
4128 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	1, "USB"},
4129 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	1, "Fibre Channel"},
4130 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	0, "SMBus"},
4131 	{PCIC_WIRELESS,		-1,			1, "wireless controller"},
4132 	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	1, "iRDA"},
4133 	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	1, "IR"},
4134 	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	1, "RF"},
4135 	{PCIC_INTELLIIO,	-1,			1, "intelligent I/O controller"},
4136 	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	1, "I2O"},
4137 	{PCIC_SATCOM,		-1,			1, "satellite communication"},
4138 	{PCIC_SATCOM,		PCIS_SATCOM_TV,		1, "sat TV"},
4139 	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	1, "sat audio"},
4140 	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	1, "sat voice"},
4141 	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	1, "sat data"},
4142 	{PCIC_CRYPTO,		-1,			1, "encrypt/decrypt"},
4143 	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	1, "network/computer crypto"},
4144 	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	1, "entertainment crypto"},
4145 	{PCIC_DASP,		-1,			0, "dasp"},
4146 	{PCIC_DASP,		PCIS_DASP_DPIO,		1, "DPIO module"},
/*
 * Bus probe_nomatch method: announce a child no driver claimed.  Prefer
 * a description from the loaded vendor database; otherwise look up a
 * generic class/subclass description in pci_nomatch_tab (printed only
 * under bootverbose when the entry's report flag is 0).  The child's
 * config state is then saved (and it may be powered down, per
 * pci_cfg_save's setstate=1).  NOTE(review): extract is elided; code
 * lines left byte-identical.
 */
4151 pci_probe_nomatch(device_t dev, device_t child)
4154 	const char *cp, *scp;
4158 	 * Look for a listing for this device in a loaded device database.
4161 	if ((device = pci_describe_device(child)) != NULL) {
4162 	device_printf(dev, "<%s>", device);
4163 	free(device, M_DEVBUF);
4166 	 * Scan the class/subclass descriptions for a general
4171 	for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4172 	if (pci_nomatch_tab[i].class == pci_get_class(child)) {
4173 	if (pci_nomatch_tab[i].subclass == -1) {
4174 	cp = pci_nomatch_tab[i].desc;
4175 	report = pci_nomatch_tab[i].report;
4176 	} else if (pci_nomatch_tab[i].subclass ==
4177 	pci_get_subclass(child)) {
4178 	scp = pci_nomatch_tab[i].desc;
4179 	report = pci_nomatch_tab[i].report;
4183 	if (report || bootverbose) {
4184 	device_printf(dev, "<%s%s%s>",
4186 	((cp != NULL) && (scp != NULL)) ? ", " : "",
4190 	if (report || bootverbose) {
4191 	printf(" at device %d.%d (no driver attached)\n",
4192 	pci_get_slot(child), pci_get_function(child));
4194 	pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Bus child_detached method: reclaim anything a detached (or failed)
 * child driver leaked — active IRQs first, then MSI/MSI-X vectors, then
 * memory, I/O port, and PCI bus-number resources — warning about each
 * leak, and finally save the child's config state.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4198 pci_child_detached(device_t dev, device_t child)
4200 	struct pci_devinfo *dinfo;
4201 	struct resource_list *rl;
4203 	dinfo = device_get_ivars(child);
4204 	rl = &dinfo->resources;
4207 	 * Have to deallocate IRQs before releasing any MSI messages and
4208 	 * have to release MSI messages before deallocating any memory
4211 	if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4212 	pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4213 	if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4214 	pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
4215 	(void)pci_release_msi(child);
4217 	if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4218 	pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
4219 	if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4220 	pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
4222 	if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4223 	pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
4226 	pci_cfg_save(child, dinfo, 1);
4230 * Parse the PCI device database, if loaded, and return a pointer to a
4231 * description of the device.
4233 * The database is flat text formatted as follows:
4235 * Any line not in a valid format is ignored.
4236 * Lines are terminated with newline '\n' characters.
4238 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4241 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4242 * - devices cannot be listed without a corresponding VENDOR line.
4243 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4244 * another TAB, then the device name.
4248 * Assuming (ptr) points to the beginning of a line in the database,
4249 * return the vendor or device and description of the next entry.
4250 * The value of (vendor) or (device) inappropriate for the entry type
4251 * is set to -1. Returns nonzero at the end of the database.
4253  * Note that this is not fully robust in the face of corrupt data;
4254  * we attempt to safeguard against this by appending a newline to
4255  * the end of the database when we initialize it.
/*
 * Parse one vendor-database line at *ptr: a vendor line fills *vendor,
 * a TAB-prefixed device line fills *device, and *desc gets the text;
 * *ptr is advanced past the line.  Nonzero return signals end of data.
 * NOTE(review): "%80[^\n]" can store up to 80 chars plus NUL — 81 bytes
 * — into a destination the caller allocates with malloc(80); looks like
 * an off-by-one, confirm buffer sizes upstream.  Extract is elided; code
 * lines left byte-identical.
 */
4258 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
4267 	left = pci_vendordata_size - (cp - pci_vendordata);
4275 	sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
4279 	sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4282 	/* skip to next line */
4283 	while (*cp != '\n' && left > 0) {
4292 	/* skip to next line */
4293 	while (*cp != '\n' && left > 0) {
4297 	if (*cp == '\n' && left > 0)
/*
 * Build a malloc'd (M_DEVBUF) "vendor, device" description string for
 * dev from the loaded vendor database, or NULL if no database/no match/
 * no memory.  Caller frees the returned string.  If the vendor matches
 * but the device does not, the numeric device ID is substituted.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4304 pci_describe_device(device_t dev)
4307 	char *desc, *vp, *dp, *line;
4309 	desc = vp = dp = NULL;
4312 	 * If we have no vendor data, we can't do anything.
4314 	if (pci_vendordata == NULL)
4318 	 * Scan the vendor data looking for this device
4320 	line = pci_vendordata;
4321 	if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4324 	if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4326 	if (vendor == pci_get_vendor(dev))
4329 	if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4332 	if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4340 	if (device == pci_get_device(dev))
4344 	snprintf(dp, 80, "0x%x", pci_get_device(dev));
/* "+ 3" covers the ", " separator and the trailing NUL. */
4345 	if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4347 	sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus read_ivar method: return cached config-header fields from the
 * child's pcicfgregs.  PCI_IVAR_ETHADDR always "fails" (NULL result);
 * MINGNT/MAXLAT are only valid for header type 0 devices.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4357 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4359 	struct pci_devinfo *dinfo;
4362 	dinfo = device_get_ivars(child);
4366 	case PCI_IVAR_ETHADDR:
4368 	 * The generic accessor doesn't deal with failure, so
4369 	 * we set the return value, then return an error.
4371 	*((uint8_t **) result) = NULL;
4373 	case PCI_IVAR_SUBVENDOR:
4374 	*result = cfg->subvendor;
4376 	case PCI_IVAR_SUBDEVICE:
4377 	*result = cfg->subdevice;
4379 	case PCI_IVAR_VENDOR:
4380 	*result = cfg->vendor;
4382 	case PCI_IVAR_DEVICE:
4383 	*result = cfg->device;
4385 	case PCI_IVAR_DEVID:
/* devid packs device in the high 16 bits, vendor in the low 16. */
4386 	*result = (cfg->device << 16) | cfg->vendor;
4388 	case PCI_IVAR_CLASS:
4389 	*result = cfg->baseclass;
4391 	case PCI_IVAR_SUBCLASS:
4392 	*result = cfg->subclass;
4394 	case PCI_IVAR_PROGIF:
4395 	*result = cfg->progif;
4397 	case PCI_IVAR_REVID:
4398 	*result = cfg->revid;
4400 	case PCI_IVAR_INTPIN:
4401 	*result = cfg->intpin;
4404 	*result = cfg->intline;
4406 	case PCI_IVAR_DOMAIN:
4407 	*result = cfg->domain;
4413 	*result = cfg->slot;
4415 	case PCI_IVAR_FUNCTION:
4416 	*result = cfg->func;
4418 	case PCI_IVAR_CMDREG:
4419 	*result = cfg->cmdreg;
4421 	case PCI_IVAR_CACHELNSZ:
4422 	*result = cfg->cachelnsz;
4424 	case PCI_IVAR_MINGNT:
/* min_gnt/max_lat exist only in the type 0 config header. */
4425 	if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4429 	*result = cfg->mingnt;
4431 	case PCI_IVAR_MAXLAT:
4432 	if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4436 	*result = cfg->maxlat;
4438 	case PCI_IVAR_LATTIMER:
4439 	*result = cfg->lattimer;
/*
 * Bus write_ivar method: only PCI_IVAR_INTPIN is writable; all other
 * identification ivars are read-only and return EINVAL.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4448 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4450 	struct pci_devinfo *dinfo;
4452 	dinfo = device_get_ivars(child);
4455 	case PCI_IVAR_INTPIN:
4456 	dinfo->cfg.intpin = value;
4458 	case PCI_IVAR_ETHADDR:
4459 	case PCI_IVAR_SUBVENDOR:
4460 	case PCI_IVAR_SUBDEVICE:
4461 	case PCI_IVAR_VENDOR:
4462 	case PCI_IVAR_DEVICE:
4463 	case PCI_IVAR_DEVID:
4464 	case PCI_IVAR_CLASS:
4465 	case PCI_IVAR_SUBCLASS:
4466 	case PCI_IVAR_PROGIF:
4467 	case PCI_IVAR_REVID:
4469 	case PCI_IVAR_DOMAIN:
4472 	case PCI_IVAR_FUNCTION:
4473 	return (EINVAL);	/* disallow for now */
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print each device's selector (domain:bus:dev:func) plus class, card
 * and chip IDs, revision, and header type.  Honors db_pager_quit so the
 * listing can be aborted from the debugger pager.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4480 #include "opt_ddb.h"
4482 #include <ddb/ddb.h>
4483 #include <sys/cons.h>
4486  * List resources based on pci map registers, used for within ddb
4489 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4491 	struct pci_devinfo *dinfo;
4492 	struct devlist *devlist_head;
4495 	int i, error, none_count;
4498 	/* get the head of the device queue */
4499 	devlist_head = &pci_devq;
4502 	 * Go through the list of devices and print out devices
4504 	for (error = 0, i = 0,
4505 	dinfo = STAILQ_FIRST(devlist_head);
4506 	(dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4507 	dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4509 	/* Populate pd_name and pd_unit */
4512 	name = device_get_name(dinfo->cfg.dev);
4515 	db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4516 	"chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4517 	(name && *name) ? name : "none",
4518 	(name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4520 	p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4521 	p->pc_sel.pc_func, (p->pc_class << 16) |
4522 	(p->pc_subclass << 8) | p->pc_progif,
4523 	(p->pc_subdevice << 16) | p->pc_subvendor,
4524 	(p->pc_device << 16) | p->pc_vendor,
4525 	p->pc_revid, p->pc_hdr);
/*
 * Lazily reserve the resource backing a BAR: size the BAR (by reading it
 * back, or from a previously recorded failed allocation), sanity-check
 * that the requested type matches memory vs. I/O, override the caller's
 * count/alignment with the BAR's true size, reserve the range, and write
 * the assigned base address back into the BAR.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4530 static struct resource *
4531 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4532 u_long start, u_long end, u_long count, u_int flags)
4534 	struct pci_devinfo *dinfo = device_get_ivars(child);
4535 	struct resource_list *rl = &dinfo->resources;
4536 	struct resource *res;
4538 	pci_addr_t map, testval;
4542 	pm = pci_find_bar(child, *rid);
4544 	/* This is a BAR that we failed to allocate earlier. */
4545 	mapsize = pm->pm_size;
4549 	 * Weed out the bogons, and figure out how large the
4550 	 * BAR/map is.  BARs that read back 0 here are bogus
4551 	 * and unimplemented.  Note: atapci in legacy mode are
4552 	 * special and handled elsewhere in the code.  If you
4553 	 * have a atapci device in legacy mode and it fails
4554 	 * here, that other code is broken.
4556 	pci_read_bar(child, *rid, &map, &testval);
4559 	 * Determine the size of the BAR and ignore BARs with a size
4560 	 * of 0.  Device ROM BARs use a different mask value.
4562 	if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4563 	mapsize = pci_romsize(testval);
4565 	mapsize = pci_mapsize(testval);
4568 	pm = pci_add_bar(child, *rid, map, mapsize);
4571 	if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4572 	if (type != SYS_RES_MEMORY) {
4575 	"child %s requested type %d for rid %#x,"
4576 	" but the BAR says it is an memio\n",
4577 	device_get_nameunit(child), type, *rid);
4581 	if (type != SYS_RES_IOPORT) {
4584 	"child %s requested type %d for rid %#x,"
4585 	" but the BAR says it is an ioport\n",
4586 	device_get_nameunit(child), type, *rid);
4592 	 * For real BARs, we need to override the size that
4593 	 * the driver requests, because that's what the BAR
4594 	 * actually uses and we would otherwise have a
4595 	 * situation where we might allocate the excess to
4596 	 * another driver, which won't work.
4598 	count = (pci_addr_t)1 << mapsize;
4599 	if (RF_ALIGNMENT(flags) < mapsize)
4600 	flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4601 	if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4602 	flags |= RF_PREFETCHABLE;
4605 	 * Allocate enough resource, and then write back the
4606 	 * appropriate BAR for that resource.
4608 	resource_list_add(rl, type, *rid, start, end, count);
/* Reserve but do not activate; activation happens at alloc time. */
4609 	res = resource_list_reserve(rl, dev, child, type, rid, start, end,
4610 	count, flags & ~RF_ACTIVE);
4612 	resource_list_delete(rl, type, *rid);
4613 	device_printf(child,
4614 	"%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4615 	count, *rid, type, start, end);
4619 	device_printf(child,
4620 	"Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4621 	count, *rid, type, rman_get_start(res));
4622 	map = rman_get_start(res);
4623 	pci_write_bar(child, pm, map);
/*
 * Bus alloc_resource method with lazy BAR allocation.  Non-direct
 * children are forwarded up the tree.  For IRQs: disallow the legacy
 * INTx rid once MSI/MSI-X vectors are allocated, and route an interrupt
 * on demand if none is valid yet.  Bridge window registers are not BARs
 * and pass straight up; otherwise an unreserved BAR is reserved via
 * pci_reserve_map() before the resource-list allocation.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4629 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4630 u_long start, u_long end, u_long count, u_int flags)
4632 	struct pci_devinfo *dinfo;
4633 	struct resource_list *rl;
4634 	struct resource_list_entry *rle;
4635 	struct resource *res;
4638 	if (device_get_parent(child) != dev)
4639 	return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4640 	type, rid, start, end, count, flags));
4643 	 * Perform lazy resource allocation
4645 	dinfo = device_get_ivars(child);
4646 	rl = &dinfo->resources;
4649 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
4651 	return (pci_alloc_secbus(dev, child, rid, start, end, count,
4656 	 * Can't alloc legacy interrupt once MSI messages have
4659 	if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4660 	cfg->msix.msix_alloc > 0))
4664 	 * If the child device doesn't have an interrupt
4665 	 * routed and is deserving of an interrupt, try to
4668 	if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4670 	pci_assign_interrupt(dev, child, 0);
4672 	case SYS_RES_IOPORT:
4673 	case SYS_RES_MEMORY:
4676 	 * PCI-PCI bridge I/O window resources are not BARs.
4677 	 * For those allocations just pass the request up the
4680 	if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4682 	case PCIR_IOBASEL_1:
4683 	case PCIR_MEMBASE_1:
4684 	case PCIR_PMBASEL_1:
4686 	 * XXX: Should we bother creating a resource
4689 	return (bus_generic_alloc_resource(dev, child,
4690 	type, rid, start, end, count, flags));
4694 	/* Reserve resources for this BAR if needed. */
4695 	rle = resource_list_find(rl, type, *rid);
4697 	res = pci_reserve_map(dev, child, type, rid, start, end,
4703 	return (resource_list_alloc(rl, dev, child, type, rid,
4704 	start, end, count, flags));
/*
 * Bus release_resource method: forward non-direct children up the tree,
 * pass bridge-window (non-BAR) releases to the generic implementation,
 * and release everything else through the child's resource list.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4708 pci_release_resource(device_t dev, device_t child, int type, int rid,
4711 	struct pci_devinfo *dinfo;
4712 	struct resource_list *rl;
4715 	if (device_get_parent(child) != dev)
4716 	return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4719 	dinfo = device_get_ivars(child);
4723 	 * PCI-PCI bridge I/O window resources are not BARs.  For
4724 	 * those allocations just pass the request up the tree.
4726 	if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
4727 	(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
4729 	case PCIR_IOBASEL_1:
4730 	case PCIR_MEMBASE_1:
4731 	case PCIR_PMBASEL_1:
4732 	return (bus_generic_release_resource(dev, child, type,
4738 	rl = &dinfo->resources;
4739 	return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * Bus activate_resource method: after generic activation, for direct
 * children enable the matching decode (PCI_ENABLE_IO) in the command
 * register, and for device ROM BARs also set PCIM_BIOS_ENABLE in the
 * BAR itself.  NOTE(review): extract is elided; code left byte-identical.
 */
4743 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4746 	struct pci_devinfo *dinfo;
4749 	error = bus_generic_activate_resource(dev, child, type, rid, r);
4753 	/* Enable decoding in the command register when activating BARs. */
4754 	if (device_get_parent(child) == dev) {
4755 	/* Device ROMs need their decoding explicitly enabled. */
4756 	dinfo = device_get_ivars(child);
4757 	if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4758 	pci_write_bar(child, pci_find_bar(child, rid),
4759 	rman_get_start(r) | PCIM_BIOS_ENABLE);
4761 	case SYS_RES_IOPORT:
4762 	case SYS_RES_MEMORY:
4763 	error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus deactivate_resource method: after generic deactivation, clear the
 * ROM enable bit for device ROM BARs of direct children (the inverse of
 * pci_activate_resource's ROM handling).
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4771 pci_deactivate_resource(device_t dev, device_t child, int type,
4772 int rid, struct resource *r)
4774 	struct pci_devinfo *dinfo;
4777 	error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4781 	/* Disable decoding for device ROMs. */
4782 	if (device_get_parent(child) == dev) {
4783 	dinfo = device_get_ivars(child);
4784 	if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4785 	pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Detach (if attached) and destroy a child device: turn off memory and
 * I/O decoding, release every resource on its list (warning about, and
 * force-releasing, any still active/busy), free the list, and delete the
 * child from the bus.  NOTE(review): extract is elided; code lines left
 * byte-identical.
 */
4792 pci_delete_child(device_t dev, device_t child)
4794 	struct resource_list_entry *rle;
4795 	struct resource_list *rl;
4796 	struct pci_devinfo *dinfo;
4798 	dinfo = device_get_ivars(child);
4799 	rl = &dinfo->resources;
4801 	if (device_is_attached(child))
4802 	device_detach(child);
4804 	/* Turn off access to resources we're about to free */
4805 	pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4806 	PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4808 	/* Free all allocated resources */
4809 	STAILQ_FOREACH(rle, rl, link) {
4811 	if (rman_get_flags(rle->res) & RF_ACTIVE ||
4812 	resource_list_busy(rl, rle->type, rle->rid)) {
4813 	pci_printf(&dinfo->cfg,
4814 	"Resource still owned, oops. "
4815 	"(type=%d, rid=%d, addr=%lx)\n",
4816 	rle->type, rle->rid,
4817 	rman_get_start(rle->res));
4818 	bus_release_resource(child, rle->type, rle->rid,
4821 	resource_list_unreserve(rl, dev, child, rle->type,
4825 	resource_list_free(rl);
4827 	device_delete_child(dev, child);
/*
 * Bus delete_resource method: remove a (type, rid) entry from a direct
 * child's resource list, refusing (with a warning) if the resource is
 * still active or busy; otherwise unreserve and delete it.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
4832 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4834 	struct pci_devinfo *dinfo;
4835 	struct resource_list *rl;
4836 	struct resource_list_entry *rle;
4838 	if (device_get_parent(child) != dev)
4841 	dinfo = device_get_ivars(child);
4842 	rl = &dinfo->resources;
4843 	rle = resource_list_find(rl, type, rid);
4848 	if (rman_get_flags(rle->res) & RF_ACTIVE ||
4849 	resource_list_busy(rl, type, rid)) {
4850 	device_printf(dev, "delete_resource: "
4851 	"Resource still owned by child, oops. "
4852 	"(type=%d, rid=%d, addr=%lx)\n",
4853 	type, rid, rman_get_start(rle->res));
4856 	resource_list_unreserve(rl, dev, child, type, rid);
4858 	resource_list_delete(rl, type, rid);
/* Bus get_resource_list method: return the child's resource list. */
4861 struct resource_list *
4862 pci_get_resource_list (device_t dev, device_t child)
4864 	struct pci_devinfo *dinfo = device_get_ivars(child);
4866 	return (&dinfo->resources);
/* Bus get_dma_tag method: hand out the bus-wide DMA tag from the softc. */
4870 pci_get_dma_tag(device_t bus, device_t dev)
4872 	struct pci_softc *sc = device_get_softc(bus);
4874 	return (sc->sc_dma_tag);
/*
 * Config-space read method: delegate to the parent bridge's
 * PCIB_READ_CONFIG using the child's cached bus/slot/func selector.
 */
4878 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4880 	struct pci_devinfo *dinfo = device_get_ivars(child);
4881 	pcicfgregs *cfg = &dinfo->cfg;
4883 	return (PCIB_READ_CONFIG(device_get_parent(dev),
4884 	cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Config-space write method: delegate to the parent bridge's
 * PCIB_WRITE_CONFIG using the child's cached bus/slot/func selector.
 */
4888 pci_write_config_method(device_t dev, device_t child, int reg,
4889 uint32_t val, int width)
4891 	struct pci_devinfo *dinfo = device_get_ivars(child);
4892 	pcicfgregs *cfg = &dinfo->cfg;
4894 	PCIB_WRITE_CONFIG(device_get_parent(dev),
4895 	cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Format the child's bus location ("slot=N function=M") into buf. */
4899 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4903 	snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4904 	pci_get_function(child));
/*
 * Format the child's plug-and-play identification (vendor/device/
 * subvendor/subdevice IDs and the packed class code) into buf for
 * devmatch-style driver matching.
 */
4909 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4912 	struct pci_devinfo *dinfo;
4915 	dinfo = device_get_ivars(child);
4917 	snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4918 	"subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4919 	cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/* Ask the parent bridge to route this child's legacy INTx interrupt. */
4925 pci_assign_interrupt_method(device_t dev, device_t child)
4927 	struct pci_devinfo *dinfo = device_get_ivars(child);
4928 	pcicfgregs *cfg = &dinfo->cfg;
4930 	return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev control node, and load the vendor database; on unload,
 * destroy the control node.  NOTE(review): extract is elided; code lines
 * left byte-identical.
 */
4935 pci_modevent(module_t mod, int what, void *arg)
4937 	static struct cdev *pci_cdev;
4941 	STAILQ_INIT(&pci_devq);
4943 	pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4945 	pci_load_vendor_data();
4949 	destroy_dev(pci_cdev);
/*
 * Restore the PCI Express capability registers saved by
 * pci_cfg_save_pcie().  Link/slot/root control registers are written
 * only when the capability version (>1) or port type indicates they
 * exist.  NOTE(review): extract is elided; code lines left byte-identical.
 */
4957 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
4959 #define WREG(n, v)	pci_write_config(dev, pos + (n), (v), 2)
4960 	struct pcicfg_pcie *cfg;
4963 	cfg = &dinfo->cfg.pcie;
4964 	pos = cfg->pcie_location;
4966 	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
4968 	WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
4970 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4971 	cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
4972 	cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
4973 	WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
4975 	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4976 	(cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
4977 	(cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
4978 	WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
4980 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4981 	cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
4982 	WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
4985 	WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
4986 	WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
4987 	WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
/* Restore the saved PCI-X command register. */
4993 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
4995 	pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
4996 	dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a type 0 device's saved config state: bring it back to D0
 * (BARs and other registers reset when leaving D3), rewrite the BARs and
 * the writable type 0 header fields, then restore PCIe/PCI-X capability
 * registers and re-enable MSI/MSI-X where present.  Bridges (type 1/2)
 * and unknown header types are skipped.  NOTE(review): extract is
 * elided; code lines left byte-identical.
 */
5000 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
5004 	 * Only do header type 0 devices.  Type 1 devices are bridges,
5005 	 * which we know need special treatment.  Type 2 devices are
5006 	 * cardbus bridges which also require special treatment.
5007 	 * Other types are unknown, and we err on the side of safety
5010 	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5014 	 * Restore the device to full power mode.  We must do this
5015 	 * before we restore the registers because moving from D3 to
5016 	 * D0 will cause the chip's BARs and some other registers to
5017 	 * be reset to some unknown power on reset values.  Cut down
5018 	 * the noise on boot by doing nothing if we are already in
5021 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
5022 	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5023 	pci_restore_bars(dev);
5024 	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
5025 	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
5026 	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
5027 	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
5028 	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
5029 	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
5030 	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
5031 	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
5032 	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
5035 	 * Restore extended capabilities for PCI-Express and PCI-X
5037 	if (dinfo->cfg.pcie.pcie_location != 0)
5038 	pci_cfg_restore_pcie(dev, dinfo);
5039 	if (dinfo->cfg.pcix.pcix_location != 0)
5040 	pci_cfg_restore_pcix(dev, dinfo);
5042 	/* Restore MSI and MSI-X configurations if they are present. */
5043 	if (dinfo->cfg.msi.msi_location != 0)
5044 	pci_resume_msi(dev);
5045 	if (dinfo->cfg.msix.msix_location != 0)
5046 	pci_resume_msix(dev);
/*
 * Save the PCI Express capability registers (mirror of
 * pci_cfg_restore_pcie): link/slot/root control registers are read only
 * when the capability version (>1) or port type indicates they exist.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
5050 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
5052 #define RREG(n)	pci_read_config(dev, pos + (n), 2)
5053 	struct pcicfg_pcie *cfg;
5056 	cfg = &dinfo->cfg.pcie;
5057 	pos = cfg->pcie_location;
5059 	cfg->pcie_flags = RREG(PCIER_FLAGS);
5061 	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5063 	cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
5065 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5066 	cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5067 	cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5068 	cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
5070 	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5071 	(cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5072 	(cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5073 	cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
5075 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5076 	cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5077 	cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
5080 	cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5081 	cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5082 	cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
/* Save the PCI-X command register for later restore. */
5088 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5090 	dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5091 	dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * Save a type 0 device's writable config-header fields (refreshing the
 * cached copies, since drivers and the bus may write registers behind
 * our back), plus PCIe/PCI-X capability state.  When setstate is
 * nonzero, optionally power the driverless device down to D3 according
 * to the pci_do_power_nodriver policy — never for display, memory, or
 * base-peripheral devices, where powering down is known to misbehave.
 * NOTE(review): extract is elided; code lines left byte-identical.
 */
5095 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5101 	 * Only do header type 0 devices.  Type 1 devices are bridges, which
5102 	 * we know need special treatment.  Type 2 devices are cardbus bridges
5103 	 * which also require special treatment.  Other types are unknown, and
5104 	 * we err on the side of safety by ignoring them.  Powering down
5105 	 * bridges should not be undertaken lightly.
5107 	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5111 	 * Some drivers apparently write to these registers w/o updating our
5112 	 * cached copy.  No harm happens if we update the copy, so do so here
5113 	 * so we can restore them.  The COMMAND register is modified by the
5114 	 * bus w/o updating the cache.  This should represent the normally
5115 	 * writable portion of the 'defined' part of type 0 headers.  In
5116 	 * theory we also need to save/restore the PCI capability structures
5117 	 * we know about, but apart from power we don't know any that are
5120 	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5121 	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5122 	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5123 	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5124 	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5125 	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5126 	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5127 	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5128 	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5129 	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5130 	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5131 	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5132 	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5133 	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5134 	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
5136 	if (dinfo->cfg.pcie.pcie_location != 0)
5137 	pci_cfg_save_pcie(dev, dinfo);
5139 	if (dinfo->cfg.pcix.pcix_location != 0)
5140 	pci_cfg_save_pcix(dev, dinfo);
5143 	 * don't set the state for display devices, base peripherals and
5144 	 * memory devices since bad things happen when they are powered down.
5145 	 * We should (a) have drivers that can easily detach and (b) use
5146 	 * generic drivers for these devices so that some device actually
5147 	 * attaches.  We need to make sure that when we implement (a) we don't
5148 	 * power the device down on a reattach.
5150 	cls = pci_get_class(dev);
5153 	switch (pci_do_power_nodriver)
5155 	case 0:		/* NO powerdown at all */
5157 	case 1:		/* Conservative about what to power down */
5158 	if (cls == PCIC_STORAGE)
5161 	case 2:		/* Agressive about what to power down */
5162 	if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5163 	cls == PCIC_BASEPERIPH)
5166 	case 3:		/* Power down everything */
5170 	 * PCI spec says we can only go into D3 state from D0 state.
5171 	 * Transition from D[12] into D0 before going to D3 state.
5173 	ps = pci_get_powerstate(dev);
5174 	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5175 	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5176 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5177 	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5180 /* Wrapper APIs suitable for device driver use. */
5182 pci_save_state(device_t dev)
5184 struct pci_devinfo *dinfo;
5186 dinfo = device_get_ivars(dev);
5187 pci_cfg_save(dev, dinfo, 0);
5191 pci_restore_state(device_t dev)
5193 struct pci_devinfo *dinfo;
5195 dinfo = device_get_ivars(dev);
5196 pci_cfg_restore(dev, dinfo);
5200 pci_get_rid_method(device_t dev, device_t child)
5203 return (PCIB_GET_RID(device_get_parent(dev), child));
5206 /* Find the upstream port of a given PCI device in a root complex. */
5208 pci_find_pcie_root_port(device_t dev)
5210 struct pci_devinfo *dinfo;
5211 devclass_t pci_class;
5214 pci_class = devclass_find("pci");
5215 KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
5216 ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
5219 * Walk the bridge hierarchy until we find a PCI-e root
5220 * port or a non-PCI device.
5223 bus = device_get_parent(dev);
5224 KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
5225 device_get_nameunit(dev)));
5227 pcib = device_get_parent(bus);
5228 KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
5229 device_get_nameunit(bus)));
5232 * pcib's parent must be a PCI bus for this to be a
5235 if (device_get_devclass(device_get_parent(pcib)) != pci_class)
5238 dinfo = device_get_ivars(pcib);
5239 if (dinfo->cfg.pcie.pcie_location != 0 &&
5240 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)