2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
/*
 * PCIR_IS_BIOS(cfg, reg): true when config register 'reg' is the expansion
 * ROM BAR for the device's header type (PCIR_BIOS for type-0 devices,
 * PCIR_BIOS_1 for type-1 PCI-PCI bridges).  Restored to well-formed C;
 * the extracted listing had stray line-number prefixes on every line.
 */
#define	PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) ||	\
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
/*
 * Forward declarations for static helpers defined later in this file.
 * NOTE(review): this listing is a partial extraction -- each line carries a
 * stray decimal prefix from the original file's line numbering and several
 * prototype continuation lines are missing; verify against the pristine
 * source before building.
 */
77 static int pci_has_quirk(uint32_t devid, int quirk);
78 static pci_addr_t pci_mapbase(uint64_t mapreg);
79 static const char *pci_maptype(uint64_t mapreg);
80 static int pci_mapsize(uint64_t testval);
81 static int pci_maprange(uint64_t mapreg);
82 static pci_addr_t pci_rombase(uint64_t mapreg);
83 static int pci_romsize(uint64_t testval);
84 static void pci_fixancient(pcicfgregs *cfg);
85 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
87 static int pci_porten(device_t dev);
88 static int pci_memen(device_t dev);
89 static void pci_assign_interrupt(device_t bus, device_t dev,
/* NOTE(review): continuation line(s) of this prototype were dropped. */
91 static int pci_add_map(device_t bus, device_t dev, int reg,
92 struct resource_list *rl, int force, int prefetch);
93 static int pci_probe(device_t dev);
94 static int pci_attach(device_t dev);
96 static int pci_detach(device_t dev);
98 static void pci_load_vendor_data(void);
99 static int pci_describe_parse_line(char **ptr, int *vendor,
100 int *device, char **desc);
101 static char *pci_describe_device(device_t dev);
102 static int pci_modevent(module_t mod, int what, void *arg);
103 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
/* NOTE(review): continuation line of this prototype was dropped. */
105 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
106 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
107 int reg, uint32_t *data);
109 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
110 int reg, uint32_t data);
112 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
113 static void pci_mask_msix(device_t dev, u_int index);
114 static void pci_unmask_msix(device_t dev, u_int index);
115 static int pci_msi_blacklisted(void);
116 static int pci_msix_blacklisted(void);
117 static void pci_resume_msi(device_t dev);
118 static void pci_resume_msix(device_t dev);
119 static int pci_remap_intr_method(device_t bus, device_t dev,
122 static uint16_t pci_get_rid_method(device_t dev, device_t child);
/*
 * newbus method table for the PCI bus driver, followed by the driver
 * class definition and module registration glue.
 * NOTE(review): device_detach appears twice below (pci_detach and
 * bus_generic_detach); in the pristine source those come from mutually
 * exclusive preprocessor branches whose #if/#else/#endif lines were
 * dropped from this extraction -- confirm before building.
 */
124 static device_method_t pci_methods[] = {
125 /* Device interface */
126 DEVMETHOD(device_probe, pci_probe),
127 DEVMETHOD(device_attach, pci_attach),
129 DEVMETHOD(device_detach, pci_detach),
131 DEVMETHOD(device_detach, bus_generic_detach),
133 DEVMETHOD(device_shutdown, bus_generic_shutdown),
134 DEVMETHOD(device_suspend, pci_suspend),
135 DEVMETHOD(device_resume, pci_resume),
/* Bus interface: child printing, ivars, interrupts, resources. */
138 DEVMETHOD(bus_print_child, pci_print_child),
139 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
140 DEVMETHOD(bus_read_ivar, pci_read_ivar),
141 DEVMETHOD(bus_write_ivar, pci_write_ivar),
142 DEVMETHOD(bus_driver_added, pci_driver_added),
143 DEVMETHOD(bus_setup_intr, pci_setup_intr),
144 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
146 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
147 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
148 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
149 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
150 DEVMETHOD(bus_delete_resource, pci_delete_resource),
151 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
152 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
153 DEVMETHOD(bus_release_resource, pci_release_resource),
154 DEVMETHOD(bus_activate_resource, pci_activate_resource),
155 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
156 DEVMETHOD(bus_child_detached, pci_child_detached),
157 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
158 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
159 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
/* PCI interface: config space, power, VPD, capabilities, MSI/MSI-X. */
162 DEVMETHOD(pci_read_config, pci_read_config_method),
163 DEVMETHOD(pci_write_config, pci_write_config_method),
164 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
165 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
166 DEVMETHOD(pci_enable_io, pci_enable_io_method),
167 DEVMETHOD(pci_disable_io, pci_disable_io_method),
168 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
169 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
170 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
171 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
172 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
173 DEVMETHOD(pci_find_cap, pci_find_cap_method),
174 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
175 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
176 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
177 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
178 DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
179 DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
180 DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
181 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
182 DEVMETHOD(pci_release_msi, pci_release_msi_method),
183 DEVMETHOD(pci_msi_count, pci_msi_count_method),
184 DEVMETHOD(pci_msix_count, pci_msix_count_method),
185 DEVMETHOD(pci_get_rid, pci_get_rid_method),
186 DEVMETHOD(pci_child_added, pci_child_added_method),
/* NOTE(review): the table terminator line was dropped from this extraction. */
191 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
193 static devclass_t pci_devclass;
194 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
195 MODULE_VERSION(pci, 1);
/* Vendor description database loaded by pci_load_vendor_data(), if any. */
197 static char *pci_vendordata;
198 static size_t pci_vendordata_size;
/*
 * NOTE(review): the 'devid' line below is the first member of
 * 'struct pci_quirk'; the struct's opening declaration and its remaining
 * members (type, arg1, arg2) were dropped from this extraction.
 */
201 uint32_t devid; /* Vendor/device of the card */
/* Quirk classes matched by pci_has_quirk(). */
203 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
204 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
205 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
206 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
207 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
208 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
/*
 * Table of devices with known quirks, keyed by the 32-bit
 * device-id<<16|vendor-id value.  Terminated by a zero devid entry
 * (terminator line dropped from this extraction).
 */
213 static const struct pci_quirk pci_quirks[] = {
214 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
215 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
216 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
217 /* As does the Serverworks OSB4 (the SMBus mapping register) */
218 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
221 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
222 * or the CMIC-SL (AKA ServerWorks GC_LE).
224 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
225 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
228 * MSI doesn't work on earlier Intel chipsets including
229 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
231 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
233 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
234 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
243 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
246 * MSI-X allocation doesn't work properly for devices passed through
247 * by VMware up to at least ESXi 5.1.
249 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
250 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
253 * Some virtualization environments emulate an older chipset
254 * but support MSI just fine. QEMU uses the Intel 82440.
256 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
259 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
260 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
261 * It prevents us from attaching hpet(4) when the bit is unset.
262 * Note this quirk only affects SB600 revision A13 and earlier.
263 * For SB600 A21 and later, firmware must set the bit to hide it.
264 * For SB700 and later, it is unused and hardcoded to zero.
266 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
269 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
270 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
271 * command register is set.
273 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
274 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
275 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
278 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
279 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
281 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
282 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
283 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
284 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
285 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
286 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
291 /* map register information */
292 #define PCI_MAPMEM 0x01 /* memory map */
293 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
294 #define PCI_MAPPORT 0x04 /* port map */
/* Global device list plus generation/count bookkeeping for pciconf(8). */
296 struct devlist pci_devq;
297 uint32_t pci_generation;
298 uint32_t pci_numdevs = 0;
299 static int pcie_chipset, pcix_chipset;
/*
 * Loader tunables and sysctl knobs under hw.pci controlling I/O enable,
 * BAR reallocation, power management, MSI/MSI-X, USB takeover, and ARI.
 * NOTE(review): several description strings and #else/#endif lines are
 * truncated in this extraction (e.g. the do_power_nodriver string ends
 * mid-sentence and the usb_takeover #if/#else pair lost its directives).
 */
302 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
304 static int pci_enable_io_modes = 1;
305 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
306 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
307 &pci_enable_io_modes, 1,
308 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
309 enable these bits correctly. We'd like to do this all the time, but there\n\
310 are some peripherals that this causes problems with.");
312 static int pci_do_realloc_bars = 0;
313 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
314 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
315 &pci_do_realloc_bars, 0,
316 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
318 static int pci_do_power_nodriver = 0;
319 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
320 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
321 &pci_do_power_nodriver, 0,
322 "Place a function into D3 state when no driver attaches to it. 0 means\n\
323 disable. 1 means conservatively place devices into D3 state. 2 means\n\
324 agressively place devices into D3 state. 3 means put absolutely everything\n\
327 int pci_do_power_resume = 1;
328 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
329 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
330 &pci_do_power_resume, 1,
331 "Transition from D3 -> D0 on resume.");
333 int pci_do_power_suspend = 1;
334 TUNABLE_INT("hw.pci.do_power_suspend", &pci_do_power_suspend);
335 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RW,
336 &pci_do_power_suspend, 1,
337 "Transition from D0 -> D3 on suspend.");
339 static int pci_do_msi = 1;
340 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
341 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
342 "Enable support for MSI interrupts");
344 static int pci_do_msix = 1;
345 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
346 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
347 "Enable support for MSI-X interrupts");
349 static int pci_honor_msi_blacklist = 1;
350 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
351 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
352 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
/* x86 defaults to taking USB controllers over from the BIOS early. */
354 #if defined(__i386__) || defined(__amd64__)
355 static int pci_usb_takeover = 1;
357 static int pci_usb_takeover = 0;
359 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
360 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
361 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
362 Disable this if you depend on BIOS emulation of USB devices, that is\n\
363 you use USB devices (like keyboard or mouse) but do not load USB drivers");
365 static int pci_clear_bars;
366 TUNABLE_INT("hw.pci.clear_bars", &pci_clear_bars);
367 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
368 "Ignore firmware-assigned resources for BARs.");
370 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
371 static int pci_clear_buses;
372 TUNABLE_INT("hw.pci.clear_buses", &pci_clear_buses);
373 SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
374 "Ignore firmware-assigned bus numbers.");
377 static int pci_enable_ari = 1;
378 TUNABLE_INT("hw.pci.enable_ari", &pci_enable_ari);
379 SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
380 0, "Enable support for PCIe Alternative RID Interpretation");
383 pci_has_quirk(uint32_t devid, int quirk)
385 const struct pci_quirk *q;
387 for (q = &pci_quirks[0]; q->devid; q++) {
388 if (q->devid == devid && q->type == quirk)
394 /* Find a device_t by bus/slot/function in domain 0 */
397 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
400 return (pci_find_dbsf(0, bus, slot, func));
403 /* Find a device_t by domain/bus/slot/function */
406 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
408 struct pci_devinfo *dinfo;
410 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
411 if ((dinfo->cfg.domain == domain) &&
412 (dinfo->cfg.bus == bus) &&
413 (dinfo->cfg.slot == slot) &&
414 (dinfo->cfg.func == func)) {
415 return (dinfo->cfg.dev);
422 /* Find a device_t by vendor/device ID */
425 pci_find_device(uint16_t vendor, uint16_t device)
427 struct pci_devinfo *dinfo;
429 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
430 if ((dinfo->cfg.vendor == vendor) &&
431 (dinfo->cfg.device == device)) {
432 return (dinfo->cfg.dev);
440 pci_find_class(uint8_t class, uint8_t subclass)
442 struct pci_devinfo *dinfo;
444 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
445 if (dinfo->cfg.baseclass == class &&
446 dinfo->cfg.subclass == subclass) {
447 return (dinfo->cfg.dev);
455 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
460 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
463 retval += vprintf(fmt, ap);
468 /* return base address of memory or port map */
471 pci_mapbase(uint64_t mapreg)
474 if (PCI_BAR_MEM(mapreg))
475 return (mapreg & PCIM_BAR_MEM_BASE);
477 return (mapreg & PCIM_BAR_IO_BASE);
480 /* return map type of memory or port map */
483 pci_maptype(uint64_t mapreg)
486 if (PCI_BAR_IO(mapreg))
488 if (mapreg & PCIM_BAR_MEM_PREFETCH)
489 return ("Prefetchable Memory");
493 /* return log2 of map size decoded for memory or port map */
496 pci_mapsize(uint64_t testval)
500 testval = pci_mapbase(testval);
503 while ((testval & 1) == 0)
512 /* return base address of device ROM */
515 pci_rombase(uint64_t mapreg)
518 return (mapreg & PCIM_BIOS_ADDR_MASK);
521 /* return log2 of map size decided for device ROM */
524 pci_romsize(uint64_t testval)
528 testval = pci_rombase(testval);
531 while ((testval & 1) == 0)
540 /* return log2 of address range supported by map register */
543 pci_maprange(uint64_t mapreg)
547 if (PCI_BAR_IO(mapreg))
550 switch (mapreg & PCIM_BAR_MEM_TYPE) {
551 case PCIM_BAR_MEM_32:
554 case PCIM_BAR_MEM_1MB:
557 case PCIM_BAR_MEM_64:
564 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
567 pci_fixancient(pcicfgregs *cfg)
569 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
572 /* PCI to PCI bridges use header type 1 */
573 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
574 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
577 /* extract header type specific config data */
/*
 * Fill header-type dependent fields of *cfg (subvendor/subdevice,
 * min_gnt/max_lat, BAR count) by reading config space through the
 * parent bridge.  REG() reads a register of width 'w' bytes.
 * NOTE(review): the per-case 'break' statements, the function's opening
 * declaration line, and the closing braces / #undef were dropped from
 * this extraction -- verify against the pristine source.
 */
580 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
582 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
583 switch (cfg->hdrtype & PCIM_HDRTYPE) {
584 case PCIM_HDRTYPE_NORMAL:
585 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
586 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
587 cfg->mingnt = REG(PCIR_MINGNT, 1);
588 cfg->maxlat = REG(PCIR_MAXLAT, 1);
589 cfg->nummaps = PCI_MAXMAPS_0;
591 case PCIM_HDRTYPE_BRIDGE:
592 cfg->nummaps = PCI_MAXMAPS_1;
594 case PCIM_HDRTYPE_CARDBUS:
595 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
596 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
597 cfg->nummaps = PCI_MAXMAPS_2;
603 /* read configuration header into pcicfgregs structure */
/*
 * Allocate a pci_devinfo of 'size' bytes for the function at d:b:s:f,
 * populate its config registers from the parent bridge, read the
 * capability list when present, link it onto the global device list and
 * mirror the values into the pciconf(8) 'conf' record.
 * NOTE(review): the function's declaration line, domain/bus/slot/func
 * assignments, and several closing braces were dropped from this
 * extraction -- verify against the pristine source.
 */
605 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
607 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
608 pcicfgregs *cfg = NULL;
609 struct pci_devinfo *devlist_entry;
610 struct devlist *devlist_head;
612 devlist_head = &pci_devq;
614 devlist_entry = NULL;
/* All-ones vendor/device means no function present at this address. */
616 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
617 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
618 if (devlist_entry == NULL)
621 cfg = &devlist_entry->cfg;
/* Snapshot the standard header registers. */
627 cfg->vendor = REG(PCIR_VENDOR, 2);
628 cfg->device = REG(PCIR_DEVICE, 2);
629 cfg->cmdreg = REG(PCIR_COMMAND, 2);
630 cfg->statreg = REG(PCIR_STATUS, 2);
631 cfg->baseclass = REG(PCIR_CLASS, 1);
632 cfg->subclass = REG(PCIR_SUBCLASS, 1);
633 cfg->progif = REG(PCIR_PROGIF, 1);
634 cfg->revid = REG(PCIR_REVID, 1);
635 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
636 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
637 cfg->lattimer = REG(PCIR_LATTIMER, 1);
638 cfg->intpin = REG(PCIR_INTPIN, 1);
639 cfg->intline = REG(PCIR_INTLINE, 1);
/* Split the multi-function bit out of the header type byte. */
641 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
642 cfg->hdrtype &= ~PCIM_MFDEV;
643 STAILQ_INIT(&cfg->maps);
646 pci_hdrtypedata(pcib, b, s, f, cfg);
648 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
649 pci_read_cap(pcib, cfg);
651 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror into the pciconf(8)-visible record. */
653 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
654 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
655 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
656 devlist_entry->conf.pc_sel.pc_func = cfg->func;
657 devlist_entry->conf.pc_hdr = cfg->hdrtype;
659 devlist_entry->conf.pc_subvendor = cfg->subvendor;
660 devlist_entry->conf.pc_subdevice = cfg->subdevice;
661 devlist_entry->conf.pc_vendor = cfg->vendor;
662 devlist_entry->conf.pc_device = cfg->device;
664 devlist_entry->conf.pc_class = cfg->baseclass;
665 devlist_entry->conf.pc_subclass = cfg->subclass;
666 devlist_entry->conf.pc_progif = cfg->progif;
667 devlist_entry->conf.pc_revid = cfg->revid;
672 return (devlist_entry);
/*
 * Walk the standard PCI capability list of *cfg and record the location
 * and key fields of each recognized capability (power management,
 * HyperTransport, MSI, MSI-X, VPD, subvendor, PCI-X, PCI-express).
 * NOTE(review): this extraction is missing the function's declaration
 * line, several break statements, #endif lines, and closing braces --
 * verify against the pristine source before building.
 */
677 pci_read_cap(device_t pcib, pcicfgregs *cfg)
679 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
680 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
681 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
685 int ptr, nextptr, ptrptr;
/* Locate the capability pointer register for this header type. */
687 switch (cfg->hdrtype & PCIM_HDRTYPE) {
688 case PCIM_HDRTYPE_NORMAL:
689 case PCIM_HDRTYPE_BRIDGE:
690 ptrptr = PCIR_CAP_PTR;
692 case PCIM_HDRTYPE_CARDBUS:
693 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
696 return; /* no extended capabilities support */
698 nextptr = REG(ptrptr, 1); /* sanity check? */
701 * Read capability entries.
703 while (nextptr != 0) {
706 printf("illegal PCI extended capability offset %d\n",
710 /* Find the next entry */
712 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
714 /* Process this entry */
715 switch (REG(ptr + PCICAP_ID, 1)) {
716 case PCIY_PMG: /* PCI power management */
717 if (cfg->pp.pp_cap == 0) {
718 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
719 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
720 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
721 if ((nextptr - ptr) > PCIR_POWER_DATA)
722 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
725 case PCIY_HT: /* HyperTransport */
726 /* Determine HT-specific capability type. */
727 val = REG(ptr + PCIR_HT_COMMAND, 2);
729 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
730 cfg->ht.ht_slave = ptr;
732 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
733 switch (val & PCIM_HTCMD_CAP_MASK) {
734 case PCIM_HTCAP_MSI_MAPPING:
735 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
736 /* Sanity check the mapping window. */
737 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
740 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
742 if (addr != MSI_INTEL_ADDR_BASE)
744 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
745 cfg->domain, cfg->bus,
746 cfg->slot, cfg->func,
749 addr = MSI_INTEL_ADDR_BASE;
751 cfg->ht.ht_msimap = ptr;
752 cfg->ht.ht_msictrl = val;
753 cfg->ht.ht_msiaddr = addr;
758 case PCIY_MSI: /* PCI MSI */
759 cfg->msi.msi_location = ptr;
760 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
761 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
762 PCIM_MSICTRL_MMC_MASK)>>1);
764 case PCIY_MSIX: /* PCI MSI-X */
765 cfg->msix.msix_location = ptr;
766 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
767 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
768 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
769 val = REG(ptr + PCIR_MSIX_TABLE, 4);
770 cfg->msix.msix_table_bar = PCIR_BAR(val &
772 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
773 val = REG(ptr + PCIR_MSIX_PBA, 4);
774 cfg->msix.msix_pba_bar = PCIR_BAR(val &
776 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
778 case PCIY_VPD: /* PCI Vital Product Data */
779 cfg->vpd.vpd_reg = ptr;
782 /* Should always be true. */
783 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
784 PCIM_HDRTYPE_BRIDGE) {
785 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
786 cfg->subvendor = val & 0xffff;
787 cfg->subdevice = val >> 16;
790 case PCIY_PCIX: /* PCI-X */
792 * Assume we have a PCI-X chipset if we have
793 * at least one PCI-PCI bridge with a PCI-X
794 * capability. Note that some systems with
795 * PCI-express or HT chipsets might match on
796 * this check as well.
798 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
801 cfg->pcix.pcix_location = ptr;
803 case PCIY_EXPRESS: /* PCI-express */
805 * Assume we have a PCI-express chipset if we have
806 * at least one PCI-express device.
809 cfg->pcie.pcie_location = ptr;
810 val = REG(ptr + PCIER_FLAGS, 2);
811 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
818 #if defined(__powerpc__)
820 * Enable the MSI mapping window for all HyperTransport
821 * slaves. PCI-PCI bridges have their windows enabled via
824 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
825 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
827 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
828 cfg->domain, cfg->bus, cfg->slot, cfg->func);
829 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
830 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
834 /* REG and WREG use carry through to next functions */
838 * PCI Vital Product Data
/* Max polls of the VPD address register before declaring a timeout. */
841 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one aligned 32-bit VPD dword at 'reg' into *data: write the
 * address, poll bit 15 (flag) of the VPD address register until the
 * device sets it, then read the data register.
 * NOTE(review): declaration lines, the timeout-check body, and return
 * statements were dropped from this extraction.
 */
844 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
846 int count = PCI_VPD_TIMEOUT;
848 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
850 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
852 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
855 DELAY(1); /* limit looping */
857 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one aligned 32-bit VPD dword: write data, then address with the
 * flag bit set, and poll until the device clears the flag.
 */
864 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
866 int count = PCI_VPD_TIMEOUT;
868 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
870 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
871 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
872 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
875 DELAY(1); /* limit looping */
882 #undef PCI_VPD_TIMEOUT
/*
 * Cursor state for sequential byte reads of a device's VPD area.
 * NOTE(review): the struct's member declarations were dropped from this
 * extraction; only the opening line remains.
 */
884 struct vpd_readstate {
/*
 * Return the next VPD byte via *data, refilling the 32-bit read buffer
 * from the device when it is exhausted.
 * NOTE(review): the sequence "®" below is a mojibake of "&reg" --
 * restore it (and the dropped declaration/buffer-management lines) from
 * the pristine source before building.
 */
894 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
899 if (vrs->bytesinval == 0) {
900 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
902 vrs->val = le32toh(reg);
904 byte = vrs->val & 0xff;
907 vrs->val = vrs->val >> 8;
908 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array and the writable (VPD-W)
 * keyword array, validating the RV checksum along the way.  Implemented
 * as a state machine over vpd_nextbyte().
 * NOTE(review): this extraction dropped the function declaration line,
 * many braces, 'break'/'state = ...' transitions, and the error/exit
 * labels -- the visible lines are a guide only; verify against the
 * pristine source.
 */
918 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
920 struct vpd_readstate vrs;
925 int alloc, off; /* alloc/off for RO/W arrays */
931 /* init vpd reader */
939 name = remain = i = 0; /* shut up stupid gcc */
940 alloc = off = 0; /* shut up stupid gcc */
941 dflen = 0; /* shut up stupid gcc */
944 if (vpd_nextbyte(&vrs, &byte)) {
949 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
950 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
951 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
954 case 0: /* item name */
956 if (vpd_nextbyte(&vrs, &byte2)) {
961 if (vpd_nextbyte(&vrs, &byte2)) {
965 remain |= byte2 << 8;
/* VPD area is at most 0x7f dwords; reject larger lengths. */
966 if (remain > (0x7f*4 - vrs.off)) {
969 "invalid VPD data, remain %#x\n",
975 name = (byte >> 3) & 0xf;
978 case 0x2: /* String */
979 cfg->vpd.vpd_ident = malloc(remain + 1,
987 case 0x10: /* VPD-R */
990 cfg->vpd.vpd_ros = malloc(alloc *
991 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
995 case 0x11: /* VPD-W */
998 cfg->vpd.vpd_w = malloc(alloc *
999 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1003 default: /* Invalid data, abort */
1009 case 1: /* Identifier String */
1010 cfg->vpd.vpd_ident[i++] = byte;
1013 cfg->vpd.vpd_ident[i] = '\0';
1018 case 2: /* VPD-R Keyword Header */
1020 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1021 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1022 M_DEVBUF, M_WAITOK | M_ZERO);
1024 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1025 if (vpd_nextbyte(&vrs, &byte2)) {
1029 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1030 if (vpd_nextbyte(&vrs, &byte2)) {
1034 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1036 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1039 * if this happens, we can't trust the rest
1042 pci_printf(cfg, "bad keyword length: %d\n",
1047 } else if (dflen == 0) {
/* Zero-length keyword still gets a 1-byte value buffer. */
1048 cfg->vpd.vpd_ros[off].value = malloc(1 *
1049 sizeof(*cfg->vpd.vpd_ros[off].value),
1050 M_DEVBUF, M_WAITOK);
1051 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1053 cfg->vpd.vpd_ros[off].value = malloc(
1055 sizeof(*cfg->vpd.vpd_ros[off].value),
1056 M_DEVBUF, M_WAITOK);
1059 /* keep in sync w/ state 3's transistions */
1060 if (dflen == 0 && remain == 0)
1062 else if (dflen == 0)
1068 case 3: /* VPD-R Keyword Value */
1069 cfg->vpd.vpd_ros[off].value[i++] = byte;
1070 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1071 "RV", 2) == 0 && cksumvalid == -1) {
1077 "bad VPD cksum, remain %hhu\n",
1086 /* keep in sync w/ state 2's transistions */
1088 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1089 if (dflen == 0 && remain == 0) {
1090 cfg->vpd.vpd_rocnt = off;
/* Shrink the array to the number of entries actually read. */
1091 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1092 off * sizeof(*cfg->vpd.vpd_ros),
1093 M_DEVBUF, M_WAITOK | M_ZERO);
1095 } else if (dflen == 0)
1105 case 5: /* VPD-W Keyword Header */
1107 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1108 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1109 M_DEVBUF, M_WAITOK | M_ZERO);
1111 cfg->vpd.vpd_w[off].keyword[0] = byte;
1112 if (vpd_nextbyte(&vrs, &byte2)) {
1116 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1117 if (vpd_nextbyte(&vrs, &byte2)) {
1121 cfg->vpd.vpd_w[off].len = dflen = byte2;
1122 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1123 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1124 sizeof(*cfg->vpd.vpd_w[off].value),
1125 M_DEVBUF, M_WAITOK);
1128 /* keep in sync w/ state 6's transistions */
1129 if (dflen == 0 && remain == 0)
1131 else if (dflen == 0)
1137 case 6: /* VPD-W Keyword Value */
1138 cfg->vpd.vpd_w[off].value[i++] = byte;
1141 /* keep in sync w/ state 5's transistions */
1143 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1144 if (dflen == 0 && remain == 0) {
1145 cfg->vpd.vpd_wcnt = off;
1146 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1147 off * sizeof(*cfg->vpd.vpd_w),
1148 M_DEVBUF, M_WAITOK | M_ZERO);
1150 } else if (dflen == 0)
1155 pci_printf(cfg, "invalid state: %d\n", state);
/* Bad checksum or parse error: discard the read-only data. */
1161 if (cksumvalid == 0 || state < -1) {
1162 /* read-only data bad, clean up */
1163 if (cfg->vpd.vpd_ros != NULL) {
1164 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1165 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1166 free(cfg->vpd.vpd_ros, M_DEVBUF);
1167 cfg->vpd.vpd_ros = NULL;
1171 /* I/O error, clean up */
1172 pci_printf(cfg, "failed to read VPD data.\n");
1173 if (cfg->vpd.vpd_ident != NULL) {
1174 free(cfg->vpd.vpd_ident, M_DEVBUF);
1175 cfg->vpd.vpd_ident = NULL;
1177 if (cfg->vpd.vpd_w != NULL) {
1178 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1179 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1180 free(cfg->vpd.vpd_w, M_DEVBUF);
1181 cfg->vpd.vpd_w = NULL;
/* Mark VPD as parsed so later accessors skip the re-read. */
1184 cfg->vpd.vpd_cached = 1;
1190 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1192 struct pci_devinfo *dinfo = device_get_ivars(child);
1193 pcicfgregs *cfg = &dinfo->cfg;
1195 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1196 pci_read_vpd(device_get_parent(dev), cfg);
1198 *identptr = cfg->vpd.vpd_ident;
1200 if (*identptr == NULL)
1207 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1210 struct pci_devinfo *dinfo = device_get_ivars(child);
1211 pcicfgregs *cfg = &dinfo->cfg;
1214 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1215 pci_read_vpd(device_get_parent(dev), cfg);
1217 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1218 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1219 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1220 *vptr = cfg->vpd.vpd_ros[i].value;
1229 pci_fetch_vpd_list(device_t dev)
1231 struct pci_devinfo *dinfo = device_get_ivars(dev);
1232 pcicfgregs *cfg = &dinfo->cfg;
1234 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1235 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1240 * Find the requested HyperTransport capability and return the offset
1241 * in configuration space via the pointer provided. The function
1242 * returns 0 on success and an error code otherwise.
/*
 * NOTE(review): the declaration line, loop headers, capreg assignment,
 * and closing braces were dropped from this extraction -- the visible
 * lines show only the matching logic; verify against the pristine
 * source.
 */
1245 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1250 error = pci_find_cap(child, PCIY_HT, &ptr);
1255 * Traverse the capabilities list checking each HT capability
1256 * to see if it matches the requested HT capability.
1259 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
/* SLAVE/HOST match on the type bits only; others use the full mask. */
1260 if (capability == PCIM_HTCAP_SLAVE ||
1261 capability == PCIM_HTCAP_HOST)
1264 val &= PCIM_HTCMD_CAP_MASK;
1265 if (val == capability) {
1271 /* Skip to the next HT capability. */
1273 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1274 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1283 * Find the requested capability and return the offset in
1284 * configuration space via the pointer provided. The function returns
1285 * 0 on success and an error code otherwise.
pci_find_cap_method(device_t dev, device_t child, int capability,
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	/*
	 * Check the CAP_LIST bit of the PCI status register first.
	 */
	status = pci_read_config(child, PCIR_STATUS, 2);
	if (!(status & PCIM_STATUS_CAPPRESENT))
	/*
	 * Determine the start pointer of the capabilities list;
	 * it differs by header type.
	 */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
	case PCIM_HDRTYPE_BRIDGE:
	case PCIM_HDRTYPE_CARDBUS:
		ptr = PCIR_CAP_PTR_2;
		return (ENXIO);		/* no extended capabilities support */
	ptr = pci_read_config(child, ptr, 1);
	/*
	 * Traverse the capabilities list.
	 */
	if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
	ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1336 * Find the requested extended capability and return the offset in
1337 * configuration space via the pointer provided. The function returns
1338 * 0 on success and an error code otherwise.
pci_find_extcap_method(device_t dev, device_t child, int capability,
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	/* Only supported for PCI-express devices. */
	if (cfg->pcie.pcie_location == 0)
	ecap = pci_read_config(child, ptr, 4);
	/* An all-ones or zero header means no valid extended cap list. */
	if (ecap == 0xffffffff || ecap == 0)
	if (PCI_EXTCAP_ID(ecap) == capability) {
	/* Follow the next-pointer encoded in the capability header. */
	ptr = PCI_EXTCAP_NEXTPTR(ecap);
	ecap = pci_read_config(child, ptr, 4);
1373 * Support for MSI-X message interrupts.
/*
 * PCI_ENABLE_MSIX bus method: program one MSI-X table entry with the
 * given message address and data.
 */
pci_enable_msix_method(device_t dev, device_t child, u_int index,
    uint64_t address, uint32_t data)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	KASSERT(msix->msix_table_len > index, ("bogus index"))/* see caller */;
	/* Each MSI-X table entry is 16 bytes: addr lo, addr hi, data, ctrl. */
	offset = msix->msix_table_offset + index * 16;
	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
	bus_write_4(msix->msix_table_res, offset + 8, data);
	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(child, address);
/*
 * Mask a single MSI-X vector by setting the mask bit in its Vector
 * Control word.
 */
pci_mask_msix(device_t dev, u_int index)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;
	/*
	 * Bounds-checked against msix_msgnum (not msix_table_len) because
	 * alloc/resume mask every supported vector before the virtual
	 * table is sized.
	 */
	KASSERT(msix->msix_msgnum > index, ("bogus index"));
	/* Vector Control is the last dword of the 16-byte table entry. */
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	if (!(val & PCIM_MSIX_VCTRL_MASK)) {
		val |= PCIM_MSIX_VCTRL_MASK;
		bus_write_4(msix->msix_table_res, offset, val);
/*
 * Unmask a single MSI-X vector by clearing the mask bit in its Vector
 * Control word.
 */
pci_unmask_msix(device_t dev, u_int index)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;
	KASSERT(msix->msix_table_len > index, ("bogus index"));
	/* Vector Control is the last dword of the 16-byte table entry. */
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	if (val & PCIM_MSIX_VCTRL_MASK) {
		val &= ~PCIM_MSIX_VCTRL_MASK;
		bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return nonzero if the given MSI-X vector's bit is set in the
 * Pending Bit Array (PBA).
 */
pci_pending_msix(device_t dev, u_int index)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, bit;
	KASSERT(msix->msix_table_len > index, ("bogus index"));
	/* One bit per vector, packed 32 to a 4-byte PBA word. */
	offset = msix->msix_pba_offset + (index / 32) * 4;
	bit = 1 << index % 32;
	return (bus_read_4(msix->msix_pba_res, offset) & bit);
1439 * Restore MSI-X registers and table during resume. If MSI-X is
1440 * enabled then walk the virtual table to restore the actual MSI-X
pci_resume_msix(device_t dev)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	if (msix->msix_alloc > 0) {
		/* First, mask all vectors. */
		for (i = 0; i < msix->msix_msgnum; i++)
			pci_mask_msix(dev, i);
		/* Second, program any messages with at least one handler. */
		for (i = 0; i < msix->msix_table_len; i++) {
			mte = &msix->msix_table[i];
			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
			/* mte_vector is 1-based into msix_vectors[]. */
			mv = &msix->msix_vectors[mte->mte_vector - 1];
			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
			pci_unmask_msix(dev, i);
	/* Finally, restore the saved MSI-X control register value. */
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
1472 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1473 * returned in *count. After this function returns, each message will be
1474 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
pci_alloc_msix_method(device_t dev, device_t child, int *count)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irq, max;
	/* Don't let count == 0 get us into trouble. */
	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
	/* If MSI-X is blacklisted for this system, fail. */
	if (pci_msix_blacklisted())
	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || !pci_do_msix)
	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
	cfg->msix.msix_table_res = rle->res;
	/* The PBA may live in a different BAR than the table. */
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
	cfg->msix.msix_pba_res = rle->res;
	device_printf(child,
	    "attempting to allocate %d MSI-X vectors (%d supported)\n",
	    *count, cfg->msix.msix_msgnum);
	/* Never ask the parent for more vectors than the device supports. */
	max = min(*count, cfg->msix.msix_msgnum);
	for (i = 0; i < max; i++) {
		/* Allocate a message. */
		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
	device_printf(child, "using IRQ %lu for MSI-X\n",
	/*
	 * Be fancy and try to print contiguous runs of
	 * IRQ values as ranges.  'irq' is the previous IRQ.
	 * 'run' is true if we are in a range.
	 */
	device_printf(child, "using IRQs %lu", rle->start);
	for (i = 1; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources,
		    SYS_RES_IRQ, i + 1);
		/* Still in a run? */
		if (rle->start == irq + 1) {
		/* Finish previous range. */
		/* Start new range. */
		printf(",%lu", rle->start);
	/* Unfinished range? */
	printf(" for MSI-X\n");
	/* Mask all vectors. */
	for (i = 0; i < cfg->msix.msix_msgnum; i++)
		pci_mask_msix(child, i);
	/* Allocate and initialize vector data and virtual table. */
	cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		/* Default layout: rid i+1 maps to table slot i. */
		cfg->msix.msix_table[i].mte_vector = i + 1;
	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);
	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
1612 * By default, pci_alloc_msix() will assign the allocated IRQ
1613 * resources consecutively to the first N messages in the MSI-X table.
1614 * However, device drivers may want to use different layouts if they
1615 * either receive fewer messages than they asked for, or they wish to
1616 * populate the MSI-X table sparsely. This method allows the driver
1617 * to specify what layout it wants. It must be called after a
1618 * successful pci_alloc_msix() but before any of the associated
1619 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1621 * The 'vectors' array contains 'count' message vectors. The array
1622 * maps directly to the MSI-X table in that index 0 in the array
1623 * specifies the vector for the first message in the MSI-X table, etc.
1624 * The vector value in each array index can either be 0 to indicate
1625 * that no vector should be assigned to a message slot, or it can be a
1626 * number from 1 to N (where N is the count returned from a
 * successful call to pci_alloc_msix()) to indicate which message
1628 * vector (IRQ) to be used for the corresponding message.
1630 * On successful return, each message with a non-zero vector will have
1631 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1632 * 1. Additionally, if any of the IRQs allocated via the previous
1633 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1634 * will be freed back to the system automatically.
1636 * For example, suppose a driver has a MSI-X table with 6 messages and
1637 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1638 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1639 * C. After the call to pci_alloc_msix(), the device will be setup to
1640 * have an MSI-X table of ABC--- (where - means no vector assigned).
1641 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1642 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1643 * be freed back to the system. This device will also have valid
1644 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1646 * In any case, the SYS_RES_IRQ rid X will always map to the message
1647 * at MSI-X table index X - 1 and will only be valid if a vector is
1648 * assigned to that table entry.
1651 pci_remap_msix_method(device_t dev, device_t child, int count,
1652 const u_int *vectors)
1654 struct pci_devinfo *dinfo = device_get_ivars(child);
1655 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1656 struct resource_list_entry *rle;
1657 int i, irq, j, *used;
1660 * Have to have at least one message in the table but the
1661 * table can't be bigger than the actual MSI-X table in the
1664 if (count == 0 || count > msix->msix_msgnum)
1667 /* Sanity check the vectors. */
1668 for (i = 0; i < count; i++)
1669 if (vectors[i] > msix->msix_alloc)
1673 * Make sure there aren't any holes in the vectors to be used.
1674 * It's a big pain to support it, and it doesn't really make
1675 * sense anyway. Also, at least one vector must be used.
1677 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1679 for (i = 0; i < count; i++)
1680 if (vectors[i] != 0)
1681 used[vectors[i] - 1] = 1;
1682 for (i = 0; i < msix->msix_alloc - 1; i++)
1683 if (used[i] == 0 && used[i + 1] == 1) {
1684 free(used, M_DEVBUF);
1688 free(used, M_DEVBUF);
1692 /* Make sure none of the resources are allocated. */
1693 for (i = 0; i < msix->msix_table_len; i++) {
1694 if (msix->msix_table[i].mte_vector == 0)
1696 if (msix->msix_table[i].mte_handlers > 0)
1698 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1699 KASSERT(rle != NULL, ("missing resource"));
1700 if (rle->res != NULL)
1704 /* Free the existing resource list entries. */
1705 for (i = 0; i < msix->msix_table_len; i++) {
1706 if (msix->msix_table[i].mte_vector == 0)
1708 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1712 * Build the new virtual table keeping track of which vectors are
1715 free(msix->msix_table, M_DEVBUF);
1716 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1717 M_DEVBUF, M_WAITOK | M_ZERO);
1718 for (i = 0; i < count; i++)
1719 msix->msix_table[i].mte_vector = vectors[i];
1720 msix->msix_table_len = count;
1722 /* Free any unused IRQs and resize the vectors array if necessary. */
1723 j = msix->msix_alloc - 1;
1725 struct msix_vector *vec;
1727 while (used[j] == 0) {
1728 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1729 msix->msix_vectors[j].mv_irq);
1732 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1734 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1736 free(msix->msix_vectors, M_DEVBUF);
1737 msix->msix_vectors = vec;
1738 msix->msix_alloc = j + 1;
1740 free(used, M_DEVBUF);
1742 /* Map the IRQs onto the rids. */
1743 for (i = 0; i < count; i++) {
1744 if (vectors[i] == 0)
1746 irq = msix->msix_vectors[vectors[i]].mv_irq;
1747 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1752 device_printf(child, "Remapped MSI-X IRQs as: ");
1753 for (i = 0; i < count; i++) {
1756 if (vectors[i] == 0)
1760 msix->msix_vectors[vectors[i]].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child': disable MSI-X,
 * delete the SYS_RES_IRQ entries, and hand the IRQs back to the
 * parent bridge.
 */
pci_release_msix(device_t dev, device_t child)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	/* Do we have any messages to release? */
	if (msix->msix_alloc == 0)
	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
		if (msix->msix_table[i].mte_handlers > 0)
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL)
	/* Update control register to disable MSI-X. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
	/* Free the resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table_len = 0;
	/* Release the IRQs. */
	for (i = 0; i < msix->msix_alloc; i++)
		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
		    msix->msix_vectors[i].mv_irq);
	free(msix->msix_vectors, M_DEVBUF);
	msix->msix_alloc = 0;
1816 * Return the max supported MSI-X messages this device supports.
1817 * Basically, assuming the MD code can alloc messages, this function
1818 * should return the maximum value that pci_alloc_msix() can return.
1819 * Thus, it is subject to the tunables, etc.
pci_msix_count_method(device_t dev, device_t child)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	/* Report the device's message count only if MSI-X is usable. */
	if (pci_do_msix && msix->msix_location != 0)
		return (msix->msix_msgnum);
1833 * HyperTransport MSI mapping control
/*
 * Enable or disable the HyperTransport MSI mapping window for 'dev'.
 * A non-zero 'addr' enables the mapping (if the window covers the
 * address); addr == 0 disables it.
 */
pci_ht_map_msi(device_t dev, uint64_t addr)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_ht *ht = &dinfo->cfg.ht;
	/* Only map if the window base matches the message address (1MB granularity). */
	if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
	    ht->ht_msiaddr >> 20 == addr >> 20) {
		/* Enable MSI -> HT mapping. */
		ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
	if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
		/* Disable MSI -> HT mapping. */
		ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCI-express Max Read Request Size for 'dev' in bytes,
 * decoded from the Device Control register.
 */
pci_get_max_read_req(device_t dev)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	cap = dinfo->cfg.pcie.pcie_location;
	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
	val &= PCIEM_CTL_MAX_READ_REQUEST;
	/* Field encodes 128 << n, i.e. 2^(n + 7) bytes. */
	return (1 << (val + 7));
/*
 * Set the PCI-express Max Read Request Size for 'dev'.  'size' in
 * bytes is rounded down to a supported power of two before being
 * encoded into the Device Control register.
 */
pci_set_max_read_req(device_t dev, int size)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	cap = dinfo->cfg.pcie.pcie_location;
	/* Round down to the nearest power of two. */
	size = (1 << (fls(size) - 1));
	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
	val &= ~PCIEM_CTL_MAX_READ_REQUEST;
	/* Encode log2(size) - 7 into bits 14:12 of Device Control. */
	val |= (fls(size) - 8) << 12;
	pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
/*
 * Read a register from the device's PCI-express capability block.
 * Returns all ones if the device has no PCI-express capability.
 */
pcie_read_config(device_t dev, int reg, int width)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	cap = dinfo->cfg.pcie.pcie_location;
	return (0xffffffff);
	return (pci_read_config(dev, cap + reg, width));
/*
 * Write a register in the device's PCI-express capability block.
 * Silently does nothing if the device has no PCI-express capability.
 */
pcie_write_config(device_t dev, int reg, uint32_t value, int width)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	cap = dinfo->cfg.pcie.pcie_location;
	pci_write_config(dev, cap + reg, value, width);
1927 * Adjusts a PCI-e capability register by clearing the bits in mask
1928 * and setting the bits in (value & mask). Bits not set in mask are
1931 * Returns the old value on success or all ones on failure.
pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	cap = dinfo->cfg.pcie.pcie_location;
	/* No PCI-express capability: report failure as all ones. */
	return (0xffffffff);
	/* Read-modify-write: clear 'mask' bits, then set 'value & mask'. */
	old = pci_read_config(dev, cap + reg, width);
	new |= (value & mask);
	pci_write_config(dev, cap + reg, new, width);
1956 * Support for MSI message signalled interrupts.
/*
 * PCI_ENABLE_MSI bus method: program the child's MSI capability with
 * the given message address/data and enable MSI.
 */
pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	/* Write data and address values. */
	pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
	    address & 0xffffffff, 4);
	/* The data register offset differs for 64-bit capable devices. */
	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
		pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
		pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
	pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
	/* Enable MSI in the control register. */
	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(child, address);
/*
 * PCI_DISABLE_MSI bus method: disable MSI delivery for the child and
 * tear down any MSI -> HT mapping.
 */
pci_disable_msi_method(device_t dev, device_t child)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	/* Disable MSI -> HT mapping. */
	pci_ht_map_msi(child, 0);
	/* Disable MSI in the control register. */
	msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2002 * Restore MSI registers during resume. If MSI is enabled then
2003 * restore the data and address registers in addition to the control
pci_resume_msi(device_t dev)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	/* Only rewrite address/data if MSI was enabled before suspend. */
	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		/* 64-bit capable devices use a different data offset. */
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
	/* Always restore the saved control register. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * PCI_REMAP_INTR bus method: refresh the address/data programming for
 * the MSI or MSI-X message currently mapped to 'irq'.
 */
pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	/*
	 * Handle MSI first.  We try to find this IRQ among our list
	 * of MSI IRQs.  If we find it, we request updated address and
	 * data registers and apply the results.
	 */
	if (cfg->msi.msi_alloc > 0) {
		/* If we don't have any active handlers, nothing to do. */
		if (cfg->msi.msi_handlers == 0)
		for (i = 0; i < cfg->msi.msi_alloc; i++) {
			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
			if (rle->start == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				/* Reprogram while MSI is disabled. */
				pci_disable_msi(dev);
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(dev, addr, data);
	/*
	 * For MSI-X, we check to see if we have this IRQ.  If we do,
	 * we request the updated mapping info.  If that works, we go
	 * through all the slots that use this IRQ and update them.
	 */
	if (cfg->msix.msix_alloc > 0) {
		for (i = 0; i < cfg->msix.msix_alloc; i++) {
			mv = &cfg->msix.msix_vectors[i];
			if (mv->mv_irq == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				mv->mv_address = addr;
				/* Update every table slot using this vector. */
				for (j = 0; j < cfg->msix.msix_table_len; j++) {
					mte = &cfg->msix.msix_table[j];
					if (mte->mte_vector != i + 1)
					if (mte->mte_handlers == 0)
					/* Reprogram the slot while masked. */
					pci_mask_msix(dev, j);
					pci_enable_msix(dev, j, addr, data);
					pci_unmask_msix(dev, j);
2106 * Returns true if the specified device is blacklisted because MSI
pci_msi_device_blacklisted(device_t dev)
	/* Blacklist checks can be disabled by the tunable. */
	if (!pci_honor_msi_blacklist)
	return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2120 * Determine if MSI is blacklisted globally on this system. Currently,
2121 * we just check for blacklisted chipsets as represented by the
2122 * host-PCI bridge at device 0:0:0. In the future, it may become
2123 * necessary to check other system attributes, such as the kenv values
2124 * that give the motherboard manufacturer and model number.
pci_msi_blacklisted(void)
	if (!pci_honor_msi_blacklist)
	/* Blacklist all non-PCI-express and non-PCI-X chipsets. */
	if (!(pcie_chipset || pcix_chipset)) {
		if (vm_guest != VM_GUEST_NO) {
			/*
			 * Whitelist older chipsets in virtual
			 * machines known to support MSI.
			 */
			dev = pci_find_bsf(0, 0, 0);
			return (!pci_has_quirk(pci_get_devid(dev),
			    PCI_QUIRK_ENABLE_MSI_VM));
	/* Otherwise defer to the host bridge's per-device quirk. */
	dev = pci_find_bsf(0, 0, 0);
	return (pci_msi_device_blacklisted(dev));
2156 * Returns true if the specified device is blacklisted because MSI-X
2157 * doesn't work. Note that this assumes that if MSI doesn't work,
2158 * MSI-X doesn't either.
pci_msix_device_blacklisted(device_t dev)
	if (!pci_honor_msi_blacklist)
	if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
	/* If MSI is blacklisted, MSI-X is assumed broken as well. */
	return (pci_msi_device_blacklisted(dev));
2174 * Determine if MSI-X is blacklisted globally on this system. If MSI
2175 * is blacklisted, assume that MSI-X is as well. Check for additional
2176 * chipsets where MSI works but MSI-X does not.
pci_msix_blacklisted(void)
	if (!pci_honor_msi_blacklist)
	/* Check the host-PCI bridge at 0:0:0 for an MSI-X-only quirk. */
	dev = pci_find_bsf(0, 0, 0);
	if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
	    PCI_QUIRK_DISABLE_MSIX))
	/* Otherwise MSI-X is blacklisted exactly when MSI is. */
	return (pci_msi_blacklisted());
2195 * Attempt to allocate *count MSI messages. The actual number allocated is
2196 * returned in *count. After this function returns, each message will be
2197 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
pci_alloc_msi_method(device_t dev, device_t child, int *count)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irqs[32];
	/* Don't let count == 0 get us into trouble. */
	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
	device_printf(child,
	    "attempting to allocate %d MSI vectors (%d supported)\n",
	    *count, cfg->msi.msi_msgnum);
	/* Don't ask for more than the device supports. */
	actual = min(*count, cfg->msi.msi_msgnum);
	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);
	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
	/* Try to allocate N messages. */
	error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);
	device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
	/*
	 * Be fancy and try to print contiguous runs
	 * of IRQ values as ranges.  'run' is true if
	 * we are in a range.
	 */
	device_printf(child, "using IRQs %d", irqs[0]);
	for (i = 1; i < actual; i++) {
		/* Still in a run? */
		if (irqs[i] == irqs[i - 1] + 1) {
		/* Finish previous range. */
		printf("-%d", irqs[i - 1]);
		/* Start new range. */
		printf(",%d", irqs[i]);
	/* Unfinished range? */
	printf("-%d", irqs[actual - 1]);
	printf(" for MSI\n");
	/* Update control register with actual count. */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	/* Multiple Message Enable encodes log2(actual) in bits 6:4. */
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
2318 /* Release the MSI messages associated with this device. */
pci_release_msi_method(device_t dev, device_t child)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int error, i, irqs[32];
	/* Try MSI-X first. */
	error = pci_release_msix(dev, child);
	if (error != ENODEV)
	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
	/* irqs[] below is sized for the 32-message MSI maximum. */
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
		irqs[i] = rle->start;
	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	/* Update alloc count. */
2368 * Return the max supported MSI messages this device supports.
2369 * Basically, assuming the MD code can alloc messages, this function
2370 * should return the maximum value that pci_alloc_msi() can return.
2371 * Thus, it is subject to the tunables, etc.
pci_msi_count_method(device_t dev, device_t child)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	/* Report the device's message count only if MSI is usable. */
	if (pci_do_msi && msi->msi_location != 0)
		return (msi->msi_msgnum);
2384 /* free pcicfgregs structure and all depending data structures */
pci_freecfg(struct pci_devinfo *dinfo)
	struct devlist *devlist_head;
	struct pci_map *pm, *next;
	devlist_head = &pci_devq;
	/* Free any cached VPD strings (only present if VPD was read). */
	if (dinfo->cfg.vpd.vpd_reg) {
		free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
			free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
		free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
			free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
		free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
	/* Free the BAR map list. */
	STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
	free(dinfo, M_DEVBUF);
	/* increment the generation count */
	/* we're losing one device */
 * PCI power management
/*
 * PCI_SET_POWERSTATE bus method: transition 'child' to the requested
 * D-state via its power management capability.  Returns EOPNOTSUPP if
 * the device lacks the capability or the target state.
 */
pci_set_powerstate_method(device_t dev, device_t child, int state)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	int oldstate, highest, delay;
	if (cfg->pp.pp_cap == 0)
		return (EOPNOTSUPP);
	/*
	 * Optimize a no state change request away.  While it would be OK to
	 * write to the hardware in theory, some devices have shown odd
	 * behavior when going from D3 -> D3.
	 */
	oldstate = pci_get_powerstate(child);
	if (oldstate == state)
	/*
	 * The PCI power management specification states that after a state
	 * transition between PCI power states, system software must
	 * guarantee a minimal delay before the function accesses the device.
	 * Compute the worst case delay that we need to guarantee before we
	 * access the device.  Many devices will be responsive much more
	 * quickly than this delay, but there are some that don't respond
	 * instantly to state changes.  Transitions to/from D3 state require
	 * 10ms, while D2 requires 200us, and D0/1 require none.  The delay
	 * is done below with DELAY rather than a sleeper function because
	 * this function can be called from contexts where we cannot sleep.
	 */
	highest = (oldstate > state) ? oldstate : state;
	if (highest == PCI_POWERSTATE_D3)
	else if (highest == PCI_POWERSTATE_D2)
	status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
	    & ~PCIM_PSTAT_DMASK;
	case PCI_POWERSTATE_D0:
		status |= PCIM_PSTAT_D0;
	case PCI_POWERSTATE_D1:
		/* D1 and D2 support are optional; reject if not advertised. */
		if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
			return (EOPNOTSUPP);
		status |= PCIM_PSTAT_D1;
	case PCI_POWERSTATE_D2:
		if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
			return (EOPNOTSUPP);
		status |= PCIM_PSTAT_D2;
	case PCI_POWERSTATE_D3:
		status |= PCIM_PSTAT_D3;
	pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
	PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * PCI_GET_POWERSTATE bus method: return the child's current D-state as
 * read from its power management status register.
 */
pci_get_powerstate_method(device_t dev, device_t child)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	if (cfg->pp.pp_cap != 0) {
		status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
		switch (status & PCIM_PSTAT_DMASK) {
			result = PCI_POWERSTATE_D0;
			result = PCI_POWERSTATE_D1;
			result = PCI_POWERSTATE_D2;
			result = PCI_POWERSTATE_D3;
			result = PCI_POWERSTATE_UNKNOWN;
	/* No support, device is always at D0 */
	result = PCI_POWERSTATE_D0;
2528 * Some convenience functions for PCI device drivers.
/* Set 'bit' in the child's PCI command register (read-modify-write). */
static __inline void
pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
	PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Clear 'bit' in the child's PCI command register (read-modify-write). */
static __inline void
pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
	PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* PCI_ENABLE_BUSMASTER bus method: set the bus-master enable bit. */
pci_enable_busmaster_method(device_t dev, device_t child)
	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* PCI_DISABLE_BUSMASTER bus method: clear the bus-master enable bit. */
pci_disable_busmaster_method(device_t dev, device_t child)
	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * PCI_ENABLE_IO bus method: enable port or memory decoding for the
 * child, selected by 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
pci_enable_io_method(device_t dev, device_t child, int space)
	case SYS_RES_IOPORT:
		bit = PCIM_CMD_PORTEN;
	case SYS_RES_MEMORY:
		bit = PCIM_CMD_MEMEN;
	pci_set_command_bit(dev, child, bit);
/*
 * PCI_DISABLE_IO bus method: disable port or memory decoding for the
 * child, selected by 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
pci_disable_io_method(device_t dev, device_t child, int space)
	case SYS_RES_IOPORT:
		bit = PCIM_CMD_PORTEN;
	case SYS_RES_MEMORY:
		bit = PCIM_CMD_MEMEN;
	pci_clear_command_bit(dev, child, bit);
2604 * New style pci driver. Parent device is either a pci-host-bridge or a
2605 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2609 pci_print_verbose(struct pci_devinfo *dinfo)
2613 pcicfgregs *cfg = &dinfo->cfg;
2615 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2616 cfg->vendor, cfg->device, cfg->revid);
2617 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2618 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2619 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2620 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2622 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2623 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2624 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2625 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2626 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2627 if (cfg->intpin > 0)
2628 printf("\tintpin=%c, irq=%d\n",
2629 cfg->intpin +'a' -1, cfg->intline);
2630 if (cfg->pp.pp_cap) {
2633 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2634 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2635 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2636 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2637 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2638 status & PCIM_PSTAT_DMASK);
2640 if (cfg->msi.msi_location) {
2643 ctrl = cfg->msi.msi_ctrl;
2644 printf("\tMSI supports %d message%s%s%s\n",
2645 cfg->msi.msi_msgnum,
2646 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2647 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2648 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2650 if (cfg->msix.msix_location) {
2651 printf("\tMSI-X supports %d message%s ",
2652 cfg->msix.msix_msgnum,
2653 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2654 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2655 printf("in map 0x%x\n",
2656 cfg->msix.msix_table_bar);
2658 printf("in maps 0x%x and 0x%x\n",
2659 cfg->msix.msix_table_bar,
2660 cfg->msix.msix_pba_bar);
2666 pci_porten(device_t dev)
2668 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
2672 pci_memen(device_t dev)
2674 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
2678 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2680 struct pci_devinfo *dinfo;
2681 pci_addr_t map, testval;
2686 * The device ROM BAR is special. It is always a 32-bit
2687 * memory BAR. Bit 0 is special and should not be set when
2690 dinfo = device_get_ivars(dev);
2691 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2692 map = pci_read_config(dev, reg, 4);
2693 pci_write_config(dev, reg, 0xfffffffe, 4);
2694 testval = pci_read_config(dev, reg, 4);
2695 pci_write_config(dev, reg, map, 4);
2697 *testvalp = testval;
2701 map = pci_read_config(dev, reg, 4);
2702 ln2range = pci_maprange(map);
2704 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2707 * Disable decoding via the command register before
2708 * determining the BAR's length since we will be placing it in
2711 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2712 pci_write_config(dev, PCIR_COMMAND,
2713 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2716 * Determine the BAR's length by writing all 1's. The bottom
2717 * log_2(size) bits of the BAR will stick as 0 when we read
2720 pci_write_config(dev, reg, 0xffffffff, 4);
2721 testval = pci_read_config(dev, reg, 4);
2722 if (ln2range == 64) {
2723 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2724 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2728 * Restore the original value of the BAR. We may have reprogrammed
2729 * the BAR of the low-level console device and when booting verbose,
2730 * we need the console device addressable.
2732 pci_write_config(dev, reg, map, 4);
2734 pci_write_config(dev, reg + 4, map >> 32, 4);
2735 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2738 *testvalp = testval;
2742 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2744 struct pci_devinfo *dinfo;
2747 /* The device ROM BAR is always a 32-bit memory BAR. */
2748 dinfo = device_get_ivars(dev);
2749 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2752 ln2range = pci_maprange(pm->pm_value);
2753 pci_write_config(dev, pm->pm_reg, base, 4);
2755 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2756 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2758 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2759 pm->pm_reg + 4, 4) << 32;
2763 pci_find_bar(device_t dev, int reg)
2765 struct pci_devinfo *dinfo;
2768 dinfo = device_get_ivars(dev);
2769 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2770 if (pm->pm_reg == reg)
2777 pci_bar_enabled(device_t dev, struct pci_map *pm)
2779 struct pci_devinfo *dinfo;
2782 dinfo = device_get_ivars(dev);
2783 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2784 !(pm->pm_value & PCIM_BIOS_ENABLE))
2786 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2787 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2788 return ((cmd & PCIM_CMD_MEMEN) != 0);
2790 return ((cmd & PCIM_CMD_PORTEN) != 0);
2793 static struct pci_map *
2794 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2796 struct pci_devinfo *dinfo;
2797 struct pci_map *pm, *prev;
2799 dinfo = device_get_ivars(dev);
2800 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2802 pm->pm_value = value;
2804 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2805 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2807 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2808 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2812 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2814 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
2819 pci_restore_bars(device_t dev)
2821 struct pci_devinfo *dinfo;
2825 dinfo = device_get_ivars(dev);
2826 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2827 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2830 ln2range = pci_maprange(pm->pm_value);
2831 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2833 pci_write_config(dev, pm->pm_reg + 4,
2834 pm->pm_value >> 32, 4);
2839 * Add a resource based on a pci map register. Return 1 if the map
2840 * register is a 32bit map register or 2 if it is a 64bit register.
2843 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2844 int force, int prefetch)
2847 pci_addr_t base, map, testval;
2848 pci_addr_t start, end, count;
2849 int barlen, basezero, flags, maprange, mapsize, type;
2851 struct resource *res;
2854 * The BAR may already exist if the device is a CardBus card
2855 * whose CIS is stored in this BAR.
2857 pm = pci_find_bar(dev, reg);
2859 maprange = pci_maprange(pm->pm_value);
2860 barlen = maprange == 64 ? 2 : 1;
2864 pci_read_bar(dev, reg, &map, &testval);
2865 if (PCI_BAR_MEM(map)) {
2866 type = SYS_RES_MEMORY;
2867 if (map & PCIM_BAR_MEM_PREFETCH)
2870 type = SYS_RES_IOPORT;
2871 mapsize = pci_mapsize(testval);
2872 base = pci_mapbase(map);
2873 #ifdef __PCI_BAR_ZERO_VALID
2876 basezero = base == 0;
2878 maprange = pci_maprange(map);
2879 barlen = maprange == 64 ? 2 : 1;
2882 * For I/O registers, if bottom bit is set, and the next bit up
2883 * isn't clear, we know we have a BAR that doesn't conform to the
2884 * spec, so ignore it. Also, sanity check the size of the data
2885 * areas to the type of memory involved. Memory must be at least
2886 * 16 bytes in size, while I/O ranges must be at least 4.
2888 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2890 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2891 (type == SYS_RES_IOPORT && mapsize < 2))
2894 /* Save a record of this BAR. */
2895 pm = pci_add_bar(dev, reg, map, mapsize);
2897 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2898 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2899 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2900 printf(", port disabled\n");
2901 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2902 printf(", memory disabled\n");
2904 printf(", enabled\n");
2908 * If base is 0, then we have problems if this architecture does
2909 * not allow that. It is best to ignore such entries for the
2910 * moment. These will be allocated later if the driver specifically
2911 * requests them. However, some removable busses look better when
2912 * all resources are allocated, so allow '0' to be overriden.
2914 * Similarly treat maps whose values is the same as the test value
2915 * read back. These maps have had all f's written to them by the
2916 * BIOS in an attempt to disable the resources.
2918 if (!force && (basezero || map == testval))
2920 if ((u_long)base != base) {
2922 "pci%d:%d:%d:%d bar %#x too many address bits",
2923 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2924 pci_get_function(dev), reg);
2929 * This code theoretically does the right thing, but has
2930 * undesirable side effects in some cases where peripherals
2931 * respond oddly to having these bits enabled. Let the user
2932 * be able to turn them off (since pci_enable_io_modes is 1 by
2935 if (pci_enable_io_modes) {
2936 /* Turn on resources that have been left off by a lazy BIOS */
2937 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2938 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2939 cmd |= PCIM_CMD_PORTEN;
2940 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2942 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2943 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2944 cmd |= PCIM_CMD_MEMEN;
2945 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2948 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2950 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2954 count = (pci_addr_t)1 << mapsize;
2955 flags = RF_ALIGNMENT_LOG2(mapsize);
2957 flags |= RF_PREFETCHABLE;
2958 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
2959 start = 0; /* Let the parent decide. */
2963 end = base + count - 1;
2965 resource_list_add(rl, type, reg, start, end, count);
2968 * Try to allocate the resource for this BAR from our parent
2969 * so that this resource range is already reserved. The
2970 * driver for this device will later inherit this resource in
2971 * pci_alloc_resource().
2973 res = resource_list_reserve(rl, bus, dev, type, ®, start, end, count,
2975 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2977 * If the allocation fails, try to allocate a resource for
2978 * this BAR using any available range. The firmware felt
2979 * it was important enough to assign a resource, so don't
2980 * disable decoding if we can help it.
2982 resource_list_delete(rl, type, reg);
2983 resource_list_add(rl, type, reg, 0, ~0ul, count);
2984 res = resource_list_reserve(rl, bus, dev, type, ®, 0, ~0ul,
2989 * If the allocation fails, delete the resource list entry
2990 * and disable decoding for this device.
2992 * If the driver requests this resource in the future,
2993 * pci_reserve_map() will try to allocate a fresh
2996 resource_list_delete(rl, type, reg);
2997 pci_disable_io(dev, type);
3000 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
3001 pci_get_domain(dev), pci_get_bus(dev),
3002 pci_get_slot(dev), pci_get_function(dev), reg);
3004 start = rman_get_start(res);
3005 pci_write_bar(dev, pm, start);
3011  * For ATA devices we need to decide early what addressing mode to use.
3012  * Legacy demands that the primary and secondary ATA ports sit on the
3013  * same addresses that old ISA hardware did. This dictates that we use
3014  * those addresses and ignore the BARs if we cannot set PCI native
3018 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
3019 uint32_t prefetchmask)
3021 int rid, type, progif;
3023 /* if this device supports PCI native addressing use it */
3024 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3025 if ((progif & 0x8a) == 0x8a) {
3026 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
3027 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
3028 printf("Trying ATA native PCI addressing mode\n");
3029 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
3033 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3034 type = SYS_RES_IOPORT;
3035 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
3036 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
3037 prefetchmask & (1 << 0));
3038 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
3039 prefetchmask & (1 << 1));
3042 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
3043 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
3046 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
3047 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
3050 if (progif & PCIP_STORAGE_IDE_MODESEC) {
3051 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3052 prefetchmask & (1 << 2));
3053 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3054 prefetchmask & (1 << 3));
3057 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3058 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3061 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3062 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
3065 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3066 prefetchmask & (1 << 4));
3067 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3068 prefetchmask & (1 << 5));
3072 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3074 struct pci_devinfo *dinfo = device_get_ivars(dev);
3075 pcicfgregs *cfg = &dinfo->cfg;
3076 char tunable_name[64];
3079 /* Has to have an intpin to have an interrupt. */
3080 if (cfg->intpin == 0)
3083 /* Let the user override the IRQ with a tunable. */
3084 irq = PCI_INVALID_IRQ;
3085 snprintf(tunable_name, sizeof(tunable_name),
3086 "hw.pci%d.%d.%d.INT%c.irq",
3087 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
3088 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3089 irq = PCI_INVALID_IRQ;
3092 * If we didn't get an IRQ via the tunable, then we either use the
3093 * IRQ value in the intline register or we ask the bus to route an
3094 * interrupt for us. If force_route is true, then we only use the
3095 * value in the intline register if the bus was unable to assign an
3098 if (!PCI_INTERRUPT_VALID(irq)) {
3099 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3100 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3101 if (!PCI_INTERRUPT_VALID(irq))
3105 /* If after all that we don't have an IRQ, just bail. */
3106 if (!PCI_INTERRUPT_VALID(irq))
3109 /* Update the config register if it changed. */
3110 if (irq != cfg->intline) {
3112 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3115 /* Add this IRQ as rid 0 interrupt resource. */
3116 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3119 /* Perform early OHCI takeover from SMM. */
3121 ohci_early_takeover(device_t self)
3123 struct resource *res;
3129 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3133 ctl = bus_read_4(res, OHCI_CONTROL);
3134 if (ctl & OHCI_IR) {
3136 printf("ohci early: "
3137 "SMM active, request owner change\n");
3138 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3139 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3141 ctl = bus_read_4(res, OHCI_CONTROL);
3143 if (ctl & OHCI_IR) {
3145 printf("ohci early: "
3146 "SMM does not respond, resetting\n");
3147 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3149 /* Disable interrupts */
3150 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3153 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3156 /* Perform early UHCI takeover from SMM. */
3158 uhci_early_takeover(device_t self)
3160 struct resource *res;
3164 * Set the PIRQD enable bit and switch off all the others. We don't
3165 * want legacy support to interfere with us XXX Does this also mean
3166 * that the BIOS won't touch the keyboard anymore if it is connected
3167 * to the ports of the root hub?
3169 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3171 /* Disable interrupts */
3172 rid = PCI_UHCI_BASE_REG;
3173 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3175 bus_write_2(res, UHCI_INTR, 0);
3176 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3180 /* Perform early EHCI takeover from SMM. */
3182 ehci_early_takeover(device_t self)
3184 struct resource *res;
3194 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3198 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3200 /* Synchronise with the BIOS if it owns the controller. */
3201 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3202 eecp = EHCI_EECP_NEXT(eec)) {
3203 eec = pci_read_config(self, eecp, 4);
3204 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3207 bios_sem = pci_read_config(self, eecp +
3208 EHCI_LEGSUP_BIOS_SEM, 1);
3209 if (bios_sem == 0) {
3213 printf("ehci early: "
3214 "SMM active, request owner change\n");
3216 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3218 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3220 bios_sem = pci_read_config(self, eecp +
3221 EHCI_LEGSUP_BIOS_SEM, 1);
3224 if (bios_sem != 0) {
3226 printf("ehci early: "
3227 "SMM does not respond\n");
3229 /* Disable interrupts */
3230 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3231 bus_write_4(res, offs + EHCI_USBINTR, 0);
3233 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3236 /* Perform early XHCI takeover from SMM. */
3238 xhci_early_takeover(device_t self)
3240 struct resource *res;
3250 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3254 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3258 /* Synchronise with the BIOS if it owns the controller. */
3259 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3260 eecp += XHCI_XECP_NEXT(eec) << 2) {
3261 eec = bus_read_4(res, eecp);
3263 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3266 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3271 printf("xhci early: "
3272 "SMM active, request owner change\n");
3274 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3276 /* wait a maximum of 5 second */
3278 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3280 bios_sem = bus_read_1(res, eecp +
3281 XHCI_XECP_BIOS_SEM);
3284 if (bios_sem != 0) {
3286 printf("xhci early: "
3287 "SMM does not respond\n");
3290 /* Disable interrupts */
3291 offs = bus_read_1(res, XHCI_CAPLENGTH);
3292 bus_write_4(res, offs + XHCI_USBCMD, 0);
3293 bus_read_4(res, offs + XHCI_USBSTS);
3295 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3298 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3300 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3301 struct resource_list *rl)
3303 struct resource *res;
3305 u_long start, end, count;
3306 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
3308 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3309 case PCIM_HDRTYPE_BRIDGE:
3310 sec_reg = PCIR_SECBUS_1;
3311 sub_reg = PCIR_SUBBUS_1;
3313 case PCIM_HDRTYPE_CARDBUS:
3314 sec_reg = PCIR_SECBUS_2;
3315 sub_reg = PCIR_SUBBUS_2;
3322 * If the existing bus range is valid, attempt to reserve it
3323 * from our parent. If this fails for any reason, clear the
3324 * secbus and subbus registers.
3326 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3327 * This would at least preserve the existing sec_bus if it is
3330 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3331 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3333 /* Quirk handling. */
3334 switch (pci_get_devid(dev)) {
3335 case 0x12258086: /* Intel 82454KX/GX (Orion) */
3336 sup_bus = pci_read_config(dev, 0x41, 1);
3337 if (sup_bus != 0xff) {
3338 sec_bus = sup_bus + 1;
3339 sub_bus = sup_bus + 1;
3340 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3341 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3346 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
3347 if ((cp = getenv("smbios.planar.maker")) == NULL)
3349 if (strncmp(cp, "Compal", 6) != 0) {
3354 if ((cp = getenv("smbios.planar.product")) == NULL)
3356 if (strncmp(cp, "08A0", 4) != 0) {
3361 if (sub_bus < 0xa) {
3363 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3369 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
3370 if (sec_bus > 0 && sub_bus >= sec_bus) {
3373 count = end - start + 1;
3375 resource_list_add(rl, PCI_RES_BUS, 0, 0ul, ~0ul, count);
3378 * If requested, clear secondary bus registers in
3379 * bridge devices to force a complete renumbering
3380 * rather than reserving the existing range. However,
3381 * preserve the existing size.
3383 if (pci_clear_buses)
3387 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3388 start, end, count, 0);
3394 "pci%d:%d:%d:%d secbus failed to allocate\n",
3395 pci_get_domain(dev), pci_get_bus(dev),
3396 pci_get_slot(dev), pci_get_function(dev));
3400 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3401 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
3404 static struct resource *
3405 pci_alloc_secbus(device_t dev, device_t child, int *rid, u_long start,
3406 u_long end, u_long count, u_int flags)
3408 struct pci_devinfo *dinfo;
3410 struct resource_list *rl;
3411 struct resource *res;
3412 int sec_reg, sub_reg;
3414 dinfo = device_get_ivars(child);
3416 rl = &dinfo->resources;
3417 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3418 case PCIM_HDRTYPE_BRIDGE:
3419 sec_reg = PCIR_SECBUS_1;
3420 sub_reg = PCIR_SUBBUS_1;
3422 case PCIM_HDRTYPE_CARDBUS:
3423 sec_reg = PCIR_SECBUS_2;
3424 sub_reg = PCIR_SUBBUS_2;
3433 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3434 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
3435 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
3436 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3437 start, end, count, flags & ~RF_ACTIVE);
3439 resource_list_delete(rl, PCI_RES_BUS, *rid);
3440 device_printf(child, "allocating %lu bus%s failed\n",
3441 count, count == 1 ? "" : "es");
3445 device_printf(child,
3446 "Lazy allocation of %lu bus%s at %lu\n", count,
3447 count == 1 ? "" : "es", rman_get_start(res));
3448 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3449 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3451 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3452 end, count, flags));
3457 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3459 struct pci_devinfo *dinfo;
3461 struct resource_list *rl;
3462 const struct pci_quirk *q;
3466 dinfo = device_get_ivars(dev);
3468 rl = &dinfo->resources;
3469 devid = (cfg->device << 16) | cfg->vendor;
3471 /* ATA devices needs special map treatment */
3472 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3473 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3474 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3475 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3476 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3477 pci_ata_maps(bus, dev, rl, force, prefetchmask);
3479 for (i = 0; i < cfg->nummaps;) {
3481 * Skip quirked resources.
3483 for (q = &pci_quirks[0]; q->devid != 0; q++)
3484 if (q->devid == devid &&
3485 q->type == PCI_QUIRK_UNMAP_REG &&
3486 q->arg1 == PCIR_BAR(i))
3488 if (q->devid != 0) {
3492 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3493 prefetchmask & (1 << i));
3497 * Add additional, quirked resources.
3499 for (q = &pci_quirks[0]; q->devid != 0; q++)
3500 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3501 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3503 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3504 #ifdef __PCI_REROUTE_INTERRUPT
3506 * Try to re-route interrupts. Sometimes the BIOS or
3507 * firmware may leave bogus values in these registers.
3508 * If the re-route fails, then just stick with what we
3511 pci_assign_interrupt(bus, dev, 1);
3513 pci_assign_interrupt(bus, dev, 0);
3517 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3518 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3519 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3520 xhci_early_takeover(dev);
3521 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3522 ehci_early_takeover(dev);
3523 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3524 ohci_early_takeover(dev);
3525 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3526 uhci_early_takeover(dev);
3529 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3531 * Reserve resources for secondary bus ranges behind bridge
3534 pci_reserve_secbus(bus, dev, cfg, rl);
3538 static struct pci_devinfo *
3539 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3540 int slot, int func, size_t dinfo_size)
3542 struct pci_devinfo *dinfo;
3544 dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size);
3546 pci_add_child(dev, dinfo);
3552 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3554 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3555 device_t pcib = device_get_parent(dev);
3556 struct pci_devinfo *dinfo;
3558 int s, f, pcifunchigh;
3563 * Try to detect a device at slot 0, function 0. If it exists, try to
3564 * enable ARI. We must enable ARI before detecting the rest of the
3565 * functions on this bus as ARI changes the set of slots and functions
3566 * that are legal on this bus.
3568 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0,
3570 if (dinfo != NULL && pci_enable_ari)
3571 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);
3574 * Start looking for new devices on slot 0 at function 1 because we
3575 * just identified the device at slot 0, function 0.
3579 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3580 ("dinfo_size too small"));
3581 maxslots = PCIB_MAXSLOTS(pcib);
3582 for (s = 0; s <= maxslots; s++, first_func = 0) {
3586 hdrtype = REG(PCIR_HDRTYPE, 1);
3587 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3589 if (hdrtype & PCIM_MFDEV)
3590 pcifunchigh = PCIB_MAXFUNCS(pcib);
3591 for (f = first_func; f <= pcifunchigh; f++)
3592 pci_identify_function(pcib, dev, domain, busno, s, f,
3599 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3601 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3602 device_set_ivars(dinfo->cfg.dev, dinfo);
3603 resource_list_init(&dinfo->resources);
3604 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3605 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3606 pci_print_verbose(dinfo);
3607 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
3608 pci_child_added(dinfo->cfg.dev);
3612 pci_child_added_method(device_t dev, device_t child)
3618 pci_probe(device_t dev)
3621 device_set_desc(dev, "PCI bus");
3623 /* Allow other subclasses to override this driver. */
3624 return (BUS_PROBE_GENERIC);
3628 pci_attach_common(device_t dev)
3630 struct pci_softc *sc;
3632 #ifdef PCI_DMA_BOUNDARY
3633 int error, tag_valid;
3639 sc = device_get_softc(dev);
3640 domain = pcib_get_domain(dev);
3641 busno = pcib_get_bus(dev);
3644 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
3646 if (sc->sc_bus == NULL) {
3647 device_printf(dev, "failed to allocate bus number\n");
3652 device_printf(dev, "domain=%d, physical bus=%d\n",
3654 #ifdef PCI_DMA_BOUNDARY
3656 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3657 devclass_find("pci")) {
3658 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3659 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3660 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3661 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3663 device_printf(dev, "Failed to create DMA tag: %d\n",
3670 sc->sc_dma_tag = bus_get_dma_tag(dev);
3675 pci_attach(device_t dev)
3677 int busno, domain, error;
3679 error = pci_attach_common(dev);
3684 * Since there can be multiple independantly numbered PCI
3685 * busses on systems with multiple PCI domains, we can't use
3686 * the unit number to decide which bus we are probing. We ask
3687 * the parent pcib what our domain and bus numbers are.
3689 domain = pcib_get_domain(dev);
3690 busno = pcib_get_bus(dev);
3691 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3692 return (bus_generic_attach(dev));
3697 pci_detach(device_t dev)
3699 struct pci_softc *sc;
3702 error = bus_generic_detach(dev);
3705 sc = device_get_softc(dev);
3706 return (bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus));
3711 pci_set_power_children(device_t dev, device_t *devlist, int numdevs,
3714 device_t child, pcib;
3718 * Set the device to the given state. If the firmware suggests
3719 * a different power state, use it instead. If power management
3720 * is not present, the firmware is responsible for managing
3721 * device power. Skip children who aren't attached since they
3722 * are handled separately.
3724 pcib = device_get_parent(dev);
3725 for (i = 0; i < numdevs; i++) {
3728 if (device_is_attached(child) &&
3729 PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
3730 pci_set_powerstate(child, dstate);
3735 pci_suspend(device_t dev)
3737 device_t child, *devlist;
3738 struct pci_devinfo *dinfo;
3739 int error, i, numdevs;
3742 * Save the PCI configuration space for each child and set the
3743 * device in the appropriate power state for this sleep state.
3745 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3747 for (i = 0; i < numdevs; i++) {
3749 dinfo = device_get_ivars(child);
3750 pci_cfg_save(child, dinfo, 0);
3753 /* Suspend devices before potentially powering them down. */
3754 error = bus_generic_suspend(dev);
3756 free(devlist, M_TEMP);
3759 if (pci_do_power_suspend)
3760 pci_set_power_children(dev, devlist, numdevs,
3762 free(devlist, M_TEMP);
3767 pci_resume(device_t dev)
3769 device_t child, *devlist;
3770 struct pci_devinfo *dinfo;
3771 int error, i, numdevs;
3774 * Set each child to D0 and restore its PCI configuration space.
3776 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3778 if (pci_do_power_resume)
3779 pci_set_power_children(dev, devlist, numdevs,
3782 /* Now the device is powered up, restore its config space. */
3783 for (i = 0; i < numdevs; i++) {
3785 dinfo = device_get_ivars(child);
3787 pci_cfg_restore(child, dinfo);
3788 if (!device_is_attached(child))
3789 pci_cfg_save(child, dinfo, 1);
3793 * Resume critical devices first, then everything else later.
3795 for (i = 0; i < numdevs; i++) {
3797 switch (pci_get_class(child)) {
3801 case PCIC_BASEPERIPH:
3802 DEVICE_RESUME(child);
3806 for (i = 0; i < numdevs; i++) {
3808 switch (pci_get_class(child)) {
3812 case PCIC_BASEPERIPH:
3815 DEVICE_RESUME(child);
3818 free(devlist, M_TEMP);
3823 pci_load_vendor_data(void)
3829 data = preload_search_by_type("pci_vendor_data");
3831 ptr = preload_fetch_addr(data);
3832 sz = preload_fetch_size(data);
3833 if (ptr != NULL && sz != 0) {
3834 pci_vendordata = ptr;
3835 pci_vendordata_size = sz;
3836 /* terminate the database */
3837 pci_vendordata[pci_vendordata_size] = '\n';
3843 pci_driver_added(device_t dev, driver_t *driver)
3848 struct pci_devinfo *dinfo;
3852 device_printf(dev, "driver added\n");
3853 DEVICE_IDENTIFY(driver, dev);
3854 if (device_get_children(dev, &devlist, &numdevs) != 0)
3856 for (i = 0; i < numdevs; i++) {
3858 if (device_get_state(child) != DS_NOTPRESENT)
3860 dinfo = device_get_ivars(child);
3861 pci_print_verbose(dinfo);
3863 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3864 pci_cfg_restore(child, dinfo);
3865 if (device_probe_and_attach(child) != 0)
3866 pci_child_detached(dev, child);
3868 free(devlist, M_TEMP);
/*
 * Bus setup_intr method.  Wires up the handler through the generic bus
 * code first; for direct children it then programs the MSI or MSI-X
 * address/data pair (mapped via the parent PCIB) or falls back to
 * legacy INTx, toggling PCIM_CMD_INTxDIS in the command register as
 * appropriate.  On a mapping failure the just-installed handler is
 * torn down again.
 */
3872 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3873 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3875 struct pci_devinfo *dinfo;
3876 struct msix_table_entry *mte;
3877 struct msix_vector *mv;
3883 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3888 /* If this is not a direct child, just bail out. */
3889 if (device_get_parent(child) != dev) {
3894 rid = rman_get_rid(irq);
3896 /* Make sure that INTx is enabled */
3897 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3900 * Check to see if the interrupt is MSI or MSI-X.
3901 * Ask our parent to map the MSI and give
3902 * us the address and data register values.
3903 * If we fail for some reason, teardown the
3904 * interrupt handler.
3906 dinfo = device_get_ivars(child);
3907 if (dinfo->cfg.msi.msi_alloc > 0) {
/* First handler for this MSI: map it through the parent bridge. */
3908 if (dinfo->cfg.msi.msi_addr == 0) {
3909 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3910 ("MSI has handlers, but vectors not mapped"));
3911 error = PCIB_MAP_MSI(device_get_parent(dev),
3912 child, rman_get_start(irq), &addr, &data);
3915 dinfo->cfg.msi.msi_addr = addr;
3916 dinfo->cfg.msi.msi_data = data;
3918 if (dinfo->cfg.msi.msi_handlers == 0)
3919 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3920 dinfo->cfg.msi.msi_data);
3921 dinfo->cfg.msi.msi_handlers++;
/* Otherwise this must be MSI-X; rid is a 1-based table index. */
3923 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3924 ("No MSI or MSI-X interrupts allocated"));
3925 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3926 ("MSI-X index too high"));
3927 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3928 KASSERT(mte->mte_vector != 0, ("no message vector"));
3929 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3930 KASSERT(mv->mv_irq == rman_get_start(irq),
3932 if (mv->mv_address == 0) {
3933 KASSERT(mte->mte_handlers == 0,
3934 ("MSI-X table entry has handlers, but vector not mapped"));
3935 error = PCIB_MAP_MSI(device_get_parent(dev),
3936 child, rman_get_start(irq), &addr, &data);
3939 mv->mv_address = addr;
3942 if (mte->mte_handlers == 0) {
3943 pci_enable_msix(child, rid - 1, mv->mv_address,
3945 pci_unmask_msix(child, rid - 1);
3947 mte->mte_handlers++;
3951 * Make sure that INTx is disabled if we are using MSI/MSI-X,
3952 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
3953 * in which case we "enable" INTx so MSI/MSI-X actually works.
3955 if (!pci_has_quirk(pci_get_devid(child),
3956 PCI_QUIRK_MSI_INTX_BUG))
3957 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3959 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Mapping failed: undo the generic interrupt setup done above. */
3962 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus teardown_intr method: the inverse of pci_setup_intr().  For
 * direct children it disables INTx decoding, drops the MSI/MSI-X
 * handler reference count, and masks/disables the message once the
 * count reaches zero, before delegating to the generic teardown.
 */
3972 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3975 struct msix_table_entry *mte;
3976 struct resource_list_entry *rle;
3977 struct pci_devinfo *dinfo;
/* Nothing to tear down for a NULL or never-activated IRQ resource. */
3980 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3983 /* If this isn't a direct child, just bail out */
3984 if (device_get_parent(child) != dev)
3985 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3987 rid = rman_get_rid(irq);
/* Disable INTx decoding while no handler is attached. */
3990 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3993 * Check to see if the interrupt is MSI or MSI-X. If so,
3994 * decrement the appropriate handlers count and mask the
3995 * MSI-X message, or disable MSI messages if the count
3998 dinfo = device_get_ivars(child);
3999 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
/* The resource being torn down must be the one we handed out. */
4000 if (rle->res != irq)
4002 if (dinfo->cfg.msi.msi_alloc > 0) {
4003 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4004 ("MSI-X index too high"));
4005 if (dinfo->cfg.msi.msi_handlers == 0)
4007 dinfo->cfg.msi.msi_handlers--;
4008 if (dinfo->cfg.msi.msi_handlers == 0)
4009 pci_disable_msi(child);
4011 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4012 ("No MSI or MSI-X interrupts allocated"));
4013 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4014 ("MSI-X index too high"));
4015 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4016 if (mte->mte_handlers == 0)
4018 mte->mte_handlers--;
4019 if (mte->mte_handlers == 0)
4020 pci_mask_msix(child, rid - 1);
4023 error = bus_generic_teardown_intr(dev, child, irq, cookie);
4026 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus print_child method: emit the standard one-line description of a
 * child device (port/memory/IRQ resource ranges, flags, slot.function,
 * and NUMA domain), accumulating the number of characters printed.
 */
4031 pci_print_child(device_t dev, device_t child)
4033 struct pci_devinfo *dinfo;
4034 struct resource_list *rl;
4037 dinfo = device_get_ivars(child);
4038 rl = &dinfo->resources;
4040 retval += bus_print_child_header(dev, child);
4042 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
4043 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
4044 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
4045 if (device_get_flags(dev))
4046 retval += printf(" flags %#x", device_get_flags(dev));
4048 retval += printf(" at device %d.%d", pci_get_slot(child),
4049 pci_get_function(child));
4051 retval += bus_print_child_domain(dev, child);
4052 retval += bus_print_child_footer(dev, child);
/*
 * Table of human-readable names for PCI class/subclass codes, used by
 * pci_probe_nomatch() to describe devices with no attached driver.  A
 * subclass of -1 is the catch-all entry for the class; "report"
 * selects whether the entry is announced always (1) or only under
 * bootverbose (0).
 */
4061 int report; /* 0 = bootverbose, 1 = always */
4063 } pci_nomatch_tab[] = {
4064 {PCIC_OLD, -1, 1, "old"},
4065 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4066 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4067 {PCIC_STORAGE, -1, 1, "mass storage"},
4068 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
4069 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
4070 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
4071 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
4072 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
4073 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
4074 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
4075 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
4076 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
4077 {PCIC_NETWORK, -1, 1, "network"},
4078 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
4079 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
4080 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
4081 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
4082 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
4083 {PCIC_DISPLAY, -1, 1, "display"},
4084 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
4085 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
4086 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
4087 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4088 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
4089 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
4090 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
4091 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
4092 {PCIC_MEMORY, -1, 1, "memory"},
4093 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
4094 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
4095 {PCIC_BRIDGE, -1, 1, "bridge"},
4096 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4097 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4098 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4099 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4100 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4101 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4102 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4103 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4104 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4105 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4106 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
4107 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
4108 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
4109 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
4110 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4111 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
4112 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
4113 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
4114 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
4115 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4116 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
4117 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"},
4118 {PCIC_INPUTDEV, -1, 1, "input device"},
4119 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
4120 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4121 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
4122 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
4123 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
4124 {PCIC_DOCKING, -1, 1, "docking station"},
4125 {PCIC_PROCESSOR, -1, 1, "processor"},
4126 {PCIC_SERIALBUS, -1, 1, "serial bus"},
4127 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
4128 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
4129 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
4130 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
4131 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
4132 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
4133 {PCIC_WIRELESS, -1, 1, "wireless controller"},
4134 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
4135 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
4136 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
4137 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
4138 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
4139 {PCIC_SATCOM, -1, 1, "satellite communication"},
4140 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
4141 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
4142 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
4143 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
4144 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
4145 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
4146 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
4147 {PCIC_DASP, -1, 0, "dasp"},
4148 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
/*
 * Bus probe_nomatch method: report a child device for which no driver
 * attached.  Prefers a description from the loaded vendor database;
 * otherwise falls back to the generic class/subclass names in
 * pci_nomatch_tab.  Finally saves the child's config state (and
 * possibly powers it down, per pci_cfg_save policy).
 */
4153 pci_probe_nomatch(device_t dev, device_t child)
4156 const char *cp, *scp;
4160 * Look for a listing for this device in a loaded device database.
4163 if ((device = pci_describe_device(child)) != NULL) {
4164 device_printf(dev, "<%s>", device);
4165 free(device, M_DEVBUF);
4168 * Scan the class/subclass descriptions for a general
4173 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4174 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* Class catch-all entry vs. exact subclass match. */
4175 if (pci_nomatch_tab[i].subclass == -1) {
4176 cp = pci_nomatch_tab[i].desc;
4177 report = pci_nomatch_tab[i].report;
4178 } else if (pci_nomatch_tab[i].subclass ==
4179 pci_get_subclass(child)) {
4180 scp = pci_nomatch_tab[i].desc;
4181 report = pci_nomatch_tab[i].report;
4185 if (report || bootverbose) {
4186 device_printf(dev, "<%s%s%s>",
4188 ((cp != NULL) && (scp != NULL)) ? ", " : "",
4192 if (report || bootverbose) {
4193 printf(" at device %d.%d (no driver attached)\n",
4194 pci_get_slot(child), pci_get_function(child));
4196 pci_cfg_save(child, device_get_ivars(child), 1);
4200 pci_child_detached(device_t dev, device_t child)
4202 struct pci_devinfo *dinfo;
4203 struct resource_list *rl;
4205 dinfo = device_get_ivars(child);
4206 rl = &dinfo->resources;
4209 * Have to deallocate IRQs before releasing any MSI messages and
4210 * have to release MSI messages before deallocating any memory
4213 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4214 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4215 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4216 pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
4217 (void)pci_release_msi(child);
4219 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4220 pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
4221 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4222 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
4224 if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4225 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
4228 pci_cfg_save(child, dinfo, 1);
4232 * Parse the PCI device database, if loaded, and return a pointer to a
4233 * description of the device.
4235 * The database is flat text formatted as follows:
4237 * Any line not in a valid format is ignored.
4238 * Lines are terminated with newline '\n' characters.
4240 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4243 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4244 * - devices cannot be listed without a corresponding VENDOR line.
4245 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4246 * another TAB, then the device name.
4250 * Assuming (ptr) points to the beginning of a line in the database,
4251 * return the vendor or device and description of the next entry.
4252 * The value of (vendor) or (device) inappropriate for the entry type
4253 * is set to -1. Returns nonzero at the end of the database.
4255 * Note that this is somewhat fragile in the face of corrupt data;
4256 * we attempt to safeguard against this by spamming the end of the
4257 * database with a newline when we initialise.
/*
 * Parse one line of the vendor database at *ptr, classifying it as a
 * VENDOR or DEVICE entry (per the format comment above) and copying
 * its description into *desc.  Advances *ptr past the line; "left"
 * bounds the scan to the remaining size of the database.
 */
4260 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
4269 left = pci_vendordata_size - (cp - pci_vendordata);
/*
 * NOTE(review): "%80[^\n]" can store up to 80 characters plus the
 * terminating NUL (81 bytes), but the *desc buffers are allocated
 * with malloc(80) in pci_describe_device() — a potential one-byte
 * overflow on a maximal-length description; verify.
 */
4277 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
4281 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4284 /* skip to next line */
4285 while (*cp != '\n' && left > 0) {
4294 /* skip to next line */
4295 while (*cp != '\n' && left > 0) {
4299 if (*cp == '\n' && left > 0)
/*
 * Look up a device's vendor and device IDs in the loaded vendor
 * database and return a malloc'd (M_DEVBUF) "vendor, device" string,
 * or NULL if no database is loaded or allocation fails.  The caller
 * owns and must free the returned string.
 */
4306 pci_describe_device(device_t dev)
4309 char *desc, *vp, *dp, *line;
4311 desc = vp = dp = NULL;
4314 * If we have no vendor data, we can't do anything.
4316 if (pci_vendordata == NULL)
4320 * Scan the vendor data looking for this device
4322 line = pci_vendordata;
4323 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4326 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4328 if (vendor == pci_get_vendor(dev))
4331 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4334 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4342 if (device == pci_get_device(dev))
/* No name for this device ID: fall back to the raw hex ID. */
4346 snprintf(dp, 80, "0x%x", pci_get_device(dev));
4347 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4349 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus read_ivar method: return the requested instance variable of a
 * child from its cached config-space copy (dinfo->cfg).
 */
4359 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4361 struct pci_devinfo *dinfo;
4364 dinfo = device_get_ivars(child);
4368 case PCI_IVAR_ETHADDR:
4370 * The generic accessor doesn't deal with failure, so
4371 * we set the return value, then return an error.
4373 *((uint8_t **) result) = NULL;
4375 case PCI_IVAR_SUBVENDOR:
4376 *result = cfg->subvendor;
4378 case PCI_IVAR_SUBDEVICE:
4379 *result = cfg->subdevice;
4381 case PCI_IVAR_VENDOR:
4382 *result = cfg->vendor;
4384 case PCI_IVAR_DEVICE:
4385 *result = cfg->device;
4387 case PCI_IVAR_DEVID:
/* Combined 32-bit identifier: device in the high half, vendor low. */
4388 *result = (cfg->device << 16) | cfg->vendor;
4390 case PCI_IVAR_CLASS:
4391 *result = cfg->baseclass;
4393 case PCI_IVAR_SUBCLASS:
4394 *result = cfg->subclass;
4396 case PCI_IVAR_PROGIF:
4397 *result = cfg->progif;
4399 case PCI_IVAR_REVID:
4400 *result = cfg->revid;
4402 case PCI_IVAR_INTPIN:
4403 *result = cfg->intpin;
4406 *result = cfg->intline;
4408 case PCI_IVAR_DOMAIN:
4409 *result = cfg->domain;
4415 *result = cfg->slot;
4417 case PCI_IVAR_FUNCTION:
4418 *result = cfg->func;
4420 case PCI_IVAR_CMDREG:
4421 *result = cfg->cmdreg;
4423 case PCI_IVAR_CACHELNSZ:
4424 *result = cfg->cachelnsz;
/* MIN_GNT/MAX_LAT only exist in type 0 (normal) config headers. */
4426 case PCI_IVAR_MINGNT:
4427 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4431 *result = cfg->mingnt;
4433 case PCI_IVAR_MAXLAT:
4434 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4438 *result = cfg->maxlat;
4440 case PCI_IVAR_LATTIMER:
4441 *result = cfg->lattimer;
/*
 * Bus write_ivar method: only PCI_IVAR_INTPIN is writable; all other
 * identification ivars are read-only and rejected with EINVAL.
 */
4450 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4452 struct pci_devinfo *dinfo;
4454 dinfo = device_get_ivars(child);
4457 case PCI_IVAR_INTPIN:
4458 dinfo->cfg.intpin = value;
4460 case PCI_IVAR_ETHADDR:
4461 case PCI_IVAR_SUBVENDOR:
4462 case PCI_IVAR_SUBDEVICE:
4463 case PCI_IVAR_VENDOR:
4464 case PCI_IVAR_DEVICE:
4465 case PCI_IVAR_DEVID:
4466 case PCI_IVAR_CLASS:
4467 case PCI_IVAR_SUBCLASS:
4468 case PCI_IVAR_PROGIF:
4469 case PCI_IVAR_REVID:
4471 case PCI_IVAR_DOMAIN:
4474 case PCI_IVAR_FUNCTION:
4475 return (EINVAL); /* disallow for now */
4482 #include "opt_ddb.h"
4484 #include <ddb/ddb.h>
4485 #include <sys/cons.h>
4488 * List resources based on pci map registers, for use within ddb
/*
 * ddb "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (driver name/unit, selector,
 * class, card/chip IDs, revision, header type), stopping early if the
 * ddb pager is quit.
 */
4491 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4493 struct pci_devinfo *dinfo;
4494 struct devlist *devlist_head;
4497 int i, error, none_count;
4500 /* get the head of the device queue */
4501 devlist_head = &pci_devq;
4504 * Go through the list of devices and print out devices
4506 for (error = 0, i = 0,
4507 dinfo = STAILQ_FIRST(devlist_head);
4508 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4509 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4511 /* Populate pd_name and pd_unit */
4514 name = device_get_name(dinfo->cfg.dev);
4517 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4518 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4519 (name && *name) ? name : "none",
4520 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4522 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4523 p->pc_sel.pc_func, (p->pc_class << 16) |
4524 (p->pc_subclass << 8) | p->pc_progif,
4525 (p->pc_subdevice << 16) | p->pc_subvendor,
4526 (p->pc_device << 16) | p->pc_vendor,
4527 p->pc_revid, p->pc_hdr);
/*
 * Lazily reserve the resource backing a BAR: size the BAR (reusing a
 * previously probed size when available), sanity-check the requested
 * resource type against the BAR type, reserve an appropriately sized
 * and aligned range from the parent, and program the BAR with the
 * assigned address.
 */
static struct resource *
4533 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4534 u_long start, u_long end, u_long count, u_int flags)
4536 struct pci_devinfo *dinfo = device_get_ivars(child);
4537 struct resource_list *rl = &dinfo->resources;
4538 struct resource *res;
4540 pci_addr_t map, testval;
4544 pm = pci_find_bar(child, *rid);
4546 /* This is a BAR that we failed to allocate earlier. */
4547 mapsize = pm->pm_size;
4551 * Weed out the bogons, and figure out how large the
4552 * BAR/map is. BARs that read back 0 here are bogus
4553 * and unimplemented. Note: atapci in legacy mode are
4554 * special and handled elsewhere in the code. If you
4555 * have a atapci device in legacy mode and it fails
4556 * here, that other code is broken.
4558 pci_read_bar(child, *rid, &map, &testval);
4561 * Determine the size of the BAR and ignore BARs with a size
4562 * of 0. Device ROM BARs use a different mask value.
4564 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4565 mapsize = pci_romsize(testval);
4567 mapsize = pci_mapsize(testval);
4570 pm = pci_add_bar(child, *rid, map, mapsize);
/* Reject type mismatches between the request and the BAR itself. */
4573 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4574 if (type != SYS_RES_MEMORY) {
4577 "child %s requested type %d for rid %#x,"
4578 " but the BAR says it is an memio\n",
4579 device_get_nameunit(child), type, *rid);
4583 if (type != SYS_RES_IOPORT) {
4586 "child %s requested type %d for rid %#x,"
4587 " but the BAR says it is an ioport\n",
4588 device_get_nameunit(child), type, *rid);
4594 * For real BARs, we need to override the size that
4595 * the driver requests, because that's what the BAR
4596 * actually uses and we would otherwise have a
4597 * situation where we might allocate the excess to
4598 * another driver, which won't work.
4600 count = (pci_addr_t)1 << mapsize;
/* BARs must be naturally aligned to their size. */
4601 if (RF_ALIGNMENT(flags) < mapsize)
4602 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4603 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4604 flags |= RF_PREFETCHABLE;
4607 * Allocate enough resource, and then write back the
4608 * appropriate BAR for that resource.
4610 resource_list_add(rl, type, *rid, start, end, count);
4611 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
4612 count, flags & ~RF_ACTIVE);
/* Reservation failed: remove the entry we just added. */
4614 resource_list_delete(rl, type, *rid);
4615 device_printf(child,
4616 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4617 count, *rid, type, start, end);
4621 device_printf(child,
4622 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4623 count, *rid, type, rman_get_start(res));
4624 map = rman_get_start(res);
4625 pci_write_bar(child, pm, map);
/*
 * Bus alloc_resource method with lazy BAR allocation.  Non-direct
 * children are passed straight up the tree.  IRQ rid 0 may trigger
 * interrupt routing (but is refused once MSI/MSI-X has been set up);
 * memory/I/O requests for BARs are reserved on demand through
 * pci_reserve_map(), while bridge window registers bypass BAR
 * handling entirely.
 */
4631 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4632 u_long start, u_long end, u_long count, u_int flags)
4634 struct pci_devinfo *dinfo;
4635 struct resource_list *rl;
4636 struct resource_list_entry *rle;
4637 struct resource *res;
4640 if (device_get_parent(child) != dev)
4641 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4642 type, rid, start, end, count, flags));
4645 * Perform lazy resource allocation
4647 dinfo = device_get_ivars(child);
4648 rl = &dinfo->resources;
4651 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
4653 return (pci_alloc_secbus(dev, child, rid, start, end, count,
4658 * Can't alloc legacy interrupt once MSI messages have
4661 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4662 cfg->msix.msix_alloc > 0))
4666 * If the child device doesn't have an interrupt
4667 * routed and is deserving of an interrupt, try to
4670 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4672 pci_assign_interrupt(dev, child, 0);
4674 case SYS_RES_IOPORT:
4675 case SYS_RES_MEMORY:
4678 * PCI-PCI bridge I/O window resources are not BARs.
4679 * For those allocations just pass the request up the
4682 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4684 case PCIR_IOBASEL_1:
4685 case PCIR_MEMBASE_1:
4686 case PCIR_PMBASEL_1:
4688 * XXX: Should we bother creating a resource
4691 return (bus_generic_alloc_resource(dev, child,
4692 type, rid, start, end, count, flags));
4696 /* Reserve resources for this BAR if needed. */
4697 rle = resource_list_find(rl, type, *rid);
4699 res = pci_reserve_map(dev, child, type, rid, start, end,
4705 return (resource_list_alloc(rl, dev, child, type, rid,
4706 start, end, count, flags));
/*
 * Bus release_resource method: mirror of pci_alloc_resource().
 * Non-direct children and bridge window registers go up the tree;
 * everything else is released through the child's resource list.
 */
4710 pci_release_resource(device_t dev, device_t child, int type, int rid,
4713 struct pci_devinfo *dinfo;
4714 struct resource_list *rl;
4717 if (device_get_parent(child) != dev)
4718 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4721 dinfo = device_get_ivars(child);
4725 * PCI-PCI bridge I/O window resources are not BARs. For
4726 * those allocations just pass the request up the tree.
4728 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
4729 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
4731 case PCIR_IOBASEL_1:
4732 case PCIR_MEMBASE_1:
4733 case PCIR_PMBASEL_1:
4734 return (bus_generic_release_resource(dev, child, type,
4740 rl = &dinfo->resources;
4741 return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * Bus activate_resource method: activate through the generic bus code,
 * then for direct children enable the matching decode (memory or I/O)
 * in the command register; device ROM BARs additionally need their
 * PCIM_BIOS_ENABLE bit set.
 */
4745 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4748 struct pci_devinfo *dinfo;
4751 error = bus_generic_activate_resource(dev, child, type, rid, r);
4755 /* Enable decoding in the command register when activating BARs. */
4756 if (device_get_parent(child) == dev) {
4757 /* Device ROMs need their decoding explicitly enabled. */
4758 dinfo = device_get_ivars(child);
4759 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4760 pci_write_bar(child, pci_find_bar(child, rid),
4761 rman_get_start(r) | PCIM_BIOS_ENABLE);
4763 case SYS_RES_IOPORT:
4764 case SYS_RES_MEMORY:
4765 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus deactivate_resource method: deactivate through the generic bus
 * code, then for a direct child's device ROM BAR rewrite the BAR to
 * turn ROM decoding back off.
 */
4773 pci_deactivate_resource(device_t dev, device_t child, int type,
4774 int rid, struct resource *r)
4776 struct pci_devinfo *dinfo;
4779 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4783 /* Disable decoding for device ROMs. */
4784 if (device_get_parent(child) == dev) {
4785 dinfo = device_get_ivars(child);
4786 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4787 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Detach (if attached) and destroy a child device: disable its memory
 * and I/O decoding, release every resource on its resource list
 * (forcibly reclaiming any still held by the child), then delete the
 * device itself.
 */
4794 pci_delete_child(device_t dev, device_t child)
4796 struct resource_list_entry *rle;
4797 struct resource_list *rl;
4798 struct pci_devinfo *dinfo;
4800 dinfo = device_get_ivars(child);
4801 rl = &dinfo->resources;
4803 if (device_is_attached(child))
4804 device_detach(child);
4806 /* Turn off access to resources we're about to free */
4807 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4808 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4810 /* Free all allocated resources */
4811 STAILQ_FOREACH(rle, rl, link) {
4813 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4814 resource_list_busy(rl, rle->type, rle->rid)) {
4815 pci_printf(&dinfo->cfg,
4816 "Resource still owned, oops. "
4817 "(type=%d, rid=%d, addr=%lx)\n",
4818 rle->type, rle->rid,
4819 rman_get_start(rle->res));
4820 bus_release_resource(child, rle->type, rle->rid,
4823 resource_list_unreserve(rl, dev, child, rle->type,
4827 resource_list_free(rl);
4829 device_delete_child(dev, child);
/*
 * Bus delete_resource method: drop a (type, rid) entry from a direct
 * child's resource list, refusing if the resource is still active or
 * busy; otherwise unreserve it first and then delete the entry.
 */
4834 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4836 struct pci_devinfo *dinfo;
4837 struct resource_list *rl;
4838 struct resource_list_entry *rle;
4840 if (device_get_parent(child) != dev)
4843 dinfo = device_get_ivars(child);
4844 rl = &dinfo->resources;
4845 rle = resource_list_find(rl, type, rid);
4850 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4851 resource_list_busy(rl, type, rid)) {
4852 device_printf(dev, "delete_resource: "
4853 "Resource still owned by child, oops. "
4854 "(type=%d, rid=%d, addr=%lx)\n",
4855 type, rid, rman_get_start(rle->res));
4858 resource_list_unreserve(rl, dev, child, type, rid);
4860 resource_list_delete(rl, type, rid);
/* Bus get_resource_list method: return the child's resource list. */
struct resource_list *
4864 pci_get_resource_list (device_t dev, device_t child)
4866 struct pci_devinfo *dinfo = device_get_ivars(child);
4868 return (&dinfo->resources);
/* Bus get_dma_tag method: hand out the bus's shared DMA tag. */
4872 pci_get_dma_tag(device_t bus, device_t dev)
4874 struct pci_softc *sc = device_get_softc(bus);
4876 return (sc->sc_dma_tag);
/*
 * Read "width" bytes from a child's config space at "reg" by
 * forwarding to the parent bridge's PCIB_READ_CONFIG.
 */
4880 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4882 struct pci_devinfo *dinfo = device_get_ivars(child);
4883 pcicfgregs *cfg = &dinfo->cfg;
4885 return (PCIB_READ_CONFIG(device_get_parent(dev),
4886 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Write "width" bytes of "val" to a child's config space at "reg" by
 * forwarding to the parent bridge's PCIB_WRITE_CONFIG.
 */
4890 pci_write_config_method(device_t dev, device_t child, int reg,
4891 uint32_t val, int width)
4893 struct pci_devinfo *dinfo = device_get_ivars(child);
4894 pcicfgregs *cfg = &dinfo->cfg;
4896 PCIB_WRITE_CONFIG(device_get_parent(dev),
4897 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Format a child's bus location ("slot=%d function=%d") into buf. */
4901 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4905 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4906 pci_get_function(child));
/*
 * Format a child's plug-and-play identification (vendor/device IDs,
 * subsystem IDs, and combined class code) into buf for devmatch-style
 * driver matching.
 */
4911 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4914 struct pci_devinfo *dinfo;
4917 dinfo = device_get_ivars(child);
4919 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4920 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4921 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Ask the parent bridge to route a legacy interrupt for the child and
 * return the assigned IRQ.
 */
4927 pci_assign_interrupt_method(device_t dev, device_t child)
4929 struct pci_devinfo *dinfo = device_get_ivars(child);
4930 pcicfgregs *cfg = &dinfo->cfg;
4932 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev control node, and pull in any preloaded vendor
 * database; on unload, destroy the control node.
 */
4937 pci_modevent(module_t mod, int what, void *arg)
4939 static struct cdev *pci_cdev;
4943 STAILQ_INIT(&pci_devq);
4945 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4947 pci_load_vendor_data();
4951 destroy_dev(pci_cdev);
/*
 * Restore the saved PCI Express capability control registers.  Which
 * registers exist depends on the capability version and the port type
 * (see the PCIe spec); version >= 2 devices implement the full set.
 * Mirrors pci_cfg_save_pcie() below.
 */
4959 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
4961 #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
4962 struct pcicfg_pcie *cfg;
4965 cfg = &dinfo->cfg.pcie;
4966 pos = cfg->pcie_location;
4968 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
4970 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
4972 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4973 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
4974 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
4975 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
4977 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4978 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
4979 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
4980 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
4982 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4983 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
4984 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
4987 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
4988 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
4989 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
/* Restore the saved PCI-X command register. */
4995 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
4997 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
4998 dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a device's standard config registers, BARs, and capability
 * state (PCIe, PCI-X, MSI, MSI-X) from the cached copy in dinfo,
 * bringing the device to D0 first.  Only type 0 headers are handled;
 * bridges (type 1/2) need their own treatment.
 */
5002 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
5006 * Only do header type 0 devices. Type 1 devices are bridges,
5007 * which we know need special treatment. Type 2 devices are
5008 * cardbus bridges which also require special treatment.
5009 * Other types are unknown, and we err on the side of safety
5012 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5016 * Restore the device to full power mode. We must do this
5017 * before we restore the registers because moving from D3 to
5018 * D0 will cause the chip's BARs and some other registers to
5019 * be reset to some unknown power on reset values. Cut down
5020 * the noise on boot by doing nothing if we are already in
5023 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
5024 pci_set_powerstate(dev, PCI_POWERSTATE_D0)
5025 pci_restore_bars(dev);
5026 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
5027 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
5028 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
5029 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
5030 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
5031 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
5032 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
5033 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
5034 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
5037 * Restore extended capabilities for PCI-Express and PCI-X
5039 if (dinfo->cfg.pcie.pcie_location != 0)
5040 pci_cfg_restore_pcie(dev, dinfo);
5041 if (dinfo->cfg.pcix.pcix_location != 0)
5042 pci_cfg_restore_pcix(dev, dinfo);
5044 /* Restore MSI and MSI-X configurations if they are present. */
5045 if (dinfo->cfg.msi.msi_location != 0)
5046 pci_resume_msi(dev);
5047 if (dinfo->cfg.msix.msix_location != 0)
5048 pci_resume_msix(dev);
/*
 * Save the PCI Express capability control registers into dinfo.  The
 * set of registers read matches what pci_cfg_restore_pcie() writes
 * back: gated by the capability version and port type.
 */
5052 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
5054 #define RREG(n) pci_read_config(dev, pos + (n), 2)
5055 struct pcicfg_pcie *cfg;
5058 cfg = &dinfo->cfg.pcie;
5059 pos = cfg->pcie_location;
5061 cfg->pcie_flags = RREG(PCIER_FLAGS);
5063 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5065 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
5067 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5068 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5069 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5070 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
5072 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5073 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5074 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5075 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
5077 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5078 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5079 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
5082 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5083 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5084 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
/* Save the PCI-X command register into dinfo. */
5090 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5092 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5093 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * Save a device's writable config registers and capability state into
 * dinfo so they can later be restored by pci_cfg_restore().  If
 * "setstate" is set, optionally power the device down to D3 according
 * to the pci_do_power_nodriver policy (certain classes are exempted
 * because powering them down misbehaves).  Only type 0 headers are
 * handled.
 */
5097 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5103 * Only do header type 0 devices. Type 1 devices are bridges, which
5104 * we know need special treatment. Type 2 devices are cardbus bridges
5105 * which also require special treatment. Other types are unknown, and
5106 * we err on the side of safety by ignoring them. Powering down
5107 * bridges should not be undertaken lightly.
5109 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5113 * Some drivers apparently write to these registers w/o updating our
5114 * cached copy. No harm happens if we update the copy, so do so here
5115 * so we can restore them. The COMMAND register is modified by the
5116 * bus w/o updating the cache. This should represent the normally
5117 * writable portion of the 'defined' part of type 0 headers. In
5118 * theory we also need to save/restore the PCI capability structures
5119 * we know about, but apart from power we don't know any that are
5122 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5123 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5124 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5125 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5126 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5127 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5128 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5129 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5130 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5131 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5132 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5133 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5134 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5135 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5136 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
5138 if (dinfo->cfg.pcie.pcie_location != 0)
5139 pci_cfg_save_pcie(dev, dinfo);
5141 if (dinfo->cfg.pcix.pcix_location != 0)
5142 pci_cfg_save_pcix(dev, dinfo);
5145 * don't set the state for display devices, base peripherals and
5146 * memory devices since bad things happen when they are powered down.
5147 * We should (a) have drivers that can easily detach and (b) use
5148 * generic drivers for these devices so that some device actually
5149 * attaches. We need to make sure that when we implement (a) we don't
5150 * power the device down on a reattach.
5152 cls = pci_get_class(dev);
5155 switch (pci_do_power_nodriver)
5157 case 0: /* NO powerdown at all */
5159 case 1: /* Conservative about what to power down */
5160 if (cls == PCIC_STORAGE)
5163 case 2: /* Aggressive about what to power down */
5164 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5165 cls == PCIC_BASEPERIPH)
5168 case 3: /* Power down everything */
5172 * PCI spec says we can only go into D3 state from D0 state.
5173 * Transition from D[12] into D0 before going to D3 state.
5175 ps = pci_get_powerstate(dev);
5176 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5177 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5178 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5179 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5182 /* Wrapper APIs suitable for device driver use. */
5184 pci_save_state(device_t dev)
5186 struct pci_devinfo *dinfo;
5188 dinfo = device_get_ivars(dev);
5189 pci_cfg_save(dev, dinfo, 0);
5193 pci_restore_state(device_t dev)
5195 struct pci_devinfo *dinfo;
5197 dinfo = device_get_ivars(dev);
5198 pci_cfg_restore(dev, dinfo);
5202 pci_get_rid_method(device_t dev, device_t child)
5205 return (PCIB_GET_RID(device_get_parent(dev), child));
5208 /* Find the upstream port of a given PCI device in a root complex. */
5210 pci_find_pcie_root_port(device_t dev)
5212 struct pci_devinfo *dinfo;
5213 devclass_t pci_class;
5216 pci_class = devclass_find("pci");
5217 KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
5218 ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
5221 * Walk the bridge hierarchy until we find a PCI-e root
5222 * port or a non-PCI device.
5225 bus = device_get_parent(dev);
5226 KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
5227 device_get_nameunit(dev)));
5229 pcib = device_get_parent(bus);
5230 KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
5231 device_get_nameunit(bus)));
5234 * pcib's parent must be a PCI bus for this to be a
5237 if (device_get_devclass(device_get_parent(pcib)) != pci_class)
5240 dinfo = device_get_ivars(pcib);
5241 if (dinfo->cfg.pcie.pcie_location != 0 &&
5242 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)