2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/limits.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
49 #include <vm/vm_extern.h>
52 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <machine/stdarg.h>
57 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
58 #include <machine/intr_machdep.h>
61 #include <sys/pciio.h>
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pci_private.h>
66 #include <dev/usb/controller/xhcireg.h>
67 #include <dev/usb/controller/ehcireg.h>
68 #include <dev/usb/controller/ohcireg.h>
69 #include <dev/usb/controller/uhcireg.h>
/*
 * True iff config-space offset 'reg' is the expansion-ROM BAR for this
 * device's header type: PCIR_BIOS for a type-0 (normal) header, or
 * PCIR_BIOS_1 for a type-1 (PCI-PCI bridge) header.
 * 'reg' is parenthesized so the macro stays safe for compound expressions.
 */
#define PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && (reg) == PCIR_BIOS) || \
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && (reg) == PCIR_BIOS_1))
/*
 * Forward declarations for the static helpers implemented later in this
 * file.  NOTE(review): this extraction is missing lines, so several
 * prototypes below are visibly truncated (e.g. pci_assign_interrupt,
 * pci_add_map, pci_hdrtypedata, pci_remap_intr_method lack their trailing
 * parameter lines) — restore from upstream before compiling.
 */
78 static int pci_has_quirk(uint32_t devid, int quirk);
79 static pci_addr_t pci_mapbase(uint64_t mapreg);
80 static const char *pci_maptype(uint64_t mapreg);
81 static int pci_mapsize(uint64_t testval);
82 static int pci_maprange(uint64_t mapreg);
83 static pci_addr_t pci_rombase(uint64_t mapreg);
84 static int pci_romsize(uint64_t testval);
85 static void pci_fixancient(pcicfgregs *cfg);
86 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
88 static int pci_porten(device_t dev);
89 static int pci_memen(device_t dev);
90 static void pci_assign_interrupt(device_t bus, device_t dev,
92 static int pci_add_map(device_t bus, device_t dev, int reg,
93 struct resource_list *rl, int force, int prefetch);
94 static int pci_probe(device_t dev);
95 static int pci_attach(device_t dev);
97 static int pci_detach(device_t dev);
99 static void pci_load_vendor_data(void);
100 static int pci_describe_parse_line(char **ptr, int *vendor,
101 int *device, char **desc);
102 static char *pci_describe_device(device_t dev);
103 static int pci_modevent(module_t mod, int what, void *arg);
104 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
106 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
107 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
108 int reg, uint32_t *data);
110 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
111 int reg, uint32_t data);
113 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
114 static void pci_mask_msix(device_t dev, u_int index);
115 static void pci_unmask_msix(device_t dev, u_int index);
116 static int pci_msi_blacklisted(void);
117 static int pci_msix_blacklisted(void);
118 static void pci_resume_msi(device_t dev);
119 static void pci_resume_msix(device_t dev);
120 static int pci_remap_intr_method(device_t bus, device_t dev,
123 static uint16_t pci_get_rid_method(device_t dev, device_t child);
/*
 * newbus method table for the "pci" bus driver: device interface,
 * bus interface, and the pci kobj interface (see pci_if.m upstream).
 */
125 static device_method_t pci_methods[] = {
126 /* Device interface */
127 DEVMETHOD(device_probe, pci_probe),
128 DEVMETHOD(device_attach, pci_attach),
130 DEVMETHOD(device_detach, pci_detach),
/*
 * NOTE(review): two device_detach entries appear below; upstream selects
 * one of them with preprocessor conditionals that are missing from this
 * extraction — confirm which entry should survive.
 */
132 DEVMETHOD(device_detach, bus_generic_detach),
134 DEVMETHOD(device_shutdown, bus_generic_shutdown),
135 DEVMETHOD(device_suspend, pci_suspend),
136 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
139 DEVMETHOD(bus_print_child, pci_print_child),
140 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
141 DEVMETHOD(bus_read_ivar, pci_read_ivar),
142 DEVMETHOD(bus_write_ivar, pci_write_ivar),
143 DEVMETHOD(bus_driver_added, pci_driver_added),
144 DEVMETHOD(bus_setup_intr, pci_setup_intr),
145 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
147 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
148 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
149 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
150 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
151 DEVMETHOD(bus_delete_resource, pci_delete_resource),
152 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
153 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
154 DEVMETHOD(bus_release_resource, pci_release_resource),
155 DEVMETHOD(bus_activate_resource, pci_activate_resource),
156 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
157 DEVMETHOD(bus_child_detached, pci_child_detached),
158 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
159 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
160 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
/* PCI interface */
163 DEVMETHOD(pci_read_config, pci_read_config_method),
164 DEVMETHOD(pci_write_config, pci_write_config_method),
165 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
166 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
167 DEVMETHOD(pci_enable_io, pci_enable_io_method),
168 DEVMETHOD(pci_disable_io, pci_disable_io_method),
169 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
170 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
171 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
172 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
173 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
174 DEVMETHOD(pci_find_cap, pci_find_cap_method),
175 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
176 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
177 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
178 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
179 DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
180 DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
181 DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
182 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
183 DEVMETHOD(pci_release_msi, pci_release_msi_method),
184 DEVMETHOD(pci_msi_count, pci_msi_count_method),
185 DEVMETHOD(pci_msix_count, pci_msix_count_method),
186 DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method),
187 DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method),
188 DEVMETHOD(pci_get_rid, pci_get_rid_method),
189 DEVMETHOD(pci_child_added, pci_child_added_method),
/* Driver class / module registration and the vendor-data blob loaded at boot. */
194 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
196 static devclass_t pci_devclass;
197 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
198 MODULE_VERSION(pci, 1);
/* Contents of the pci_vendor_data file, used by pci_describe_device(). */
200 static char *pci_vendordata;
201 static size_t pci_vendordata_size;
/*
 * NOTE(review): the 'devid' line below is a struct member whose enclosing
 * 'struct pci_quirk {' declaration is missing from this extraction
 * (the member is referenced as q->devid in pci_has_quirk below) — restore
 * the struct header and remaining members (type, arg1, arg2) from upstream.
 */
204 uint32_t devid; /* Vendor/device of the card */
/* Quirk types stored in struct pci_quirk and tested via pci_has_quirk(). */
206 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
207 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
208 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
209 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
210 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
211 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
/*
 * Table of known-broken or special-cased devices, keyed by the 32-bit
 * devid (device ID in the high 16 bits, vendor ID in the low 16 bits,
 * matching PCIR_DEVVENDOR layout).  The table is terminated by an
 * all-zero sentinel entry (not visible in this extraction) — the scan
 * loop in pci_has_quirk() stops on devid == 0.
 */
216 static const struct pci_quirk pci_quirks[] = {
217 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
218 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
219 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
220 /* As does the Serverworks OSB4 (the SMBus mapping register) */
221 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
224 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
225 * or the CMIC-SL (AKA ServerWorks GC_LE).
227 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
228 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
231 * MSI doesn't work on earlier Intel chipsets including
232 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
234 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
246 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
249 * MSI-X allocation doesn't work properly for devices passed through
250 * by VMware up to at least ESXi 5.1.
252 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
253 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
256 * Some virtualization environments emulate an older chipset
257 * but support MSI just fine. QEMU uses the Intel 82440.
259 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
262 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
263 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
264 * It prevents us from attaching hpet(4) when the bit is unset.
265 * Note this quirk only affects SB600 revision A13 and earlier.
266 * For SB600 A21 and later, firmware must set the bit to hide it.
267 * For SB700 and later, it is unused and hardcoded to zero.
269 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
272 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
273 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
274 * command register is set.
276 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
277 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
278 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
281 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
282 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
284 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
285 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
286 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
287 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
288 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
289 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
/* map register information */
/* Flag bits describing what kind of resource a BAR decodes. */
295 #define PCI_MAPMEM 0x01 /* memory map */
296 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
297 #define PCI_MAPPORT 0x04 /* port map */
/*
 * Global list of all discovered PCI devices; walked by the
 * pci_find_*() lookup helpers below.  pci_generation bumps when the
 * list changes; pci_numdevs counts entries.
 */
299 struct devlist pci_devq;
300 uint32_t pci_generation;
301 uint32_t pci_numdevs = 0;
/*
 * Set when at least one PCIe / PCI-X capable bridge or device is seen
 * during capability scanning (assignments not visible in this extraction
 * — presumably in pci_read_cap; confirm against upstream).
 */
302 static int pcie_chipset, pcix_chipset;
/*
 * hw.pci sysctl tree: loader tunables and runtime knobs controlling BAR
 * allocation, power management, MSI/MSI-X usage, and USB early takeover.
 */
305 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
307 static int pci_enable_io_modes = 1;
308 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
309 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
310 &pci_enable_io_modes, 1,
311 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
312 enable these bits correctly. We'd like to do this all the time, but there\n\
313 are some peripherals that this causes problems with.");
315 static int pci_do_realloc_bars = 0;
316 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
317 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
318 &pci_do_realloc_bars, 0,
319 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
321 static int pci_do_power_nodriver = 0;
322 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
323 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
324 &pci_do_power_nodriver, 0,
325 "Place a function into D3 state when no driver attaches to it. 0 means\n\
326 disable. 1 means conservatively place devices into D3 state. 2 means\n\
327 agressively place devices into D3 state. 3 means put absolutely everything\n\
330 int pci_do_power_resume = 1;
331 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
332 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
333 &pci_do_power_resume, 1,
334 "Transition from D3 -> D0 on resume.");
336 int pci_do_power_suspend = 1;
337 TUNABLE_INT("hw.pci.do_power_suspend", &pci_do_power_suspend);
338 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RW,
339 &pci_do_power_suspend, 1,
340 "Transition from D0 -> D3 on suspend.");
342 static int pci_do_msi = 1;
343 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
344 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
345 "Enable support for MSI interrupts");
347 static int pci_do_msix = 1;
348 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
349 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
350 "Enable support for MSI-X interrupts");
352 static int pci_honor_msi_blacklist = 1;
353 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
354 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
355 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
/* USB takeover defaults on for x86 (BIOS legacy emulation); off elsewhere. */
357 #if defined(__i386__) || defined(__amd64__)
358 static int pci_usb_takeover = 1;
360 static int pci_usb_takeover = 0;
362 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
363 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
364 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
365 Disable this if you depend on BIOS emulation of USB devices, that is\n\
366 you use USB devices (like keyboard or mouse) but do not load USB drivers");
368 static int pci_clear_bars;
369 TUNABLE_INT("hw.pci.clear_bars", &pci_clear_bars);
370 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
371 "Ignore firmware-assigned resources for BARs.");
373 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
374 static int pci_clear_buses;
375 TUNABLE_INT("hw.pci.clear_buses", &pci_clear_buses);
376 SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
377 "Ignore firmware-assigned bus numbers.");
380 static int pci_enable_ari = 1;
381 TUNABLE_INT("hw.pci.enable_ari", &pci_enable_ari);
382 SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
383 0, "Enable support for PCIe Alternative RID Interpretation");
/*
 * Return nonzero iff 'quirk' is listed for device 'devid' in pci_quirks[].
 * The scan relies on the table's zero-devid terminator entry.
 */
386 pci_has_quirk(uint32_t devid, int quirk)
388 const struct pci_quirk *q;
390 for (q = &pci_quirks[0]; q->devid; q++) {
391 if (q->devid == devid && q->type == quirk)
397 /* Find a device_t by bus/slot/function in domain 0 */
/* Thin wrapper: delegates to pci_find_dbsf() with domain fixed at 0. */
400 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
403 return (pci_find_dbsf(0, bus, slot, func));
406 /* Find a device_t by domain/bus/slot/function */
/*
 * Linear scan of the global pci_devq list; returns the matching
 * device's device_t (the function's NULL-on-miss return path is not
 * visible in this extraction).
 */
409 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
411 struct pci_devinfo *dinfo;
413 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
414 if ((dinfo->cfg.domain == domain) &&
415 (dinfo->cfg.bus == bus) &&
416 (dinfo->cfg.slot == slot) &&
417 (dinfo->cfg.func == func)) {
418 return (dinfo->cfg.dev);
425 /* Find a device_t by vendor/device ID */
/* Returns the first device on pci_devq matching both IDs. */
428 pci_find_device(uint16_t vendor, uint16_t device)
430 struct pci_devinfo *dinfo;
432 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
433 if ((dinfo->cfg.vendor == vendor) &&
434 (dinfo->cfg.device == device)) {
435 return (dinfo->cfg.dev);
/* Find the first device on pci_devq matching base class and subclass. */
443 pci_find_class(uint8_t class, uint8_t subclass)
445 struct pci_devinfo *dinfo;
447 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
448 if (dinfo->cfg.baseclass == class &&
449 dinfo->cfg.subclass == subclass) {
450 return (dinfo->cfg.dev);
/*
 * printf() prefixed with the device's "pciD:B:S:F: " location.
 * Returns the total character count from both printf calls.
 */
458 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
463 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
466 retval += vprintf(fmt, ap);
471 /* return base address of memory or port map */
/* Masks off the BAR's type/flag bits, leaving only the base address. */
474 pci_mapbase(uint64_t mapreg)
477 if (PCI_BAR_MEM(mapreg))
478 return (mapreg & PCIM_BAR_MEM_BASE);
480 return (mapreg & PCIM_BAR_IO_BASE);
483 /* return map type of memory or port map */
/* Human-readable BAR type string (I/O, prefetchable memory, or memory). */
486 pci_maptype(uint64_t mapreg)
489 if (PCI_BAR_IO(mapreg))
491 if (mapreg & PCIM_BAR_MEM_PREFETCH)
492 return ("Prefetchable Memory");
496 /* return log2 of map size decoded for memory or port map */
/*
 * 'testval' is the value read back after writing all-ones to the BAR;
 * counting trailing zero bits of the decoded base yields log2 of the
 * region size.
 */
499 pci_mapsize(uint64_t testval)
503 testval = pci_mapbase(testval);
506 while ((testval & 1) == 0)
515 /* return base address of device ROM */
/* Masks the expansion-ROM BAR down to its address bits. */
518 pci_rombase(uint64_t mapreg)
521 return (mapreg & PCIM_BIOS_ADDR_MASK);
524 /* return log2 of map size decoded for device ROM */
/* Same trailing-zero-count sizing technique as pci_mapsize(), for the ROM BAR. */
527 pci_romsize(uint64_t testval)
531 testval = pci_rombase(testval)
534 while ((testval & 1) == 0)
543 /* return log2 of address range supported by map register */
/*
 * Decodes the BAR's addressing width: I/O BARs and the 32-bit /
 * below-1MB / 64-bit memory BAR types (return values are in the
 * truncated lines of this extraction).
 */
546 pci_maprange(uint64_t mapreg)
550 if (PCI_BAR_IO(mapreg))
553 switch (mapreg & PCIM_BAR_MEM_TYPE) {
554 case PCIM_BAR_MEM_32:
557 case PCIM_BAR_MEM_1MB:
560 case PCIM_BAR_MEM_64:
567 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/*
 * Pre-2.0 bridges may report a type-0 header; force the bridge header
 * type when the class code says PCI-PCI bridge.  Non-normal headers
 * are left untouched.
 */
570 pci_fixancient(pcicfgregs *cfg)
572 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
575 /* PCI to PCI bridges use header type 1 */
576 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
577 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
580 /* extract header type specific config data */
/*
 * Fill in the header-type-dependent fields of *cfg: subsystem IDs and
 * min-grant/max-latency for type 0, and the BAR count (nummaps) for all
 * three header types.  Cardbus keeps its subsystem IDs at different
 * offsets (PCIR_SUBVEND_2/PCIR_SUBDEV_2).
 */
583 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
585 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
586 switch (cfg->hdrtype & PCIM_HDRTYPE) {
587 case PCIM_HDRTYPE_NORMAL:
588 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
589 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
590 cfg->mingnt = REG(PCIR_MINGNT, 1);
591 cfg->maxlat = REG(PCIR_MAXLAT, 1);
592 cfg->nummaps = PCI_MAXMAPS_0;
594 case PCIM_HDRTYPE_BRIDGE:
595 cfg->nummaps = PCI_MAXMAPS_1;
597 case PCIM_HDRTYPE_CARDBUS:
598 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
599 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
600 cfg->nummaps = PCI_MAXMAPS_2;
606 /* read configuration header into pcicfgregs structure */
/*
 * Probe one domain/bus/slot/function.  If a device responds (vendor
 * register != 0xffffffff), allocate a pci_devinfo of 'size' bytes
 * (M_WAITOK — never returns NULL), read the common config header and
 * header-type-specific data, scan capabilities if advertised, link the
 * entry onto the global pci_devq, and mirror the config into the
 * pciio 'conf' structure.  Returns the new entry, or NULL (not visible
 * here) when nothing responds at this address.
 */
608 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
610 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
611 pcicfgregs *cfg = NULL;
612 struct pci_devinfo *devlist_entry;
613 struct devlist *devlist_head;
615 devlist_head = &pci_devq;
617 devlist_entry = NULL;
619 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
620 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
622 cfg = &devlist_entry->cfg;
628 cfg->vendor = REG(PCIR_VENDOR, 2);
629 cfg->device = REG(PCIR_DEVICE, 2);
630 cfg->cmdreg = REG(PCIR_COMMAND, 2);
631 cfg->statreg = REG(PCIR_STATUS, 2);
632 cfg->baseclass = REG(PCIR_CLASS, 1);
633 cfg->subclass = REG(PCIR_SUBCLASS, 1);
634 cfg->progif = REG(PCIR_PROGIF, 1);
635 cfg->revid = REG(PCIR_REVID, 1);
636 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
637 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
638 cfg->lattimer = REG(PCIR_LATTIMER, 1);
639 cfg->intpin = REG(PCIR_INTPIN, 1);
640 cfg->intline = REG(PCIR_INTLINE, 1);
/* Split the multifunction flag out of hdrtype so later code can switch on the bare type. */
642 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
643 cfg->hdrtype &= ~PCIM_MFDEV;
644 STAILQ_INIT(&cfg->maps);
647 pci_hdrtypedata(pcib, b, s, f, cfg);
649 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
650 pci_read_cap(pcib, cfg);
652 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror the parsed config into the pciio(4)-visible pci_conf record. */
654 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
655 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
656 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
657 devlist_entry->conf.pc_sel.pc_func = cfg->func;
658 devlist_entry->conf.pc_hdr = cfg->hdrtype;
660 devlist_entry->conf.pc_subvendor = cfg->subvendor;
661 devlist_entry->conf.pc_subdevice = cfg->subdevice;
662 devlist_entry->conf.pc_vendor = cfg->vendor;
663 devlist_entry->conf.pc_device = cfg->device;
665 devlist_entry->conf.pc_class = cfg->baseclass;
666 devlist_entry->conf.pc_subclass = cfg->subclass;
667 devlist_entry->conf.pc_progif = cfg->progif;
668 devlist_entry->conf.pc_revid = cfg->revid;
673 return (devlist_entry);
/*
 * Walk the device's standard capability list and record the location
 * (and selected register contents) of each capability we care about
 * into *cfg: power management, HyperTransport, MSI, MSI-X, VPD,
 * subvendor (bridges), PCI-X and PCI express.  On powerpc the MSI
 * mapping window of HT slaves is additionally enabled at the end.
 */
678 pci_read_cap(device_t pcib, pcicfgregs *cfg)
680 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
681 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
682 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
686 int ptr, nextptr, ptrptr;
/* The capability-pointer register lives at a different offset for cardbus. */
688 switch (cfg->hdrtype & PCIM_HDRTYPE) {
689 case PCIM_HDRTYPE_NORMAL:
690 case PCIM_HDRTYPE_BRIDGE:
691 ptrptr = PCIR_CAP_PTR;
693 case PCIM_HDRTYPE_CARDBUS:
694 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
697 return; /* no extended capabilities support */
699 nextptr = REG(ptrptr, 1); /* sanity check? */
702 * Read capability entries.
704 while (nextptr != 0) {
707 printf("illegal PCI extended capability offset %d\n",
711 /* Find the next entry */
713 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
715 /* Process this entry */
716 switch (REG(ptr + PCICAP_ID, 1)) {
717 case PCIY_PMG: /* PCI power management */
/* Only the first PM capability found is recorded. */
718 if (cfg->pp.pp_cap == 0) {
719 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
720 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
721 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
722 if ((nextptr - ptr) > PCIR_POWER_DATA)
723 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
726 case PCIY_HT: /* HyperTransport */
727 /* Determine HT-specific capability type. */
728 val = REG(ptr + PCIR_HT_COMMAND, 2);
730 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
731 cfg->ht.ht_slave = ptr;
733 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
734 switch (val & PCIM_HTCMD_CAP_MASK) {
735 case PCIM_HTCAP_MSI_MAPPING:
736 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
737 /* Sanity check the mapping window. */
738 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
741 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
743 if (addr != MSI_INTEL_ADDR_BASE)
745 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
746 cfg->domain, cfg->bus,
747 cfg->slot, cfg->func,
750 addr = MSI_INTEL_ADDR_BASE;
752 cfg->ht.ht_msimap = ptr;
753 cfg->ht.ht_msictrl = val;
754 cfg->ht.ht_msiaddr = addr;
759 case PCIY_MSI: /* PCI MSI */
760 cfg->msi.msi_location = ptr;
761 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* Message count is encoded as a power of two in the MMC field. */
762 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
763 PCIM_MSICTRL_MMC_MASK)>>1);
765 case PCIY_MSIX: /* PCI MSI-X */
766 cfg->msix.msix_location = ptr;
767 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
768 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
769 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table/PBA registers carry a BAR index (BIR) plus an offset within that BAR. */
770 val = REG(ptr + PCIR_MSIX_TABLE, 4);
771 cfg->msix.msix_table_bar = PCIR_BAR(val &
773 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
774 val = REG(ptr + PCIR_MSIX_PBA, 4);
775 cfg->msix.msix_pba_bar = PCIR_BAR(val &
777 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
779 case PCIY_VPD: /* PCI Vital Product Data */
780 cfg->vpd.vpd_reg = ptr;
783 /* Should always be true. */
784 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
785 PCIM_HDRTYPE_BRIDGE) {
786 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
787 cfg->subvendor = val & 0xffff;
788 cfg->subdevice = val >> 16;
791 case PCIY_PCIX: /* PCI-X */
793 * Assume we have a PCI-X chipset if we have
794 * at least one PCI-PCI bridge with a PCI-X
795 * capability. Note that some systems with
796 * PCI-express or HT chipsets might match on
797 * this check as well.
799 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
802 cfg->pcix.pcix_location = ptr;
804 case PCIY_EXPRESS: /* PCI-express */
806 * Assume we have a PCI-express chipset if we have
807 * at least one PCI-express device.
810 cfg->pcie.pcie_location = ptr;
811 val = REG(ptr + PCIER_FLAGS, 2);
812 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
819 #if defined(__powerpc__)
821 * Enable the MSI mapping window for all HyperTransport
822 * slaves. PCI-PCI bridges have their windows enabled via
825 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
826 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
828 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
829 cfg->domain, cfg->bus, cfg->slot, cfg->func);
830 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
831 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
835 /* REG and WREG use carry through to next functions */
839 * PCI Vital Product Data
/* Polling budget (in 1us DELAYs) before a VPD register access is declared hung. */
842 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one aligned 32-bit word of VPD at 'reg' into *data.  Writes the
 * address (flag bit 0x8000 clear = read), then polls until the hardware
 * sets bit 0x8000 to signal completion.  The timeout/failure return is
 * in lines missing from this extraction.
 */
845 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
847 int count = PCI_VPD_TIMEOUT;
849 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
851 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
853 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
856 DELAY(1); /* limit looping */
858 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one aligned 32-bit word of VPD at 'reg'.  Mirror of
 * pci_read_vpd_reg(): data first, then the address with the write flag
 * (0x8000) set; completion is signalled by the hardware clearing that bit.
 */
865 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
867 int count = PCI_VPD_TIMEOUT;
869 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
871 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
872 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
873 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
876 DELAY(1); /* limit looping */
883 #undef PCI_VPD_TIMEOUT
/*
 * Cursor over the VPD space: buffers one 32-bit word ('val', with
 * 'bytesinval' bytes still unconsumed) read at offset 'off'.  The
 * member declarations are in lines missing from this extraction.
 */
885 struct vpd_readstate {
/*
 * Yield the next VPD byte into *data, refilling the 4-byte buffer from
 * pci_read_vpd_reg() when empty; the word is byte-swapped from
 * little-endian so bytes come out in on-wire order.
 */
895 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
900 if (vrs->bytesinval == 0) {
/* NOTE(review): "®" below is mojibake for "&reg" — restore before compiling. */
901 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
903 vrs->val = le32toh(reg);
905 byte = vrs->val & 0xff;
908 vrs->val = vrs->val >> 8;
909 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd.  Implemented as a
 * byte-at-a-time state machine over vpd_nextbyte():
 *   state 0 - resource item header (small/large tags); dispatches on name
 *   state 1 - Identifier String bytes       -> cfg->vpd.vpd_ident
 *   state 2/3 - VPD-R keyword header/value  -> cfg->vpd.vpd_ros[] (growable)
 *   state 5/6 - VPD-W keyword header/value  -> cfg->vpd.vpd_w[]  (growable)
 * The "RV" keyword carries the checksum; cksumvalid records its verdict.
 * Error exits free any partially-built arrays; on success vpd_cached is
 * set so the parse runs at most once per device.  (Several lines,
 * including the main for(;;) and some state transitions, are missing
 * from this extraction.)
 */
919 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
921 struct vpd_readstate vrs;
926 int alloc, off; /* alloc/off for RO/W arrays */
932 /* init vpd reader */
940 name = remain = i = 0; /* shut up stupid gcc */
941 alloc = off = 0; /* shut up stupid gcc */
942 dflen = 0; /* shut up stupid gcc */
945 if (vpd_nextbyte(&vrs, &byte)) {
950 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
951 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
952 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
955 case 0: /* item name */
957 if (vpd_nextbyte(&vrs, &byte2)) {
962 if (vpd_nextbyte(&vrs, &byte2)) {
966 remain |= byte2 << 8;
/* VPD space is capped at 0x7f 4-byte words; larger 'remain' means corrupt data. */
967 if (remain > (0x7f*4 - vrs.off)) {
970 "invalid VPD data, remain %#x\n",
976 name = (byte >> 3) & 0xf;
979 case 0x2: /* String */
980 cfg->vpd.vpd_ident = malloc(remain + 1,
988 case 0x10: /* VPD-R */
991 cfg->vpd.vpd_ros = malloc(alloc *
992 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
996 case 0x11: /* VPD-W */
999 cfg->vpd.vpd_w = malloc(alloc *
1000 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1004 default: /* Invalid data, abort */
1010 case 1: /* Identifier String */
1011 cfg->vpd.vpd_ident[i++] = byte;
1014 cfg->vpd.vpd_ident[i] = '\0';
1019 case 2: /* VPD-R Keyword Header */
/* Grow the read-only keyword array geometrically as needed. */
1021 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1022 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1023 M_DEVBUF, M_WAITOK | M_ZERO);
1025 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1026 if (vpd_nextbyte(&vrs, &byte2)) {
1030 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1031 if (vpd_nextbyte(&vrs, &byte2)) {
1035 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1037 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1040 * if this happens, we can't trust the rest
1043 pci_printf(cfg, "bad keyword length: %d\n",
1048 } else if (dflen == 0) {
/* Zero-length keyword still gets a one-byte NUL value so consumers see a string. */
1049 cfg->vpd.vpd_ros[off].value = malloc(1 *
1050 sizeof(*cfg->vpd.vpd_ros[off].value),
1051 M_DEVBUF, M_WAITOK);
1052 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1054 cfg->vpd.vpd_ros[off].value = malloc(
1056 sizeof(*cfg->vpd.vpd_ros[off].value),
1057 M_DEVBUF, M_WAITOK);
1060 /* keep in sync w/ state 3's transitions */
1061 if (dflen == 0 && remain == 0)
1063 else if (dflen == 0)
1069 case 3: /* VPD-R Keyword Value */
1070 cfg->vpd.vpd_ros[off].value[i++] = byte;
1071 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1072 "RV", 2) == 0 && cksumvalid == -1) {
1078 "bad VPD cksum, remain %hhu\n",
1087 /* keep in sync w/ state 2's transitions */
1089 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1090 if (dflen == 0 && remain == 0) {
/* Done with VPD-R: shrink the array to its final element count. */
1091 cfg->vpd.vpd_rocnt = off;
1092 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1093 off * sizeof(*cfg->vpd.vpd_ros),
1094 M_DEVBUF, M_WAITOK | M_ZERO);
1096 } else if (dflen == 0)
1106 case 5: /* VPD-W Keyword Header */
1108 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1109 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1110 M_DEVBUF, M_WAITOK | M_ZERO);
1112 cfg->vpd.vpd_w[off].keyword[0] = byte;
1113 if (vpd_nextbyte(&vrs, &byte2)) {
1117 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1118 if (vpd_nextbyte(&vrs, &byte2)) {
1122 cfg->vpd.vpd_w[off].len = dflen = byte2;
1123 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1124 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1125 sizeof(*cfg->vpd.vpd_w[off].value),
1126 M_DEVBUF, M_WAITOK);
1129 /* keep in sync w/ state 6's transitions */
1130 if (dflen == 0 && remain == 0)
1132 else if (dflen == 0)
1138 case 6: /* VPD-W Keyword Value */
1139 cfg->vpd.vpd_w[off].value[i++] = byte;
1142 /* keep in sync w/ state 5's transitions */
1144 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1145 if (dflen == 0 && remain == 0) {
1146 cfg->vpd.vpd_wcnt = off;
1147 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1148 off * sizeof(*cfg->vpd.vpd_w),
1149 M_DEVBUF, M_WAITOK | M_ZERO);
1151 } else if (dflen == 0)
1156 pci_printf(cfg, "invalid state: %d\n", state);
/* Bad checksum or an error state: discard the read-only array. */
1162 if (cksumvalid == 0 || state < -1) {
1163 /* read-only data bad, clean up */
1164 if (cfg->vpd.vpd_ros != NULL) {
1165 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1166 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1167 free(cfg->vpd.vpd_ros, M_DEVBUF);
1168 cfg->vpd.vpd_ros = NULL;
1172 /* I/O error, clean up */
1173 pci_printf(cfg, "failed to read VPD data.\n");
1174 if (cfg->vpd.vpd_ident != NULL) {
1175 free(cfg->vpd.vpd_ident, M_DEVBUF);
1176 cfg->vpd.vpd_ident = NULL;
1178 if (cfg->vpd.vpd_w != NULL) {
1179 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1180 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1181 free(cfg->vpd.vpd_w, M_DEVBUF);
1182 cfg->vpd.vpd_w = NULL;
1185 cfg->vpd.vpd_cached = 1;
/*
 * pci_get_vpd_ident() method: lazily parse VPD on first use, then hand
 * back a pointer to the cached identifier string (NULL check follows).
 */
1191 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1193 struct pci_devinfo *dinfo = device_get_ivars(child);
1194 pcicfgregs *cfg = &dinfo->cfg;
1196 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1197 pci_read_vpd(device_get_parent(dev), cfg);
1199 *identptr = cfg->vpd.vpd_ident;
1201 if (*identptr == NULL)
/*
 * pci_get_vpd_readonly() method: lazily parse VPD, then look up the
 * two-character keyword 'kw' in the cached read-only array and return
 * its value through *vptr.
 */
1208 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1211 struct pci_devinfo *dinfo = device_get_ivars(child);
1212 pcicfgregs *cfg = &dinfo->cfg;
1215 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1216 pci_read_vpd(device_get_parent(dev), cfg);
1218 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1219 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1220 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1221 *vptr = cfg->vpd.vpd_ros[i].value;
/*
 * Ensure the VPD cache for 'dev' itself is populated.  Note the double
 * device_get_parent(): 'dev' here is the PCI device, so its grandparent
 * is the pcib bridge the VPD registers are read through.
 */
1230 pci_fetch_vpd_list(device_t dev)
1232 struct pci_devinfo *dinfo = device_get_ivars(dev);
1233 pcicfgregs *cfg = &dinfo->cfg;
1235 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1236 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1241 * Find the requested HyperTransport capability and return the offset
1242 * in configuration space via the pointer provided. The function
1243 * returns 0 on success and an error code otherwise.
/*
 * Starts from the first PCIY_HT capability located by pci_find_cap()
 * and walks sibling HT entries.  SLAVE/HOST types are matched on the
 * high 3 bits only (mask 0xe000); other types use the full
 * PCIM_HTCMD_CAP_MASK comparison.
 */
1246 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1251 error = pci_find_cap(child, PCIY_HT, &ptr);
1256 * Traverse the capabilities list checking each HT capability
1257 * to see if it matches the requested HT capability.
1260 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1261 if (capability == PCIM_HTCAP_SLAVE ||
1262 capability == PCIM_HTCAP_HOST)
1265 val &= PCIM_HTCMD_CAP_MASK;
1266 if (val == capability) {
1272 /* Skip to the next HT capability. */
1274 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1275 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1284 * Find the requested capability and return the offset in
1285 * configuration space via the pointer provided. The function returns
1286 * 0 on success and an error code otherwise.
1289 pci_find_cap_method(device_t dev, device_t child, int capability,
1292 struct pci_devinfo *dinfo = device_get_ivars(child);
1293 pcicfgregs *cfg = &dinfo->cfg;
1298 * Check the CAP_LIST bit of the PCI status register first.
1300 status = pci_read_config(child, PCIR_STATUS, 2);
1301 if (!(status & PCIM_STATUS_CAPPRESENT))
1305 * Determine the start pointer of the capabilities list.
1307 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1308 case PCIM_HDRTYPE_NORMAL:
1309 case PCIM_HDRTYPE_BRIDGE:
1312 case PCIM_HDRTYPE_CARDBUS:
1313 ptr = PCIR_CAP_PTR_2;
1317 return (ENXIO); /* no extended capabilities support */
1319 ptr = pci_read_config(child, ptr, 1);
1322 * Traverse the capabilities list.
1325 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1330 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1337 * Find the requested extended capability and return the offset in
1338 * configuration space via the pointer provided. The function returns
1339 * 0 on success and an error code otherwise.
1342 pci_find_extcap_method(device_t dev, device_t child, int capability,
1345 struct pci_devinfo *dinfo = device_get_ivars(child);
1346 pcicfgregs *cfg = &dinfo->cfg;
1350 /* Only supported for PCI-express devices. */
1351 if (cfg->pcie.pcie_location == 0)
1355 ecap = pci_read_config(child, ptr, 4);
1356 if (ecap == 0xffffffff || ecap == 0)
1359 if (PCI_EXTCAP_ID(ecap) == capability) {
1364 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1367 ecap = pci_read_config(child, ptr, 4);
1374 * Support for MSI-X message interrupts.
1377 pci_enable_msix_method(device_t dev, device_t child, u_int index,
1378 uint64_t address, uint32_t data)
1380 struct pci_devinfo *dinfo = device_get_ivars(child);
1381 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1384 KASSERT(msix->msix_table_len > index, ("bogus index"));
1385 offset = msix->msix_table_offset + index * 16;
1386 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1387 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1388 bus_write_4(msix->msix_table_res, offset + 8, data);
1390 /* Enable MSI -> HT mapping. */
1391 pci_ht_map_msi(child, address);
1395 pci_mask_msix(device_t dev, u_int index)
1397 struct pci_devinfo *dinfo = device_get_ivars(dev);
1398 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1399 uint32_t offset, val;
1401 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1402 offset = msix->msix_table_offset + index * 16 + 12;
1403 val = bus_read_4(msix->msix_table_res, offset);
1404 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1405 val |= PCIM_MSIX_VCTRL_MASK;
1406 bus_write_4(msix->msix_table_res, offset, val);
1411 pci_unmask_msix(device_t dev, u_int index)
1413 struct pci_devinfo *dinfo = device_get_ivars(dev);
1414 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1415 uint32_t offset, val;
1417 KASSERT(msix->msix_table_len > index, ("bogus index"));
1418 offset = msix->msix_table_offset + index * 16 + 12;
1419 val = bus_read_4(msix->msix_table_res, offset);
1420 if (val & PCIM_MSIX_VCTRL_MASK) {
1421 val &= ~PCIM_MSIX_VCTRL_MASK;
1422 bus_write_4(msix->msix_table_res, offset, val);
1427 pci_pending_msix(device_t dev, u_int index)
1429 struct pci_devinfo *dinfo = device_get_ivars(dev);
1430 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1431 uint32_t offset, bit;
1433 KASSERT(msix->msix_table_len > index, ("bogus index"));
1434 offset = msix->msix_pba_offset + (index / 32) * 4;
1435 bit = 1 << index % 32;
1436 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1440 * Restore MSI-X registers and table during resume. If MSI-X is
1441 * enabled then walk the virtual table to restore the actual MSI-X
1445 pci_resume_msix(device_t dev)
1447 struct pci_devinfo *dinfo = device_get_ivars(dev);
1448 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1449 struct msix_table_entry *mte;
1450 struct msix_vector *mv;
1453 if (msix->msix_alloc > 0) {
1454 /* First, mask all vectors. */
1455 for (i = 0; i < msix->msix_msgnum; i++)
1456 pci_mask_msix(dev, i);
1458 /* Second, program any messages with at least one handler. */
1459 for (i = 0; i < msix->msix_table_len; i++) {
1460 mte = &msix->msix_table[i];
1461 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1463 mv = &msix->msix_vectors[mte->mte_vector - 1];
1464 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1465 pci_unmask_msix(dev, i);
1468 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1469 msix->msix_ctrl, 2);
1473 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1474 * returned in *count. After this function returns, each message will be
1475 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1478 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1480 struct pci_devinfo *dinfo = device_get_ivars(child);
1481 pcicfgregs *cfg = &dinfo->cfg;
1482 struct resource_list_entry *rle;
1483 int actual, error, i, irq, max;
1485 /* Don't let count == 0 get us into trouble. */
1489 /* If rid 0 is allocated, then fail. */
1490 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1491 if (rle != NULL && rle->res != NULL)
1494 /* Already have allocated messages? */
1495 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1498 /* If MSI-X is blacklisted for this system, fail. */
1499 if (pci_msix_blacklisted())
1502 /* MSI-X capability present? */
1503 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1506 /* Make sure the appropriate BARs are mapped. */
1507 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1508 cfg->msix.msix_table_bar);
1509 if (rle == NULL || rle->res == NULL ||
1510 !(rman_get_flags(rle->res) & RF_ACTIVE))
1512 cfg->msix.msix_table_res = rle->res;
1513 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1514 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1515 cfg->msix.msix_pba_bar);
1516 if (rle == NULL || rle->res == NULL ||
1517 !(rman_get_flags(rle->res) & RF_ACTIVE))
1520 cfg->msix.msix_pba_res = rle->res;
1523 device_printf(child,
1524 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1525 *count, cfg->msix.msix_msgnum);
1526 max = min(*count, cfg->msix.msix_msgnum);
1527 for (i = 0; i < max; i++) {
1528 /* Allocate a message. */
1529 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1535 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1541 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1543 device_printf(child, "using IRQ %lu for MSI-X\n",
1549 * Be fancy and try to print contiguous runs of
1550 * IRQ values as ranges. 'irq' is the previous IRQ.
1551 * 'run' is true if we are in a range.
1553 device_printf(child, "using IRQs %lu", rle->start);
1556 for (i = 1; i < actual; i++) {
1557 rle = resource_list_find(&dinfo->resources,
1558 SYS_RES_IRQ, i + 1);
1560 /* Still in a run? */
1561 if (rle->start == irq + 1) {
1567 /* Finish previous range. */
1573 /* Start new range. */
1574 printf(",%lu", rle->start);
1578 /* Unfinished range? */
1581 printf(" for MSI-X\n");
1585 /* Mask all vectors. */
1586 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1587 pci_mask_msix(child, i);
1589 /* Allocate and initialize vector data and virtual table. */
1590 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1591 M_DEVBUF, M_WAITOK | M_ZERO);
1592 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1593 M_DEVBUF, M_WAITOK | M_ZERO);
1594 for (i = 0; i < actual; i++) {
1595 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1596 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1597 cfg->msix.msix_table[i].mte_vector = i + 1;
1600 /* Update control register to enable MSI-X. */
1601 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1602 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1603 cfg->msix.msix_ctrl, 2);
1605 /* Update counts of alloc'd messages. */
1606 cfg->msix.msix_alloc = actual;
1607 cfg->msix.msix_table_len = actual;
1613 * By default, pci_alloc_msix() will assign the allocated IRQ
1614 * resources consecutively to the first N messages in the MSI-X table.
1615 * However, device drivers may want to use different layouts if they
1616 * either receive fewer messages than they asked for, or they wish to
1617 * populate the MSI-X table sparsely. This method allows the driver
1618 * to specify what layout it wants. It must be called after a
1619 * successful pci_alloc_msix() but before any of the associated
1620 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1622 * The 'vectors' array contains 'count' message vectors. The array
1623 * maps directly to the MSI-X table in that index 0 in the array
1624 * specifies the vector for the first message in the MSI-X table, etc.
1625 * The vector value in each array index can either be 0 to indicate
1626 * that no vector should be assigned to a message slot, or it can be a
1627 * number from 1 to N (where N is the count returned from a
1628 * succcessful call to pci_alloc_msix()) to indicate which message
1629 * vector (IRQ) to be used for the corresponding message.
1631 * On successful return, each message with a non-zero vector will have
1632 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1633 * 1. Additionally, if any of the IRQs allocated via the previous
1634 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1635 * will be freed back to the system automatically.
1637 * For example, suppose a driver has a MSI-X table with 6 messages and
1638 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1639 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1640 * C. After the call to pci_alloc_msix(), the device will be setup to
1641 * have an MSI-X table of ABC--- (where - means no vector assigned).
1642 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1643 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1644 * be freed back to the system. This device will also have valid
1645 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1647 * In any case, the SYS_RES_IRQ rid X will always map to the message
1648 * at MSI-X table index X - 1 and will only be valid if a vector is
1649 * assigned to that table entry.
1652 pci_remap_msix_method(device_t dev, device_t child, int count,
1653 const u_int *vectors)
1655 struct pci_devinfo *dinfo = device_get_ivars(child);
1656 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1657 struct resource_list_entry *rle;
1658 int i, irq, j, *used;
1661 * Have to have at least one message in the table but the
1662 * table can't be bigger than the actual MSI-X table in the
1665 if (count == 0 || count > msix->msix_msgnum)
1668 /* Sanity check the vectors. */
1669 for (i = 0; i < count; i++)
1670 if (vectors[i] > msix->msix_alloc)
1674 * Make sure there aren't any holes in the vectors to be used.
1675 * It's a big pain to support it, and it doesn't really make
1676 * sense anyway. Also, at least one vector must be used.
1678 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1680 for (i = 0; i < count; i++)
1681 if (vectors[i] != 0)
1682 used[vectors[i] - 1] = 1;
1683 for (i = 0; i < msix->msix_alloc - 1; i++)
1684 if (used[i] == 0 && used[i + 1] == 1) {
1685 free(used, M_DEVBUF);
1689 free(used, M_DEVBUF);
1693 /* Make sure none of the resources are allocated. */
1694 for (i = 0; i < msix->msix_table_len; i++) {
1695 if (msix->msix_table[i].mte_vector == 0)
1697 if (msix->msix_table[i].mte_handlers > 0)
1699 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1700 KASSERT(rle != NULL, ("missing resource"));
1701 if (rle->res != NULL)
1705 /* Free the existing resource list entries. */
1706 for (i = 0; i < msix->msix_table_len; i++) {
1707 if (msix->msix_table[i].mte_vector == 0)
1709 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1713 * Build the new virtual table keeping track of which vectors are
1716 free(msix->msix_table, M_DEVBUF);
1717 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1718 M_DEVBUF, M_WAITOK | M_ZERO);
1719 for (i = 0; i < count; i++)
1720 msix->msix_table[i].mte_vector = vectors[i];
1721 msix->msix_table_len = count;
1723 /* Free any unused IRQs and resize the vectors array if necessary. */
1724 j = msix->msix_alloc - 1;
1726 struct msix_vector *vec;
1728 while (used[j] == 0) {
1729 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1730 msix->msix_vectors[j].mv_irq);
1733 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1735 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1737 free(msix->msix_vectors, M_DEVBUF);
1738 msix->msix_vectors = vec;
1739 msix->msix_alloc = j + 1;
1741 free(used, M_DEVBUF);
1743 /* Map the IRQs onto the rids. */
1744 for (i = 0; i < count; i++) {
1745 if (vectors[i] == 0)
1747 irq = msix->msix_vectors[vectors[i]].mv_irq;
1748 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1753 device_printf(child, "Remapped MSI-X IRQs as: ");
1754 for (i = 0; i < count; i++) {
1757 if (vectors[i] == 0)
1761 msix->msix_vectors[vectors[i]].mv_irq);
1770 pci_release_msix(device_t dev, device_t child)
1772 struct pci_devinfo *dinfo = device_get_ivars(child);
1773 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1774 struct resource_list_entry *rle;
1777 /* Do we have any messages to release? */
1778 if (msix->msix_alloc == 0)
1781 /* Make sure none of the resources are allocated. */
1782 for (i = 0; i < msix->msix_table_len; i++) {
1783 if (msix->msix_table[i].mte_vector == 0)
1785 if (msix->msix_table[i].mte_handlers > 0)
1787 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1788 KASSERT(rle != NULL, ("missing resource"));
1789 if (rle->res != NULL)
1793 /* Update control register to disable MSI-X. */
1794 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1795 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1796 msix->msix_ctrl, 2);
1798 /* Free the resource list entries. */
1799 for (i = 0; i < msix->msix_table_len; i++) {
1800 if (msix->msix_table[i].mte_vector == 0)
1802 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1804 free(msix->msix_table, M_DEVBUF);
1805 msix->msix_table_len = 0;
1807 /* Release the IRQs. */
1808 for (i = 0; i < msix->msix_alloc; i++)
1809 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1810 msix->msix_vectors[i].mv_irq);
1811 free(msix->msix_vectors, M_DEVBUF);
1812 msix->msix_alloc = 0;
1817 * Return the max supported MSI-X messages this device supports.
1818 * Basically, assuming the MD code can alloc messages, this function
1819 * should return the maximum value that pci_alloc_msix() can return.
1820 * Thus, it is subject to the tunables, etc.
1823 pci_msix_count_method(device_t dev, device_t child)
1825 struct pci_devinfo *dinfo = device_get_ivars(child);
1826 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1828 if (pci_do_msix && msix->msix_location != 0)
1829 return (msix->msix_msgnum);
1834 pci_msix_pba_bar_method(device_t dev, device_t child)
1836 struct pci_devinfo *dinfo = device_get_ivars(child);
1837 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1839 if (pci_do_msix && msix->msix_location != 0)
1840 return (msix->msix_pba_bar);
1845 pci_msix_table_bar_method(device_t dev, device_t child)
1847 struct pci_devinfo *dinfo = device_get_ivars(child);
1848 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1850 if (pci_do_msix && msix->msix_location != 0)
1851 return (msix->msix_table_bar);
1856 * HyperTransport MSI mapping control
1859 pci_ht_map_msi(device_t dev, uint64_t addr)
1861 struct pci_devinfo *dinfo = device_get_ivars(dev);
1862 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1867 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1868 ht->ht_msiaddr >> 20 == addr >> 20) {
1869 /* Enable MSI -> HT mapping. */
1870 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1871 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1875 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1876 /* Disable MSI -> HT mapping. */
1877 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1878 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1884 pci_get_max_read_req(device_t dev)
1886 struct pci_devinfo *dinfo = device_get_ivars(dev);
1890 cap = dinfo->cfg.pcie.pcie_location;
1893 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1894 val &= PCIEM_CTL_MAX_READ_REQUEST;
1896 return (1 << (val + 7));
1900 pci_set_max_read_req(device_t dev, int size)
1902 struct pci_devinfo *dinfo = device_get_ivars(dev);
1906 cap = dinfo->cfg.pcie.pcie_location;
1913 size = (1 << (fls(size) - 1));
1914 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1915 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1916 val |= (fls(size) - 8) << 12;
1917 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1922 pcie_read_config(device_t dev, int reg, int width)
1924 struct pci_devinfo *dinfo = device_get_ivars(dev);
1927 cap = dinfo->cfg.pcie.pcie_location;
1931 return (0xffffffff);
1934 return (pci_read_config(dev, cap + reg, width));
1938 pcie_write_config(device_t dev, int reg, uint32_t value, int width)
1940 struct pci_devinfo *dinfo = device_get_ivars(dev);
1943 cap = dinfo->cfg.pcie.pcie_location;
1946 pci_write_config(dev, cap + reg, value, width);
1950 * Adjusts a PCI-e capability register by clearing the bits in mask
1951 * and setting the bits in (value & mask). Bits not set in mask are
1954 * Returns the old value on success or all ones on failure.
1957 pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
1960 struct pci_devinfo *dinfo = device_get_ivars(dev);
1964 cap = dinfo->cfg.pcie.pcie_location;
1968 return (0xffffffff);
1971 old = pci_read_config(dev, cap + reg, width);
1973 new |= (value & mask);
1974 pci_write_config(dev, cap + reg, new, width);
1979 * Support for MSI message signalled interrupts.
1982 pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
1985 struct pci_devinfo *dinfo = device_get_ivars(child);
1986 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1988 /* Write data and address values. */
1989 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
1990 address & 0xffffffff, 4);
1991 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1992 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1994 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
1997 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
2000 /* Enable MSI in the control register. */
2001 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
2002 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2005 /* Enable MSI -> HT mapping. */
2006 pci_ht_map_msi(child, address);
2010 pci_disable_msi_method(device_t dev, device_t child)
2012 struct pci_devinfo *dinfo = device_get_ivars(child);
2013 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2015 /* Disable MSI -> HT mapping. */
2016 pci_ht_map_msi(child, 0);
2018 /* Disable MSI in the control register. */
2019 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
2020 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2025 * Restore MSI registers during resume. If MSI is enabled then
2026 * restore the data and address registers in addition to the control
2030 pci_resume_msi(device_t dev)
2032 struct pci_devinfo *dinfo = device_get_ivars(dev);
2033 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2037 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
2038 address = msi->msi_addr;
2039 data = msi->msi_data;
2040 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
2041 address & 0xffffffff, 4);
2042 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2043 pci_write_config(dev, msi->msi_location +
2044 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
2045 pci_write_config(dev, msi->msi_location +
2046 PCIR_MSI_DATA_64BIT, data, 2);
2048 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
2051 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
2056 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
2058 struct pci_devinfo *dinfo = device_get_ivars(dev);
2059 pcicfgregs *cfg = &dinfo->cfg;
2060 struct resource_list_entry *rle;
2061 struct msix_table_entry *mte;
2062 struct msix_vector *mv;
2068 * Handle MSI first. We try to find this IRQ among our list
2069 * of MSI IRQs. If we find it, we request updated address and
2070 * data registers and apply the results.
2072 if (cfg->msi.msi_alloc > 0) {
2074 /* If we don't have any active handlers, nothing to do. */
2075 if (cfg->msi.msi_handlers == 0)
2077 for (i = 0; i < cfg->msi.msi_alloc; i++) {
2078 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2080 if (rle->start == irq) {
2081 error = PCIB_MAP_MSI(device_get_parent(bus),
2082 dev, irq, &addr, &data);
2085 pci_disable_msi(dev);
2086 dinfo->cfg.msi.msi_addr = addr;
2087 dinfo->cfg.msi.msi_data = data;
2088 pci_enable_msi(dev, addr, data);
2096 * For MSI-X, we check to see if we have this IRQ. If we do,
2097 * we request the updated mapping info. If that works, we go
2098 * through all the slots that use this IRQ and update them.
2100 if (cfg->msix.msix_alloc > 0) {
2101 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2102 mv = &cfg->msix.msix_vectors[i];
2103 if (mv->mv_irq == irq) {
2104 error = PCIB_MAP_MSI(device_get_parent(bus),
2105 dev, irq, &addr, &data);
2108 mv->mv_address = addr;
2110 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2111 mte = &cfg->msix.msix_table[j];
2112 if (mte->mte_vector != i + 1)
2114 if (mte->mte_handlers == 0)
2116 pci_mask_msix(dev, j);
2117 pci_enable_msix(dev, j, addr, data);
2118 pci_unmask_msix(dev, j);
2129 * Returns true if the specified device is blacklisted because MSI
2133 pci_msi_device_blacklisted(device_t dev)
2136 if (!pci_honor_msi_blacklist)
2139 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2143 * Determine if MSI is blacklisted globally on this system. Currently,
2144 * we just check for blacklisted chipsets as represented by the
2145 * host-PCI bridge at device 0:0:0. In the future, it may become
2146 * necessary to check other system attributes, such as the kenv values
2147 * that give the motherboard manufacturer and model number.
2150 pci_msi_blacklisted(void)
2154 if (!pci_honor_msi_blacklist)
2157 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2158 if (!(pcie_chipset || pcix_chipset)) {
2159 if (vm_guest != VM_GUEST_NO) {
2161 * Whitelist older chipsets in virtual
2162 * machines known to support MSI.
2164 dev = pci_find_bsf(0, 0, 0);
2166 return (!pci_has_quirk(pci_get_devid(dev),
2167 PCI_QUIRK_ENABLE_MSI_VM));
2172 dev = pci_find_bsf(0, 0, 0);
2174 return (pci_msi_device_blacklisted(dev));
2179 * Returns true if the specified device is blacklisted because MSI-X
2180 * doesn't work. Note that this assumes that if MSI doesn't work,
2181 * MSI-X doesn't either.
2184 pci_msix_device_blacklisted(device_t dev)
2187 if (!pci_honor_msi_blacklist)
2190 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2193 return (pci_msi_device_blacklisted(dev));
2197 * Determine if MSI-X is blacklisted globally on this system. If MSI
2198 * is blacklisted, assume that MSI-X is as well. Check for additional
2199 * chipsets where MSI works but MSI-X does not.
2202 pci_msix_blacklisted(void)
2206 if (!pci_honor_msi_blacklist)
2209 dev = pci_find_bsf(0, 0, 0);
2210 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2211 PCI_QUIRK_DISABLE_MSIX))
2214 return (pci_msi_blacklisted());
2218 * Attempt to allocate *count MSI messages. The actual number allocated is
2219 * returned in *count. After this function returns, each message will be
2220 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2223 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2225 struct pci_devinfo *dinfo = device_get_ivars(child);
2226 pcicfgregs *cfg = &dinfo->cfg;
2227 struct resource_list_entry *rle;
2228 int actual, error, i, irqs[32];
2231 /* Don't let count == 0 get us into trouble. */
2235 /* If rid 0 is allocated, then fail. */
2236 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2237 if (rle != NULL && rle->res != NULL)
2240 /* Already have allocated messages? */
2241 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2244 /* If MSI is blacklisted for this system, fail. */
2245 if (pci_msi_blacklisted())
2248 /* MSI capability present? */
2249 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2253 device_printf(child,
2254 "attempting to allocate %d MSI vectors (%d supported)\n",
2255 *count, cfg->msi.msi_msgnum);
2257 /* Don't ask for more than the device supports. */
2258 actual = min(*count, cfg->msi.msi_msgnum);
2260 /* Don't ask for more than 32 messages. */
2261 actual = min(actual, 32);
2263 /* MSI requires power of 2 number of messages. */
2264 if (!powerof2(actual))
2268 /* Try to allocate N messages. */
2269 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2281 * We now have N actual messages mapped onto SYS_RES_IRQ
2282 * resources in the irqs[] array, so add new resources
2283 * starting at rid 1.
2285 for (i = 0; i < actual; i++)
2286 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2287 irqs[i], irqs[i], 1);
2291 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2296 * Be fancy and try to print contiguous runs
2297 * of IRQ values as ranges. 'run' is true if
2298 * we are in a range.
2300 device_printf(child, "using IRQs %d", irqs[0]);
2302 for (i = 1; i < actual; i++) {
2304 /* Still in a run? */
2305 if (irqs[i] == irqs[i - 1] + 1) {
2310 /* Finish previous range. */
2312 printf("-%d", irqs[i - 1]);
2316 /* Start new range. */
2317 printf(",%d", irqs[i]);
2320 /* Unfinished range? */
2322 printf("-%d", irqs[actual - 1]);
2323 printf(" for MSI\n");
2327 /* Update control register with actual count. */
2328 ctrl = cfg->msi.msi_ctrl;
2329 ctrl &= ~PCIM_MSICTRL_MME_MASK;
2330 ctrl |= (ffs(actual) - 1) << 4;
2331 cfg->msi.msi_ctrl = ctrl;
2332 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2334 /* Update counts of alloc'd messages. */
2335 cfg->msi.msi_alloc = actual;
2336 cfg->msi.msi_handlers = 0;
2341 /* Release the MSI messages associated with this device. */
2343 pci_release_msi_method(device_t dev, device_t child)
2345 struct pci_devinfo *dinfo = device_get_ivars(child);
2346 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2347 struct resource_list_entry *rle;
2348 int error, i, irqs[32];
2350 /* Try MSI-X first. */
2351 error = pci_release_msix(dev, child);
2352 if (error != ENODEV)
2355 /* Do we have any messages to release? */
2356 if (msi->msi_alloc == 0)
2358 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2360 /* Make sure none of the resources are allocated. */
2361 if (msi->msi_handlers > 0)
2363 for (i = 0; i < msi->msi_alloc; i++) {
2364 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2365 KASSERT(rle != NULL, ("missing MSI resource"));
2366 if (rle->res != NULL)
2368 irqs[i] = rle->start;
2371 /* Update control register with 0 count. */
2372 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2373 ("%s: MSI still enabled", __func__));
2374 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2375 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2378 /* Release the messages. */
2379 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2380 for (i = 0; i < msi->msi_alloc; i++)
2381 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2383 /* Update alloc count. */
2391 * Return the max supported MSI messages this device supports.
2392 * Basically, assuming the MD code can alloc messages, this function
2393 * should return the maximum value that pci_alloc_msi() can return.
2394 * Thus, it is subject to the tunables, etc.
2397 pci_msi_count_method(device_t dev, device_t child)
2399 struct pci_devinfo *dinfo = device_get_ivars(child);
2400 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2402 if (pci_do_msi && msi->msi_location != 0)
2403 return (msi->msi_msgnum);
2407 /* free pcicfgregs structure and all depending data structures */
2410 pci_freecfg(struct pci_devinfo *dinfo)
2412 struct devlist *devlist_head;
2413 struct pci_map *pm, *next;
2416 devlist_head = &pci_devq;
2418 if (dinfo->cfg.vpd.vpd_reg) {
2419 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2420 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2421 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2422 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2423 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2424 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2425 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2427 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2430 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2431 free(dinfo, M_DEVBUF);
2433 /* increment the generation count */
2436 /* we're losing one device */
2442 * PCI power manangement
2445 pci_set_powerstate_method(device_t dev, device_t child, int state)
2447 struct pci_devinfo *dinfo = device_get_ivars(child);
2448 pcicfgregs *cfg = &dinfo->cfg;
2450 int oldstate, highest, delay;
2452 if (cfg->pp.pp_cap == 0)
2453 return (EOPNOTSUPP);
2456 * Optimize a no state change request away. While it would be OK to
2457 * write to the hardware in theory, some devices have shown odd
2458 * behavior when going from D3 -> D3.
2460 oldstate = pci_get_powerstate(child);
2461 if (oldstate == state)
2465 * The PCI power management specification states that after a state
2466 * transition between PCI power states, system software must
2467 * guarantee a minimal delay before the function accesses the device.
2468 * Compute the worst case delay that we need to guarantee before we
2469 * access the device. Many devices will be responsive much more
2470 * quickly than this delay, but there are some that don't respond
2471 * instantly to state changes. Transitions to/from D3 state require
2472 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2473 * is done below with DELAY rather than a sleeper function because
2474 * this function can be called from contexts where we cannot sleep.
2476 highest = (oldstate > state) ? oldstate : state;
2477 if (highest == PCI_POWERSTATE_D3)
2479 else if (highest == PCI_POWERSTATE_D2)
2483 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2484 & ~PCIM_PSTAT_DMASK;
2486 case PCI_POWERSTATE_D0:
2487 status |= PCIM_PSTAT_D0;
2489 case PCI_POWERSTATE_D1:
2490 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2491 return (EOPNOTSUPP);
2492 status |= PCIM_PSTAT_D1;
2494 case PCI_POWERSTATE_D2:
2495 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2496 return (EOPNOTSUPP);
2497 status |= PCIM_PSTAT_D2;
2499 case PCI_POWERSTATE_D3:
2500 status |= PCIM_PSTAT_D3;
2507 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2510 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
2517 pci_get_powerstate_method(device_t dev, device_t child)
2519 struct pci_devinfo *dinfo = device_get_ivars(child);
2520 pcicfgregs *cfg = &dinfo->cfg;
2524 if (cfg->pp.pp_cap != 0) {
2525 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2526 switch (status & PCIM_PSTAT_DMASK) {
2528 result = PCI_POWERSTATE_D0;
2531 result = PCI_POWERSTATE_D1;
2534 result = PCI_POWERSTATE_D2;
2537 result = PCI_POWERSTATE_D3;
2540 result = PCI_POWERSTATE_UNKNOWN;
2544 /* No support, device is always at D0 */
2545 result = PCI_POWERSTATE_D0;
2551 * Some convenience functions for PCI device drivers.
2554 static __inline void
2555 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2559 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2561 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2564 static __inline void
2565 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2569 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2571 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2575 pci_enable_busmaster_method(device_t dev, device_t child)
2577 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2582 pci_disable_busmaster_method(device_t dev, device_t child)
2584 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2589 pci_enable_io_method(device_t dev, device_t child, int space)
2594 case SYS_RES_IOPORT:
2595 bit = PCIM_CMD_PORTEN;
2597 case SYS_RES_MEMORY:
2598 bit = PCIM_CMD_MEMEN;
2603 pci_set_command_bit(dev, child, bit);
2608 pci_disable_io_method(device_t dev, device_t child, int space)
2613 case SYS_RES_IOPORT:
2614 bit = PCIM_CMD_PORTEN;
2616 case SYS_RES_MEMORY:
2617 bit = PCIM_CMD_MEMEN;
2622 pci_clear_command_bit(dev, child, bit);
2627 * New style pci driver. Parent device is either a pci-host-bridge or a
2628 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump the interesting fields of a device's parsed config header
 * (identity, location, class, command/status, timers, interrupt pin,
 * and PM/MSI/MSI-X capabilities) to the console.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces and
 * some closing printf arguments); only comments were added here.
 */
pci_print_verbose(struct pci_devinfo *dinfo)
	pcicfgregs *cfg = &dinfo->cfg;

	/* Identity: who the device is and where it lives. */
	printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
	    cfg->vendor, cfg->device, cfg->revid);
	printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
	    cfg->domain, cfg->bus, cfg->slot, cfg->func);
	printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
	    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
	printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
	    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
	/* Latency timer ticks are 30 ns; min_gnt/max_lat units are 250 ns. */
	printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
	    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
	    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
	/* INTx routing, if an interrupt pin is wired up. */
	if (cfg->intpin > 0)
		printf("\tintpin=%c, irq=%d\n",
		    cfg->intpin +'a' -1, cfg->intline);
	/* Power-management capability: supported and current D-states. */
	if (cfg->pp.pp_cap) {
		status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
		printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
		    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
		    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
		    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
		    status & PCIM_PSTAT_DMASK);
	/* MSI capability: message count, 64-bit and per-vector masking. */
	if (cfg->msi.msi_location) {
		ctrl = cfg->msi.msi_ctrl;
		printf("\tMSI supports %d message%s%s%s\n",
		    cfg->msi.msi_msgnum,
		    (cfg->msi.msi_msgnum == 1) ? "" : "s",
		    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
		    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
	/* MSI-X capability: message count and which BAR(s) hold the tables. */
	if (cfg->msix.msix_location) {
		printf("\tMSI-X supports %d message%s ",
		    cfg->msix.msix_msgnum,
		    (cfg->msix.msix_msgnum == 1) ? "" : "s");
		if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
			printf("in map 0x%x\n",
			    cfg->msix.msix_table_bar);
			printf("in maps 0x%x and 0x%x\n",
			    cfg->msix.msix_table_bar,
			    cfg->msix.msix_pba_bar);
2689 pci_porten(device_t dev)
2691 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
2695 pci_memen(device_t dev)
2697 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Probe one BAR: read its current value into *mapp and its size-probe
 * readback (all-ones write) into *testvalp, restoring the original BAR
 * contents and command register before returning.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * some 64-bit-range conditionals); only comments were added here.
 */
pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
	struct pci_devinfo *dinfo;
	pci_addr_t map, testval;

	/*
	 * The device ROM BAR is special.  It is always a 32-bit
	 * memory BAR.  Bit 0 is special and should not be set when
	 * sizing the BAR (hence the 0xfffffffe probe value below).
	 */
	dinfo = device_get_ivars(dev);
	if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
		map = pci_read_config(dev, reg, 4);
		pci_write_config(dev, reg, 0xfffffffe, 4);
		testval = pci_read_config(dev, reg, 4);
		pci_write_config(dev, reg, map, 4);
		*testvalp = testval;

	map = pci_read_config(dev, reg, 4);
	ln2range = pci_maprange(map);
	/* 64-bit BARs consume the following dword as the high half. */
		map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;

	/*
	 * Disable decoding via the command register before
	 * determining the BAR's length since we will be placing it in
	 * a weird state while sizing it.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_write_config(dev, PCIR_COMMAND,
	    cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);

	/*
	 * Determine the BAR's length by writing all 1's.  The bottom
	 * log_2(size) bits of the BAR will stick as 0 when we read
	 * the value back.
	 */
	pci_write_config(dev, reg, 0xffffffff, 4);
	testval = pci_read_config(dev, reg, 4);
	if (ln2range == 64) {
		pci_write_config(dev, reg + 4, 0xffffffff, 4);
		testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;

	/*
	 * Restore the original value of the BAR.  We may have reprogrammed
	 * the BAR of the low-level console device and when booting verbose,
	 * we need the console device addressable.
	 */
	pci_write_config(dev, reg, map, 4);
		pci_write_config(dev, reg + 4, map >> 32, 4);
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	*testvalp = testval;
/*
 * Program a BAR with a new base address and refresh the cached
 * pm->pm_value from the hardware (both halves for 64-bit BARs).
 *
 * NOTE(review): this excerpt has lines elided (braces, the 64-bit-range
 * conditionals); only comments were added here.
 */
pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
	struct pci_devinfo *dinfo;

	/* The device ROM BAR is always a 32-bit memory BAR. */
	dinfo = device_get_ivars(dev);
	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))

	ln2range = pci_maprange(pm->pm_value);
	pci_write_config(dev, pm->pm_reg, base, 4);
	/* 64-bit BARs take the high half in the following dword. */
		pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
	/* Re-read so the cached value reflects what the device latched. */
	pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
		pm->pm_value |= (pci_addr_t)pci_read_config(dev,
		    pm->pm_reg + 4, 4) << 32;
2786 pci_find_bar(device_t dev, int reg)
2788 struct pci_devinfo *dinfo;
2791 dinfo = device_get_ivars(dev);
2792 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2793 if (pm->pm_reg == reg)
2800 pci_bar_enabled(device_t dev, struct pci_map *pm)
2802 struct pci_devinfo *dinfo;
2805 dinfo = device_get_ivars(dev);
2806 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2807 !(pm->pm_value & PCIM_BIOS_ENABLE))
2809 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2810 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2811 return ((cmd & PCIM_CMD_MEMEN) != 0);
2813 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Allocate a pci_map record for a BAR and insert it into the device's
 * map list, kept sorted by register offset.  Asserts that the register
 * is not already recorded.
 *
 * NOTE(review): this excerpt has lines elided (braces, the pm_reg/pm_size
 * assignments, the empty-list branch and the final return); only
 * comments were added here.
 */
static struct pci_map *
pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
	struct pci_devinfo *dinfo;
	struct pci_map *pm, *prev;

	dinfo = device_get_ivars(dev);
	/* M_WAITOK: may sleep; callers run in a sleepable context. */
	pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
	pm->pm_value = value;
	/* Find the insertion point that keeps the list ordered by pm_reg. */
	STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
		KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
		if (STAILQ_NEXT(prev, pm_link) == NULL ||
		    STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
	STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
	STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every recorded BAR (except the ROM BAR) with its cached value,
 * e.g. after a reset or resume clobbered the config space.
 *
 * NOTE(review): this excerpt has lines elided (braces, declarations,
 * the 64-bit-range conditional); only comments were added here.
 */
pci_restore_bars(device_t dev)
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);
	STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
		/* The ROM BAR is skipped here. */
		if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))

		ln2range = pci_maprange(pm->pm_value);
		pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
		/* 64-bit BARs also restore the high dword. */
			pci_write_config(dev, pm->pm_reg + 4,
			    pm->pm_value >> 32, 4);
2862 * Add a resource based on a pci map register. Return 1 if the map
2863 * register is a 32bit map register or 2 if it is a 64bit register.
2866 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2867 int force, int prefetch)
2870 pci_addr_t base, map, testval;
2871 pci_addr_t start, end, count;
2872 int barlen, basezero, flags, maprange, mapsize, type;
2874 struct resource *res;
2877 * The BAR may already exist if the device is a CardBus card
2878 * whose CIS is stored in this BAR.
2880 pm = pci_find_bar(dev, reg);
2882 maprange = pci_maprange(pm->pm_value);
2883 barlen = maprange == 64 ? 2 : 1;
2887 pci_read_bar(dev, reg, &map, &testval);
2888 if (PCI_BAR_MEM(map)) {
2889 type = SYS_RES_MEMORY;
2890 if (map & PCIM_BAR_MEM_PREFETCH)
2893 type = SYS_RES_IOPORT;
2894 mapsize = pci_mapsize(testval);
2895 base = pci_mapbase(map);
2896 #ifdef __PCI_BAR_ZERO_VALID
2899 basezero = base == 0;
2901 maprange = pci_maprange(map);
2902 barlen = maprange == 64 ? 2 : 1;
2905 * For I/O registers, if bottom bit is set, and the next bit up
2906 * isn't clear, we know we have a BAR that doesn't conform to the
2907 * spec, so ignore it. Also, sanity check the size of the data
2908 * areas to the type of memory involved. Memory must be at least
2909 * 16 bytes in size, while I/O ranges must be at least 4.
2911 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2913 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2914 (type == SYS_RES_IOPORT && mapsize < 2))
2917 /* Save a record of this BAR. */
2918 pm = pci_add_bar(dev, reg, map, mapsize);
2920 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2921 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2922 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2923 printf(", port disabled\n");
2924 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2925 printf(", memory disabled\n");
2927 printf(", enabled\n");
2931 * If base is 0, then we have problems if this architecture does
2932 * not allow that. It is best to ignore such entries for the
2933 * moment. These will be allocated later if the driver specifically
2934 * requests them. However, some removable busses look better when
2935 * all resources are allocated, so allow '0' to be overriden.
2937 * Similarly treat maps whose values is the same as the test value
2938 * read back. These maps have had all f's written to them by the
2939 * BIOS in an attempt to disable the resources.
2941 if (!force && (basezero || map == testval))
2943 if ((u_long)base != base) {
2945 "pci%d:%d:%d:%d bar %#x too many address bits",
2946 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2947 pci_get_function(dev), reg);
2952 * This code theoretically does the right thing, but has
2953 * undesirable side effects in some cases where peripherals
2954 * respond oddly to having these bits enabled. Let the user
2955 * be able to turn them off (since pci_enable_io_modes is 1 by
2958 if (pci_enable_io_modes) {
2959 /* Turn on resources that have been left off by a lazy BIOS */
2960 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2961 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2962 cmd |= PCIM_CMD_PORTEN;
2963 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2965 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2966 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2967 cmd |= PCIM_CMD_MEMEN;
2968 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2971 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2973 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2977 count = (pci_addr_t)1 << mapsize;
2978 flags = RF_ALIGNMENT_LOG2(mapsize);
2980 flags |= RF_PREFETCHABLE;
2981 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
2982 start = 0; /* Let the parent decide. */
2986 end = base + count - 1;
2988 resource_list_add(rl, type, reg, start, end, count);
2991 * Try to allocate the resource for this BAR from our parent
2992 * so that this resource range is already reserved. The
2993 * driver for this device will later inherit this resource in
2994 * pci_alloc_resource().
2996 res = resource_list_reserve(rl, bus, dev, type, ®, start, end, count,
2998 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
3000 * If the allocation fails, try to allocate a resource for
3001 * this BAR using any available range. The firmware felt
3002 * it was important enough to assign a resource, so don't
3003 * disable decoding if we can help it.
3005 resource_list_delete(rl, type, reg);
3006 resource_list_add(rl, type, reg, 0, ~0ul, count);
3007 res = resource_list_reserve(rl, bus, dev, type, ®, 0, ~0ul,
3012 * If the allocation fails, delete the resource list entry
3013 * and disable decoding for this device.
3015 * If the driver requests this resource in the future,
3016 * pci_reserve_map() will try to allocate a fresh
3019 resource_list_delete(rl, type, reg);
3020 pci_disable_io(dev, type);
3023 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
3024 pci_get_domain(dev), pci_get_bus(dev),
3025 pci_get_slot(dev), pci_get_function(dev), reg);
3027 start = rman_get_start(res);
3028 pci_write_bar(dev, pm, start);
3034 * For ATA devices we need to decide early what addressing mode to use.
3035 * Legacy demands that the primary and secondary ATA ports sits on the
3036 * same addresses that old ISA hardware did. This dictates that we use
3037 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Map the BARs of an ATA controller.  If both primary and secondary
 * channels can run in PCI-native mode, switch the controller to it and
 * use the BARs; otherwise reserve the legacy ISA-compat port ranges
 * (0x1f0/0x3f6 primary, 0x170/0x376 secondary).  BARs 4/5 (bus-master
 * DMA) are always added.
 *
 * NOTE(review): this excerpt has lines elided (braces, rid assignments,
 * trailing resource_list_reserve arguments); only comments were added.
 */
pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
    uint32_t prefetchmask)
	int rid, type, progif;

	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			printf("Trying ATA native PCI addressing mode\n");
			/* 0x05 switches both channels to native mode. */
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);

	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	/* Primary channel: native BARs 0/1, else legacy 0x1f0/0x3f6. */
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
		(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
		(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
	/* Secondary channel: native BARs 2/3, else legacy 0x170/0x376. */
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
		(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
		(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
	/* Bus-master DMA registers are in BARs 4/5 in either mode. */
	pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
/*
 * Determine the legacy INTx IRQ for a device — from a user tunable, the
 * intline register, or by asking the parent bridge to route one — then
 * record it as the rid-0 SYS_RES_IRQ resource and update intline if it
 * changed.
 *
 * NOTE(review): this excerpt has lines elided (braces, early returns,
 * the irq declaration); only comments were added here.
 */
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	snprintf(tunable_name, sizeof(tunable_name),
	    "hw.pci%d.%d.%d.INT%c.irq",
	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	/* Reject out-of-range tunable values (valid IRQs are 1..254). */
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us.  If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		pci_write_config(dev, PCIR_INTLINE, irq, 1);

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3142 /* Perform early OHCI takeover from SMM. */
/*
 * Take ownership of an OHCI controller from SMM firmware before the USB
 * stack attaches: request an ownership change, poll for the BIOS to
 * release it, hard-reset if it does not respond, then mask all
 * interrupts.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * the rid initialization and the polling delay); only comments added.
 */
ohci_early_takeover(device_t self)
	struct resource *res;

	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);

	ctl = bus_read_4(res, OHCI_CONTROL);
	/* OHCI_IR set means SMM currently owns the controller. */
	if (ctl & OHCI_IR) {
		printf("ohci early: "
		    "SMM active, request owner change\n");
		bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
		/* Poll (up to 100 iterations) for the BIOS to let go. */
		for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
			ctl = bus_read_4(res, OHCI_CONTROL);
		/* Unresponsive SMM: force a host-controller reset. */
		if (ctl & OHCI_IR) {
			printf("ohci early: "
			    "SMM does not respond, resetting\n");
			bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
	/* Disable interrupts */
	bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);

	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3179 /* Perform early UHCI takeover from SMM. */
/*
 * Take ownership of a UHCI controller from legacy/SMM emulation by
 * reprogramming the LEGSUP register, then mask the controller's
 * interrupts via its I/O-port register block.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * the NULL check on the allocated resource); only comments added.
 */
uhci_early_takeover(device_t self)
	struct resource *res;

	/*
	 * Set the PIRQD enable bit and switch off all the others. We don't
	 * want legacy support to interfere with us XXX Does this also mean
	 * that the BIOS won't touch the keyboard anymore if it is connected
	 * to the ports of the root hub?
	 */
	pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);

	/* Disable interrupts */
	rid = PCI_UHCI_BASE_REG;
	res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
		bus_write_2(res, UHCI_INTR, 0);
		bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3203 /* Perform early EHCI takeover from SMM. */
/*
 * Take ownership of an EHCI controller from SMM firmware: walk the
 * extended-capability list looking for the legacy-support capability,
 * set the OS-owned semaphore, poll for the BIOS semaphore to clear,
 * then mask the controller's interrupts.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * rid setup, continue/break statements, polling delay); comments only.
 */
ehci_early_takeover(device_t self)
	struct resource *res;

	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);

	cparams = bus_read_4(res, EHCI_HCCPARAMS);

	/* Synchronise with the BIOS if it owns the controller. */
	for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
	    eecp = EHCI_EECP_NEXT(eec)) {
		eec = pci_read_config(self, eecp, 4);
		/* Only the legacy-support capability is of interest. */
		if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
		bios_sem = pci_read_config(self, eecp +
		    EHCI_LEGSUP_BIOS_SEM, 1);
		/* BIOS semaphore clear: firmware does not own it. */
		if (bios_sem == 0) {
			printf("ehci early: "
			    "SMM active, request owner change\n");
		/* Claim ownership by setting the OS semaphore. */
		pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
		/* Poll (up to 100 iterations) for the BIOS to release. */
		for (i = 0; (i < 100) && (bios_sem != 0); i++) {
			bios_sem = pci_read_config(self, eecp +
			    EHCI_LEGSUP_BIOS_SEM, 1);
		if (bios_sem != 0) {
			printf("ehci early: "
			    "SMM does not respond\n");
		/* Disable interrupts */
		offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
		bus_write_4(res, offs + EHCI_USBINTR, 0);

	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3259 /* Perform early XHCI takeover from SMM. */
/*
 * Take ownership of an XHCI controller from SMM firmware: walk the
 * extended-capability list (dword offsets) for the USB-legacy
 * capability, set the OS semaphore, poll for the BIOS semaphore to
 * clear, then stop the controller and mask interrupts.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * rid setup, continue statements, polling delay); comments only.
 */
xhci_early_takeover(device_t self)
	struct resource *res;

	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);

	cparams = bus_read_4(res, XHCI_HCSPARAMS0);

	/* Synchronise with the BIOS if it owns the controller. */
	for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
	    eecp += XHCI_XECP_NEXT(eec) << 2) {
		eec = bus_read_4(res, eecp);

		/* Only the USB-legacy capability is of interest. */
		if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)

		bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
			printf("xhci early: "
			    "SMM active, request owner change\n");
		/* Claim ownership by setting the OS semaphore. */
		bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);

		/* wait a maximum of 5 seconds */
		for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
			bios_sem = bus_read_1(res, eecp +
			    XHCI_XECP_BIOS_SEM);
		if (bios_sem != 0) {
			printf("xhci early: "
			    "SMM does not respond\n");
		/* Disable interrupts */
		offs = bus_read_1(res, XHCI_CAPLENGTH);
		bus_write_4(res, offs + XHCI_USBCMD, 0);
		/* Read back USBSTS to flush the posted write. */
		bus_read_4(res, offs + XHCI_USBSTS);

	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3321 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
 * Reserve the secondary/subordinate bus-number range programmed into a
 * PCI-PCI or CardBus bridge from our parent's bus-number rman.  Applies
 * chipset/BIOS quirks first; on failure (or if pci_clear_buses is set)
 * the secbus/subbus registers are cleared so renumbering happens later.
 *
 * NOTE(review): this excerpt has lines elided (braces, break/default
 * cases, start/end setup, failure-path structure); only comments added.
 */
pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
    struct resource_list *rl)
	struct resource *res;
	u_long start, end, count;
	int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;

	/* Pick the register pair for the bridge flavor. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		sub_reg = PCIR_SUBBUS_1;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		sub_reg = PCIR_SUBBUS_2;

	/*
	 * If the existing bus range is valid, attempt to reserve it
	 * from our parent.  If this fails for any reason, clear the
	 * secbus and subbus registers.
	 *
	 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
	 * This would at least preserve the existing sec_bus if it is
	 * valid.
	 */
	sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
	sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);

	/* Quirk handling. */
	switch (pci_get_devid(dev)) {
	case 0x12258086:		/* Intel 82454KX/GX (Orion) */
		/* Derive the bus range from the supervisor-bus register. */
		sup_bus = pci_read_config(dev, 0x41, 1);
		if (sup_bus != 0xff) {
			sec_bus = sup_bus + 1;
			sub_bus = sup_bus + 1;
			PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
			PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);

		/* Compaq R3000 BIOS sets wrong subordinate bus number. */
		if ((cp = getenv("smbios.planar.maker")) == NULL)
		if (strncmp(cp, "Compal", 6) != 0) {
		if ((cp = getenv("smbios.planar.product")) == NULL)
		if (strncmp(cp, "08A0", 4) != 0) {
		if (sub_bus < 0xa) {
			PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);

		printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
	/* A valid range has a non-zero secbus and subbus >= secbus. */
	if (sec_bus > 0 && sub_bus >= sec_bus) {
		count = end - start + 1;
		resource_list_add(rl, PCI_RES_BUS, 0, 0ul, ~0ul, count);

		/*
		 * If requested, clear secondary bus registers in
		 * bridge devices to force a complete renumbering
		 * rather than reserving the existing range.  However,
		 * preserve the existing size.
		 */
		if (pci_clear_buses)

		res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
		    start, end, count, 0);
			    "pci%d:%d:%d:%d secbus failed to allocate\n",
			    pci_get_domain(dev), pci_get_bus(dev),
			    pci_get_slot(dev), pci_get_function(dev));
	/* Failure path: zero the registers so renumbering can occur. */
	PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
	PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
3427 static struct resource *
/*
 * Allocate a bus-number range for a bridge child, lazily reserving it
 * from our parent if it was not reserved at probe time, then program
 * the child's secbus/subbus registers to match the granted range.
 *
 * NOTE(review): this excerpt has lines elided (braces, break/default
 * cases, the cfg declaration, the NULL check on `res`); comments only.
 */
pci_alloc_secbus(device_t dev, device_t child, int *rid, u_long start,
    u_long end, u_long count, u_int flags)
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource *res;
	int sec_reg, sub_reg;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	/* Pick the register pair for the bridge flavor. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		sub_reg = PCIR_SUBBUS_1;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		sub_reg = PCIR_SUBBUS_2;

	if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
		resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
	/* Lazily reserve (without activating) if not reserved yet. */
	if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
		res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
		    start, end, count, flags & ~RF_ACTIVE);
			resource_list_delete(rl, PCI_RES_BUS, *rid);
			device_printf(child, "allocating %lu bus%s failed\n",
			    count, count == 1 ? "" : "es");
			device_printf(child,
			    "Lazy allocation of %lu bus%s at %lu\n", count,
			    count == 1 ? "" : "es", rman_get_start(res));
		/* Program the bridge to decode the granted range. */
		PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
		PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
	return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
	    end, count, flags));
/*
 * Populate a new child's resource list: map its BARs (with quirk and
 * ATA special-casing), route its INTx interrupt, perform early USB
 * controller takeover from SMM, and reserve bridge bus ranges.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * break statements, #else/#endif lines); only comments were added.
 */
pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	const struct pci_quirk *q;

	dinfo = device_get_ivars(dev);
	rl = &dinfo->resources;
	devid = (cfg->device << 16) | cfg->vendor;

	/* ATA devices need special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(bus, dev, rl, force, prefetchmask);

	for (i = 0; i < cfg->nummaps;) {
		/*
		 * Skip quirked resources.
		 */
		for (q = &pci_quirks[0]; q->devid != 0; q++)
			if (q->devid == devid &&
			    q->type == PCI_QUIRK_UNMAP_REG &&
			    q->arg1 == PCIR_BAR(i))
		if (q->devid != 0) {
		/* pci_add_map returns 1 or 2 BAR slots consumed. */
		i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
		    prefetchmask & (1 << i));

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid != 0; q++)
		if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(bus, dev, q->arg1, rl, force, 0);

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
#ifdef __PCI_REROUTE_INTERRUPT
		/*
		 * Try to re-route interrupts.  Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
		pci_assign_interrupt(bus, dev, 0);

	/* Wrest USB controllers away from SMM before the stack attaches. */
	if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
	    pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
		if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
			xhci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
			ehci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
			ohci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
			uhci_early_takeover(dev);

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	/*
	 * Reserve resources for secondary bus ranges behind bridge
	 * devices.
	 */
	pci_reserve_secbus(bus, dev, cfg, rl);
3561 static struct pci_devinfo *
3562 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3563 int slot, int func, size_t dinfo_size)
3565 struct pci_devinfo *dinfo;
3567 dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size);
3569 pci_add_child(dev, dinfo);
/*
 * Enumerate all devices on a bus.  Slot 0 / function 0 is probed first
 * so ARI can be enabled (ARI changes the legal slot/function space),
 * then the remaining slots and functions are scanned.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * continue statements, the dinfo_size argument continuation and the
 * #undef of REG); only comments were added here.
 */
pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int s, f, pcifunchigh;

	/*
	 * Try to detect a device at slot 0, function 0.  If it exists, try to
	 * enable ARI.  We must enable ARI before detecting the rest of the
	 * functions on this bus as ARI changes the set of slots and functions
	 * that are legal on this bus.
	 */
	dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0,
	if (dinfo != NULL && pci_enable_ari)
		PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);

	/*
	 * Start looking for new devices on slot 0 at function 1 because we
	 * just identified the device at slot 0, function 0.
	 */

	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
	    ("dinfo_size too small"));
	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++, first_func = 0) {
		hdrtype = REG(PCIR_HDRTYPE, 1);
		/* Skip slots whose header type is implausible. */
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
		/* Multifunction bit widens the function scan range. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCIB_MAXFUNCS(pcib);
		for (f = first_func; f <= pcifunchigh; f++)
			pci_identify_function(pcib, dev, domain, busno, s, f,
3622 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3624 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3625 device_set_ivars(dinfo->cfg.dev, dinfo);
3626 resource_list_init(&dinfo->resources);
3627 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3628 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3629 pci_print_verbose(dinfo);
3630 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
3631 pci_child_added(dinfo->cfg.dev);
/*
 * Default PCI_CHILD_ADDED bus method: a hook invoked after a child is
 * added.  NOTE(review): the body is elided in this excerpt — presumably
 * an empty stub that subclasses override; confirm against upstream.
 */
pci_child_added_method(device_t dev, device_t child)
3641 pci_probe(device_t dev)
3644 device_set_desc(dev, "PCI bus");
3646 /* Allow other subclasses to override this driver. */
3647 return (BUS_PROBE_GENERIC);
/*
 * Common attach work shared by pci and its subclasses: reserve our own
 * bus number from the parent bridge and set up the softc's DMA tag
 * (bounded by PCI_DMA_BOUNDARY for the host bridge level, inherited
 * otherwise).
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * #endif lines, error returns and trailing arguments); comments only.
 */
pci_attach_common(device_t dev)
	struct pci_softc *sc;
#ifdef PCI_DMA_BOUNDARY
	int error, tag_valid;

	sc = device_get_softc(dev);
	domain = pcib_get_domain(dev);
	busno = pcib_get_bus(dev);
	/* Reserve our own bus number so children can't re-use it. */
	sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
	if (sc->sc_bus == NULL) {
		device_printf(dev, "failed to allocate bus number\n");
	device_printf(dev, "domain=%d, physical bus=%d\n",
#ifdef PCI_DMA_BOUNDARY
	/* Only the top-level PCI bus creates the bounded DMA tag. */
	if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
	    devclass_find("pci")) {
		error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
		    PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
		    NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
		    BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
			device_printf(dev, "Failed to create DMA tag: %d\n",
	/* Otherwise inherit the parent's tag. */
	sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Attach method: perform common setup, then enumerate children using
 * the domain/bus numbers reported by the parent bridge.
 *
 * NOTE(review): this excerpt has lines elided (braces, the error-check
 * after pci_attach_common); only comments were added here.
 */
pci_attach(device_t dev)
	int busno, domain, error;

	error = pci_attach_common(dev);

	/*
	 * Since there can be multiple independently numbered PCI
	 * busses on systems with multiple PCI domains, we can't use
	 * the unit number to decide which bus we are probing. We ask
	 * the parent pcib what our domain and bus numbers are.
	 */
	domain = pcib_get_domain(dev);
	busno = pcib_get_bus(dev);
	pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
	return (bus_generic_attach(dev));
/*
 * Detach method: detach all children, then release the bus number that
 * pci_attach_common() reserved.
 *
 * NOTE(review): this excerpt has lines elided (braces, declarations and
 * the error check after bus_generic_detach); only comments were added.
 */
pci_detach(device_t dev)
	struct pci_softc *sc;

	error = bus_generic_detach(dev);
	sc = device_get_softc(dev);
	return (bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus));
/*
 * Helper for suspend/resume: move every attached child to the requested
 * power state, letting the parent bridge's firmware override the state
 * per device via PCIB_POWER_FOR_SLEEP().
 *
 * NOTE(review): this excerpt has lines elided (braces, the dstate
 * parameter/initialization, the child assignment in the loop); only
 * comments were added here.
 */
pci_set_power_children(device_t dev, device_t *devlist, int numdevs,
	device_t child, pcib;

	/*
	 * Set the device to the given state.  If the firmware suggests
	 * a different power state, use it instead.  If power management
	 * is not present, the firmware is responsible for managing
	 * device power.  Skip children who aren't attached since they
	 * are handled separately.
	 */
	pcib = device_get_parent(dev);
	for (i = 0; i < numdevs; i++) {
		if (device_is_attached(child) &&
		    PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
			pci_set_powerstate(child, dstate);
/*
 * Suspend method: snapshot each child's config space, suspend the
 * children, then (if pci_do_power_suspend) power them down for sleep.
 *
 * NOTE(review): this excerpt has lines elided (braces, error returns,
 * the child assignment, the target power state argument); comments only.
 */
pci_suspend(device_t dev)
	device_t child, *devlist;
	struct pci_devinfo *dinfo;
	int error, i, numdevs;

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
	for (i = 0; i < numdevs; i++) {
		dinfo = device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);

	/* Suspend devices before potentially powering them down. */
	error = bus_generic_suspend(dev);
		free(devlist, M_TEMP);
	if (pci_do_power_suspend)
		pci_set_power_children(dev, devlist, numdevs,
	free(devlist, M_TEMP);
/*
 * Resume method: power children back to D0 (if pci_do_power_resume),
 * restore their config space, then resume them — critical device
 * classes (e.g. base peripherals) first, everything else in a second
 * pass.
 *
 * NOTE(review): this excerpt has lines elided (braces, error returns,
 * child assignments, additional case labels and breaks); comments only.
 */
pci_resume(device_t dev)
	device_t child, *devlist;
	struct pci_devinfo *dinfo;
	int error, i, numdevs;

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
	if (pci_do_power_resume)
		pci_set_power_children(dev, devlist, numdevs,

	/* Now the device is powered up, restore its config space. */
	for (i = 0; i < numdevs; i++) {
		dinfo = device_get_ivars(child);

		pci_cfg_restore(child, dinfo);
		/* Unattached children get their saved state refreshed. */
		if (!device_is_attached(child))
			pci_cfg_save(child, dinfo, 1);

	/*
	 * Resume critical devices first, then everything else later.
	 */
	for (i = 0; i < numdevs; i++) {
		switch (pci_get_class(child)) {
		case PCIC_BASEPERIPH:
			DEVICE_RESUME(child);
	/* Second pass: everything that is not in a critical class. */
	for (i = 0; i < numdevs; i++) {
		switch (pci_get_class(child)) {
		case PCIC_BASEPERIPH:
			DEVICE_RESUME(child);
	free(devlist, M_TEMP);
/*
 * Locate the preloaded "pci_vendor_data" module (the vendor/device name
 * database) and publish its address and size in the globals used by the
 * description-lookup code, appending a newline terminator.
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * the NULL check on `data`); only comments were added here.
 */
pci_load_vendor_data(void)

	data = preload_search_by_type("pci_vendor_data");
		ptr = preload_fetch_addr(data);
		sz = preload_fetch_size(data);
		if (ptr != NULL && sz != 0) {
			pci_vendordata = ptr;
			pci_vendordata_size = sz;
			/* terminate the database */
			pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Bus callback invoked when a new driver is registered: give the driver
 * an identify pass, then re-probe every child that is still unclaimed
 * (restoring its config space first, and detaching it again on probe
 * failure so its state stays consistent).
 *
 * NOTE(review): this excerpt has lines elided (declarations, braces,
 * continue statements, the bootverbose guard); only comments added.
 */
pci_driver_added(device_t dev, driver_t *driver)
	struct pci_devinfo *dinfo;

	device_printf(dev, "driver added\n");
	DEVICE_IDENTIFY(driver, dev);
	if (device_get_children(dev, &devlist, &numdevs) != 0)
	for (i = 0; i < numdevs; i++) {
		/* Only unclaimed (not-present) children are re-probed. */
		if (device_get_state(child) != DS_NOTPRESENT)
		dinfo = device_get_ivars(child);
		pci_print_verbose(dinfo);
		pci_printf(&dinfo->cfg, "reprobing on driver added\n");
		pci_cfg_restore(child, dinfo);
		if (device_probe_and_attach(child) != 0)
			pci_child_detached(dev, child);
	free(devlist, M_TEMP);
/*
 * Bus method: set up an interrupt handler for a child.  After the
 * generic setup succeeds, program the MSI/MSI-X address and data
 * registers via the parent bridge (PCIB_MAP_MSI) the first time a
 * handler is attached to a message, and manage the INTx-disable bit in
 * the command register so only the active interrupt style is enabled.
 */
3895 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3896 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3898 struct pci_devinfo *dinfo;
3899 struct msix_table_entry *mte;
3900 struct msix_vector *mv;
3906 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3911 /* If this is not a direct child, just bail out. */
3912 if (device_get_parent(child) != dev) {
3917 rid = rman_get_rid(irq);
3919 /* Make sure that INTx is enabled */
3920 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3923 * Check to see if the interrupt is MSI or MSI-X.
3924 * Ask our parent to map the MSI and give
3925 * us the address and data register values.
3926 * If we fail for some reason, teardown the
3927 * interrupt handler.
3929 dinfo = device_get_ivars(child);
3930 if (dinfo->cfg.msi.msi_alloc > 0) {
/* msi_addr == 0 means the vectors have not been mapped yet. */
3931 if (dinfo->cfg.msi.msi_addr == 0) {
3932 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3933 ("MSI has handlers, but vectors not mapped"));
3934 error = PCIB_MAP_MSI(device_get_parent(dev),
3935 child, rman_get_start(irq), &addr, &data);
3938 dinfo->cfg.msi.msi_addr = addr;
3939 dinfo->cfg.msi.msi_data = data;
/* Enable the MSI capability only when the first handler arrives. */
3941 if (dinfo->cfg.msi.msi_handlers == 0)
3942 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3943 dinfo->cfg.msi.msi_data);
3944 dinfo->cfg.msi.msi_handlers++;
3946 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3947 ("No MSI or MSI-X interrupts allocated"));
3948 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3949 ("MSI-X index too high"));
/* MSI-X: rid N corresponds to table entry N-1. */
3950 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3951 KASSERT(mte->mte_vector != 0, ("no message vector"));
3952 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3953 KASSERT(mv->mv_irq == rman_get_start(irq),
3955 if (mv->mv_address == 0) {
3956 KASSERT(mte->mte_handlers == 0,
3957 ("MSI-X table entry has handlers, but vector not mapped"));
3958 error = PCIB_MAP_MSI(device_get_parent(dev),
3959 child, rman_get_start(irq), &addr, &data);
3962 mv->mv_address = addr;
/* Program and unmask the table entry for its first handler. */
3965 if (mte->mte_handlers == 0) {
3966 pci_enable_msix(child, rid - 1, mv->mv_address,
3968 pci_unmask_msix(child, rid - 1);
3970 mte->mte_handlers++;
3974 * Make sure that INTx is disabled if we are using MSI/MSI-X,
3975 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
3976 * in which case we "enable" INTx so MSI/MSI-X actually works.
3978 if (!pci_has_quirk(pci_get_devid(child),
3979 PCI_QUIRK_MSI_INTX_BUG))
3980 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3982 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* On mapping failure, undo the generic setup before returning. */
3985 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus method: tear down an interrupt handler.  Mirrors pci_setup_intr:
 * decrement the per-message handler count and, when it reaches zero,
 * mask the MSI-X table entry or disable MSI entirely; re-set the
 * INTx-disable bit since no legacy handler remains active either.
 */
3995 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3998 struct msix_table_entry *mte;
3999 struct resource_list_entry *rle;
4000 struct pci_devinfo *dinfo;
4003 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
4006 /* If this isn't a direct child, just bail out */
4007 if (device_get_parent(child) != dev)
4008 return(bus_generic_teardown_intr(dev, child, irq, cookie));
4010 rid = rman_get_rid(irq);
4013 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4016 * Check to see if the interrupt is MSI or MSI-X. If so,
4017 * decrement the appropriate handlers count and mask the
4018 * MSI-X message, or disable MSI messages if the count
4021 dinfo = device_get_ivars(child);
4022 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
/* Sanity: the resource being torn down must be the one we track. */
4023 if (rle->res != irq)
4025 if (dinfo->cfg.msi.msi_alloc > 0) {
4026 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4027 ("MSI-X index too high"));
4028 if (dinfo->cfg.msi.msi_handlers == 0)
4030 dinfo->cfg.msi.msi_handlers--;
4031 if (dinfo->cfg.msi.msi_handlers == 0)
4032 pci_disable_msi(child);
4034 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4035 ("No MSI or MSI-X interrupts allocated"));
4036 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4037 ("MSI-X index too high"));
4038 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4039 if (mte->mte_handlers == 0)
4041 mte->mte_handlers--;
4042 if (mte->mte_handlers == 0)
4043 pci_mask_msix(child, rid - 1);
4046 error = bus_generic_teardown_intr(dev, child, irq, cookie);
4049 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus method: print a one-line description of a child device, listing
 * its port/memory/IRQ resources, flags, and slot.function location.
 * Returns the number of characters printed.
 */
4054 pci_print_child(device_t dev, device_t child)
4056 struct pci_devinfo *dinfo;
4057 struct resource_list *rl;
4060 dinfo = device_get_ivars(child);
4061 rl = &dinfo->resources;
4063 retval += bus_print_child_header(dev, child);
4065 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
4066 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
4067 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
4068 if (device_get_flags(dev))
4069 retval += printf(" flags %#x", device_get_flags(dev));
4071 retval += printf(" at device %d.%d", pci_get_slot(child),
4072 pci_get_function(child));
4074 retval += bus_print_child_domain(dev, child);
4075 retval += bus_print_child_footer(dev, child);
/*
 * Table mapping PCI class/subclass codes to human-readable names,
 * consulted by pci_probe_nomatch() for devices with no driver.  A
 * subclass of -1 is the fallback entry for the whole class; "report"
 * selects whether the entry is announced always (1) or only under
 * bootverbose (0).
 */
4084 int report; /* 0 = bootverbose, 1 = always */
4086 } pci_nomatch_tab[] = {
4087 {PCIC_OLD, -1, 1, "old"},
4088 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4089 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4090 {PCIC_STORAGE, -1, 1, "mass storage"},
4091 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
4092 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
4093 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
4094 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
4095 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
4096 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
4097 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
4098 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
4099 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
4100 {PCIC_NETWORK, -1, 1, "network"},
4101 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
4102 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
4103 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
4104 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
4105 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
4106 {PCIC_DISPLAY, -1, 1, "display"},
4107 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
4108 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
4109 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
4110 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4111 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
4112 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
4113 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
4114 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
4115 {PCIC_MEMORY, -1, 1, "memory"},
4116 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
4117 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
4118 {PCIC_BRIDGE, -1, 1, "bridge"},
4119 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4120 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4121 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4122 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4123 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4124 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4125 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4126 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4127 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4128 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4129 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
4130 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
4131 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
4132 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
4133 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4134 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
4135 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
4136 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
4137 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
4138 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4139 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
4140 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"},
4141 {PCIC_INPUTDEV, -1, 1, "input device"},
4142 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
4143 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4144 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
4145 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
4146 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
4147 {PCIC_DOCKING, -1, 1, "docking station"},
4148 {PCIC_PROCESSOR, -1, 1, "processor"},
4149 {PCIC_SERIALBUS, -1, 1, "serial bus"},
4150 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
4151 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
4152 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
4153 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
4154 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
4155 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
4156 {PCIC_WIRELESS, -1, 1, "wireless controller"},
4157 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
4158 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
4159 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
4160 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
4161 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
4162 {PCIC_SATCOM, -1, 1, "satellite communication"},
4163 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
4164 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
4165 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
4166 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
4167 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
4168 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
4169 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
4170 {PCIC_DASP, -1, 0, "dasp"},
4171 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
/*
 * Bus method: announce a child device no driver claimed.  Prefer the
 * loaded vendor database description (pci_describe_device); otherwise
 * fall back to the class/subclass names from pci_nomatch_tab, honoring
 * each entry's "report" flag vs. bootverbose.  Finally power the
 * driverless device down via pci_cfg_save(..., 1).
 */
4176 pci_probe_nomatch(device_t dev, device_t child)
4179 const char *cp, *scp;
4183 * Look for a listing for this device in a loaded device database.
4186 if ((device = pci_describe_device(child)) != NULL) {
4187 device_printf(dev, "<%s>", device);
4188 free(device, M_DEVBUF);
4191 * Scan the class/subclass descriptions for a general
4196 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4197 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* Class-wide (-1) entry gives the class name ... */
4198 if (pci_nomatch_tab[i].subclass == -1) {
4199 cp = pci_nomatch_tab[i].desc;
4200 report = pci_nomatch_tab[i].report;
/* ... and an exact subclass match refines it. */
4201 } else if (pci_nomatch_tab[i].subclass ==
4202 pci_get_subclass(child)) {
4203 scp = pci_nomatch_tab[i].desc;
4204 report = pci_nomatch_tab[i].report;
4208 if (report || bootverbose) {
4209 device_printf(dev, "<%s%s%s>",
4211 ((cp != NULL) && (scp != NULL)) ? ", " : "",
4215 if (report || bootverbose) {
4216 printf(" at device %d.%d (no driver attached)\n",
4217 pci_get_slot(child), pci_get_function(child));
4219 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Bus method: clean up after a detached child.  Reclaim any resources
 * the driver leaked, warning about each kind, then power the device
 * down.  Ordering matters: IRQs before MSI release, MSI release before
 * memory (the MSI-X tables live in a memory BAR).
 */
4223 pci_child_detached(device_t dev, device_t child)
4225 struct pci_devinfo *dinfo;
4226 struct resource_list *rl;
4228 dinfo = device_get_ivars(child);
4229 rl = &dinfo->resources;
4232 * Have to deallocate IRQs before releasing any MSI messages and
4233 * have to release MSI messages before deallocating any memory
4236 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4237 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4238 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4239 pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
4240 (void)pci_release_msi(child);
4242 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4243 pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
4244 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4245 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n")
4247 if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4248 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
4251 pci_cfg_save(child, dinfo, 1);
4255 * Parse the PCI device database, if loaded, and return a pointer to a
4256 * description of the device.
4258 * The database is flat text formatted as follows:
4260 * Any line not in a valid format is ignored.
4261 * Lines are terminated with newline '\n' characters.
4263 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4266 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4267 * - devices cannot be listed without a corresponding VENDOR line.
4268 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4269 * another TAB, then the device name.
4273 * Assuming (ptr) points to the beginning of a line in the database,
4274 * return the vendor or device and description of the next entry.
4275 * The value of (vendor) or (device) inappropriate for the entry type
4276 * is set to -1. Returns nonzero at the end of the database.
4278 * Note that this is slightly unrobust in the face of corrupt data;
4279 * we attempt to safeguard against this by spamming the end of the
4280 * database with a newline when we initialise.
/*
 * Parse one line of the vendor database at *ptr.  A line starting with
 * a hex code is a VENDOR entry; a TAB-indented one is a DEVICE entry.
 * Fills in (*vendor) or (*device) plus (*desc) and advances *ptr to
 * the next line; lines that match neither format are skipped.
 */
4283 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* 'left' bounds every scan so we never run off the preloaded buffer. */
4292 left = pci_vendordata_size - (cp - pci_vendordata);
4300 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
4304 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4307 /* skip to next line */
4308 while (*cp != '\n' && left > 0) {
4317 /* skip to next line */
4318 while (*cp != '\n' && left > 0) {
4322 if (*cp == '\n' && left > 0)
/*
 * Return a malloc'd (M_DEVBUF) "vendor, device" description string for
 * dev by scanning the loaded vendor database, or NULL if no database is
 * loaded or allocation fails.  The caller owns (and must free) the
 * returned string.
 */
4329 pci_describe_device(device_t dev)
4332 char *desc, *vp, *dp, *line;
4334 desc = vp = dp = NULL;
4337 * If we have no vendor data, we can't do anything.
4339 if (pci_vendordata == NULL)
4343 * Scan the vendor data looking for this device
4345 line = pci_vendordata;
/* 80-byte scratch buffers match the %80[^\n] parse width. */
4346 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4349 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4351 if (vendor == pci_get_vendor(dev))
4354 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4357 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4365 if (device == pci_get_device(dev))
/* Unknown device under a known vendor: show the raw device id. */
4369 snprintf(dp, 80, "0x%x", pci_get_device(dev));
4370 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4372 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus method: read a PCI instance variable for a child, returning the
 * cached config-space value from the child's pcicfgregs.  MINGNT and
 * MAXLAT only exist in type 0 headers, hence the hdrtype checks.
 */
4382 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4384 struct pci_devinfo *dinfo;
4387 dinfo = device_get_ivars(child);
4391 case PCI_IVAR_ETHADDR:
4393 * The generic accessor doesn't deal with failure, so
4394 * we set the return value, then return an error.
4396 *((uint8_t **) result) = NULL;
4398 case PCI_IVAR_SUBVENDOR:
4399 *result = cfg->subvendor;
4401 case PCI_IVAR_SUBDEVICE:
4402 *result = cfg->subdevice;
4404 case PCI_IVAR_VENDOR:
4405 *result = cfg->vendor;
4407 case PCI_IVAR_DEVICE:
4408 *result = cfg->device;
4410 case PCI_IVAR_DEVID:
/* devid packs device in the high 16 bits, vendor in the low 16. */
4411 *result = (cfg->device << 16) | cfg->vendor;
4413 case PCI_IVAR_CLASS:
4414 *result = cfg->baseclass;
4416 case PCI_IVAR_SUBCLASS:
4417 *result = cfg->subclass;
4419 case PCI_IVAR_PROGIF:
4420 *result = cfg->progif;
4422 case PCI_IVAR_REVID:
4423 *result = cfg->revid;
4425 case PCI_IVAR_INTPIN:
4426 *result = cfg->intpin;
4429 *result = cfg->intline;
4431 case PCI_IVAR_DOMAIN:
4432 *result = cfg->domain;
4438 *result = cfg->slot;
4440 case PCI_IVAR_FUNCTION:
4441 *result = cfg->func;
4443 case PCI_IVAR_CMDREG:
4444 *result = cfg->cmdreg;
4446 case PCI_IVAR_CACHELNSZ:
4447 *result = cfg->cachelnsz;
4449 case PCI_IVAR_MINGNT:
4450 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4454 *result = cfg->mingnt;
4456 case PCI_IVAR_MAXLAT:
4457 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4461 *result = cfg->maxlat;
4463 case PCI_IVAR_LATTIMER:
4464 *result = cfg->lattimer;
/*
 * Bus method: write a PCI instance variable.  Only INTPIN is mutable;
 * all identification registers are read-only from the child's point of
 * view and return EINVAL.
 */
4473 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4475 struct pci_devinfo *dinfo;
4477 dinfo = device_get_ivars(child);
4480 case PCI_IVAR_INTPIN:
4481 dinfo->cfg.intpin = value;
4483 case PCI_IVAR_ETHADDR:
4484 case PCI_IVAR_SUBVENDOR:
4485 case PCI_IVAR_SUBDEVICE:
4486 case PCI_IVAR_VENDOR:
4487 case PCI_IVAR_DEVICE:
4488 case PCI_IVAR_DEVID:
4489 case PCI_IVAR_CLASS:
4490 case PCI_IVAR_SUBCLASS:
4491 case PCI_IVAR_PROGIF:
4492 case PCI_IVAR_REVID:
4494 case PCI_IVAR_DOMAIN:
4497 case PCI_IVAR_FUNCTION:
4498 return (EINVAL); /* disallow for now */
4505 #include "opt_ddb.h"
4507 #include <ddb/ddb.h>
4508 #include <sys/cons.h>
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (location, class, card/chip ids,
 * revision, header type), stopping early if the pager is quit.
 */
4511 * List resources based on pci map registers, used for within ddb
4514 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4516 struct pci_devinfo *dinfo;
4517 struct devlist *devlist_head;
4520 int i, error, none_count;
4523 /* get the head of the device queue */
4524 devlist_head = &pci_devq;
4527 * Go through the list of devices and print out devices
4529 for (error = 0, i = 0,
4530 dinfo = STAILQ_FIRST(devlist_head);
4531 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4532 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4534 /* Populate pd_name and pd_unit */
4537 name = device_get_name(dinfo->cfg.dev);
4540 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4541 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4542 (name && *name) ? name : "none",
4543 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4545 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4546 p->pc_sel.pc_func, (p->pc_class << 16) |
4547 (p->pc_subclass << 8) | p->pc_progif,
4548 (p->pc_subdevice << 16) | p->pc_subvendor,
4549 (p->pc_device << 16) | p->pc_vendor,
4550 p->pc_revid, p->pc_hdr);
/*
 * Lazily size and reserve the resource backing a BAR that was not
 * allocated at enumeration time.  Reads the BAR to determine its real
 * size/type, overrides the caller's requested count/alignment with the
 * hardware's, reserves the range from the resource list, and writes the
 * assigned base address back into the BAR.
 */
4555 static struct resource *
4556 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4557 u_long start, u_long end, u_long count, u_int flags)
4559 struct pci_devinfo *dinfo = device_get_ivars(child);
4560 struct resource_list *rl = &dinfo->resources;
4561 struct resource *res;
4563 pci_addr_t map, testval;
4567 pm = pci_find_bar(child, *rid);
4569 /* This is a BAR that we failed to allocate earlier. */
4570 mapsize = pm->pm_size;
4574 * Weed out the bogons, and figure out how large the
4575 * BAR/map is. BARs that read back 0 here are bogus
4576 * and unimplemented. Note: atapci in legacy mode are
4577 * special and handled elsewhere in the code. If you
4578 * have a atapci device in legacy mode and it fails
4579 * here, that other code is broken.
4581 pci_read_bar(child, *rid, &map, &testval);
4584 * Determine the size of the BAR and ignore BARs with a size
4585 * of 0. Device ROM BARs use a different mask value.
4587 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4588 mapsize = pci_romsize(testval);
4590 mapsize = pci_mapsize(testval);
4593 pm = pci_add_bar(child, *rid, map, mapsize);
/* Reject requests whose type disagrees with what the BAR decodes. */
4596 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4597 if (type != SYS_RES_MEMORY) {
4600 "child %s requested type %d for rid %#x,"
4601 " but the BAR says it is an memio\n",
4602 device_get_nameunit(child), type, *rid);
4606 if (type != SYS_RES_IOPORT) {
4609 "child %s requested type %d for rid %#x,"
4610 " but the BAR says it is an ioport\n",
4611 device_get_nameunit(child), type, *rid);
4617 * For real BARs, we need to override the size that
4618 * the driver requests, because that's what the BAR
4619 * actually uses and we would otherwise have a
4620 * situation where we might allocate the excess to
4621 * another driver, which won't work.
4623 count = (pci_addr_t)1 << mapsize;
/* BARs are naturally aligned to their size. */
4624 if (RF_ALIGNMENT(flags) < mapsize)
4625 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4626 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4627 flags |= RF_PREFETCHABLE;
4630 * Allocate enough resource, and then write back the
4631 * appropriate BAR for that resource.
4633 resource_list_add(rl, type, *rid, start, end, count);
4634 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
4635 count, flags & ~RF_ACTIVE);
4637 resource_list_delete(rl, type, *rid);
4638 device_printf(child,
4639 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4640 count, *rid, type, start, end);
4644 device_printf(child,
4645 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4646 count, *rid, type, rman_get_start(res));
4647 map = rman_get_start(res);
4648 pci_write_bar(child, pm, map);
/*
 * Bus method: allocate a resource for a child.  Non-direct children
 * pass straight up the tree.  For direct children this performs lazy
 * allocation: route an interrupt on first IRQ request (unless MSI is in
 * use), pass bridge-window registers up unchanged, and reserve unseen
 * BARs via pci_reserve_map() before satisfying the request from the
 * child's resource list.
 */
4654 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4655 u_long start, u_long end, u_long count, u_int flags)
4657 struct pci_devinfo *dinfo;
4658 struct resource_list *rl;
4659 struct resource_list_entry *rle;
4660 struct resource *res;
4663 if (device_get_parent(child) != dev)
4664 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4665 type, rid, start, end, count, flags));
4668 * Perform lazy resource allocation
4670 dinfo = device_get_ivars(child);
4671 rl = &dinfo->resources;
4674 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
4676 return (pci_alloc_secbus(dev, child, rid, start, end, count,
4681 * Can't alloc legacy interrupt once MSI messages have
4684 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4685 cfg->msix.msix_alloc > 0))
4689 * If the child device doesn't have an interrupt
4690 * routed and is deserving of an interrupt, try to
4693 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4695 pci_assign_interrupt(dev, child, 0);
4697 case SYS_RES_IOPORT:
4698 case SYS_RES_MEMORY:
4701 * PCI-PCI bridge I/O window resources are not BARs.
4702 * For those allocations just pass the request up the
4705 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4707 case PCIR_IOBASEL_1:
4708 case PCIR_MEMBASE_1:
4709 case PCIR_PMBASEL_1:
4711 * XXX: Should we bother creating a resource
4714 return (bus_generic_alloc_resource(dev, child,
4715 type, rid, start, end, count, flags));
4719 /* Reserve resources for this BAR if needed. */
4720 rle = resource_list_find(rl, type, *rid);
4722 res = pci_reserve_map(dev, child, type, rid, start, end,
4728 return (resource_list_alloc(rl, dev, child, type, rid,
4729 start, end, count, flags));
/*
 * Bus method: release a child's resource.  Bridge I/O window registers
 * are not BARs, so those go up the tree; everything else is released
 * through the child's resource list.
 */
4733 pci_release_resource(device_t dev, device_t child, int type, int rid,
4736 struct pci_devinfo *dinfo;
4737 struct resource_list *rl;
4740 if (device_get_parent(child) != dev)
4741 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4744 dinfo = device_get_ivars(child);
4748 * PCI-PCI bridge I/O window resources are not BARs. For
4749 * those allocations just pass the request up the tree.
4751 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
4752 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
4754 case PCIR_IOBASEL_1:
4755 case PCIR_MEMBASE_1:
4756 case PCIR_PMBASEL_1:
4757 return (bus_generic_release_resource(dev, child, type,
4763 rl = &dinfo->resources;
4764 return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * Bus method: activate a resource, then for direct children enable the
 * corresponding decoding — the ROM enable bit for a device ROM BAR,
 * and the I/O or memory enable command bit via PCI_ENABLE_IO.
 */
4768 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4771 struct pci_devinfo *dinfo;
4774 error = bus_generic_activate_resource(dev, child, type, rid, r);
4778 /* Enable decoding in the command register when activating BARs. */
4779 if (device_get_parent(child) == dev) {
4780 /* Device ROMs need their decoding explicitly enabled. */
4781 dinfo = device_get_ivars(child);
4782 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4783 pci_write_bar(child, pci_find_bar(child, rid),
4784 rman_get_start(r) | PCIM_BIOS_ENABLE);
4786 case SYS_RES_IOPORT:
4787 case SYS_RES_MEMORY:
4788 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus method: deactivate a resource; additionally clear the enable bit
 * in a device ROM BAR for direct children (the inverse of activate).
 */
4796 pci_deactivate_resource(device_t dev, device_t child, int type,
4797 int rid, struct resource *r)
4799 struct pci_devinfo *dinfo;
4802 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4806 /* Disable decoding for device ROMs. */
4807 if (device_get_parent(child) == dev) {
4808 dinfo = device_get_ivars(child);
4809 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4810 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Detach and destroy a child device: turn off its memory/port decoding,
 * release (warning about any still-owned) resources from its resource
 * list, and finally delete the device_t itself.
 */
4817 pci_delete_child(device_t dev, device_t child)
4819 struct resource_list_entry *rle;
4820 struct resource_list *rl;
4821 struct pci_devinfo *dinfo;
4823 dinfo = device_get_ivars(child);
4824 rl = &dinfo->resources;
4826 if (device_is_attached(child))
4827 device_detach(child);
4829 /* Turn off access to resources we're about to free */
4830 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4831 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4833 /* Free all allocated resources */
4834 STAILQ_FOREACH(rle, rl, link) {
4836 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4837 resource_list_busy(rl, rle->type, rle->rid)) {
4838 pci_printf(&dinfo->cfg,
4839 "Resource still owned, oops. "
4840 "(type=%d, rid=%d, addr=%lx)\n",
4841 rle->type, rle->rid,
4842 rman_get_start(rle->res));
4843 bus_release_resource(child, rle->type, rle->rid,
4846 resource_list_unreserve(rl, dev, child, rle->type,
4850 resource_list_free(rl);
4852 device_delete_child(dev, child);
/*
 * Bus method: remove a single resource entry from a direct child's
 * resource list, refusing (with a warning) if the child still actively
 * owns the resource; otherwise unreserve and delete the entry.
 */
4857 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4859 struct pci_devinfo *dinfo;
4860 struct resource_list *rl;
4861 struct resource_list_entry *rle;
4863 if (device_get_parent(child) != dev)
4866 dinfo = device_get_ivars(child);
4867 rl = &dinfo->resources;
4868 rle = resource_list_find(rl, type, rid);
4873 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4874 resource_list_busy(rl, type, rid)) {
4875 device_printf(dev, "delete_resource: "
4876 "Resource still owned by child, oops. "
4877 "(type=%d, rid=%d, addr=%lx)\n",
4878 type, rid, rman_get_start(rle->res));
4881 resource_list_unreserve(rl, dev, child, type, rid);
4883 resource_list_delete(rl, type, rid);
/* Bus method: hand back the child's per-device resource list. */
4886 struct resource_list *
4887 pci_get_resource_list (device_t dev, device_t child)
4889 struct pci_devinfo *dinfo = device_get_ivars(child);
4891 return (&dinfo->resources);
/* Bus method: all children share the bus softc's DMA tag. */
4895 pci_get_dma_tag(device_t bus, device_t dev)
4897 struct pci_softc *sc = device_get_softc(bus);
4899 return (sc->sc_dma_tag);
/*
 * Bus method: read 'width' bytes of the child's config space at 'reg'
 * by delegating to the parent bridge's PCIB_READ_CONFIG.
 */
4903 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4905 struct pci_devinfo *dinfo = device_get_ivars(child);
4906 pcicfgregs *cfg = &dinfo->cfg;
4908 return (PCIB_READ_CONFIG(device_get_parent(dev),
4909 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus method: write 'width' bytes of 'val' to the child's config space
 * at 'reg' via the parent bridge's PCIB_WRITE_CONFIG.
 */
4913 pci_write_config_method(device_t dev, device_t child, int reg,
4914 uint32_t val, int width)
4916 struct pci_devinfo *dinfo = device_get_ivars(child);
4917 pcicfgregs *cfg = &dinfo->cfg;
4919 PCIB_WRITE_CONFIG(device_get_parent(dev),
4920 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Bus method: format the child's location as "pciD:B:S:F" into buf. */
4924 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4928 snprintf(buf, buflen, "pci%d:%d:%d:%d", pci_get_domain(child),
4929 pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
/*
 * Bus method: format the child's plug-and-play identification (vendor,
 * device, subvendor, subdevice, class bytes) into buf for devctl/devd.
 */
4934 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4937 struct pci_devinfo *dinfo;
4940 dinfo = device_get_ivars(child);
4942 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4943 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4944 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/* Bus method: ask the parent bridge to route the child's INTx pin. */
4950 pci_assign_interrupt_method(device_t dev, device_t child)
4952 struct pci_devinfo *dinfo = device_get_ivars(child);
4953 pcicfgregs *cfg = &dinfo->cfg;
4955 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * dev_lookup eventhandler: translate a "pciD:B:S:F" or "pciB:S:F"
 * selector string into a device_t via pci_find_dbsf().  Each component
 * is range-checked; in the three-component form the domain defaults
 * (domain omitted from the name).
 */
4960 pci_lookup(void *arg, const char *name, device_t *dev)
4964 int domain, bus, slot, func;
4970 * Accept pciconf-style selectors of either pciD:B:S:F or
4971 * pciB:S:F. In the latter case, the domain is assumed to
4974 if (strncmp(name, "pci", 3) != 0)
4976 val = strtol(name + 3, &end, 10);
4977 if (val < 0 || val > INT_MAX || *end != ':')
4980 val = strtol(end + 1, &end, 10);
4981 if (val < 0 || val > INT_MAX || *end != ':')
4984 val = strtol(end + 1, &end, 10);
4985 if (val < 0 || val > INT_MAX)
4989 val = strtol(end + 1, &end, 10);
4990 if (val < 0 || val > INT_MAX || *end != '\0')
4993 } else if (*end == '\0') {
/* ARI allows functions above PCI_FUNCMAX, but only on slot 0. */
5001 if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
5002 func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
5005 *dev = pci_find_dbsf(domain, bus, slot, func);
/*
 * Module event handler.  On load: initialize the global device queue,
 * create the /dev control node, load the vendor database, and register
 * the pci_lookup dev_lookup hook.  On unload: undo the registration and
 * destroy the device node.
 */
5009 pci_modevent(module_t mod, int what, void *arg)
5011 static struct cdev *pci_cdev;
5012 static eventhandler_tag tag;
5016 STAILQ_INIT(&pci_devq);
5018 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
5020 pci_load_vendor_data();
5021 tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
5027 EVENTHANDLER_DEREGISTER(dev_lookup, tag);
5028 destroy_dev(pci_cdev);
/*
 * Write the PCI Express control registers saved by pci_cfg_save_pcie()
 * back into the device's PCIe capability.  Which registers exist
 * depends on the capability version and port type, mirroring the
 * conditions used when saving.
 */
5036 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
5038 #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
5039 struct pcicfg_pcie *cfg;
5042 cfg = &dinfo->cfg.pcie;
5043 pos = cfg->pcie_location;
5045 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5047 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
/* Link registers: v2+, or v1 root ports and (legacy) endpoints. */
5049 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5050 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5051 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5052 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
/* Slot registers: v2+, or ports that implement a slot. */
5054 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5055 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5056 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5057 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
5059 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5060 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5061 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
/* The "2" registers only exist in capability version 2 and later. */
5064 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
5065 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
5066 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
/* Restore the saved PCI-X command register into the PCI-X capability. */
5072 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
5074 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
5075 dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a (type 0 header only) device's saved configuration space:
 * power it back to D0, rewrite its BARs and the writable header
 * registers, then restore PCIe/PCI-X capability state and reprogram any
 * MSI/MSI-X configuration.  Used on resume and before reprobing.
 */
5079 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
5083 * Only do header type 0 devices. Type 1 devices are bridges,
5084 * which we know need special treatment. Type 2 devices are
5085 * cardbus bridges which also require special treatment.
5086 * Other types are unknown, and we err on the side of safety
5089 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5093 * Restore the device to full power mode. We must do this
5094 * before we restore the registers because moving from D3 to
5095 * D0 will cause the chip's BARs and some other registers to
5096 * be reset to some unknown power on reset values. Cut down
5097 * the noise on boot by doing nothing if we are already in
5100 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
5101 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5102 pci_restore_bars(dev);
5103 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
5104 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
5105 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
5106 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
5107 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
5108 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
5109 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
5110 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
5111 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
5114 * Restore extended capabilities for PCI-Express and PCI-X
5116 if (dinfo->cfg.pcie.pcie_location != 0)
5117 pci_cfg_restore_pcie(dev, dinfo);
5118 if (dinfo->cfg.pcix.pcix_location != 0)
5119 pci_cfg_restore_pcix(dev, dinfo);
5121 /* Restore MSI and MSI-X configurations if they are present. */
5122 if (dinfo->cfg.msi.msi_location != 0)
5123 pci_resume_msi(dev);
5124 if (dinfo->cfg.msix.msix_location != 0)
5125 pci_resume_msix(dev);
/*
 * Save the PCI Express control registers from the device's PCIe
 * capability into dinfo for later restoration by
 * pci_cfg_restore_pcie().  The set of registers read matches the
 * version/port-type conditions used on restore.
 */
5129 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
5131 #define RREG(n) pci_read_config(dev, pos + (n), 2)
5132 struct pcicfg_pcie *cfg;
5135 cfg = &dinfo->cfg.pcie;
5136 pos = cfg->pcie_location;
5138 cfg->pcie_flags = RREG(PCIER_FLAGS);
5140 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5142 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
5144 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5145 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5146 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5147 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
5149 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5150 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5151 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5152 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
5154 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5155 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5156 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
/* The "2" registers only exist in capability version 2 and later. */
5159 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5160 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5161 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
/* Save the PCI-X command register from the PCI-X capability. */
5167 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5169 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5170 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * pci_cfg_save: snapshot the writable portion of a device's type 0
 * configuration header (plus PCIe/PCI-X capability state) into the
 * cached copy in 'dinfo' so pci_cfg_restore() can replay it, and then
 * optionally power the device down according to the
 * pci_do_power_nodriver policy.
 *
 * NOTE(review): 'setstate' presumably gates the power-state logic at
 * the bottom of the function; the test itself is not visible in this
 * view -- confirm against the full source.
 */
5174 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5180 * Only do header type 0 devices. Type 1 devices are bridges, which
5181 * we know need special treatment. Type 2 devices are cardbus bridges
5182 * which also require special treatment. Other types are unknown, and
5183 * we err on the side of safety by ignoring them. Powering down
5184 * bridges should not be undertaken lightly.
5186 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5190 * Some drivers apparently write to these registers w/o updating our
5191 * cached copy. No harm happens if we update the copy, so do so here
5192 * so we can restore them. The COMMAND register is modified by the
5193 * bus w/o updating the cache. This should represent the normally
5194 * writable portion of the 'defined' part of type 0 headers. In
5195 * theory we also need to save/restore the PCI capability structures
5196 * we know about, but apart from power we don't know any that are
/* Refresh the cached header registers from live config space. */
5199 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5200 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5201 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5202 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5203 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5204 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5205 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5206 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5207 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5208 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5209 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5210 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5211 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5212 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5213 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
/* Also snapshot the extended (PCIe / PCI-X) capability state if present. */
5215 if (dinfo->cfg.pcie.pcie_location != 0)
5216 pci_cfg_save_pcie(dev, dinfo);
5218 if (dinfo->cfg.pcix.pcix_location != 0)
5219 pci_cfg_save_pcix(dev, dinfo);
5222 * Don't set the state for display devices, base peripherals and
5223 * memory devices since bad things happen when they are powered down.
5224 * We should (a) have drivers that can easily detach and (b) use
5225 * generic drivers for these devices so that some device actually
5226 * attaches. We need to make sure that when we implement (a) we don't
5227 * power the device down on a reattach.
5229 cls = pci_get_class(dev);
/* pci_do_power_nodriver is a tunable selecting how aggressive to be. */
5232 switch (pci_do_power_nodriver)
5234 case 0: /* No powerdown at all */
5236 case 1: /* Conservative about what to power down */
5237 if (cls == PCIC_STORAGE)
5240 case 2: /* Aggressive about what to power down */
5241 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5242 cls == PCIC_BASEPERIPH)
5245 case 3: /* Power down everything */
5249 * PCI spec says we can only go into D3 state from D0 state.
5250 * Transition from D[12] into D0 before going to D3 state.
5252 ps = pci_get_powerstate(dev);
5253 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5254 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5255 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5256 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5259 /* Wrapper APIs suitable for device driver use. */
/*
 * pci_save_state: driver-facing wrapper around pci_cfg_save().  The
 * final argument of 0 requests a pure snapshot -- no power-state change.
 */
5261 pci_save_state(device_t dev)
5263 struct pci_devinfo *dinfo;
5265 dinfo = device_get_ivars(dev);
5266 pci_cfg_save(dev, dinfo, 0);
/*
 * pci_restore_state: driver-facing wrapper around pci_cfg_restore();
 * writes the previously saved header and capability state back to the
 * device's configuration space.
 */
5270 pci_restore_state(device_t dev)
5272 struct pci_devinfo *dinfo;
5274 dinfo = device_get_ivars(dev);
5275 pci_cfg_restore(dev, dinfo);
/*
 * pci_get_rid_method: obtain the PCI requester ID for 'child' by
 * delegating to the parent bridge via the PCIB_GET_RID bus method.
 */
5279 pci_get_rid_method(device_t dev, device_t child)
5282 return (PCIB_GET_RID(device_get_parent(dev), child));
5285 /* Find the upstream port of a given PCI device in a root complex. */
5287 pci_find_pcie_root_port(device_t dev)
5289 struct pci_devinfo *dinfo;
5290 devclass_t pci_class;
5293 pci_class = devclass_find("pci");
5294 KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
5295 ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
5298 * Walk the bridge hierarchy until we find a PCI-e root
5299 * port or a non-PCI device.
5302 bus = device_get_parent(dev);
5303 KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
5304 device_get_nameunit(dev)));
5306 pcib = device_get_parent(bus);
5307 KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
5308 device_get_nameunit(bus)));
5311 * pcib's parent must be a PCI bus for this to be a
5314 if (device_get_devclass(device_get_parent(pcib)) != pci_class)
5317 dinfo = device_get_ivars(pcib);
5318 if (dinfo->cfg.pcie.pcie_location != 0 &&
5319 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)