2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/limits.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
49 #include <vm/vm_extern.h>
52 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <machine/stdarg.h>
57 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
58 #include <machine/intr_machdep.h>
61 #include <sys/pciio.h>
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pci_private.h>
68 #include <dev/pci/pci_iov_private.h>
71 #include <dev/usb/controller/xhcireg.h>
72 #include <dev/usb/controller/ehcireg.h>
73 #include <dev/usb/controller/ohcireg.h>
74 #include <dev/usb/controller/uhcireg.h>
/*
 * True when config-space register 'reg' is the expansion-ROM BAR for the
 * given header type: PCIR_BIOS for type-0 devices, PCIR_BIOS_1 for
 * type-1 (PCI-PCI bridge) devices.
 */
79 #define PCIR_IS_BIOS(cfg, reg) \
80 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
81 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
83 static int pci_has_quirk(uint32_t devid, int quirk);
84 static pci_addr_t pci_mapbase(uint64_t mapreg);
85 static const char *pci_maptype(uint64_t mapreg);
86 static int pci_maprange(uint64_t mapreg);
87 static pci_addr_t pci_rombase(uint64_t mapreg);
88 static int pci_romsize(uint64_t testval);
89 static void pci_fixancient(pcicfgregs *cfg);
90 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
92 static int pci_porten(device_t dev);
93 static int pci_memen(device_t dev);
94 static void pci_assign_interrupt(device_t bus, device_t dev,
96 static int pci_add_map(device_t bus, device_t dev, int reg,
97 struct resource_list *rl, int force, int prefetch);
98 static int pci_probe(device_t dev);
99 static int pci_attach(device_t dev);
100 static int pci_detach(device_t dev);
101 static void pci_load_vendor_data(void);
102 static int pci_describe_parse_line(char **ptr, int *vendor,
103 int *device, char **desc);
104 static char *pci_describe_device(device_t dev);
105 static int pci_modevent(module_t mod, int what, void *arg);
106 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
108 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
109 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
110 int reg, uint32_t *data);
112 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
113 int reg, uint32_t data);
115 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
116 static void pci_mask_msix(device_t dev, u_int index);
117 static void pci_unmask_msix(device_t dev, u_int index);
118 static int pci_msi_blacklisted(void);
119 static int pci_msix_blacklisted(void);
120 static void pci_resume_msi(device_t dev);
121 static void pci_resume_msix(device_t dev);
122 static int pci_remap_intr_method(device_t bus, device_t dev,
125 static int pci_get_id_method(device_t dev, device_t child,
126 enum pci_id_type type, uintptr_t *rid);
128 static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d,
129 int b, int s, int f, uint16_t vid, uint16_t did);
131 static device_method_t pci_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, pci_probe),
134 DEVMETHOD(device_attach, pci_attach),
135 DEVMETHOD(device_detach, pci_detach),
136 DEVMETHOD(device_shutdown, bus_generic_shutdown),
137 DEVMETHOD(device_suspend, bus_generic_suspend),
138 DEVMETHOD(device_resume, pci_resume),
141 DEVMETHOD(bus_print_child, pci_print_child),
142 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
143 DEVMETHOD(bus_read_ivar, pci_read_ivar),
144 DEVMETHOD(bus_write_ivar, pci_write_ivar),
145 DEVMETHOD(bus_driver_added, pci_driver_added),
146 DEVMETHOD(bus_setup_intr, pci_setup_intr),
147 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
149 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
150 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
151 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
152 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
153 DEVMETHOD(bus_delete_resource, pci_delete_resource),
154 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
155 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
156 DEVMETHOD(bus_release_resource, pci_release_resource),
157 DEVMETHOD(bus_activate_resource, pci_activate_resource),
158 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
159 DEVMETHOD(bus_child_deleted, pci_child_deleted),
160 DEVMETHOD(bus_child_detached, pci_child_detached),
161 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
162 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
163 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
164 DEVMETHOD(bus_suspend_child, pci_suspend_child),
165 DEVMETHOD(bus_resume_child, pci_resume_child),
166 DEVMETHOD(bus_rescan, pci_rescan_method),
169 DEVMETHOD(pci_read_config, pci_read_config_method),
170 DEVMETHOD(pci_write_config, pci_write_config_method),
171 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
172 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
173 DEVMETHOD(pci_enable_io, pci_enable_io_method),
174 DEVMETHOD(pci_disable_io, pci_disable_io_method),
175 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
176 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
177 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
178 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
179 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
180 DEVMETHOD(pci_find_cap, pci_find_cap_method),
181 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
182 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
183 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
184 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
185 DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
186 DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
187 DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
188 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
189 DEVMETHOD(pci_release_msi, pci_release_msi_method),
190 DEVMETHOD(pci_msi_count, pci_msi_count_method),
191 DEVMETHOD(pci_msix_count, pci_msix_count_method),
192 DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method),
193 DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method),
194 DEVMETHOD(pci_get_id, pci_get_id_method),
195 DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method),
196 DEVMETHOD(pci_child_added, pci_child_added_method),
198 DEVMETHOD(pci_iov_attach, pci_iov_attach_method),
199 DEVMETHOD(pci_iov_detach, pci_iov_detach_method),
200 DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method),
206 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
208 static devclass_t pci_devclass;
209 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
210 MODULE_VERSION(pci, 1);
212 static char *pci_vendordata;
213 static size_t pci_vendordata_size;
216 uint32_t devid; /* Vendor/device of the card */
218 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
219 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
220 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
221 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
222 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
223 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
228 static const struct pci_quirk pci_quirks[] = {
229 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
230 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
231 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
232 /* As does the Serverworks OSB4 (the SMBus mapping register) */
233 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
236 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
237 * or the CMIC-SL (AKA ServerWorks GC_LE).
239 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 * MSI doesn't work on earlier Intel chipsets including
244 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
246 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
247 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
248 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
249 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
250 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
251 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
252 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
255 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
258 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
261 * MSI-X allocation doesn't work properly for devices passed through
262 * by VMware up to at least ESXi 5.1.
264 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
265 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
268 * Some virtualization environments emulate an older chipset
269 * but support MSI just fine. QEMU uses the Intel 82440.
271 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
274 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
275 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
276 * It prevents us from attaching hpet(4) when the bit is unset.
277 * Note this quirk only affects SB600 revision A13 and earlier.
278 * For SB600 A21 and later, firmware must set the bit to hide it.
279 * For SB700 and later, it is unused and hardcoded to zero.
281 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
284 * Atheros AR8161/AR8162/E2200/E2400 Ethernet controllers have a
285 * bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit
286 * of the command register is set.
288 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
289 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
290 { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
291 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
294 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
295 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
297 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
298 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
299 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
300 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
301 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
302 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
307 /* map register information */
308 #define PCI_MAPMEM 0x01 /* memory map */
309 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
310 #define PCI_MAPPORT 0x04 /* port map */
312 struct devlist pci_devq;
313 uint32_t pci_generation;
314 uint32_t pci_numdevs = 0;
315 static int pcie_chipset, pcix_chipset;
318 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
320 static int pci_enable_io_modes = 1;
321 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
322 &pci_enable_io_modes, 1,
323 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
324 enable these bits correctly. We'd like to do this all the time, but there\n\
325 are some peripherals that this causes problems with.");
327 static int pci_do_realloc_bars = 0;
328 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
329 &pci_do_realloc_bars, 0,
330 "Attempt to allocate a new range for any BARs whose original "
331 "firmware-assigned ranges fail to allocate during the initial device scan.");
333 static int pci_do_power_nodriver = 0;
334 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
335 &pci_do_power_nodriver, 0,
336 "Place a function into D3 state when no driver attaches to it. 0 means\n\
337 disable. 1 means conservatively place devices into D3 state. 2 means\n\
338 aggressively place devices into D3 state. 3 means put absolutely everything\n\
341 int pci_do_power_resume = 1;
342 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
343 &pci_do_power_resume, 1,
344 "Transition from D3 -> D0 on resume.");
346 int pci_do_power_suspend = 1;
347 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
348 &pci_do_power_suspend, 1,
349 "Transition from D0 -> D3 on suspend.");
351 static int pci_do_msi = 1;
352 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
353 "Enable support for MSI interrupts");
355 static int pci_do_msix = 1;
356 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
357 "Enable support for MSI-X interrupts");
359 static int pci_honor_msi_blacklist = 1;
360 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
361 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
/*
 * Early takeover of USB controllers defaults on only for x86, where BIOS
 * legacy USB emulation is the concern.  The #else/#endif were missing
 * from this listing, leaving two conflicting definitions in scope.
 */
#if defined(__i386__) || defined(__amd64__)
static int pci_usb_takeover = 1;
#else
static int pci_usb_takeover = 0;
#endif
368 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
369 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
370 Disable this if you depend on BIOS emulation of USB devices, that is\n\
371 you use USB devices (like keyboard or mouse) but do not load USB drivers");
373 static int pci_clear_bars;
374 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
375 "Ignore firmware-assigned resources for BARs.");
/* Only meaningful when bus numbers are managed as resources; the #endif
 * closing this conditional block was missing from the listing. */
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static int pci_clear_buses;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
    "Ignore firmware-assigned bus numbers.");
#endif
383 static int pci_enable_ari = 1;
384 SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
385 0, "Enable support for PCIe Alternative RID Interpretation");
388 pci_has_quirk(uint32_t devid, int quirk)
390 const struct pci_quirk *q;
392 for (q = &pci_quirks[0]; q->devid; q++) {
393 if (q->devid == devid && q->type == quirk)
399 /* Find a device_t by bus/slot/function in domain 0 */
402 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
405 return (pci_find_dbsf(0, bus, slot, func));
408 /* Find a device_t by domain/bus/slot/function */
411 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
413 struct pci_devinfo *dinfo;
415 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
416 if ((dinfo->cfg.domain == domain) &&
417 (dinfo->cfg.bus == bus) &&
418 (dinfo->cfg.slot == slot) &&
419 (dinfo->cfg.func == func)) {
420 return (dinfo->cfg.dev);
427 /* Find a device_t by vendor/device ID */
430 pci_find_device(uint16_t vendor, uint16_t device)
432 struct pci_devinfo *dinfo;
434 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
435 if ((dinfo->cfg.vendor == vendor) &&
436 (dinfo->cfg.device == device)) {
437 return (dinfo->cfg.dev);
445 pci_find_class(uint8_t class, uint8_t subclass)
447 struct pci_devinfo *dinfo;
449 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
450 if (dinfo->cfg.baseclass == class &&
451 dinfo->cfg.subclass == subclass) {
452 return (dinfo->cfg.dev);
460 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
465 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
468 retval += vprintf(fmt, ap);
473 /* return base address of memory or port map */
476 pci_mapbase(uint64_t mapreg)
479 if (PCI_BAR_MEM(mapreg))
480 return (mapreg & PCIM_BAR_MEM_BASE);
482 return (mapreg & PCIM_BAR_IO_BASE);
485 /* return map type of memory or port map */
488 pci_maptype(uint64_t mapreg)
491 if (PCI_BAR_IO(mapreg))
493 if (mapreg & PCIM_BAR_MEM_PREFETCH)
494 return ("Prefetchable Memory");
498 /* return log2 of map size decoded for memory or port map */
501 pci_mapsize(uint64_t testval)
505 testval = pci_mapbase(testval);
508 while ((testval & 1) == 0)
517 /* return base address of device ROM */
520 pci_rombase(uint64_t mapreg)
523 return (mapreg & PCIM_BIOS_ADDR_MASK);
526 /* return log2 of map size decided for device ROM */
529 pci_romsize(uint64_t testval)
533 testval = pci_rombase(testval);
536 while ((testval & 1) == 0)
545 /* return log2 of address range supported by map register */
548 pci_maprange(uint64_t mapreg)
552 if (PCI_BAR_IO(mapreg))
555 switch (mapreg & PCIM_BAR_MEM_TYPE) {
556 case PCIM_BAR_MEM_32:
559 case PCIM_BAR_MEM_1MB:
562 case PCIM_BAR_MEM_64:
569 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
572 pci_fixancient(pcicfgregs *cfg)
574 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
577 /* PCI to PCI bridges use header type 1 */
578 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
579 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
582 /* extract header type specific config data */
585 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
587 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
588 switch (cfg->hdrtype & PCIM_HDRTYPE) {
589 case PCIM_HDRTYPE_NORMAL:
590 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
591 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
592 cfg->mingnt = REG(PCIR_MINGNT, 1);
593 cfg->maxlat = REG(PCIR_MAXLAT, 1);
594 cfg->nummaps = PCI_MAXMAPS_0;
596 case PCIM_HDRTYPE_BRIDGE:
597 cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1);
598 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1);
599 cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1);
600 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1);
601 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2);
602 cfg->nummaps = PCI_MAXMAPS_1;
604 case PCIM_HDRTYPE_CARDBUS:
605 cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1);
606 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1);
607 cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1);
608 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1);
609 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2);
610 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
611 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
612 cfg->nummaps = PCI_MAXMAPS_2;
618 /* read configuration header into pcicfgregs structure */
620 pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f)
622 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
625 vid = REG(PCIR_VENDOR, 2);
626 did = REG(PCIR_DEVICE, 2);
628 return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did));
634 pci_alloc_devinfo_method(device_t dev)
637 return (malloc(sizeof(struct pci_devinfo), M_DEVBUF,
/*
 * Populate a pci_devinfo from the function's config header and link it
 * onto the global device list (pci_devq), mirroring the cfg fields into
 * the exported pci_conf structure.
 *
 * NOTE(review): this listing is truncated — the function body is missing
 * several lines relative to canonical FreeBSD pci.c (the REG macro, the
 * domain/bus/slot/func and vendor/device assignments, pci_fixancient()
 * call, intline remap, device counter update, and closing braces).
 * Restore from upstream before building.
 */
641 static struct pci_devinfo *
642 pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f,
643 uint16_t vid, uint16_t did)
645 struct pci_devinfo *devlist_entry;
648 devlist_entry = PCI_ALLOC_DEVINFO(bus);
650 cfg = &devlist_entry->cfg;
/* Raw header registers. */
658 cfg->cmdreg = REG(PCIR_COMMAND, 2);
659 cfg->statreg = REG(PCIR_STATUS, 2);
660 cfg->baseclass = REG(PCIR_CLASS, 1);
661 cfg->subclass = REG(PCIR_SUBCLASS, 1);
662 cfg->progif = REG(PCIR_PROGIF, 1);
663 cfg->revid = REG(PCIR_REVID, 1);
664 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
665 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
666 cfg->lattimer = REG(PCIR_LATTIMER, 1);
667 cfg->intpin = REG(PCIR_INTPIN, 1);
668 cfg->intline = REG(PCIR_INTLINE, 1);
/* Split the multi-function bit out of the header type. */
670 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
671 cfg->hdrtype &= ~PCIM_MFDEV;
672 STAILQ_INIT(&cfg->maps);
677 pci_hdrtypedata(pcib, b, s, f, cfg);
/* Walk the capability list only if the status register says one exists. */
679 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
680 pci_read_cap(pcib, cfg);
682 STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links);
/* Mirror cfg into the pciio-visible pci_conf. */
684 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
685 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
686 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
687 devlist_entry->conf.pc_sel.pc_func = cfg->func;
688 devlist_entry->conf.pc_hdr = cfg->hdrtype;
690 devlist_entry->conf.pc_subvendor = cfg->subvendor;
691 devlist_entry->conf.pc_subdevice = cfg->subdevice;
692 devlist_entry->conf.pc_vendor = cfg->vendor;
693 devlist_entry->conf.pc_device = cfg->device;
695 devlist_entry->conf.pc_class = cfg->baseclass;
696 devlist_entry->conf.pc_subclass = cfg->subclass;
697 devlist_entry->conf.pc_progif = cfg->progif;
698 devlist_entry->conf.pc_revid = cfg->revid;
703 return (devlist_entry);
/*
 * Parse the Enhanced Allocation (EA) capability: walk each entry, decode
 * flags/BEI/base/max_offset (with optional 64-bit upper dwords) and queue
 * a pci_ea_entry on cfg->ea.ea_entries.
 *
 * NOTE(review): this listing is truncated — local declarations (val, dw[],
 * num_ent, a/b/ptr counters), the dword-read loop body, the ptr advance,
 * the bootverbose guard around the printf, and closing braces are missing
 * relative to canonical FreeBSD pci.c.  Restore from upstream.
 */
708 pci_ea_fill_info(device_t pcib, pcicfgregs *cfg)
710 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \
711 cfg->ea.ea_location + (n), w)
718 uint64_t base, max_offset;
719 struct pci_ea_entry *eae;
/* Nothing to do when the capability was not found. */
721 if (cfg->ea.ea_location == 0)
724 STAILQ_INIT(&cfg->ea.ea_entries);
726 /* Determine the number of entries */
727 num_ent = REG(PCIR_EA_NUM_ENT, 2);
728 num_ent &= PCIM_EA_NUM_ENT_MASK;
730 /* Find the first entry to care of */
731 ptr = PCIR_EA_FIRST_ENT;
733 /* Skip DWORD 2 for type 1 functions */
734 if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE)
737 for (a = 0; a < num_ent; a++) {
739 eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO);
740 eae->eae_cfg_offset = cfg->ea.ea_location + ptr;
742 /* Read a number of dwords in the entry */
745 ent_size = (val & PCIM_EA_ES);
747 for (b = 0; b < ent_size; b++) {
752 eae->eae_flags = val;
753 eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET;
/* dw[0]/dw[1] hold base and max-offset; upper 32 bits follow when 64-bit. */
755 base = dw[0] & PCIM_EA_FIELD_MASK;
756 max_offset = dw[1] | ~PCIM_EA_FIELD_MASK;
758 if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) {
759 base |= (uint64_t)dw[b] << 32UL;
762 if (((dw[1] & PCIM_EA_IS_64) != 0)
764 max_offset |= (uint64_t)dw[b] << 32UL;
768 eae->eae_base = base;
769 eae->eae_max_offset = max_offset;
771 STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link);
774 printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n",
775 cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags,
776 (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset);
/*
 * Walk the standard PCI capability list and record the location/contents
 * of each capability we care about (PM, HyperTransport, MSI, MSI-X, VPD,
 * subvendor, PCI-X, PCIe, EA) into *cfg.  On powerpc it also enables the
 * HT MSI mapping window for HT slaves.
 *
 * NOTE(review): this listing is truncated — the 'break' statements between
 * switch cases, several closing braces, #endif lines, the PCIY_SUBVENDOR
 * case label, and the pcix/pcie chipset flags are missing relative to
 * canonical FreeBSD pci.c.  Restore from upstream before building.
 */
783 pci_read_cap(device_t pcib, pcicfgregs *cfg)
785 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
786 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
787 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
791 int ptr, nextptr, ptrptr;
/* Capability pointer register location depends on the header type. */
793 switch (cfg->hdrtype & PCIM_HDRTYPE) {
794 case PCIM_HDRTYPE_NORMAL:
795 case PCIM_HDRTYPE_BRIDGE:
796 ptrptr = PCIR_CAP_PTR;
798 case PCIM_HDRTYPE_CARDBUS:
799 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
802 return; /* no extended capabilities support */
804 nextptr = REG(ptrptr, 1); /* sanity check? */
807 * Read capability entries.
809 while (nextptr != 0) {
812 printf("illegal PCI extended capability offset %d\n",
816 /* Find the next entry */
818 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
820 /* Process this entry */
821 switch (REG(ptr + PCICAP_ID, 1)) {
822 case PCIY_PMG: /* PCI power management */
823 if (cfg->pp.pp_cap == 0) {
824 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
825 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
826 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
827 if ((nextptr - ptr) > PCIR_POWER_DATA)
828 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
831 case PCIY_HT: /* HyperTransport */
832 /* Determine HT-specific capability type. */
833 val = REG(ptr + PCIR_HT_COMMAND, 2);
835 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
836 cfg->ht.ht_slave = ptr;
838 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
839 switch (val & PCIM_HTCMD_CAP_MASK) {
840 case PCIM_HTCAP_MSI_MAPPING:
841 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
842 /* Sanity check the mapping window. */
843 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
846 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
848 if (addr != MSI_INTEL_ADDR_BASE)
850 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
851 cfg->domain, cfg->bus,
852 cfg->slot, cfg->func,
855 addr = MSI_INTEL_ADDR_BASE;
857 cfg->ht.ht_msimap = ptr;
858 cfg->ht.ht_msictrl = val;
859 cfg->ht.ht_msiaddr = addr;
864 case PCIY_MSI: /* PCI MSI */
865 cfg->msi.msi_location = ptr;
866 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* Message count is encoded as a power of two in the MMC field. */
867 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
868 PCIM_MSICTRL_MMC_MASK)>>1);
870 case PCIY_MSIX: /* PCI MSI-X */
871 cfg->msix.msix_location = ptr;
872 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
873 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
874 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
875 val = REG(ptr + PCIR_MSIX_TABLE, 4);
876 cfg->msix.msix_table_bar = PCIR_BAR(val &
878 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
879 val = REG(ptr + PCIR_MSIX_PBA, 4);
880 cfg->msix.msix_pba_bar = PCIR_BAR(val &
882 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
884 case PCIY_VPD: /* PCI Vital Product Data */
885 cfg->vpd.vpd_reg = ptr;
888 /* Should always be true. */
889 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
890 PCIM_HDRTYPE_BRIDGE) {
891 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
892 cfg->subvendor = val & 0xffff;
893 cfg->subdevice = val >> 16;
896 case PCIY_PCIX: /* PCI-X */
898 * Assume we have a PCI-X chipset if we have
899 * at least one PCI-PCI bridge with a PCI-X
900 * capability. Note that some systems with
901 * PCI-express or HT chipsets might match on
902 * this check as well.
904 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
907 cfg->pcix.pcix_location = ptr;
909 case PCIY_EXPRESS: /* PCI-express */
911 * Assume we have a PCI-express chipset if we have
912 * at least one PCI-express device.
915 cfg->pcie.pcie_location = ptr;
916 val = REG(ptr + PCIER_FLAGS, 2);
917 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
919 case PCIY_EA: /* Enhanced Allocation */
920 cfg->ea.ea_location = ptr;
921 pci_ea_fill_info(pcib, cfg);
928 #if defined(__powerpc__)
930 * Enable the MSI mapping window for all HyperTransport
931 * slaves. PCI-PCI bridges have their windows enabled via
934 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
935 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
937 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
938 cfg->domain, cfg->bus, cfg->slot, cfg->func);
939 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
940 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
944 /* REG and WREG use carry through to next functions */
948 * PCI Vital Product Data
951 #define PCI_VPD_TIMEOUT 1000000
954 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
956 int count = PCI_VPD_TIMEOUT;
958 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
960 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
962 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
965 DELAY(1); /* limit looping */
967 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
974 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
976 int count = PCI_VPD_TIMEOUT;
978 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
980 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
981 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
982 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
985 DELAY(1); /* limit looping */
992 #undef PCI_VPD_TIMEOUT
994 struct vpd_readstate {
1004 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
1009 if (vrs->bytesinval == 0) {
1010 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
1012 vrs->val = le32toh(reg);
1014 byte = vrs->val & 0xff;
1015 vrs->bytesinval = 3;
1017 vrs->val = vrs->val >> 8;
1018 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array and the writable (VPD-W)
 * keyword array.  Implemented as a small state machine driven by
 * vpd_nextbyte(); the RV keyword carries a checksum that validates the
 * read-only section.  On checksum failure the RO data is discarded; on
 * I/O error everything is discarded.  Sets cfg->vpd.vpd_cached either way.
 *
 * NOTE(review): this listing is truncated — many interior lines (local
 * declarations, state assignments/transitions, break statements, closing
 * braces, and the '#x'/'%#hhx' printf guards under bootverbose) are
 * missing relative to canonical FreeBSD pci.c.  Restore from upstream
 * before building; the surviving lines are kept verbatim below.
 */
1028 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
1030 struct vpd_readstate vrs;
1035 int alloc, off; /* alloc/off for RO/W arrays */
1041 /* init vpd reader */
1049 name = remain = i = 0; /* shut up stupid gcc */
1050 alloc = off = 0; /* shut up stupid gcc */
1051 dflen = 0; /* shut up stupid gcc */
/* Main parse loop: 'state' < 0 signals completion (-1) or error (< -1). */
1053 while (state >= 0) {
1054 if (vpd_nextbyte(&vrs, &byte)) {
1059 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
1060 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
1061 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
1064 case 0: /* item name */
1066 if (vpd_nextbyte(&vrs, &byte2)) {
1071 if (vpd_nextbyte(&vrs, &byte2)) {
1075 remain |= byte2 << 8;
1076 if (remain > (0x7f*4 - vrs.off)) {
1079 "invalid VPD data, remain %#x\n",
1084 remain = byte & 0x7;
1085 name = (byte >> 3) & 0xf;
1088 case 0x2: /* String */
1089 cfg->vpd.vpd_ident = malloc(remain + 1,
1090 M_DEVBUF, M_WAITOK);
1097 case 0x10: /* VPD-R */
1100 cfg->vpd.vpd_ros = malloc(alloc *
1101 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1105 case 0x11: /* VPD-W */
1108 cfg->vpd.vpd_w = malloc(alloc *
1109 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1113 default: /* Invalid data, abort */
1119 case 1: /* Identifier String */
1120 cfg->vpd.vpd_ident[i++] = byte;
1123 cfg->vpd.vpd_ident[i] = '\0';
1128 case 2: /* VPD-R Keyword Header */
1130 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1131 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1132 M_DEVBUF, M_WAITOK | M_ZERO);
1134 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1135 if (vpd_nextbyte(&vrs, &byte2)) {
1139 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1140 if (vpd_nextbyte(&vrs, &byte2)) {
1144 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1146 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1149 * if this happens, we can't trust the rest
1152 pci_printf(cfg, "bad keyword length: %d\n",
1157 } else if (dflen == 0) {
1158 cfg->vpd.vpd_ros[off].value = malloc(1 *
1159 sizeof(*cfg->vpd.vpd_ros[off].value),
1160 M_DEVBUF, M_WAITOK);
1161 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1163 cfg->vpd.vpd_ros[off].value = malloc(
1165 sizeof(*cfg->vpd.vpd_ros[off].value),
1166 M_DEVBUF, M_WAITOK);
1169 /* keep in sync w/ state 3's transistions */
1170 if (dflen == 0 && remain == 0)
1172 else if (dflen == 0)
1178 case 3: /* VPD-R Keyword Value */
1179 cfg->vpd.vpd_ros[off].value[i++] = byte;
/* The RV keyword's first byte completes the section checksum. */
1180 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1181 "RV", 2) == 0 && cksumvalid == -1) {
1187 "bad VPD cksum, remain %hhu\n",
1196 /* keep in sync w/ state 2's transistions */
1198 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1199 if (dflen == 0 && remain == 0) {
1200 cfg->vpd.vpd_rocnt = off;
1201 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1202 off * sizeof(*cfg->vpd.vpd_ros),
1203 M_DEVBUF, M_WAITOK | M_ZERO);
1205 } else if (dflen == 0)
1215 case 5: /* VPD-W Keyword Header */
1217 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1218 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1219 M_DEVBUF, M_WAITOK | M_ZERO);
1221 cfg->vpd.vpd_w[off].keyword[0] = byte;
1222 if (vpd_nextbyte(&vrs, &byte2)) {
1226 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1227 if (vpd_nextbyte(&vrs, &byte2)) {
1231 cfg->vpd.vpd_w[off].len = dflen = byte2;
1232 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1233 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1234 sizeof(*cfg->vpd.vpd_w[off].value),
1235 M_DEVBUF, M_WAITOK);
1238 /* keep in sync w/ state 6's transistions */
1239 if (dflen == 0 && remain == 0)
1241 else if (dflen == 0)
1247 case 6: /* VPD-W Keyword Value */
1248 cfg->vpd.vpd_w[off].value[i++] = byte;
1251 /* keep in sync w/ state 5's transistions */
1253 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1254 if (dflen == 0 && remain == 0) {
1255 cfg->vpd.vpd_wcnt = off;
1256 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1257 off * sizeof(*cfg->vpd.vpd_w),
1258 M_DEVBUF, M_WAITOK | M_ZERO);
1260 } else if (dflen == 0)
1265 pci_printf(cfg, "invalid state: %d\n", state);
/* Cleanup: bad checksum / parse error discards RO data ... */
1271 if (cksumvalid == 0 || state < -1) {
1272 /* read-only data bad, clean up */
1273 if (cfg->vpd.vpd_ros != NULL) {
1274 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1275 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1276 free(cfg->vpd.vpd_ros, M_DEVBUF);
1277 cfg->vpd.vpd_ros = NULL;
/* ... while an I/O error discards everything read so far. */
1281 /* I/O error, clean up */
1282 pci_printf(cfg, "failed to read VPD data.\n");
1283 if (cfg->vpd.vpd_ident != NULL) {
1284 free(cfg->vpd.vpd_ident, M_DEVBUF);
1285 cfg->vpd.vpd_ident = NULL;
1287 if (cfg->vpd.vpd_w != NULL) {
1288 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1289 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1290 free(cfg->vpd.vpd_w, M_DEVBUF);
1291 cfg->vpd.vpd_w = NULL;
1294 cfg->vpd.vpd_cached = 1;
/*
 * Bus method: return the device's cached VPD identifier string via
 * *identptr.  'dev' here is the pci bus, so device_get_parent(dev) is
 * the pcib bridge that performs the actual config-space accesses.
 */
1300 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1302 struct pci_devinfo *dinfo = device_get_ivars(child);
1303 pcicfgregs *cfg = &dinfo->cfg;
/* Parse and cache the VPD contents the first time they are wanted. */
1305 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1306 pci_read_vpd(device_get_parent(dev), cfg);
1308 *identptr = cfg->vpd.vpd_ident;
/* A NULL ident means the device has no (valid) VPD identifier. */
1310 if (*identptr == NULL)
/*
 * Bus method: look up a VPD read-only keyword (e.g. "PN", "SN") for
 * 'child' and return its value string via *vptr.  The whole keyword
 * array is compared with memcmp(), so 'kw' must supply the complete
 * two-character keyword.
 */
1317 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1320 struct pci_devinfo *dinfo = device_get_ivars(child);
1321 pcicfgregs *cfg = &dinfo->cfg;
/* Parse and cache the VPD contents the first time they are wanted. */
1324 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1325 pci_read_vpd(device_get_parent(dev), cfg);
/* Linear scan of the cached read-only keyword list. */
1327 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1328 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1329 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1330 *vptr = cfg->vpd.vpd_ros[i].value;
/*
 * Return the parsed VPD data for 'dev', reading it on first use.
 * Unlike the *_method variants above, 'dev' here is the PCI device
 * itself, so the pcib bridge is its grandparent (parent of the bus).
 */
1339 pci_fetch_vpd_list(device_t dev)
1341 struct pci_devinfo *dinfo = device_get_ivars(dev);
1342 pcicfgregs *cfg = &dinfo->cfg;
1344 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1345 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1350 * Find the requested HyperTransport capability and return the offset
1351 * in configuration space via the pointer provided. The function
1352 * returns 0 on success and an error code otherwise.
1355 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
/* Start from the first HT capability; bail out if there is none. */
1360 error = pci_find_cap(child, PCIY_HT, &ptr);
1365 * Traverse the capabilities list checking each HT capability
1366 * to see if it matches the requested HT capability.
1369 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
/*
 * NOTE(review): slave/host interface capabilities encode their type
 * in fewer command-register bits than other HT capabilities, so the
 * branch below applies a different (not visible here) mask -- confirm.
 */
1370 if (capability == PCIM_HTCAP_SLAVE ||
1371 capability == PCIM_HTCAP_HOST)
1374 val &= PCIM_HTCMD_CAP_MASK;
1375 if (val == capability) {
1381 /* Skip to the next HT capability. */
1383 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1384 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1393 * Find the requested capability and return the offset in
1394 * configuration space via the pointer provided. The function returns
1395 * 0 on success and an error code otherwise.
1398 pci_find_cap_method(device_t dev, device_t child, int capability,
1401 struct pci_devinfo *dinfo = device_get_ivars(child);
1402 pcicfgregs *cfg = &dinfo->cfg;
1407 * Check the CAP_LIST bit of the PCI status register first.
1409 status = pci_read_config(child, PCIR_STATUS, 2);
1410 if (!(status & PCIM_STATUS_CAPPRESENT))
1414 * Determine the start pointer of the capabilities list.
/* The capability pointer register location depends on header type. */
1416 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1417 case PCIM_HDRTYPE_NORMAL:
1418 case PCIM_HDRTYPE_BRIDGE:
1421 case PCIM_HDRTYPE_CARDBUS:
1422 ptr = PCIR_CAP_PTR_2;
1426 return (ENXIO); /* no extended capabilities support */
/* Chase the singly-linked list until a matching ID is found. */
1428 ptr = pci_read_config(child, ptr, 1);
1431 * Traverse the capabilities list.
1434 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1439 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1446 * Find the requested extended capability and return the offset in
1447 * configuration space via the pointer provided. The function returns
1448 * 0 on success and an error code otherwise.
1451 pci_find_extcap_method(device_t dev, device_t child, int capability,
1454 struct pci_devinfo *dinfo = device_get_ivars(child);
1455 pcicfgregs *cfg = &dinfo->cfg;
1459 /* Only supported for PCI-express devices. */
1460 if (cfg->pcie.pcie_location == 0)
/*
 * Walk the extended capability list.  NOTE(review): 'ptr' is
 * presumably initialized to the first extended capability offset
 * (0x100) in a line not visible here -- confirm.  An all-ones or
 * zero header means the list is absent / the device vanished.
 */
1464 ecap = pci_read_config(child, ptr, 4);
1465 if (ecap == 0xffffffff || ecap == 0)
1468 if (PCI_EXTCAP_ID(ecap) == capability) {
1473 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1476 ecap = pci_read_config(child, ptr, 4);
1483 * Support for MSI-X message interrupts.
/*
 * Bus method: program MSI-X table entry 'index' with the given message
 * address/data pair.  Each table entry is 16 bytes: address low dword,
 * address high dword, message data, then vector control.
 */
1486 pci_enable_msix_method(device_t dev, device_t child, u_int index,
1487 uint64_t address, uint32_t data)
1489 struct pci_devinfo *dinfo = device_get_ivars(child);
1490 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1493 KASSERT(msix->msix_table_len > index, ("bogus index"));
1494 offset = msix->msix_table_offset + index * 16;
1495 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1496 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1497 bus_write_4(msix->msix_table_res, offset + 8, data);
1499 /* Enable MSI -> HT mapping. */
1500 pci_ht_map_msi(child, address);
/*
 * Set the Vector Control mask bit for MSI-X table entry 'index'.
 * Bounds-checked against msix_msgnum (not msix_table_len) because
 * pci_resume_msix() and pci_alloc_msix() mask every hardware vector.
 * The MMIO write is skipped when the entry is already masked.
 */
1504 pci_mask_msix(device_t dev, u_int index)
1506 struct pci_devinfo *dinfo = device_get_ivars(dev);
1507 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1508 uint32_t offset, val;
1510 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1511 offset = msix->msix_table_offset + index * 16 + 12;
1512 val = bus_read_4(msix->msix_table_res, offset);
1513 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1514 val |= PCIM_MSIX_VCTRL_MASK;
1515 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Clear the Vector Control mask bit for MSI-X table entry 'index'.
 * Only entries inside the virtual table (msix_table_len) are legal
 * here; the MMIO write is skipped if the entry is already unmasked.
 */
1520 pci_unmask_msix(device_t dev, u_int index)
1522 struct pci_devinfo *dinfo = device_get_ivars(dev);
1523 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1524 uint32_t offset, val;
1526 KASSERT(msix->msix_table_len > index, ("bogus index"));
1527 offset = msix->msix_table_offset + index * 16 + 12;
1528 val = bus_read_4(msix->msix_table_res, offset);
1529 if (val & PCIM_MSIX_VCTRL_MASK) {
1530 val &= ~PCIM_MSIX_VCTRL_MASK;
1531 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return non-zero if MSI-X message 'index' has its bit set in the
 * Pending Bit Array (one bit per vector, 32 vectors per dword).
 */
1536 pci_pending_msix(device_t dev, u_int index)
1538 struct pci_devinfo *dinfo = device_get_ivars(dev);
1539 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1540 uint32_t offset, bit;
1542 KASSERT(msix->msix_table_len > index, ("bogus index"));
1543 offset = msix->msix_pba_offset + (index / 32) * 4;
1544 bit = 1 << index % 32;
1545 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1549 * Restore MSI-X registers and table during resume. If MSI-X is
1550 * enabled then walk the virtual table to restore the actual MSI-X
1554 pci_resume_msix(device_t dev)
1556 struct pci_devinfo *dinfo = device_get_ivars(dev);
1557 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1558 struct msix_table_entry *mte;
1559 struct msix_vector *mv;
1562 if (msix->msix_alloc > 0) {
1563 /* First, mask all vectors. */
1564 for (i = 0; i < msix->msix_msgnum; i++)
1565 pci_mask_msix(dev, i);
1567 /* Second, program any messages with at least one handler. */
1568 for (i = 0; i < msix->msix_table_len; i++) {
1569 mte = &msix->msix_table[i];
1570 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is a 1-based index into msix_vectors[]. */
1572 mv = &msix->msix_vectors[mte->mte_vector - 1];
1573 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1574 pci_unmask_msix(dev, i);
/* Finally, restore the saved MSI-X control register contents. */
1577 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1578 msix->msix_ctrl, 2);
1582 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1583 * returned in *count. After this function returns, each message will be
1584 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1587 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1589 struct pci_devinfo *dinfo = device_get_ivars(child);
1590 pcicfgregs *cfg = &dinfo->cfg;
1591 struct resource_list_entry *rle;
1592 int actual, error, i, irq, max;
1594 /* Don't let count == 0 get us into trouble. */
1598 /* If rid 0 is allocated, then fail. */
1599 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1600 if (rle != NULL && rle->res != NULL)
1603 /* Already have allocated messages? */
1604 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1607 /* If MSI-X is blacklisted for this system, fail. */
1608 if (pci_msix_blacklisted())
1611 /* MSI-X capability present? */
1612 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1615 /* Make sure the appropriate BARs are mapped. */
1616 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1617 cfg->msix.msix_table_bar);
1618 if (rle == NULL || rle->res == NULL ||
1619 !(rman_get_flags(rle->res) & RF_ACTIVE))
1621 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the vector table. */
1622 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1623 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1624 cfg->msix.msix_pba_bar);
1625 if (rle == NULL || rle->res == NULL ||
1626 !(rman_get_flags(rle->res) & RF_ACTIVE))
1629 cfg->msix.msix_pba_res = rle->res;
1632 device_printf(child,
1633 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1634 *count, cfg->msix.msix_msgnum);
/*
 * Ask the parent bridge for up to 'max' messages, one at a time.
 * NOTE(review): the code that stops on error and sets 'actual' is
 * not visible in this excerpt.
 */
1635 max = min(*count, cfg->msix.msix_msgnum);
1636 for (i = 0; i < max; i++) {
1637 /* Allocate a message. */
1638 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1644 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1650 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1652 device_printf(child, "using IRQ %ju for MSI-X\n",
1658 * Be fancy and try to print contiguous runs of
1659 * IRQ values as ranges. 'irq' is the previous IRQ.
1660 * 'run' is true if we are in a range.
1662 device_printf(child, "using IRQs %ju", rle->start);
1665 for (i = 1; i < actual; i++) {
1666 rle = resource_list_find(&dinfo->resources,
1667 SYS_RES_IRQ, i + 1);
1669 /* Still in a run? */
1670 if (rle->start == irq + 1) {
1676 /* Finish previous range. */
1682 /* Start new range. */
1683 printf(",%ju", rle->start);
1687 /* Unfinished range? */
1690 printf(" for MSI-X\n");
1694 /* Mask all vectors. */
1695 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1696 pci_mask_msix(child, i);
1698 /* Allocate and initialize vector data and virtual table. */
1699 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1700 M_DEVBUF, M_WAITOK | M_ZERO);
1701 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1702 M_DEVBUF, M_WAITOK | M_ZERO);
/* Identity mapping to start with: message i uses vector i + 1. */
1703 for (i = 0; i < actual; i++) {
1704 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1705 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1706 cfg->msix.msix_table[i].mte_vector = i + 1;
1709 /* Update control register to enable MSI-X. */
1710 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1711 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1712 cfg->msix.msix_ctrl, 2);
1714 /* Update counts of alloc'd messages. */
1715 cfg->msix.msix_alloc = actual;
1716 cfg->msix.msix_table_len = actual;
1722 * By default, pci_alloc_msix() will assign the allocated IRQ
1723 * resources consecutively to the first N messages in the MSI-X table.
1724 * However, device drivers may want to use different layouts if they
1725 * either receive fewer messages than they asked for, or they wish to
1726 * populate the MSI-X table sparsely. This method allows the driver
1727 * to specify what layout it wants. It must be called after a
1728 * successful pci_alloc_msix() but before any of the associated
1729 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1731 * The 'vectors' array contains 'count' message vectors. The array
1732 * maps directly to the MSI-X table in that index 0 in the array
1733 * specifies the vector for the first message in the MSI-X table, etc.
1734 * The vector value in each array index can either be 0 to indicate
1735 * that no vector should be assigned to a message slot, or it can be a
1736 * number from 1 to N (where N is the count returned from a
1737 * successful call to pci_alloc_msix()) to indicate which message
1738 * vector (IRQ) to be used for the corresponding message.
1740 * On successful return, each message with a non-zero vector will have
1741 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1742 * 1. Additionally, if any of the IRQs allocated via the previous
1743 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1744 * will be freed back to the system automatically.
1746 * For example, suppose a driver has a MSI-X table with 6 messages and
1747 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1748 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1749 * C. After the call to pci_alloc_msix(), the device will be setup to
1750 * have an MSI-X table of ABC--- (where - means no vector assigned).
1751 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1752 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1753 * be freed back to the system. This device will also have valid
1754 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1756 * In any case, the SYS_RES_IRQ rid X will always map to the message
1757 * at MSI-X table index X - 1 and will only be valid if a vector is
1758 * assigned to that table entry.
/* Bus method implementing pci_remap_msix(); see the big comment above. */
1761 pci_remap_msix_method(device_t dev, device_t child, int count,
1762 const u_int *vectors)
1764 struct pci_devinfo *dinfo = device_get_ivars(child);
1765 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1766 struct resource_list_entry *rle;
1767 int i, irq, j, *used;
1770 * Have to have at least one message in the table but the
1771 * table can't be bigger than the actual MSI-X table in the
1774 if (count == 0 || count > msix->msix_msgnum)
1777 /* Sanity check the vectors. */
1778 for (i = 0; i < count; i++)
1779 if (vectors[i] > msix->msix_alloc)
1783 * Make sure there aren't any holes in the vectors to be used.
1784 * It's a big pain to support it, and it doesn't really make
1785 * sense anyway. Also, at least one vector must be used.
1787 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1789 for (i = 0; i < count; i++)
1790 if (vectors[i] != 0)
1791 used[vectors[i] - 1] = 1;
/* A 0 followed by a 1 in 'used' means a hole: reject the layout. */
1792 for (i = 0; i < msix->msix_alloc - 1; i++)
1793 if (used[i] == 0 && used[i + 1] == 1) {
1794 free(used, M_DEVBUF);
1798 free(used, M_DEVBUF);
1802 /* Make sure none of the resources are allocated. */
1803 for (i = 0; i < msix->msix_table_len; i++) {
1804 if (msix->msix_table[i].mte_vector == 0)
1806 if (msix->msix_table[i].mte_handlers > 0) {
1807 free(used, M_DEVBUF);
1810 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1811 KASSERT(rle != NULL, ("missing resource"));
1812 if (rle->res != NULL) {
1813 free(used, M_DEVBUF);
1818 /* Free the existing resource list entries. */
1819 for (i = 0; i < msix->msix_table_len; i++) {
1820 if (msix->msix_table[i].mte_vector == 0)
1822 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1826 * Build the new virtual table keeping track of which vectors are
1829 free(msix->msix_table, M_DEVBUF);
1830 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1831 M_DEVBUF, M_WAITOK | M_ZERO);
1832 for (i = 0; i < count; i++)
1833 msix->msix_table[i].mte_vector = vectors[i];
1834 msix->msix_table_len = count;
1836 /* Free any unused IRQs and resize the vectors array if necessary. */
1837 j = msix->msix_alloc - 1;
1839 struct msix_vector *vec;
/* Trailing unused vectors are returned to the parent bridge. */
1841 while (used[j] == 0) {
1842 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1843 msix->msix_vectors[j].mv_irq);
1846 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1848 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1850 free(msix->msix_vectors, M_DEVBUF);
1851 msix->msix_vectors = vec;
1852 msix->msix_alloc = j + 1;
1854 free(used, M_DEVBUF);
1856 /* Map the IRQs onto the rids. */
1857 for (i = 0; i < count; i++) {
1858 if (vectors[i] == 0)
1860 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1861 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1866 device_printf(child, "Remapped MSI-X IRQs as: ");
1867 for (i = 0; i < count; i++) {
1870 if (vectors[i] == 0)
1874 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child': refuse if any
 * message still has handlers or a live IRQ resource, disable MSI-X
 * in the control register, tear down the virtual table, and hand
 * the IRQs back to the parent bridge.
 */
1883 pci_release_msix(device_t dev, device_t child)
1885 struct pci_devinfo *dinfo = device_get_ivars(child);
1886 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1887 struct resource_list_entry *rle;
1890 /* Do we have any messages to release? */
1891 if (msix->msix_alloc == 0)
1894 /* Make sure none of the resources are allocated. */
1895 for (i = 0; i < msix->msix_table_len; i++) {
1896 if (msix->msix_table[i].mte_vector == 0)
1898 if (msix->msix_table[i].mte_handlers > 0)
1900 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1901 KASSERT(rle != NULL, ("missing resource"));
1902 if (rle->res != NULL)
1906 /* Update control register to disable MSI-X. */
1907 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1908 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1909 msix->msix_ctrl, 2);
1911 /* Free the resource list entries. */
1912 for (i = 0; i < msix->msix_table_len; i++) {
1913 if (msix->msix_table[i].mte_vector == 0)
1915 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1917 free(msix->msix_table, M_DEVBUF);
1918 msix->msix_table_len = 0;
1920 /* Release the IRQs. */
1921 for (i = 0; i < msix->msix_alloc; i++)
1922 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1923 msix->msix_vectors[i].mv_irq);
1924 free(msix->msix_vectors, M_DEVBUF);
1925 msix->msix_alloc = 0;
1930 * Return the max supported MSI-X messages this device supports.
1931 * Basically, assuming the MD code can alloc messages, this function
1932 * should return the maximum value that pci_alloc_msix() can return.
1933 * Thus, it is subject to the tunables, etc.
/* See comment above: max MSI-X messages, honoring the pci_do_msix tunable. */
1936 pci_msix_count_method(device_t dev, device_t child)
1938 struct pci_devinfo *dinfo = device_get_ivars(child);
1939 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1941 if (pci_do_msix && msix->msix_location != 0)
1942 return (msix->msix_msgnum);
/* Return the BAR index holding the MSI-X Pending Bit Array, if usable. */
1947 pci_msix_pba_bar_method(device_t dev, device_t child)
1949 struct pci_devinfo *dinfo = device_get_ivars(child);
1950 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1952 if (pci_do_msix && msix->msix_location != 0)
1953 return (msix->msix_pba_bar);
/* Return the BAR index holding the MSI-X vector table, if usable. */
1958 pci_msix_table_bar_method(device_t dev, device_t child)
1960 struct pci_devinfo *dinfo = device_get_ivars(child);
1961 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1963 if (pci_do_msix && msix->msix_location != 0)
1964 return (msix->msix_table_bar);
1969 * HyperTransport MSI mapping control
/*
 * Enable or disable the HyperTransport MSI address-window mapping as
 * messages are programmed or torn down.  The window match compares
 * only the high bits (addr >> 20), i.e. a 1MB-aligned window.
 * NOTE(review): the early return when no HT MSI mapping capability
 * exists (ht_msimap == 0) is not visible in this excerpt -- confirm.
 */
1972 pci_ht_map_msi(device_t dev, uint64_t addr)
1974 struct pci_devinfo *dinfo = device_get_ivars(dev);
1975 struct pcicfg_ht *ht = &dinfo->cfg.ht;
/* Programming an in-window address while mapping is off: turn it on. */
1980 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1981 ht->ht_msiaddr >> 20 == addr >> 20) {
1982 /* Enable MSI -> HT mapping. */
1983 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1984 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/* Clearing the address (addr == 0) while mapping is on: turn it off. */
1988 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1989 /* Disable MSI -> HT mapping. */
1990 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1991 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the configured PCIe maximum payload size in bytes
 * (128 << field value).  NOTE(review): the MAX_PAYLOAD field sits in
 * bits 7:5 of the device control register, so a 'val >>= 5' should
 * precede the final shift; that line is not visible in this excerpt
 * -- confirm against upstream.
 */
1997 pci_get_max_payload(device_t dev)
1999 struct pci_devinfo *dinfo = device_get_ivars(dev);
2003 cap = dinfo->cfg.pcie.pcie_location;
2006 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2007 val &= PCIEM_CTL_MAX_PAYLOAD;
2009 return (1 << (val + 7));
/*
 * Return the configured PCIe maximum read request size in bytes
 * (128 << field value).  NOTE(review): the MAX_READ_REQUEST field
 * sits in bits 14:12, so a 'val >>= 12' should precede the final
 * shift; that line is not visible in this excerpt -- confirm.
 */
2013 pci_get_max_read_req(device_t dev)
2015 struct pci_devinfo *dinfo = device_get_ivars(dev);
2019 cap = dinfo->cfg.pcie.pcie_location;
2022 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2023 val &= PCIEM_CTL_MAX_READ_REQUEST;
2025 return (1 << (val + 7));
/*
 * Set the PCIe maximum read request size.  The requested size is
 * rounded down to a power of two and encoded into bits 14:12 of the
 * device control register (fls(size) - 8, so 128 bytes encodes as 0).
 * NOTE(review): clamping of 'size' to the legal [128, 4096] range is
 * not visible in this excerpt.
 */
2029 pci_set_max_read_req(device_t dev, int size)
2031 struct pci_devinfo *dinfo = device_get_ivars(dev);
2035 cap = dinfo->cfg.pcie.pcie_location;
2042 size = (1 << (fls(size) - 1));
2043 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2044 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
2045 val |= (fls(size) - 8) << 12;
2046 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
/*
 * Read a register from the PCIe capability of 'dev'.  Returns all
 * ones -- the value a failed config read yields -- when the device
 * has no PCIe capability.
 */
2051 pcie_read_config(device_t dev, int reg, int width)
2053 struct pci_devinfo *dinfo = device_get_ivars(dev);
2056 cap = dinfo->cfg.pcie.pcie_location;
2060 return (0xffffffff);
2063 return (pci_read_config(dev, cap + reg, width));
/* Write a register in the PCIe capability; silently no-op without one. */
2067 pcie_write_config(device_t dev, int reg, uint32_t value, int width)
2069 struct pci_devinfo *dinfo = device_get_ivars(dev);
2072 cap = dinfo->cfg.pcie.pcie_location;
2075 pci_write_config(dev, cap + reg, value, width);
2079 * Adjusts a PCI-e capability register by clearing the bits in mask
2080 * and setting the bits in (value & mask). Bits not set in mask are
2083 * Returns the old value on success or all ones on failure.
2086 pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
2089 struct pci_devinfo *dinfo = device_get_ivars(dev);
2093 cap = dinfo->cfg.pcie.pcie_location;
2097 return (0xffffffff);
/*
 * Read-modify-write: bits outside 'mask' are carried over from the
 * old value (the 'new = old & ~mask' step is not visible here).
 */
2100 old = pci_read_config(dev, cap + reg, width);
2102 new |= (value & mask);
2103 pci_write_config(dev, cap + reg, new, width);
2108 * Support for MSI message signalled interrupts.
/*
 * Bus method: program the MSI capability registers with the given
 * message address/data and set the MSI enable bit.  Functions with a
 * 64-bit capable MSI capability use the high address register and the
 * 64-bit data register offset.
 */
2111 pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
2114 struct pci_devinfo *dinfo = device_get_ivars(child);
2115 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2117 /* Write data and address values. */
2118 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
2119 address & 0xffffffff, 4);
2120 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2121 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
2123 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
2126 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
2129 /* Enable MSI in the control register. */
2130 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
2131 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2134 /* Enable MSI -> HT mapping. */
2135 pci_ht_map_msi(child, address);
/*
 * Bus method: disable MSI delivery.  The HT mapping is torn down
 * first (address 0), then the MSI enable bit is cleared.
 */
2139 pci_disable_msi_method(device_t dev, device_t child)
2141 struct pci_devinfo *dinfo = device_get_ivars(child);
2142 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2144 /* Disable MSI -> HT mapping. */
2145 pci_ht_map_msi(child, 0);
2147 /* Disable MSI in the control register. */
2148 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
2149 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2154 * Restore MSI registers during resume. If MSI is enabled then
2155 * restore the data and address registers in addition to the control
2159 pci_resume_msi(device_t dev)
2161 struct pci_devinfo *dinfo = device_get_ivars(dev);
2162 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Only re-program address/data if MSI was enabled before suspend. */
2166 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
2167 address = msi->msi_addr;
2168 data = msi->msi_data;
2169 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
2170 address & 0xffffffff, 4);
2171 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2172 pci_write_config(dev, msi->msi_location +
2173 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
2174 pci_write_config(dev, msi->msi_location +
2175 PCIR_MSI_DATA_64BIT, data, 2);
2177 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* The saved control register is restored unconditionally. */
2180 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Bus method: an IRQ's message address/data pair has changed (e.g. it
 * was moved to another CPU).  Find which MSI or MSI-X message of 'dev'
 * uses 'irq', fetch fresh address/data from the parent bridge, and
 * reprogram the hardware.
 */
2185 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
2187 struct pci_devinfo *dinfo = device_get_ivars(dev);
2188 pcicfgregs *cfg = &dinfo->cfg;
2189 struct resource_list_entry *rle;
2190 struct msix_table_entry *mte;
2191 struct msix_vector *mv;
2197 * Handle MSI first. We try to find this IRQ among our list
2198 * of MSI IRQs. If we find it, we request updated address and
2199 * data registers and apply the results.
2201 if (cfg->msi.msi_alloc > 0) {
2203 /* If we don't have any active handlers, nothing to do. */
2204 if (cfg->msi.msi_handlers == 0)
2206 for (i = 0; i < cfg->msi.msi_alloc; i++) {
2207 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2209 if (rle->start == irq) {
2210 error = PCIB_MAP_MSI(device_get_parent(bus),
2211 dev, irq, &addr, &data);
/* MSI programs all messages at once: disable, save, re-enable. */
2214 pci_disable_msi(dev);
2215 dinfo->cfg.msi.msi_addr = addr;
2216 dinfo->cfg.msi.msi_data = data;
2217 pci_enable_msi(dev, addr, data);
2225 * For MSI-X, we check to see if we have this IRQ. If we do,
2226 * we request the updated mapping info. If that works, we go
2227 * through all the slots that use this IRQ and update them.
2229 if (cfg->msix.msix_alloc > 0) {
2230 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2231 mv = &cfg->msix.msix_vectors[i];
2232 if (mv->mv_irq == irq) {
2233 error = PCIB_MAP_MSI(device_get_parent(bus),
2234 dev, irq, &addr, &data);
2237 mv->mv_address = addr;
/* Reprogram every table slot mapped to this vector (1-based). */
2239 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2240 mte = &cfg->msix.msix_table[j];
2241 if (mte->mte_vector != i + 1)
2243 if (mte->mte_handlers == 0)
2245 pci_mask_msix(dev, j);
2246 pci_enable_msix(dev, j, addr, data);
2247 pci_unmask_msix(dev, j);
2258 * Returns true if the specified device is blacklisted because MSI
2262 pci_msi_device_blacklisted(device_t dev)
/* The quirk table is consulted only when the blacklist is honored. */
2265 if (!pci_honor_msi_blacklist)
2268 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2272 * Determine if MSI is blacklisted globally on this system. Currently,
2273 * we just check for blacklisted chipsets as represented by the
2274 * host-PCI bridge at device 0:0:0. In the future, it may become
2275 * necessary to check other system attributes, such as the kenv values
2276 * that give the motherboard manufacturer and model number.
2279 pci_msi_blacklisted(void)
2283 if (!pci_honor_msi_blacklist)
2286 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2287 if (!(pcie_chipset || pcix_chipset)) {
2288 if (vm_guest != VM_GUEST_NO) {
2290 * Whitelist older chipsets in virtual
2291 * machines known to support MSI.
2293 dev = pci_find_bsf(0, 0, 0);
2295 return (!pci_has_quirk(pci_get_devid(dev),
2296 PCI_QUIRK_ENABLE_MSI_VM));
/* Otherwise check the host-PCI bridge at 0:0:0 against the quirks. */
2301 dev = pci_find_bsf(0, 0, 0);
2303 return (pci_msi_device_blacklisted(dev));
2308 * Returns true if the specified device is blacklisted because MSI-X
2309 * doesn't work. Note that this assumes that if MSI doesn't work,
2310 * MSI-X doesn't either.
2313 pci_msix_device_blacklisted(device_t dev)
2316 if (!pci_honor_msi_blacklist)
2319 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
/* MSI being broken implies MSI-X is broken too (see comment above). */
2322 return (pci_msi_device_blacklisted(dev));
2326 * Determine if MSI-X is blacklisted globally on this system. If MSI
2327 * is blacklisted, assume that MSI-X is as well. Check for additional
2328 * chipsets where MSI works but MSI-X does not.
2331 pci_msix_blacklisted(void)
2335 if (!pci_honor_msi_blacklist)
/* First check for an MSI-X-specific quirk on the 0:0:0 host bridge. */
2338 dev = pci_find_bsf(0, 0, 0);
2339 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2340 PCI_QUIRK_DISABLE_MSIX))
/* Fall back to the plain-MSI blacklist check. */
2343 return (pci_msi_blacklisted());
2347 * Attempt to allocate *count MSI messages. The actual number allocated is
2348 * returned in *count. After this function returns, each message will be
2349 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2352 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2354 struct pci_devinfo *dinfo = device_get_ivars(child);
2355 pcicfgregs *cfg = &dinfo->cfg;
2356 struct resource_list_entry *rle;
2357 int actual, error, i, irqs[32];
2360 /* Don't let count == 0 get us into trouble. */
2364 /* If rid 0 is allocated, then fail. */
2365 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2366 if (rle != NULL && rle->res != NULL)
2369 /* Already have allocated messages? */
2370 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2373 /* If MSI is blacklisted for this system, fail. */
2374 if (pci_msi_blacklisted())
2377 /* MSI capability present? */
2378 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2382 device_printf(child,
2383 "attempting to allocate %d MSI vectors (%d supported)\n",
2384 *count, cfg->msi.msi_msgnum);
2386 /* Don't ask for more than the device supports. */
2387 actual = min(*count, cfg->msi.msi_msgnum);
2389 /* Don't ask for more than 32 messages. */
2390 actual = min(actual, 32);
2392 /* MSI requires power of 2 number of messages. */
2393 if (!powerof2(actual))
2397 /* Try to allocate N messages. */
2398 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2410 * We now have N actual messages mapped onto SYS_RES_IRQ
2411 * resources in the irqs[] array, so add new resources
2412 * starting at rid 1.
2414 for (i = 0; i < actual; i++)
2415 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2416 irqs[i], irqs[i], 1);
2420 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2425 * Be fancy and try to print contiguous runs
2426 * of IRQ values as ranges. 'run' is true if
2427 * we are in a range.
2429 device_printf(child, "using IRQs %d", irqs[0]);
2431 for (i = 1; i < actual; i++) {
2433 /* Still in a run? */
2434 if (irqs[i] == irqs[i - 1] + 1) {
2439 /* Finish previous range. */
2441 printf("-%d", irqs[i - 1]);
2445 /* Start new range. */
2446 printf(",%d", irqs[i]);
2449 /* Unfinished range? */
2451 printf("-%d", irqs[actual - 1]);
2452 printf(" for MSI\n");
2456 /* Update control register with actual count. */
2457 ctrl = cfg->msi.msi_ctrl;
2458 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* Encode log2(actual) into the Multiple Message Enable field (bits 6:4). */
2459 ctrl |= (ffs(actual) - 1) << 4;
2460 cfg->msi.msi_ctrl = ctrl;
2461 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2463 /* Update counts of alloc'd messages. */
2464 cfg->msi.msi_alloc = actual;
2465 cfg->msi.msi_handlers = 0;
2470 /* Release the MSI messages associated with this device. */
/*
 * Bus method: release 'child's message-signalled interrupts.  MSI-X
 * is tried first; ENODEV from pci_release_msix() means no MSI-X was
 * allocated, in which case plain MSI is torn down here.
 */
2472 pci_release_msi_method(device_t dev, device_t child)
2474 struct pci_devinfo *dinfo = device_get_ivars(child);
2475 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2476 struct resource_list_entry *rle;
2477 int error, i, irqs[32];
2479 /* Try MSI-X first. */
2480 error = pci_release_msix(dev, child);
2481 if (error != ENODEV)
2484 /* Do we have any messages to release? */
2485 if (msi->msi_alloc == 0)
2487 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2489 /* Make sure none of the resources are allocated. */
2490 if (msi->msi_handlers > 0)
2492 for (i = 0; i < msi->msi_alloc; i++) {
2493 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2494 KASSERT(rle != NULL, ("missing MSI resource"));
2495 if (rle->res != NULL)
2497 irqs[i] = rle->start;
2500 /* Update control register with 0 count. */
2501 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2502 ("%s: MSI still enabled", __func__));
2503 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2504 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2507 /* Release the messages. */
2508 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2509 for (i = 0; i < msi->msi_alloc; i++)
2510 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2512 /* Update alloc count. */
2520 * Return the max supported MSI messages this device supports.
2521 * Basically, assuming the MD code can alloc messages, this function
2522 * should return the maximum value that pci_alloc_msi() can return.
2523 * Thus, it is subject to the tunables, etc.
/* See comment above: max MSI messages, honoring the pci_do_msi tunable. */
2526 pci_msi_count_method(device_t dev, device_t child)
2528 struct pci_devinfo *dinfo = device_get_ivars(child);
2529 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2531 if (pci_do_msi && msi->msi_location != 0)
2532 return (msi->msi_msgnum);
2536 /* free pcicfgregs structure and all depending data structures */
2539 pci_freecfg(struct pci_devinfo *dinfo)
2541 struct devlist *devlist_head;
2542 struct pci_map *pm, *next;
2545 devlist_head = &pci_devq;
/* VPD strings are only allocated when a VPD capability was found. */
2547 if (dinfo->cfg.vpd.vpd_reg) {
2548 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2549 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2550 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2551 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2552 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2553 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2554 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* Free each saved BAR map entry (safe iteration while removing). */
2556 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
/* Unlink from the global device list, then free the devinfo itself. */
2559 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2560 free(dinfo, M_DEVBUF);
2562 /* increment the generation count */
2565 /* we're losing one device */
2571 * PCI power management
2574 pci_set_powerstate_method(device_t dev, device_t child, int state)
2576 struct pci_devinfo *dinfo = device_get_ivars(child);
2577 pcicfgregs *cfg = &dinfo->cfg;
2579 int oldstate, highest, delay;
/* No power-management capability: nothing we can do. */
2581 if (cfg->pp.pp_cap == 0)
2582 return (EOPNOTSUPP);
2585 * Optimize a no state change request away. While it would be OK to
2586 * write to the hardware in theory, some devices have shown odd
2587 * behavior when going from D3 -> D3.
2589 oldstate = pci_get_powerstate(child);
2590 if (oldstate == state)
2594 * The PCI power management specification states that after a state
2595 * transition between PCI power states, system software must
2596 * guarantee a minimal delay before the function accesses the device.
2597 * Compute the worst case delay that we need to guarantee before we
2598 * access the device. Many devices will be responsive much more
2599 * quickly than this delay, but there are some that don't respond
2600 * instantly to state changes. Transitions to/from D3 state require
2601 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2602 * is done below with DELAY rather than a sleeper function because
2603 * this function can be called from contexts where we cannot sleep.
2605 highest = (oldstate > state) ? oldstate : state;
2606 if (highest == PCI_POWERSTATE_D3)
2608 else if (highest == PCI_POWERSTATE_D2)
/* Read PMCSR and clear the power-state field before setting it. */
2612 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2613 & ~PCIM_PSTAT_DMASK;
2615 case PCI_POWERSTATE_D0:
2616 status |= PCIM_PSTAT_D0;
2618 case PCI_POWERSTATE_D1:
/* D1 and D2 support are optional; check the capability bits. */
2619 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2620 return (EOPNOTSUPP);
2621 status |= PCIM_PSTAT_D1;
2623 case PCI_POWERSTATE_D2:
2624 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2625 return (EOPNOTSUPP);
2626 status |= PCIM_PSTAT_D2;
2628 case PCI_POWERSTATE_D3:
2629 status |= PCIM_PSTAT_D3;
2636 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
/*
 * NOTE(review): the post-write settle delay described above (DELAY
 * for 'delay' computed earlier) is not visible in this excerpt.
 */
2639 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/* Return the current PCI power state of `child` by decoding the PM
 * status register; devices without a PM capability are always D0. */
2646 pci_get_powerstate_method(device_t dev, device_t child)
2648 struct pci_devinfo *dinfo = device_get_ivars(child);
2649 pcicfgregs *cfg = &dinfo->cfg;
2653 if (cfg->pp.pp_cap != 0) {
2654 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2655 switch (status & PCIM_PSTAT_DMASK) {
2657 result = PCI_POWERSTATE_D0;
2660 result = PCI_POWERSTATE_D1;
2663 result = PCI_POWERSTATE_D2;
2666 result = PCI_POWERSTATE_D3;
2669 result = PCI_POWERSTATE_UNKNOWN;
2673 /* No support, device is always at D0 */
2674 result = PCI_POWERSTATE_D0;
2680 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set `bit` in the child's PCI command register. */
2683 static __inline void
2684 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2688 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2690 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear `bit` in the child's PCI command register. */
2693 static __inline void
2694 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2698 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2700 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable DMA (bus mastering) for the child device. */
2704 pci_enable_busmaster_method(device_t dev, device_t child)
2706 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable DMA (bus mastering) for the child device. */
2711 pci_disable_busmaster_method(device_t dev, device_t child)
2713 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Enable decoding of the given address space (SYS_RES_IOPORT or
 * SYS_RES_MEMORY) in the child's command register. */
2718 pci_enable_io_method(device_t dev, device_t child, int space)
2723 case SYS_RES_IOPORT:
2724 bit = PCIM_CMD_PORTEN;
2726 case SYS_RES_MEMORY:
2727 bit = PCIM_CMD_MEMEN;
2732 pci_set_command_bit(dev, child, bit);
/* Disable decoding of the given address space (SYS_RES_IOPORT or
 * SYS_RES_MEMORY) in the child's command register. */
2737 pci_disable_io_method(device_t dev, device_t child, int space)
2742 case SYS_RES_IOPORT:
2743 bit = PCIM_CMD_PORTEN;
2745 case SYS_RES_MEMORY:
2746 bit = PCIM_CMD_MEMEN;
2751 pci_clear_command_bit(dev, child, bit);
2756 * New style pci driver. Parent device is either a pci-host-bridge or a
2757 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/* Dump a verbose description of a newly-found device's config header:
 * IDs, location, class, command/status, timers, interrupt routing, and
 * any power-management, MSI, and MSI-X capabilities. Output goes to the
 * console via printf() during boot-time bus enumeration. */
2761 pci_print_verbose(struct pci_devinfo *dinfo)
2765 pcicfgregs *cfg = &dinfo->cfg;
2767 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2768 cfg->vendor, cfg->device, cfg->revid);
2769 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2770 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2771 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2772 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2774 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2775 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2776 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2777 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2778 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
/* intpin is 1-based (1 == INTA); 0 means no interrupt pin. */
2779 if (cfg->intpin > 0)
2780 printf("\tintpin=%c, irq=%d\n",
2781 cfg->intpin +'a' -1, cfg->intline);
2782 if (cfg->pp.pp_cap) {
2785 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2786 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2787 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2788 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2789 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2790 status & PCIM_PSTAT_DMASK);
2792 if (cfg->msi.msi_location) {
2795 ctrl = cfg->msi.msi_ctrl;
2796 printf("\tMSI supports %d message%s%s%s\n",
2797 cfg->msi.msi_msgnum,
2798 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2799 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2800 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2802 if (cfg->msix.msix_location) {
2803 printf("\tMSI-X supports %d message%s ",
2804 cfg->msix.msix_msgnum,
2805 (cfg->msix.msix_msgnum == 1) ? "" : "s");
/* The MSI-X table and pending-bit array may share one BAR or live in
 * two different BARs; report accordingly. */
2806 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2807 printf("in map 0x%x\n",
2808 cfg->msix.msix_table_bar);
2810 printf("in maps 0x%x and 0x%x\n",
2811 cfg->msix.msix_table_bar,
2812 cfg->msix.msix_pba_bar);
/* Return non-zero if I/O port decoding is enabled in the command register. */
2818 pci_porten(device_t dev)
2820 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return non-zero if memory decoding is enabled in the command register. */
2824 pci_memen(device_t dev)
2826 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/* Size a BAR by the standard write-all-ones probe. On return *mapp holds
 * the BAR's original value, *testvalp the value read back after writing
 * all 1's (the low log2(size) bits read as 0), and *bar64 (if non-NULL)
 * whether the BAR is a 64-bit memory BAR. The original BAR contents and
 * command register are restored before returning. */
2830 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp,
2833 struct pci_devinfo *dinfo;
2834 pci_addr_t map, testval;
2839 * The device ROM BAR is special. It is always a 32-bit
2840 * memory BAR. Bit 0 is special and should not be set when
2843 dinfo = device_get_ivars(dev);
2844 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2845 map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps the ROM-enable bit (bit 0) clear during the probe. */
2846 pci_write_config(dev, reg, 0xfffffffe, 4);
2847 testval = pci_read_config(dev, reg, 4);
2848 pci_write_config(dev, reg, map, 4);
2850 *testvalp = testval;
2856 map = pci_read_config(dev, reg, 4);
2857 ln2range = pci_maprange(map);
/* 64-bit BARs consume two consecutive dwords; read the upper half. */
2859 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2862 * Disable decoding via the command register before
2863 * determining the BAR's length since we will be placing it in
2866 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2867 pci_write_config(dev, PCIR_COMMAND,
2868 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2871 * Determine the BAR's length by writing all 1's. The bottom
2872 * log_2(size) bits of the BAR will stick as 0 when we read
2875 pci_write_config(dev, reg, 0xffffffff, 4);
2876 testval = pci_read_config(dev, reg, 4);
2877 if (ln2range == 64) {
2878 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2879 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2883 * Restore the original value of the BAR. We may have reprogrammed
2884 * the BAR of the low-level console device and when booting verbose,
2885 * we need the console device addressable.
2887 pci_write_config(dev, reg, map, 4);
2889 pci_write_config(dev, reg + 4, map >> 32, 4);
2890 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2893 *testvalp = testval;
2895 *bar64 = (ln2range == 64);
/* Program a BAR with `base` and refresh the cached pm_value from the
 * hardware (both halves for a 64-bit BAR). */
2899 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2901 struct pci_devinfo *dinfo;
2904 /* The device ROM BAR is always a 32-bit memory BAR. */
2905 dinfo = device_get_ivars(dev);
2906 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2909 ln2range = pci_maprange(pm->pm_value);
2910 pci_write_config(dev, pm->pm_reg, base, 4);
2912 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
/* Re-read so pm_value reflects what the device actually latched. */
2913 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2915 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2916 pm->pm_reg + 4, 4) << 32;
/* Look up the saved BAR record for config register `reg`; NULL if the
 * BAR was never recorded via pci_add_bar(). */
2920 pci_find_bar(device_t dev, int reg)
2922 struct pci_devinfo *dinfo;
2925 dinfo = device_get_ivars(dev);
2926 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2927 if (pm->pm_reg == reg)
/* Return true if decoding is currently active for this BAR: the ROM BAR
 * additionally requires its own enable bit, and the relevant command-
 * register bit (memory vs. I/O) must be set. */
2934 pci_bar_enabled(device_t dev, struct pci_map *pm)
2936 struct pci_devinfo *dinfo;
2939 dinfo = device_get_ivars(dev);
2940 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2941 !(pm->pm_value & PCIM_BIOS_ENABLE))
2943 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2944 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2945 return ((cmd & PCIM_CMD_MEMEN) != 0);
2947 return ((cmd & PCIM_CMD_PORTEN) != 0);
/* Record a BAR (register offset, raw value, log2 size) in the device's
 * map list, kept sorted by register offset. Returns the new entry. */
2951 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2953 struct pci_devinfo *dinfo;
2954 struct pci_map *pm, *prev;
2956 dinfo = device_get_ivars(dev);
2957 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2959 pm->pm_value = value;
/* Walk the sorted list to find the insertion point; duplicates are a
 * programming error. */
2961 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2962 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2964 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2965 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2969 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2971 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/* Rewrite every saved BAR value into the hardware — used to restore a
 * device's address decoding (e.g. after a power-state transition). */
2976 pci_restore_bars(device_t dev)
2978 struct pci_devinfo *dinfo;
2982 dinfo = device_get_ivars(dev);
2983 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2984 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2987 ln2range = pci_maprange(pm->pm_value);
2988 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2990 pci_write_config(dev, pm->pm_reg + 4,
2991 pm->pm_value >> 32, 4);
2996 * Add a resource based on a pci map register. Return 1 if the map
2997 * register is a 32bit map register or 2 if it is a 64bit register.
/* Probe BAR `reg`, record it, add it to the resource list `rl`, and try
 * to reserve its range from the parent bus. `force` adds even disabled/
 * unprogrammed BARs; `prefetch` marks the resource RF_PREFETCHABLE. */
3000 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
3001 int force, int prefetch)
3004 pci_addr_t base, map, testval;
3005 pci_addr_t start, end, count;
3006 int barlen, basezero, flags, maprange, mapsize, type;
3008 struct resource *res;
3011 * The BAR may already exist if the device is a CardBus card
3012 * whose CIS is stored in this BAR.
3014 pm = pci_find_bar(dev, reg);
3016 maprange = pci_maprange(pm->pm_value);
3017 barlen = maprange == 64 ? 2 : 1;
3021 pci_read_bar(dev, reg, &map, &testval, NULL);
3022 if (PCI_BAR_MEM(map)) {
3023 type = SYS_RES_MEMORY;
3024 if (map & PCIM_BAR_MEM_PREFETCH)
3027 type = SYS_RES_IOPORT;
3028 mapsize = pci_mapsize(testval);
3029 base = pci_mapbase(map);
3030 #ifdef __PCI_BAR_ZERO_VALID
3033 basezero = base == 0;
3035 maprange = pci_maprange(map);
3036 barlen = maprange == 64 ? 2 : 1;
3039 * For I/O registers, if bottom bit is set, and the next bit up
3040 * isn't clear, we know we have a BAR that doesn't conform to the
3041 * spec, so ignore it. Also, sanity check the size of the data
3042 * areas to the type of memory involved. Memory must be at least
3043 * 16 bytes in size, while I/O ranges must be at least 4.
3045 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
3047 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
3048 (type == SYS_RES_IOPORT && mapsize < 2))
3051 /* Save a record of this BAR. */
3052 pm = pci_add_bar(dev, reg, map, mapsize);
3054 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
3055 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
3056 if (type == SYS_RES_IOPORT && !pci_porten(dev))
3057 printf(", port disabled\n");
3058 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
3059 printf(", memory disabled\n");
3061 printf(", enabled\n");
3065 * If base is 0, then we have problems if this architecture does
3066 * not allow that. It is best to ignore such entries for the
3067 * moment. These will be allocated later if the driver specifically
3068 * requests them. However, some removable busses look better when
3069 * all resources are allocated, so allow '0' to be overriden.
3071 * Similarly treat maps whose values is the same as the test value
3072 * read back. These maps have had all f's written to them by the
3073 * BIOS in an attempt to disable the resources.
3075 if (!force && (basezero || map == testval))
/* A base that does not fit in u_long cannot be represented by the
 * resource code on this architecture; skip the BAR with a warning. */
3077 if ((u_long)base != base) {
3079 "pci%d:%d:%d:%d bar %#x too many address bits",
3080 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
3081 pci_get_function(dev), reg);
3086 * This code theoretically does the right thing, but has
3087 * undesirable side effects in some cases where peripherals
3088 * respond oddly to having these bits enabled. Let the user
3089 * be able to turn them off (since pci_enable_io_modes is 1 by
3092 if (pci_enable_io_modes) {
3093 /* Turn on resources that have been left off by a lazy BIOS */
3094 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
3095 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3096 cmd |= PCIM_CMD_PORTEN;
3097 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3099 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
3100 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3101 cmd |= PCIM_CMD_MEMEN;
3102 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3105 if (type == SYS_RES_IOPORT && !pci_porten(dev))
3107 if (type == SYS_RES_MEMORY && !pci_memen(dev))
3111 count = (pci_addr_t)1 << mapsize;
3112 flags = RF_ALIGNMENT_LOG2(mapsize);
3114 flags |= RF_PREFETCHABLE;
3115 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
3116 start = 0; /* Let the parent decide. */
3120 end = base + count - 1;
3122 resource_list_add(rl, type, reg, start, end, count);
3125 * Try to allocate the resource for this BAR from our parent
3126 * so that this resource range is already reserved. The
3127 * driver for this device will later inherit this resource in
3128 * pci_alloc_resource().
3130 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
3132 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0)) {
3134 * If the allocation fails, try to allocate a resource for
3135 * this BAR using any available range. The firmware felt
3136 * it was important enough to assign a resource, so don't
3137 * disable decoding if we can help it.
3139 resource_list_delete(rl, type, reg);
3140 resource_list_add(rl, type, reg, 0, ~0, count);
3141 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0,
3146 * If the allocation fails, delete the resource list entry
3147 * and disable decoding for this device.
3149 * If the driver requests this resource in the future,
3150 * pci_reserve_map() will try to allocate a fresh
3153 resource_list_delete(rl, type, reg);
3154 pci_disable_io(dev, type);
3157 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
3158 pci_get_domain(dev), pci_get_bus(dev),
3159 pci_get_slot(dev), pci_get_function(dev), reg);
3161 start = rman_get_start(res);
3162 pci_write_bar(dev, pm, start);
3168 * For ATA devices we need to decide early what addressing mode to use.
3169 * Legacy demands that the primary and secondary ATA ports sits on the
3170 * same addresses that old ISA hardware did. This dictates that we use
3171 * those addresses and ignore the BAR's if we cannot set PCI native
3175 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
3176 uint32_t prefetchmask)
3178 int rid, type, progif;
3180 /* if this device supports PCI native addressing use it */
3181 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3182 if ((progif & 0x8a) == 0x8a) {
3183 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
3184 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
3185 printf("Trying ATA native PCI addressing mode\n");
/* Flip both channels into native mode via the progif register. */
3186 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
3190 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3191 type = SYS_RES_IOPORT;
/* Primary channel: native mode uses BAR 0/1; compatibility mode uses
 * the legacy ISA ranges 0x1f0-0x1f7 and 0x3f6. */
3192 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
3193 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
3194 prefetchmask & (1 << 0));
3195 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
3196 prefetchmask & (1 << 1));
3199 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
3200 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
3203 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
3204 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
/* Secondary channel: native mode uses BAR 2/3; compatibility mode uses
 * the legacy ISA ranges 0x170-0x177 and 0x376. */
3207 if (progif & PCIP_STORAGE_IDE_MODESEC) {
3208 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3209 prefetchmask & (1 << 2));
3210 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3211 prefetchmask & (1 << 3));
3214 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3215 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3218 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3219 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
/* BAR 4 (bus-master DMA) and BAR 5 are mapped normally in either mode. */
3222 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3223 prefetchmask & (1 << 4));
3224 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3225 prefetchmask & (1 << 5));
/* Determine the legacy INTx IRQ for `dev` (tunable override, intline
 * register, or bus routing) and add it as the rid-0 IRQ resource. */
3229 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3231 struct pci_devinfo *dinfo = device_get_ivars(dev);
3232 pcicfgregs *cfg = &dinfo->cfg;
3233 char tunable_name[64];
3236 /* Has to have an intpin to have an interrupt. */
3237 if (cfg->intpin == 0)
3240 /* Let the user override the IRQ with a tunable. */
3241 irq = PCI_INVALID_IRQ;
3242 snprintf(tunable_name, sizeof(tunable_name),
3243 "hw.pci%d.%d.%d.INT%c.irq",
3244 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject tunable values outside the valid IRQ range 1..254. */
3245 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3246 irq = PCI_INVALID_IRQ;
3249 * If we didn't get an IRQ via the tunable, then we either use the
3250 * IRQ value in the intline register or we ask the bus to route an
3251 * interrupt for us. If force_route is true, then we only use the
3252 * value in the intline register if the bus was unable to assign an
3255 if (!PCI_INTERRUPT_VALID(irq)) {
3256 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3257 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3258 if (!PCI_INTERRUPT_VALID(irq))
3262 /* If after all that we don't have an IRQ, just bail. */
3263 if (!PCI_INTERRUPT_VALID(irq))
3266 /* Update the config register if it changed. */
3267 if (irq != cfg->intline) {
3269 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3272 /* Add this IRQ as rid 0 interrupt resource. */
3273 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3276 /* Perform early OHCI takeover from SMM. */
/* Ask the BIOS/SMM for ownership of an OHCI USB controller before the
 * USB stack attaches; reset the controller if SMM does not respond. */
3278 ohci_early_takeover(device_t self)
3280 struct resource *res;
3286 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3290 ctl = bus_read_4(res, OHCI_CONTROL);
3291 if (ctl & OHCI_IR) {
3293 printf("ohci early: "
3294 "SMM active, request owner change\n");
/* Request an ownership change and poll for SMM to drop OHCI_IR. */
3295 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3296 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3298 ctl = bus_read_4(res, OHCI_CONTROL);
3300 if (ctl & OHCI_IR) {
3302 printf("ohci early: "
3303 "SMM does not respond, resetting\n");
3304 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3306 /* Disable interrupts */
3307 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3310 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3313 /* Perform early UHCI takeover from SMM. */
/* Disable BIOS legacy USB emulation on a UHCI controller and mask its
 * interrupts before the USB stack attaches. */
3315 uhci_early_takeover(device_t self)
3317 struct resource *res;
3321 * Set the PIRQD enable bit and switch off all the others. We don't
3322 * want legacy support to interfere with us XXX Does this also mean
3323 * that the BIOS won't touch the keyboard anymore if it is connected
3324 * to the ports of the root hub?
3326 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3328 /* Disable interrupts */
3329 rid = PCI_UHCI_BASE_REG;
3330 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3332 bus_write_2(res, UHCI_INTR, 0);
3333 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3337 /* Perform early EHCI takeover from SMM. */
/* Walk the EHCI extended-capability list looking for the legacy-support
 * capability, request OS ownership via the OS semaphore, and wait for
 * the BIOS to release its semaphore; then mask controller interrupts. */
3339 ehci_early_takeover(device_t self)
3341 struct resource *res;
3351 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3355 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3357 /* Synchronise with the BIOS if it owns the controller. */
3358 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3359 eecp = EHCI_EECP_NEXT(eec)) {
3360 eec = pci_read_config(self, eecp, 4);
3361 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3364 bios_sem = pci_read_config(self, eecp +
3365 EHCI_LEGSUP_BIOS_SEM, 1);
3366 if (bios_sem == 0) {
3370 printf("ehci early: "
3371 "SMM active, request owner change\n");
3373 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
/* Poll until the BIOS semaphore clears (bounded wait). */
3375 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3377 bios_sem = pci_read_config(self, eecp +
3378 EHCI_LEGSUP_BIOS_SEM, 1);
3381 if (bios_sem != 0) {
3383 printf("ehci early: "
3384 "SMM does not respond\n");
3386 /* Disable interrupts */
3387 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3388 bus_write_4(res, offs + EHCI_USBINTR, 0);
3390 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3393 /* Perform early XHCI takeover from SMM. */
/* Same ownership handshake as EHCI, but using the xHCI extended
 * capability list (dword-indexed) and the USB-legacy capability's
 * BIOS/OS semaphores; finally stop the controller and mask interrupts. */
3395 xhci_early_takeover(device_t self)
3397 struct resource *res;
3407 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3411 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3415 /* Synchronise with the BIOS if it owns the controller. */
3416 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3417 eecp += XHCI_XECP_NEXT(eec) << 2) {
3418 eec = bus_read_4(res, eecp);
3420 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3423 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3428 printf("xhci early: "
3429 "SMM active, request owner change\n");
3431 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3433 /* wait a maximum of 5 second */
3435 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3437 bios_sem = bus_read_1(res, eecp +
3438 XHCI_XECP_BIOS_SEM);
3441 if (bios_sem != 0) {
3443 printf("xhci early: "
3444 "SMM does not respond\n");
3447 /* Disable interrupts */
3448 offs = bus_read_1(res, XHCI_CAPLENGTH);
3449 bus_write_4(res, offs + XHCI_USBCMD, 0);
/* Read USBSTS to flush the posted write before releasing the mapping. */
3450 bus_read_4(res, offs + XHCI_USBSTS);
3452 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3455 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/* Reserve the secondary/subordinate bus-number range programmed into a
 * PCI-PCI or CardBus bridge, applying device quirks first; if the range
 * cannot be reserved, clear the secbus/subbus registers so the range is
 * renumbered later. */
3457 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3458 struct resource_list *rl)
3460 struct resource *res;
3462 rman_res_t start, end, count;
3463 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
/* Bridges and CardBus bridges keep the bus registers at different
 * config offsets; anything else has no secondary bus. */
3465 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3466 case PCIM_HDRTYPE_BRIDGE:
3467 sec_reg = PCIR_SECBUS_1;
3468 sub_reg = PCIR_SUBBUS_1;
3470 case PCIM_HDRTYPE_CARDBUS:
3471 sec_reg = PCIR_SECBUS_2;
3472 sub_reg = PCIR_SUBBUS_2;
3479 * If the existing bus range is valid, attempt to reserve it
3480 * from our parent. If this fails for any reason, clear the
3481 * secbus and subbus registers.
3483 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3484 * This would at least preserve the existing sec_bus if it is
3487 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3488 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3490 /* Quirk handling. */
3491 switch (pci_get_devid(dev)) {
3492 case 0x12258086: /* Intel 82454KX/GX (Orion) */
3493 sup_bus = pci_read_config(dev, 0x41, 1);
3494 if (sup_bus != 0xff) {
3495 sec_bus = sup_bus + 1;
3496 sub_bus = sup_bus + 1;
3497 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3498 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3503 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
3504 if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
3506 if (strncmp(cp, "Compal", 6) != 0) {
3511 if ((cp = kern_getenv("smbios.planar.product")) == NULL)
3513 if (strncmp(cp, "08A0", 4) != 0) {
3518 if (sub_bus < 0xa) {
3520 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3526 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
3527 if (sec_bus > 0 && sub_bus >= sec_bus) {
3530 count = end - start + 1;
3532 resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count);
3535 * If requested, clear secondary bus registers in
3536 * bridge devices to force a complete renumbering
3537 * rather than reserving the existing range. However,
3538 * preserve the existing size.
3540 if (pci_clear_buses)
3544 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3545 start, end, count, 0);
3551 "pci%d:%d:%d:%d secbus failed to allocate\n",
3552 pci_get_domain(dev), pci_get_bus(dev),
3553 pci_get_slot(dev), pci_get_function(dev));
/* Reservation failed: zero the bridge's bus registers so a fresh
 * range is assigned later. */
3557 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3558 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
/* Allocate a range of bus numbers for a bridge child, lazily reserving
 * the range from the parent and programming the bridge's secbus/subbus
 * registers when the reservation is created here. */
3561 static struct resource *
3562 pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start,
3563 rman_res_t end, rman_res_t count, u_int flags)
3565 struct pci_devinfo *dinfo;
3567 struct resource_list *rl;
3568 struct resource *res;
3569 int sec_reg, sub_reg;
3571 dinfo = device_get_ivars(child);
3573 rl = &dinfo->resources;
3574 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3575 case PCIM_HDRTYPE_BRIDGE:
3576 sec_reg = PCIR_SECBUS_1;
3577 sub_reg = PCIR_SUBBUS_1;
3579 case PCIM_HDRTYPE_CARDBUS:
3580 sec_reg = PCIR_SECBUS_2;
3581 sub_reg = PCIR_SUBBUS_2;
3590 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3591 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
/* Reserve on first use; on failure remove the entry again so a later
 * attempt starts clean. */
3592 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
3593 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3594 start, end, count, flags & ~RF_ACTIVE);
3596 resource_list_delete(rl, PCI_RES_BUS, *rid);
3597 device_printf(child, "allocating %ju bus%s failed\n",
3598 count, count == 1 ? "" : "es");
3602 device_printf(child,
3603 "Lazy allocation of %ju bus%s at %ju\n", count,
3604 count == 1 ? "" : "es", rman_get_start(res));
/* Program the bridge with the bus range we just obtained. */
3605 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3606 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3608 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3609 end, count, flags));
/* Translate an Enhanced Allocation BAR Equivalent Indicator (BEI) into
 * the resource rid (config register offset) it corresponds to: a normal
 * BAR, the expansion ROM, or an SR-IOV VF BAR. */
3614 pci_ea_bei_to_rid(device_t dev, int bei)
3617 struct pci_devinfo *dinfo;
3619 struct pcicfg_iov *iov;
3621 dinfo = device_get_ivars(dev);
3622 iov = dinfo->cfg.iov;
3624 iov_pos = iov->iov_pos;
3629 /* Check if matches BAR */
3630 if ((bei >= PCIM_EA_BEI_BAR_0) &&
3631 (bei <= PCIM_EA_BEI_BAR_5))
3632 return (PCIR_BAR(bei));
3635 if (bei == PCIM_EA_BEI_ROM)
3639 /* Check if matches VF_BAR */
3640 if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) &&
3641 (bei <= PCIM_EA_BEI_VF_BAR_5))
3642 return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) +
/* Return whether the Enhanced Allocation entry mapping to `rid` (if any)
 * has its enable flag set. */
3650 pci_ea_is_enabled(device_t dev, int rid)
3652 struct pci_ea_entry *ea;
3653 struct pci_devinfo *dinfo;
3655 dinfo = device_get_ivars(dev);
3657 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
3658 if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid)
3659 return ((ea->eae_flags & PCIM_EA_ENABLE) > 0);
/* Add and reserve resources described by the device's Enhanced
 * Allocation (EA) capability. With alloc_iov != 0 only SR-IOV VF-BAR
 * entries are processed (ranges scaled by the VF count); otherwise only
 * regular BAR/ROM entries. Entries that cannot be reserved are disabled
 * so the legacy BAR path can handle them instead. */
3666 pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov)
3668 struct pci_ea_entry *ea;
3669 struct pci_devinfo *dinfo;
3670 pci_addr_t start, end, count;
3671 struct resource_list *rl;
3672 int type, flags, rid;
3673 struct resource *res;
3676 struct pcicfg_iov *iov;
3679 dinfo = device_get_ivars(dev);
3680 rl = &dinfo->resources;
3684 iov = dinfo->cfg.iov;
3687 if (dinfo->cfg.ea.ea_location == 0)
3690 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
3693 * TODO: Ignore EA-BAR if is not enabled.
3694 * Currently the EA implementation supports
3695 * only situation, where EA structure contains
3696 * predefined entries. In case they are not enabled
3697 * leave them unallocated and proceed with
3698 * a legacy-BAR mechanism.
3700 if ((ea->eae_flags & PCIM_EA_ENABLE) == 0)
/* Decode the entry's properties field into resource type and flags. */
3703 switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) {
3704 case PCIM_EA_P_MEM_PREFETCH:
3705 case PCIM_EA_P_VF_MEM_PREFETCH:
3706 flags = RF_PREFETCHABLE;
3708 case PCIM_EA_P_VF_MEM:
3710 type = SYS_RES_MEMORY;
3713 type = SYS_RES_IOPORT;
3719 if (alloc_iov != 0) {
3721 /* Allocating IOV, confirm BEI matches */
3722 if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) ||
3723 (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5))
3729 /* Allocating BAR, confirm BEI matches */
3730 if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) ||
3731 (ea->eae_bei > PCIM_EA_BEI_BAR_5)) &&
3732 (ea->eae_bei != PCIM_EA_BEI_ROM))
3736 rid = pci_ea_bei_to_rid(dev, ea->eae_bei);
3740 /* Skip resources already allocated by EA */
3741 if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) ||
3742 (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL))
3745 start = ea->eae_base;
3746 count = ea->eae_max_offset + 1;
3749 count = count * iov->iov_num_vfs;
3751 end = start + count - 1;
3755 resource_list_add(rl, type, rid, start, end, count);
3756 res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count,
3759 resource_list_delete(rl, type, rid);
3762 * Failed to allocate using EA, disable entry.
3763 * Another attempt to allocation will be performed
3764 * further, but this time using legacy BAR registers
3766 tmp = pci_read_config(dev, ea->eae_cfg_offset, 4);
3767 tmp &= ~PCIM_EA_ENABLE;
3768 pci_write_config(dev, ea->eae_cfg_offset, tmp, 4);
3771 * Disabling entry might fail in case it is hardwired.
3772 * Read flags again to match current status.
3774 ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4);
3779 /* As per specification, fill BAR with zeros */
3780 pci_write_config(dev, rid, 0, 4);
/* Populate a new child's resource list: EA-described resources first,
 * then BARs (with ATA and per-device quirk handling), the legacy INTx
 * interrupt, early USB SMM takeover, and bridge bus-number ranges. */
3785 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3787 struct pci_devinfo *dinfo;
3789 struct resource_list *rl;
3790 const struct pci_quirk *q;
3794 dinfo = device_get_ivars(dev);
3796 rl = &dinfo->resources;
3797 devid = (cfg->device << 16) | cfg->vendor;
3799 /* Allocate resources using Enhanced Allocation */
3800 pci_add_resources_ea(bus, dev, 0);
3802 /* ATA devices needs special map treatment */
3803 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3804 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3805 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3806 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3807 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3808 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* Walk the BARs; pci_add_map() returns 1 or 2 registers consumed, so
 * the loop index is advanced inside the body. */
3810 for (i = 0; i < cfg->nummaps;) {
3811 /* Skip resources already managed by EA */
3812 if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) ||
3813 (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) ||
3814 pci_ea_is_enabled(dev, PCIR_BAR(i))) {
3820 * Skip quirked resources.
3822 for (q = &pci_quirks[0]; q->devid != 0; q++)
3823 if (q->devid == devid &&
3824 q->type == PCI_QUIRK_UNMAP_REG &&
3825 q->arg1 == PCIR_BAR(i))
3827 if (q->devid != 0) {
3831 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3832 prefetchmask & (1 << i));
3836 * Add additional, quirked resources.
3838 for (q = &pci_quirks[0]; q->devid != 0; q++)
3839 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3840 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3842 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3843 #ifdef __PCI_REROUTE_INTERRUPT
3845 * Try to re-route interrupts. Sometimes the BIOS or
3846 * firmware may leave bogus values in these registers.
3847 * If the re-route fails, then just stick with what we
3850 pci_assign_interrupt(bus, dev, 1);
3852 pci_assign_interrupt(bus, dev, 0);
/* Claim USB controllers from SMM early so legacy emulation does not
 * fight the USB stack. */
3856 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3857 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3858 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3859 xhci_early_takeover(dev);
3860 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3861 ehci_early_takeover(dev);
3862 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3863 ohci_early_takeover(dev);
3864 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3865 uhci_early_takeover(dev);
3868 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3870 * Reserve resources for secondary bus ranges behind bridge
3873 pci_reserve_secbus(bus, dev, cfg, rl);
/* Read the config space of one domain/bus/slot/function and, if a device
 * is present, add it as a child of `dev`. Returns the new devinfo (or
 * NULL when no device responds). */
3877 static struct pci_devinfo *
3878 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3881 struct pci_devinfo *dinfo;
3883 dinfo = pci_read_device(pcib, dev, domain, busno, slot, func);
3885 pci_add_child(dev, dinfo);
/* Enumerate all devices on a bus: probe slot 0 function 0 first so ARI
 * can be enabled before scanning (ARI changes the legal slot/function
 * space), then walk every slot and function. */
3891 pci_add_children(device_t dev, int domain, int busno)
3893 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3894 device_t pcib = device_get_parent(dev);
3895 struct pci_devinfo *dinfo;
3897 int s, f, pcifunchigh;
3902 * Try to detect a device at slot 0, function 0. If it exists, try to
3903 * enable ARI. We must enable ARI before detecting the rest of the
3904 * functions on this bus as ARI changes the set of slots and functions
3905 * that are legal on this bus.
3907 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0);
3908 if (dinfo != NULL && pci_enable_ari)
3909 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);
3912 * Start looking for new devices on slot 0 at function 1 because we
3913 * just identified the device at slot 0, function 0.
3917 maxslots = PCIB_MAXSLOTS(pcib);
3918 for (s = 0; s <= maxslots; s++, first_func = 0) {
3922 hdrtype = REG(PCIR_HDRTYPE, 1);
3923 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Multi-function devices expose up to PCIB_MAXFUNCS functions. */
3925 if (hdrtype & PCIM_MFDEV)
3926 pcifunchigh = PCIB_MAXFUNCS(pcib);
3927 for (f = first_func; f <= pcifunchigh; f++)
3928 pci_identify_function(pcib, dev, domain, busno, s, f);
/*
 * BUS_RESCAN method: re-enumerate the bus, keeping device_t's for functions
 * that are still present, deleting children that disappeared, and probing/
 * attaching functions that are new since the last scan.
 *
 * 'unchanged' parallels 'devlist'; a non-NULL entry marks a pre-existing
 * child that must be neither deleted nor re-probed.
 */
3934 pci_rescan_method(device_t dev)
3936 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3937 device_t pcib = device_get_parent(dev);
3938 struct pci_softc *sc;
3939 device_t child, *devlist, *unchanged;
3940 int devcount, error, i, j, maxslots, oldcount;
3941 int busno, domain, s, f, pcifunchigh;
3944 /* No need to check for ARI on a rescan. */
3945 error = device_get_children(dev, &devlist, &devcount);
3948 if (devcount != 0) {
3949 unchanged = malloc(devcount * sizeof(device_t), M_TEMP,
3951 if (unchanged == NULL) {
3952 free(devlist, M_TEMP);
3958 sc = device_get_softc(dev);
3959 domain = pcib_get_domain(dev);
3960 busno = pcib_get_bus(dev);
3961 maxslots = PCIB_MAXSLOTS(pcib);
3962 for (s = 0; s <= maxslots; s++) {
3963 /* If function 0 is not present, skip to the next slot. */
3965 if (REG(PCIR_VENDOR, 2) == 0xffff)
3968 hdrtype = REG(PCIR_HDRTYPE, 1);
3969 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3971 if (hdrtype & PCIM_MFDEV)
3972 pcifunchigh = PCIB_MAXFUNCS(pcib);
3973 for (f = 0; f <= pcifunchigh; f++) {
/* 0xffff vendor ID means no function present at (s, f). */
3974 if (REG(PCIR_VENDOR, 2) == 0xffff)
3978 * Found a valid function. Check if a
3979 * device_t for this device already exists.
3981 for (i = 0; i < devcount; i++) {
3985 if (pci_get_slot(child) == s &&
3986 pci_get_function(child) == f) {
3987 unchanged[i] = child;
3992 pci_identify_function(pcib, dev, domain, busno, s, f);
3997 /* Remove devices that are no longer present. */
3998 for (i = 0; i < devcount; i++) {
3999 if (unchanged[i] != NULL)
4001 device_delete_child(dev, devlist[i]);
4004 free(devlist, M_TEMP);
4005 oldcount = devcount;
4007 /* Try to attach the devices just added. */
4008 error = device_get_children(dev, &devlist, &devcount);
4010 free(unchanged, M_TEMP);
/* Probe/attach only the children that were not in the old list. */
4014 for (i = 0; i < devcount; i++) {
4015 for (j = 0; j < oldcount; j++) {
4016 if (devlist[i] == unchanged[j])
4020 device_probe_and_attach(devlist[i]);
4024 free(unchanged, M_TEMP);
4025 free(devlist, M_TEMP);
/*
 * Create a child device for an SR-IOV Virtual Function.  The RID is decoded
 * into bus/slot/function by the parent bridge, the VF's devinfo is built
 * from the supplied vendor/device IDs, and the resulting child is flagged
 * PCICFG_VF so resource allocation takes the VF-specific paths.
 * Returns the new device_t.
 */
4032 pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid,
4035 struct pci_devinfo *pf_dinfo, *vf_dinfo;
4037 int busno, slot, func;
4039 pf_dinfo = device_get_ivars(pf);
4041 pcib = device_get_parent(bus);
4043 PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func);
4045 vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno,
4046 slot, func, vid, did);
4048 vf_dinfo->cfg.flags |= PCICFG_VF;
4049 pci_add_child(bus, vf_dinfo);
4051 return (vf_dinfo->cfg.dev);
/*
 * PCI_CREATE_IOV_CHILD bus method: thin wrapper that delegates VF child
 * creation to pci_add_iov_child().
 */
4055 pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid,
4056 uint16_t vid, uint16_t did)
4059 return (pci_add_iov_child(bus, pf, rid, vid, did));
/*
 * Register 'dinfo' as a new child of 'bus': create the device_t, attach the
 * devinfo as its ivars, initialize its resource list, snapshot and restore
 * its config space, add its BAR resources, and announce it via the
 * pci_add_device event handler.
 */
4064 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
4066 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
4067 device_set_ivars(dinfo->cfg.dev, dinfo);
4068 resource_list_init(&dinfo->resources);
/* Save, then immediately restore, config state so the saved copy is valid. */
4069 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
4070 pci_cfg_restore(dinfo->cfg.dev, dinfo);
4071 pci_print_verbose(dinfo);
4072 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
4073 pci_child_added(dinfo->cfg.dev);
4074 EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev);
/*
 * Default PCI_CHILD_ADDED bus method.  (Body elided in this view; presumably
 * an empty hook for subclasses to override — confirm against full source.)
 */
4078 pci_child_added_method(device_t dev, device_t child)
/*
 * Device probe method for the generic PCI bus driver.
 */
4084 pci_probe(device_t dev)
4087 device_set_desc(dev, "PCI bus");
4089 /* Allow other subclasses to override this driver. */
4090 return (BUS_PROBE_GENERIC);
/*
 * Attach work shared by all PCI bus drivers: reserve this bus's number from
 * the parent bridge and set up the DMA tag, applying PCI_DMA_BOUNDARY only
 * when the parent bridge is not itself a PCI bus (i.e. for host bridges).
 */
4094 pci_attach_common(device_t dev)
4096 struct pci_softc *sc;
4098 #ifdef PCI_DMA_BOUNDARY
4099 int error, tag_valid;
4105 sc = device_get_softc(dev);
4106 domain = pcib_get_domain(dev);
4107 busno = pcib_get_bus(dev);
/* Claim our own bus number as a resource so rescans/bridges see it owned. */
4110 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
4112 if (sc->sc_bus == NULL) {
4113 device_printf(dev, "failed to allocate bus number\n");
4118 device_printf(dev, "domain=%d, physical bus=%d\n",
4120 #ifdef PCI_DMA_BOUNDARY
/* Only top-level PCI buses (non-PCI parent) get the bounded DMA tag. */
4122 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
4123 devclass_find("pci")) {
4124 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
4125 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4126 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
4127 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
4129 device_printf(dev, "Failed to create DMA tag: %d\n",
/* Fallback: inherit the parent's DMA tag unchanged. */
4136 sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Device attach method: perform common attach setup, then enumerate the
 * children on this bus and attach them generically.
 */
4141 pci_attach(device_t dev)
4143 int busno, domain, error;
4145 error = pci_attach_common(dev);
4150 * Since there can be multiple independently numbered PCI
4151 * busses on systems with multiple PCI domains, we can't use
4152 * the unit number to decide which bus we are probing. We ask
4153 * the parent pcib what our domain and bus numbers are.
4155 domain = pcib_get_domain(dev);
4156 busno = pcib_get_bus(dev);
4157 pci_add_children(dev, domain, busno);
4158 return (bus_generic_attach(dev));
/*
 * Device detach method: detach children, release the bus-number resource
 * claimed in pci_attach_common(), and delete the child devices.
 */
4162 pci_detach(device_t dev)
4165 struct pci_softc *sc;
4169 error = bus_generic_detach(dev);
4173 sc = device_get_softc(dev);
4174 error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus);
4178 return (device_delete_children(dev));
/*
 * Helper to move an attached child to power state 'state', letting the
 * bridge firmware (via PCIB_POWER_FOR_SLEEP) substitute a different target
 * state if it wants.  Unattached children are skipped.
 */
4182 pci_set_power_child(device_t dev, device_t child, int state)
4188 * Set the device to the given state. If the firmware suggests
4189 * a different power state, use it instead. If power management
4190 * is not present, the firmware is responsible for managing
4191 * device power. Skip children who aren't attached since they
4192 * are handled separately.
4194 pcib = device_get_parent(dev);
4196 if (device_is_attached(child) &&
4197 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)
4198 pci_set_powerstate(child, dstate);
/*
 * BUS_SUSPEND_CHILD method: save the child's config space, run the generic
 * suspend, and then (when pci_do_power_suspend is set) drop the child into
 * D3.  Suspend must precede the power-down so the driver quiesces first.
 */
4202 pci_suspend_child(device_t dev, device_t child)
4204 struct pci_devinfo *dinfo;
4207 dinfo = device_get_ivars(child);
4210 * Save the PCI configuration space for the child and set the
4211 * device in the appropriate power state for this sleep state.
4213 pci_cfg_save(child, dinfo, 0);
4215 /* Suspend devices before potentially powering them down. */
4216 error = bus_generic_suspend_child(dev, child);
4221 if (pci_do_power_suspend)
4222 pci_set_power_child(dev, child, PCI_POWERSTATE_D3);
/*
 * BUS_RESUME_CHILD method: power the child back to D0 (if enabled), restore
 * its saved config space, and resume it.  Children with no attached driver
 * just get their config state re-saved.
 */
4228 pci_resume_child(device_t dev, device_t child)
4230 struct pci_devinfo *dinfo;
4232 if (pci_do_power_resume)
4233 pci_set_power_child(dev, child, PCI_POWERSTATE_D0);
4235 dinfo = device_get_ivars(child);
4236 pci_cfg_restore(child, dinfo);
4237 if (!device_is_attached(child))
4238 pci_cfg_save(child, dinfo, 1);
4240 bus_generic_resume_child(dev, child);
/*
 * Device resume method: two passes over the children — critical device
 * classes (e.g. base peripherals) are resumed first, everything else in a
 * second pass.
 */
4246 pci_resume(device_t dev)
4248 device_t child, *devlist;
4249 int error, i, numdevs;
4251 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
4255 * Resume critical devices first, then everything else later.
4257 for (i = 0; i < numdevs; i++) {
4259 switch (pci_get_class(child)) {
4263 case PCIC_BASEPERIPH:
4264 BUS_RESUME_CHILD(dev, child);
/* Second pass: resume the classes skipped above. */
4268 for (i = 0; i < numdevs; i++) {
4270 switch (pci_get_class(child)) {
4274 case PCIC_BASEPERIPH:
4277 BUS_RESUME_CHILD(dev, child);
4280 free(devlist, M_TEMP);
/*
 * Locate a preloaded "pci_vendor_data" module (the vendor/device name
 * database) and publish its address/size in pci_vendordata /
 * pci_vendordata_size for pci_describe_device().
 */
4285 pci_load_vendor_data(void)
4291 data = preload_search_by_type("pci_vendor_data");
4293 ptr = preload_fetch_addr(data);
4294 sz = preload_fetch_size(data);
4295 if (ptr != NULL && sz != 0) {
4296 pci_vendordata = ptr;
4297 pci_vendordata_size = sz;
4298 /* terminate the database */
4299 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * BUS_DRIVER_ADDED method: when a new driver registers, re-probe children
 * that currently have no driver (DS_NOTPRESENT), restoring their config
 * space first.  Children that still fail to attach are cleaned up via
 * pci_child_detached().
 */
4305 pci_driver_added(device_t dev, driver_t *driver)
4310 struct pci_devinfo *dinfo;
4314 device_printf(dev, "driver added\n");
4315 DEVICE_IDENTIFY(driver, dev);
4316 if (device_get_children(dev, &devlist, &numdevs) != 0)
4318 for (i = 0; i < numdevs; i++) {
/* Skip children that already have a driver attached. */
4320 if (device_get_state(child) != DS_NOTPRESENT)
4322 dinfo = device_get_ivars(child);
4323 pci_print_verbose(dinfo);
4325 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
4326 pci_cfg_restore(child, dinfo);
4327 if (device_probe_and_attach(child) != 0)
4328 pci_child_detached(dev, child);
4330 free(devlist, M_TEMP);
/*
 * BUS_SETUP_INTR method.  After the generic interrupt hookup, direct
 * children get their MSI/MSI-X vector lazily mapped via PCIB_MAP_MSI on
 * first handler registration, and the INTx disable bit in the command
 * register is toggled to match the interrupt type in use.  On a mapping
 * failure the just-installed handler is torn down again.
 */
4334 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
4335 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
4337 struct pci_devinfo *dinfo;
4338 struct msix_table_entry *mte;
4339 struct msix_vector *mv;
4345 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
4350 /* If this is not a direct child, just bail out. */
4351 if (device_get_parent(child) != dev) {
4356 rid = rman_get_rid(irq);
4358 /* Make sure that INTx is enabled */
4359 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
4362 * Check to see if the interrupt is MSI or MSI-X.
4363 * Ask our parent to map the MSI and give
4364 * us the address and data register values.
4365 * If we fail for some reason, teardown the
4366 * interrupt handler.
4368 dinfo = device_get_ivars(child);
4369 if (dinfo->cfg.msi.msi_alloc > 0) {
/* MSI: map address/data once, on the first handler for this device. */
4370 if (dinfo->cfg.msi.msi_addr == 0) {
4371 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
4372 ("MSI has handlers, but vectors not mapped"));
4373 error = PCIB_MAP_MSI(device_get_parent(dev),
4374 child, rman_get_start(irq), &addr, &data);
4377 dinfo->cfg.msi.msi_addr = addr;
4378 dinfo->cfg.msi.msi_data = data;
4380 if (dinfo->cfg.msi.msi_handlers == 0)
4381 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
4382 dinfo->cfg.msi.msi_data);
4383 dinfo->cfg.msi.msi_handlers++;
/* MSI-X: rid indexes the message table (1-based). */
4385 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4386 ("No MSI or MSI-X interrupts allocated"));
4387 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4388 ("MSI-X index too high"));
4389 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4390 KASSERT(mte->mte_vector != 0, ("no message vector"));
4391 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
4392 KASSERT(mv->mv_irq == rman_get_start(irq),
4394 if (mv->mv_address == 0) {
4395 KASSERT(mte->mte_handlers == 0,
4396 ("MSI-X table entry has handlers, but vector not mapped"));
4397 error = PCIB_MAP_MSI(device_get_parent(dev),
4398 child, rman_get_start(irq), &addr, &data);
4401 mv->mv_address = addr;
4404 if (mte->mte_handlers == 0) {
4405 pci_enable_msix(child, rid - 1, mv->mv_address,
4407 pci_unmask_msix(child, rid - 1);
4409 mte->mte_handlers++;
4413 * Make sure that INTx is disabled if we are using MSI/MSI-X,
4414 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
4415 * in which case we "enable" INTx so MSI/MSI-X actually works.
4417 if (!pci_has_quirk(pci_get_devid(child),
4418 PCI_QUIRK_MSI_INTX_BUG))
4419 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4421 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Error path: undo the generic setup done above. */
4424 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * BUS_TEARDOWN_INTR method.  For direct children, drop the MSI/MSI-X handler
 * reference count and, when the last handler goes away, disable MSI or mask
 * the MSI-X message; then perform the generic teardown.
 */
4434 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
4437 struct msix_table_entry *mte;
4438 struct resource_list_entry *rle;
4439 struct pci_devinfo *dinfo;
4442 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
4445 /* If this isn't a direct child, just bail out */
4446 if (device_get_parent(child) != dev)
4447 return(bus_generic_teardown_intr(dev, child, irq, cookie));
4449 rid = rman_get_rid(irq);
/* rid 0 is legacy INTx; rid != 0 below is MSI/MSI-X. */
4452 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4455 * Check to see if the interrupt is MSI or MSI-X. If so,
4456 * decrement the appropriate handlers count and mask the
4457 * MSI-X message, or disable MSI messages if the count
4460 dinfo = device_get_ivars(child);
4461 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
4462 if (rle->res != irq)
4464 if (dinfo->cfg.msi.msi_alloc > 0) {
4465 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4466 ("MSI-X index too high"));
4467 if (dinfo->cfg.msi.msi_handlers == 0)
4469 dinfo->cfg.msi.msi_handlers--;
4470 if (dinfo->cfg.msi.msi_handlers == 0)
4471 pci_disable_msi(child);
4473 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4474 ("No MSI or MSI-X interrupts allocated"));
4475 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4476 ("MSI-X index too high"));
4477 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4478 if (mte->mte_handlers == 0)
4480 mte->mte_handlers--;
4481 if (mte->mte_handlers == 0)
4482 pci_mask_msix(child, rid - 1);
4485 error = bus_generic_teardown_intr(dev, child, irq, cookie);
4488 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * BUS_PRINT_CHILD method: print the child's name, its port/memory/IRQ
 * resources, device flags, and slot.function location.  Returns the number
 * of characters printed, per newbus convention.
 */
4493 pci_print_child(device_t dev, device_t child)
4495 struct pci_devinfo *dinfo;
4496 struct resource_list *rl;
4499 dinfo = device_get_ivars(child);
4500 rl = &dinfo->resources;
4502 retval += bus_print_child_header(dev, child);
4504 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
4505 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
4506 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
4507 if (device_get_flags(dev))
4508 retval += printf(" flags %#x", device_get_flags(dev));
4510 retval += printf(" at device %d.%d", pci_get_slot(child),
4511 pci_get_function(child));
4513 retval += bus_print_child_domain(dev, child);
4514 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass description table used by pci_probe_nomatch() to print a
 * human-readable description for devices with no attached driver.  A
 * subclass of -1 is the per-class fallback; 'report' selects whether the
 * entry prints always (1) or only under bootverbose (0).
 */
4523 int report; /* 0 = bootverbose, 1 = always */
4525 } pci_nomatch_tab[] = {
4526 {PCIC_OLD, -1, 1, "old"},
4527 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4528 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4529 {PCIC_STORAGE, -1, 1, "mass storage"},
4530 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
4531 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
4532 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
4533 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
4534 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
4535 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
4536 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
4537 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
4538 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
4539 {PCIC_NETWORK, -1, 1, "network"},
4540 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
4541 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
4542 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
4543 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
4544 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
4545 {PCIC_DISPLAY, -1, 1, "display"},
4546 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
4547 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
4548 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
4549 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4550 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
4551 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
4552 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
4553 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
4554 {PCIC_MEMORY, -1, 1, "memory"},
4555 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
4556 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
4557 {PCIC_BRIDGE, -1, 1, "bridge"},
4558 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4559 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4560 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4561 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4562 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4563 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4564 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4565 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4566 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4567 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4568 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
4569 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
4570 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
4571 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
4572 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4573 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
4574 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
4575 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
4576 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
4577 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4578 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
4579 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"},
4580 {PCIC_INPUTDEV, -1, 1, "input device"},
4581 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
4582 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4583 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
4584 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
4585 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
4586 {PCIC_DOCKING, -1, 1, "docking station"},
4587 {PCIC_PROCESSOR, -1, 1, "processor"},
4588 {PCIC_SERIALBUS, -1, 1, "serial bus"},
4589 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
4590 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
4591 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
4592 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
4593 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
4594 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
4595 {PCIC_WIRELESS, -1, 1, "wireless controller"},
4596 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
4597 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
4598 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
4599 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
4600 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
4601 {PCIC_SATCOM, -1, 1, "satellite communication"},
4602 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
4603 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
4604 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
4605 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
4606 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
4607 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
4608 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
4609 {PCIC_DASP, -1, 0, "dasp"},
4610 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
/*
 * BUS_PROBE_NOMATCH method: report a device that no driver claimed.  Prefer
 * a name from the loaded vendor database; otherwise build a description
 * from pci_nomatch_tab (class fallback 'cp' plus subclass match 'scp').
 * Finally, save the unclaimed device's config state.
 */
4615 pci_probe_nomatch(device_t dev, device_t child)
4618 const char *cp, *scp;
4622 * Look for a listing for this device in a loaded device database.
4625 if ((device = pci_describe_device(child)) != NULL) {
4626 device_printf(dev, "<%s>", device);
4627 free(device, M_DEVBUF);
4630 * Scan the class/subclass descriptions for a general
4635 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4636 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
4637 if (pci_nomatch_tab[i].subclass == -1) {
4638 cp = pci_nomatch_tab[i].desc;
4639 report = pci_nomatch_tab[i].report;
4640 } else if (pci_nomatch_tab[i].subclass ==
4641 pci_get_subclass(child)) {
4642 scp = pci_nomatch_tab[i].desc;
4643 report = pci_nomatch_tab[i].report;
4647 if (report || bootverbose) {
4648 device_printf(dev, "<%s%s%s>",
4650 ((cp != NULL) && (scp != NULL)) ? ", " : "",
4654 if (report || bootverbose) {
4655 printf(" at device %d.%d (no driver attached)\n",
4656 pci_get_slot(child), pci_get_function(child));
4658 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * BUS_CHILD_DETACHED method: reclaim anything the departing driver leaked,
 * in dependency order — active IRQs first, then MSI/MSI-X vectors, then
 * memory/IO windows and bus numbers — warning about each leak, and finally
 * save the child's config state.
 */
4662 pci_child_detached(device_t dev, device_t child)
4664 struct pci_devinfo *dinfo;
4665 struct resource_list *rl;
4667 dinfo = device_get_ivars(child);
4668 rl = &dinfo->resources;
4671 * Have to deallocate IRQs before releasing any MSI messages and
4672 * have to release MSI messages before deallocating any memory
4675 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4676 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4677 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4678 pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
4679 (void)pci_release_msi(child);
4681 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4682 pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
4683 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4684 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
4686 if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4687 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n")
4690 pci_cfg_save(child, dinfo, 1);
4694 * Parse the PCI device database, if loaded, and return a pointer to a
4695 * description of the device.
4697 * The database is flat text formatted as follows:
4699 * Any line not in a valid format is ignored.
4700 * Lines are terminated with newline '\n' characters.
4702 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4705 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4706 * - devices cannot be listed without a corresponding VENDOR line.
4707 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4708 * another TAB, then the device name.
4712 * Assuming (ptr) points to the beginning of a line in the database,
4713 * return the vendor or device and description of the next entry.
4714 * The value of (vendor) or (device) inappropriate for the entry type
4715 * is set to -1. Returns nonzero at the end of the database.
4717 * Note that this is slightly unrobust in the face of corrupt data;
4718 * we attempt to safeguard against this by spamming the end of the
4719 * database with a newline when we initialise.
/*
 * Parse one line of the preloaded vendor database at *ptr.  A line starting
 * with a hex number is a VENDOR entry; a TAB-prefixed one is a DEVICE entry.
 * The matched field and its description are returned through the out
 * parameters; *ptr is advanced past the line.  (Parts of the parse logic —
 * the TAB test and return values — are elided from this view.)
 */
4722 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
4731 left = pci_vendordata_size - (cp - pci_vendordata);
/* Vendor line: "<hex>\t<name>". */
4739 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* Device line: same format, stored into *device instead. */
4743 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4746 /* skip to next line */
4747 while (*cp != '\n' && left > 0) {
4756 /* skip to next line */
4757 while (*cp != '\n' && left > 0) {
4761 if (*cp == '\n' && left > 0)
/*
 * Look up 'dev' in the preloaded vendor database and return a malloc'd
 * "vendor, device" description string, or NULL if the database is absent or
 * allocation fails.  When the device ID itself is unknown, its hex ID is
 * substituted for the device name.  Caller frees the result (M_DEVBUF).
 */
4768 pci_describe_device(device_t dev)
4771 char *desc, *vp, *dp, *line;
4773 desc = vp = dp = NULL;
4776 * If we have no vendor data, we can't do anything.
4778 if (pci_vendordata == NULL)
4782 * Scan the vendor data looking for this device
4784 line = pci_vendordata;
4785 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4788 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4790 if (vendor == pci_get_vendor(dev))
4793 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4796 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4804 if (device == pci_get_device(dev))
/* Unknown device ID: fall back to printing it in hex. */
4808 snprintf(dp, 80, "0x%x", pci_get_device(dev));
4809 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4811 sprintf(desc, "%s, %s", vp, dp);
/*
 * BUS_READ_IVAR method: export fields of the child's cached config header
 * (pcicfg) through the PCI_IVAR_* accessors.  MINGNT/MAXLAT only exist in
 * type-0 headers and fail for other header types.
 */
4821 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4823 struct pci_devinfo *dinfo;
4826 dinfo = device_get_ivars(child);
4830 case PCI_IVAR_ETHADDR:
4832 * The generic accessor doesn't deal with failure, so
4833 * we set the return value, then return an error.
4835 *((uint8_t **) result) = NULL;
4837 case PCI_IVAR_SUBVENDOR:
4838 *result = cfg->subvendor;
4840 case PCI_IVAR_SUBDEVICE:
4841 *result = cfg->subdevice;
4843 case PCI_IVAR_VENDOR:
4844 *result = cfg->vendor;
4846 case PCI_IVAR_DEVICE:
4847 *result = cfg->device;
4849 case PCI_IVAR_DEVID:
/* devid packs device in the high 16 bits, vendor in the low 16. */
4850 *result = (cfg->device << 16) | cfg->vendor;
4852 case PCI_IVAR_CLASS:
4853 *result = cfg->baseclass;
4855 case PCI_IVAR_SUBCLASS:
4856 *result = cfg->subclass;
4858 case PCI_IVAR_PROGIF:
4859 *result = cfg->progif;
4861 case PCI_IVAR_REVID:
4862 *result = cfg->revid;
4864 case PCI_IVAR_INTPIN:
4865 *result = cfg->intpin;
4868 *result = cfg->intline;
4870 case PCI_IVAR_DOMAIN:
4871 *result = cfg->domain;
4877 *result = cfg->slot;
4879 case PCI_IVAR_FUNCTION:
4880 *result = cfg->func;
4882 case PCI_IVAR_CMDREG:
4883 *result = cfg->cmdreg;
4885 case PCI_IVAR_CACHELNSZ:
4886 *result = cfg->cachelnsz;
4888 case PCI_IVAR_MINGNT:
/* min_gnt is only defined for normal (type 0) headers. */
4889 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4893 *result = cfg->mingnt;
4895 case PCI_IVAR_MAXLAT:
4896 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4900 *result = cfg->maxlat;
4902 case PCI_IVAR_LATTIMER:
4903 *result = cfg->lattimer;
/*
 * BUS_WRITE_IVAR method: only the interrupt pin is writable; all the
 * identity/location ivars are read-only and return EINVAL.
 */
4912 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4914 struct pci_devinfo *dinfo;
4916 dinfo = device_get_ivars(child);
4919 case PCI_IVAR_INTPIN:
4920 dinfo->cfg.intpin = value;
4922 case PCI_IVAR_ETHADDR:
4923 case PCI_IVAR_SUBVENDOR:
4924 case PCI_IVAR_SUBDEVICE:
4925 case PCI_IVAR_VENDOR:
4926 case PCI_IVAR_DEVICE:
4927 case PCI_IVAR_DEVID:
4928 case PCI_IVAR_CLASS:
4929 case PCI_IVAR_SUBCLASS:
4930 case PCI_IVAR_PROGIF:
4931 case PCI_IVAR_REVID:
4933 case PCI_IVAR_DOMAIN:
4936 case PCI_IVAR_FUNCTION:
4937 return (EINVAL); /* disallow for now */
4944 #include "opt_ddb.h"
4946 #include <ddb/ddb.h>
4947 #include <sys/cons.h>
4950 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (driver name/unit, location, class,
 * card/chip IDs, revision, header type).  Stops early on db_pager_quit.
 */
4953 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4955 struct pci_devinfo *dinfo;
4956 struct devlist *devlist_head;
4959 int i, error, none_count;
4962 /* get the head of the device queue */
4963 devlist_head = &pci_devq;
4966 * Go through the list of devices and print out devices
4968 for (error = 0, i = 0,
4969 dinfo = STAILQ_FIRST(devlist_head);
4970 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4971 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4973 /* Populate pd_name and pd_unit */
4976 name = device_get_name(dinfo->cfg.dev);
4979 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4980 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4981 (name && *name) ? name : "none",
4982 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4984 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4985 p->pc_sel.pc_func, (p->pc_class << 16) |
4986 (p->pc_subclass << 8) | p->pc_progif,
4987 (p->pc_subdevice << 16) | p->pc_subvendor,
4988 (p->pc_device << 16) | p->pc_vendor,
4989 p->pc_revid, p->pc_hdr)
/*
 * Lazily reserve the resource backing a BAR.  Determines the BAR's real
 * size by probing it (or reuses a previously-recorded failed BAR), sanity
 * checks the requested type against the BAR's memory/ioport kind, then
 * reserves a suitably sized and aligned range and programs the BAR with
 * the assigned address (with decoding disabled around the write).
 * Returns the reserved resource, or NULL-ish on the elided error paths.
 */
4994 static struct resource *
4995 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4996 rman_res_t start, rman_res_t end, rman_res_t count, u_int num,
4999 struct pci_devinfo *dinfo = device_get_ivars(child);
5000 struct resource_list *rl = &dinfo->resources;
5001 struct resource *res;
5004 pci_addr_t map, testval;
5009 /* If rid is managed by EA, ignore it */
5010 if (pci_ea_is_enabled(child, *rid))
5013 pm = pci_find_bar(child, *rid);
5015 /* This is a BAR that we failed to allocate earlier. */
5016 mapsize = pm->pm_size;
5020 * Weed out the bogons, and figure out how large the
5021 * BAR/map is. BARs that read back 0 here are bogus
5022 * and unimplemented. Note: atapci in legacy mode are
5023 * special and handled elsewhere in the code. If you
5024 * have a atapci device in legacy mode and it fails
5025 * here, that other code is broken.
5027 pci_read_bar(child, *rid, &map, &testval, NULL);
5030 * Determine the size of the BAR and ignore BARs with a size
5031 * of 0. Device ROM BARs use a different mask value.
5033 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
5034 mapsize = pci_romsize(testval);
5036 mapsize = pci_mapsize(testval);
5039 pm = pci_add_bar(child, *rid, map, mapsize);
/* Reject requests whose type disagrees with what the BAR says it is. */
5042 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
5043 if (type != SYS_RES_MEMORY) {
5046 "child %s requested type %d for rid %#x,"
5047 " but the BAR says it is an memio\n",
5048 device_get_nameunit(child), type, *rid);
5052 if (type != SYS_RES_IOPORT) {
5055 "child %s requested type %d for rid %#x,"
5056 " but the BAR says it is an ioport\n",
5057 device_get_nameunit(child), type, *rid);
5063 * For real BARs, we need to override the size that
5064 * the driver requests, because that's what the BAR
5065 * actually uses and we would otherwise have a
5066 * situation where we might allocate the excess to
5067 * another driver, which won't work.
5069 count = ((pci_addr_t)1 << mapsize) * num;
/* BARs are naturally aligned to their size. */
5070 if (RF_ALIGNMENT(flags) < mapsize)
5071 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
5072 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
5073 flags |= RF_PREFETCHABLE;
5076 * Allocate enough resource, and then write back the
5077 * appropriate BAR for that resource.
5079 resource_list_add(rl, type, *rid, start, end, count);
5080 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
5081 count, flags & ~RF_ACTIVE);
5083 resource_list_delete(rl, type, *rid);
5084 device_printf(child,
5085 "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n",
5086 count, *rid, type, start, end);
5090 device_printf(child,
5091 "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n",
5092 count, *rid, type, rman_get_start(res));
5094 /* Disable decoding via the CMD register before updating the BAR */
5095 cmd = pci_read_config(child, PCIR_COMMAND, 2);
5096 pci_write_config(child, PCIR_COMMAND,
5097 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
5099 map = rman_get_start(res);
5100 pci_write_bar(child, pm, map);
5102 /* Restore the original value of the CMD register */
5103 pci_write_config(child, PCIR_COMMAND, cmd, 2);
/*
 * Core of resource allocation for direct children.  Handles the special
 * cases — secondary bus numbers, legacy-INTx-vs-MSI conflicts, late
 * interrupt routing, and bridge window rids that are not BARs — then lazily
 * reserves the backing BAR (pci_reserve_map) before delegating to
 * resource_list_alloc().
 */
5109 pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
5110 rman_res_t start, rman_res_t end, rman_res_t count, u_long num,
5113 struct pci_devinfo *dinfo;
5114 struct resource_list *rl;
5115 struct resource_list_entry *rle;
5116 struct resource *res;
5120 * Perform lazy resource allocation
5122 dinfo = device_get_ivars(child);
5123 rl = &dinfo->resources;
5126 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
5128 return (pci_alloc_secbus(dev, child, rid, start, end, count,
5133 * Can't alloc legacy interrupt once MSI messages have
5136 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
5137 cfg->msix.msix_alloc > 0))
5141 * If the child device doesn't have an interrupt
5142 * routed and is deserving of an interrupt, try to
5145 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
5147 pci_assign_interrupt(dev, child, 0);
5149 case SYS_RES_IOPORT:
5150 case SYS_RES_MEMORY:
5153 * PCI-PCI bridge I/O window resources are not BARs.
5154 * For those allocations just pass the request up the
5157 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
5159 case PCIR_IOBASEL_1:
5160 case PCIR_MEMBASE_1:
5161 case PCIR_PMBASEL_1:
5163 * XXX: Should we bother creating a resource
5166 return (bus_generic_alloc_resource(dev, child,
5167 type, rid, start, end, count, flags));
5171 /* Reserve resources for this BAR if needed. */
5172 rle = resource_list_find(rl, type, *rid);
5174 res = pci_reserve_map(dev, child, type, rid, start, end,
5180 return (resource_list_alloc(rl, dev, child, type, rid,
5181 start, end, count, flags));
/*
 * BUS_ALLOC_RESOURCE method.  Non-direct children pass straight up the
 * tree; SR-IOV Virtual Functions take the VF memory path (VFs have no I/O
 * BARs, so SYS_RES_IOPORT falls through to the elided failure path);
 * everything else goes through pci_alloc_multi_resource().
 */
5185 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
5186 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
5189 struct pci_devinfo *dinfo;
5192 if (device_get_parent(child) != dev)
5193 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
5194 type, rid, start, end, count, flags));
5197 dinfo = device_get_ivars(child);
5198 if (dinfo->cfg.flags & PCICFG_VF) {
5200 /* VFs can't have I/O BARs. */
5201 case SYS_RES_IOPORT:
5203 case SYS_RES_MEMORY:
5204 return (pci_vf_alloc_mem_resource(dev, child, rid,
5205 start, end, count, flags));
5208 /* Fall through for other types of resource allocations. */
5212 return (pci_alloc_multi_resource(dev, child, type, rid, start, end,
/*
 * BUS_RELEASE_RESOURCE method — the mirror of pci_alloc_resource(): pass
 * non-direct children up the tree, route VF memory to the VF path, pass
 * bridge window rids (which are not BARs) up generically, and release
 * everything else from the child's resource list.
 */
5217 pci_release_resource(device_t dev, device_t child, int type, int rid,
5220 struct pci_devinfo *dinfo;
5221 struct resource_list *rl;
5224 if (device_get_parent(child) != dev)
5225 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
5228 dinfo = device_get_ivars(child);
5232 if (dinfo->cfg.flags & PCICFG_VF) {
5234 /* VFs can't have I/O BARs. */
5235 case SYS_RES_IOPORT:
5237 case SYS_RES_MEMORY:
5238 return (pci_vf_release_mem_resource(dev, child, rid,
5242 /* Fall through for other types of resource allocations. */
5248 * PCI-PCI bridge I/O window resources are not BARs. For
5249 * those allocations just pass the request up the tree.
5251 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
5252 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
5254 case PCIR_IOBASEL_1:
5255 case PCIR_MEMBASE_1:
5256 case PCIR_PMBASEL_1:
5257 return (bus_generic_release_resource(dev, child, type,
5263 rl = &dinfo->resources;
5264 return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * BUS_ACTIVATE_RESOURCE method: after generic activation, enable the
 * matching decode (I/O or memory) in the child's command register, and for
 * device ROM BARs explicitly set PCIM_BIOS_ENABLE in the BAR itself.
 */
5268 pci_activate_resource(device_t dev, device_t child, int type, int rid,
5271 struct pci_devinfo *dinfo;
5274 error = bus_generic_activate_resource(dev, child, type, rid, r);
5278 /* Enable decoding in the command register when activating BARs. */
5279 if (device_get_parent(child) == dev) {
5280 /* Device ROMs need their decoding explicitly enabled. */
5281 dinfo = device_get_ivars(child);
5282 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
5283 pci_write_bar(child, pci_find_bar(child, rid),
5284 rman_get_start(r) | PCIM_BIOS_ENABLE);
5286 case SYS_RES_IOPORT:
5287 case SYS_RES_MEMORY:
5288 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus method: deactivate a resource.
 *
 * Counterpart to pci_activate_resource(): after generic deactivation,
 * device ROM BARs of our direct children have their decode bit cleared
 * by rewriting the BAR (the PCIM_BIOS_ENABLE write is cut off at the
 * end of this extract).
 */
5296 pci_deactivate_resource(device_t dev, device_t child, int type,
5297     int rid, struct resource *r)
5299 	struct pci_devinfo *dinfo;
5302 	error = bus_generic_deactivate_resource(dev, child, type, rid, r);
5306 	/* Disable decoding for device ROMs. */
5307 	if (device_get_parent(child) == dev) {
5308 		dinfo = device_get_ivars(child);
5309 		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
5310 			pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Bus callback: a child device is being deleted.
 *
 * Fires the pci_delete_device eventhandler, then (if the hardware is
 * still present) disables memory/IO decoding and bus mastering so the
 * device cannot touch resources we are about to free, and finally
 * releases or unreserves every entry on the child's resource list.
 *
 * Fix: rman_get_start() returns rman_res_t, which is uintmax_t, so the
 * diagnostic printf must use %jx rather than %lx (%lx truncates /
 * mismatches on ILP32 targets).  This also makes the message consistent
 * with the equivalent warning in pci_delete_resource().
 */
5317 pci_child_deleted(device_t dev, device_t child)
5319 	struct resource_list_entry *rle;
5320 	struct resource_list *rl;
5321 	struct pci_devinfo *dinfo;
5323 	dinfo = device_get_ivars(child);
5324 	rl = &dinfo->resources;
5326 	EVENTHANDLER_INVOKE(pci_delete_device, child);
5328 	/* Turn off access to resources we're about to free */
5329 	if (bus_child_present(child) != 0) {
5330 		pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
5331 		    PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
5333 		pci_disable_busmaster(child);
5336 	/* Free all allocated resources */
5337 	STAILQ_FOREACH(rle, rl, link) {
/* A busy/active entry at this point is a driver bug; warn, then force-release. */
5339 		if (rman_get_flags(rle->res) & RF_ACTIVE ||
5340 		    resource_list_busy(rl, rle->type, rle->rid)) {
5341 			pci_printf(&dinfo->cfg,
5342 			    "Resource still owned, oops. "
5343 			    "(type=%d, rid=%d, addr=%jx)\n",
5344 			    rle->type, rle->rid,
5345 			    rman_get_start(rle->res));
5346 			bus_release_resource(child, rle->type, rle->rid,
5349 		resource_list_unreserve(rl, dev, child, rle->type,
5353 	resource_list_free(rl);
/*
 * Bus method: delete a resource list entry for a direct child.
 *
 * Refuses to touch an entry whose resource is still active or busy
 * (printing a diagnostic instead); otherwise unreserves the entry and
 * removes it from the child's resource list.  Note the %jx specifier
 * matches rman_res_t (uintmax_t).
 *
 * NOTE(review): the early-return for non-direct children and the
 * rle == NULL check are elided in this extract.
 */
5359 pci_delete_resource(device_t dev, device_t child, int type, int rid)
5361 	struct pci_devinfo *dinfo;
5362 	struct resource_list *rl;
5363 	struct resource_list_entry *rle;
5365 	if (device_get_parent(child) != dev)
5368 	dinfo = device_get_ivars(child);
5369 	rl = &dinfo->resources;
5370 	rle = resource_list_find(rl, type, rid);
5375 	if (rman_get_flags(rle->res) & RF_ACTIVE ||
5376 	    resource_list_busy(rl, type, rid)) {
5377 		device_printf(dev, "delete_resource: "
5378 		    "Resource still owned by child, oops. "
5379 		    "(type=%d, rid=%d, addr=%jx)\n",
5380 		    type, rid, rman_get_start(rle->res));
5383 	resource_list_unreserve(rl, dev, child, type, rid);
5385 	resource_list_delete(rl, type, rid);
5388 struct resource_list *
5389 pci_get_resource_list (device_t dev, device_t child)
5391 struct pci_devinfo *dinfo = device_get_ivars(child);
5393 return (&dinfo->resources);
5397 pci_get_dma_tag(device_t bus, device_t dev)
5399 struct pci_softc *sc = device_get_softc(bus);
5401 return (sc->sc_dma_tag);
/*
 * Bus method: read a child's config-space register.
 *
 * SR-IOV VFs do not implement the Vendor/Device ID registers, so those
 * reads are synthesized from the cached cfg values (a 4-byte read at
 * PCIR_VENDOR returns device<<16 | vendor); all other registers are
 * read through the parent bridge via PCIB_READ_CONFIG().
 *
 * NOTE(review): the "switch (width)" headers selecting among the
 * 4/2/1-byte returns are elided in this extract.
 */
5405 pci_read_config_method(device_t dev, device_t child, int reg, int width)
5407 	struct pci_devinfo *dinfo = device_get_ivars(child);
5408 	pcicfgregs *cfg = &dinfo->cfg;
5412 	 * SR-IOV VFs don't implement the VID or DID registers, so we have to
5413 	 * emulate them here.
5415 	if (cfg->flags & PCICFG_VF) {
5416 		if (reg == PCIR_VENDOR) {
5419 			return (cfg->device << 16 | cfg->vendor);
5421 			return (cfg->vendor);
5423 			return (cfg->vendor & 0xff);
/* Unsupported width: report all-ones, like a failed config read. */
5425 			return (0xffffffff);
5427 		} else if (reg == PCIR_DEVICE) {
5429 			/* Note that an unaligned 4-byte read is an error. */
5431 			return (cfg->device);
5433 			return (cfg->device & 0xff);
5435 			return (0xffffffff);
/* Non-emulated registers: forward to the parent bridge. */
5441 	return (PCIB_READ_CONFIG(device_get_parent(dev),
5442 	    cfg->bus, cfg->slot, cfg->func, reg, width));
5446 pci_write_config_method(device_t dev, device_t child, int reg,
5447 uint32_t val, int width)
5449 struct pci_devinfo *dinfo = device_get_ivars(child);
5450 pcicfgregs *cfg = &dinfo->cfg;
5452 PCIB_WRITE_CONFIG(device_get_parent(dev),
5453 cfg->bus, cfg->slot, cfg->func, reg, val, width);
5457 pci_child_location_str_method(device_t dev, device_t child, char *buf,
5461 snprintf(buf, buflen, "slot=%d function=%d dbsf=pci%d:%d:%d:%d",
5462 pci_get_slot(child), pci_get_function(child), pci_get_domain(child),
5463 pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
5468 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
5471 struct pci_devinfo *dinfo;
5474 dinfo = device_get_ivars(child);
5476 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
5477 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
5478 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
5484 pci_assign_interrupt_method(device_t dev, device_t child)
5486 struct pci_devinfo *dinfo = device_get_ivars(child);
5487 pcicfgregs *cfg = &dinfo->cfg;
5489 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * dev_lookup eventhandler: translate a pciconf-style device selector
 * ("pciD:B:S:F" or "pciB:S:F", where the three-part form assumes
 * domain 0) into a device_t via pci_find_dbsf().
 *
 * Each component is parsed with strtol() and rejected if negative,
 * over INT_MAX, or not followed by the expected separator.  The final
 * range check bounds domain/bus/slot against the PCI maxima and allows
 * ARI-extended function numbers only when slot is 0.
 *
 * NOTE(review): early returns, the assignments of the parsed values
 * into domain/bus/slot/func, and the branch bodies are elided in this
 * extract.
 */
5494 pci_lookup(void *arg, const char *name, device_t *dev)
5498 	int domain, bus, slot, func;
5504 	 * Accept pciconf-style selectors of either pciD:B:S:F or
5505 	 * pciB:S:F. In the latter case, the domain is assumed to
5508 	if (strncmp(name, "pci", 3) != 0)
5510 	val = strtol(name + 3, &end, 10);
5511 	if (val < 0 || val > INT_MAX || *end != ':')
5514 	val = strtol(end + 1, &end, 10);
5515 	if (val < 0 || val > INT_MAX || *end != ':')
5518 	val = strtol(end + 1, &end, 10);
5519 	if (val < 0 || val > INT_MAX)
5523 	val = strtol(end + 1, &end, 10);
5524 	if (val < 0 || val > INT_MAX || *end != '\0')
5527 	} else if (*end == '\0') {
5535 	if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
5536 	    func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
5539 	*dev = pci_find_dbsf(domain, bus, slot, func);
/*
 * Module event handler for the pci driver.
 *
 * On load: initializes the global device queue, creates the /dev
 * control node, loads the vendor-data database, and registers the
 * dev_lookup handler (pci_lookup).  On unload: deregisters the handler
 * and destroys the device node.
 *
 * NOTE(review): the "switch (what)" statement and its MOD_LOAD /
 * MOD_UNLOAD case labels are elided in this extract.
 */
5543 pci_modevent(module_t mod, int what, void *arg)
5545 	static struct cdev *pci_cdev;
5546 	static eventhandler_tag tag;
5550 		STAILQ_INIT(&pci_devq);
5552 		pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
5554 		pci_load_vendor_data();
5555 		tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
5561 		EVENTHANDLER_DEREGISTER(dev_lookup, tag);
5562 		destroy_dev(pci_cdev);
/*
 * Rewrite the saved PCI Express capability control registers.
 *
 * Capability-version-1 devices only implement some control registers
 * depending on port type, so each write is gated on version > 1 or the
 * appropriate pcie_type; version-2+ devices implement them all
 * (including the *_CTL2 set at the end).  Mirrors the read-side logic
 * in pci_cfg_save_pcie().
 */
5570 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
/* All PCIe control registers here are 16 bits wide. */
5572 #define WREG(n, v)	pci_write_config(dev, pos + (n), (v), 2)
5573 	struct pcicfg_pcie *cfg;
5576 	cfg = &dinfo->cfg.pcie;
5577 	pos = cfg->pcie_location;
5579 	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5581 	WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
5583 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5584 	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5585 	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5586 		WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
5588 	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5589 	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5590 	    (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5591 		WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
5593 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5594 	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5595 		WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
5598 	WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
5599 	WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
5600 	WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
5606 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
5608 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
5609 dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a device's config space from the cached copy in dinfo.
 *
 * Order matters: power is brought back to D0 first (D3->D0 resets BARs
 * and other registers), then the standard header registers, then the
 * header-type-specific registers (type 0 min_gnt/max_lat; type 1/2
 * bridge bus-number and control registers), then the BARs, and finally
 * the PCIe/PCI-X capabilities, MSI/MSI-X state, and SR-IOV state.
 *
 * NOTE(review): break statements and closing braces of the switch are
 * elided in this extract.
 */
5613 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
5617 	 * Restore the device to full power mode. We must do this
5618 	 * before we restore the registers because moving from D3 to
5619 	 * D0 will cause the chip's BARs and some other registers to
5620 	 * be reset to some unknown power on reset values. Cut down
5621 	 * the noise on boot by doing nothing if we are already in
5624 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
5625 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
/* Common (header-type-independent) registers. */
5626 	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
5627 	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
5628 	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
5629 	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
5630 	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
5631 	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
5632 	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
5633 	switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
5634 	case PCIM_HDRTYPE_NORMAL:
5635 		pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
5636 		pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
5638 	case PCIM_HDRTYPE_BRIDGE:
5639 		pci_write_config(dev, PCIR_SECLAT_1,
5640 		    dinfo->cfg.bridge.br_seclat, 1);
5641 		pci_write_config(dev, PCIR_SUBBUS_1,
5642 		    dinfo->cfg.bridge.br_subbus, 1);
5643 		pci_write_config(dev, PCIR_SECBUS_1,
5644 		    dinfo->cfg.bridge.br_secbus, 1);
5645 		pci_write_config(dev, PCIR_PRIBUS_1,
5646 		    dinfo->cfg.bridge.br_pribus, 1);
5647 		pci_write_config(dev, PCIR_BRIDGECTL_1,
5648 		    dinfo->cfg.bridge.br_control, 2);
5650 	case PCIM_HDRTYPE_CARDBUS:
5651 		pci_write_config(dev, PCIR_SECLAT_2,
5652 		    dinfo->cfg.bridge.br_seclat, 1);
5653 		pci_write_config(dev, PCIR_SUBBUS_2,
5654 		    dinfo->cfg.bridge.br_subbus, 1);
5655 		pci_write_config(dev, PCIR_SECBUS_2,
5656 		    dinfo->cfg.bridge.br_secbus, 1);
5657 		pci_write_config(dev, PCIR_PRIBUS_2,
5658 		    dinfo->cfg.bridge.br_pribus, 1);
5659 		pci_write_config(dev, PCIR_BRIDGECTL_2,
5660 		    dinfo->cfg.bridge.br_control, 2);
5663 	pci_restore_bars(dev);
5666 	 * Restore extended capabilities for PCI-Express and PCI-X
5668 	if (dinfo->cfg.pcie.pcie_location != 0)
5669 		pci_cfg_restore_pcie(dev, dinfo);
5670 	if (dinfo->cfg.pcix.pcix_location != 0)
5671 		pci_cfg_restore_pcix(dev, dinfo);
5673 	/* Restore MSI and MSI-X configurations if they are present. */
5674 	if (dinfo->cfg.msi.msi_location != 0)
5675 		pci_resume_msi(dev);
5676 	if (dinfo->cfg.msix.msix_location != 0)
5677 		pci_resume_msix(dev);
/* SR-IOV state (PF only; cfg.iov is NULL otherwise). */
5680 	if (dinfo->cfg.iov != NULL)
5681 		pci_iov_cfg_restore(dev, dinfo);
/*
 * Snapshot the PCI Express capability control registers into dinfo.
 *
 * Read-side twin of pci_cfg_restore_pcie(): the same version/port-type
 * gating decides which control registers the device actually
 * implements (v1 devices implement a subset depending on pcie_type;
 * v2+ implement all, including the *_CTL2 set).
 */
5686 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
/* All PCIe control registers here are 16 bits wide. */
5688 #define RREG(n)	pci_read_config(dev, pos + (n), 2)
5689 	struct pcicfg_pcie *cfg;
5692 	cfg = &dinfo->cfg.pcie;
5693 	pos = cfg->pcie_location;
5695 	cfg->pcie_flags = RREG(PCIER_FLAGS);
5697 	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5699 	cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
5701 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5702 	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5703 	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5704 		cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
5706 	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5707 	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5708 	    (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5709 		cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
5711 	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5712 	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5713 		cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
5716 	cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5717 	cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5718 	cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
5724 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5726 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5727 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * Snapshot a device's writable config-space registers into dinfo and,
 * when setstate is non-zero and policy allows, power the device down
 * to D3.
 *
 * The refresh covers the normally writable parts of type 0/1/2 headers
 * plus the PCIe/PCI-X capability state and SR-IOV state.  The
 * powerdown policy (pci_do_power_nodriver sysctl) exempts classes that
 * misbehave when powered off, and honors the PCI spec requirement that
 * D3 may only be entered from D0.
 *
 * NOTE(review): break statements, closing braces, the setstate check,
 * and several register-offset arguments are elided in this extract.
 */
5731 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5737 	 * Some drivers apparently write to these registers w/o updating our
5738 	 * cached copy. No harm happens if we update the copy, so do so here
5739 	 * so we can restore them. The COMMAND register is modified by the
5740 	 * bus w/o updating the cache. This should represent the normally
5741 	 * writable portion of the 'defined' part of type 0/1/2 headers.
5743 	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5744 	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5745 	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5746 	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5747 	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5748 	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5749 	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5750 	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5751 	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5752 	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5753 	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
5754 	switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
5755 	case PCIM_HDRTYPE_NORMAL:
5756 		dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5757 		dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5758 		dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5759 		dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5761 	case PCIM_HDRTYPE_BRIDGE:
5762 		dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
5764 		dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
5766 		dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
5768 		dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
5770 		dinfo->cfg.bridge.br_control = pci_read_config(dev,
5771 		    PCIR_BRIDGECTL_1, 2);
5773 	case PCIM_HDRTYPE_CARDBUS:
5774 		dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
5776 		dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
5778 		dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
5780 		dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
5782 		dinfo->cfg.bridge.br_control = pci_read_config(dev,
5783 		    PCIR_BRIDGECTL_2, 2);
5784 		dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2);
5785 		dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2);
/* Capability and SR-IOV state, when the device has them. */
5789 	if (dinfo->cfg.pcie.pcie_location != 0)
5790 		pci_cfg_save_pcie(dev, dinfo);
5792 	if (dinfo->cfg.pcix.pcix_location != 0)
5793 		pci_cfg_save_pcix(dev, dinfo);
5796 	if (dinfo->cfg.iov != NULL)
5797 		pci_iov_cfg_save(dev, dinfo);
5801 	 * don't set the state for display devices, base peripherals and
5802 	 * memory devices since bad things happen when they are powered down.
5803 	 * We should (a) have drivers that can easily detach and (b) use
5804 	 * generic drivers for these devices so that some device actually
5805 	 * attaches. We need to make sure that when we implement (a) we don't
5806 	 * power the device down on a reattach.
5808 	cls = pci_get_class(dev);
5811 	switch (pci_do_power_nodriver)
5813 	case 0:		/* NO powerdown at all */
5815 	case 1:		/* Conservative about what to power down */
5816 		if (cls == PCIC_STORAGE)
5819 	case 2:		/* Aggressive about what to power down */
5820 		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5821 		    cls == PCIC_BASEPERIPH)
5824 	case 3:		/* Power down everything */
5828 	 * PCI spec says we can only go into D3 state from D0 state.
5829 	 * Transition from D[12] into D0 before going to D3 state.
5831 	ps = pci_get_powerstate(dev);
5832 	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5833 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5834 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5835 		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5838 /* Wrapper APIs suitable for device driver use. */
5840 pci_save_state(device_t dev)
5842 struct pci_devinfo *dinfo;
5844 dinfo = device_get_ivars(dev);
5845 pci_cfg_save(dev, dinfo, 0);
5849 pci_restore_state(device_t dev)
5851 struct pci_devinfo *dinfo;
5853 dinfo = device_get_ivars(dev);
5854 pci_cfg_restore(dev, dinfo);
5858 pci_get_id_method(device_t dev, device_t child, enum pci_id_type type,
5862 return (PCIB_GET_ID(device_get_parent(dev), child, type, id));
5865 /* Find the upstream port of a given PCI device in a root complex. */
/*
 * Starting from a device on a PCI bus, walk up the bridge hierarchy
 * (bus -> bridge -> bus ...) until reaching either a bridge whose PCIe
 * capability marks it a root port (returned) or a bridge whose parent
 * is not a PCI bus (search fails).
 *
 * NOTE(review): the loop construct, return statements, and the
 * declarations of bus/pcib are elided in this extract.
 */
5867 pci_find_pcie_root_port(device_t dev)
5869 	struct pci_devinfo *dinfo;
5870 	devclass_t pci_class;
5873 	pci_class = devclass_find("pci");
5874 	KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
5875 	    ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
5878 	 * Walk the bridge hierarchy until we find a PCI-e root
5879 	 * port or a non-PCI device.
5882 		bus = device_get_parent(dev);
5883 		KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
5884 		    device_get_nameunit(dev)));
5886 		pcib = device_get_parent(bus);
5887 		KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
5888 		    device_get_nameunit(bus)));
5891 		 * pcib's parent must be a PCI bus for this to be a
5894 		if (device_get_devclass(device_get_parent(pcib)) != pci_class)
5897 		dinfo = device_get_ivars(pcib);
5898 		if (dinfo->cfg.pcie.pcie_location != 0 &&
5899 		    dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)
/*
 * (Header comment below is original; function polls the PCIe Device
 * Status register's Transactions Pending bit, sleeping via pause_sbt()
 * in 100 ms slices until the bit clears or max_delay expires.)
 */
5907  * Wait for pending transactions to complete on a PCI-express function.
5909  * The maximum delay is specified in milliseconds in max_delay.  Note
5910  * that this function may sleep.
5912  * Returns true if the function is idle and false if the timeout is
5913  * exceeded.  If dev is not a PCI-express function, this returns true.
5916 pcie_wait_for_pending_transactions(device_t dev, u_int max_delay)
5918 	struct pci_devinfo *dinfo = device_get_ivars(dev);
5922 	cap = dinfo->cfg.pcie.pcie_location;
5926 	sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2);
5927 	while (sta & PCIEM_STA_TRANSACTION_PND) {
/* NOTE(review): the timeout-expired early return is elided here. */
5931 		/* Poll once every 100 milliseconds up to the timeout. */
5932 		if (max_delay > 100) {
5933 			pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK);
5936 			pause_sbt("pcietp", max_delay * SBT_1MS, 0,
5940 		sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2);
/*
 * (Header comment below is original.)  Devices on the 1.x capability,
 * or without programmable timeout ranges (DEVICE_CAP2), use the spec
 * default range whose maximum is 50 ms; otherwise the programmed
 * DEVICE_CTL2 timeout-value field is mapped to the upper bound of its
 * range, in microseconds.
 *
 * NOTE(review): return statements for the early-out, default-range,
 * 100US/10MS/55MS cases and the switch default are elided in this
 * extract.
 */
5947  * Determine the maximum Completion Timeout in microseconds.
5949  * For non-PCI-express functions this returns 0.
5952 pcie_get_max_completion_timeout(device_t dev)
5954 	struct pci_devinfo *dinfo = device_get_ivars(dev);
5957 	cap = dinfo->cfg.pcie.pcie_location;
5962 	 * Functions using the 1.x spec use the default timeout range of
5963 	 * 50 microseconds to 50 milliseconds.  Functions that do not
5964 	 * support programmable timeouts also use this range.
5966 	if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 ||
5967 	    (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) &
5968 	    PCIEM_CAP2_COMP_TIMO_RANGES) == 0)
5971 	switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) &
5972 	    PCIEM_CTL2_COMP_TIMO_VAL) {
5973 	case PCIEM_CTL2_COMP_TIMO_100US:
5975 	case PCIEM_CTL2_COMP_TIMO_10MS:
5977 	case PCIEM_CTL2_COMP_TIMO_55MS:
5979 	case PCIEM_CTL2_COMP_TIMO_210MS:
5980 		return (210 * 1000);
5981 	case PCIEM_CTL2_COMP_TIMO_900MS:
5982 		return (900 * 1000);
5983 	case PCIEM_CTL2_COMP_TIMO_3500MS:
5984 		return (3500 * 1000);
5985 	case PCIEM_CTL2_COMP_TIMO_13S:
5986 		return (13 * 1000 * 1000);
5987 	case PCIEM_CTL2_COMP_TIMO_64S:
5988 		return (64 * 1000 * 1000);
5995 * Perform a Function Level Reset (FLR) on a device.
5997 * This function first waits for any pending transactions to complete
5998 * within the timeout specified by max_delay. If transactions are
5999 * still pending, the function will return false without attempting a
6002 * If dev is not a PCI-express function or does not support FLR, this
6003 * function returns false.
6005 * Note that no registers are saved or restored. The caller is
6006 * responsible for saving and restoring any registers including
6007 * PCI-standard registers via pci_save_state() and
6008 * pci_restore_state().
6011 pcie_flr(device_t dev, u_int max_delay, bool force)
6013 struct pci_devinfo *dinfo = device_get_ivars(dev);
6018 cap = dinfo->cfg.pcie.pcie_location;
6022 if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR))
6026 * Disable busmastering to prevent generation of new
6027 * transactions while waiting for the device to go idle. If
6028 * the idle timeout fails, the command register is restored
6029 * which will re-enable busmastering.
6031 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
6032 pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2);
6033 if (!pcie_wait_for_pending_transactions(dev, max_delay)) {
6035 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
6038 pci_printf(&dinfo->cfg,
6039 "Resetting with transactions pending after %d ms\n",
6043 * Extend the post-FLR delay to cover the maximum
6044 * Completion Timeout delay of anything in flight
6045 * during the FLR delay. Enforce a minimum delay of
6048 compl_delay = pcie_get_max_completion_timeout(dev) / 1000;
6049 if (compl_delay < 10)
6054 /* Initiate the reset. */
6055 ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
6056 pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl |
6057 PCIEM_CTL_INITIATE_FLR, 2);
6059 /* Wait for 100ms. */
6060 pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK);
6062 if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) &
6063 PCIEM_STA_TRANSACTION_PND)
6064 pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n");