2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/limits.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
49 #include <vm/vm_extern.h>
52 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <machine/stdarg.h>
57 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
58 #include <machine/intr_machdep.h>
61 #include <sys/pciio.h>
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pci_private.h>
68 #include <dev/pci/pci_iov_private.h>
71 #include <dev/usb/controller/xhcireg.h>
72 #include <dev/usb/controller/ehcireg.h>
73 #include <dev/usb/controller/ohcireg.h>
74 #include <dev/usb/controller/uhcireg.h>
/*
 * True iff config register offset 'reg' is the expansion-ROM BAR for
 * the header type recorded in 'cfg': type-0 (normal) functions keep it
 * at PCIR_BIOS, type-1 (bridge) functions at PCIR_BIOS_1.
 *
 * 'reg' is parenthesized so that expression arguments (e.g. "x & y")
 * expand with the intended precedence; '==' binds tighter than '&'/'|',
 * so the unparenthesized form silently mis-parses such arguments.
 */
#define PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && (reg) == PCIR_BIOS) || \
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && (reg) == PCIR_BIOS_1))
/*
 * Forward declarations for helpers private to this file and for the
 * newbus/kobj method implementations referenced by pci_methods[] below.
 */
static int pci_has_quirk(uint32_t devid, int quirk);
static pci_addr_t pci_mapbase(uint64_t mapreg);
static const char *pci_maptype(uint64_t mapreg);
static int pci_maprange(uint64_t mapreg);
static pci_addr_t pci_rombase(uint64_t mapreg);
static int pci_romsize(uint64_t testval);
static void pci_fixancient(pcicfgregs *cfg);
static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
static int pci_porten(device_t dev);
static int pci_memen(device_t dev);
static void pci_assign_interrupt(device_t bus, device_t dev,
static int pci_add_map(device_t bus, device_t dev, int reg,
struct resource_list *rl, int force, int prefetch);
static int pci_probe(device_t dev);
static int pci_attach(device_t dev);
static int pci_detach(device_t dev);
static void pci_load_vendor_data(void);
static int pci_describe_parse_line(char **ptr, int *vendor,
int *device, char **desc);
static char *pci_describe_device(device_t dev);
static int pci_modevent(module_t mod, int what, void *arg);
static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
int reg, uint32_t *data);
static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
int reg, uint32_t data);
static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
static void pci_mask_msix(device_t dev, u_int index);
static void pci_unmask_msix(device_t dev, u_int index);
static int pci_msi_blacklisted(void);
static int pci_msix_blacklisted(void);
static void pci_resume_msi(device_t dev);
static void pci_resume_msix(device_t dev);
static int pci_remap_intr_method(device_t bus, device_t dev,
static int pci_get_id_method(device_t dev, device_t child,
enum pci_id_type type, uintptr_t *rid);
static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d,
int b, int s, int f, uint16_t vid, uint16_t did);
/*
 * kobj method dispatch table for the "pci" driver: device lifecycle,
 * generic bus callbacks, and the pci(9)-specific interface (config
 * space access, power states, MSI/MSI-X, VPD, SR-IOV).
 */
static device_method_t pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, pci_probe),
DEVMETHOD(device_attach, pci_attach),
DEVMETHOD(device_detach, pci_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, bus_generic_suspend),
DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
DEVMETHOD(bus_print_child, pci_print_child),
DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
DEVMETHOD(bus_read_ivar, pci_read_ivar),
DEVMETHOD(bus_write_ivar, pci_write_ivar),
DEVMETHOD(bus_driver_added, pci_driver_added),
DEVMETHOD(bus_setup_intr, pci_setup_intr),
DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
DEVMETHOD(bus_delete_resource, pci_delete_resource),
DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
DEVMETHOD(bus_release_resource, pci_release_resource),
DEVMETHOD(bus_activate_resource, pci_activate_resource),
DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
DEVMETHOD(bus_child_deleted, pci_child_deleted),
DEVMETHOD(bus_child_detached, pci_child_detached),
DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
DEVMETHOD(bus_suspend_child, pci_suspend_child),
DEVMETHOD(bus_resume_child, pci_resume_child),
DEVMETHOD(bus_rescan, pci_rescan_method),
/* PCI interface */
DEVMETHOD(pci_read_config, pci_read_config_method),
DEVMETHOD(pci_write_config, pci_write_config_method),
DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
DEVMETHOD(pci_enable_io, pci_enable_io_method),
DEVMETHOD(pci_disable_io, pci_disable_io_method),
DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
DEVMETHOD(pci_find_cap, pci_find_cap_method),
DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
DEVMETHOD(pci_release_msi, pci_release_msi_method),
DEVMETHOD(pci_msi_count, pci_msi_count_method),
DEVMETHOD(pci_msix_count, pci_msix_count_method),
DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method),
DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method),
DEVMETHOD(pci_get_id, pci_get_id_method),
DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method),
DEVMETHOD(pci_child_added, pci_child_added_method),
/* SR-IOV interface */
DEVMETHOD(pci_iov_attach, pci_iov_attach_method),
DEVMETHOD(pci_iov_detach, pci_iov_detach_method),
DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method),
DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
static devclass_t pci_devclass;
/* Register the pci driver as a child of pcib (PCI-PCI/host bridge) devices. */
DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
MODULE_VERSION(pci, 1);
/*
 * Backing store for the vendor/device description database; presumably
 * filled by pci_load_vendor_data() (declared above) — body not in view.
 */
static char *pci_vendordata;
static size_t pci_vendordata_size;
/*
 * NOTE(review): member of the quirk-table entry type (struct pci_quirk)
 * matched against by pci_has_quirk() via q->devid / q->type.
 */
uint32_t devid; /* Vendor/device of the card */
/* Quirk classes recognized by pci_quirks[] below. */
#define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
#define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
#define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
#define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
#define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
#define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
/*
 * Table of known-broken (or known-good-despite-appearances) devices,
 * keyed by the combined device/vendor ID word.  Scanned linearly by
 * pci_has_quirk(); the devid == 0 sentinel terminates the scan.
 */
static const struct pci_quirk pci_quirks[] = {
/* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
{ 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
{ 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
/* As does the Serverworks OSB4 (the SMBus mapping register) */
{ 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
* MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
* or the CMIC-SL (AKA ServerWorks GC_LE).
{ 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
{ 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
* MSI doesn't work on earlier Intel chipsets including
* E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
{ 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
{ 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
{ 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
{ 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
{ 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
{ 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
{ 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
* MSI doesn't work with devices behind the AMD 8131 HT-PCIX
{ 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
* MSI-X allocation doesn't work properly for devices passed through
* by VMware up to at least ESXi 5.1.
{ 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
{ 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
* Some virtualization environments emulate an older chipset
* but support MSI just fine. QEMU uses the Intel 82440.
{ 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
* HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
* controller depending on SoftPciRst register (PM_IO 0x55 [7]).
* It prevents us from attaching hpet(4) when the bit is unset.
* Note this quirk only affects SB600 revision A13 and earlier.
* For SB600 A21 and later, firmware must set the bit to hide it.
* For SB700 and later, it is unused and hardcoded to zero.
{ 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
* Atheros AR8161/AR8162/E2200/E2400 Ethernet controllers have a
* bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit
* of the command register is set.
{ 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
{ 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
* Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
* issue MSI interrupts with PCIM_CMD_INTxDIS set either.
{ 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
{ 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
{ 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
{ 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
{ 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
{ 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
/* map register information */
#define PCI_MAPMEM 0x01 /* memory map */
#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
#define PCI_MAPPORT 0x04 /* port map */
/* Global list of all enumerated PCI functions; searched by pci_find_*(). */
struct devlist pci_devq;
/* NOTE(review): presumably bumped when pci_devq changes — increment site not in view. */
uint32_t pci_generation;
uint32_t pci_numdevs = 0;
/* Set once a PCIe / PCI-X capability is seen; used by the MSI blacklist logic. */
static int pcie_chipset, pcix_chipset;
/* Tunables/sysctls live under the hw.pci node. */
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
static int pci_enable_io_modes = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
&pci_enable_io_modes, 1,
"Enable I/O and memory bits in the config register. Some BIOSes do not\n\
enable these bits correctly. We'd like to do this all the time, but there\n\
are some peripherals that this causes problems with.");
static int pci_do_realloc_bars = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
&pci_do_realloc_bars, 0,
"Attempt to allocate a new range for any BARs whose original "
"firmware-assigned ranges fail to allocate during the initial device scan.");
static int pci_do_power_nodriver = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
&pci_do_power_nodriver, 0,
"Place a function into D3 state when no driver attaches to it. 0 means\n\
disable. 1 means conservatively place devices into D3 state. 2 means\n\
aggressively place devices into D3 state. 3 means put absolutely everything\n\
int pci_do_power_resume = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
&pci_do_power_resume, 1,
"Transition from D3 -> D0 on resume.");
int pci_do_power_suspend = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
&pci_do_power_suspend, 1,
"Transition from D0 -> D3 on suspend.");
static int pci_do_msi = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
"Enable support for MSI interrupts");
static int pci_do_msix = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
"Enable support for MSI-X interrupts");
static int pci_msix_rewrite_table = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, msix_rewrite_table, CTLFLAG_RWTUN,
&pci_msix_rewrite_table, 0,
"Rewrite entire MSI-X table when updating MSI-X entries");
static int pci_honor_msi_blacklist = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
&pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
/* USB early takeover defaults on only where legacy BIOS USB emulation exists. */
#if defined(__i386__) || defined(__amd64__)
static int pci_usb_takeover = 1;
static int pci_usb_takeover = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
&pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
Disable this if you depend on BIOS emulation of USB devices, that is\n\
you use USB devices (like keyboard or mouse) but do not load USB drivers");
static int pci_clear_bars;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
"Ignore firmware-assigned resources for BARs.");
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static int pci_clear_buses;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
"Ignore firmware-assigned bus numbers.");
static int pci_enable_ari = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
0, "Enable support for PCIe Alternative RID Interpretation");
/* Linear scan of pci_quirks[] for a (devid, quirk-type) match. */
pci_has_quirk(uint32_t devid, int quirk)
const struct pci_quirk *q;
/* Table is terminated by an entry with devid == 0. */
for (q = &pci_quirks[0]; q->devid; q++) {
if (q->devid == devid && q->type == quirk)
/* Find a device_t by bus/slot/function in domain 0 */
/* Convenience wrapper: delegates to pci_find_dbsf() with domain fixed at 0. */
pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
return (pci_find_dbsf(0, bus, slot, func));
/* Find a device_t by domain/bus/slot/function */
pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
struct pci_devinfo *dinfo;
/* Walk the global device list looking for an exact D/B/S/F match. */
STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
if ((dinfo->cfg.domain == domain) &&
(dinfo->cfg.bus == bus) &&
(dinfo->cfg.slot == slot) &&
(dinfo->cfg.func == func)) {
return (dinfo->cfg.dev);
/* Find a device_t by vendor/device ID */
/* Returns the first match in list order; later duplicates are not reachable. */
pci_find_device(uint16_t vendor, uint16_t device)
struct pci_devinfo *dinfo;
STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
if ((dinfo->cfg.vendor == vendor) &&
(dinfo->cfg.device == device)) {
return (dinfo->cfg.dev);
/* Find the first device_t matching a PCI base class / subclass pair. */
pci_find_class(uint8_t class, uint8_t subclass)
struct pci_devinfo *dinfo;
STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
if (dinfo->cfg.baseclass == class &&
dinfo->cfg.subclass == subclass) {
return (dinfo->cfg.dev);
/* printf() prefixed with the device's pciD:B:S:F location; returns chars printed. */
pci_printf(pcicfgregs *cfg, const char *fmt, ...)
retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
retval += vprintf(fmt, ap);
/* return base address of memory or port map */
pci_mapbase(uint64_t mapreg)
/* Memory and I/O BARs reserve different numbers of low bits for flags. */
if (PCI_BAR_MEM(mapreg))
return (mapreg & PCIM_BAR_MEM_BASE);
return (mapreg & PCIM_BAR_IO_BASE);
/* return map type of memory or port map */
/* Human-readable BAR kind for printing: I/O port, prefetchable or plain memory. */
pci_maptype(uint64_t mapreg)
if (PCI_BAR_IO(mapreg))
if (mapreg & PCIM_BAR_MEM_PREFETCH)
return ("Prefetchable Memory");
/* return log2 of map size decoded for memory or port map */
pci_mapsize(uint64_t testval)
/* The size is the lowest set bit of the sizing value written back by the device. */
testval = pci_mapbase(testval);
while ((testval & 1) == 0)
/* return base address of device ROM */
pci_rombase(uint64_t mapreg)
/* Mask off the enable bit and reserved low bits of the ROM BAR. */
return (mapreg & PCIM_BIOS_ADDR_MASK);
/* return log2 of map size decoded for device ROM */
pci_romsize(uint64_t testval)
/* Same lowest-set-bit sizing technique as pci_mapsize(), on the ROM base. */
testval = pci_rombase(testval);
while ((testval & 1) == 0)
/* return log2 of address range supported by map register */
pci_maprange(uint64_t mapreg)
/* I/O BARs decode a fixed range; memory BARs encode 32-bit/1MB/64-bit in the type bits. */
if (PCI_BAR_IO(mapreg))
switch (mapreg & PCIM_BAR_MEM_TYPE) {
case PCIM_BAR_MEM_32:
case PCIM_BAR_MEM_1MB:
case PCIM_BAR_MEM_64:
/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
pci_fixancient(pcicfgregs *cfg)
/* Only type-0 headers may need correction; anything else is left alone. */
if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
/* PCI to PCI bridges use header type 1 */
if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
/* extract header type specific config data */
pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
switch (cfg->hdrtype & PCIM_HDRTYPE) {
case PCIM_HDRTYPE_NORMAL:
/* Type 0: plain function — subsystem IDs, timing hints, 6 BARs. */
cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
cfg->mingnt = REG(PCIR_MINGNT, 1);
cfg->maxlat = REG(PCIR_MAXLAT, 1);
cfg->nummaps = PCI_MAXMAPS_0;
case PCIM_HDRTYPE_BRIDGE:
/* Type 1: PCI-PCI bridge — bus number window and bridge control, 2 BARs. */
cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1);
cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1);
cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1);
cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1);
cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2);
cfg->nummaps = PCI_MAXMAPS_1;
case PCIM_HDRTYPE_CARDBUS:
/* Type 2: CardBus bridge — bridge registers at different offsets, 1 BAR. */
cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1);
cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1);
cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1);
cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1);
cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2);
cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
cfg->nummaps = PCI_MAXMAPS_2;
/* read configuration header into pcicfgregs structure */
pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f)
#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
/* Probe vendor/device first; pci_fill_devinfo() reads the rest of the header. */
vid = REG(PCIR_VENDOR, 2);
did = REG(PCIR_DEVICE, 2);
return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did));
/* Default PCI_ALLOC_DEVINFO method: allocate a zeroed pci_devinfo. */
pci_alloc_devinfo_method(device_t dev)
return (malloc(sizeof(struct pci_devinfo), M_DEVBUF,
/*
 * Build a pci_devinfo for one function: read the common config header,
 * the header-type-specific fields and the capability list, then link the
 * entry onto the global pci_devq and mirror the data into the pc_conf
 * structure exported to userland via pciio.
 */
static struct pci_devinfo *
pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f,
uint16_t vid, uint16_t did)
struct pci_devinfo *devlist_entry;
devlist_entry = PCI_ALLOC_DEVINFO(bus);
cfg = &devlist_entry->cfg;
cfg->cmdreg = REG(PCIR_COMMAND, 2);
cfg->statreg = REG(PCIR_STATUS, 2);
cfg->baseclass = REG(PCIR_CLASS, 1);
cfg->subclass = REG(PCIR_SUBCLASS, 1);
cfg->progif = REG(PCIR_PROGIF, 1);
cfg->revid = REG(PCIR_REVID, 1);
cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
cfg->lattimer = REG(PCIR_LATTIMER, 1);
cfg->intpin = REG(PCIR_INTPIN, 1);
cfg->intline = REG(PCIR_INTLINE, 1);
/* Split the multifunction flag out of the raw header-type byte. */
cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
cfg->hdrtype &= ~PCIM_MFDEV;
STAILQ_INIT(&cfg->maps);
pci_hdrtypedata(pcib, b, s, f, cfg);
/* Only walk the capability list if the status register advertises one. */
if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
pci_read_cap(pcib, cfg);
STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links);
/* Copy into the pci_conf view consumed by pciconf(8)/pciio. */
devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
devlist_entry->conf.pc_sel.pc_func = cfg->func;
devlist_entry->conf.pc_hdr = cfg->hdrtype;
devlist_entry->conf.pc_subvendor = cfg->subvendor;
devlist_entry->conf.pc_subdevice = cfg->subdevice;
devlist_entry->conf.pc_vendor = cfg->vendor;
devlist_entry->conf.pc_device = cfg->device;
devlist_entry->conf.pc_class = cfg->baseclass;
devlist_entry->conf.pc_subclass = cfg->subclass;
devlist_entry->conf.pc_progif = cfg->progif;
devlist_entry->conf.pc_revid = cfg->revid;
return (devlist_entry);
/*
 * Parse the Enhanced Allocation (EA) capability: walk each entry,
 * decode its base / max-offset pair (optionally 64-bit) and append a
 * pci_ea_entry to cfg->ea.ea_entries.
 */
pci_ea_fill_info(device_t pcib, pcicfgregs *cfg)
#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \
cfg->ea.ea_location + (n), w)
uint64_t base, max_offset;
struct pci_ea_entry *eae;
if (cfg->ea.ea_location == 0)
STAILQ_INIT(&cfg->ea.ea_entries);
/* Determine the number of entries */
num_ent = REG(PCIR_EA_NUM_ENT, 2);
num_ent &= PCIM_EA_NUM_ENT_MASK;
/* Find the first entry to care about */
ptr = PCIR_EA_FIRST_ENT;
/* Skip DWORD 2 for type 1 functions */
if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE)
for (a = 0; a < num_ent; a++) {
eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO);
eae->eae_cfg_offset = cfg->ea.ea_location + ptr;
/* Read a number of dwords in the entry */
ent_size = (val & PCIM_EA_ES);
for (b = 0; b < ent_size; b++) {
eae->eae_flags = val;
eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET;
base = dw[0] & PCIM_EA_FIELD_MASK;
max_offset = dw[1] | ~PCIM_EA_FIELD_MASK;
/* If the IS_64 flag is set, the next dword holds the upper 32 bits. */
if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) {
base |= (uint64_t)dw[b] << 32UL;
if (((dw[1] & PCIM_EA_IS_64) != 0)
max_offset |= (uint64_t)dw[b] << 32UL;
eae->eae_base = base;
eae->eae_max_offset = max_offset;
STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link);
printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n",
cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags,
(uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset);
/*
 * Walk the PCI capability linked list and record the location/contents
 * of each capability this driver cares about (power management,
 * HyperTransport, MSI, MSI-X, VPD, subvendor, PCI-X, PCIe, EA) into cfg.
 */
pci_read_cap(device_t pcib, pcicfgregs *cfg)
#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
int ptr, nextptr, ptrptr;
/* The capability pointer register lives at a header-type-dependent offset. */
switch (cfg->hdrtype & PCIM_HDRTYPE) {
case PCIM_HDRTYPE_NORMAL:
case PCIM_HDRTYPE_BRIDGE:
ptrptr = PCIR_CAP_PTR;
case PCIM_HDRTYPE_CARDBUS:
ptrptr = PCIR_CAP_PTR_2;	/* cardbus capabilities ptr */
return; /* no extended capabilities support */
nextptr = REG(ptrptr, 1);	/* sanity check? */
* Read capability entries.
while (nextptr != 0) {
printf("illegal PCI extended capability offset %d\n",
/* Find the next entry */
nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
/* Process this entry */
switch (REG(ptr + PCICAP_ID, 1)) {
case PCIY_PMG: /* PCI power management */
if (cfg->pp.pp_cap == 0) {
cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
if ((nextptr - ptr) > PCIR_POWER_DATA)
cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
case PCIY_HT: /* HyperTransport */
/* Determine HT-specific capability type. */
val = REG(ptr + PCIR_HT_COMMAND, 2);
if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
cfg->ht.ht_slave = ptr;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
switch (val & PCIM_HTCMD_CAP_MASK) {
case PCIM_HTCAP_MSI_MAPPING:
if (!(val & PCIM_HTCMD_MSI_FIXED)) {
/* Sanity check the mapping window. */
addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
if (addr != MSI_INTEL_ADDR_BASE)
"HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
cfg->domain, cfg->bus,
cfg->slot, cfg->func,
addr = MSI_INTEL_ADDR_BASE;
cfg->ht.ht_msimap = ptr;
cfg->ht.ht_msictrl = val;
cfg->ht.ht_msiaddr = addr;
case PCIY_MSI: /* PCI MSI */
cfg->msi.msi_location = ptr;
cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* MMC field encodes log2 of the supported message count. */
cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
PCIM_MSICTRL_MMC_MASK)>>1);
case PCIY_MSIX: /* PCI MSI-X */
cfg->msix.msix_location = ptr;
cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA registers each encode a BAR index plus an offset. */
val = REG(ptr + PCIR_MSIX_TABLE, 4);
cfg->msix.msix_table_bar = PCIR_BAR(val &
cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
val = REG(ptr + PCIR_MSIX_PBA, 4);
cfg->msix.msix_pba_bar = PCIR_BAR(val &
cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
case PCIY_VPD: /* PCI Vital Product Data */
cfg->vpd.vpd_reg = ptr;
/* Should always be true. */
if ((cfg->hdrtype & PCIM_HDRTYPE) ==
PCIM_HDRTYPE_BRIDGE) {
val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
cfg->subvendor = val & 0xffff;
cfg->subdevice = val >> 16;
case PCIY_PCIX: /* PCI-X */
* Assume we have a PCI-X chipset if we have
* at least one PCI-PCI bridge with a PCI-X
* capability. Note that some systems with
* PCI-express or HT chipsets might match on
* this check as well.
if ((cfg->hdrtype & PCIM_HDRTYPE) ==
cfg->pcix.pcix_location = ptr;
case PCIY_EXPRESS: /* PCI-express */
* Assume we have a PCI-express chipset if we have
* at least one PCI-express device.
cfg->pcie.pcie_location = ptr;
val = REG(ptr + PCIER_FLAGS, 2);
cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
case PCIY_EA: /* Enhanced Allocation */
cfg->ea.ea_location = ptr;
pci_ea_fill_info(pcib, cfg);
#if defined(__powerpc__)
* Enable the MSI mapping window for all HyperTransport
* slaves. PCI-PCI bridges have their windows enabled via
if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
!(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
"Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
cfg->domain, cfg->bus, cfg->slot, cfg->func);
cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
/* REG and WREG use carry through to next functions */
* PCI Vital Product Data
#define PCI_VPD_TIMEOUT 1000000
/*
 * Read one 32-bit VPD word: write the address, then poll the flag bit
 * (0x8000) until the device signals the data register is valid.
 * NOTE(review): timeout/error-return path is on lines not in view.
 */
pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
int count = PCI_VPD_TIMEOUT;
KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
/* Flag bit set by hardware means the read has completed. */
while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
DELAY(1);	/* limit looping */
*data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one 32-bit VPD word: load the data register, write the address
 * with the flag bit (0x8000) set, then poll until hardware clears it.
 */
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
int count = PCI_VPD_TIMEOUT;
KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
/* Flag bit cleared by hardware means the write has completed. */
while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
DELAY(1);	/* limit looping */
#undef PCI_VPD_TIMEOUT
/* Cursor over the VPD space: buffers one 32-bit word and hands out bytes. */
struct vpd_readstate {
/*
 * Return the next VPD byte via *data, refilling the 4-byte buffer from
 * pci_read_vpd_reg() when it runs dry; non-zero return indicates a
 * VPD read failure.
 */
vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
if (vrs->bytesinval == 0) {
if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
vrs->val = le32toh(reg);
byte = vrs->val & 0xff;
vrs->bytesinval = 3;
/* Serve the remaining bytes of the buffered word, LSB first. */
vrs->val = vrs->val >> 8;
byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array and the read/write (VPD-W)
 * keyword array.  Implemented as a byte-at-a-time state machine driven
 * by vpd_nextbyte(); negative 'state' values signal parse/checksum/I/O
 * errors and select the cleanup paths at the bottom.
 */
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
struct vpd_readstate vrs;
int alloc, off; /* alloc/off for RO/W arrays */
/* init vpd reader */
name = remain = i = 0; /* shut up stupid gcc */
alloc = off = 0; /* shut up stupid gcc */
dflen = 0; /* shut up stupid gcc */
while (state >= 0) {
if (vpd_nextbyte(&vrs, &byte)) {
printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
"state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
vrs.off, vrs.bytesinval, byte, state, remain, name, i);
case 0: /* item name */
/* Large-resource tags carry a 16-bit little-endian length. */
if (vpd_nextbyte(&vrs, &byte2)) {
if (vpd_nextbyte(&vrs, &byte2)) {
remain |= byte2 << 8;
/* VPD space is at most 0x7f 4-byte words; reject lengths past the end. */
if (remain > (0x7f*4 - vrs.off)) {
"invalid VPD data, remain %#x\n",
/* Small-resource tag: 3-bit length, 4-bit name. */
remain = byte & 0x7;
name = (byte >> 3) & 0xf;
case 0x2: /* String */
cfg->vpd.vpd_ident = malloc(remain + 1,
M_DEVBUF, M_WAITOK);
case 0x10: /* VPD-R */
cfg->vpd.vpd_ros = malloc(alloc *
sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
case 0x11: /* VPD-W */
cfg->vpd.vpd_w = malloc(alloc *
sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
default: /* Invalid data, abort */
case 1: /* Identifier String */
cfg->vpd.vpd_ident[i++] = byte;
cfg->vpd.vpd_ident[i] = '\0';
case 2: /* VPD-R Keyword Header */
/* Grow the keyword array geometrically as needed. */
cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
(alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
M_DEVBUF, M_WAITOK | M_ZERO);
cfg->vpd.vpd_ros[off].keyword[0] = byte;
if (vpd_nextbyte(&vrs, &byte2)) {
cfg->vpd.vpd_ros[off].keyword[1] = byte2;
if (vpd_nextbyte(&vrs, &byte2)) {
cfg->vpd.vpd_ros[off].len = dflen = byte2;
strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
* if this happens, we can't trust the rest
pci_printf(cfg, "bad keyword length: %d\n",
} else if (dflen == 0) {
/* Zero-length value still gets a 1-byte NUL buffer. */
cfg->vpd.vpd_ros[off].value = malloc(1 *
sizeof(*cfg->vpd.vpd_ros[off].value),
M_DEVBUF, M_WAITOK);
cfg->vpd.vpd_ros[off].value[0] = '\x00';
cfg->vpd.vpd_ros[off].value = malloc(
sizeof(*cfg->vpd.vpd_ros[off].value),
M_DEVBUF, M_WAITOK);
/* keep in sync w/ state 3's transitions */
if (dflen == 0 && remain == 0)
else if (dflen == 0)
case 3: /* VPD-R Keyword Value */
cfg->vpd.vpd_ros[off].value[i++] = byte;
/* The RV keyword's first byte completes the checksum over VPD-R. */
if (strncmp(cfg->vpd.vpd_ros[off].keyword,
"RV", 2) == 0 && cksumvalid == -1) {
"bad VPD cksum, remain %hhu\n",
/* keep in sync w/ state 2's transitions */
cfg->vpd.vpd_ros[off++].value[i++] = '\0';
if (dflen == 0 && remain == 0) {
/* Trim the array to the number of keywords actually read. */
cfg->vpd.vpd_rocnt = off;
cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
off * sizeof(*cfg->vpd.vpd_ros),
M_DEVBUF, M_WAITOK | M_ZERO);
} else if (dflen == 0)
case 5: /* VPD-W Keyword Header */
cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
(alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
M_DEVBUF, M_WAITOK | M_ZERO);
cfg->vpd.vpd_w[off].keyword[0] = byte;
if (vpd_nextbyte(&vrs, &byte2)) {
cfg->vpd.vpd_w[off].keyword[1] = byte2;
if (vpd_nextbyte(&vrs, &byte2)) {
cfg->vpd.vpd_w[off].len = dflen = byte2;
cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
sizeof(*cfg->vpd.vpd_w[off].value),
M_DEVBUF, M_WAITOK);
/* keep in sync w/ state 6's transitions */
if (dflen == 0 && remain == 0)
else if (dflen == 0)
case 6: /* VPD-W Keyword Value */
cfg->vpd.vpd_w[off].value[i++] = byte;
/* keep in sync w/ state 5's transitions */
cfg->vpd.vpd_w[off++].value[i++] = '\0';
if (dflen == 0 && remain == 0) {
cfg->vpd.vpd_wcnt = off;
cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
off * sizeof(*cfg->vpd.vpd_w),
M_DEVBUF, M_WAITOK | M_ZERO);
} else if (dflen == 0)
pci_printf(cfg, "invalid state: %d\n", state);
if (cksumvalid == 0 || state < -1) {
/* read-only data bad, clean up */
if (cfg->vpd.vpd_ros != NULL) {
for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
free(cfg->vpd.vpd_ros, M_DEVBUF);
cfg->vpd.vpd_ros = NULL;
/* I/O error, clean up */
pci_printf(cfg, "failed to read VPD data.\n");
if (cfg->vpd.vpd_ident != NULL) {
free(cfg->vpd.vpd_ident, M_DEVBUF);
cfg->vpd.vpd_ident = NULL;
if (cfg->vpd.vpd_w != NULL) {
for (off = 0; cfg->vpd.vpd_w[off].value; off++)
free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
free(cfg->vpd.vpd_w, M_DEVBUF);
cfg->vpd.vpd_w = NULL;
/* Mark parsed (even on failure) so we do not re-read on every query. */
cfg->vpd.vpd_cached = 1;
/*
 * PCI_GET_VPD_IDENT() bus method: return the cached VPD identifier
 * string for 'child' via *identptr, lazily reading and caching the
 * device's VPD data on first use.
 * (Return statements are elided from this view of the file.)
 */
1305 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1307 struct pci_devinfo *dinfo = device_get_ivars(child);
1308 pcicfgregs *cfg = &dinfo->cfg;
/* Populate the VPD cache on first access, if a VPD capability exists. */
1310 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1311 pci_read_vpd(device_get_parent(dev), cfg);
1313 *identptr = cfg->vpd.vpd_ident;
/* No identifier string present in the VPD data. */
1315 if (*identptr == NULL)
/*
 * PCI_GET_VPD_READONLY() bus method: look up the cached VPD read-only
 * value for the two-character keyword 'kw' and return it through the
 * caller's value pointer (trailing parameter elided from this view).
 */
1322 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1325 struct pci_devinfo *dinfo = device_get_ivars(child);
1326 pcicfgregs *cfg = &dinfo->cfg;
/* Populate the VPD cache on first access, if a VPD capability exists. */
1329 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1330 pci_read_vpd(device_get_parent(dev), cfg);
/* Linear scan of the cached read-only entries for a keyword match. */
1332 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1333 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1334 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1335 *vptr = cfg->vpd.vpd_ros[i].value;
/*
 * Ensure the VPD data for 'dev' has been read and cached.  Note the
 * double device_get_parent(): unlike the *_method variants above,
 * 'dev' here is presumably the PCI device itself rather than the bus,
 * so the pcib is two levels up — NOTE(review): confirm against callers.
 */
1344 pci_fetch_vpd_list(device_t dev)
1346 struct pci_devinfo *dinfo = device_get_ivars(dev);
1347 pcicfgregs *cfg = &dinfo->cfg;
1349 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1350 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1355 * Find the requested HyperTransport capability and return the offset
1356 * in configuration space via the pointer provided. The function
1357 * returns 0 on success and an error code otherwise.
/*
 * PCI_FIND_HTCAP() bus method: walk the chain of HyperTransport
 * capabilities of 'child' looking for the requested HT capability
 * type; on a match the offset is returned through *capreg (the store
 * and returns are elided from this view).
 */
1360 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
/* Start from the first HT capability, if any. */
1365 error = pci_find_cap(child, PCIY_HT, &ptr);
1370 * Traverse the capabilities list checking each HT capability
1371 * to see if it matches the requested HT capability.
1374 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
/*
 * Slave and host interface capabilities are matched differently
 * from the other HT capability types; the exact comparison is
 * elided from this view.
 */
1375 if (capability == PCIM_HTCAP_SLAVE ||
1376 capability == PCIM_HTCAP_HOST)
/* All other types compare the full capability-type field. */
1379 val &= PCIM_HTCMD_CAP_MASK;
1380 if (val == capability) {
1386 /* Skip to the next HT capability. */
1388 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1389 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1398 * Find the requested capability and return the offset in
1399 * configuration space via the pointer provided. The function returns
1400 * 0 on success and an error code otherwise.
/*
 * PCI_FIND_CAP() bus method: scan the standard capability list of
 * 'child' for 'capability'; on success the offset is returned via the
 * pointer argument (the parameter and store are elided from this view).
 */
1403 pci_find_cap_method(device_t dev, device_t child, int capability,
1406 struct pci_devinfo *dinfo = device_get_ivars(child);
1407 pcicfgregs *cfg = &dinfo->cfg;
1412 * Check the CAP_LIST bit of the PCI status register first.
1414 status = pci_read_config(child, PCIR_STATUS, 2);
/* No capability list at all: nothing to search. */
1415 if (!(status & PCIM_STATUS_CAPPRESENT))
1419 * Determine the start pointer of the capabilities list.
/* The capability pointer register depends on the header type. */
1421 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1422 case PCIM_HDRTYPE_NORMAL:
1423 case PCIM_HDRTYPE_BRIDGE:
1426 case PCIM_HDRTYPE_CARDBUS:
1427 ptr = PCIR_CAP_PTR_2;
1431 return (ENXIO); /* no extended capabilities support */
/* Dereference the capability pointer to get the first entry. */
1433 ptr = pci_read_config(child, ptr, 1);
1436 * Traverse the capabilities list.
1439 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1444 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1451 * Find the requested extended capability and return the offset in
1452 * configuration space via the pointer provided. The function returns
1453 * 0 on success and an error code otherwise.
/*
 * PCI_FIND_EXTCAP() bus method: walk the PCI-express extended
 * capability list looking for 'capability' (the offset-out parameter
 * and the loop framing are elided from this view).
 */
1456 pci_find_extcap_method(device_t dev, device_t child, int capability,
1459 struct pci_devinfo *dinfo = device_get_ivars(child);
1460 pcicfgregs *cfg = &dinfo->cfg;
1464 /* Only supported for PCI-express devices. */
1465 if (cfg->pcie.pcie_location == 0)
1469 ecap = pci_read_config(child, ptr, 4);
/* All-ones or all-zeros header means no extended capability list. */
1470 if (ecap == 0xffffffff || ecap == 0)
1473 if (PCI_EXTCAP_ID(ecap) == capability) {
/* Follow the next-capability pointer embedded in the header. */
1478 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1481 ecap = pci_read_config(child, ptr, 4);
1488 * Support for MSI-X message interrupts.
/*
 * Program one MSI-X table entry: the 64-bit message address is written
 * as two 32-bit halves, followed by the 32-bit message data.
 */
1491 pci_write_msix_entry(device_t dev, u_int index, uint64_t address, uint32_t data)
1493 struct pci_devinfo *dinfo = device_get_ivars(dev);
1494 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1497 KASSERT(msix->msix_table_len > index, ("bogus index"));
/* Each MSI-X table entry occupies 16 bytes. */
1498 offset = msix->msix_table_offset + index * 16;
1499 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1500 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1501 bus_write_4(msix->msix_table_res, offset + 8, data);
/*
 * PCI_ENABLE_MSIX() bus method: program MSI-X table entry 'index' with
 * the given address/data pair.  When pci_msix_rewrite_table is set
 * (needed by some VM hosts), MSI-X is first disabled and the whole
 * table is rewritten via pci_resume_msix() instead of touching a
 * single live entry.  Finally the HT MSI mapping is refreshed.
 */
1505 pci_enable_msix_method(device_t dev, device_t child, u_int index,
1506 uint64_t address, uint32_t data)
1509 if (pci_msix_rewrite_table) {
1510 struct pci_devinfo *dinfo = device_get_ivars(child);
1511 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1514 * Some VM hosts require MSIX to be disabled in the
1515 * control register before updating the MSIX table
1516 * entries are allowed. It is not enough to only
1517 * disable MSIX while updating a single entry. MSIX
1518 * must be disabled while updating all entries in the
/* Disable MSI-X, then rewrite the whole table via pci_resume_msix(). */
1521 pci_write_config(child,
1522 msix->msix_location + PCIR_MSIX_CTRL,
1523 msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2);
1524 pci_resume_msix(child);
/* Common path: write just the requested entry. */
1526 pci_write_msix_entry(child, index, address, data);
1528 /* Enable MSI -> HT mapping. */
1529 pci_ht_map_msi(child, address);
/* Set the per-vector mask bit of MSI-X table entry 'index'. */
1533 pci_mask_msix(device_t dev, u_int index)
1535 struct pci_devinfo *dinfo = device_get_ivars(dev);
1536 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1537 uint32_t offset, val;
/*
 * NOTE(review): this asserts against msix_msgnum while
 * pci_unmask_msix() asserts against msix_table_len — confirm the
 * asymmetry is intentional (masking may be applied to vectors that
 * were never entered into the virtual table).
 */
1539 KASSERT(msix->msix_msgnum > index, ("bogus index"));
/* The vector-control dword is at offset 12 of the 16-byte entry. */
1540 offset = msix->msix_table_offset + index * 16 + 12;
1541 val = bus_read_4(msix->msix_table_res, offset);
/* Avoid the write when the mask bit is already set. */
1542 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1543 val |= PCIM_MSIX_VCTRL_MASK;
1544 bus_write_4(msix->msix_table_res, offset, val);
/* Clear the per-vector mask bit of MSI-X table entry 'index'. */
1549 pci_unmask_msix(device_t dev, u_int index)
1551 struct pci_devinfo *dinfo = device_get_ivars(dev);
1552 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1553 uint32_t offset, val;
1555 KASSERT(msix->msix_table_len > index, ("bogus index"));
/* The vector-control dword is at offset 12 of the 16-byte entry. */
1556 offset = msix->msix_table_offset + index * 16 + 12;
1557 val = bus_read_4(msix->msix_table_res, offset);
/* Avoid the write when the mask bit is already clear. */
1558 if (val & PCIM_MSIX_VCTRL_MASK) {
1559 val &= ~PCIM_MSIX_VCTRL_MASK;
1560 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return non-zero if MSI-X vector 'index' has its bit set in the
 * Pending Bit Array.
 */
1565 pci_pending_msix(device_t dev, u_int index)
1567 struct pci_devinfo *dinfo = device_get_ivars(dev);
1568 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1569 uint32_t offset, bit;
1571 KASSERT(msix->msix_table_len > index, ("bogus index"));
/* One pending bit per vector, packed 32 to a dword. */
1572 offset = msix->msix_pba_offset + (index / 32) * 4;
1573 bit = 1 << index % 32;
1574 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1578 * Restore MSI-X registers and table during resume. If MSI-X is
1579 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * Reprogram the hardware MSI-X table from the cached virtual table:
 * mask every vector, rewrite entries that have active handlers, then
 * restore the saved MSI-X control register.
 */
1583 pci_resume_msix(device_t dev)
1585 struct pci_devinfo *dinfo = device_get_ivars(dev);
1586 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1587 struct msix_table_entry *mte;
1588 struct msix_vector *mv;
1591 if (msix->msix_alloc > 0) {
1592 /* First, mask all vectors. */
1593 for (i = 0; i < msix->msix_msgnum; i++)
1594 pci_mask_msix(dev, i);
1596 /* Second, program any messages with at least one handler. */
1597 for (i = 0; i < msix->msix_table_len; i++) {
1598 mte = &msix->msix_table[i];
/* Skip table slots with no vector or no active handlers. */
1599 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1601 mv = &msix->msix_vectors[mte->mte_vector - 1];
1602 pci_write_msix_entry(dev, i, mv->mv_address,
1604 pci_unmask_msix(dev, i);
/* Finally, restore the saved control register state. */
1607 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1608 msix->msix_ctrl, 2);
1612 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1613 * returned in *count. After this function returns, each message will be
1614 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * PCI_ALLOC_MSIX() bus method: attempt to allocate *count MSI-X
 * messages for 'child'.  Validates preconditions (no rid-0 IRQ, no
 * prior MSI/MSI-X allocation, not blacklisted, capability present,
 * BARs mapped), allocates up to min(*count, msgnum) vectors from the
 * parent bridge, builds the virtual vector/table arrays, and enables
 * MSI-X in the control register.  The actual count is returned via
 * *count (final store elided from this view).
 */
1617 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1619 struct pci_devinfo *dinfo = device_get_ivars(child);
1620 pcicfgregs *cfg = &dinfo->cfg;
1621 struct resource_list_entry *rle;
1622 int actual, error, i, irq, max;
1624 /* Don't let count == 0 get us into trouble. */
1628 /* If rid 0 is allocated, then fail. */
1629 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1630 if (rle != NULL && rle->res != NULL)
1633 /* Already have allocated messages? */
1634 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1637 /* If MSI-X is blacklisted for this system, fail. */
1638 if (pci_msix_blacklisted())
1641 /* MSI-X capability present? */
1642 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1645 /* Make sure the appropriate BARs are mapped. */
1646 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1647 cfg->msix.msix_table_bar);
1648 if (rle == NULL || rle->res == NULL ||
1649 !(rman_get_flags(rle->res) & RF_ACTIVE))
1651 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1652 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1653 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1654 cfg->msix.msix_pba_bar);
1655 if (rle == NULL || rle->res == NULL ||
1656 !(rman_get_flags(rle->res) & RF_ACTIVE))
1659 cfg->msix.msix_pba_res = rle->res;
1662 device_printf(child,
1663 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1664 *count, cfg->msix.msix_msgnum);
/* Never ask the bridge for more vectors than the device supports. */
1665 max = min(*count, cfg->msix.msix_msgnum);
1666 for (i = 0; i < max; i++) {
1667 /* Allocate a message. */
1668 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
/* Record each allocated IRQ as SYS_RES_IRQ rid i + 1. */
1674 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Bootverbose reporting of the allocated IRQ(s). */
1680 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1682 device_printf(child, "using IRQ %ju for MSI-X\n",
1688 * Be fancy and try to print contiguous runs of
1689 * IRQ values as ranges. 'irq' is the previous IRQ.
1690 * 'run' is true if we are in a range.
1692 device_printf(child, "using IRQs %ju", rle->start);
1695 for (i = 1; i < actual; i++) {
1696 rle = resource_list_find(&dinfo->resources,
1697 SYS_RES_IRQ, i + 1);
1699 /* Still in a run? */
1700 if (rle->start == irq + 1) {
1706 /* Finish previous range. */
1712 /* Start new range. */
1713 printf(",%ju", rle->start);
1717 /* Unfinished range? */
1720 printf(" for MSI-X\n");
1724 /* Mask all vectors. */
1725 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1726 pci_mask_msix(child, i);
1728 /* Allocate and initialize vector data and virtual table. */
1729 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1730 M_DEVBUF, M_WAITOK | M_ZERO);
1731 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1732 M_DEVBUF, M_WAITOK | M_ZERO);
/* Initially map message i to vector i + 1 (the identity layout). */
1733 for (i = 0; i < actual; i++) {
1734 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1735 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1736 cfg->msix.msix_table[i].mte_vector = i + 1;
1739 /* Update control register to enable MSI-X. */
1740 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1741 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1742 cfg->msix.msix_ctrl, 2);
1744 /* Update counts of alloc'd messages. */
1745 cfg->msix.msix_alloc = actual;
1746 cfg->msix.msix_table_len = actual;
1752 * By default, pci_alloc_msix() will assign the allocated IRQ
1753 * resources consecutively to the first N messages in the MSI-X table.
1754 * However, device drivers may want to use different layouts if they
1755 * either receive fewer messages than they asked for, or they wish to
1756 * populate the MSI-X table sparsely. This method allows the driver
1757 * to specify what layout it wants. It must be called after a
1758 * successful pci_alloc_msix() but before any of the associated
1759 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1761 * The 'vectors' array contains 'count' message vectors. The array
1762 * maps directly to the MSI-X table in that index 0 in the array
1763 * specifies the vector for the first message in the MSI-X table, etc.
1764 * The vector value in each array index can either be 0 to indicate
1765 * that no vector should be assigned to a message slot, or it can be a
1766 * number from 1 to N (where N is the count returned from a
1767 * successful call to pci_alloc_msix()) to indicate which message
1768 * vector (IRQ) to be used for the corresponding message.
1770 * On successful return, each message with a non-zero vector will have
1771 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1772 * 1. Additionally, if any of the IRQs allocated via the previous
1773 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1774 * will be freed back to the system automatically.
1776 * For example, suppose a driver has a MSI-X table with 6 messages and
1777 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1778 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1779 * C. After the call to pci_alloc_msix(), the device will be setup to
1780 * have an MSI-X table of ABC--- (where - means no vector assigned).
1781 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1782 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1783 * be freed back to the system. This device will also have valid
1784 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1786 * In any case, the SYS_RES_IRQ rid X will always map to the message
1787 * at MSI-X table index X - 1 and will only be valid if a vector is
1788 * assigned to that table entry.
/*
 * PCI_REMAP_MSIX() bus method: re-layout previously allocated MSI-X
 * vectors according to the caller-supplied 'vectors' array (see the
 * block comment above for the mapping semantics).  Validates the
 * request, rebuilds the virtual table, releases vectors that fell out
 * of use, and re-adds SYS_RES_IRQ entries for the new rids.
 */
1791 pci_remap_msix_method(device_t dev, device_t child, int count,
1792 const u_int *vectors)
1794 struct pci_devinfo *dinfo = device_get_ivars(child);
1795 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1796 struct resource_list_entry *rle;
1797 int i, irq, j, *used;
1800 * Have to have at least one message in the table but the
1801 * table can't be bigger than the actual MSI-X table in the
1804 if (count == 0 || count > msix->msix_msgnum)
1807 /* Sanity check the vectors. */
1808 for (i = 0; i < count; i++)
1809 if (vectors[i] > msix->msix_alloc)
1813 * Make sure there aren't any holes in the vectors to be used.
1814 * It's a big pain to support it, and it doesn't really make
1815 * sense anyway. Also, at least one vector must be used.
1817 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
/* Mark which previously allocated vectors the new layout references. */
1819 for (i = 0; i < count; i++)
1820 if (vectors[i] != 0)
1821 used[vectors[i] - 1] = 1;
/* A hole (unused vector followed by a used one) is rejected. */
1822 for (i = 0; i < msix->msix_alloc - 1; i++)
1823 if (used[i] == 0 && used[i + 1] == 1) {
1824 free(used, M_DEVBUF);
1828 free(used, M_DEVBUF);
1832 /* Make sure none of the resources are allocated. */
1833 for (i = 0; i < msix->msix_table_len; i++) {
1834 if (msix->msix_table[i].mte_vector == 0)
/* Cannot remap while handlers are attached or IRQs are in use. */
1836 if (msix->msix_table[i].mte_handlers > 0) {
1837 free(used, M_DEVBUF);
1840 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1841 KASSERT(rle != NULL, ("missing resource"));
1842 if (rle->res != NULL) {
1843 free(used, M_DEVBUF);
1848 /* Free the existing resource list entries. */
1849 for (i = 0; i < msix->msix_table_len; i++) {
1850 if (msix->msix_table[i].mte_vector == 0)
1852 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1856 * Build the new virtual table keeping track of which vectors are
/* Replace the old virtual table with one matching the new layout. */
1859 free(msix->msix_table, M_DEVBUF);
1860 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1861 M_DEVBUF, M_WAITOK | M_ZERO);
1862 for (i = 0; i < count; i++)
1863 msix->msix_table[i].mte_vector = vectors[i];
1864 msix->msix_table_len = count;
1866 /* Free any unused IRQs and resize the vectors array if necessary. */
1867 j = msix->msix_alloc - 1;
1869 struct msix_vector *vec;
/* Release trailing unused vectors back to the parent bridge. */
1871 while (used[j] == 0) {
1872 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1873 msix->msix_vectors[j].mv_irq);
/* Shrink the vectors array down to the surviving j + 1 entries. */
1876 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1878 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1880 free(msix->msix_vectors, M_DEVBUF);
1881 msix->msix_vectors = vec;
1882 msix->msix_alloc = j + 1;
1884 free(used, M_DEVBUF);
1886 /* Map the IRQs onto the rids. */
1887 for (i = 0; i < count; i++) {
1888 if (vectors[i] == 0)
1890 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1891 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Bootverbose report of the final mapping. */
1896 device_printf(child, "Remapped MSI-X IRQs as: ");
1897 for (i = 0; i < count; i++) {
1900 if (vectors[i] == 0)
1904 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child': verify nothing is
 * still in use, disable MSI-X in the control register, delete the
 * SYS_RES_IRQ entries, hand the IRQs back to the parent bridge, and
 * free the virtual table and vector arrays.
 */
1913 pci_release_msix(device_t dev, device_t child)
1915 struct pci_devinfo *dinfo = device_get_ivars(child);
1916 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1917 struct resource_list_entry *rle;
1920 /* Do we have any messages to release? */
1921 if (msix->msix_alloc == 0)
1924 /* Make sure none of the resources are allocated. */
1925 for (i = 0; i < msix->msix_table_len; i++) {
1926 if (msix->msix_table[i].mte_vector == 0)
/* Refuse while handlers are attached or the IRQ is still allocated. */
1928 if (msix->msix_table[i].mte_handlers > 0)
1930 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1931 KASSERT(rle != NULL, ("missing resource"));
1932 if (rle->res != NULL)
1936 /* Update control register to disable MSI-X. */
1937 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1938 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1939 msix->msix_ctrl, 2);
1941 /* Free the resource list entries. */
1942 for (i = 0; i < msix->msix_table_len; i++) {
1943 if (msix->msix_table[i].mte_vector == 0)
1945 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1947 free(msix->msix_table, M_DEVBUF);
1948 msix->msix_table_len = 0;
1950 /* Release the IRQs. */
1951 for (i = 0; i < msix->msix_alloc; i++)
1952 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1953 msix->msix_vectors[i].mv_irq);
1954 free(msix->msix_vectors, M_DEVBUF);
1955 msix->msix_alloc = 0;
1960 * Return the max supported MSI-X messages this device supports.
1961 * Basically, assuming the MD code can alloc messages, this function
1962 * should return the maximum value that pci_alloc_msix() can return.
1963 * Thus, it is subject to the tunables, etc.
/*
 * PCI_MSIX_COUNT() bus method: number of MSI-X messages supported by
 * 'child' (0 when MSI-X is absent or disabled by tunable).
 */
1966 pci_msix_count_method(device_t dev, device_t child)
1968 struct pci_devinfo *dinfo = device_get_ivars(child);
1969 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1971 if (pci_do_msix && msix->msix_location != 0)
1972 return (msix->msix_msgnum);
/*
 * PCI_MSIX_PBA_BAR() bus method: BAR index holding the MSI-X Pending
 * Bit Array (error/sentinel value elided from this view).
 */
1977 pci_msix_pba_bar_method(device_t dev, device_t child)
1979 struct pci_devinfo *dinfo = device_get_ivars(child);
1980 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1982 if (pci_do_msix && msix->msix_location != 0)
1983 return (msix->msix_pba_bar);
/*
 * PCI_MSIX_TABLE_BAR() bus method: BAR index holding the MSI-X vector
 * table (error/sentinel value elided from this view).
 */
1988 pci_msix_table_bar_method(device_t dev, device_t child)
1990 struct pci_devinfo *dinfo = device_get_ivars(child);
1991 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1993 if (pci_do_msix && msix->msix_location != 0)
1994 return (msix->msix_table_bar);
1999 * HyperTransport MSI mapping control
/*
 * Keep the HyperTransport MSI address-window mapping in sync with MSI
 * state: enable the mapping when MSI is being enabled (addr != 0) and
 * the address falls inside the HT MSI window, disable it when MSI is
 * being torn down (addr == 0).  No-op for devices without an HT MSI
 * mapping capability (guard elided from this view).
 */
2002 pci_ht_map_msi(device_t dev, uint64_t addr)
2004 struct pci_devinfo *dinfo = device_get_ivars(dev);
2005 struct pcicfg_ht *ht = &dinfo->cfg.ht;
/* Compare only the upper bits: the window is 1 MB aligned. */
2010 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
2011 ht->ht_msiaddr >> 20 == addr >> 20) {
2012 /* Enable MSI -> HT mapping. */
2013 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
2014 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
2018 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
2019 /* Disable MSI -> HT mapping. */
2020 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
2021 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCIe maximum payload size in bytes (non-PCIe handling
 * elided from this view).
 */
2027 pci_get_max_payload(device_t dev)
2029 struct pci_devinfo *dinfo = device_get_ivars(dev);
2033 cap = dinfo->cfg.pcie.pcie_location;
2036 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2037 val &= PCIEM_CTL_MAX_PAYLOAD;
/* Decode the field: encoded value 0 means 128 bytes, each step doubles.
 * NOTE(review): the shift that right-justifies 'val' is elided here. */
2039 return (1 << (val + 7));
/*
 * Return the PCIe maximum read request size in bytes (non-PCIe
 * handling elided from this view).
 */
2043 pci_get_max_read_req(device_t dev)
2045 struct pci_devinfo *dinfo = device_get_ivars(dev);
2049 cap = dinfo->cfg.pcie.pcie_location;
2052 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2053 val &= PCIEM_CTL_MAX_READ_REQUEST;
/* Decode the field: encoded value 0 means 128 bytes, each step doubles.
 * NOTE(review): the shift that right-justifies 'val' is elided here. */
2055 return (1 << (val + 7));
/*
 * Program the PCIe maximum read request size.  'size' is rounded down
 * to a power of two before encoding (range clamping and the return of
 * the size actually set are elided from this view).
 */
2059 pci_set_max_read_req(device_t dev, int size)
2061 struct pci_devinfo *dinfo = device_get_ivars(dev);
2065 cap = dinfo->cfg.pcie.pcie_location;
/* Round down to a power of 2: keep only the highest set bit. */
2072 size = (1 << (fls(size) - 1));
2073 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2074 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
/* Encode as log2(size) - 7, placed in the field at bit 12. */
2075 val |= (fls(size) - 8) << 12;
2076 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
/*
 * Read a register from the device's PCIe capability block; returns
 * all-ones (like reading nonexistent config space) when the device
 * has no PCIe capability.
 */
2081 pcie_read_config(device_t dev, int reg, int width)
2083 struct pci_devinfo *dinfo = device_get_ivars(dev);
2086 cap = dinfo->cfg.pcie.pcie_location;
/* No PCIe capability: mimic a read of nonexistent config space. */
2090 return (0xffffffff);
2093 return (pci_read_config(dev, cap + reg, width));
/*
 * Write a register in the device's PCIe capability block; silently a
 * no-op when the device has no PCIe capability (guard elided from this
 * view).
 */
2097 pcie_write_config(device_t dev, int reg, uint32_t value, int width)
2099 struct pci_devinfo *dinfo = device_get_ivars(dev);
2102 cap = dinfo->cfg.pcie.pcie_location;
2105 pci_write_config(dev, cap + reg, value, width);
2109 * Adjusts a PCI-e capability register by clearing the bits in mask
2110 * and setting the bits in (value & mask). Bits not set in mask are
2113 * Returns the old value on success or all ones on failure.
/*
 * Read-modify-write a PCIe capability register: clear the bits in
 * 'mask', set the bits in (value & mask), leave the rest untouched.
 * Returns the old value, or all-ones if there is no PCIe capability.
 */
2116 pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
2119 struct pci_devinfo *dinfo = device_get_ivars(dev);
2123 cap = dinfo->cfg.pcie.pcie_location;
/* No PCIe capability: mimic a read of nonexistent config space. */
2127 return (0xffffffff);
2130 old = pci_read_config(dev, cap + reg, width);
/* new = (old & ~mask) | (value & mask); the first half is elided. */
2132 new |= (value & mask);
2133 pci_write_config(dev, cap + reg, new, width);
2138 * Support for MSI message signalled interrupts.
/*
 * PCI_ENABLE_MSI() bus method: program the MSI address/data registers
 * of 'child' and set the MSI enable bit.  The high address dword and
 * the data register location both depend on whether the capability is
 * 64-bit.  Also refreshes the HT MSI mapping.
 */
2141 pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
2144 struct pci_devinfo *dinfo = device_get_ivars(child);
2145 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2147 /* Write data and address values. */
2148 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
2149 address & 0xffffffff, 4);
/* 64-bit capability: data register sits after the high address dword. */
2150 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2151 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
2153 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
2156 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
2159 /* Enable MSI in the control register. */
2160 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
2161 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2164 /* Enable MSI -> HT mapping. */
2165 pci_ht_map_msi(child, address);
/*
 * PCI_DISABLE_MSI() bus method: tear down the HT MSI mapping (address
 * 0 signals disable) and clear the MSI enable bit for 'child'.
 */
2169 pci_disable_msi_method(device_t dev, device_t child)
2171 struct pci_devinfo *dinfo = device_get_ivars(child);
2172 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2174 /* Disable MSI -> HT mapping. */
2175 pci_ht_map_msi(child, 0);
2177 /* Disable MSI in the control register. */
2178 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
2179 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2184 * Restore MSI registers during resume. If MSI is enabled then
2185 * restore the data and address registers in addition to the control
/*
 * Restore MSI configuration after resume.  If MSI was enabled, rewrite
 * the saved address and data registers (handling the 64-bit layout),
 * then restore the control register unconditionally.
 */
2189 pci_resume_msi(device_t dev)
2191 struct pci_devinfo *dinfo = device_get_ivars(dev);
2192 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2196 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
2197 address = msi->msi_addr;
2198 data = msi->msi_data;
2199 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
2200 address & 0xffffffff, 4);
/* 64-bit capability: data register sits after the high address dword. */
2201 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2202 pci_write_config(dev, msi->msi_location +
2203 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
2204 pci_write_config(dev, msi->msi_location +
2205 PCIR_MSI_DATA_64BIT, data, 2);
2207 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* Control register is restored whether or not MSI was enabled. */
2210 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * PCI_REMAP_INTR() bus method: re-route an already-allocated MSI or
 * MSI-X IRQ after the interrupt controller changed its routing.  Looks
 * the IRQ up among the device's MSI then MSI-X allocations, requests a
 * fresh address/data pair from the parent bridge, and reprograms the
 * affected registers/table slots.
 */
2215 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
2217 struct pci_devinfo *dinfo = device_get_ivars(dev);
2218 pcicfgregs *cfg = &dinfo->cfg;
2219 struct resource_list_entry *rle;
2220 struct msix_table_entry *mte;
2221 struct msix_vector *mv;
2227 * Handle MSI first. We try to find this IRQ among our list
2228 * of MSI IRQs. If we find it, we request updated address and
2229 * data registers and apply the results.
2231 if (cfg->msi.msi_alloc > 0) {
2233 /* If we don't have any active handlers, nothing to do. */
2234 if (cfg->msi.msi_handlers == 0)
2236 for (i = 0; i < cfg->msi.msi_alloc; i++) {
2237 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2239 if (rle->start == irq) {
2240 error = PCIB_MAP_MSI(device_get_parent(bus),
2241 dev, irq, &addr, &data);
/* Cycle MSI off and back on so the new address/data take effect. */
2244 pci_disable_msi(dev);
2245 dinfo->cfg.msi.msi_addr = addr;
2246 dinfo->cfg.msi.msi_data = data;
2247 pci_enable_msi(dev, addr, data);
2255 * For MSI-X, we check to see if we have this IRQ. If we do,
2256 * we request the updated mapping info. If that works, we go
2257 * through all the slots that use this IRQ and update them.
2259 if (cfg->msix.msix_alloc > 0) {
2260 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2261 mv = &cfg->msix.msix_vectors[i];
2262 if (mv->mv_irq == irq) {
2263 error = PCIB_MAP_MSI(device_get_parent(bus),
2264 dev, irq, &addr, &data);
2267 mv->mv_address = addr;
/* Rewrite every table slot that references this vector. */
2269 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2270 mte = &cfg->msix.msix_table[j];
2271 if (mte->mte_vector != i + 1)
2273 if (mte->mte_handlers == 0)
/* Mask while rewriting the live entry, then unmask. */
2275 pci_mask_msix(dev, j);
2276 pci_enable_msix(dev, j, addr, data);
2277 pci_unmask_msix(dev, j);
2288 * Returns true if the specified device is blacklisted because MSI
/*
 * Return non-zero if MSI is known broken on this specific device
 * (honoring the pci_honor_msi_blacklist tunable).
 */
2292 pci_msi_device_blacklisted(device_t dev)
2295 if (!pci_honor_msi_blacklist)
2298 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2302 * Determine if MSI is blacklisted globally on this system. Currently,
2303 * we just check for blacklisted chipsets as represented by the
2304 * host-PCI bridge at device 0:0:0. In the future, it may become
2305 * necessary to check other system attributes, such as the kenv values
2306 * that give the motherboard manufacturer and model number.
/*
 * Return non-zero if MSI should be avoided system-wide, judged by the
 * chipset identified via the host-PCI bridge at 0:0:0 (see the block
 * comment above).
 */
2309 pci_msi_blacklisted(void)
2313 if (!pci_honor_msi_blacklist)
2316 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2317 if (!(pcie_chipset || pcix_chipset)) {
2318 if (vm_guest != VM_GUEST_NO) {
2320 * Whitelist older chipsets in virtual
2321 * machines known to support MSI.
2323 dev = pci_find_bsf(0, 0, 0);
2325 return (!pci_has_quirk(pci_get_devid(dev),
2326 PCI_QUIRK_ENABLE_MSI_VM));
/* Otherwise defer to the per-device quirk on the host bridge. */
2331 dev = pci_find_bsf(0, 0, 0);
2333 return (pci_msi_device_blacklisted(dev));
2338 * Returns true if the specified device is blacklisted because MSI-X
2339 * doesn't work. Note that this assumes that if MSI doesn't work,
2340 * MSI-X doesn't either.
/*
 * Return non-zero if MSI-X is known broken on this device.  A device
 * on the MSI blacklist is implicitly on the MSI-X blacklist too.
 */
2343 pci_msix_device_blacklisted(device_t dev)
2346 if (!pci_honor_msi_blacklist)
2349 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2352 return (pci_msi_device_blacklisted(dev));
2356 * Determine if MSI-X is blacklisted globally on this system. If MSI
2357 * is blacklisted, assume that MSI-X is as well. Check for additional
2358 * chipsets where MSI works but MSI-X does not.
/*
 * Return non-zero if MSI-X should be avoided system-wide: either the
 * host bridge carries an MSI-X-specific quirk, or MSI itself is
 * blacklisted (which implies MSI-X is too).
 */
2361 pci_msix_blacklisted(void)
2365 if (!pci_honor_msi_blacklist)
2368 dev = pci_find_bsf(0, 0, 0);
2369 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2370 PCI_QUIRK_DISABLE_MSIX))
2373 return (pci_msi_blacklisted());
2377 * Attempt to allocate *count MSI messages. The actual number allocated is
2378 * returned in *count. After this function returns, each message will be
2379 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * PCI_ALLOC_MSI() bus method: attempt to allocate *count MSI messages
 * for 'child'.  Validates preconditions, rounds the request to the
 * hardware's constraints (power of two, at most 32), obtains the IRQs
 * from the parent bridge, registers them as SYS_RES_IRQ rids starting
 * at 1, and programs the MME field of the MSI control register.  The
 * actual count is returned via *count (final store elided from this
 * view).
 */
2382 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2384 struct pci_devinfo *dinfo = device_get_ivars(child);
2385 pcicfgregs *cfg = &dinfo->cfg;
2386 struct resource_list_entry *rle;
2387 int actual, error, i, irqs[32];
2390 /* Don't let count == 0 get us into trouble. */
2394 /* If rid 0 is allocated, then fail. */
2395 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2396 if (rle != NULL && rle->res != NULL)
2399 /* Already have allocated messages? */
2400 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2403 /* If MSI is blacklisted for this system, fail. */
2404 if (pci_msi_blacklisted())
2407 /* MSI capability present? */
2408 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2412 device_printf(child,
2413 "attempting to allocate %d MSI vectors (%d supported)\n",
2414 *count, cfg->msi.msi_msgnum);
2416 /* Don't ask for more than the device supports. */
2417 actual = min(*count, cfg->msi.msi_msgnum);
2419 /* Don't ask for more than 32 messages. */
2420 actual = min(actual, 32);
2422 /* MSI requires power of 2 number of messages. */
2423 if (!powerof2(actual))
2427 /* Try to allocate N messages. */
2428 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2440 * We now have N actual messages mapped onto SYS_RES_IRQ
2441 * resources in the irqs[] array, so add new resources
2442 * starting at rid 1.
2444 for (i = 0; i < actual; i++)
2445 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2446 irqs[i], irqs[i], 1);
/* Bootverbose reporting of the allocated IRQ(s). */
2450 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2455 * Be fancy and try to print contiguous runs
2456 * of IRQ values as ranges. 'run' is true if
2457 * we are in a range.
2459 device_printf(child, "using IRQs %d", irqs[0]);
2461 for (i = 1; i < actual; i++) {
2463 /* Still in a run? */
2464 if (irqs[i] == irqs[i - 1] + 1) {
2469 /* Finish previous range. */
2471 printf("-%d", irqs[i - 1]);
2475 /* Start new range. */
2476 printf(",%d", irqs[i]);
2479 /* Unfinished range? */
2481 printf("-%d", irqs[actual - 1]);
2482 printf(" for MSI\n");
2486 /* Update control register with actual count. */
2487 ctrl = cfg->msi.msi_ctrl;
2488 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* MME field encodes log2(actual) in bits 6:4. */
2489 ctrl |= (ffs(actual) - 1) << 4;
2490 cfg->msi.msi_ctrl = ctrl;
2491 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2493 /* Update counts of alloc'd messages. */
2494 cfg->msi.msi_alloc = actual;
2495 cfg->msi.msi_handlers = 0;
2500 /* Release the MSI messages associated with this device. */
/* Release the MSI messages associated with this device. */
2502 pci_release_msi_method(device_t dev, device_t child)
2504 struct pci_devinfo *dinfo = device_get_ivars(child);
2505 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2506 struct resource_list_entry *rle;
2507 int error, i, irqs[32];
2509 /* Try MSI-X first. */
/* If the device held MSI-X (anything but ENODEV), that path handled it. */
2510 error = pci_release_msix(dev, child);
2511 if (error != ENODEV)
2514 /* Do we have any messages to release? */
2515 if (msi->msi_alloc == 0)
2517 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2519 /* Make sure none of the resources are allocated. */
2520 if (msi->msi_handlers > 0)
2522 for (i = 0; i < msi->msi_alloc; i++) {
2523 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2524 KASSERT(rle != NULL, ("missing MSI resource"));
2525 if (rle->res != NULL)
/* Collect the IRQ numbers so they can be handed back below. */
2527 irqs[i] = rle->start;
2530 /* Update control register with 0 count. */
2531 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2532 ("%s: MSI still enabled", __func__));
2533 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2534 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2537 /* Release the messages. */
2538 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2539 for (i = 0; i < msi->msi_alloc; i++)
2540 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2542 /* Update alloc count. */
2550 * Return the max supported MSI messages this device supports.
2551 * Basically, assuming the MD code can alloc messages, this function
2552 * should return the maximum value that pci_alloc_msi() can return.
2553 * Thus, it is subject to the tunables, etc.
/*
 * PCI_MSI_COUNT() bus method: number of MSI messages supported by
 * 'child' (0 when MSI is absent or disabled by tunable).
 */
2556 pci_msi_count_method(device_t dev, device_t child)
2558 struct pci_devinfo *dinfo = device_get_ivars(child);
2559 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2561 if (pci_do_msi && msi->msi_location != 0)
2562 return (msi->msi_msgnum);
2566 /* free pcicfgregs structure and all depending data structures */
/*
 * Free a pci_devinfo and everything hanging off it: cached VPD strings
 * and arrays, the BAR map list, and the devinfo itself; unlink it from
 * the global device list and bump the generation count.
 */
2569 pci_freecfg(struct pci_devinfo *dinfo)
2571 struct devlist *devlist_head;
2572 struct pci_map *pm, *next;
2575 devlist_head = &pci_devq;
/* Only devices with a VPD capability ever allocated VPD storage. */
2577 if (dinfo->cfg.vpd.vpd_reg) {
2578 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2579 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2580 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2581 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2582 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2583 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2584 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* Free each BAR map entry (SAFE variant: entries freed during walk). */
2586 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2589 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2590 free(dinfo, M_DEVBUF);
2592 /* increment the generation count */
2595 /* we're losing one device */
2601 * PCI power management
/* Move 'child' to PCI power state 'state' (D0-D3) via its PM capability.
 * Returns EOPNOTSUPP if the device lacks the capability or the requested
 * state. */
2604 pci_set_powerstate_method(device_t dev, device_t child, int state)
2606 struct pci_devinfo *dinfo = device_get_ivars(child);
2607 pcicfgregs *cfg = &dinfo->cfg;
2609 int oldstate, highest, delay;
2611 if (cfg->pp.pp_cap == 0)
2612 return (EOPNOTSUPP);
2615 * Optimize a no state change request away. While it would be OK to
2616 * write to the hardware in theory, some devices have shown odd
2617 * behavior when going from D3 -> D3.
2619 oldstate = pci_get_powerstate(child);
2620 if (oldstate == state)
2624 * The PCI power management specification states that after a state
2625 * transition between PCI power states, system software must
2626 * guarantee a minimal delay before the function accesses the device.
2627 * Compute the worst case delay that we need to guarantee before we
2628 * access the device. Many devices will be responsive much more
2629 * quickly than this delay, but there are some that don't respond
2630 * instantly to state changes. Transitions to/from D3 state require
2631 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2632 * is done below with DELAY rather than a sleeper function because
2633 * this function can be called from contexts where we cannot sleep.
2635 highest = (oldstate > state) ? oldstate : state;
2636 if (highest == PCI_POWERSTATE_D3)
2638 else if (highest == PCI_POWERSTATE_D2)
/* Read-modify-write the PM status register: clear the state field,
 * then set the bits for the requested state. */
2642 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2643 & ~PCIM_PSTAT_DMASK;
2645 case PCI_POWERSTATE_D0:
2646 status |= PCIM_PSTAT_D0;
2648 case PCI_POWERSTATE_D1:
/* D1 and D2 are optional states; refuse if the capability
 * register does not advertise them. */
2649 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2650 return (EOPNOTSUPP);
2651 status |= PCIM_PSTAT_D1;
2653 case PCI_POWERSTATE_D2:
2654 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2655 return (EOPNOTSUPP);
2656 status |= PCIM_PSTAT_D2;
2658 case PCI_POWERSTATE_D3:
2659 status |= PCIM_PSTAT_D3;
2666 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2669 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/* Return the current PCI power state (D0-D3) of 'child' as read from its
 * PM status register; devices without a PM capability are reported as D0. */
2676 pci_get_powerstate_method(device_t dev, device_t child)
2678 struct pci_devinfo *dinfo = device_get_ivars(child);
2679 pcicfgregs *cfg = &dinfo->cfg;
2683 if (cfg->pp.pp_cap != 0) {
2684 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2685 switch (status & PCIM_PSTAT_DMASK) {
2687 result = PCI_POWERSTATE_D0;
2690 result = PCI_POWERSTATE_D1;
2693 result = PCI_POWERSTATE_D2;
2696 result = PCI_POWERSTATE_D3;
2699 result = PCI_POWERSTATE_UNKNOWN;
2703 /* No support, device is always at D0 */
2704 result = PCI_POWERSTATE_D0;
2710 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the child's PCI command register. */
2713 static __inline void
2714 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2718 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2720 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the child's PCI command register. */
2723 static __inline void
2724 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2728 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2730 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable DMA (bus mastering) for the child device. */
2734 pci_enable_busmaster_method(device_t dev, device_t child)
2736 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable DMA (bus mastering) for the child device. */
2741 pci_disable_busmaster_method(device_t dev, device_t child)
2743 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Enable I/O-port or memory decoding for the child, selected by 'space'
 * (SYS_RES_IOPORT or SYS_RES_MEMORY). */
2748 pci_enable_io_method(device_t dev, device_t child, int space)
2753 case SYS_RES_IOPORT:
2754 bit = PCIM_CMD_PORTEN;
2756 case SYS_RES_MEMORY:
2757 bit = PCIM_CMD_MEMEN;
2762 pci_set_command_bit(dev, child, bit);
/* Disable I/O-port or memory decoding for the child, selected by 'space'. */
2767 pci_disable_io_method(device_t dev, device_t child, int space)
2772 case SYS_RES_IOPORT:
2773 bit = PCIM_CMD_PORTEN;
2775 case SYS_RES_MEMORY:
2776 bit = PCIM_CMD_MEMEN;
2781 pci_clear_command_bit(dev, child, bit);
2786 * New style pci driver. Parent device is either a pci-host-bridge or a
2787 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/* Dump a human-readable summary of a newly found device's config header:
 * IDs, location, class, command/status, timing, interrupt, and the PM,
 * MSI, and MSI-X capabilities when present. */
2791 pci_print_verbose(struct pci_devinfo *dinfo)
2795 pcicfgregs *cfg = &dinfo->cfg;
2797 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2798 cfg->vendor, cfg->device, cfg->revid);
2799 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2800 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2801 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2802 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2804 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2805 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2806 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2807 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2808 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2809 if (cfg->intpin > 0)
2810 printf("\tintpin=%c, irq=%d\n",
/* intpin is 1-based (1 = INTA), hence the +'a'-1 conversion. */
2811 cfg->intpin +'a' -1, cfg->intline);
2812 if (cfg->pp.pp_cap) {
2815 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2816 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2817 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2818 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2819 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2820 status & PCIM_PSTAT_DMASK);
2822 if (cfg->msi.msi_location) {
2825 ctrl = cfg->msi.msi_ctrl;
2826 printf("\tMSI supports %d message%s%s%s\n",
2827 cfg->msi.msi_msgnum,
2828 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2829 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2830 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2832 if (cfg->msix.msix_location) {
2833 printf("\tMSI-X supports %d message%s ",
2834 cfg->msix.msix_msgnum,
2835 (cfg->msix.msix_msgnum == 1) ? "" : "s");
/* The MSI-X table and pending-bit array may share one BAR or live
 * in two distinct BARs; report accordingly. */
2836 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2837 printf("in map 0x%x\n",
2838 cfg->msix.msix_table_bar);
2840 printf("in maps 0x%x and 0x%x\n",
2841 cfg->msix.msix_table_bar,
2842 cfg->msix.msix_pba_bar);
/* True if I/O-port decoding is currently enabled in the command register. */
2848 pci_porten(device_t dev)
2850 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* True if memory decoding is currently enabled in the command register. */
2854 pci_memen(device_t dev)
2856 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/* Probe one BAR: return its current value in *mapp and its size-probe
 * readback (all-ones write) in *testvalp; *bar64 (if non-NULL) is set when
 * the BAR is a 64-bit memory BAR occupying two registers. */
2860 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp,
2863 struct pci_devinfo *dinfo;
2864 pci_addr_t map, testval;
2869 * The device ROM BAR is special. It is always a 32-bit
2870 * memory BAR. Bit 0 is special and should not be set when
2873 dinfo = device_get_ivars(dev);
2874 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2875 map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps the ROM-enable bit (bit 0) clear during sizing. */
2876 pci_write_config(dev, reg, 0xfffffffe, 4);
2877 testval = pci_read_config(dev, reg, 4);
2878 pci_write_config(dev, reg, map, 4);
2880 *testvalp = testval;
2886 map = pci_read_config(dev, reg, 4);
2887 ln2range = pci_maprange(map);
/* For a 64-bit BAR the upper half lives in the next dword register. */
2889 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2892 * Disable decoding via the command register before
2893 * determining the BAR's length since we will be placing it in
2896 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2897 pci_write_config(dev, PCIR_COMMAND,
2898 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2901 * Determine the BAR's length by writing all 1's. The bottom
2902 * log_2(size) bits of the BAR will stick as 0 when we read
2905 pci_write_config(dev, reg, 0xffffffff, 4);
2906 testval = pci_read_config(dev, reg, 4);
2907 if (ln2range == 64) {
2908 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2909 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2913 * Restore the original value of the BAR. We may have reprogrammed
2914 * the BAR of the low-level console device and when booting verbose,
2915 * we need the console device addressable.
2917 pci_write_config(dev, reg, map, 4);
2919 pci_write_config(dev, reg + 4, map >> 32, 4);
2920 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2923 *testvalp = testval;
2925 *bar64 = (ln2range == 64);
/* Program a BAR with 'base' (both halves for 64-bit BARs) and refresh the
 * cached pm_value from what the hardware actually accepted. */
2929 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2931 struct pci_devinfo *dinfo;
2934 /* The device ROM BAR is always a 32-bit memory BAR. */
2935 dinfo = device_get_ivars(dev);
2936 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2939 ln2range = pci_maprange(pm->pm_value);
2940 pci_write_config(dev, pm->pm_reg, base, 4);
2942 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
/* Re-read so pm_value reflects bits the device hardwires to zero. */
2943 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2945 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2946 pm->pm_reg + 4, 4) << 32;
/* Look up the recorded pci_map entry for config register 'reg', or NULL. */
2950 pci_find_bar(device_t dev, int reg)
2952 struct pci_devinfo *dinfo;
2955 dinfo = device_get_ivars(dev);
2956 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2957 if (pm->pm_reg == reg)
/* True if decoding for this BAR is currently active: the ROM BAR also needs
 * its own enable bit; otherwise memory/port enable in the command register
 * decides, based on the BAR's type. */
2964 pci_bar_enabled(device_t dev, struct pci_map *pm)
2966 struct pci_devinfo *dinfo;
2969 dinfo = device_get_ivars(dev);
2970 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2971 !(pm->pm_value & PCIM_BIOS_ENABLE))
2973 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2974 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2975 return ((cmd & PCIM_CMD_MEMEN) != 0);
2977 return ((cmd & PCIM_CMD_PORTEN) != 0);
/* Record a newly probed BAR in the device's map list, kept sorted by
 * config-register offset. Returns the new entry. */
2981 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2983 struct pci_devinfo *dinfo;
2984 struct pci_map *pm, *prev;
2986 dinfo = device_get_ivars(dev);
2987 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2989 pm->pm_value = value;
/* Walk the sorted list to find the insertion point after 'prev'. */
2991 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2992 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2994 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2995 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2999 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
3001 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/* Re-program every recorded BAR from its cached value, e.g. after a device
 * has been powered down and lost its configuration. */
3006 pci_restore_bars(device_t dev)
3008 struct pci_devinfo *dinfo;
3012 dinfo = device_get_ivars(dev);
3013 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
3014 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
3017 ln2range = pci_maprange(pm->pm_value);
3018 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
3020 pci_write_config(dev, pm->pm_reg + 4,
3021 pm->pm_value >> 32, 4);
3026 * Add a resource based on a pci map register. Return 1 if the map
3027 * register is a 32bit map register or 2 if it is a 64bit register.
/* Probe one BAR, record it, add it to the device's resource list and try to
 * reserve it from the parent bus. Returns the number of BAR registers
 * consumed (1 for 32-bit, 2 for 64-bit). */
3030 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
3031 int force, int prefetch)
3034 pci_addr_t base, map, testval;
3035 pci_addr_t start, end, count;
3036 int barlen, basezero, flags, maprange, mapsize, type;
3038 struct resource *res;
3041 * The BAR may already exist if the device is a CardBus card
3042 * whose CIS is stored in this BAR.
3044 pm = pci_find_bar(dev, reg);
3046 maprange = pci_maprange(pm->pm_value);
3047 barlen = maprange == 64 ? 2 : 1;
3051 pci_read_bar(dev, reg, &map, &testval, NULL);
3052 if (PCI_BAR_MEM(map)) {
3053 type = SYS_RES_MEMORY;
3054 if (map & PCIM_BAR_MEM_PREFETCH)
3057 type = SYS_RES_IOPORT;
3058 mapsize = pci_mapsize(testval);
3059 base = pci_mapbase(map);
3060 #ifdef __PCI_BAR_ZERO_VALID
3063 basezero = base == 0;
3065 maprange = pci_maprange(map);
3066 barlen = maprange == 64 ? 2 : 1;
3069 * For I/O registers, if bottom bit is set, and the next bit up
3070 * isn't clear, we know we have a BAR that doesn't conform to the
3071 * spec, so ignore it. Also, sanity check the size of the data
3072 * areas to the type of memory involved. Memory must be at least
3073 * 16 bytes in size, while I/O ranges must be at least 4.
3075 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
3077 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
3078 (type == SYS_RES_IOPORT && mapsize < 2))
3081 /* Save a record of this BAR. */
3082 pm = pci_add_bar(dev, reg, map, mapsize);
3084 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
3085 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
3086 if (type == SYS_RES_IOPORT && !pci_porten(dev))
3087 printf(", port disabled\n");
3088 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
3089 printf(", memory disabled\n");
3091 printf(", enabled\n");
3095 * If base is 0, then we have problems if this architecture does
3096 * not allow that. It is best to ignore such entries for the
3097 * moment. These will be allocated later if the driver specifically
3098 * requests them. However, some removable busses look better when
3099 * all resources are allocated, so allow '0' to be overridden.
3101 * Similarly treat maps whose value is the same as the test value
3102 * read back. These maps have had all f's written to them by the
3103 * BIOS in an attempt to disable the resources.
3105 if (!force && (basezero || map == testval))
3107 if ((u_long)base != base) {
3109 "pci%d:%d:%d:%d bar %#x too many address bits",
3110 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
3111 pci_get_function(dev), reg);
3116 * This code theoretically does the right thing, but has
3117 * undesirable side effects in some cases where peripherals
3118 * respond oddly to having these bits enabled. Let the user
3119 * be able to turn them off (since pci_enable_io_modes is 1 by
3122 if (pci_enable_io_modes) {
3123 /* Turn on resources that have been left off by a lazy BIOS */
3124 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
3125 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3126 cmd |= PCIM_CMD_PORTEN;
3127 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3129 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
3130 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3131 cmd |= PCIM_CMD_MEMEN;
3132 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3135 if (type == SYS_RES_IOPORT && !pci_porten(dev))
3137 if (type == SYS_RES_MEMORY && !pci_memen(dev))
3141 count = (pci_addr_t)1 << mapsize;
3142 flags = RF_ALIGNMENT_LOG2(mapsize);
3144 flags |= RF_PREFETCHABLE;
3145 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
3146 start = 0; /* Let the parent decide. */
3150 end = base + count - 1;
3152 resource_list_add(rl, type, reg, start, end, count);
3155 * Try to allocate the resource for this BAR from our parent
3156 * so that this resource range is already reserved. The
3157 * driver for this device will later inherit this resource in
3158 * pci_alloc_resource().
3160 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
3162 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0)) {
3164 * If the allocation fails, try to allocate a resource for
3165 * this BAR using any available range. The firmware felt
3166 * it was important enough to assign a resource, so don't
3167 * disable decoding if we can help it.
3169 resource_list_delete(rl, type, reg);
3170 resource_list_add(rl, type, reg, 0, ~0, count);
3171 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0,
3176 * If the allocation fails, delete the resource list entry
3177 * and disable decoding for this device.
3179 * If the driver requests this resource in the future,
3180 * pci_reserve_map() will try to allocate a fresh
3183 resource_list_delete(rl, type, reg);
3184 pci_disable_io(dev, type);
3187 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
3188 pci_get_domain(dev), pci_get_bus(dev),
3189 pci_get_slot(dev), pci_get_function(dev), reg);
3191 start = rman_get_start(res);
3192 pci_write_bar(dev, pm, start);
3198 * For ATA devices we need to decide early what addressing mode to use.
3199 * Legacy demands that the primary and secondary ATA ports sit on the
3200 * same addresses that old ISA hardware did. This dictates that we use
3201 * those addresses and ignore the BARs if we cannot set PCI native
/* Assign resources for an IDE controller: prefer PCI native mode when the
 * progif says it is switchable; otherwise reserve the legacy ISA-compatible
 * port ranges (0x1f0/0x3f6 primary, 0x170/0x376 secondary). BAR(4)/BAR(5)
 * (bus-master DMA) are always probed normally. */
3205 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
3206 uint32_t prefetchmask)
3208 int rid, type, progif;
3210 /* if this device supports PCI native addressing use it */
3211 progif = pci_read_config(dev, PCIR_PROGIF, 1);
/* 0x8a: both channels report "mode switchable" capability bits. */
3212 if ((progif & 0x8a) == 0x8a) {
3213 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
3214 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
3215 printf("Trying ATA native PCI addressing mode\n");
3216 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
3220 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3221 type = SYS_RES_IOPORT;
3222 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
3223 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
3224 prefetchmask & (1 << 0));
3225 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
3226 prefetchmask & (1 << 1));
/* Primary channel in compatibility mode: fixed legacy ports. */
3229 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
3230 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
3233 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
3234 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
3237 if (progif & PCIP_STORAGE_IDE_MODESEC) {
3238 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3239 prefetchmask & (1 << 2));
3240 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3241 prefetchmask & (1 << 3));
/* Secondary channel in compatibility mode: fixed legacy ports. */
3244 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3245 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3248 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3249 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
3252 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3253 prefetchmask & (1 << 4));
3254 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3255 prefetchmask & (1 << 5));
/* Determine the legacy INTx IRQ for a device — tunable override first, then
 * the intline register or PCIB routing — and add it as IRQ rid 0. */
3259 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3261 struct pci_devinfo *dinfo = device_get_ivars(dev);
3262 pcicfgregs *cfg = &dinfo->cfg;
3263 char tunable_name[64];
3266 /* Has to have an intpin to have an interrupt. */
3267 if (cfg->intpin == 0)
3270 /* Let the user override the IRQ with a tunable. */
3271 irq = PCI_INVALID_IRQ;
3272 snprintf(tunable_name, sizeof(tunable_name),
3273 "hw.pci%d.%d.%d.INT%c.irq",
3274 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject tunable values outside the valid 1..254 IRQ range. */
3275 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3276 irq = PCI_INVALID_IRQ;
3279 * If we didn't get an IRQ via the tunable, then we either use the
3280 * IRQ value in the intline register or we ask the bus to route an
3281 * interrupt for us. If force_route is true, then we only use the
3282 * value in the intline register if the bus was unable to assign an
3285 if (!PCI_INTERRUPT_VALID(irq)) {
3286 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3287 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3288 if (!PCI_INTERRUPT_VALID(irq))
3292 /* If after all that we don't have an IRQ, just bail. */
3293 if (!PCI_INTERRUPT_VALID(irq))
3296 /* Update the config register if it changed. */
3297 if (irq != cfg->intline) {
3299 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3302 /* Add this IRQ as rid 0 interrupt resource. */
3303 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3306 /* Perform early OHCI takeover from SMM. */
/* Take OHCI controller ownership away from BIOS/SMM: request an ownership
 * change, poll for the handoff, reset the controller if SMM never responds,
 * then mask all interrupts. */
3308 ohci_early_takeover(device_t self)
3310 struct resource *res;
3316 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3320 ctl = bus_read_4(res, OHCI_CONTROL);
3321 if (ctl & OHCI_IR) {
3323 printf("ohci early: "
3324 "SMM active, request owner change\n");
3325 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
/* Poll up to 100 iterations for SMM to drop the InterruptRouting bit. */
3326 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3328 ctl = bus_read_4(res, OHCI_CONTROL);
3330 if (ctl & OHCI_IR) {
3332 printf("ohci early: "
3333 "SMM does not respond, resetting\n");
3334 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3336 /* Disable interrupts */
3337 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3340 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3343 /* Perform early UHCI takeover from SMM. */
/* Take UHCI controller ownership away from BIOS: disable legacy keyboard/
 * mouse emulation via the LEGSUP register, then mask the controller's
 * interrupts through its I/O BAR. */
3345 uhci_early_takeover(device_t self)
3347 struct resource *res;
3351 * Set the PIRQD enable bit and switch off all the others. We don't
3352 * want legacy support to interfere with us XXX Does this also mean
3353 * that the BIOS won't touch the keyboard anymore if it is connected
3354 * to the ports of the root hub?
3356 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3358 /* Disable interrupts */
3359 rid = PCI_UHCI_BASE_REG;
3360 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3362 bus_write_2(res, UHCI_INTR, 0);
3363 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3367 /* Perform early EHCI takeover from SMM. */
/* Take EHCI controller ownership away from BIOS/SMM: walk the extended
 * capability list for the USB legacy-support capability, set the OS
 * ownership semaphore, wait for the BIOS semaphore to clear, then mask
 * interrupts in the operational registers. */
3369 ehci_early_takeover(device_t self)
3371 struct resource *res;
3381 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3385 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3387 /* Synchronise with the BIOS if it owns the controller. */
3388 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3389 eecp = EHCI_EECP_NEXT(eec)) {
/* EHCI extended capabilities live in PCI config space. */
3390 eec = pci_read_config(self, eecp, 4);
3391 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3394 bios_sem = pci_read_config(self, eecp +
3395 EHCI_LEGSUP_BIOS_SEM, 1);
3396 if (bios_sem == 0) {
3400 printf("ehci early: "
3401 "SMM active, request owner change\n");
3403 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
/* Poll up to 100 iterations for the BIOS semaphore to clear. */
3405 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3407 bios_sem = pci_read_config(self, eecp +
3408 EHCI_LEGSUP_BIOS_SEM, 1);
3411 if (bios_sem != 0) {
3413 printf("ehci early: "
3414 "SMM does not respond\n");
3416 /* Disable interrupts */
3417 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3418 bus_write_4(res, offs + EHCI_USBINTR, 0);
3420 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3423 /* Perform early XHCI takeover from SMM. */
/* Take XHCI controller ownership away from BIOS/SMM: walk the extended
 * capability list in MMIO space for the USB legacy-support capability, set
 * the OS semaphore, wait for BIOS to release, then stop the controller and
 * mask interrupts via USBCMD. */
3425 xhci_early_takeover(device_t self)
3427 struct resource *res;
3437 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3441 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3445 /* Synchronise with the BIOS if it owns the controller. */
/* Unlike EHCI, the xHCI extended capability list is in MMIO space
 * and offsets are in dwords (hence the << 2). */
3446 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3447 eecp += XHCI_XECP_NEXT(eec) << 2) {
3448 eec = bus_read_4(res, eecp);
3450 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3453 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3458 printf("xhci early: "
3459 "SMM active, request owner change\n");
3461 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3463 /* wait a maximum of 5 second */
3465 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3467 bios_sem = bus_read_1(res, eecp +
3468 XHCI_XECP_BIOS_SEM);
3471 if (bios_sem != 0) {
3473 printf("xhci early: "
3474 "SMM does not respond\n");
3477 /* Disable interrupts */
3478 offs = bus_read_1(res, XHCI_CAPLENGTH);
3479 bus_write_4(res, offs + XHCI_USBCMD, 0);
3480 bus_read_4(res, offs + XHCI_USBSTS);
3482 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3485 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/* For a PCI-PCI or CardBus bridge, read the secondary/subordinate bus
 * registers and try to reserve that bus-number range from the parent;
 * on failure (or when pci_clear_buses forces renumbering) the registers
 * are zeroed. Includes quirks for Intel Orion and Compal/Toshiba BIOSes. */
3487 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3488 struct resource_list *rl)
3490 struct resource *res;
3492 rman_res_t start, end, count;
3493 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
/* Header type selects which config offsets hold the bus numbers. */
3495 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3496 case PCIM_HDRTYPE_BRIDGE:
3497 sec_reg = PCIR_SECBUS_1;
3498 sub_reg = PCIR_SUBBUS_1;
3500 case PCIM_HDRTYPE_CARDBUS:
3501 sec_reg = PCIR_SECBUS_2;
3502 sub_reg = PCIR_SUBBUS_2;
3509 * If the existing bus range is valid, attempt to reserve it
3510 * from our parent. If this fails for any reason, clear the
3511 * secbus and subbus registers.
3513 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3514 * This would at least preserve the existing sec_bus if it is
3517 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3518 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3520 /* Quirk handling. */
3521 switch (pci_get_devid(dev)) {
3522 case 0x12258086: /* Intel 82454KX/GX (Orion) */
3523 sup_bus = pci_read_config(dev, 0x41, 1);
3524 if (sup_bus != 0xff) {
3525 sec_bus = sup_bus + 1;
3526 sub_bus = sup_bus + 1;
3527 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3528 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3533 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
/* Apply this quirk only on Compal-built "08A0" planars. */
3534 if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
3536 if (strncmp(cp, "Compal", 6) != 0) {
3541 if ((cp = kern_getenv("smbios.planar.product")) == NULL)
3543 if (strncmp(cp, "08A0", 4) != 0) {
3548 if (sub_bus < 0xa) {
3550 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3556 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
3557 if (sec_bus > 0 && sub_bus >= sec_bus) {
3560 count = end - start + 1;
3562 resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count);
3565 * If requested, clear secondary bus registers in
3566 * bridge devices to force a complete renumbering
3567 * rather than reserving the existing range. However,
3568 * preserve the existing size.
3570 if (pci_clear_buses)
3574 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3575 start, end, count, 0);
3581 "pci%d:%d:%d:%d secbus failed to allocate\n",
3582 pci_get_domain(dev), pci_get_bus(dev),
3583 pci_get_slot(dev), pci_get_function(dev));
/* Reservation failed: zero the registers so renumbering happens. */
3587 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3588 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
/* Allocate a range of bus numbers for a bridge child. Lazily reserves the
 * range from the parent on first use and programs the child's secondary/
 * subordinate bus registers to match the reservation. */
3591 static struct resource *
3592 pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start,
3593 rman_res_t end, rman_res_t count, u_int flags)
3595 struct pci_devinfo *dinfo;
3597 struct resource_list *rl;
3598 struct resource *res;
3599 int sec_reg, sub_reg;
3601 dinfo = device_get_ivars(child);
3603 rl = &dinfo->resources;
3604 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3605 case PCIM_HDRTYPE_BRIDGE:
3606 sec_reg = PCIR_SECBUS_1;
3607 sub_reg = PCIR_SUBBUS_1;
3609 case PCIM_HDRTYPE_CARDBUS:
3610 sec_reg = PCIR_SECBUS_2;
3611 sub_reg = PCIR_SUBBUS_2;
3620 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3621 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
/* First request: reserve the range now and program the bridge. */
3622 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
3623 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3624 start, end, count, flags & ~RF_ACTIVE);
3626 resource_list_delete(rl, PCI_RES_BUS, *rid);
3627 device_printf(child, "allocating %ju bus%s failed\n",
3628 count, count == 1 ? "" : "es");
3632 device_printf(child,
3633 "Lazy allocation of %ju bus%s at %ju\n", count,
3634 count == 1 ? "" : "es", rman_get_start(res));
3635 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3636 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3638 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3639 end, count, flags));
/* Translate an Enhanced Allocation BAR Equivalent Indicator (BEI) to a
 * resource rid: a plain BAR offset, the ROM register, or (with SR-IOV
 * configured) a VF BAR offset relative to the IOV capability. */
3644 pci_ea_bei_to_rid(device_t dev, int bei)
3647 struct pci_devinfo *dinfo;
3649 struct pcicfg_iov *iov;
3651 dinfo = device_get_ivars(dev);
3652 iov = dinfo->cfg.iov;
3654 iov_pos = iov->iov_pos;
3659 /* Check if matches BAR */
3660 if ((bei >= PCIM_EA_BEI_BAR_0) &&
3661 (bei <= PCIM_EA_BEI_BAR_5))
3662 return (PCIR_BAR(bei));
3665 if (bei == PCIM_EA_BEI_ROM)
3669 /* Check if matches VF_BAR */
3670 if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) &&
3671 (bei <= PCIM_EA_BEI_VF_BAR_5))
3672 return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) +
/* True if the Enhanced Allocation entry corresponding to 'rid' exists and
 * has its enable flag set. */
3680 pci_ea_is_enabled(device_t dev, int rid)
3682 struct pci_ea_entry *ea;
3683 struct pci_devinfo *dinfo;
3685 dinfo = device_get_ivars(dev);
3687 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
3688 if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid)
3689 return ((ea->eae_flags & PCIM_EA_ENABLE) > 0);
/* Add and reserve resources described by the device's Enhanced Allocation
 * capability. With alloc_iov != 0 only VF (SR-IOV) entries are handled,
 * otherwise only regular BAR/ROM entries. Entries that cannot be reserved
 * are disabled so the legacy BAR path can try instead. */
3696 pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov)
3698 struct pci_ea_entry *ea;
3699 struct pci_devinfo *dinfo;
3700 pci_addr_t start, end, count;
3701 struct resource_list *rl;
3702 int type, flags, rid;
3703 struct resource *res;
3706 struct pcicfg_iov *iov;
3709 dinfo = device_get_ivars(dev);
3710 rl = &dinfo->resources;
3714 iov = dinfo->cfg.iov;
/* No EA capability: nothing to do. */
3717 if (dinfo->cfg.ea.ea_location == 0)
3720 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
3723 * TODO: Ignore EA-BAR if is not enabled.
3724 * Currently the EA implementation supports
3725 * only situation, where EA structure contains
3726 * predefined entries. In case they are not enabled
3727 * leave them unallocated and proceed with
3728 * a legacy-BAR mechanism.
3730 if ((ea->eae_flags & PCIM_EA_ENABLE) == 0)
/* Map the entry's resource-properties field to type/flags. */
3733 switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) {
3734 case PCIM_EA_P_MEM_PREFETCH:
3735 case PCIM_EA_P_VF_MEM_PREFETCH:
3736 flags = RF_PREFETCHABLE;
3738 case PCIM_EA_P_VF_MEM:
3740 type = SYS_RES_MEMORY;
3743 type = SYS_RES_IOPORT;
3749 if (alloc_iov != 0) {
3751 /* Allocating IOV, confirm BEI matches */
3752 if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) ||
3753 (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5))
3759 /* Allocating BAR, confirm BEI matches */
3760 if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) ||
3761 (ea->eae_bei > PCIM_EA_BEI_BAR_5)) &&
3762 (ea->eae_bei != PCIM_EA_BEI_ROM))
3766 rid = pci_ea_bei_to_rid(dev, ea->eae_bei);
3770 /* Skip resources already allocated by EA */
3771 if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) ||
3772 (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL))
3775 start = ea->eae_base;
3776 count = ea->eae_max_offset + 1;
/* A VF entry describes one VF; scale by the number of VFs. */
3779 count = count * iov->iov_num_vfs;
3781 end = start + count - 1;
3785 resource_list_add(rl, type, rid, start, end, count);
3786 res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count,
3789 resource_list_delete(rl, type, rid);
3792 * Failed to allocate using EA, disable entry.
3793 * Another attempt to allocation will be performed
3794 * further, but this time using legacy BAR registers
3796 tmp = pci_read_config(dev, ea->eae_cfg_offset, 4);
3797 tmp &= ~PCIM_EA_ENABLE;
3798 pci_write_config(dev, ea->eae_cfg_offset, tmp, 4);
3801 * Disabling entry might fail in case it is hardwired.
3802 * Read flags again to match current status.
3804 ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4);
3809 /* As per specification, fill BAR with zeros */
3810 pci_write_config(dev, rid, 0, 4);
/* Populate a new child's resource list: EA entries first, then BARs (with
 * special handling for legacy IDE and quirked registers), then the INTx
 * interrupt, USB BIOS handoff, and (with NEW_PCIB) bridge bus ranges. */
3815 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3817 struct pci_devinfo *dinfo;
3819 struct resource_list *rl;
3820 const struct pci_quirk *q;
3824 dinfo = device_get_ivars(dev);
3826 rl = &dinfo->resources;
3827 devid = (cfg->device << 16) | cfg->vendor;
3829 /* Allocate resources using Enhanced Allocation */
3830 pci_add_resources_ea(bus, dev, 0);
3832 /* ATA devices needs special map treatment */
3833 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3834 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3835 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3836 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3837 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3838 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* Walk the BARs; pci_add_map returns how many registers each one
 * consumed (2 for 64-bit BARs), which advances 'i'. */
3840 for (i = 0; i < cfg->nummaps;) {
3841 /* Skip resources already managed by EA */
3842 if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) ||
3843 (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) ||
3844 pci_ea_is_enabled(dev, PCIR_BAR(i))) {
3850 * Skip quirked resources.
3852 for (q = &pci_quirks[0]; q->devid != 0; q++)
3853 if (q->devid == devid &&
3854 q->type == PCI_QUIRK_UNMAP_REG &&
3855 q->arg1 == PCIR_BAR(i))
3857 if (q->devid != 0) {
3861 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3862 prefetchmask & (1 << i));
3866 * Add additional, quirked resources.
3868 for (q = &pci_quirks[0]; q->devid != 0; q++)
3869 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3870 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3872 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3873 #ifdef __PCI_REROUTE_INTERRUPT
3875 * Try to re-route interrupts. Sometimes the BIOS or
3876 * firmware may leave bogus values in these registers.
3877 * If the re-route fails, then just stick with what we
3880 pci_assign_interrupt(bus, dev, 1);
3882 pci_assign_interrupt(bus, dev, 0);
/* Wrest USB controllers away from BIOS/SMM early so they cannot
 * interfere once the OS drivers attach. */
3886 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3887 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3888 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3889 xhci_early_takeover(dev);
3890 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3891 ehci_early_takeover(dev);
3892 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3893 ohci_early_takeover(dev);
3894 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3895 uhci_early_takeover(dev);
3898 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3900 * Reserve resources for secondary bus ranges behind bridge
3903 pci_reserve_secbus(bus, dev, cfg, rl);
3907 static struct pci_devinfo *
3908 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3911 struct pci_devinfo *dinfo;
3913 dinfo = pci_read_device(pcib, dev, domain, busno, slot, func);
3915 pci_add_child(dev, dinfo);
3921 pci_add_children(device_t dev, int domain, int busno)
3923 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3924 device_t pcib = device_get_parent(dev);
3925 struct pci_devinfo *dinfo;
3927 int s, f, pcifunchigh;
3932 * Try to detect a device at slot 0, function 0. If it exists, try to
3933 * enable ARI. We must enable ARI before detecting the rest of the
3934 * functions on this bus as ARI changes the set of slots and functions
3935 * that are legal on this bus.
3937 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0);
3938 if (dinfo != NULL && pci_enable_ari)
3939 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);
3942 * Start looking for new devices on slot 0 at function 1 because we
3943 * just identified the device at slot 0, function 0.
3947 maxslots = PCIB_MAXSLOTS(pcib);
3948 for (s = 0; s <= maxslots; s++, first_func = 0) {
3952 hdrtype = REG(PCIR_HDRTYPE, 1);
3953 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3955 if (hdrtype & PCIM_MFDEV)
3956 pcifunchigh = PCIB_MAXFUNCS(pcib);
3957 for (f = first_func; f <= pcifunchigh; f++)
3958 pci_identify_function(pcib, dev, domain, busno, s, f);
3964 pci_rescan_method(device_t dev)
3966 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3967 device_t pcib = device_get_parent(dev);
3968 struct pci_softc *sc;
3969 device_t child, *devlist, *unchanged;
3970 int devcount, error, i, j, maxslots, oldcount;
3971 int busno, domain, s, f, pcifunchigh;
3974 /* No need to check for ARI on a rescan. */
3975 error = device_get_children(dev, &devlist, &devcount);
3978 if (devcount != 0) {
3979 unchanged = malloc(devcount * sizeof(device_t), M_TEMP,
3981 if (unchanged == NULL) {
3982 free(devlist, M_TEMP);
3988 sc = device_get_softc(dev);
3989 domain = pcib_get_domain(dev);
3990 busno = pcib_get_bus(dev);
3991 maxslots = PCIB_MAXSLOTS(pcib);
3992 for (s = 0; s <= maxslots; s++) {
3993 /* If function 0 is not present, skip to the next slot. */
3995 if (REG(PCIR_VENDOR, 2) == 0xffff)
3998 hdrtype = REG(PCIR_HDRTYPE, 1);
3999 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
4001 if (hdrtype & PCIM_MFDEV)
4002 pcifunchigh = PCIB_MAXFUNCS(pcib);
4003 for (f = 0; f <= pcifunchigh; f++) {
4004 if (REG(PCIR_VENDOR, 2) == 0xffff)
4008 * Found a valid function. Check if a
4009 * device_t for this device already exists.
4011 for (i = 0; i < devcount; i++) {
4015 if (pci_get_slot(child) == s &&
4016 pci_get_function(child) == f) {
4017 unchanged[i] = child;
4022 pci_identify_function(pcib, dev, domain, busno, s, f);
4027 /* Remove devices that are no longer present. */
4028 for (i = 0; i < devcount; i++) {
4029 if (unchanged[i] != NULL)
4031 device_delete_child(dev, devlist[i]);
4034 free(devlist, M_TEMP);
4035 oldcount = devcount;
4037 /* Try to attach the devices just added. */
4038 error = device_get_children(dev, &devlist, &devcount);
4040 free(unchanged, M_TEMP);
4044 for (i = 0; i < devcount; i++) {
4045 for (j = 0; j < oldcount; j++) {
4046 if (devlist[i] == unchanged[j])
4050 device_probe_and_attach(devlist[i]);
4054 free(unchanged, M_TEMP);
4055 free(devlist, M_TEMP);
4062 pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid,
4065 struct pci_devinfo *pf_dinfo, *vf_dinfo;
4067 int busno, slot, func;
4069 pf_dinfo = device_get_ivars(pf);
4071 pcib = device_get_parent(bus);
4073 PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func);
4075 vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno,
4076 slot, func, vid, did);
4078 vf_dinfo->cfg.flags |= PCICFG_VF;
4079 pci_add_child(bus, vf_dinfo);
4081 return (vf_dinfo->cfg.dev);
4085 pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid,
4086 uint16_t vid, uint16_t did)
4089 return (pci_add_iov_child(bus, pf, rid, vid, did));
4094 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
4096 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
4097 device_set_ivars(dinfo->cfg.dev, dinfo);
4098 resource_list_init(&dinfo->resources);
4099 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
4100 pci_cfg_restore(dinfo->cfg.dev, dinfo);
4101 pci_print_verbose(dinfo);
4102 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
4103 pci_child_added(dinfo->cfg.dev);
4104 EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev);
4108 pci_child_added_method(device_t dev, device_t child)
4114 pci_probe(device_t dev)
4117 device_set_desc(dev, "PCI bus");
4119 /* Allow other subclasses to override this driver. */
4120 return (BUS_PROBE_GENERIC);
4124 pci_attach_common(device_t dev)
4126 struct pci_softc *sc;
4128 #ifdef PCI_DMA_BOUNDARY
4129 int error, tag_valid;
4135 sc = device_get_softc(dev);
4136 domain = pcib_get_domain(dev);
4137 busno = pcib_get_bus(dev);
4140 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
4142 if (sc->sc_bus == NULL) {
4143 device_printf(dev, "failed to allocate bus number\n");
4148 device_printf(dev, "domain=%d, physical bus=%d\n",
4150 #ifdef PCI_DMA_BOUNDARY
4152 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
4153 devclass_find("pci")) {
4154 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
4155 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4156 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
4157 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
4159 device_printf(dev, "Failed to create DMA tag: %d\n",
4166 sc->sc_dma_tag = bus_get_dma_tag(dev);
4171 pci_attach(device_t dev)
4173 int busno, domain, error;
4175 error = pci_attach_common(dev);
4180 * Since there can be multiple independently numbered PCI
4181 * busses on systems with multiple PCI domains, we can't use
4182 * the unit number to decide which bus we are probing. We ask
4183 * the parent pcib what our domain and bus numbers are.
4185 domain = pcib_get_domain(dev);
4186 busno = pcib_get_bus(dev);
4187 pci_add_children(dev, domain, busno);
4188 return (bus_generic_attach(dev));
4192 pci_detach(device_t dev)
4195 struct pci_softc *sc;
4199 error = bus_generic_detach(dev);
4203 sc = device_get_softc(dev);
4204 error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus);
4208 return (device_delete_children(dev));
4212 pci_set_power_child(device_t dev, device_t child, int state)
4218 * Set the device to the given state. If the firmware suggests
4219 * a different power state, use it instead. If power management
4220 * is not present, the firmware is responsible for managing
4221 * device power. Skip children who aren't attached since they
4222 * are handled separately.
4224 pcib = device_get_parent(dev);
4226 if (device_is_attached(child) &&
4227 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)
4228 pci_set_powerstate(child, dstate);
4232 pci_suspend_child(device_t dev, device_t child)
4234 struct pci_devinfo *dinfo;
4237 dinfo = device_get_ivars(child);
4240 * Save the PCI configuration space for the child and set the
4241 * device in the appropriate power state for this sleep state.
4243 pci_cfg_save(child, dinfo, 0);
4245 /* Suspend devices before potentially powering them down. */
4246 error = bus_generic_suspend_child(dev, child);
4251 if (pci_do_power_suspend)
4252 pci_set_power_child(dev, child, PCI_POWERSTATE_D3);
4258 pci_resume_child(device_t dev, device_t child)
4260 struct pci_devinfo *dinfo;
4262 if (pci_do_power_resume)
4263 pci_set_power_child(dev, child, PCI_POWERSTATE_D0);
4265 dinfo = device_get_ivars(child);
4266 pci_cfg_restore(child, dinfo);
4267 if (!device_is_attached(child))
4268 pci_cfg_save(child, dinfo, 1);
4270 bus_generic_resume_child(dev, child);
4276 pci_resume(device_t dev)
4278 device_t child, *devlist;
4279 int error, i, numdevs;
4281 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
4285 * Resume critical devices first, then everything else later.
4287 for (i = 0; i < numdevs; i++) {
4289 switch (pci_get_class(child)) {
4293 case PCIC_BASEPERIPH:
4294 BUS_RESUME_CHILD(dev, child);
4298 for (i = 0; i < numdevs; i++) {
4300 switch (pci_get_class(child)) {
4304 case PCIC_BASEPERIPH:
4307 BUS_RESUME_CHILD(dev, child);
4310 free(devlist, M_TEMP);
4315 pci_load_vendor_data(void)
4321 data = preload_search_by_type("pci_vendor_data");
4323 ptr = preload_fetch_addr(data);
4324 sz = preload_fetch_size(data);
4325 if (ptr != NULL && sz != 0) {
4326 pci_vendordata = ptr;
4327 pci_vendordata_size = sz;
4328 /* terminate the database */
4329 pci_vendordata[pci_vendordata_size] = '\n';
4335 pci_driver_added(device_t dev, driver_t *driver)
4340 struct pci_devinfo *dinfo;
4344 device_printf(dev, "driver added\n");
4345 DEVICE_IDENTIFY(driver, dev);
4346 if (device_get_children(dev, &devlist, &numdevs) != 0)
4348 for (i = 0; i < numdevs; i++) {
4350 if (device_get_state(child) != DS_NOTPRESENT)
4352 dinfo = device_get_ivars(child);
4353 pci_print_verbose(dinfo);
4355 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
4356 pci_cfg_restore(child, dinfo);
4357 if (device_probe_and_attach(child) != 0)
4358 pci_child_detached(dev, child);
4360 free(devlist, M_TEMP);
4364 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
4365 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
4367 struct pci_devinfo *dinfo;
4368 struct msix_table_entry *mte;
4369 struct msix_vector *mv;
4375 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
4380 /* If this is not a direct child, just bail out. */
4381 if (device_get_parent(child) != dev) {
4386 rid = rman_get_rid(irq);
4388 /* Make sure that INTx is enabled */
4389 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
4392 * Check to see if the interrupt is MSI or MSI-X.
4393 * Ask our parent to map the MSI and give
4394 * us the address and data register values.
4395 * If we fail for some reason, teardown the
4396 * interrupt handler.
4398 dinfo = device_get_ivars(child);
4399 if (dinfo->cfg.msi.msi_alloc > 0) {
4400 if (dinfo->cfg.msi.msi_addr == 0) {
4401 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
4402 ("MSI has handlers, but vectors not mapped"));
4403 error = PCIB_MAP_MSI(device_get_parent(dev),
4404 child, rman_get_start(irq), &addr, &data);
4407 dinfo->cfg.msi.msi_addr = addr;
4408 dinfo->cfg.msi.msi_data = data;
4410 if (dinfo->cfg.msi.msi_handlers == 0)
4411 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
4412 dinfo->cfg.msi.msi_data);
4413 dinfo->cfg.msi.msi_handlers++;
4415 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4416 ("No MSI or MSI-X interrupts allocated"));
4417 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4418 ("MSI-X index too high"));
4419 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4420 KASSERT(mte->mte_vector != 0, ("no message vector"));
4421 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
4422 KASSERT(mv->mv_irq == rman_get_start(irq),
4424 if (mv->mv_address == 0) {
4425 KASSERT(mte->mte_handlers == 0,
4426 ("MSI-X table entry has handlers, but vector not mapped"));
4427 error = PCIB_MAP_MSI(device_get_parent(dev),
4428 child, rman_get_start(irq), &addr, &data);
4431 mv->mv_address = addr;
4436 * The MSIX table entry must be made valid by
4437 * incrementing the mte_handlers before
4438 * calling pci_enable_msix() and
4439 * pci_resume_msix(). Else the MSIX rewrite
4440 * table quirk will not work as expected.
4442 mte->mte_handlers++;
4443 if (mte->mte_handlers == 1) {
4444 pci_enable_msix(child, rid - 1, mv->mv_address,
4446 pci_unmask_msix(child, rid - 1);
4451 * Make sure that INTx is disabled if we are using MSI/MSI-X,
4452 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
4453 * in which case we "enable" INTx so MSI/MSI-X actually works.
4455 if (!pci_has_quirk(pci_get_devid(child),
4456 PCI_QUIRK_MSI_INTX_BUG))
4457 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4459 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
4462 (void)bus_generic_teardown_intr(dev, child, irq,
4472 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
4475 struct msix_table_entry *mte;
4476 struct resource_list_entry *rle;
4477 struct pci_devinfo *dinfo;
4480 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
4483 /* If this isn't a direct child, just bail out */
4484 if (device_get_parent(child) != dev)
4485 return(bus_generic_teardown_intr(dev, child, irq, cookie));
4487 rid = rman_get_rid(irq);
4490 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4493 * Check to see if the interrupt is MSI or MSI-X. If so,
4494 * decrement the appropriate handlers count and mask the
4495 * MSI-X message, or disable MSI messages if the count
4498 dinfo = device_get_ivars(child);
4499 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
4500 if (rle->res != irq)
4502 if (dinfo->cfg.msi.msi_alloc > 0) {
4503 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4504 ("MSI-X index too high"));
4505 if (dinfo->cfg.msi.msi_handlers == 0)
4507 dinfo->cfg.msi.msi_handlers--;
4508 if (dinfo->cfg.msi.msi_handlers == 0)
4509 pci_disable_msi(child);
4511 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4512 ("No MSI or MSI-X interrupts allocated"));
4513 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4514 ("MSI-X index too high"));
4515 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4516 if (mte->mte_handlers == 0)
4518 mte->mte_handlers--;
4519 if (mte->mte_handlers == 0)
4520 pci_mask_msix(child, rid - 1);
4523 error = bus_generic_teardown_intr(dev, child, irq, cookie);
4526 ("%s: generic teardown failed for MSI/MSI-X", __func__));
4531 pci_print_child(device_t dev, device_t child)
4533 struct pci_devinfo *dinfo;
4534 struct resource_list *rl;
4537 dinfo = device_get_ivars(child);
4538 rl = &dinfo->resources;
4540 retval += bus_print_child_header(dev, child);
4542 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
4543 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
4544 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
4545 if (device_get_flags(dev))
4546 retval += printf(" flags %#x", device_get_flags(dev));
4548 retval += printf(" at device %d.%d", pci_get_slot(child),
4549 pci_get_function(child));
4551 retval += bus_print_child_domain(dev, child);
4552 retval += bus_print_child_footer(dev, child);
4561 int report; /* 0 = bootverbose, 1 = always */
4563 } pci_nomatch_tab[] = {
4564 {PCIC_OLD, -1, 1, "old"},
4565 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4566 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4567 {PCIC_STORAGE, -1, 1, "mass storage"},
4568 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
4569 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
4570 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
4571 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
4572 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
4573 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
4574 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
4575 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
4576 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
4577 {PCIC_NETWORK, -1, 1, "network"},
4578 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
4579 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
4580 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
4581 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
4582 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
4583 {PCIC_DISPLAY, -1, 1, "display"},
4584 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
4585 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
4586 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
4587 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4588 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
4589 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
4590 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
4591 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
4592 {PCIC_MEMORY, -1, 1, "memory"},
4593 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
4594 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
4595 {PCIC_BRIDGE, -1, 1, "bridge"},
4596 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4597 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4598 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4599 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4600 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4601 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4602 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4603 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4604 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4605 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4606 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
4607 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
4608 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
4609 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
4610 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4611 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
4612 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
4613 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
4614 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
4615 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4616 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
4617 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"},
4618 {PCIC_INPUTDEV, -1, 1, "input device"},
4619 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
4620 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4621 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
4622 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
4623 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
4624 {PCIC_DOCKING, -1, 1, "docking station"},
4625 {PCIC_PROCESSOR, -1, 1, "processor"},
4626 {PCIC_SERIALBUS, -1, 1, "serial bus"},
4627 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
4628 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
4629 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
4630 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
4631 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
4632 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
4633 {PCIC_WIRELESS, -1, 1, "wireless controller"},
4634 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
4635 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
4636 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
4637 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
4638 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
4639 {PCIC_SATCOM, -1, 1, "satellite communication"},
4640 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
4641 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
4642 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
4643 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
4644 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
4645 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
4646 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
4647 {PCIC_DASP, -1, 0, "dasp"},
4648 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
4649 {PCIC_DASP, PCIS_DASP_PERFCNTRS, 1, "performance counters"},
4650 {PCIC_DASP, PCIS_DASP_COMM_SYNC, 1, "communication synchronizer"},
4651 {PCIC_DASP, PCIS_DASP_MGMT_CARD, 1, "signal processing management"},
4656 pci_probe_nomatch(device_t dev, device_t child)
4659 const char *cp, *scp;
4663 * Look for a listing for this device in a loaded device database.
4666 if ((device = pci_describe_device(child)) != NULL) {
4667 device_printf(dev, "<%s>", device);
4668 free(device, M_DEVBUF);
4671 * Scan the class/subclass descriptions for a general
4676 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4677 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
4678 if (pci_nomatch_tab[i].subclass == -1) {
4679 cp = pci_nomatch_tab[i].desc;
4680 report = pci_nomatch_tab[i].report;
4681 } else if (pci_nomatch_tab[i].subclass ==
4682 pci_get_subclass(child)) {
4683 scp = pci_nomatch_tab[i].desc;
4684 report = pci_nomatch_tab[i].report;
4688 if (report || bootverbose) {
4689 device_printf(dev, "<%s%s%s>",
4691 ((cp != NULL) && (scp != NULL)) ? ", " : "",
4695 if (report || bootverbose) {
4696 printf(" at device %d.%d (no driver attached)\n",
4697 pci_get_slot(child), pci_get_function(child));
4699 pci_cfg_save(child, device_get_ivars(child), 1);
4703 pci_child_detached(device_t dev, device_t child)
4705 struct pci_devinfo *dinfo;
4706 struct resource_list *rl;
4708 dinfo = device_get_ivars(child);
4709 rl = &dinfo->resources;
4712 * Have to deallocate IRQs before releasing any MSI messages and
4713 * have to release MSI messages before deallocating any memory
4716 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4717 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4718 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4719 pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
4720 (void)pci_release_msi(child);
4722 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4723 pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
4724 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4725 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
4727 if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4728 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
4731 pci_cfg_save(child, dinfo, 1);
4735 * Parse the PCI device database, if loaded, and return a pointer to a
4736 * description of the device.
4738 * The database is flat text formatted as follows:
4740 * Any line not in a valid format is ignored.
4741 * Lines are terminated with newline '\n' characters.
4743 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4746 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4747 * - devices cannot be listed without a corresponding VENDOR line.
4748 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4749 * another TAB, then the device name.
4753 * Assuming (ptr) points to the beginning of a line in the database,
4754 * return the vendor or device and description of the next entry.
4755 * The value of (vendor) or (device) inappropriate for the entry type
4756 * is set to -1. Returns nonzero at the end of the database.
4758 * Note that this is slightly fragile in the face of corrupt data;
4759 * we attempt to safeguard against this by spamming the end of the
4760 * database with a newline when we initialise.
4763 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
4772 left = pci_vendordata_size - (cp - pci_vendordata);
4780 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
4784 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4787 /* skip to next line */
4788 while (*cp != '\n' && left > 0) {
4797 /* skip to next line */
4798 while (*cp != '\n' && left > 0) {
4802 if (*cp == '\n' && left > 0)
4809 pci_describe_device(device_t dev)
4812 char *desc, *vp, *dp, *line;
4814 desc = vp = dp = NULL;
4817 * If we have no vendor data, we can't do anything.
4819 if (pci_vendordata == NULL)
4823 * Scan the vendor data looking for this device
4825 line = pci_vendordata;
4826 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4829 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4831 if (vendor == pci_get_vendor(dev))
4834 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4837 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4845 if (device == pci_get_device(dev))
4849 snprintf(dp, 80, "0x%x", pci_get_device(dev));
4850 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4852 sprintf(desc, "%s, %s", vp, dp);
4862 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4864 struct pci_devinfo *dinfo;
4867 dinfo = device_get_ivars(child);
4871 case PCI_IVAR_ETHADDR:
4873 * The generic accessor doesn't deal with failure, so
4874 * we set the return value, then return an error.
4876 *((uint8_t **) result) = NULL;
4878 case PCI_IVAR_SUBVENDOR:
4879 *result = cfg->subvendor;
4881 case PCI_IVAR_SUBDEVICE:
4882 *result = cfg->subdevice;
4884 case PCI_IVAR_VENDOR:
4885 *result = cfg->vendor;
4887 case PCI_IVAR_DEVICE:
4888 *result = cfg->device;
4890 case PCI_IVAR_DEVID:
4891 *result = (cfg->device << 16) | cfg->vendor;
4893 case PCI_IVAR_CLASS:
4894 *result = cfg->baseclass;
4896 case PCI_IVAR_SUBCLASS:
4897 *result = cfg->subclass;
4899 case PCI_IVAR_PROGIF:
4900 *result = cfg->progif;
4902 case PCI_IVAR_REVID:
4903 *result = cfg->revid;
4905 case PCI_IVAR_INTPIN:
4906 *result = cfg->intpin;
4909 *result = cfg->intline;
4911 case PCI_IVAR_DOMAIN:
4912 *result = cfg->domain;
4918 *result = cfg->slot;
4920 case PCI_IVAR_FUNCTION:
4921 *result = cfg->func;
4923 case PCI_IVAR_CMDREG:
4924 *result = cfg->cmdreg;
4926 case PCI_IVAR_CACHELNSZ:
4927 *result = cfg->cachelnsz;
4929 case PCI_IVAR_MINGNT:
4930 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4934 *result = cfg->mingnt;
4936 case PCI_IVAR_MAXLAT:
4937 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4941 *result = cfg->maxlat;
4943 case PCI_IVAR_LATTIMER:
4944 *result = cfg->lattimer;
4953 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4955 struct pci_devinfo *dinfo;
4957 dinfo = device_get_ivars(child);
4960 case PCI_IVAR_INTPIN:
4961 dinfo->cfg.intpin = value;
4963 case PCI_IVAR_ETHADDR:
4964 case PCI_IVAR_SUBVENDOR:
4965 case PCI_IVAR_SUBDEVICE:
4966 case PCI_IVAR_VENDOR:
4967 case PCI_IVAR_DEVICE:
4968 case PCI_IVAR_DEVID:
4969 case PCI_IVAR_CLASS:
4970 case PCI_IVAR_SUBCLASS:
4971 case PCI_IVAR_PROGIF:
4972 case PCI_IVAR_REVID:
4974 case PCI_IVAR_DOMAIN:
4977 case PCI_IVAR_FUNCTION:
4978 return (EINVAL); /* disallow for now */
4985 #include "opt_ddb.h"
4987 #include <ddb/ddb.h>
4988 #include <sys/cons.h>
4991 * List resources based on pci map registers, used for within ddb
4994 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4996 struct pci_devinfo *dinfo;
4997 struct devlist *devlist_head;
5000 int i, error, none_count;
5003 /* get the head of the device queue */
5004 devlist_head = &pci_devq;
5007 * Go through the list of devices and print out devices
5009 for (error = 0, i = 0,
5010 dinfo = STAILQ_FIRST(devlist_head);
5011 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
5012 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
5014 /* Populate pd_name and pd_unit */
5017 name = device_get_name(dinfo->cfg.dev);
5020 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
5021 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
5022 (name && *name) ? name : "none",
5023 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
5025 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
5026 p->pc_sel.pc_func, (p->pc_class << 16) |
5027 (p->pc_subclass << 8) | p->pc_progif,
5028 (p->pc_subdevice << 16) | p->pc_subvendor,
5029 (p->pc_device << 16) | p->pc_vendor,
5030 p->pc_revid, p->pc_hdr);
5035 static struct resource *
5036 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
5037 rman_res_t start, rman_res_t end, rman_res_t count, u_int num,
5040 struct pci_devinfo *dinfo = device_get_ivars(child);
5041 struct resource_list *rl = &dinfo->resources;
5042 struct resource *res;
5045 pci_addr_t map, testval;
5050 /* If rid is managed by EA, ignore it */
5051 if (pci_ea_is_enabled(child, *rid))
5054 pm = pci_find_bar(child, *rid);
5056 /* This is a BAR that we failed to allocate earlier. */
5057 mapsize = pm->pm_size;
5061 * Weed out the bogons, and figure out how large the
5062 * BAR/map is. BARs that read back 0 here are bogus
5063 * and unimplemented. Note: atapci in legacy mode are
5064 * special and handled elsewhere in the code. If you
5065 * have a atapci device in legacy mode and it fails
5066 * here, that other code is broken.
5068 pci_read_bar(child, *rid, &map, &testval, NULL);
5071 * Determine the size of the BAR and ignore BARs with a size
5072 * of 0. Device ROM BARs use a different mask value.
5074 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
5075 mapsize = pci_romsize(testval);
5077 mapsize = pci_mapsize(testval);
5080 pm = pci_add_bar(child, *rid, map, mapsize);
5083 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
5084 if (type != SYS_RES_MEMORY) {
5087 "child %s requested type %d for rid %#x,"
5088 " but the BAR says it is an memio\n",
5089 device_get_nameunit(child), type, *rid);
5093 if (type != SYS_RES_IOPORT) {
5096 "child %s requested type %d for rid %#x,"
5097 " but the BAR says it is an ioport\n",
5098 device_get_nameunit(child), type, *rid);
5104 * For real BARs, we need to override the size that
5105 * the driver requests, because that's what the BAR
5106 * actually uses and we would otherwise have a
5107 * situation where we might allocate the excess to
5108 * another driver, which won't work.
5110 count = ((pci_addr_t)1 << mapsize) * num;
5111 if (RF_ALIGNMENT(flags) < mapsize)
5112 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
5113 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
5114 flags |= RF_PREFETCHABLE;
5117 * Allocate enough resource, and then write back the
5118 * appropriate BAR for that resource.
5120 resource_list_add(rl, type, *rid, start, end, count);
5121 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
5122 count, flags & ~RF_ACTIVE);
5124 resource_list_delete(rl, type, *rid);
5125 device_printf(child,
5126 "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n",
5127 count, *rid, type, start, end);
5131 device_printf(child,
5132 "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n",
5133 count, *rid, type, rman_get_start(res));
5135 /* Disable decoding via the CMD register before updating the BAR */
5136 cmd = pci_read_config(child, PCIR_COMMAND, 2);
5137 pci_write_config(child, PCIR_COMMAND,
5138 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
5140 map = rman_get_start(res);
5141 pci_write_bar(child, pm, map);
5143 /* Restore the original value of the CMD register */
5144 pci_write_config(child, PCIR_COMMAND, cmd, 2);
5150 pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
5151 rman_res_t start, rman_res_t end, rman_res_t count, u_long num,
5154 struct pci_devinfo *dinfo;
5155 struct resource_list *rl;
5156 struct resource_list_entry *rle;
5157 struct resource *res;
5161 * Perform lazy resource allocation
5163 dinfo = device_get_ivars(child);
5164 rl = &dinfo->resources;
5167 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
5169 return (pci_alloc_secbus(dev, child, rid, start, end, count,
5174 * Can't alloc legacy interrupt once MSI messages have
5177 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
5178 cfg->msix.msix_alloc > 0))
5182 * If the child device doesn't have an interrupt
5183 * routed and is deserving of an interrupt, try to
5186 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
5188 pci_assign_interrupt(dev, child, 0);
5190 case SYS_RES_IOPORT:
5191 case SYS_RES_MEMORY:
5194 * PCI-PCI bridge I/O window resources are not BARs.
5195 * For those allocations just pass the request up the
5198 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
5200 case PCIR_IOBASEL_1:
5201 case PCIR_MEMBASE_1:
5202 case PCIR_PMBASEL_1:
5204 * XXX: Should we bother creating a resource
5207 return (bus_generic_alloc_resource(dev, child,
5208 type, rid, start, end, count, flags));
5212 /* Reserve resources for this BAR if needed. */
5213 rle = resource_list_find(rl, type, *rid);
5215 res = pci_reserve_map(dev, child, type, rid, start, end,
5221 return (resource_list_alloc(rl, dev, child, type, rid,
5222 start, end, count, flags));
5226 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
5227 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
5230 struct pci_devinfo *dinfo;
5233 if (device_get_parent(child) != dev)
5234 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
5235 type, rid, start, end, count, flags));
5238 dinfo = device_get_ivars(child);
5239 if (dinfo->cfg.flags & PCICFG_VF) {
5241 /* VFs can't have I/O BARs. */
5242 case SYS_RES_IOPORT:
5244 case SYS_RES_MEMORY:
5245 return (pci_vf_alloc_mem_resource(dev, child, rid,
5246 start, end, count, flags));
5249 /* Fall through for other types of resource allocations. */
5253 return (pci_alloc_multi_resource(dev, child, type, rid, start, end,
5258 pci_release_resource(device_t dev, device_t child, int type, int rid,
5261 struct pci_devinfo *dinfo;
5262 struct resource_list *rl;
5265 if (device_get_parent(child) != dev)
5266 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
5269 dinfo = device_get_ivars(child);
5273 if (dinfo->cfg.flags & PCICFG_VF) {
5275 /* VFs can't have I/O BARs. */
5276 case SYS_RES_IOPORT:
5278 case SYS_RES_MEMORY:
5279 return (pci_vf_release_mem_resource(dev, child, rid,
5283 /* Fall through for other types of resource allocations. */
5289 * PCI-PCI bridge I/O window resources are not BARs. For
5290 * those allocations just pass the request up the tree.
5292 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
5293 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
5295 case PCIR_IOBASEL_1:
5296 case PCIR_MEMBASE_1:
5297 case PCIR_PMBASEL_1:
5298 return (bus_generic_release_resource(dev, child, type,
5304 rl = &dinfo->resources;
5305 return (resource_list_release(rl, dev, child, type, rid, r));
5309 pci_activate_resource(device_t dev, device_t child, int type, int rid,
5312 struct pci_devinfo *dinfo;
5315 error = bus_generic_activate_resource(dev, child, type, rid, r);
5319 /* Enable decoding in the command register when activating BARs. */
5320 if (device_get_parent(child) == dev) {
5321 /* Device ROMs need their decoding explicitly enabled. */
5322 dinfo = device_get_ivars(child);
5323 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
5324 pci_write_bar(child, pci_find_bar(child, rid),
5325 rman_get_start(r) | PCIM_BIOS_ENABLE);
5327 case SYS_RES_IOPORT:
5328 case SYS_RES_MEMORY:
5329 error = PCI_ENABLE_IO(dev, child, type);
5337 pci_deactivate_resource(device_t dev, device_t child, int type,
5338 int rid, struct resource *r)
5340 struct pci_devinfo *dinfo;
5343 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
5347 /* Disable decoding for device ROMs. */
5348 if (device_get_parent(child) == dev) {
5349 dinfo = device_get_ivars(child);
5350 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
5351 pci_write_bar(child, pci_find_bar(child, rid),
5358 pci_child_deleted(device_t dev, device_t child)
5360 struct resource_list_entry *rle;
5361 struct resource_list *rl;
5362 struct pci_devinfo *dinfo;
5364 dinfo = device_get_ivars(child);
5365 rl = &dinfo->resources;
5367 EVENTHANDLER_INVOKE(pci_delete_device, child);
5369 /* Turn off access to resources we're about to free */
5370 if (bus_child_present(child) != 0) {
5371 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
5372 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
5374 pci_disable_busmaster(child);
5377 /* Free all allocated resources */
5378 STAILQ_FOREACH(rle, rl, link) {
5380 if (rman_get_flags(rle->res) & RF_ACTIVE ||
5381 resource_list_busy(rl, rle->type, rle->rid)) {
5382 pci_printf(&dinfo->cfg,
5383 "Resource still owned, oops. "
5384 "(type=%d, rid=%d, addr=%lx)\n",
5385 rle->type, rle->rid,
5386 rman_get_start(rle->res));
5387 bus_release_resource(child, rle->type, rle->rid,
5390 resource_list_unreserve(rl, dev, child, rle->type,
5394 resource_list_free(rl);
5400 pci_delete_resource(device_t dev, device_t child, int type, int rid)
5402 struct pci_devinfo *dinfo;
5403 struct resource_list *rl;
5404 struct resource_list_entry *rle;
5406 if (device_get_parent(child) != dev)
5409 dinfo = device_get_ivars(child);
5410 rl = &dinfo->resources;
5411 rle = resource_list_find(rl, type, rid);
5416 if (rman_get_flags(rle->res) & RF_ACTIVE ||
5417 resource_list_busy(rl, type, rid)) {
5418 device_printf(dev, "delete_resource: "
5419 "Resource still owned by child, oops. "
5420 "(type=%d, rid=%d, addr=%jx)\n",
5421 type, rid, rman_get_start(rle->res));
5424 resource_list_unreserve(rl, dev, child, type, rid);
5426 resource_list_delete(rl, type, rid);
5429 struct resource_list *
5430 pci_get_resource_list (device_t dev, device_t child)
5432 struct pci_devinfo *dinfo = device_get_ivars(child);
5434 return (&dinfo->resources);
5438 pci_get_dma_tag(device_t bus, device_t dev)
5440 struct pci_softc *sc = device_get_softc(bus);
5442 return (sc->sc_dma_tag);
5446 pci_read_config_method(device_t dev, device_t child, int reg, int width)
5448 struct pci_devinfo *dinfo = device_get_ivars(child);
5449 pcicfgregs *cfg = &dinfo->cfg;
5453 * SR-IOV VFs don't implement the VID or DID registers, so we have to
5454 * emulate them here.
5456 if (cfg->flags & PCICFG_VF) {
5457 if (reg == PCIR_VENDOR) {
5460 return (cfg->device << 16 | cfg->vendor);
5462 return (cfg->vendor);
5464 return (cfg->vendor & 0xff);
5466 return (0xffffffff);
5468 } else if (reg == PCIR_DEVICE) {
5470 /* Note that an unaligned 4-byte read is an error. */
5472 return (cfg->device);
5474 return (cfg->device & 0xff);
5476 return (0xffffffff);
5482 return (PCIB_READ_CONFIG(device_get_parent(dev),
5483 cfg->bus, cfg->slot, cfg->func, reg, width));
5487 pci_write_config_method(device_t dev, device_t child, int reg,
5488 uint32_t val, int width)
5490 struct pci_devinfo *dinfo = device_get_ivars(child);
5491 pcicfgregs *cfg = &dinfo->cfg;
5493 PCIB_WRITE_CONFIG(device_get_parent(dev),
5494 cfg->bus, cfg->slot, cfg->func, reg, val, width);
5498 pci_child_location_str_method(device_t dev, device_t child, char *buf,
5502 snprintf(buf, buflen, "slot=%d function=%d dbsf=pci%d:%d:%d:%d",
5503 pci_get_slot(child), pci_get_function(child), pci_get_domain(child),
5504 pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
5509 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
5512 struct pci_devinfo *dinfo;
5515 dinfo = device_get_ivars(child);
5517 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
5518 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
5519 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
5525 pci_assign_interrupt_method(device_t dev, device_t child)
5527 struct pci_devinfo *dinfo = device_get_ivars(child);
5528 pcicfgregs *cfg = &dinfo->cfg;
5530 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
5535 pci_lookup(void *arg, const char *name, device_t *dev)
5539 int domain, bus, slot, func;
5545 * Accept pciconf-style selectors of either pciD:B:S:F or
5546 * pciB:S:F. In the latter case, the domain is assumed to
5549 if (strncmp(name, "pci", 3) != 0)
5551 val = strtol(name + 3, &end, 10);
5552 if (val < 0 || val > INT_MAX || *end != ':')
5555 val = strtol(end + 1, &end, 10);
5556 if (val < 0 || val > INT_MAX || *end != ':')
5559 val = strtol(end + 1, &end, 10);
5560 if (val < 0 || val > INT_MAX)
5564 val = strtol(end + 1, &end, 10);
5565 if (val < 0 || val > INT_MAX || *end != '\0')
5568 } else if (*end == '\0') {
5576 if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
5577 func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
5580 *dev = pci_find_dbsf(domain, bus, slot, func);
5584 pci_modevent(module_t mod, int what, void *arg)
5586 static struct cdev *pci_cdev;
5587 static eventhandler_tag tag;
5591 STAILQ_INIT(&pci_devq);
5593 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
5595 pci_load_vendor_data();
5596 tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
5602 EVENTHANDLER_DEREGISTER(dev_lookup, tag);
5603 destroy_dev(pci_cdev);
5611 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
5613 #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
5614 struct pcicfg_pcie *cfg;
5617 cfg = &dinfo->cfg.pcie;
5618 pos = cfg->pcie_location;
5620 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5622 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
5624 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5625 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5626 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5627 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
5629 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5630 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5631 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5632 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
5634 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5635 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5636 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
5639 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
5640 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
5641 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
5647 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
5649 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
5650 dinfo->cfg.pcix.pcix_command, 2);
5654 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
5658 * Restore the device to full power mode. We must do this
5659 * before we restore the registers because moving from D3 to
5660 * D0 will cause the chip's BARs and some other registers to
5661 * be reset to some unknown power on reset values. Cut down
5662 * the noise on boot by doing nothing if we are already in
5665 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
5666 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5667 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
5668 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
5669 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
5670 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
5671 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
5672 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
5673 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
5674 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
5675 case PCIM_HDRTYPE_NORMAL:
5676 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
5677 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
5679 case PCIM_HDRTYPE_BRIDGE:
5680 pci_write_config(dev, PCIR_SECLAT_1,
5681 dinfo->cfg.bridge.br_seclat, 1);
5682 pci_write_config(dev, PCIR_SUBBUS_1,
5683 dinfo->cfg.bridge.br_subbus, 1);
5684 pci_write_config(dev, PCIR_SECBUS_1,
5685 dinfo->cfg.bridge.br_secbus, 1);
5686 pci_write_config(dev, PCIR_PRIBUS_1,
5687 dinfo->cfg.bridge.br_pribus, 1);
5688 pci_write_config(dev, PCIR_BRIDGECTL_1,
5689 dinfo->cfg.bridge.br_control, 2);
5691 case PCIM_HDRTYPE_CARDBUS:
5692 pci_write_config(dev, PCIR_SECLAT_2,
5693 dinfo->cfg.bridge.br_seclat, 1);
5694 pci_write_config(dev, PCIR_SUBBUS_2,
5695 dinfo->cfg.bridge.br_subbus, 1);
5696 pci_write_config(dev, PCIR_SECBUS_2,
5697 dinfo->cfg.bridge.br_secbus, 1);
5698 pci_write_config(dev, PCIR_PRIBUS_2,
5699 dinfo->cfg.bridge.br_pribus, 1);
5700 pci_write_config(dev, PCIR_BRIDGECTL_2,
5701 dinfo->cfg.bridge.br_control, 2);
5704 pci_restore_bars(dev);
5707 * Restore extended capabilities for PCI-Express and PCI-X
5709 if (dinfo->cfg.pcie.pcie_location != 0)
5710 pci_cfg_restore_pcie(dev, dinfo);
5711 if (dinfo->cfg.pcix.pcix_location != 0)
5712 pci_cfg_restore_pcix(dev, dinfo);
5714 /* Restore MSI and MSI-X configurations if they are present. */
5715 if (dinfo->cfg.msi.msi_location != 0)
5716 pci_resume_msi(dev);
5717 if (dinfo->cfg.msix.msix_location != 0)
5718 pci_resume_msix(dev);
5721 if (dinfo->cfg.iov != NULL)
5722 pci_iov_cfg_restore(dev, dinfo);
5727 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
5729 #define RREG(n) pci_read_config(dev, pos + (n), 2)
5730 struct pcicfg_pcie *cfg;
5733 cfg = &dinfo->cfg.pcie;
5734 pos = cfg->pcie_location;
5736 cfg->pcie_flags = RREG(PCIER_FLAGS);
5738 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5740 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
5742 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5743 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5744 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5745 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
5747 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5748 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5749 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5750 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
5752 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5753 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5754 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
5757 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5758 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5759 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
5765 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5767 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5768 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
5772 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5778 * Some drivers apparently write to these registers w/o updating our
5779 * cached copy. No harm happens if we update the copy, so do so here
5780 * so we can restore them. The COMMAND register is modified by the
5781 * bus w/o updating the cache. This should represent the normally
5782 * writable portion of the 'defined' part of type 0/1/2 headers.
5784 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5785 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5786 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5787 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5788 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5789 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5790 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5791 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5792 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5793 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5794 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
5795 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
5796 case PCIM_HDRTYPE_NORMAL:
5797 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5798 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5799 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5800 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5802 case PCIM_HDRTYPE_BRIDGE:
5803 dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
5805 dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
5807 dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
5809 dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
5811 dinfo->cfg.bridge.br_control = pci_read_config(dev,
5812 PCIR_BRIDGECTL_1, 2);
5814 case PCIM_HDRTYPE_CARDBUS:
5815 dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
5817 dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
5819 dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
5821 dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
5823 dinfo->cfg.bridge.br_control = pci_read_config(dev,
5824 PCIR_BRIDGECTL_2, 2);
5825 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2);
5826 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2);
5830 if (dinfo->cfg.pcie.pcie_location != 0)
5831 pci_cfg_save_pcie(dev, dinfo);
5833 if (dinfo->cfg.pcix.pcix_location != 0)
5834 pci_cfg_save_pcix(dev, dinfo);
5837 if (dinfo->cfg.iov != NULL)
5838 pci_iov_cfg_save(dev, dinfo);
5842 * don't set the state for display devices, base peripherals and
5843 * memory devices since bad things happen when they are powered down.
5844 * We should (a) have drivers that can easily detach and (b) use
5845 * generic drivers for these devices so that some device actually
5846 * attaches. We need to make sure that when we implement (a) we don't
5847 * power the device down on a reattach.
5849 cls = pci_get_class(dev);
5852 switch (pci_do_power_nodriver)
5854 case 0: /* NO powerdown at all */
5856 case 1: /* Conservative about what to power down */
5857 if (cls == PCIC_STORAGE)
5860 case 2: /* Aggressive about what to power down */
5861 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5862 cls == PCIC_BASEPERIPH)
5865 case 3: /* Power down everything */
5869 * PCI spec says we can only go into D3 state from D0 state.
5870 * Transition from D[12] into D0 before going to D3 state.
5872 ps = pci_get_powerstate(dev);
5873 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5874 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5875 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5876 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5879 /* Wrapper APIs suitable for device driver use. */
5881 pci_save_state(device_t dev)
5883 struct pci_devinfo *dinfo;
5885 dinfo = device_get_ivars(dev);
5886 pci_cfg_save(dev, dinfo, 0);
5890 pci_restore_state(device_t dev)
5892 struct pci_devinfo *dinfo;
5894 dinfo = device_get_ivars(dev);
5895 pci_cfg_restore(dev, dinfo);
5899 pci_get_id_method(device_t dev, device_t child, enum pci_id_type type,
5903 return (PCIB_GET_ID(device_get_parent(dev), child, type, id));
5906 /* Find the upstream port of a given PCI device in a root complex. */
5908 pci_find_pcie_root_port(device_t dev)
5910 struct pci_devinfo *dinfo;
5911 devclass_t pci_class;
5914 pci_class = devclass_find("pci");
5915 KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
5916 ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
5919 * Walk the bridge hierarchy until we find a PCI-e root
5920 * port or a non-PCI device.
5923 bus = device_get_parent(dev);
5924 KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
5925 device_get_nameunit(dev)));
5927 pcib = device_get_parent(bus);
5928 KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
5929 device_get_nameunit(bus)));
5932 * pcib's parent must be a PCI bus for this to be a
5935 if (device_get_devclass(device_get_parent(pcib)) != pci_class)
5938 dinfo = device_get_ivars(pcib);
5939 if (dinfo->cfg.pcie.pcie_location != 0 &&
5940 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)
5948 * Wait for pending transactions to complete on a PCI-express function.
5950 * The maximum delay is specified in milliseconds in max_delay. Note
5951 * that this function may sleep.
5953 * Returns true if the function is idle and false if the timeout is
5954 * exceeded. If dev is not a PCI-express function, this returns true.
5957 pcie_wait_for_pending_transactions(device_t dev, u_int max_delay)
5959 struct pci_devinfo *dinfo = device_get_ivars(dev);
5963 cap = dinfo->cfg.pcie.pcie_location;
5967 sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2);
5968 while (sta & PCIEM_STA_TRANSACTION_PND) {
5972 /* Poll once every 100 milliseconds up to the timeout. */
5973 if (max_delay > 100) {
5974 pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK);
5977 pause_sbt("pcietp", max_delay * SBT_1MS, 0,
5981 sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2);
5988 * Determine the maximum Completion Timeout in microseconds.
5990 * For non-PCI-express functions this returns 0.
5993 pcie_get_max_completion_timeout(device_t dev)
5995 struct pci_devinfo *dinfo = device_get_ivars(dev);
5998 cap = dinfo->cfg.pcie.pcie_location;
6003 * Functions using the 1.x spec use the default timeout range of
6004 * 50 microseconds to 50 milliseconds. Functions that do not
6005 * support programmable timeouts also use this range.
6007 if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 ||
6008 (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) &
6009 PCIEM_CAP2_COMP_TIMO_RANGES) == 0)
6012 switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) &
6013 PCIEM_CTL2_COMP_TIMO_VAL) {
6014 case PCIEM_CTL2_COMP_TIMO_100US:
6016 case PCIEM_CTL2_COMP_TIMO_10MS:
6018 case PCIEM_CTL2_COMP_TIMO_55MS:
6020 case PCIEM_CTL2_COMP_TIMO_210MS:
6021 return (210 * 1000);
6022 case PCIEM_CTL2_COMP_TIMO_900MS:
6023 return (900 * 1000);
6024 case PCIEM_CTL2_COMP_TIMO_3500MS:
6025 return (3500 * 1000);
6026 case PCIEM_CTL2_COMP_TIMO_13S:
6027 return (13 * 1000 * 1000);
6028 case PCIEM_CTL2_COMP_TIMO_64S:
6029 return (64 * 1000 * 1000);
6036 * Perform a Function Level Reset (FLR) on a device.
6038 * This function first waits for any pending transactions to complete
6039 * within the timeout specified by max_delay. If transactions are
6040 * still pending, the function will return false without attempting a
6043 * If dev is not a PCI-express function or does not support FLR, this
6044 * function returns false.
6046 * Note that no registers are saved or restored. The caller is
6047 * responsible for saving and restoring any registers including
6048 * PCI-standard registers via pci_save_state() and
6049 * pci_restore_state().
6052 pcie_flr(device_t dev, u_int max_delay, bool force)
6054 struct pci_devinfo *dinfo = device_get_ivars(dev);
6059 cap = dinfo->cfg.pcie.pcie_location;
6063 if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR))
6067 * Disable busmastering to prevent generation of new
6068 * transactions while waiting for the device to go idle. If
6069 * the idle timeout fails, the command register is restored
6070 * which will re-enable busmastering.
6072 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
6073 pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2);
6074 if (!pcie_wait_for_pending_transactions(dev, max_delay)) {
6076 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
6079 pci_printf(&dinfo->cfg,
6080 "Resetting with transactions pending after %d ms\n",
6084 * Extend the post-FLR delay to cover the maximum
6085 * Completion Timeout delay of anything in flight
6086 * during the FLR delay. Enforce a minimum delay of
6089 compl_delay = pcie_get_max_completion_timeout(dev) / 1000;
6090 if (compl_delay < 10)
6095 /* Initiate the reset. */
6096 ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
6097 pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl |
6098 PCIEM_CTL_INITIATE_FLR, 2);
6100 /* Wait for 100ms. */
6101 pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK);
6103 if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) &
6104 PCIEM_STA_TRANSACTION_PND)
6105 pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n");