2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/limits.h>
39 #include <sys/linker.h>
40 #include <sys/fcntl.h>
42 #include <sys/kernel.h>
43 #include <sys/queue.h>
44 #include <sys/sysctl.h>
45 #include <sys/endian.h>
49 #include <vm/vm_extern.h>
52 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <machine/stdarg.h>
57 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
58 #include <machine/intr_machdep.h>
61 #include <sys/pciio.h>
62 #include <dev/pci/pcireg.h>
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pci_private.h>
68 #include <dev/pci/pci_iov_private.h>
71 #include <dev/usb/controller/xhcireg.h>
72 #include <dev/usb/controller/ehcireg.h>
73 #include <dev/usb/controller/ohcireg.h>
74 #include <dev/usb/controller/uhcireg.h>
/*
 * PCIR_IS_BIOS(): true when `reg` is the expansion-ROM BAR register for
 * this header type (type-0 devices use PCIR_BIOS, type-1 bridges use
 * PCIR_BIOS_1).
 *
 * Below: forward declarations for the static helpers implemented later
 * in this file.  NOTE(review): this chunk is a lossy extraction — the
 * original file's line numbers are embedded in each line and show gaps,
 * so some declarations are missing from this view.
 */
79 #define PCIR_IS_BIOS(cfg, reg) \
80 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
81 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
83 static int pci_has_quirk(uint32_t devid, int quirk);
84 static pci_addr_t pci_mapbase(uint64_t mapreg);
85 static const char *pci_maptype(uint64_t mapreg);
86 static int pci_maprange(uint64_t mapreg);
87 static pci_addr_t pci_rombase(uint64_t mapreg);
88 static int pci_romsize(uint64_t testval);
89 static void pci_fixancient(pcicfgregs *cfg);
90 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
92 static int pci_porten(device_t dev);
93 static int pci_memen(device_t dev);
94 static void pci_assign_interrupt(device_t bus, device_t dev,
96 static int pci_add_map(device_t bus, device_t dev, int reg,
97 struct resource_list *rl, int force, int prefetch);
98 static int pci_probe(device_t dev);
99 static int pci_attach(device_t dev);
100 static int pci_detach(device_t dev);
101 static void pci_load_vendor_data(void);
102 static int pci_describe_parse_line(char **ptr, int *vendor,
103 int *device, char **desc);
104 static char *pci_describe_device(device_t dev);
105 static int pci_modevent(module_t mod, int what, void *arg);
106 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
108 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
109 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
110 int reg, uint32_t *data);
112 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
113 int reg, uint32_t data);
115 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
116 static void pci_mask_msix(device_t dev, u_int index);
117 static void pci_unmask_msix(device_t dev, u_int index);
118 static int pci_msi_blacklisted(void);
119 static int pci_msix_blacklisted(void);
120 static void pci_resume_msi(device_t dev);
121 static void pci_resume_msix(device_t dev);
122 static int pci_remap_intr_method(device_t bus, device_t dev,
125 static int pci_get_id_method(device_t dev, device_t child,
126 enum pci_id_type type, uintptr_t *rid);
128 static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d,
129 int b, int s, int f, uint16_t vid, uint16_t did);
/*
 * kobj method dispatch table for the PCI bus driver: device lifecycle
 * methods, newbus bus_* methods, and the pci_* interface methods, followed
 * by the driver class definition and module registration glue.
 * NOTE(review): interior lines are missing from this extraction (the
 * original line numbers embedded below show gaps), so some entries and
 * the DEVMETHOD_END terminator are not visible here.
 */
131 static device_method_t pci_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, pci_probe),
134 DEVMETHOD(device_attach, pci_attach),
135 DEVMETHOD(device_detach, pci_detach),
136 DEVMETHOD(device_shutdown, bus_generic_shutdown),
137 DEVMETHOD(device_suspend, bus_generic_suspend),
138 DEVMETHOD(device_resume, pci_resume),
141 DEVMETHOD(bus_print_child, pci_print_child),
142 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
143 DEVMETHOD(bus_read_ivar, pci_read_ivar),
144 DEVMETHOD(bus_write_ivar, pci_write_ivar),
145 DEVMETHOD(bus_driver_added, pci_driver_added),
146 DEVMETHOD(bus_setup_intr, pci_setup_intr),
147 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
149 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
150 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
151 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
152 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
153 DEVMETHOD(bus_delete_resource, pci_delete_resource),
154 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
155 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
156 DEVMETHOD(bus_release_resource, pci_release_resource),
157 DEVMETHOD(bus_activate_resource, pci_activate_resource),
158 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
159 DEVMETHOD(bus_child_deleted, pci_child_deleted),
160 DEVMETHOD(bus_child_detached, pci_child_detached),
161 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
162 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
163 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
164 DEVMETHOD(bus_suspend_child, pci_suspend_child),
165 DEVMETHOD(bus_resume_child, pci_resume_child),
166 DEVMETHOD(bus_rescan, pci_rescan_method),
169 DEVMETHOD(pci_read_config, pci_read_config_method),
170 DEVMETHOD(pci_write_config, pci_write_config_method),
171 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
172 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
173 DEVMETHOD(pci_enable_io, pci_enable_io_method),
174 DEVMETHOD(pci_disable_io, pci_disable_io_method),
175 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
176 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
177 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
178 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
179 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
180 DEVMETHOD(pci_find_cap, pci_find_cap_method),
181 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
182 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
183 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
184 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
185 DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
186 DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
187 DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
188 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
189 DEVMETHOD(pci_release_msi, pci_release_msi_method),
190 DEVMETHOD(pci_msi_count, pci_msi_count_method),
191 DEVMETHOD(pci_msix_count, pci_msix_count_method),
192 DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method),
193 DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method),
194 DEVMETHOD(pci_get_id, pci_get_id_method),
195 DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method),
196 DEVMETHOD(pci_child_added, pci_child_added_method),
198 DEVMETHOD(pci_iov_attach, pci_iov_attach_method),
199 DEVMETHOD(pci_iov_detach, pci_iov_detach_method),
200 DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method),
206 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
208 static devclass_t pci_devclass;
209 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
210 MODULE_VERSION(pci, 1);
212 static char *pci_vendordata;
213 static size_t pci_vendordata_size;
/*
 * Device quirk table.  Each entry matches on the combined vendor/device
 * ID (devid = device << 16 | vendor, as read from config space) and
 * names a quirk type; arg1 holds a quirk-specific argument (e.g. the
 * odd map-register offset for PCI_QUIRK_MAP_REG).
 * NOTE(review): the struct pci_quirk definition is only partially
 * visible in this extraction.
 */
216 uint32_t devid; /* Vendor/device of the card */
218 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
219 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
220 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
221 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
222 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
223 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
228 static const struct pci_quirk pci_quirks[] = {
229 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
230 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
231 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
232 /* As does the Serverworks OSB4 (the SMBus mapping register) */
233 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
236 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
237 * or the CMIC-SL (AKA ServerWorks GC_LE).
239 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 * MSI doesn't work on earlier Intel chipsets including
244 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
246 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
247 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
248 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
249 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
250 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
251 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
252 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
255 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
258 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
261 * MSI-X allocation doesn't work properly for devices passed through
262 * by VMware up to at least ESXi 5.1.
264 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
265 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
268 * Some virtualization environments emulate an older chipset
269 * but support MSI just fine. QEMU uses the Intel 82440.
271 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
274 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
275 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
276 * It prevents us from attaching hpet(4) when the bit is unset.
277 * Note this quirk only affects SB600 revision A13 and earlier.
278 * For SB600 A21 and later, firmware must set the bit to hide it.
279 * For SB700 and later, it is unused and hardcoded to zero.
281 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
284 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
285 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
286 * command register is set.
288 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
289 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
290 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
293 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
294 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
296 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
297 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
298 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
299 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
300 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
301 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
306 /* map register information */
307 #define PCI_MAPMEM 0x01 /* memory map */
308 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
309 #define PCI_MAPPORT 0x04 /* port map */
/*
 * Global device list, generation counters, chipset-type flags, and the
 * hw.pci sysctl/tunable knobs controlling BAR allocation, power
 * management, MSI/MSI-X usage, USB takeover, and ARI support.
 */
311 struct devlist pci_devq;
312 uint32_t pci_generation;
313 uint32_t pci_numdevs = 0;
314 static int pcie_chipset, pcix_chipset;
317 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
319 static int pci_enable_io_modes = 1;
320 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
321 &pci_enable_io_modes, 1,
322 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
323 enable these bits correctly. We'd like to do this all the time, but there\n\
324 are some peripherals that this causes problems with.");
326 static int pci_do_realloc_bars = 0;
327 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
328 &pci_do_realloc_bars, 0,
329 "Attempt to allocate a new range for any BARs whose original "
330 "firmware-assigned ranges fail to allocate during the initial device scan.");
332 static int pci_do_power_nodriver = 0;
333 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
334 &pci_do_power_nodriver, 0,
335 "Place a function into D3 state when no driver attaches to it. 0 means\n\
336 disable. 1 means conservatively place devices into D3 state. 2 means\n\
337 aggressively place devices into D3 state. 3 means put absolutely everything\n\
340 int pci_do_power_resume = 1;
341 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
342 &pci_do_power_resume, 1,
343 "Transition from D3 -> D0 on resume.");
345 int pci_do_power_suspend = 1;
346 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
347 &pci_do_power_suspend, 1,
348 "Transition from D0 -> D3 on suspend.");
350 static int pci_do_msi = 1;
351 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
352 "Enable support for MSI interrupts");
354 static int pci_do_msix = 1;
355 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
356 "Enable support for MSI-X interrupts");
358 static int pci_honor_msi_blacklist = 1;
359 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
360 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
362 #if defined(__i386__) || defined(__amd64__)
363 static int pci_usb_takeover = 1;
365 static int pci_usb_takeover = 0;
367 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
368 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
369 Disable this if you depend on BIOS emulation of USB devices, that is\n\
370 you use USB devices (like keyboard or mouse) but do not load USB drivers");
372 static int pci_clear_bars;
373 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
374 "Ignore firmware-assigned resources for BARs.");
376 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
377 static int pci_clear_buses;
378 SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
379 "Ignore firmware-assigned bus numbers.");
382 static int pci_enable_ari = 1;
383 SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
384 0, "Enable support for PCIe Alternative RID Interpretation");
/*
 * Scan pci_quirks[] for an entry matching both the combined
 * vendor/device ID and the requested quirk type.
 * NOTE(review): the return statements are not visible in this
 * extraction (line numbers skip from 392 to 398).
 */
387 pci_has_quirk(uint32_t devid, int quirk)
389 const struct pci_quirk *q;
391 for (q = &pci_quirks[0]; q->devid; q++) {
392 if (q->devid == devid && q->type == quirk)
398 /* Find a device_t by bus/slot/function in domain 0 */
/* Convenience wrapper: look up a device_t by bus/slot/function in domain 0. */
401 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
404 return (pci_find_dbsf(0, bus, slot, func));
407 /* Find a device_t by domain/bus/slot/function */
/*
 * Walk the global pci_devq list and return the device_t whose config
 * selector matches domain/bus/slot/function.
 * NOTE(review): the not-found return path is missing from this view.
 */
410 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
412 struct pci_devinfo *dinfo;
414 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
415 if ((dinfo->cfg.domain == domain) &&
416 (dinfo->cfg.bus == bus) &&
417 (dinfo->cfg.slot == slot) &&
418 (dinfo->cfg.func == func)) {
419 return (dinfo->cfg.dev);
426 /* Find a device_t by vendor/device ID */
/*
 * Return the first device on pci_devq matching the given vendor and
 * device IDs.  NOTE(review): the not-found return path is missing from
 * this view.
 */
429 pci_find_device(uint16_t vendor, uint16_t device)
431 struct pci_devinfo *dinfo;
433 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
434 if ((dinfo->cfg.vendor == vendor) &&
435 (dinfo->cfg.device == device)) {
436 return (dinfo->cfg.dev);
/*
 * Return the first device on pci_devq matching the given base class and
 * subclass codes.  NOTE(review): the not-found return path is missing
 * from this view.
 */
444 pci_find_class(uint8_t class, uint8_t subclass)
446 struct pci_devinfo *dinfo;
448 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
449 if (dinfo->cfg.baseclass == class &&
450 dinfo->cfg.subclass == subclass) {
451 return (dinfo->cfg.dev);
/*
 * printf() prefixed with the device's "pciD:B:S:F: " location; returns
 * the total character count.  NOTE(review): the va_start/va_end lines
 * are missing from this extraction.
 */
459 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
464 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
467 retval += vprintf(fmt, ap);
472 /* return base address of memory or port map */
/*
 * Extract the base address from a BAR value, masking the type bits
 * appropriately for memory vs. I/O-port BARs.
 */
475 pci_mapbase(uint64_t mapreg)
478 if (PCI_BAR_MEM(mapreg))
479 return (mapreg & PCIM_BAR_MEM_BASE)
481 return (mapreg & PCIM_BAR_IO_BASE);
484 /* return map type of memory or port map */
/*
 * Return a human-readable string describing a BAR's type (I/O port,
 * prefetchable memory, or plain memory).  NOTE(review): the I/O and
 * plain-memory return strings are missing from this view.
 */
487 pci_maptype(uint64_t mapreg)
490 if (PCI_BAR_IO(mapreg))
492 if (mapreg & PCIM_BAR_MEM_PREFETCH)
493 return ("Prefetchable Memory");
497 /* return log2 of map size decoded for memory or port map */
/*
 * Given the value read back after writing all-ones to a BAR, compute
 * log2 of the decoded region size by counting trailing zero bits of
 * the base.  NOTE(review): counter setup and return are missing from
 * this view.
 */
500 pci_mapsize(uint64_t testval)
504 testval = pci_mapbase(testval);
507 while ((testval & 1) == 0)
516 /* return base address of device ROM */
/* Extract the base address from an expansion-ROM BAR value. */
519 pci_rombase(uint64_t mapreg)
522 return (mapreg & PCIM_BIOS_ADDR_MASK);
525 /* return log2 of map size decided for device ROM */
/*
 * Compute log2 of the expansion-ROM size from the sizing readback,
 * analogous to pci_mapsize().  NOTE(review): counter setup and return
 * are missing from this view.
 */
528 pci_romsize(uint64_t testval)
532 testval = pci_rombase(testval);
535 while ((testval & 1) == 0)
544 /* return log2 of address range supported by map register */
/*
 * Return log2 of the address range a BAR can decode: I/O BARs and the
 * 32-bit/1MB/64-bit memory BAR types each map to a fixed width.
 * NOTE(review): the per-case return values are missing from this view.
 */
547 pci_maprange(uint64_t mapreg)
551 if (PCI_BAR_IO(mapreg))
554 switch (mapreg & PCIM_BAR_MEM_TYPE) {
555 case PCIM_BAR_MEM_32:
558 case PCIM_BAR_MEM_1MB:
561 case PCIM_BAR_MEM_64:
568 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/*
 * Fix up pre-PCI-2.0 devices: if a type-0 header claims to be a
 * PCI-PCI bridge by class code, force the header type to bridge.
 */
571 pci_fixancient(pcicfgregs *cfg)
573 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
576 /* PCI to PCI bridges use header type 1 */
577 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
578 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
581 /* extract header type specific config data */
/*
 * Read header-type-specific config registers into *cfg: subsystem IDs
 * and timing for type-0 devices, bridge bus numbers/latency/control for
 * type-1 bridges and type-2 cardbus bridges; also sets the number of
 * BARs (nummaps) for the header type.
 */
584 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
586 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
587 switch (cfg->hdrtype & PCIM_HDRTYPE) {
588 case PCIM_HDRTYPE_NORMAL:
589 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
590 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
591 cfg->mingnt = REG(PCIR_MINGNT, 1);
592 cfg->maxlat = REG(PCIR_MAXLAT, 1);
593 cfg->nummaps = PCI_MAXMAPS_0;
595 case PCIM_HDRTYPE_BRIDGE:
596 cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1);
597 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1);
598 cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1);
599 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1);
600 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2);
601 cfg->nummaps = PCI_MAXMAPS_1;
603 case PCIM_HDRTYPE_CARDBUS:
604 cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1);
605 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1);
606 cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1);
607 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1);
608 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2);
609 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
610 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
611 cfg->nummaps = PCI_MAXMAPS_2;
617 /* read configuration header into pcicfgregs structure */
/*
 * Read the vendor/device IDs for the function at d:b:s:f via the parent
 * bridge and, if present, build a pci_devinfo via pci_fill_devinfo().
 * NOTE(review): the validity check between the reads and the call is
 * missing from this view.
 */
619 pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f)
621 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
624 vid = REG(PCIR_VENDOR, 2);
625 did = REG(PCIR_DEVICE, 2);
627 return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did));
/* Default PCI_ALLOC_DEVINFO method: allocate a zeroed pci_devinfo. */
633 pci_alloc_devinfo_method(device_t dev)
636 return (malloc(sizeof(struct pci_devinfo), M_DEVBUF,
/*
 * Allocate a pci_devinfo via the bus's PCI_ALLOC_DEVINFO method, read
 * the common config-space header into its pcicfgregs, pull in
 * header-type-specific data and the capability list, then link the
 * entry onto the global pci_devq and populate the pci_conf snapshot
 * used by the pciconf(8) ioctl interface.
 */
640 static struct pci_devinfo *
641 pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f,
642 uint16_t vid, uint16_t did)
644 struct pci_devinfo *devlist_entry;
647 devlist_entry = PCI_ALLOC_DEVINFO(bus);
649 cfg = &devlist_entry->cfg;
657 cfg->cmdreg = REG(PCIR_COMMAND, 2);
658 cfg->statreg = REG(PCIR_STATUS, 2);
659 cfg->baseclass = REG(PCIR_CLASS, 1);
660 cfg->subclass = REG(PCIR_SUBCLASS, 1);
661 cfg->progif = REG(PCIR_PROGIF, 1);
662 cfg->revid = REG(PCIR_REVID, 1);
663 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
664 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
665 cfg->lattimer = REG(PCIR_LATTIMER, 1);
666 cfg->intpin = REG(PCIR_INTPIN, 1);
667 cfg->intline = REG(PCIR_INTLINE, 1);
669 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
670 cfg->hdrtype &= ~PCIM_MFDEV;
671 STAILQ_INIT(&cfg->maps);
676 pci_hdrtypedata(pcib, b, s, f, cfg);
678 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
679 pci_read_cap(pcib, cfg);
681 STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links);
683 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
684 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
685 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
686 devlist_entry->conf.pc_sel.pc_func = cfg->func;
687 devlist_entry->conf.pc_hdr = cfg->hdrtype;
689 devlist_entry->conf.pc_subvendor = cfg->subvendor;
690 devlist_entry->conf.pc_subdevice = cfg->subdevice;
691 devlist_entry->conf.pc_vendor = cfg->vendor;
692 devlist_entry->conf.pc_device = cfg->device;
694 devlist_entry->conf.pc_class = cfg->baseclass;
695 devlist_entry->conf.pc_subclass = cfg->subclass;
696 devlist_entry->conf.pc_progif = cfg->progif;
697 devlist_entry->conf.pc_revid = cfg->revid;
702 return (devlist_entry);
/*
 * Parse the Enhanced Allocation (EA) capability: walk each EA entry,
 * decode its flags/BEI, assemble the (possibly 64-bit) base and
 * max_offset fields, and append a pci_ea_entry to cfg->ea.ea_entries.
 * No-op when the capability was not found (ea_location == 0).
 * NOTE(review): several interior lines (dword reads, pointer advance)
 * are missing from this extraction.
 */
707 pci_ea_fill_info(device_t pcib, pcicfgregs *cfg)
709 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \
710 cfg->ea.ea_location + (n), w)
717 uint64_t base, max_offset;
718 struct pci_ea_entry *eae;
720 if (cfg->ea.ea_location == 0)
723 STAILQ_INIT(&cfg->ea.ea_entries);
725 /* Determine the number of entries */
726 num_ent = REG(PCIR_EA_NUM_ENT, 2);
727 num_ent &= PCIM_EA_NUM_ENT_MASK;
729 /* Find the first entry to care of */
730 ptr = PCIR_EA_FIRST_ENT;
732 /* Skip DWORD 2 for type 1 functions */
733 if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE)
736 for (a = 0; a < num_ent; a++) {
738 eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO);
739 eae->eae_cfg_offset = cfg->ea.ea_location + ptr;
741 /* Read a number of dwords in the entry */
744 ent_size = (val & PCIM_EA_ES);
746 for (b = 0; b < ent_size; b++) {
751 eae->eae_flags = val;
752 eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET;
754 base = dw[0] & PCIM_EA_FIELD_MASK;
755 max_offset = dw[1] | ~PCIM_EA_FIELD_MASK;
757 if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) {
758 base |= (uint64_t)dw[b] << 32UL;
761 if (((dw[1] & PCIM_EA_IS_64) != 0)
763 max_offset |= (uint64_t)dw[b] << 32UL;
767 eae->eae_base = base;
768 eae->eae_max_offset = max_offset;
770 STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link);
773 printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n",
774 cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags,
775 (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset);
/*
 * Walk the standard PCI capability list for this function and record
 * the location/contents of each capability we care about: power
 * management, HyperTransport (including MSI mapping on x86/powerpc),
 * MSI, MSI-X, VPD, subvendor for bridges, PCI-X, PCI Express, and
 * Enhanced Allocation.  On powerpc, additionally enables the MSI
 * mapping window on HT slaves.
 * NOTE(review): interior lines (break statements, some assignments)
 * are missing from this lossy extraction.
 */
782 pci_read_cap(device_t pcib, pcicfgregs *cfg)
784 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
785 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
786 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
790 int ptr, nextptr, ptrptr;
792 switch (cfg->hdrtype & PCIM_HDRTYPE) {
793 case PCIM_HDRTYPE_NORMAL:
794 case PCIM_HDRTYPE_BRIDGE:
795 ptrptr = PCIR_CAP_PTR;
797 case PCIM_HDRTYPE_CARDBUS:
798 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
801 return; /* no extended capabilities support */
803 nextptr = REG(ptrptr, 1); /* sanity check? */
806 * Read capability entries.
808 while (nextptr != 0) {
811 printf("illegal PCI extended capability offset %d\n",
815 /* Find the next entry */
817 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
819 /* Process this entry */
820 switch (REG(ptr + PCICAP_ID, 1)) {
821 case PCIY_PMG: /* PCI power management */
822 if (cfg->pp.pp_cap == 0) {
823 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
824 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
825 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
826 if ((nextptr - ptr) > PCIR_POWER_DATA)
827 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
830 case PCIY_HT: /* HyperTransport */
831 /* Determine HT-specific capability type. */
832 val = REG(ptr + PCIR_HT_COMMAND, 2);
834 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
835 cfg->ht.ht_slave = ptr;
837 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
838 switch (val & PCIM_HTCMD_CAP_MASK) {
839 case PCIM_HTCAP_MSI_MAPPING:
840 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
841 /* Sanity check the mapping window. */
842 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
845 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
847 if (addr != MSI_INTEL_ADDR_BASE)
849 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
850 cfg->domain, cfg->bus,
851 cfg->slot, cfg->func,
854 addr = MSI_INTEL_ADDR_BASE;
856 cfg->ht.ht_msimap = ptr;
857 cfg->ht.ht_msictrl = val;
858 cfg->ht.ht_msiaddr = addr;
863 case PCIY_MSI: /* PCI MSI */
864 cfg->msi.msi_location = ptr;
865 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
866 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
867 PCIM_MSICTRL_MMC_MASK)>>1);
869 case PCIY_MSIX: /* PCI MSI-X */
870 cfg->msix.msix_location = ptr;
871 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
872 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
873 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
874 val = REG(ptr + PCIR_MSIX_TABLE, 4);
875 cfg->msix.msix_table_bar = PCIR_BAR(val &
877 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
878 val = REG(ptr + PCIR_MSIX_PBA, 4);
879 cfg->msix.msix_pba_bar = PCIR_BAR(val &
881 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
883 case PCIY_VPD: /* PCI Vital Product Data */
884 cfg->vpd.vpd_reg = ptr;
887 /* Should always be true. */
888 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
889 PCIM_HDRTYPE_BRIDGE) {
890 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
891 cfg->subvendor = val & 0xffff;
892 cfg->subdevice = val >> 16;
895 case PCIY_PCIX: /* PCI-X */
897 * Assume we have a PCI-X chipset if we have
898 * at least one PCI-PCI bridge with a PCI-X
899 * capability. Note that some systems with
900 * PCI-express or HT chipsets might match on
901 * this check as well.
903 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
906 cfg->pcix.pcix_location = ptr;
908 case PCIY_EXPRESS: /* PCI-express */
910 * Assume we have a PCI-express chipset if we have
911 * at least one PCI-express device.
914 cfg->pcie.pcie_location = ptr;
915 val = REG(ptr + PCIER_FLAGS, 2);
916 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
918 case PCIY_EA: /* Enhanced Allocation */
919 cfg->ea.ea_location = ptr;
920 pci_ea_fill_info(pcib, cfg);
927 #if defined(__powerpc__)
929 * Enable the MSI mapping window for all HyperTransport
930 * slaves. PCI-PCI bridges have their windows enabled via
933 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
934 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
936 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
937 cfg->domain, cfg->bus, cfg->slot, cfg->func);
938 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
939 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
943 /* REG and WREG use carry through to next functions */
947 * PCI Vital Product Data
/*
 * Read one aligned 32-bit word of VPD data: write the address to the
 * VPD address register, poll for the completion flag (bit 15) with a
 * bounded spin (PCI_VPD_TIMEOUT iterations of DELAY(1)), then read the
 * data register.  NOTE(review): the timeout-exhausted and success
 * return paths are missing from this view.
 */
950 #define PCI_VPD_TIMEOUT 1000000
953 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
955 int count = PCI_VPD_TIMEOUT;
957 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
959 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
961 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
964 DELAY(1); /* limit looping */
966 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one aligned 32-bit word of VPD data: load the data register,
 * write the address with the write-flag (bit 15) set, then poll until
 * the hardware clears the flag, bounded by PCI_VPD_TIMEOUT iterations.
 * NOTE(review): the return paths are missing from this view.
 */
973 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
975 int count = PCI_VPD_TIMEOUT;
977 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
979 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
980 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
981 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
984 DELAY(1); /* limit looping */
991 #undef PCI_VPD_TIMEOUT
/*
 * VPD byte-stream reader state and its accessor.  vpd_nextbyte()
 * returns the next byte of VPD data, refilling the 32-bit buffer
 * (little-endian) via pci_read_vpd_reg() whenever all buffered bytes
 * have been consumed.  NOTE(review): the struct members and the
 * decrement/return lines are missing from this extraction.
 */
993 struct vpd_readstate {
1003 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
1008 if (vrs->bytesinval == 0) {
1009 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
1011 vrs->val = le32toh(reg);
1013 byte = vrs->val & 0xff;
1014 vrs->bytesinval = 3;
1016 vrs->val = vrs->val >> 8;
1017 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array, and the writable (VPD-W)
 * keyword array, validating the RV checksum along the way.  Implemented
 * as a byte-at-a-time state machine (state 0 = resource tag, 1 = ident,
 * 2/3 = VPD-R header/value, 5/6 = VPD-W header/value); on checksum
 * failure or I/O error the partially built arrays are freed.  Sets
 * vpd_cached when done so later queries need not re-read hardware.
 * NOTE(review): many interior lines (state transitions, error-path
 * labels) are missing from this lossy extraction — the statement
 * ordering below must not be taken as complete.
 */
1027 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
1029 struct vpd_readstate vrs;
1034 int alloc, off; /* alloc/off for RO/W arrays */
1040 /* init vpd reader */
1048 name = remain = i = 0; /* shut up stupid gcc */
1049 alloc = off = 0; /* shut up stupid gcc */
1050 dflen = 0; /* shut up stupid gcc */
1052 while (state >= 0) {
1053 if (vpd_nextbyte(&vrs, &byte)) {
1058 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
1059 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
1060 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
1063 case 0: /* item name */
1065 if (vpd_nextbyte(&vrs, &byte2)) {
1070 if (vpd_nextbyte(&vrs, &byte2)) {
1074 remain |= byte2 << 8;
1075 if (remain > (0x7f*4 - vrs.off)) {
1078 "invalid VPD data, remain %#x\n",
1083 remain = byte & 0x7;
1084 name = (byte >> 3) & 0xf;
1087 case 0x2: /* String */
1088 cfg->vpd.vpd_ident = malloc(remain + 1,
1089 M_DEVBUF, M_WAITOK);
1096 case 0x10: /* VPD-R */
1099 cfg->vpd.vpd_ros = malloc(alloc *
1100 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1104 case 0x11: /* VPD-W */
1107 cfg->vpd.vpd_w = malloc(alloc *
1108 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1112 default: /* Invalid data, abort */
1118 case 1: /* Identifier String */
1119 cfg->vpd.vpd_ident[i++] = byte;
1122 cfg->vpd.vpd_ident[i] = '\0';
1127 case 2: /* VPD-R Keyword Header */
1129 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1130 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1131 M_DEVBUF, M_WAITOK | M_ZERO);
1133 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1134 if (vpd_nextbyte(&vrs, &byte2)) {
1138 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1139 if (vpd_nextbyte(&vrs, &byte2)) {
1143 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1145 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1148 * if this happens, we can't trust the rest
1151 pci_printf(cfg, "bad keyword length: %d\n",
1156 } else if (dflen == 0) {
1157 cfg->vpd.vpd_ros[off].value = malloc(1 *
1158 sizeof(*cfg->vpd.vpd_ros[off].value),
1159 M_DEVBUF, M_WAITOK);
1160 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1162 cfg->vpd.vpd_ros[off].value = malloc(
1164 sizeof(*cfg->vpd.vpd_ros[off].value),
1165 M_DEVBUF, M_WAITOK);
1168 /* keep in sync w/ state 3's transistions */
1169 if (dflen == 0 && remain == 0)
1171 else if (dflen == 0)
1177 case 3: /* VPD-R Keyword Value */
1178 cfg->vpd.vpd_ros[off].value[i++] = byte;
1179 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1180 "RV", 2) == 0 && cksumvalid == -1) {
1186 "bad VPD cksum, remain %hhu\n",
1195 /* keep in sync w/ state 2's transistions */
1197 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1198 if (dflen == 0 && remain == 0) {
1199 cfg->vpd.vpd_rocnt = off;
1200 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1201 off * sizeof(*cfg->vpd.vpd_ros),
1202 M_DEVBUF, M_WAITOK | M_ZERO);
1204 } else if (dflen == 0)
1214 case 5: /* VPD-W Keyword Header */
1216 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1217 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1218 M_DEVBUF, M_WAITOK | M_ZERO);
1220 cfg->vpd.vpd_w[off].keyword[0] = byte;
1221 if (vpd_nextbyte(&vrs, &byte2)) {
1225 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1226 if (vpd_nextbyte(&vrs, &byte2)) {
1230 cfg->vpd.vpd_w[off].len = dflen = byte2;
1231 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1232 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1233 sizeof(*cfg->vpd.vpd_w[off].value),
1234 M_DEVBUF, M_WAITOK);
1237 /* keep in sync w/ state 6's transistions */
1238 if (dflen == 0 && remain == 0)
1240 else if (dflen == 0)
1246 case 6: /* VPD-W Keyword Value */
1247 cfg->vpd.vpd_w[off].value[i++] = byte;
1250 /* keep in sync w/ state 5's transistions */
1252 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1253 if (dflen == 0 && remain == 0) {
1254 cfg->vpd.vpd_wcnt = off;
1255 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1256 off * sizeof(*cfg->vpd.vpd_w),
1257 M_DEVBUF, M_WAITOK | M_ZERO);
1259 } else if (dflen == 0)
1264 pci_printf(cfg, "invalid state: %d\n", state);
1270 if (cksumvalid == 0 || state < -1) {
1271 /* read-only data bad, clean up */
1272 if (cfg->vpd.vpd_ros != NULL) {
1273 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1274 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1275 free(cfg->vpd.vpd_ros, M_DEVBUF);
1276 cfg->vpd.vpd_ros = NULL;
1280 /* I/O error, clean up */
1281 pci_printf(cfg, "failed to read VPD data.\n");
1282 if (cfg->vpd.vpd_ident != NULL) {
1283 free(cfg->vpd.vpd_ident, M_DEVBUF);
1284 cfg->vpd.vpd_ident = NULL;
1286 if (cfg->vpd.vpd_w != NULL) {
1287 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1288 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1289 free(cfg->vpd.vpd_w, M_DEVBUF);
1290 cfg->vpd.vpd_w = NULL;
1293 cfg->vpd.vpd_cached = 1;
1299 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1301 struct pci_devinfo *dinfo = device_get_ivars(child);
1302 pcicfgregs *cfg = &dinfo->cfg;
1304 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1305 pci_read_vpd(device_get_parent(dev), cfg);
1307 *identptr = cfg->vpd.vpd_ident;
1309 if (*identptr == NULL)
1316 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1319 struct pci_devinfo *dinfo = device_get_ivars(child);
1320 pcicfgregs *cfg = &dinfo->cfg;
1323 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1324 pci_read_vpd(device_get_parent(dev), cfg);
1326 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1327 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1328 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1329 *vptr = cfg->vpd.vpd_ros[i].value;
1338 pci_fetch_vpd_list(device_t dev)
1340 struct pci_devinfo *dinfo = device_get_ivars(dev);
1341 pcicfgregs *cfg = &dinfo->cfg;
1343 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1344 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1349 * Find the requested HyperTransport capability and return the offset
1350 * in configuration space via the pointer provided. The function
1351 * returns 0 on success and an error code otherwise.
1354 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1359 error = pci_find_cap(child, PCIY_HT, &ptr);
1364 * Traverse the capabilities list checking each HT capability
1365 * to see if it matches the requested HT capability.
1368 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1369 if (capability == PCIM_HTCAP_SLAVE ||
1370 capability == PCIM_HTCAP_HOST)
1373 val &= PCIM_HTCMD_CAP_MASK;
1374 if (val == capability) {
1380 /* Skip to the next HT capability. */
1382 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1383 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1392 * Find the requested capability and return the offset in
1393 * configuration space via the pointer provided. The function returns
1394 * 0 on success and an error code otherwise.
1397 pci_find_cap_method(device_t dev, device_t child, int capability,
1400 struct pci_devinfo *dinfo = device_get_ivars(child);
1401 pcicfgregs *cfg = &dinfo->cfg;
1406 * Check the CAP_LIST bit of the PCI status register first.
1408 status = pci_read_config(child, PCIR_STATUS, 2);
1409 if (!(status & PCIM_STATUS_CAPPRESENT))
1413 * Determine the start pointer of the capabilities list.
1415 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1416 case PCIM_HDRTYPE_NORMAL:
1417 case PCIM_HDRTYPE_BRIDGE:
1420 case PCIM_HDRTYPE_CARDBUS:
1421 ptr = PCIR_CAP_PTR_2;
1425 return (ENXIO); /* no extended capabilities support */
1427 ptr = pci_read_config(child, ptr, 1);
1430 * Traverse the capabilities list.
1433 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1438 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1445 * Find the requested extended capability and return the offset in
1446 * configuration space via the pointer provided. The function returns
1447 * 0 on success and an error code otherwise.
1450 pci_find_extcap_method(device_t dev, device_t child, int capability,
1453 struct pci_devinfo *dinfo = device_get_ivars(child);
1454 pcicfgregs *cfg = &dinfo->cfg;
1458 /* Only supported for PCI-express devices. */
1459 if (cfg->pcie.pcie_location == 0)
1463 ecap = pci_read_config(child, ptr, 4);
1464 if (ecap == 0xffffffff || ecap == 0)
1467 if (PCI_EXTCAP_ID(ecap) == capability) {
1472 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1475 ecap = pci_read_config(child, ptr, 4);
1482 * Support for MSI-X message interrupts.
1485 pci_enable_msix_method(device_t dev, device_t child, u_int index,
1486 uint64_t address, uint32_t data)
1488 struct pci_devinfo *dinfo = device_get_ivars(child);
1489 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1492 KASSERT(msix->msix_table_len > index, ("bogus index"));
1493 offset = msix->msix_table_offset + index * 16;
1494 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1495 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1496 bus_write_4(msix->msix_table_res, offset + 8, data);
1498 /* Enable MSI -> HT mapping. */
1499 pci_ht_map_msi(child, address);
1503 pci_mask_msix(device_t dev, u_int index)
1505 struct pci_devinfo *dinfo = device_get_ivars(dev);
1506 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1507 uint32_t offset, val;
1509 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1510 offset = msix->msix_table_offset + index * 16 + 12;
1511 val = bus_read_4(msix->msix_table_res, offset);
1512 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1513 val |= PCIM_MSIX_VCTRL_MASK;
1514 bus_write_4(msix->msix_table_res, offset, val);
1519 pci_unmask_msix(device_t dev, u_int index)
1521 struct pci_devinfo *dinfo = device_get_ivars(dev);
1522 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1523 uint32_t offset, val;
1525 KASSERT(msix->msix_table_len > index, ("bogus index"));
1526 offset = msix->msix_table_offset + index * 16 + 12;
1527 val = bus_read_4(msix->msix_table_res, offset);
1528 if (val & PCIM_MSIX_VCTRL_MASK) {
1529 val &= ~PCIM_MSIX_VCTRL_MASK;
1530 bus_write_4(msix->msix_table_res, offset, val);
1535 pci_pending_msix(device_t dev, u_int index)
1537 struct pci_devinfo *dinfo = device_get_ivars(dev);
1538 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1539 uint32_t offset, bit;
1541 KASSERT(msix->msix_table_len > index, ("bogus index"));
1542 offset = msix->msix_pba_offset + (index / 32) * 4;
1543 bit = 1 << index % 32;
1544 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1548 * Restore MSI-X registers and table during resume. If MSI-X is
1549 * enabled then walk the virtual table to restore the actual MSI-X
1553 pci_resume_msix(device_t dev)
1555 struct pci_devinfo *dinfo = device_get_ivars(dev);
1556 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1557 struct msix_table_entry *mte;
1558 struct msix_vector *mv;
1561 if (msix->msix_alloc > 0) {
1562 /* First, mask all vectors. */
1563 for (i = 0; i < msix->msix_msgnum; i++)
1564 pci_mask_msix(dev, i);
1566 /* Second, program any messages with at least one handler. */
1567 for (i = 0; i < msix->msix_table_len; i++) {
1568 mte = &msix->msix_table[i];
1569 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1571 mv = &msix->msix_vectors[mte->mte_vector - 1];
1572 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1573 pci_unmask_msix(dev, i);
1576 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1577 msix->msix_ctrl, 2);
1581 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1582 * returned in *count. After this function returns, each message will be
1583 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1586 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1588 struct pci_devinfo *dinfo = device_get_ivars(child);
1589 pcicfgregs *cfg = &dinfo->cfg;
1590 struct resource_list_entry *rle;
1591 int actual, error, i, irq, max;
1593 /* Don't let count == 0 get us into trouble. */
1597 /* If rid 0 is allocated, then fail. */
1598 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1599 if (rle != NULL && rle->res != NULL)
1602 /* Already have allocated messages? */
1603 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1606 /* If MSI-X is blacklisted for this system, fail. */
1607 if (pci_msix_blacklisted())
1610 /* MSI-X capability present? */
1611 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1614 /* Make sure the appropriate BARs are mapped. */
1615 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1616 cfg->msix.msix_table_bar);
1617 if (rle == NULL || rle->res == NULL ||
1618 !(rman_get_flags(rle->res) & RF_ACTIVE))
1620 cfg->msix.msix_table_res = rle->res;
1621 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1622 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1623 cfg->msix.msix_pba_bar);
1624 if (rle == NULL || rle->res == NULL ||
1625 !(rman_get_flags(rle->res) & RF_ACTIVE))
1628 cfg->msix.msix_pba_res = rle->res;
1631 device_printf(child,
1632 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1633 *count, cfg->msix.msix_msgnum);
1634 max = min(*count, cfg->msix.msix_msgnum);
1635 for (i = 0; i < max; i++) {
1636 /* Allocate a message. */
1637 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1643 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1649 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1651 device_printf(child, "using IRQ %ju for MSI-X\n",
1657 * Be fancy and try to print contiguous runs of
1658 * IRQ values as ranges. 'irq' is the previous IRQ.
1659 * 'run' is true if we are in a range.
1661 device_printf(child, "using IRQs %ju", rle->start);
1664 for (i = 1; i < actual; i++) {
1665 rle = resource_list_find(&dinfo->resources,
1666 SYS_RES_IRQ, i + 1);
1668 /* Still in a run? */
1669 if (rle->start == irq + 1) {
1675 /* Finish previous range. */
1681 /* Start new range. */
1682 printf(",%ju", rle->start);
1686 /* Unfinished range? */
1689 printf(" for MSI-X\n");
1693 /* Mask all vectors. */
1694 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1695 pci_mask_msix(child, i);
1697 /* Allocate and initialize vector data and virtual table. */
1698 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1699 M_DEVBUF, M_WAITOK | M_ZERO);
1700 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1701 M_DEVBUF, M_WAITOK | M_ZERO);
1702 for (i = 0; i < actual; i++) {
1703 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1704 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1705 cfg->msix.msix_table[i].mte_vector = i + 1;
1708 /* Update control register to enable MSI-X. */
1709 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1710 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1711 cfg->msix.msix_ctrl, 2);
1713 /* Update counts of alloc'd messages. */
1714 cfg->msix.msix_alloc = actual;
1715 cfg->msix.msix_table_len = actual;
1721 * By default, pci_alloc_msix() will assign the allocated IRQ
1722 * resources consecutively to the first N messages in the MSI-X table.
1723 * However, device drivers may want to use different layouts if they
1724 * either receive fewer messages than they asked for, or they wish to
1725 * populate the MSI-X table sparsely. This method allows the driver
1726 * to specify what layout it wants. It must be called after a
1727 * successful pci_alloc_msix() but before any of the associated
1728 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1730 * The 'vectors' array contains 'count' message vectors. The array
1731 * maps directly to the MSI-X table in that index 0 in the array
1732 * specifies the vector for the first message in the MSI-X table, etc.
1733 * The vector value in each array index can either be 0 to indicate
1734 * that no vector should be assigned to a message slot, or it can be a
1735 * number from 1 to N (where N is the count returned from a
1736 * succcessful call to pci_alloc_msix()) to indicate which message
1737 * vector (IRQ) to be used for the corresponding message.
1739 * On successful return, each message with a non-zero vector will have
1740 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1741 * 1. Additionally, if any of the IRQs allocated via the previous
1742 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1743 * will be freed back to the system automatically.
1745 * For example, suppose a driver has a MSI-X table with 6 messages and
1746 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1747 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1748 * C. After the call to pci_alloc_msix(), the device will be setup to
1749 * have an MSI-X table of ABC--- (where - means no vector assigned).
1750 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1751 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1752 * be freed back to the system. This device will also have valid
1753 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1755 * In any case, the SYS_RES_IRQ rid X will always map to the message
1756 * at MSI-X table index X - 1 and will only be valid if a vector is
1757 * assigned to that table entry.
1760 pci_remap_msix_method(device_t dev, device_t child, int count,
1761 const u_int *vectors)
1763 struct pci_devinfo *dinfo = device_get_ivars(child);
1764 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1765 struct resource_list_entry *rle;
1766 int i, irq, j, *used;
1769 * Have to have at least one message in the table but the
1770 * table can't be bigger than the actual MSI-X table in the
1773 if (count == 0 || count > msix->msix_msgnum)
1776 /* Sanity check the vectors. */
1777 for (i = 0; i < count; i++)
1778 if (vectors[i] > msix->msix_alloc)
1782 * Make sure there aren't any holes in the vectors to be used.
1783 * It's a big pain to support it, and it doesn't really make
1784 * sense anyway. Also, at least one vector must be used.
1786 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1788 for (i = 0; i < count; i++)
1789 if (vectors[i] != 0)
1790 used[vectors[i] - 1] = 1;
1791 for (i = 0; i < msix->msix_alloc - 1; i++)
1792 if (used[i] == 0 && used[i + 1] == 1) {
1793 free(used, M_DEVBUF);
1797 free(used, M_DEVBUF);
1801 /* Make sure none of the resources are allocated. */
1802 for (i = 0; i < msix->msix_table_len; i++) {
1803 if (msix->msix_table[i].mte_vector == 0)
1805 if (msix->msix_table[i].mte_handlers > 0) {
1806 free(used, M_DEVBUF);
1809 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1810 KASSERT(rle != NULL, ("missing resource"));
1811 if (rle->res != NULL) {
1812 free(used, M_DEVBUF);
1817 /* Free the existing resource list entries. */
1818 for (i = 0; i < msix->msix_table_len; i++) {
1819 if (msix->msix_table[i].mte_vector == 0)
1821 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1825 * Build the new virtual table keeping track of which vectors are
1828 free(msix->msix_table, M_DEVBUF);
1829 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1830 M_DEVBUF, M_WAITOK | M_ZERO);
1831 for (i = 0; i < count; i++)
1832 msix->msix_table[i].mte_vector = vectors[i];
1833 msix->msix_table_len = count;
1835 /* Free any unused IRQs and resize the vectors array if necessary. */
1836 j = msix->msix_alloc - 1;
1838 struct msix_vector *vec;
1840 while (used[j] == 0) {
1841 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1842 msix->msix_vectors[j].mv_irq);
1845 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1847 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1849 free(msix->msix_vectors, M_DEVBUF);
1850 msix->msix_vectors = vec;
1851 msix->msix_alloc = j + 1;
1853 free(used, M_DEVBUF);
1855 /* Map the IRQs onto the rids. */
1856 for (i = 0; i < count; i++) {
1857 if (vectors[i] == 0)
1859 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1860 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1865 device_printf(child, "Remapped MSI-X IRQs as: ");
1866 for (i = 0; i < count; i++) {
1869 if (vectors[i] == 0)
1873 msix->msix_vectors[vectors[i] - 1].mv_irq);
1882 pci_release_msix(device_t dev, device_t child)
1884 struct pci_devinfo *dinfo = device_get_ivars(child);
1885 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1886 struct resource_list_entry *rle;
1889 /* Do we have any messages to release? */
1890 if (msix->msix_alloc == 0)
1893 /* Make sure none of the resources are allocated. */
1894 for (i = 0; i < msix->msix_table_len; i++) {
1895 if (msix->msix_table[i].mte_vector == 0)
1897 if (msix->msix_table[i].mte_handlers > 0)
1899 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1900 KASSERT(rle != NULL, ("missing resource"));
1901 if (rle->res != NULL)
1905 /* Update control register to disable MSI-X. */
1906 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1907 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1908 msix->msix_ctrl, 2);
1910 /* Free the resource list entries. */
1911 for (i = 0; i < msix->msix_table_len; i++) {
1912 if (msix->msix_table[i].mte_vector == 0)
1914 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1916 free(msix->msix_table, M_DEVBUF);
1917 msix->msix_table_len = 0;
1919 /* Release the IRQs. */
1920 for (i = 0; i < msix->msix_alloc; i++)
1921 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1922 msix->msix_vectors[i].mv_irq);
1923 free(msix->msix_vectors, M_DEVBUF);
1924 msix->msix_alloc = 0;
1929 * Return the max supported MSI-X messages this device supports.
1930 * Basically, assuming the MD code can alloc messages, this function
1931 * should return the maximum value that pci_alloc_msix() can return.
1932 * Thus, it is subject to the tunables, etc.
1935 pci_msix_count_method(device_t dev, device_t child)
1937 struct pci_devinfo *dinfo = device_get_ivars(child);
1938 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1940 if (pci_do_msix && msix->msix_location != 0)
1941 return (msix->msix_msgnum);
1946 pci_msix_pba_bar_method(device_t dev, device_t child)
1948 struct pci_devinfo *dinfo = device_get_ivars(child);
1949 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1951 if (pci_do_msix && msix->msix_location != 0)
1952 return (msix->msix_pba_bar);
1957 pci_msix_table_bar_method(device_t dev, device_t child)
1959 struct pci_devinfo *dinfo = device_get_ivars(child);
1960 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1962 if (pci_do_msix && msix->msix_location != 0)
1963 return (msix->msix_table_bar);
1968 * HyperTransport MSI mapping control
1971 pci_ht_map_msi(device_t dev, uint64_t addr)
1973 struct pci_devinfo *dinfo = device_get_ivars(dev);
1974 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1979 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1980 ht->ht_msiaddr >> 20 == addr >> 20) {
1981 /* Enable MSI -> HT mapping. */
1982 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1983 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1987 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1988 /* Disable MSI -> HT mapping. */
1989 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1990 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1996 pci_get_max_payload(device_t dev)
1998 struct pci_devinfo *dinfo = device_get_ivars(dev);
2002 cap = dinfo->cfg.pcie.pcie_location;
2005 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2006 val &= PCIEM_CTL_MAX_PAYLOAD;
2008 return (1 << (val + 7));
2012 pci_get_max_read_req(device_t dev)
2014 struct pci_devinfo *dinfo = device_get_ivars(dev);
2018 cap = dinfo->cfg.pcie.pcie_location;
2021 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2022 val &= PCIEM_CTL_MAX_READ_REQUEST;
2024 return (1 << (val + 7));
2028 pci_set_max_read_req(device_t dev, int size)
2030 struct pci_devinfo *dinfo = device_get_ivars(dev);
2034 cap = dinfo->cfg.pcie.pcie_location;
2041 size = (1 << (fls(size) - 1));
2042 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2043 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
2044 val |= (fls(size) - 8) << 12;
2045 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
2050 pcie_read_config(device_t dev, int reg, int width)
2052 struct pci_devinfo *dinfo = device_get_ivars(dev);
2055 cap = dinfo->cfg.pcie.pcie_location;
2059 return (0xffffffff);
2062 return (pci_read_config(dev, cap + reg, width));
2066 pcie_write_config(device_t dev, int reg, uint32_t value, int width)
2068 struct pci_devinfo *dinfo = device_get_ivars(dev);
2071 cap = dinfo->cfg.pcie.pcie_location;
2074 pci_write_config(dev, cap + reg, value, width);
2078 * Adjusts a PCI-e capability register by clearing the bits in mask
2079 * and setting the bits in (value & mask). Bits not set in mask are
2082 * Returns the old value on success or all ones on failure.
2085 pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
2088 struct pci_devinfo *dinfo = device_get_ivars(dev);
2092 cap = dinfo->cfg.pcie.pcie_location;
2096 return (0xffffffff);
2099 old = pci_read_config(dev, cap + reg, width);
2101 new |= (value & mask);
2102 pci_write_config(dev, cap + reg, new, width);
2107 * Support for MSI message signalled interrupts.
2110 pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
2113 struct pci_devinfo *dinfo = device_get_ivars(child);
2114 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2116 /* Write data and address values. */
2117 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
2118 address & 0xffffffff, 4);
2119 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2120 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
2122 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
2125 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
2128 /* Enable MSI in the control register. */
2129 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
2130 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2133 /* Enable MSI -> HT mapping. */
2134 pci_ht_map_msi(child, address);
2138 pci_disable_msi_method(device_t dev, device_t child)
2140 struct pci_devinfo *dinfo = device_get_ivars(child);
2141 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2143 /* Disable MSI -> HT mapping. */
2144 pci_ht_map_msi(child, 0);
2146 /* Disable MSI in the control register. */
2147 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
2148 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2153 * Restore MSI registers during resume. If MSI is enabled then
2154 * restore the data and address registers in addition to the control
2158 pci_resume_msi(device_t dev)
2160 struct pci_devinfo *dinfo = device_get_ivars(dev);
2161 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2165 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
2166 address = msi->msi_addr;
2167 data = msi->msi_data;
2168 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
2169 address & 0xffffffff, 4);
2170 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2171 pci_write_config(dev, msi->msi_location +
2172 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
2173 pci_write_config(dev, msi->msi_location +
2174 PCIR_MSI_DATA_64BIT, data, 2);
2176 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
2179 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
2184 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
2186 struct pci_devinfo *dinfo = device_get_ivars(dev);
2187 pcicfgregs *cfg = &dinfo->cfg;
2188 struct resource_list_entry *rle;
2189 struct msix_table_entry *mte;
2190 struct msix_vector *mv;
2196 * Handle MSI first. We try to find this IRQ among our list
2197 * of MSI IRQs. If we find it, we request updated address and
2198 * data registers and apply the results.
2200 if (cfg->msi.msi_alloc > 0) {
2202 /* If we don't have any active handlers, nothing to do. */
2203 if (cfg->msi.msi_handlers == 0)
2205 for (i = 0; i < cfg->msi.msi_alloc; i++) {
2206 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2208 if (rle->start == irq) {
2209 error = PCIB_MAP_MSI(device_get_parent(bus),
2210 dev, irq, &addr, &data);
2213 pci_disable_msi(dev);
2214 dinfo->cfg.msi.msi_addr = addr;
2215 dinfo->cfg.msi.msi_data = data;
2216 pci_enable_msi(dev, addr, data);
2224 * For MSI-X, we check to see if we have this IRQ. If we do,
2225 * we request the updated mapping info. If that works, we go
2226 * through all the slots that use this IRQ and update them.
2228 if (cfg->msix.msix_alloc > 0) {
2229 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2230 mv = &cfg->msix.msix_vectors[i];
2231 if (mv->mv_irq == irq) {
2232 error = PCIB_MAP_MSI(device_get_parent(bus),
2233 dev, irq, &addr, &data);
2236 mv->mv_address = addr;
2238 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2239 mte = &cfg->msix.msix_table[j];
2240 if (mte->mte_vector != i + 1)
2242 if (mte->mte_handlers == 0)
2244 pci_mask_msix(dev, j);
2245 pci_enable_msix(dev, j, addr, data);
2246 pci_unmask_msix(dev, j);
2257 * Returns true if the specified device is blacklisted because MSI
2261 pci_msi_device_blacklisted(device_t dev)
2264 if (!pci_honor_msi_blacklist)
2267 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2271 * Determine if MSI is blacklisted globally on this system. Currently,
2272 * we just check for blacklisted chipsets as represented by the
2273 * host-PCI bridge at device 0:0:0. In the future, it may become
2274 * necessary to check other system attributes, such as the kenv values
2275 * that give the motherboard manufacturer and model number.
2278 pci_msi_blacklisted(void)
2282 if (!pci_honor_msi_blacklist)
2285 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2286 if (!(pcie_chipset || pcix_chipset)) {
2287 if (vm_guest != VM_GUEST_NO) {
2289 * Whitelist older chipsets in virtual
2290 * machines known to support MSI.
2292 dev = pci_find_bsf(0, 0, 0);
2294 return (!pci_has_quirk(pci_get_devid(dev),
2295 PCI_QUIRK_ENABLE_MSI_VM));
2300 dev = pci_find_bsf(0, 0, 0);
2302 return (pci_msi_device_blacklisted(dev));
2307 * Returns true if the specified device is blacklisted because MSI-X
2308 * doesn't work. Note that this assumes that if MSI doesn't work,
2309 * MSI-X doesn't either.
2312 pci_msix_device_blacklisted(device_t dev)
2315 if (!pci_honor_msi_blacklist)
2318 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2321 return (pci_msi_device_blacklisted(dev));
2325 * Determine if MSI-X is blacklisted globally on this system. If MSI
2326 * is blacklisted, assume that MSI-X is as well. Check for additional
2327 * chipsets where MSI works but MSI-X does not.
2330 pci_msix_blacklisted(void)
2334 if (!pci_honor_msi_blacklist)
2337 dev = pci_find_bsf(0, 0, 0);
2338 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2339 PCI_QUIRK_DISABLE_MSIX))
2342 return (pci_msi_blacklisted());
2346 * Attempt to allocate *count MSI messages. The actual number allocated is
2347 * returned in *count. After this function returns, each message will be
2348 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2351 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2353 struct pci_devinfo *dinfo = device_get_ivars(child);
2354 pcicfgregs *cfg = &dinfo->cfg;
2355 struct resource_list_entry *rle;
2356 int actual, error, i, irqs[32];
2359 /* Don't let count == 0 get us into trouble. */
2363 /* If rid 0 is allocated, then fail. */
2364 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2365 if (rle != NULL && rle->res != NULL)
2368 /* Already have allocated messages? */
2369 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2372 /* If MSI is blacklisted for this system, fail. */
2373 if (pci_msi_blacklisted())
2376 /* MSI capability present? */
2377 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2381 device_printf(child,
2382 "attempting to allocate %d MSI vectors (%d supported)\n",
2383 *count, cfg->msi.msi_msgnum);
2385 /* Don't ask for more than the device supports. */
2386 actual = min(*count, cfg->msi.msi_msgnum);
2388 /* Don't ask for more than 32 messages. */
2389 actual = min(actual, 32);
2391 /* MSI requires power of 2 number of messages. */
2392 if (!powerof2(actual))
2396 /* Try to allocate N messages. */
2397 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2409 * We now have N actual messages mapped onto SYS_RES_IRQ
2410 * resources in the irqs[] array, so add new resources
2411 * starting at rid 1.
2413 for (i = 0; i < actual; i++)
2414 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2415 irqs[i], irqs[i], 1);
2419 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2424 * Be fancy and try to print contiguous runs
2425 * of IRQ values as ranges. 'run' is true if
2426 * we are in a range.
2428 device_printf(child, "using IRQs %d", irqs[0]);
2430 for (i = 1; i < actual; i++) {
2432 /* Still in a run? */
2433 if (irqs[i] == irqs[i - 1] + 1) {
2438 /* Finish previous range. */
2440 printf("-%d", irqs[i - 1]);
2444 /* Start new range. */
2445 printf(",%d", irqs[i]);
2448 /* Unfinished range? */
2450 printf("-%d", irqs[actual - 1]);
2451 printf(" for MSI\n");
2455 /* Update control register with actual count. */
2456 ctrl = cfg->msi.msi_ctrl;
2457 ctrl &= ~PCIM_MSICTRL_MME_MASK;
2458 ctrl |= (ffs(actual) - 1) << 4;
2459 cfg->msi.msi_ctrl = ctrl;
2460 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2462 /* Update counts of alloc'd messages. */
2463 cfg->msi.msi_alloc = actual;
2464 cfg->msi.msi_handlers = 0;
2469 /* Release the MSI messages associated with this device. */
2471 pci_release_msi_method(device_t dev, device_t child)
2473 struct pci_devinfo *dinfo = device_get_ivars(child);
2474 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2475 struct resource_list_entry *rle;
2476 int error, i, irqs[32];
2478 /* Try MSI-X first. */
2479 error = pci_release_msix(dev, child);
2480 if (error != ENODEV)
2483 /* Do we have any messages to release? */
2484 if (msi->msi_alloc == 0)
2486 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2488 /* Make sure none of the resources are allocated. */
2489 if (msi->msi_handlers > 0)
2491 for (i = 0; i < msi->msi_alloc; i++) {
2492 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2493 KASSERT(rle != NULL, ("missing MSI resource"));
2494 if (rle->res != NULL)
2496 irqs[i] = rle->start;
2499 /* Update control register with 0 count. */
2500 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2501 ("%s: MSI still enabled", __func__));
2502 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2503 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2506 /* Release the messages. */
2507 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2508 for (i = 0; i < msi->msi_alloc; i++)
2509 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2511 /* Update alloc count. */
2519 * Return the max supported MSI messages this device supports.
2520 * Basically, assuming the MD code can alloc messages, this function
2521 * should return the maximum value that pci_alloc_msi() can return.
2522 * Thus, it is subject to the tunables, etc.
2525 pci_msi_count_method(device_t dev, device_t child)
2527 struct pci_devinfo *dinfo = device_get_ivars(child);
2528 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2530 if (pci_do_msi && msi->msi_location != 0)
2531 return (msi->msi_msgnum);
2535 /* free pcicfgregs structure and all depending data structures */
2538 pci_freecfg(struct pci_devinfo *dinfo)
2540 struct devlist *devlist_head;
2541 struct pci_map *pm, *next;
2544 devlist_head = &pci_devq;
2546 if (dinfo->cfg.vpd.vpd_reg) {
2547 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2548 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2549 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2550 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2551 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2552 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2553 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2555 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2558 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2559 free(dinfo, M_DEVBUF);
2561 /* increment the generation count */
2564 /* we're losing one device */
2570 * PCI power management
/*
 * NOTE(review): this is a numbered excerpt; several original lines
 * (function return type, braces, the DELAY() calls, switch breaks and
 * the final return) are elided between the numbered lines below.
 */
2573 pci_set_powerstate_method(device_t dev, device_t child, int state)
2575 struct pci_devinfo *dinfo = device_get_ivars(child);
2576 pcicfgregs *cfg = &dinfo->cfg;
2578 int oldstate, highest, delay;
/* No PCI power-management capability: state changes are unsupported. */
2580 if (cfg->pp.pp_cap == 0)
2581 return (EOPNOTSUPP);
2584 * Optimize a no state change request away. While it would be OK to
2585 * write to the hardware in theory, some devices have shown odd
2586 * behavior when going from D3 -> D3.
2588 oldstate = pci_get_powerstate(child);
2589 if (oldstate == state)
2593 * The PCI power management specification states that after a state
2594 * transition between PCI power states, system software must
2595 * guarantee a minimal delay before the function accesses the device.
2596 * Compute the worst case delay that we need to guarantee before we
2597 * access the device. Many devices will be responsive much more
2598 * quickly than this delay, but there are some that don't respond
2599 * instantly to state changes. Transitions to/from D3 state require
2600 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2601 * is done below with DELAY rather than a sleeper function because
2602 * this function can be called from contexts where we cannot sleep.
2604 highest = (oldstate > state) ? oldstate : state;
2605 if (highest == PCI_POWERSTATE_D3)
2607 else if (highest == PCI_POWERSTATE_D2)
/* Read current PM status, clearing the power-state field for rewrite. */
2611 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2612 & ~PCIM_PSTAT_DMASK;
2614 case PCI_POWERSTATE_D0:
2615 status |= PCIM_PSTAT_D0;
2617 case PCI_POWERSTATE_D1:
/* D1 and D2 are optional states; reject if the capability lacks them. */
2618 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2619 return (EOPNOTSUPP);
2620 status |= PCIM_PSTAT_D1;
2622 case PCI_POWERSTATE_D2:
2623 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2624 return (EOPNOTSUPP);
2625 status |= PCIM_PSTAT_D2;
2627 case PCI_POWERSTATE_D3:
2628 status |= PCIM_PSTAT_D3;
2635 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
/* Commit the new power state to the PM status register. */
2638 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
2645 pci_get_powerstate_method(device_t dev, device_t child)
2647 struct pci_devinfo *dinfo = device_get_ivars(child);
2648 pcicfgregs *cfg = &dinfo->cfg;
2652 if (cfg->pp.pp_cap != 0) {
2653 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2654 switch (status & PCIM_PSTAT_DMASK) {
2656 result = PCI_POWERSTATE_D0;
2659 result = PCI_POWERSTATE_D1;
2662 result = PCI_POWERSTATE_D2;
2665 result = PCI_POWERSTATE_D3;
2668 result = PCI_POWERSTATE_UNKNOWN;
2672 /* No support, device is always at D0 */
2673 result = PCI_POWERSTATE_D0;
2679 * Some convenience functions for PCI device drivers.
2682 static __inline void
2683 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2687 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2689 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2692 static __inline void
2693 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2697 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2699 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2703 pci_enable_busmaster_method(device_t dev, device_t child)
2705 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2710 pci_disable_busmaster_method(device_t dev, device_t child)
2712 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2717 pci_enable_io_method(device_t dev, device_t child, int space)
2722 case SYS_RES_IOPORT:
2723 bit = PCIM_CMD_PORTEN;
2725 case SYS_RES_MEMORY:
2726 bit = PCIM_CMD_MEMEN;
2731 pci_set_command_bit(dev, child, bit);
2736 pci_disable_io_method(device_t dev, device_t child, int space)
2741 case SYS_RES_IOPORT:
2742 bit = PCIM_CMD_PORTEN;
2744 case SYS_RES_MEMORY:
2745 bit = PCIM_CMD_MEMEN;
2750 pci_clear_command_bit(dev, child, bit);
2755 * New style pci driver. Parent device is either a pci-host-bridge or a
2756 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump the interesting fields of a device's config header to the
 * console (used when booting verbose).
 * NOTE(review): numbered excerpt — braces, local declarations and some
 * intervening lines are elided between the numbered lines below.
 */
2760 pci_print_verbose(struct pci_devinfo *dinfo)
2764 pcicfgregs *cfg = &dinfo->cfg;
2766 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2767 cfg->vendor, cfg->device, cfg->revid);
2768 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2769 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2770 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2771 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2773 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2774 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2775 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2776 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2777 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
/* intpin is 1-based (1 == INTA#); 0 means no interrupt pin. */
2778 if (cfg->intpin > 0)
2779 printf("\tintpin=%c, irq=%d\n",
2780 cfg->intpin +'a' -1, cfg->intline);
/* Power-management capability summary. */
2781 if (cfg->pp.pp_cap) {
2784 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2785 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2786 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2787 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2788 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2789 status & PCIM_PSTAT_DMASK);
/* MSI capability summary. */
2791 if (cfg->msi.msi_location) {
2794 ctrl = cfg->msi.msi_ctrl;
2795 printf("\tMSI supports %d message%s%s%s\n",
2796 cfg->msi.msi_msgnum,
2797 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2798 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2799 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
/* MSI-X capability summary: table and PBA may share one BAR. */
2801 if (cfg->msix.msix_location) {
2802 printf("\tMSI-X supports %d message%s ",
2803 cfg->msix.msix_msgnum,
2804 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2805 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2806 printf("in map 0x%x\n",
2807 cfg->msix.msix_table_bar);
2809 printf("in maps 0x%x and 0x%x\n",
2810 cfg->msix.msix_table_bar,
2811 cfg->msix.msix_pba_bar);
2817 pci_porten(device_t dev)
2819 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
2823 pci_memen(device_t dev)
2825 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Size a BAR by the standard write-all-ones probe, returning the
 * original value in *mapp and the probe readback in *testvalp; *bar64
 * (if non-NULL) reports whether this is a 64-bit BAR.
 * NOTE(review): numbered excerpt — the remaining parameter line, local
 * declarations, braces and early return are elided between the
 * numbered lines below. The exact ordering of config writes here is
 * load-bearing (decode is disabled around the probe).
 */
2829 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp,
2832 struct pci_devinfo *dinfo;
2833 pci_addr_t map, testval;
2838 * The device ROM BAR is special. It is always a 32-bit
2839 * memory BAR. Bit 0 is special and should not be set when
2842 dinfo = device_get_ivars(dev);
2843 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
/* Probe the ROM BAR with 0xfffffffe so the enable bit (bit 0) stays clear. */
2844 map = pci_read_config(dev, reg, 4);
2845 pci_write_config(dev, reg, 0xfffffffe, 4);
2846 testval = pci_read_config(dev, reg, 4);
2847 pci_write_config(dev, reg, map, 4);
2849 *testvalp = testval;
2855 map = pci_read_config(dev, reg, 4);
2856 ln2range = pci_maprange(map);
/* For a 64-bit BAR, fold the upper dword into 'map'. */
2858 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2861 * Disable decoding via the command register before
2862 * determining the BAR's length since we will be placing it in
2865 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2866 pci_write_config(dev, PCIR_COMMAND,
2867 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2870 * Determine the BAR's length by writing all 1's. The bottom
2871 * log_2(size) bits of the BAR will stick as 0 when we read
2874 pci_write_config(dev, reg, 0xffffffff, 4);
2875 testval = pci_read_config(dev, reg, 4);
2876 if (ln2range == 64) {
2877 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2878 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2882 * Restore the original value of the BAR. We may have reprogrammed
2883 * the BAR of the low-level console device and when booting verbose,
2884 * we need the console device addressable.
2886 pci_write_config(dev, reg, map, 4);
2888 pci_write_config(dev, reg + 4, map >> 32, 4);
/* Re-enable decoding only after the BAR holds its original value again. */
2889 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2892 *testvalp = testval;
2894 *bar64 = (ln2range == 64);
2898 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2900 struct pci_devinfo *dinfo;
2903 /* The device ROM BAR is always a 32-bit memory BAR. */
2904 dinfo = device_get_ivars(dev);
2905 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2908 ln2range = pci_maprange(pm->pm_value);
2909 pci_write_config(dev, pm->pm_reg, base, 4);
2911 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2912 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2914 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2915 pm->pm_reg + 4, 4) << 32;
2919 pci_find_bar(device_t dev, int reg)
2921 struct pci_devinfo *dinfo;
2924 dinfo = device_get_ivars(dev);
2925 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2926 if (pm->pm_reg == reg)
2933 pci_bar_enabled(device_t dev, struct pci_map *pm)
2935 struct pci_devinfo *dinfo;
2938 dinfo = device_get_ivars(dev);
2939 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2940 !(pm->pm_value & PCIM_BIOS_ENABLE))
2942 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2943 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2944 return ((cmd & PCIM_CMD_MEMEN) != 0);
2946 return ((cmd & PCIM_CMD_PORTEN) != 0);
2950 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2952 struct pci_devinfo *dinfo;
2953 struct pci_map *pm, *prev;
2955 dinfo = device_get_ivars(dev);
2956 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2958 pm->pm_value = value;
2960 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2961 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2963 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2964 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2968 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2970 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
2975 pci_restore_bars(device_t dev)
2977 struct pci_devinfo *dinfo;
2981 dinfo = device_get_ivars(dev);
2982 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2983 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2986 ln2range = pci_maprange(pm->pm_value);
2987 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2989 pci_write_config(dev, pm->pm_reg + 4,
2990 pm->pm_value >> 32, 4);
2995 * Add a resource based on a pci map register. Return 1 if the map
2996 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * NOTE(review): numbered excerpt — braces, several returns/breaks and
 * some statements are elided between the numbered lines below.
 * NOTE(review): the '®' characters on the two resource_list_reserve()
 * calls below are mis-encoded '&reg' arguments (entity corruption) —
 * fix when restoring this file to compilable form.
 */
2999 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
3000 int force, int prefetch)
3003 pci_addr_t base, map, testval;
3004 pci_addr_t start, end, count;
3005 int barlen, basezero, flags, maprange, mapsize, type;
3007 struct resource *res;
3010 * The BAR may already exist if the device is a CardBus card
3011 * whose CIS is stored in this BAR.
3013 pm = pci_find_bar(dev, reg);
3015 maprange = pci_maprange(pm->pm_value);
3016 barlen = maprange == 64 ? 2 : 1;
/* Probe the BAR: 'map' is its live value, 'testval' the sizing readback. */
3020 pci_read_bar(dev, reg, &map, &testval, NULL);
3021 if (PCI_BAR_MEM(map)) {
3022 type = SYS_RES_MEMORY;
3023 if (map & PCIM_BAR_MEM_PREFETCH)
3026 type = SYS_RES_IOPORT;
3027 mapsize = pci_mapsize(testval);
3028 base = pci_mapbase(map);
3029 #ifdef __PCI_BAR_ZERO_VALID
3032 basezero = base == 0;
3034 maprange = pci_maprange(map);
3035 barlen = maprange == 64 ? 2 : 1;
3038 * For I/O registers, if bottom bit is set, and the next bit up
3039 * isn't clear, we know we have a BAR that doesn't conform to the
3040 * spec, so ignore it. Also, sanity check the size of the data
3041 * areas to the type of memory involved. Memory must be at least
3042 * 16 bytes in size, while I/O ranges must be at least 4.
3044 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
3046 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
3047 (type == SYS_RES_IOPORT && mapsize < 2))
3050 /* Save a record of this BAR. */
3051 pm = pci_add_bar(dev, reg, map, mapsize)
3053 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
3054 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
3055 if (type == SYS_RES_IOPORT && !pci_porten(dev))
3056 printf(", port disabled\n");
3057 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
3058 printf(", memory disabled\n");
3060 printf(", enabled\n");
3064 * If base is 0, then we have problems if this architecture does
3065 * not allow that. It is best to ignore such entries for the
3066 * moment. These will be allocated later if the driver specifically
3067 * requests them. However, some removable busses look better when
3068 * all resources are allocated, so allow '0' to be overriden.
3070 * Similarly treat maps whose values is the same as the test value
3071 * read back. These maps have had all f's written to them by the
3072 * BIOS in an attempt to disable the resources.
3074 if (!force && (basezero || map == testval))
/* Refuse BARs whose address does not fit in this arch's u_long. */
3076 if ((u_long)base != base) {
3078 "pci%d:%d:%d:%d bar %#x too many address bits",
3079 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
3080 pci_get_function(dev), reg);
3085 * This code theoretically does the right thing, but has
3086 * undesirable side effects in some cases where peripherals
3087 * respond oddly to having these bits enabled. Let the user
3088 * be able to turn them off (since pci_enable_io_modes is 1 by
3091 if (pci_enable_io_modes) {
3092 /* Turn on resources that have been left off by a lazy BIOS */
3093 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
3094 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3095 cmd |= PCIM_CMD_PORTEN;
3096 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3098 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
3099 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3100 cmd |= PCIM_CMD_MEMEN;
3101 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3104 if (type == SYS_RES_IOPORT && !pci_porten(dev))
3106 if (type == SYS_RES_MEMORY && !pci_memen(dev))
3110 count = (pci_addr_t)1 << mapsize;
3111 flags = RF_ALIGNMENT_LOG2(mapsize);
3113 flags |= RF_PREFETCHABLE;
3114 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
3115 start = 0; /* Let the parent decide. */
3119 end = base + count - 1;
3121 resource_list_add(rl, type, reg, start, end, count);
3124 * Try to allocate the resource for this BAR from our parent
3125 * so that this resource range is already reserved. The
3126 * driver for this device will later inherit this resource in
3127 * pci_alloc_resource().
3129 res = resource_list_reserve(rl, bus, dev, type, ®, start, end, count,
3131 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0)) {
3133 * If the allocation fails, try to allocate a resource for
3134 * this BAR using any available range. The firmware felt
3135 * it was important enough to assign a resource, so don't
3136 * disable decoding if we can help it.
3138 resource_list_delete(rl, type, reg);
3139 resource_list_add(rl, type, reg, 0, ~0, count);
3140 res = resource_list_reserve(rl, bus, dev, type, ®, 0, ~0,
3145 * If the allocation fails, delete the resource list entry
3146 * and disable decoding for this device.
3148 * If the driver requests this resource in the future,
3149 * pci_reserve_map() will try to allocate a fresh
3152 resource_list_delete(rl, type, reg);
3153 pci_disable_io(dev, type);
3156 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
3157 pci_get_domain(dev), pci_get_bus(dev),
3158 pci_get_slot(dev), pci_get_function(dev), reg);
/* Reservation succeeded: program the BAR with the address we got. */
3160 start = rman_get_start(res);
3161 pci_write_bar(dev, pm, start);
3167 * For ATA devices we need to decide early what addressing mode to use.
3168 * Legacy demands that the primary and secondary ATA ports sits on the
3169 * same addresses that old ISA hardware did. This dictates that we use
3170 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * NOTE(review): numbered excerpt — braces, rid assignments and the
 * end-address arguments of the reserve calls are elided between the
 * numbered lines below.
 */
3174 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
3175 uint32_t prefetchmask)
3177 int rid, type, progif;
3179 /* if this device supports PCI native addressing use it */
3180 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3181 if ((progif & 0x8a) == 0x8a) {
3182 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
3183 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
3184 printf("Trying ATA native PCI addressing mode\n");
/* 0x05 sets both primary and secondary channels to native mode. */
3185 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
3189 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3190 type = SYS_RES_IOPORT;
/* Primary channel: native mode uses BARs 0/1, legacy uses ISA 0x1f0/0x3f6. */
3191 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
3192 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
3193 prefetchmask & (1 << 0));
3194 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
3195 prefetchmask & (1 << 1));
3198 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
3199 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
3202 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
3203 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
/* Secondary channel: native mode uses BARs 2/3, legacy uses ISA 0x170/0x376. */
3206 if (progif & PCIP_STORAGE_IDE_MODESEC) {
3207 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3208 prefetchmask & (1 << 2));
3209 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3210 prefetchmask & (1 << 3));
3213 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3214 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3217 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3218 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
/* The bus-master DMA BARs (4/5) are mapped in either mode. */
3221 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3222 prefetchmask & (1 << 4));
3223 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3224 prefetchmask & (1 << 5));
/*
 * Determine the IRQ for a device's INTx pin (tunable override, the
 * intline register, or asking the parent bridge to route one) and
 * publish it as the rid-0 SYS_RES_IRQ resource.
 * NOTE(review): numbered excerpt — braces, the 'int irq' declaration
 * and several early returns are elided between the numbered lines.
 */
3228 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3230 struct pci_devinfo *dinfo = device_get_ivars(dev);
3231 pcicfgregs *cfg = &dinfo->cfg;
3232 char tunable_name[64];
3235 /* Has to have an intpin to have an interrupt. */
3236 if (cfg->intpin == 0)
3239 /* Let the user override the IRQ with a tunable. */
3240 irq = PCI_INVALID_IRQ;
3241 snprintf(tunable_name, sizeof(tunable_name),
3242 "hw.pci%d.%d.%d.INT%c.irq",
3243 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject tunable values outside the valid IRQ range (1..254). */
3244 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3245 irq = PCI_INVALID_IRQ;
3248 * If we didn't get an IRQ via the tunable, then we either use the
3249 * IRQ value in the intline register or we ask the bus to route an
3250 * interrupt for us. If force_route is true, then we only use the
3251 * value in the intline register if the bus was unable to assign an
3254 if (!PCI_INTERRUPT_VALID(irq)) {
3255 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3256 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3257 if (!PCI_INTERRUPT_VALID(irq))
3261 /* If after all that we don't have an IRQ, just bail. */
3262 if (!PCI_INTERRUPT_VALID(irq))
3265 /* Update the config register if it changed. */
3266 if (irq != cfg->intline) {
3268 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3271 /* Add this IRQ as rid 0 interrupt resource. */
3272 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3275 /* Perform early OHCI takeover from SMM. */
/*
 * NOTE(review): numbered excerpt — locals ('rid', 'ctl', 'i'), braces
 * and the DELAY() inside the poll loop are elided between the numbered
 * lines below.
 */
3277 ohci_early_takeover(device_t self)
3279 struct resource *res;
3285 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3289 ctl = bus_read_4(res, OHCI_CONTROL);
/* OHCI_IR set means the BIOS/SMM currently owns the controller. */
3290 if (ctl & OHCI_IR) {
3292 printf("ohci early: "
3293 "SMM active, request owner change\n");
3294 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
/* Poll up to 100 iterations for SMM to release ownership. */
3295 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3297 ctl = bus_read_4(res, OHCI_CONTROL);
3299 if (ctl & OHCI_IR) {
3301 printf("ohci early: "
3302 "SMM does not respond, resetting\n");
/* SMM never answered: force a host-controller reset instead. */
3303 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3305 /* Disable interrupts */
3306 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3309 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3312 /* Perform early UHCI takeover from SMM. */
/*
 * NOTE(review): numbered excerpt — the 'int rid' declaration, braces
 * and the NULL check on 'res' are elided between the numbered lines.
 */
3314 uhci_early_takeover(device_t self)
3316 struct resource *res;
3320 * Set the PIRQD enable bit and switch off all the others. We don't
3321 * want legacy support to interfere with us XXX Does this also mean
3322 * that the BIOS won't touch the keyboard anymore if it is connected
3323 * to the ports of the root hub?
3325 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3327 /* Disable interrupts */
3328 rid = PCI_UHCI_BASE_REG;
3329 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3331 bus_write_2(res, UHCI_INTR, 0);
3332 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3336 /* Perform early EHCI takeover from SMM. */
/*
 * NOTE(review): numbered excerpt — locals, braces and the DELAY()
 * inside the poll loop are elided between the numbered lines below.
 */
3338 ehci_early_takeover(device_t self)
3340 struct resource *res;
3350 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3354 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3356 /* Synchronise with the BIOS if it owns the controller. */
/* Walk the extended-capability list in config space looking for LEGSUP. */
3357 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3358 eecp = EHCI_EECP_NEXT(eec)) {
3359 eec = pci_read_config(self, eecp, 4);
3360 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3363 bios_sem = pci_read_config(self, eecp +
3364 EHCI_LEGSUP_BIOS_SEM, 1);
/* BIOS semaphore clear: nothing to take over for this capability. */
3365 if (bios_sem == 0) {
3369 printf("ehci early: "
3370 "SMM active, request owner change\n");
/* Set the OS-owned semaphore and wait for the BIOS to drop its own. */
3372 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3374 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3376 bios_sem = pci_read_config(self, eecp +
3377 EHCI_LEGSUP_BIOS_SEM, 1);
3380 if (bios_sem != 0) {
3382 printf("ehci early: "
3383 "SMM does not respond\n");
3385 /* Disable interrupts */
3386 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3387 bus_write_4(res, offs + EHCI_USBINTR, 0);
3389 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3392 /* Perform early XHCI takeover from SMM. */
/*
 * NOTE(review): numbered excerpt — locals, braces and the DELAY()
 * inside the poll loop are elided between the numbered lines below.
 */
3394 xhci_early_takeover(device_t self)
3396 struct resource *res;
3406 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3410 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3414 /* Synchronise with the BIOS if it owns the controller. */
/* Walk the MMIO extended-capability list (offsets are in dwords). */
3415 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3416 eecp += XHCI_XECP_NEXT(eec) << 2) {
3417 eec = bus_read_4(res, eecp);
3419 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3422 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3427 printf("xhci early: "
3428 "SMM active, request owner change\n");
/* Claim the OS semaphore, then poll for the BIOS to release its own. */
3430 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3432 /* wait a maximum of 5 seconds */
3434 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3436 bios_sem = bus_read_1(res, eecp +
3437 XHCI_XECP_BIOS_SEM);
3440 if (bios_sem != 0) {
3442 printf("xhci early: "
3443 "SMM does not respond\n");
3446 /* Disable interrupts */
3447 offs = bus_read_1(res, XHCI_CAPLENGTH);
3448 bus_write_4(res, offs + XHCI_USBCMD, 0);
/* Read status back to flush the posted write. */
3449 bus_read_4(res, offs + XHCI_USBSTS);
3451 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
 * Reserve the secondary bus-number range programmed into a PCI-PCI or
 * CardBus bridge as a PCI_RES_BUS resource; clear the registers if the
 * reservation fails.
 * NOTE(review): numbered excerpt — braces, returns, 'char *cp' and
 * several statements are elided between the numbered lines below.
 */
3456 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3457 struct resource_list *rl)
3459 struct resource *res;
3461 rman_res_t start, end, count;
3462 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
/* Only bridge-type headers carry secondary/subordinate bus registers. */
3464 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3465 case PCIM_HDRTYPE_BRIDGE:
3466 sec_reg = PCIR_SECBUS_1;
3467 sub_reg = PCIR_SUBBUS_1;
3469 case PCIM_HDRTYPE_CARDBUS:
3470 sec_reg = PCIR_SECBUS_2;
3471 sub_reg = PCIR_SUBBUS_2;
3478 * If the existing bus range is valid, attempt to reserve it
3479 * from our parent. If this fails for any reason, clear the
3480 * secbus and subbus registers.
3482 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3483 * This would at least preserve the existing sec_bus if it is
3486 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3487 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3489 /* Quirk handling. */
3490 switch (pci_get_devid(dev)) {
3491 case 0x12258086: /* Intel 82454KX/GX (Orion) */
/* Register 0x41 holds the supported-bus hint on this chipset. */
3492 sup_bus = pci_read_config(dev, 0x41, 1);
3493 if (sup_bus != 0xff) {
3494 sec_bus = sup_bus + 1;
3495 sub_bus = sup_bus + 1;
3496 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3497 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3502 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
/* Match on SMBIOS planar maker/product before applying the fixup. */
3503 if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
3505 if (strncmp(cp, "Compal", 6) != 0) {
3510 if ((cp = kern_getenv("smbios.planar.product")) == NULL)
3512 if (strncmp(cp, "08A0", 4) != 0) {
3517 if (sub_bus < 0xa) {
3519 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3525 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
/* A non-empty, ordered range is worth reserving from the parent. */
3526 if (sec_bus > 0 && sub_bus >= sec_bus) {
3529 count = end - start + 1;
3531 resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count);
3534 * If requested, clear secondary bus registers in
3535 * bridge devices to force a complete renumbering
3536 * rather than reserving the existing range. However,
3537 * preserve the existing size.
3539 if (pci_clear_buses)
3543 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3544 start, end, count, 0);
3550 "pci%d:%d:%d:%d secbus failed to allocate\n",
3551 pci_get_domain(dev), pci_get_bus(dev),
3552 pci_get_slot(dev), pci_get_function(dev));
/* Reservation failed: zero both registers so renumbering can occur. */
3556 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3557 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
static struct resource *
/*
 * Allocate (lazily reserving if needed) the PCI_RES_BUS range for a
 * child bridge and program its secondary/subordinate bus registers
 * from the reserved range.
 * NOTE(review): numbered excerpt — braces, the 'pcicfgregs *cfg'
 * local and some returns are elided between the numbered lines below.
 */
3561 pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start,
3562 rman_res_t end, rman_res_t count, u_int flags)
3564 struct pci_devinfo *dinfo;
3566 struct resource_list *rl;
3567 struct resource *res;
3568 int sec_reg, sub_reg;
3570 dinfo = device_get_ivars(child);
3572 rl = &dinfo->resources;
/* Only bridge-type headers have secondary/subordinate bus registers. */
3573 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3574 case PCIM_HDRTYPE_BRIDGE:
3575 sec_reg = PCIR_SECBUS_1;
3576 sub_reg = PCIR_SUBBUS_1;
3578 case PCIM_HDRTYPE_CARDBUS:
3579 sec_reg = PCIR_SECBUS_2;
3580 sub_reg = PCIR_SUBBUS_2;
3589 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3590 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
/* Reserve the range now if pci_reserve_secbus() did not already. */
3591 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
3592 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3593 start, end, count, flags & ~RF_ACTIVE);
3595 resource_list_delete(rl, PCI_RES_BUS, *rid);
3596 device_printf(child, "allocating %ju bus%s failed\n",
3597 count, count == 1 ? "" : "es");
3601 device_printf(child,
3602 "Lazy allocation of %ju bus%s at %ju\n", count,
3603 count == 1 ? "" : "es", rman_get_start(res));
/* Program the bridge registers from the freshly reserved range. */
3604 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3605 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3607 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3608 end, count, flags));
/*
 * Translate an Enhanced Allocation BAR Equivalent Indicator (BEI) to
 * the resource id (config register offset) it corresponds to: a
 * standard BAR, the expansion ROM, or an SR-IOV VF BAR.
 * NOTE(review): numbered excerpt — braces, the PCIR_BIOS return and
 * the trailing "no match" return are elided between the numbered lines.
 */
3613 pci_ea_bei_to_rid(device_t dev, int bei)
3616 struct pci_devinfo *dinfo;
3618 struct pcicfg_iov *iov;
3620 dinfo = device_get_ivars(dev);
3621 iov = dinfo->cfg.iov;
3623 iov_pos = iov->iov_pos;
3628 /* Check if matches BAR */
3629 if ((bei >= PCIM_EA_BEI_BAR_0) &&
3630 (bei <= PCIM_EA_BEI_BAR_5))
3631 return (PCIR_BAR(bei));
3634 if (bei == PCIM_EA_BEI_ROM)
3638 /* Check if matches VF_BAR */
3639 if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) &&
3640 (bei <= PCIM_EA_BEI_VF_BAR_5))
/* VF BARs live at the SR-IOV capability offset (iov_pos). */
3641 return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) +
3649 pci_ea_is_enabled(device_t dev, int rid)
3651 struct pci_ea_entry *ea;
3652 struct pci_devinfo *dinfo;
3654 dinfo = device_get_ivars(dev);
3656 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
3657 if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid)
3658 return ((ea->eae_flags & PCIM_EA_ENABLE) > 0);
/*
 * Add resources described by the device's Enhanced Allocation (EA)
 * capability to its resource list and reserve them from the parent.
 * alloc_iov selects whether VF BARs (SR-IOV) or regular BARs/ROM are
 * processed. Entries that fail to reserve are disabled so the legacy
 * BAR path can try instead.
 * NOTE(review): numbered excerpt — braces, switch breaks/defaults and
 * some PCI_IOV-conditional lines are elided between the numbered lines.
 */
3665 pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov)
3667 struct pci_ea_entry *ea;
3668 struct pci_devinfo *dinfo;
3669 pci_addr_t start, end, count;
3670 struct resource_list *rl;
3671 int type, flags, rid;
3672 struct resource *res;
3675 struct pcicfg_iov *iov;
3678 dinfo = device_get_ivars(dev);
3679 rl = &dinfo->resources;
3683 iov = dinfo->cfg.iov;
/* No EA capability: nothing to do. */
3686 if (dinfo->cfg.ea.ea_location == 0)
3689 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
3692 * TODO: Ignore EA-BAR if is not enabled.
3693 * Currently the EA implementation supports
3694 * only situation, where EA structure contains
3695 * predefined entries. In case they are not enabled
3696 * leave them unallocated and proceed with
3697 * a legacy-BAR mechanism.
3699 if ((ea->eae_flags & PCIM_EA_ENABLE) == 0)
/* Decode the entry's resource properties (type/prefetch). */
3702 switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) {
3703 case PCIM_EA_P_MEM_PREFETCH:
3704 case PCIM_EA_P_VF_MEM_PREFETCH:
3705 flags = RF_PREFETCHABLE;
3707 case PCIM_EA_P_VF_MEM:
3709 type = SYS_RES_MEMORY;
3712 type = SYS_RES_IOPORT;
3718 if (alloc_iov != 0) {
3720 /* Allocating IOV, confirm BEI matches */
3721 if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) ||
3722 (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5))
3728 /* Allocating BAR, confirm BEI matches */
3729 if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) ||
3730 (ea->eae_bei > PCIM_EA_BEI_BAR_5)) &&
3731 (ea->eae_bei != PCIM_EA_BEI_ROM))
3735 rid = pci_ea_bei_to_rid(dev, ea->eae_bei);
3739 /* Skip resources already allocated by EA */
3740 if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) ||
3741 (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL))
3744 start = ea->eae_base;
3745 count = ea->eae_max_offset + 1;
/* VF BARs cover one window per virtual function. */
3748 count = count * iov->iov_num_vfs;
3750 end = start + count - 1;
3754 resource_list_add(rl, type, rid, start, end, count);
3755 res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count,
3758 resource_list_delete(rl, type, rid);
3761 * Failed to allocate using EA, disable entry.
3762 * Another attempt to allocation will be performed
3763 * further, but this time using legacy BAR registers
3765 tmp = pci_read_config(dev, ea->eae_cfg_offset, 4);
3766 tmp &= ~PCIM_EA_ENABLE;
3767 pci_write_config(dev, ea->eae_cfg_offset, tmp, 4);
3770 * Disabling entry might fail in case it is hardwired.
3771 * Read flags again to match current status.
3773 ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4);
3778 /* As per specification, fill BAR with zeros */
3779 pci_write_config(dev, rid, 0, 4);
/*
 * Populate a device's resource list: EA entries first, then ATA legacy
 * handling, then ordinary BARs (honoring quirks), the INTx interrupt,
 * early USB SMM takeover, and finally the bridge secondary-bus range.
 * NOTE(review): numbered excerpt — braces, locals ('cfg', 'devid',
 * 'i'), 'continue'/'break' lines are elided between the numbered lines.
 */
3784 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3786 struct pci_devinfo *dinfo;
3788 struct resource_list *rl;
3789 const struct pci_quirk *q;
3793 dinfo = device_get_ivars(dev);
3795 rl = &dinfo->resources;
3796 devid = (cfg->device << 16) | cfg->vendor;
3798 /* Allocate resources using Enhanced Allocation */
3799 pci_add_resources_ea(bus, dev, 0);
3801 /* ATA devices needs special map treatment */
3802 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3803 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3804 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3805 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3806 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3807 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* Walk the header's BARs; pci_add_map() returns 1 or 2 (64-bit) slots. */
3809 for (i = 0; i < cfg->nummaps;) {
3810 /* Skip resources already managed by EA */
3811 if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) ||
3812 (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) ||
3813 pci_ea_is_enabled(dev, PCIR_BAR(i))) {
3819 * Skip quirked resources.
3821 for (q = &pci_quirks[0]; q->devid != 0; q++)
3822 if (q->devid == devid &&
3823 q->type == PCI_QUIRK_UNMAP_REG &&
3824 q->arg1 == PCIR_BAR(i))
3826 if (q->devid != 0) {
3830 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3831 prefetchmask & (1 << i));
3835 * Add additional, quirked resources.
3837 for (q = &pci_quirks[0]; q->devid != 0; q++)
3838 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3839 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3841 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3842 #ifdef __PCI_REROUTE_INTERRUPT
3844 * Try to re-route interrupts. Sometimes the BIOS or
3845 * firmware may leave bogus values in these registers.
3846 * If the re-route fails, then just stick with what we
3849 pci_assign_interrupt(bus, dev, 1);
3851 pci_assign_interrupt(bus, dev, 0);
/* Take USB controllers away from BIOS/SMM before the OS drivers attach. */
3855 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3856 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3857 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3858 xhci_early_takeover(dev);
3859 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3860 ehci_early_takeover(dev);
3861 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3862 ohci_early_takeover(dev);
3863 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3864 uhci_early_takeover(dev);
3867 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3869 * Reserve resources for secondary bus ranges behind bridge
3872 pci_reserve_secbus(bus, dev, cfg, rl);
3876 static struct pci_devinfo *
3877 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3880 struct pci_devinfo *dinfo;
3882 dinfo = pci_read_device(pcib, dev, domain, busno, slot, func);
3884 pci_add_child(dev, dinfo);
/*
 * Enumerate every slot/function on a bus and add the devices found.
 * ARI is negotiated first (via function 0,0) because it changes which
 * slot/function combinations are legal.
 * NOTE(review): numbered excerpt — braces, 'first_func' handling, the
 * vendor-ID presence check and the #undef REG are elided between the
 * numbered lines below.
 */
3890 pci_add_children(device_t dev, int domain, int busno)
3892 #define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3893 device_t pcib = device_get_parent(dev);
3894 struct pci_devinfo *dinfo;
3896 int s, f, pcifunchigh;
3901 * Try to detect a device at slot 0, function 0. If it exists, try to
3902 * enable ARI. We must enable ARI before detecting the rest of the
3903 * functions on this bus as ARI changes the set of slots and functions
3904 * that are legal on this bus.
3906 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0);
3907 if (dinfo != NULL && pci_enable_ari)
3908 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);
3911 * Start looking for new devices on slot 0 at function 1 because we
3912 * just identified the device at slot 0, function 0.
3916 maxslots = PCIB_MAXSLOTS(pcib);
3917 for (s = 0; s <= maxslots; s++, first_func = 0) {
3921 hdrtype = REG(PCIR_HDRTYPE, 1);
/* Skip slots with an invalid header type. */
3922 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Multi-function bit set: probe every function the bridge supports. */
3924 if (hdrtype & PCIM_MFDEV)
3925 pcifunchigh = PCIB_MAXFUNCS(pcib);
3926 for (f = first_func; f <= pcifunchigh; f++)
3927 pci_identify_function(pcib, dev, domain, busno, s, f);
3933 pci_rescan_method(device_t dev)
3935 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3936 device_t pcib = device_get_parent(dev);
3937 struct pci_softc *sc;
3938 device_t child, *devlist, *unchanged;
3939 int devcount, error, i, j, maxslots, oldcount;
3940 int busno, domain, s, f, pcifunchigh;
3943 /* No need to check for ARI on a rescan. */
3944 error = device_get_children(dev, &devlist, &devcount);
3947 if (devcount != 0) {
3948 unchanged = malloc(devcount * sizeof(device_t), M_TEMP,
3950 if (unchanged == NULL) {
3951 free(devlist, M_TEMP);
3957 sc = device_get_softc(dev);
3958 domain = pcib_get_domain(dev);
3959 busno = pcib_get_bus(dev);
3960 maxslots = PCIB_MAXSLOTS(pcib);
3961 for (s = 0; s <= maxslots; s++) {
3962 /* If function 0 is not present, skip to the next slot. */
3964 if (REG(PCIR_VENDOR, 2) == 0xffff)
3967 hdrtype = REG(PCIR_HDRTYPE, 1);
3968 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3970 if (hdrtype & PCIM_MFDEV)
3971 pcifunchigh = PCIB_MAXFUNCS(pcib);
3972 for (f = 0; f <= pcifunchigh; f++) {
3973 if (REG(PCIR_VENDOR, 2) == 0xfff)
3977 * Found a valid function. Check if a
3978 * device_t for this device already exists.
3980 for (i = 0; i < devcount; i++) {
3984 if (pci_get_slot(child) == s &&
3985 pci_get_function(child) == f) {
3986 unchanged[i] = child;
3991 pci_identify_function(pcib, dev, domain, busno, s, f);
3996 /* Remove devices that are no longer present. */
3997 for (i = 0; i < devcount; i++) {
3998 if (unchanged[i] != NULL)
4000 device_delete_child(dev, devlist[i]);
4003 free(devlist, M_TEMP);
4004 oldcount = devcount;
4006 /* Try to attach the devices just added. */
4007 error = device_get_children(dev, &devlist, &devcount);
4009 free(unchanged, M_TEMP);
4013 for (i = 0; i < devcount; i++) {
4014 for (j = 0; j < oldcount; j++) {
4015 if (devlist[i] == unchanged[j])
4019 device_probe_and_attach(devlist[i]);
4023 free(unchanged, M_TEMP);
4024 free(devlist, M_TEMP);
/*
 * Create a child device for an SR-IOV Virtual Function identified by a
 * routing ID (rid) relative to the Physical Function (pf).  Returns the
 * new child's device_t.
 */
4031 pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid,
4034 struct pci_devinfo *pf_dinfo, *vf_dinfo;
4036 int busno, slot, func;
4038 pf_dinfo = device_get_ivars(pf);
4040 pcib = device_get_parent(bus);
/* Decode the routing ID into bus/slot/function for config access. */
4042 PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func);
4044 vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno,
4045 slot, func, vid, did);
/* Mark the devinfo as a VF so BAR handling goes through the VF paths. */
4047 vf_dinfo->cfg.flags |= PCICFG_VF;
4048 pci_add_child(bus, vf_dinfo);
4050 return (vf_dinfo->cfg.dev);
/* Bus method wrapper: delegate VF child creation to pci_add_iov_child(). */
4054 pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid,
4055 uint16_t vid, uint16_t did)
4058 return (pci_add_iov_child(bus, pf, rid, vid, did));
/*
 * Attach a devinfo to the bus: create the device_t, hang the devinfo off
 * it as ivars, save/restore config state, and add its BAR resources.
 */
4063 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
4065 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
4066 device_set_ivars(dinfo->cfg.dev, dinfo);
4067 resource_list_init(&dinfo->resources);
/* Save config first, then restore — re-applies state after e.g. a reset. */
4068 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
4069 pci_cfg_restore(dinfo->cfg.dev, dinfo);
4070 pci_print_verbose(dinfo);
4071 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
4072 pci_child_added(dinfo->cfg.dev);
/* Per-child hook run after a child is added (body not visible in excerpt). */
4076 pci_child_added_method(device_t dev, device_t child)
/* Generic PCI bus probe: always matches, at low priority. */
4082 pci_probe(device_t dev)
4085 device_set_desc(dev, "PCI bus");
4087 /* Allow other subclasses to override this driver. */
4088 return (BUS_PROBE_GENERIC);
/*
 * Common attach work shared by PCI bus attach paths: reserve our bus
 * number resource and set up the bus DMA tag.
 */
4092 pci_attach_common(device_t dev)
4094 struct pci_softc *sc;
4096 #ifdef PCI_DMA_BOUNDARY
4097 int error, tag_valid;
4103 sc = device_get_softc(dev);
4104 domain = pcib_get_domain(dev);
4105 busno = pcib_get_bus(dev);
/* Claim our own bus number so children can't double-allocate it. */
4108 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
4110 if (sc->sc_bus == NULL) {
4111 device_printf(dev, "failed to allocate bus number\n");
4116 device_printf(dev, "domain=%d, physical bus=%d\n",
4118 #ifdef PCI_DMA_BOUNDARY
/*
 * Only create a boundary-limited DMA tag for the top-level PCI bus
 * (i.e. when our grandparent is not itself a "pci" devclass).
 */
4120 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
4121 devclass_find("pci")) {
4122 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
4123 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4124 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
4125 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
4127 device_printf(dev, "Failed to create DMA tag: %d\n",
/* Fall back to inheriting the parent's DMA tag. */
4134 sc->sc_dma_tag = bus_get_dma_tag(dev);
/* Attach the PCI bus: common setup, then enumerate and attach children. */
4139 pci_attach(device_t dev)
4141 int busno, domain, error;
4143 error = pci_attach_common(dev);
4148 * Since there can be multiple independently numbered PCI
4149 * busses on systems with multiple PCI domains, we can't use
4150 * the unit number to decide which bus we are probing. We ask
4151 * the parent pcib what our domain and bus numbers are.
4153 domain = pcib_get_domain(dev);
4154 busno = pcib_get_bus(dev);
4155 pci_add_children(dev, domain, busno);
4156 return (bus_generic_attach(dev));
/*
 * Detach the bus: detach children, release our bus number resource and
 * delete the child devices.
 */
4160 pci_detach(device_t dev)
4163 struct pci_softc *sc;
4167 error = bus_generic_detach(dev);
4171 sc = device_get_softc(dev);
4172 error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus);
4176 return (device_delete_children(dev));
/* Put an attached child into the given PCI power state, letting the
 * firmware (via PCIB_POWER_FOR_SLEEP) override the requested state. */
4180 pci_set_power_child(device_t dev, device_t child, int state)
4186 * Set the device to the given state. If the firmware suggests
4187 * a different power state, use it instead. If power management
4188 * is not present, the firmware is responsible for managing
4189 * device power. Skip children who aren't attached since they
4190 * are handled separately.
4192 pcib = device_get_parent(dev);
4194 if (device_is_attached(child) &&
4195 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)
4196 pci_set_powerstate(child, dstate);
/* Suspend a child: save its config space, suspend it, then (optionally)
 * power it down to D3. */
4200 pci_suspend_child(device_t dev, device_t child)
4202 struct pci_devinfo *dinfo;
4205 dinfo = device_get_ivars(child);
4208 * Save the PCI configuration space for the child and set the
4209 * device in the appropriate power state for this sleep state.
4211 pci_cfg_save(child, dinfo, 0);
4213 /* Suspend devices before potentially powering them down. */
4214 error = bus_generic_suspend_child(dev, child);
/* Powering down is policy-controlled by the pci_do_power_suspend knob. */
4219 if (pci_do_power_suspend)
4220 pci_set_power_child(dev, child, PCI_POWERSTATE_D3);
/* Resume a child: restore power (D0), restore config space, and resume. */
4226 pci_resume_child(device_t dev, device_t child)
4228 struct pci_devinfo *dinfo;
4230 if (pci_do_power_resume)
4231 pci_set_power_child(dev, child, PCI_POWERSTATE_D0);
4233 dinfo = device_get_ivars(child);
4234 pci_cfg_restore(child, dinfo);
/* For driverless children, re-save config so later restores stay valid. */
4235 if (!device_is_attached(child))
4236 pci_cfg_save(child, dinfo, 1);
4238 bus_generic_resume_child(dev, child);
/*
 * Resume the bus in two passes: base-peripheral-class children first
 * (e.g. interrupt controllers others depend on), then everything else.
 */
4244 pci_resume(device_t dev)
4246 device_t child, *devlist;
4247 int error, i, numdevs;
4249 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
4253 * Resume critical devices first, then everything else later.
4255 for (i = 0; i < numdevs; i++) {
4257 switch (pci_get_class(child)) {
4261 case PCIC_BASEPERIPH:
4262 BUS_RESUME_CHILD(dev, child);
/* Second pass: skip the classes already resumed above. */
4266 for (i = 0; i < numdevs; i++) {
4268 switch (pci_get_class(child)) {
4272 case PCIC_BASEPERIPH:
4275 BUS_RESUME_CHILD(dev, child);
4278 free(devlist, M_TEMP);
/*
 * Locate a preloaded "pci_vendor_data" module (the vendor/device name
 * database) and publish its address/size in pci_vendordata*.
 */
4283 pci_load_vendor_data(void)
4289 data = preload_search_by_type("pci_vendor_data");
4291 ptr = preload_fetch_addr(data);
4292 sz = preload_fetch_size(data);
4293 if (ptr != NULL && sz != 0) {
4294 pci_vendordata = ptr;
4295 pci_vendordata_size = sz;
4296 /* terminate the database */
/* NOTE(review): writes at index [size], one past the reported length —
 * presumably the preload area guarantees this slack byte; verify. */
4297 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new driver is registered: re-probe any currently
 * driverless (DS_NOTPRESENT) children in case the new driver claims them.
 */
4303 pci_driver_added(device_t dev, driver_t *driver)
4308 struct pci_devinfo *dinfo;
4312 device_printf(dev, "driver added\n");
4313 DEVICE_IDENTIFY(driver, dev);
4314 if (device_get_children(dev, &devlist, &numdevs) != 0)
4316 for (i = 0; i < numdevs; i++) {
4318 if (device_get_state(child) != DS_NOTPRESENT)
4320 dinfo = device_get_ivars(child);
4321 pci_print_verbose(dinfo);
4323 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
/* Restore config state before probing; clean up if attach fails. */
4324 pci_cfg_restore(child, dinfo);
4325 if (device_probe_and_attach(child) != 0)
4326 pci_child_detached(dev, child);
4328 free(devlist, M_TEMP);
/*
 * Set up an interrupt handler for a child.  After the generic setup,
 * program the MSI/MSI-X address+data pair (asking the parent bridge to
 * map the vector) and manage the INTx-disable command bit accordingly.
 */
4332 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
4333 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
4335 struct pci_devinfo *dinfo;
4336 struct msix_table_entry *mte;
4337 struct msix_vector *mv;
4343 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
4348 /* If this is not a direct child, just bail out. */
4349 if (device_get_parent(child) != dev) {
4354 rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt; MSI/MSI-X vectors use rid > 0. */
4356 /* Make sure that INTx is enabled */
4357 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
4360 * Check to see if the interrupt is MSI or MSI-X.
4361 * Ask our parent to map the MSI and give
4362 * us the address and data register values.
4363 * If we fail for some reason, teardown the
4364 * interrupt handler.
4366 dinfo = device_get_ivars(child);
4367 if (dinfo->cfg.msi.msi_alloc > 0) {
/* MSI: lazily map the vector on first handler registration. */
4368 if (dinfo->cfg.msi.msi_addr == 0) {
4369 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
4370 ("MSI has handlers, but vectors not mapped"));
4371 error = PCIB_MAP_MSI(device_get_parent(dev),
4372 child, rman_get_start(irq), &addr, &data);
4375 dinfo->cfg.msi.msi_addr = addr;
4376 dinfo->cfg.msi.msi_data = data;
4378 if (dinfo->cfg.msi.msi_handlers == 0)
4379 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
4380 dinfo->cfg.msi.msi_data);
4381 dinfo->cfg.msi.msi_handlers++;
/* MSI-X: map/program the per-rid table entry and unmask it. */
4383 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4384 ("No MSI or MSI-X interrupts allocated"));
4385 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4386 ("MSI-X index too high"));
4387 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4388 KASSERT(mte->mte_vector != 0, ("no message vector"));
4389 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
4390 KASSERT(mv->mv_irq == rman_get_start(irq),
4392 if (mv->mv_address == 0) {
4393 KASSERT(mte->mte_handlers == 0,
4394 ("MSI-X table entry has handlers, but vector not mapped"));
4395 error = PCIB_MAP_MSI(device_get_parent(dev),
4396 child, rman_get_start(irq), &addr, &data);
4399 mv->mv_address = addr;
4402 if (mte->mte_handlers == 0) {
4403 pci_enable_msix(child, rid - 1, mv->mv_address,
4405 pci_unmask_msix(child, rid - 1);
4407 mte->mte_handlers++;
4411 * Make sure that INTx is disabled if we are using MSI/MSI-X,
4412 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
4413 * in which case we "enable" INTx so MSI/MSI-X actually works.
4415 if (!pci_has_quirk(pci_get_devid(child),
4416 PCI_QUIRK_MSI_INTX_BUG))
4417 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4419 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* On mapping failure, undo the generic setup before returning the error. */
4422 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Tear down an interrupt handler: drop the MSI/MSI-X handler refcount,
 * disabling/masking the message when the last handler goes away, then do
 * the generic teardown.
 */
4432 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
4435 struct msix_table_entry *mte;
4436 struct resource_list_entry *rle;
4437 struct pci_devinfo *dinfo;
4440 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
4443 /* If this isn't a direct child, just bail out */
4444 if (device_get_parent(child) != dev)
4445 return(bus_generic_teardown_intr(dev, child, irq, cookie));
4447 rid = rman_get_rid(irq);
/* For legacy INTx (rid 0), mask interrupts via the command register. */
4450 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4453 * Check to see if the interrupt is MSI or MSI-X. If so,
4454 * decrement the appropriate handlers count and mask the
4455 * MSI-X message, or disable MSI messages if the count
4458 dinfo = device_get_ivars(child);
4459 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
/* Sanity: the resource being torn down must be the one we track. */
4460 if (rle->res != irq)
4462 if (dinfo->cfg.msi.msi_alloc > 0) {
4463 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4464 ("MSI-X index too high"));
4465 if (dinfo->cfg.msi.msi_handlers == 0)
4467 dinfo->cfg.msi.msi_handlers--;
4468 if (dinfo->cfg.msi.msi_handlers == 0)
4469 pci_disable_msi(child);
4471 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4472 ("No MSI or MSI-X interrupts allocated"));
4473 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4474 ("MSI-X index too high"));
4475 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4476 if (mte->mte_handlers == 0)
4478 mte->mte_handlers--;
4479 if (mte->mte_handlers == 0)
4480 pci_mask_msix(child, rid - 1);
4483 error = bus_generic_teardown_intr(dev, child, irq, cookie);
4486 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Print the standard one-line description of a child device (resources,
 * flags, slot.function, domain) for the boot log.  Returns the number of
 * characters printed.
 */
4491 pci_print_child(device_t dev, device_t child)
4493 struct pci_devinfo *dinfo;
4494 struct resource_list *rl;
4497 dinfo = device_get_ivars(child);
4498 rl = &dinfo->resources;
4500 retval += bus_print_child_header(dev, child);
4502 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
4503 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
4504 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
4505 if (device_get_flags(dev))
4506 retval += printf(" flags %#x", device_get_flags(dev));
4508 retval += printf(" at device %d.%d", pci_get_slot(child),
4509 pci_get_function(child));
4511 retval += bus_print_child_domain(dev, child);
4512 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable name table used by pci_probe_nomatch().
 * subclass == -1 is the per-class fallback entry; entries with report == 0
 * are only printed when booting verbose.
 */
4521 int report; /* 0 = bootverbose, 1 = always */
4523 } pci_nomatch_tab[] = {
4524 {PCIC_OLD, -1, 1, "old"},
4525 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4526 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4527 {PCIC_STORAGE, -1, 1, "mass storage"},
4528 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
4529 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
4530 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
4531 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
4532 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
4533 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
4534 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
4535 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
4536 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
4537 {PCIC_NETWORK, -1, 1, "network"},
4538 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
4539 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
4540 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
4541 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
4542 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
4543 {PCIC_DISPLAY, -1, 1, "display"},
4544 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
4545 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
4546 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
4547 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4548 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
4549 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
4550 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
4551 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
4552 {PCIC_MEMORY, -1, 1, "memory"},
4553 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
4554 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
4555 {PCIC_BRIDGE, -1, 1, "bridge"},
4556 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4557 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4558 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4559 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4560 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4561 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4562 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4563 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4564 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4565 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4566 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
4567 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
4568 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
4569 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
4570 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4571 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
4572 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
4573 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
4574 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
4575 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4576 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
4577 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"},
4578 {PCIC_INPUTDEV, -1, 1, "input device"},
4579 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
4580 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
4581 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
4582 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
4583 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
4584 {PCIC_DOCKING, -1, 1, "docking station"},
4585 {PCIC_PROCESSOR, -1, 1, "processor"},
4586 {PCIC_SERIALBUS, -1, 1, "serial bus"},
4587 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
4588 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
4589 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
4590 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
4591 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
4592 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
4593 {PCIC_WIRELESS, -1, 1, "wireless controller"},
4594 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
4595 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
4596 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
4597 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
4598 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
4599 {PCIC_SATCOM, -1, 1, "satellite communication"},
4600 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
4601 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
4602 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
4603 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
4604 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
4605 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
4606 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
4607 {PCIC_DASP, -1, 0, "dasp"},
4608 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
/*
 * Report a child no driver attached to: prefer a description from the
 * loaded vendor database, otherwise fall back to pci_nomatch_tab class
 * and subclass names.  Finally re-save config so a later driver load can
 * restore it.
 */
4613 pci_probe_nomatch(device_t dev, device_t child)
4616 const char *cp, *scp;
4620 * Look for a listing for this device in a loaded device database.
4623 if ((device = pci_describe_device(child)) != NULL) {
4624 device_printf(dev, "<%s>", device);
4625 free(device, M_DEVBUF);
4628 * Scan the class/subclass descriptions for a general
4633 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4634 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
4635 if (pci_nomatch_tab[i].subclass == -1) {
4636 cp = pci_nomatch_tab[i].desc;
4637 report = pci_nomatch_tab[i].report;
4638 } else if (pci_nomatch_tab[i].subclass ==
4639 pci_get_subclass(child)) {
4640 scp = pci_nomatch_tab[i].desc;
4641 report = pci_nomatch_tab[i].report;
4645 if (report || bootverbose) {
4646 device_printf(dev, "<%s%s%s>",
4648 ((cp != NULL) && (scp != NULL)) ? ", " : "",
4652 if (report || bootverbose) {
4653 printf(" at device %d.%d (no driver attached)\n",
4654 pci_get_slot(child), pci_get_function(child));
4656 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Clean up after a detached child: release any resources its driver
 * leaked (IRQs, MSI vectors, memory, I/O ports, bus numbers), warning
 * about each leak, then save the device's config state.
 */
4660 pci_child_detached(device_t dev, device_t child)
4662 struct pci_devinfo *dinfo;
4663 struct resource_list *rl;
4665 dinfo = device_get_ivars(child);
4666 rl = &dinfo->resources;
4669 * Have to deallocate IRQs before releasing any MSI messages and
4670 * have to release MSI messages before deallocating any memory
4673 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
4674 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
4675 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
4676 pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
4677 (void)pci_release_msi(child);
4679 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
4680 pci_printf(&dinfo->cfg, "Device leaked memory resources\n")
4681 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
4682 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
4684 if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
4685 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
4688 pci_cfg_save(child, dinfo, 1);
4692 * Parse the PCI device database, if loaded, and return a pointer to a
4693 * description of the device.
4695 * The database is flat text formatted as follows:
4697 * Any line not in a valid format is ignored.
4698 * Lines are terminated with newline '\n' characters.
4700 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4703 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4704 * - devices cannot be listed without a corresponding VENDOR line.
4705 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4706 * another TAB, then the device name.
4710 * Assuming (ptr) points to the beginning of a line in the database,
4711 * return the vendor or device and description of the next entry.
4712 * The value of (vendor) or (device) inappropriate for the entry type
4713 * is set to -1. Returns nonzero at the end of the database.
4715 * Note that this is slightly unrobust in the face of corrupt data;
4716 * we attempt to safeguard against this by spamming the end of the
4717 * database with a newline when we initialise.
/*
 * Parse one line of the vendor database at *ptr, returning the vendor or
 * device code plus its description (see the format comment above).
 * Advances *ptr past the parsed line.
 */
4720 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* Bytes remaining in the database from the current cursor. */
4729 left = pci_vendordata_size - (cp - pci_vendordata);
/* Vendor lines: hex code at column 0; device lines: TAB then hex code. */
4737 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
4741 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4744 /* skip to next line */
4745 while (*cp != '\n' && left > 0) {
4754 /* skip to next line */
4755 while (*cp != '\n' && left > 0) {
4759 if (*cp == '\n' && left > 0)
/*
 * Build a "vendor, device" description string for dev from the loaded
 * vendor database.  Returns a malloc'd string (caller frees with
 * M_DEVBUF) or NULL.
 */
4766 pci_describe_device(device_t dev)
4769 char *desc, *vp, *dp, *line;
4771 desc = vp = dp = NULL;
4774 * If we have no vendor data, we can't do anything.
4776 if (pci_vendordata == NULL)
4780 * Scan the vendor data looking for this device
4782 line = pci_vendordata;
4783 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4786 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4788 if (vendor == pci_get_vendor(dev))
4791 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4794 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4802 if (device == pci_get_device(dev))
/* No device entry found: fall back to printing the raw device ID. */
4806 snprintf(dp, 80, "0x%x", pci_get_device(dev));
/* +3 covers the ", " separator and the terminating NUL. */
4807 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4809 sprintf(desc, "%s, %s", vp, dp);
/*
 * Read a PCI instance variable (ivar) for a child: dispatch on `which`
 * and copy the corresponding cached config-header field into *result.
 */
4819 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4821 struct pci_devinfo *dinfo;
4824 dinfo = device_get_ivars(child);
4828 case PCI_IVAR_ETHADDR:
4830 * The generic accessor doesn't deal with failure, so
4831 * we set the return value, then return an error.
4833 *((uint8_t **) result) = NULL;
4835 case PCI_IVAR_SUBVENDOR:
4836 *result = cfg->subvendor;
4838 case PCI_IVAR_SUBDEVICE:
4839 *result = cfg->subdevice;
4841 case PCI_IVAR_VENDOR:
4842 *result = cfg->vendor;
4844 case PCI_IVAR_DEVICE:
4845 *result = cfg->device;
4847 case PCI_IVAR_DEVID:
/* devid packs device ID in the high 16 bits, vendor ID in the low. */
4848 *result = (cfg->device << 16) | cfg->vendor;
4850 case PCI_IVAR_CLASS:
4851 *result = cfg->baseclass;
4853 case PCI_IVAR_SUBCLASS:
4854 *result = cfg->subclass;
4856 case PCI_IVAR_PROGIF:
4857 *result = cfg->progif;
4859 case PCI_IVAR_REVID:
4860 *result = cfg->revid;
4862 case PCI_IVAR_INTPIN:
4863 *result = cfg->intpin;
4866 *result = cfg->intline;
4868 case PCI_IVAR_DOMAIN:
4869 *result = cfg->domain;
4875 *result = cfg->slot;
4877 case PCI_IVAR_FUNCTION:
4878 *result = cfg->func;
4880 case PCI_IVAR_CMDREG:
4881 *result = cfg->cmdreg;
4883 case PCI_IVAR_CACHELNSZ:
4884 *result = cfg->cachelnsz;
4886 case PCI_IVAR_MINGNT:
/* mingnt/maxlat only exist in type-0 (normal) config headers. */
4887 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4891 *result = cfg->mingnt;
4893 case PCI_IVAR_MAXLAT:
4894 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
4898 *result = cfg->maxlat;
4900 case PCI_IVAR_LATTIMER:
4901 *result = cfg->lattimer;
/*
 * Write a PCI instance variable: only INTPIN is writable; all identity
 * and geometry ivars are read-only and rejected.
 */
4910 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4912 struct pci_devinfo *dinfo;
4914 dinfo = device_get_ivars(child);
4917 case PCI_IVAR_INTPIN:
4918 dinfo->cfg.intpin = value;
4920 case PCI_IVAR_ETHADDR:
4921 case PCI_IVAR_SUBVENDOR:
4922 case PCI_IVAR_SUBDEVICE:
4923 case PCI_IVAR_VENDOR:
4924 case PCI_IVAR_DEVICE:
4925 case PCI_IVAR_DEVID:
4926 case PCI_IVAR_CLASS:
4927 case PCI_IVAR_SUBCLASS:
4928 case PCI_IVAR_PROGIF:
4929 case PCI_IVAR_REVID:
4931 case PCI_IVAR_DOMAIN:
4934 case PCI_IVAR_FUNCTION:
4935 return (EINVAL); /* disallow for now */
4942 #include "opt_ddb.h"
4944 #include <ddb/ddb.h>
4945 #include <sys/cons.h>
4948 * List resources based on pci map registers, used for within ddb
/* DDB command "show pciregs": dump identity info for every known device. */
4951 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4953 struct pci_devinfo *dinfo;
4954 struct devlist *devlist_head;
4957 int i, error, none_count;
4960 /* get the head of the device queue */
4961 devlist_head = &pci_devq;
4964 * Go through the list of devices and print out devices
/* Stop early if the pager is quit or the global device count is hit. */
4966 for (error = 0, i = 0,
4967 dinfo = STAILQ_FIRST(devlist_head);
4968 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4969 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4971 /* Populate pd_name and pd_unit */
4974 name = device_get_name(dinfo->cfg.dev);
4977 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4978 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4979 (name && *name) ? name : "none",
4980 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4982 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4983 p->pc_sel.pc_func, (p->pc_class << 16) |
4984 (p->pc_subclass << 8) | p->pc_progif,
4985 (p->pc_subdevice << 16) | p->pc_subvendor,
4986 (p->pc_device << 16) | p->pc_vendor,
4987 p->pc_revid, p->pc_hdr);
/*
 * Lazily size and reserve the resource backing a BAR: determine the BAR's
 * real size from hardware, validate the requested type against the BAR
 * type, reserve a suitably sized/aligned range, and program the BAR with
 * the allocated address.
 */
4992 static struct resource *
4993 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4994 rman_res_t start, rman_res_t end, rman_res_t count, u_int num,
4997 struct pci_devinfo *dinfo = device_get_ivars(child);
4998 struct resource_list *rl = &dinfo->resources;
4999 struct resource *res;
5001 pci_addr_t map, testval;
5006 /* If rid is managed by EA, ignore it */
5007 if (pci_ea_is_enabled(child, *rid))
5010 pm = pci_find_bar(child, *rid);
5012 /* This is a BAR that we failed to allocate earlier. */
5013 mapsize = pm->pm_size;
5017 * Weed out the bogons, and figure out how large the
5018 * BAR/map is. BARs that read back 0 here are bogus
5019 * and unimplemented. Note: atapci in legacy mode are
5020 * special and handled elsewhere in the code. If you
5021 * have a atapci device in legacy mode and it fails
5022 * here, that other code is broken.
5024 pci_read_bar(child, *rid, &map, &testval, NULL);
5027 * Determine the size of the BAR and ignore BARs with a size
5028 * of 0. Device ROM BARs use a different mask value.
5030 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
5031 mapsize = pci_romsize(testval);
5033 mapsize = pci_mapsize(testval);
5036 pm = pci_add_bar(child, *rid, map, mapsize);
/* Reject requests whose resource type contradicts the BAR type. */
5039 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
5040 if (type != SYS_RES_MEMORY) {
5043 "child %s requested type %d for rid %#x,"
5044 " but the BAR says it is an memio\n",
5045 device_get_nameunit(child), type, *rid);
5049 if (type != SYS_RES_IOPORT) {
5052 "child %s requested type %d for rid %#x,"
5053 " but the BAR says it is an ioport\n",
5054 device_get_nameunit(child), type, *rid);
5060 * For real BARs, we need to override the size that
5061 * the driver requests, because that's what the BAR
5062 * actually uses and we would otherwise have a
5063 * situation where we might allocate the excess to
5064 * another driver, which won't work.
5066 count = ((pci_addr_t)1 << mapsize) * num;
/* BARs are naturally aligned to their size. */
5067 if (RF_ALIGNMENT(flags) < mapsize)
5068 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
5069 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
5070 flags |= RF_PREFETCHABLE;
5073 * Allocate enough resource, and then write back the
5074 * appropriate BAR for that resource.
5076 resource_list_add(rl, type, *rid, start, end, count);
5077 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
5078 count, flags & ~RF_ACTIVE);
5080 resource_list_delete(rl, type, *rid);
5081 device_printf(child,
5082 "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n",
5083 count, *rid, type, start, end);
5087 device_printf(child,
5088 "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n",
5089 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address we were actually given. */
5090 map = rman_get_start(res);
5091 pci_write_bar(child, pm, map);
/*
 * Core lazy resource allocator for child devices: route bus-number and
 * bridge-window requests appropriately, assign a legacy interrupt on
 * demand, and reserve BAR-backed ranges via pci_reserve_map() before
 * handing out the resource.
 */
5097 pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
5098 rman_res_t start, rman_res_t end, rman_res_t count, u_long num,
5101 struct pci_devinfo *dinfo;
5102 struct resource_list *rl;
5103 struct resource_list_entry *rle;
5104 struct resource *res;
5108 * Perform lazy resource allocation
5110 dinfo = device_get_ivars(child);
5111 rl = &dinfo->resources;
5114 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
5116 return (pci_alloc_secbus(dev, child, rid, start, end, count,
5121 * Can't alloc legacy interrupt once MSI messages have
5124 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
5125 cfg->msix.msix_alloc > 0))
5129 * If the child device doesn't have an interrupt
5130 * routed and is deserving of an interrupt, try to
5133 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
5135 pci_assign_interrupt(dev, child, 0);
5137 case SYS_RES_IOPORT:
5138 case SYS_RES_MEMORY:
5141 * PCI-PCI bridge I/O window resources are not BARs.
5142 * For those allocations just pass the request up the
5145 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
5147 case PCIR_IOBASEL_1:
5148 case PCIR_MEMBASE_1:
5149 case PCIR_PMBASEL_1:
5151 * XXX: Should we bother creating a resource
5154 return (bus_generic_alloc_resource(dev, child,
5155 type, rid, start, end, count, flags));
5159 /* Reserve resources for this BAR if needed. */
5160 rle = resource_list_find(rl, type, *rid);
5162 res = pci_reserve_map(dev, child, type, rid, start, end,
5168 return (resource_list_alloc(rl, dev, child, type, rid,
5169 start, end, count, flags));
/*
 * Bus alloc_resource method: pass non-direct children up the tree, route
 * VF memory BARs through the SR-IOV allocator, otherwise fall through to
 * the common lazy allocator.
 */
5173 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
5174 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
5177 struct pci_devinfo *dinfo;
5180 if (device_get_parent(child) != dev)
5181 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
5182 type, rid, start, end, count, flags));
5185 dinfo = device_get_ivars(child);
5186 if (dinfo->cfg.flags & PCICFG_VF) {
5188 /* VFs can't have I/O BARs. */
5189 case SYS_RES_IOPORT:
5191 case SYS_RES_MEMORY:
5192 return (pci_vf_alloc_mem_resource(dev, child, rid,
5193 start, end, count, flags));
5196 /* Fall through for other types of resource allocations. */
5200 return (pci_alloc_multi_resource(dev, child, type, rid, start, end,
/*
 * Bus release_resource method: mirror image of pci_alloc_resource —
 * pass non-direct children and bridge-window resources up the tree,
 * route VF memory through the SR-IOV path, otherwise release from the
 * child's resource list.
 */
5205 pci_release_resource(device_t dev, device_t child, int type, int rid,
5208 struct pci_devinfo *dinfo;
5209 struct resource_list *rl;
5212 if (device_get_parent(child) != dev)
5213 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
5216 dinfo = device_get_ivars(child);
5220 if (dinfo->cfg.flags & PCICFG_VF) {
5222 /* VFs can't have I/O BARs. */
5223 case SYS_RES_IOPORT:
5225 case SYS_RES_MEMORY:
5226 return (pci_vf_release_mem_resource(dev, child, rid,
5230 /* Fall through for other types of resource allocations. */
5236 * PCI-PCI bridge I/O window resources are not BARs. For
5237 * those allocations just pass the request up the tree.
5239 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
5240 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
5242 case PCIR_IOBASEL_1:
5243 case PCIR_MEMBASE_1:
5244 case PCIR_PMBASEL_1:
5245 return (bus_generic_release_resource(dev, child, type,
5251 rl = &dinfo->resources;
5252 return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * Activate a resource, then enable the matching decode (I/O or memory)
 * in the child's command register; device ROM BARs additionally need
 * their per-BAR enable bit set.
 */
5256 pci_activate_resource(device_t dev, device_t child, int type, int rid,
5259 struct pci_devinfo *dinfo;
5262 error = bus_generic_activate_resource(dev, child, type, rid, r);
5266 /* Enable decoding in the command register when activating BARs. */
5267 if (device_get_parent(child) == dev) {
5268 /* Device ROMs need their decoding explicitly enabled. */
5269 dinfo = device_get_ivars(child);
5270 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
5271 pci_write_bar(child, pci_find_bar(child, rid),
5272 rman_get_start(r) | PCIM_BIOS_ENABLE);
5274 case SYS_RES_IOPORT:
5275 case SYS_RES_MEMORY:
5276 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus method: deactivate a child's resource.  Mirrors
 * pci_activate_resource(): after the generic deactivation, decoding
 * of expansion-ROM BARs is switched off again (the BAR is rewritten
 * without PCIM_BIOS_ENABLE — the argument continuation is not visible
 * in this excerpt).
 */
5284 pci_deactivate_resource(device_t dev, device_t child, int type,
5285 int rid, struct resource *r)
5287 struct pci_devinfo *dinfo;
5290 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
5294 /* Disable decoding for device ROMs. */
5295 if (device_get_parent(child) == dev) {
5296 dinfo = device_get_ivars(child);
5297 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
5298 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Bus method: called when a child device is being deleted.  Disables
 * the device's address decoding and bus mastering (if it is still
 * present), complains about and force-releases any resources a driver
 * failed to give back, then frees the resource list.
 */
5305 pci_child_deleted(device_t dev, device_t child)
5307 struct resource_list_entry *rle;
5308 struct resource_list *rl;
5309 struct pci_devinfo *dinfo;
5311 dinfo = device_get_ivars(child);
5312 rl = &dinfo->resources;
5314 /* Turn off access to resources we're about to free */
5315 if (bus_child_present(child) != 0) {
/* Clear memory/I/O decode so the hardware stops claiming cycles. */
5316 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
5317 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
5319 pci_disable_busmaster(child);
5322 /* Free all allocated resources */
5323 STAILQ_FOREACH(rle, rl, link) {
/* A still-active or still-busy entry means a driver leaked it. */
5325 if (rman_get_flags(rle->res) & RF_ACTIVE ||
5326 resource_list_busy(rl, rle->type, rle->rid)) {
5327 pci_printf(&dinfo->cfg,
5328 "Resource still owned, oops. "
/*
 * NOTE(review): "%lx" is wrong where rman_get_start() returns a type
 * wider than long (rman_res_t is uintmax_t on modern FreeBSD); the
 * sibling pci_delete_resource() correctly uses "%jx".  Should be
 * "%jx" with a (uintmax_t) value — confirm rman_res_t width.
 */
5329 "(type=%d, rid=%d, addr=%lx)\n",
5330 rle->type, rle->rid,
5331 rman_get_start(rle->res));
5332 bus_release_resource(child, rle->type, rle->rid,
5335 resource_list_unreserve(rl, dev, child, rle->type,
5339 resource_list_free(rl);
/*
 * Bus method: delete a (type, rid) entry from a child's resource list.
 * Only honored for our own children; an entry still held active/busy by
 * a driver is reported rather than silently dropped.
 * NOTE(review): excerpted listing — the early returns after the parent
 * check, the NULL-entry check and the rle->res guard are not visible.
 */
5345 pci_delete_resource(device_t dev, device_t child, int type, int rid)
5347 struct pci_devinfo *dinfo;
5348 struct resource_list *rl;
5349 struct resource_list_entry *rle;
5351 if (device_get_parent(child) != dev)
5354 dinfo = device_get_ivars(child);
5355 rl = &dinfo->resources;
5356 rle = resource_list_find(rl, type, rid);
/* Refuse to pull a resource out from under an active consumer. */
5361 if (rman_get_flags(rle->res) & RF_ACTIVE ||
5362 resource_list_busy(rl, type, rid)) {
5363 device_printf(dev, "delete_resource: "
5364 "Resource still owned by child, oops. "
5365 "(type=%d, rid=%d, addr=%jx)\n",
5366 type, rid, rman_get_start(rle->res));
/* Drop our reservation, then remove the list entry itself. */
5369 resource_list_unreserve(rl, dev, child, type, rid);
5371 resource_list_delete(rl, type, rid);
/*
 * Bus method: return the per-device resource list stored in the
 * child's ivars (used by the resource_list_* helpers above).
 */
5374 struct resource_list *
5375 pci_get_resource_list (device_t dev, device_t child)
5377 struct pci_devinfo *dinfo = device_get_ivars(child);
5379 return (&dinfo->resources);
/*
 * Bus method: hand out the DMA tag kept in the pci bus softc.
 * All children share this parent tag.
 */
5383 pci_get_dma_tag(device_t bus, device_t dev)
5385 struct pci_softc *sc = device_get_softc(bus);
5387 return (sc->sc_dma_tag);
/*
 * Bus method: read 'width' bytes of config space at 'reg' for a child.
 * SR-IOV virtual functions do not implement the vendor/device ID
 * registers in hardware, so reads of those are synthesized from the
 * cached cfg; everything else is forwarded to the parent bridge.
 * NOTE(review): excerpted listing — the width switch statements inside
 * the VF emulation are not visible here.
 */
5391 pci_read_config_method(device_t dev, device_t child, int reg, int width)
5393 struct pci_devinfo *dinfo = device_get_ivars(child);
5394 pcicfgregs *cfg = &dinfo->cfg;
5398 * SR-IOV VFs don't implement the VID or DID registers, so we have to
5399 * emulate them here.
5401 if (cfg->flags & PCICFG_VF) {
5402 if (reg == PCIR_VENDOR) {
/* 4-byte read at PCIR_VENDOR returns DID in the high half. */
5405 return (cfg->device << 16 | cfg->vendor);
5407 return (cfg->vendor);
5409 return (cfg->vendor & 0xff);
/* Unsupported width: behave like an unimplemented register. */
5411 return (0xffffffff);
5413 } else if (reg == PCIR_DEVICE) {
5415 /* Note that an unaligned 4-byte read is an error. */
5417 return (cfg->device);
5419 return (cfg->device & 0xff);
5421 return (0xffffffff);
/* Normal case: let the host bridge perform the config access. */
5427 return (PCIB_READ_CONFIG(device_get_parent(dev),
5428 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus method: write 'width' bytes of 'val' to config register 'reg'
 * of a child device by forwarding to the parent host bridge.
 */
5432 pci_write_config_method(device_t dev, device_t child, int reg,
5433 uint32_t val, int width)
5435 struct pci_devinfo *dinfo = device_get_ivars(child);
5436 pcicfgregs *cfg = &dinfo->cfg;
5438 PCIB_WRITE_CONFIG(device_get_parent(dev),
5439 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * Bus method: format the child's location string for devinfo/devctl,
 * including the full domain:bus:slot:function ("dbsf") selector.
 */
5443 pci_child_location_str_method(device_t dev, device_t child, char *buf,
5447 snprintf(buf, buflen, "slot=%d function=%d dbsf=pci%d:%d:%d:%d",
5448 pci_get_slot(child), pci_get_function(child), pci_get_domain(child),
5449 pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
/*
 * Bus method: format the child's plug-and-play identification string
 * (vendor/device/subsystem IDs and class bytes) used for driver
 * matching by devmatch and friends.
 */
5454 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
5457 struct pci_devinfo *dinfo;
5460 dinfo = device_get_ivars(child);
5462 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
5463 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
5464 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus method: ask the parent bridge to route the child's legacy INTx
 * interrupt (the pin argument continuation is outside this excerpt).
 */
5470 pci_assign_interrupt_method(device_t dev, device_t child)
5472 struct pci_devinfo *dinfo = device_get_ivars(child);
5473 pcicfgregs *cfg = &dinfo->cfg;
5475 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * dev_lookup eventhandler: translate a "pciD:B:S:F" or "pciB:S:F"
 * selector string into a device_t via pci_find_dbsf().  Each strtol
 * pass parses one colon-separated component and validates its range.
 * NOTE(review): excerpted listing — the early returns and the
 * assignments of the parsed values into domain/bus/slot/func are not
 * visible here.
 */
5480 pci_lookup(void *arg, const char *name, device_t *dev)
5484 int domain, bus, slot, func;
5490 * Accept pciconf-style selectors of either pciD:B:S:F or
5491 * pciB:S:F. In the latter case, the domain is assumed to
5494 if (strncmp(name, "pci", 3) != 0)
/* First component: must be non-negative and followed by ':'. */
5496 val = strtol(name + 3, &end, 10);
5497 if (val < 0 || val > INT_MAX || *end != ':')
5500 val = strtol(end + 1, &end, 10);
5501 if (val < 0 || val > INT_MAX || *end != ':')
5504 val = strtol(end + 1, &end, 10);
5505 if (val < 0 || val > INT_MAX)
/* Fourth component present: the 4-part pciD:B:S:F form. */
5509 val = strtol(end + 1, &end, 10);
5510 if (val < 0 || val > INT_MAX || *end != '\0')
/* Only three components: pciB:S:F form. */
5513 } else if (*end == '\0') {
/*
 * Range-check against the PCI limits; ARI allows functions up to
 * PCIE_ARI_FUNCMAX, but only on slot 0.
 */
5521 if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
5522 func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
5525 *dev = pci_find_dbsf(domain, bus, slot, func);
/*
 * Module event handler for the pci module: on load, initialize the
 * global device queue, create the /dev control node, load the vendor
 * data table, and register the dev_lookup hook; on unload, undo the
 * registration and destroy the node.  (The MOD_LOAD/MOD_UNLOAD switch
 * labels are outside this excerpt.)
 */
5529 pci_modevent(module_t mod, int what, void *arg)
5531 static struct cdev *pci_cdev;
5532 static eventhandler_tag tag;
5536 STAILQ_INIT(&pci_devq);
5538 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
5540 pci_load_vendor_data();
5541 tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
/* Unload path: tear down in reverse order of creation. */
5547 EVENTHANDLER_DEREGISTER(dev_lookup, tag);
5548 destroy_dev(pci_cdev);
/*
 * Restore the saved PCI Express capability control registers.  Which
 * registers exist depends on the capability version and the port type:
 * v1 devices implement only the registers relevant to their role,
 * while v2+ devices implement the full set (reserved fields read as
 * zero), hence the "version > 1 || type == ..." tests.
 */
5556 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
/* Local helper: 16-bit write relative to the capability offset. */
5558 #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
5559 struct pcicfg_pcie *cfg;
5562 cfg = &dinfo->cfg.pcie;
5563 pos = cfg->pcie_location;
5565 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
/* Device Control exists for every PCIe function. */
5567 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
/* Link Control: root ports and endpoints (or any v2+ device). */
5569 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5570 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5571 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5572 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
/* Slot Control: ports that implement a slot. */
5574 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5575 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5576 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5577 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
/* Root Control: root ports and root complex event collectors. */
5579 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5580 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5581 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
/* The "2" control registers were introduced with capability v2. */
5584 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
5585 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
5586 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
/*
 * Restore the saved PCI-X command register (the only PCI-X capability
 * state this driver preserves across power transitions).
 */
5592 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
5594 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
5595 dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a device's configuration space from the cached copy in
 * dinfo.  Order matters: power up to D0 first (D3->D0 resets BARs and
 * other registers), then the common header registers, then the
 * header-type-specific registers, then the BARs, and finally the
 * PCIe/PCI-X/MSI/MSI-X/SR-IOV capability state.
 */
5599 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
5603 * Restore the device to full power mode. We must do this
5604 * before we restore the registers because moving from D3 to
5605 * D0 will cause the chip's BARs and some other registers to
5606 * be reset to some unknown power on reset values. Cut down
5607 * the noise on boot by doing nothing if we are already in
5610 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
5611 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
/* Common (type-independent) header registers. */
5612 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
5613 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
5614 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
5615 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
5616 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
5617 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
5618 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
/* Header-type-specific registers (type 0/1/2 layouts differ). */
5619 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
5620 case PCIM_HDRTYPE_NORMAL:
5621 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
5622 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
5624 case PCIM_HDRTYPE_BRIDGE:
5625 pci_write_config(dev, PCIR_SECLAT_1,
5626 dinfo->cfg.bridge.br_seclat, 1);
5627 pci_write_config(dev, PCIR_SUBBUS_1,
5628 dinfo->cfg.bridge.br_subbus, 1);
5629 pci_write_config(dev, PCIR_SECBUS_1,
5630 dinfo->cfg.bridge.br_secbus, 1);
5631 pci_write_config(dev, PCIR_PRIBUS_1,
5632 dinfo->cfg.bridge.br_pribus, 1);
5633 pci_write_config(dev, PCIR_BRIDGECTL_1,
5634 dinfo->cfg.bridge.br_control, 2);
5636 case PCIM_HDRTYPE_CARDBUS:
5637 pci_write_config(dev, PCIR_SECLAT_2,
5638 dinfo->cfg.bridge.br_seclat, 1);
5639 pci_write_config(dev, PCIR_SUBBUS_2,
5640 dinfo->cfg.bridge.br_subbus, 1);
5641 pci_write_config(dev, PCIR_SECBUS_2,
5642 dinfo->cfg.bridge.br_secbus, 1);
5643 pci_write_config(dev, PCIR_PRIBUS_2,
5644 dinfo->cfg.bridge.br_pribus, 1);
5645 pci_write_config(dev, PCIR_BRIDGECTL_2,
5646 dinfo->cfg.bridge.br_control, 2);
/* Rewrite the BARs from the cached values. */
5649 pci_restore_bars(dev);
5652 * Restore extended capabilities for PCI-Express and PCI-X
5654 if (dinfo->cfg.pcie.pcie_location != 0)
5655 pci_cfg_restore_pcie(dev, dinfo);
5656 if (dinfo->cfg.pcix.pcix_location != 0)
5657 pci_cfg_restore_pcix(dev, dinfo);
5659 /* Restore MSI and MSI-X configurations if they are present. */
5660 if (dinfo->cfg.msi.msi_location != 0)
5661 pci_resume_msi(dev);
5662 if (dinfo->cfg.msix.msix_location != 0)
5663 pci_resume_msix(dev);
/* SR-IOV state, if this device has the capability configured. */
5666 if (dinfo->cfg.iov != NULL)
5667 pci_iov_cfg_restore(dev, dinfo);
/*
 * Save the PCI Express capability control registers into dinfo.
 * Mirrors pci_cfg_restore_pcie(): the set of registers read depends on
 * the capability version and the port type (v2+ devices implement the
 * full register set, v1 devices only the role-specific subset).
 */
5672 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
/* Local helper: 16-bit read relative to the capability offset. */
5674 #define RREG(n) pci_read_config(dev, pos + (n), 2)
5675 struct pcicfg_pcie *cfg;
5678 cfg = &dinfo->cfg.pcie;
5679 pos = cfg->pcie_location;
/* Refresh flags first — the version field gates the reads below. */
5681 cfg->pcie_flags = RREG(PCIER_FLAGS);
5683 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
5685 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
5687 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5688 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
5689 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
5690 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
/* Slot Control: ports that implement a slot. */
5692 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5693 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
5694 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
5695 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
/* Root Control: root ports and root complex event collectors. */
5697 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
5698 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
5699 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
/* The "2" control registers were introduced with capability v2. */
5702 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
5703 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
5704 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
/*
 * Save the PCI-X command register into dinfo (counterpart of
 * pci_cfg_restore_pcix()).
 */
5710 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5712 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5713 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * Snapshot a device's configuration space into dinfo so it can be
 * restored later (suspend/resume, driver detach).  When 'setstate' is
 * requested, the device may additionally be placed into D3, subject to
 * the pci_do_power_nodriver policy and per-class exceptions.
 * NOTE(review): excerpted listing — the 'cls'/'ps' declarations,
 * break statements and the setstate gating are not all visible here.
 */
5717 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5723 * Some drivers apparently write to these registers w/o updating our
5724 * cached copy. No harm happens if we update the copy, so do so here
5725 * so we can restore them. The COMMAND register is modified by the
5726 * bus w/o updating the cache. This should represent the normally
5727 * writable portion of the 'defined' part of type 0/1/2 headers.
5729 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5730 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5731 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5732 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5733 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5734 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5735 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5736 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5737 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5738 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5739 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
/* Header-type-specific registers (mirrors pci_cfg_restore()). */
5740 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
5741 case PCIM_HDRTYPE_NORMAL:
5742 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5743 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5744 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5745 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5747 case PCIM_HDRTYPE_BRIDGE:
5748 dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
5750 dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
5752 dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
5754 dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
5756 dinfo->cfg.bridge.br_control = pci_read_config(dev,
5757 PCIR_BRIDGECTL_1, 2);
5759 case PCIM_HDRTYPE_CARDBUS:
5760 dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
5762 dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
5764 dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
5766 dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
5768 dinfo->cfg.bridge.br_control = pci_read_config(dev,
5769 PCIR_BRIDGECTL_2, 2);
5770 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2);
5771 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2);
/* Capability state saved by the dedicated helpers above. */
5775 if (dinfo->cfg.pcie.pcie_location != 0)
5776 pci_cfg_save_pcie(dev, dinfo);
5778 if (dinfo->cfg.pcix.pcix_location != 0)
5779 pci_cfg_save_pcix(dev, dinfo);
5782 if (dinfo->cfg.iov != NULL)
5783 pci_iov_cfg_save(dev, dinfo);
5787 * don't set the state for display devices, base peripherals and
5788 * memory devices since bad things happen when they are powered down.
5789 * We should (a) have drivers that can easily detach and (b) use
5790 * generic drivers for these devices so that some device actually
5791 * attaches. We need to make sure that when we implement (a) we don't
5792 * power the device down on a reattach.
5794 cls = pci_get_class(dev);
/* Tunable policy: how aggressively to power down driverless devices. */
5797 switch (pci_do_power_nodriver)
5799 case 0: /* NO powerdown at all */
5801 case 1: /* Conservative about what to power down */
5802 if (cls == PCIC_STORAGE)
5805 case 2: /* Aggressive about what to power down */
5806 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5807 cls == PCIC_BASEPERIPH)
5810 case 3: /* Power down everything */
5814 * PCI spec says we can only go into D3 state from D0 state.
5815 * Transition from D[12] into D0 before going to D3 state.
5817 ps = pci_get_powerstate(dev);
5818 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5819 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5820 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5821 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5824 /* Wrapper APIs suitable for device driver use. */
/*
 * Snapshot the device's config space without changing its power state
 * (setstate == 0); typically called from a driver's suspend method.
 */
5826 pci_save_state(device_t dev)
5828 struct pci_devinfo *dinfo;
5830 dinfo = device_get_ivars(dev);
5831 pci_cfg_save(dev, dinfo, 0);
/*
 * Restore the device's config space from the snapshot taken by
 * pci_save_state(); typically called from a driver's resume method.
 */
5835 pci_restore_state(device_t dev)
5837 struct pci_devinfo *dinfo;
5839 dinfo = device_get_ivars(dev);
5840 pci_cfg_restore(dev, dinfo);
/*
 * Bus method: obtain an alternate device identifier (e.g. for IOMMU /
 * MSI routing) by delegating to the parent bridge's PCIB_GET_ID.
 */
5844 pci_get_id_method(device_t dev, device_t child, enum pci_id_type type,
5848 return (PCIB_GET_ID(device_get_parent(dev), child, type, id));
5851 /* Find the upstream port of a given PCI device in a root complex. */
5853 pci_find_pcie_root_port(device_t dev)
5855 struct pci_devinfo *dinfo;
5856 devclass_t pci_class;
5859 pci_class = devclass_find("pci");
5860 KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
5861 ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
5864 * Walk the bridge hierarchy until we find a PCI-e root
5865 * port or a non-PCI device.
5868 bus = device_get_parent(dev);
5869 KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
5870 device_get_nameunit(dev)));
5872 pcib = device_get_parent(bus);
5873 KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
5874 device_get_nameunit(bus)));
5877 * pcib's parent must be a PCI bus for this to be a
5880 if (device_get_devclass(device_get_parent(pcib)) != pci_class)
5883 dinfo = device_get_ivars(pcib);
5884 if (dinfo->cfg.pcie.pcie_location != 0 &&
5885 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)