2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
74 * XXX: Due to a limitation of the bus_dma_tag_create() API, we cannot
75 * specify a 4GB boundary on 32-bit targets. Usually this does not
76 * matter as it is ok to use a boundary of 0 on these systems.
77 * However, in the case of PAE, DMA addresses can cross a 4GB
78 * boundary, so as a workaround use a 2GB boundary.
/*
 * NOTE(review): this conditional appears truncated — two PCI_DMA_BOUNDARY
 * definitions follow the #if with no visible #else/#endif; confirm the
 * missing preprocessor lines against the original source before building.
 */
80 #if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
82 #define PCI_DMA_BOUNDARY 0x80000000
84 #define PCI_DMA_BOUNDARY 0x100000000
/*
 * Evaluate to true if config-space register 'reg' is the expansion-ROM
 * (BIOS) base address register for the function's header type:
 * PCIR_BIOS for type 0 (normal) headers, PCIR_BIOS_1 for type 1
 * (PCI-PCI bridge) headers.
 *
 * NOTE(review): 'reg' is not parenthesized in the expansion, so passing
 * a compound expression could mis-bind with '=='; callers should pass a
 * plain value — consider adding parentheses.
 */
88 #define PCIR_IS_BIOS(cfg, reg) \
89 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
90 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
/*
 * Forward declarations of file-local helpers implemented below.
 * NOTE(review): several multi-line prototypes appear truncated here
 * (continuation lines elided, e.g. after pci_assign_interrupt,
 * pci_hdrtypedata, pci_enable_msi and pci_remap_intr_method).
 */
92 static int pci_has_quirk(uint32_t devid, int quirk);
93 static pci_addr_t pci_mapbase(uint64_t mapreg);
94 static const char *pci_maptype(uint64_t mapreg);
95 static int pci_mapsize(uint64_t testval);
96 static int pci_maprange(uint64_t mapreg);
97 static pci_addr_t pci_rombase(uint64_t mapreg);
98 static int pci_romsize(uint64_t testval);
99 static void pci_fixancient(pcicfgregs *cfg);
100 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
102 static int pci_porten(device_t dev);
103 static int pci_memen(device_t dev);
104 static void pci_assign_interrupt(device_t bus, device_t dev,
106 static int pci_add_map(device_t bus, device_t dev, int reg,
107 struct resource_list *rl, int force, int prefetch);
108 static int pci_probe(device_t dev);
109 static int pci_attach(device_t dev);
110 static void pci_load_vendor_data(void);
111 static int pci_describe_parse_line(char **ptr, int *vendor,
112 int *device, char **desc);
113 static char *pci_describe_device(device_t dev);
114 static bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev);
115 static int pci_modevent(module_t mod, int what, void *arg);
116 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
118 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
119 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
120 int reg, uint32_t *data);
122 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
123 int reg, uint32_t data);
125 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
126 static void pci_disable_msi(device_t dev);
127 static void pci_enable_msi(device_t dev, uint64_t address,
129 static void pci_enable_msix(device_t dev, u_int index,
130 uint64_t address, uint32_t data);
131 static void pci_mask_msix(device_t dev, u_int index);
132 static void pci_unmask_msix(device_t dev, u_int index);
133 static int pci_msi_blacklisted(void);
134 static int pci_msix_blacklisted(void);
135 static void pci_resume_msi(device_t dev);
136 static void pci_resume_msix(device_t dev);
137 static int pci_remap_intr_method(device_t bus, device_t dev,
/*
 * newbus method table for the PCI bus driver: maps the generic device,
 * bus and PCI kobj interfaces onto the pci_* implementations in this
 * file (or generic bus_* fallbacks).
 * NOTE(review): the table's DEVMETHOD_END terminator and closing brace
 * appear to be elided from this excerpt — confirm against the original.
 */
140 static device_method_t pci_methods[] = {
141 /* Device interface */
142 DEVMETHOD(device_probe, pci_probe),
143 DEVMETHOD(device_attach, pci_attach),
144 DEVMETHOD(device_detach, bus_generic_detach),
145 DEVMETHOD(device_shutdown, bus_generic_shutdown),
146 DEVMETHOD(device_suspend, pci_suspend),
147 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
150 DEVMETHOD(bus_print_child, pci_print_child),
151 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
152 DEVMETHOD(bus_read_ivar, pci_read_ivar),
153 DEVMETHOD(bus_write_ivar, pci_write_ivar),
154 DEVMETHOD(bus_driver_added, pci_driver_added),
155 DEVMETHOD(bus_setup_intr, pci_setup_intr),
156 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
158 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
159 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
160 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
161 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
162 DEVMETHOD(bus_delete_resource, pci_delete_resource),
163 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
164 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
165 DEVMETHOD(bus_release_resource, pci_release_resource),
166 DEVMETHOD(bus_activate_resource, pci_activate_resource),
167 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
168 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
169 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
170 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
/* PCI interface */
173 DEVMETHOD(pci_read_config, pci_read_config_method),
174 DEVMETHOD(pci_write_config, pci_write_config_method),
175 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
176 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
177 DEVMETHOD(pci_enable_io, pci_enable_io_method),
178 DEVMETHOD(pci_disable_io, pci_disable_io_method),
179 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
180 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
181 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
182 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
183 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
184 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
185 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
186 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
187 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
188 DEVMETHOD(pci_release_msi, pci_release_msi_method),
189 DEVMETHOD(pci_msi_count, pci_msi_count_method),
190 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Driver class and module registration: attaches "pci" under "pcib". */
195 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
197 static devclass_t pci_devclass;
198 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
199 MODULE_VERSION(pci, 1);
/* Contents and size of the pciids vendor data file, loaded on demand. */
201 static char *pci_vendordata;
202 static size_t pci_vendordata_size;
/*
 * NOTE(review): the 'struct pci_quirk {' opening line and remaining
 * members appear elided here; only the devid member is visible.
 */
205 uint32_t devid; /* Vendor/device of the card */
/* Quirk types referenced by the pci_quirks[] table below. */
207 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
208 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
209 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
210 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
211 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
212 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
/*
 * Table of known-broken devices, keyed by the 32-bit devid
 * (device ID in the high 16 bits, vendor ID in the low 16 bits);
 * consulted by pci_has_quirk().
 * NOTE(review): the terminating all-zero sentinel entry and closing
 * brace appear elided from this excerpt — pci_has_quirk()'s loop
 * depends on a zero devid terminator.
 */
217 static const struct pci_quirk pci_quirks[] = {
218 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
219 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
220 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
221 /* As does the Serverworks OSB4 (the SMBus mapping register) */
222 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
225 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
226 * or the CMIC-SL (AKA ServerWorks GC_LE).
228 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
229 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 * MSI doesn't work on earlier Intel chipsets including
233 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
235 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
247 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
250 * MSI-X allocation doesn't work properly for devices passed through
251 * by VMware up to at least ESXi 5.1.
253 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
254 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
257 * Some virtualization environments emulate an older chipset
258 * but support MSI just fine. QEMU uses the Intel 82440.
260 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
263 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
264 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
265 * It prevents us from attaching hpet(4) when the bit is unset.
266 * Note this quirk only affects SB600 revision A13 and earlier.
267 * For SB600 A21 and later, firmware must set the bit to hide it.
268 * For SB700 and later, it is unused and hardcoded to zero.
270 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
273 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that
274 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
275 * command register is set.
277 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
278 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
279 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
282 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
283 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
285 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
286 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
287 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
288 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
289 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
290 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
295 /* map register information */
296 #define PCI_MAPMEM 0x01 /* memory map */
297 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
298 #define PCI_MAPPORT 0x04 /* port map */
/*
 * Global list of all discovered PCI functions (appended to by
 * pci_read_device()), plus a generation counter and device count.
 */
300 struct devlist pci_devq;
301 uint32_t pci_generation;
302 uint32_t pci_numdevs = 0;
/* Set when at least one PCIe / PCI-X capability is seen (pci_read_cap). */
303 static int pcie_chipset, pcix_chipset;
/*
 * Loader tunables / sysctls under hw.pci.* controlling PCI bus
 * behavior (I/O enablement, BAR reallocation, power management,
 * MSI/MSI-X usage and early USB controller takeover).
 */
306 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
308 static int pci_enable_io_modes = 1;
309 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
310 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
311 &pci_enable_io_modes, 1,
312 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
313 enable these bits correctly. We'd like to do this all the time, but there\n\
314 are some peripherals that this causes problems with.");
316 static int pci_do_realloc_bars = 0;
317 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
318 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
319 &pci_do_realloc_bars, 0,
320 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
322 static int pci_do_power_nodriver = 0;
323 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
324 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
325 &pci_do_power_nodriver, 0,
326 "Place a function into D3 state when no driver attaches to it. 0 means\n\
327 disable. 1 means conservatively place devices into D3 state. 2 means\n\
328 agressively place devices into D3 state. 3 means put absolutely everything\n\
331 int pci_do_power_resume = 1;
332 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
333 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
334 &pci_do_power_resume, 1,
335 "Transition from D3 -> D0 on resume.");
337 int pci_do_power_suspend = 1;
338 TUNABLE_INT("hw.pci.do_power_suspend", &pci_do_power_suspend);
339 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RW,
340 &pci_do_power_suspend, 1,
341 "Transition from D0 -> D3 on suspend.");
343 static int pci_do_msi = 1;
344 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
345 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
346 "Enable support for MSI interrupts");
348 static int pci_do_msix = 1;
349 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
350 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
351 "Enable support for MSI-X interrupts");
353 static int pci_honor_msi_blacklist = 1;
354 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
355 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
356 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
/* USB takeover defaults on for x86 only; #else/#endif lines elided here. */
358 #if defined(__i386__) || defined(__amd64__)
359 static int pci_usb_takeover = 1;
361 static int pci_usb_takeover = 0;
363 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
364 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
365 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
366 Disable this if you depend on BIOS emulation of USB devices, that is\n\
367 you use USB devices (like keyboard or mouse) but do not load USB drivers");
369 static int pci_clear_bars;
370 TUNABLE_INT("hw.pci.clear_bars", &pci_clear_bars);
371 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
372 "Ignore firmware-assigned resources for BARs.");
/*
 * Return non-zero if the given devid has a quirk of the requested type
 * in pci_quirks[]; the table is terminated by a zero devid entry.
 * NOTE(review): return statements and closing braces are elided from
 * this excerpt.
 */
375 pci_has_quirk(uint32_t devid, int quirk)
377 	const struct pci_quirk *q;
379 	for (q = &pci_quirks[0]; q->devid; q++) {
380 		if (q->devid == devid && q->type == quirk)
386 /* Find a device_t by bus/slot/function in domain 0 */
/* Convenience wrapper around pci_find_dbsf() with domain fixed at 0. */
389 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
392 return (pci_find_dbsf(0, bus, slot, func));
395 /* Find a device_t by domain/bus/slot/function */
/*
 * Linear scan of the global pci_devq device list; returns the matching
 * function's device_t (NULL-return path elided from this excerpt).
 */
398 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
400 struct pci_devinfo *dinfo;
402 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
403 if ((dinfo->cfg.domain == domain) &&
404 (dinfo->cfg.bus == bus) &&
405 (dinfo->cfg.slot == slot) &&
406 (dinfo->cfg.func == func)) {
407 return (dinfo->cfg.dev);
414 /* Find a device_t by vendor/device ID */
/*
 * Linear scan of pci_devq; returns the first function matching both
 * vendor and device ID (NULL-return path elided from this excerpt).
 */
417 pci_find_device(uint16_t vendor, uint16_t device)
419 struct pci_devinfo *dinfo;
421 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
422 if ((dinfo->cfg.vendor == vendor) &&
423 (dinfo->cfg.device == device)) {
424 return (dinfo->cfg.dev);
/*
 * Find the first device with a matching base class / subclass pair by
 * scanning pci_devq (NULL-return path elided from this excerpt).
 */
432 pci_find_class(uint8_t class, uint8_t subclass)
434 struct pci_devinfo *dinfo;
436 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
437 if (dinfo->cfg.baseclass == class &&
438 dinfo->cfg.subclass == subclass) {
439 return (dinfo->cfg.dev);
/*
 * printf-style helper that prefixes output with the function's
 * "pci<domain>:<bus>:<slot>:<func>: " location; returns total
 * character count. va_start/va_end lines elided from this excerpt.
 */
447 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
452 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
455 retval += vprintf(fmt, ap);
460 /* return base address of memory or port map */
463 pci_mapbase(uint64_t mapreg)
/* Strip the type/flag bits with the mask appropriate to the BAR kind. */
466 if (PCI_BAR_MEM(mapreg))
467 return (mapreg & PCIM_BAR_MEM_BASE);
469 return (mapreg & PCIM_BAR_IO_BASE);
472 /* return map type of memory or port map */
/* Returns a human-readable BAR type string; the "I/O Port" and plain
 * "Memory" return lines are elided from this excerpt. */
475 pci_maptype(uint64_t mapreg)
478 if (PCI_BAR_IO(mapreg))
480 if (mapreg & PCIM_BAR_MEM_PREFETCH)
481 return ("Prefetchable Memory");
485 /* return log2 of map size decoded for memory or port map */
/* Counts trailing zero bits of the sized BAR value written back by the
 * probe; loop body and return elided from this excerpt. */
488 pci_mapsize(uint64_t testval)
492 testval = pci_mapbase(testval);
495 while ((testval & 1) == 0)
504 /* return base address of device ROM */
507 pci_rombase(uint64_t mapreg)
/* Mask off the enable and reserved bits of the expansion ROM BAR. */
510 return (mapreg & PCIM_BIOS_ADDR_MASK);
513 /* return log2 of map size decoded for device ROM */
/* Same trailing-zero count as pci_mapsize(), applied to the ROM BAR;
 * loop body and return elided from this excerpt. */
516 pci_romsize(uint64_t testval)
520 testval = pci_rombase(testval);
523 while ((testval & 1) == 0)
532 /* return log2 of address range supported by map register */
/* I/O BARs decode 32 bits; memory BARs decode 32, 20 (<1MB legacy) or
 * 64 bits per the BAR type field. Return values elided from this
 * excerpt. */
535 pci_maprange(uint64_t mapreg)
539 if (PCI_BAR_IO(mapreg))
542 switch (mapreg & PCIM_BAR_MEM_TYPE) {
543 case PCIM_BAR_MEM_32:
546 case PCIM_BAR_MEM_1MB:
549 case PCIM_BAR_MEM_64:
556 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
559 pci_fixancient(pcicfgregs *cfg)
/* Only type 0 (normal) headers need fixing up; others return early. */
561 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
564 /* PCI to PCI bridges use header type 1 */
565 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
566 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
569 /* extract header type specific config data */
/*
 * Read the subvendor/subdevice IDs and the BAR count from the offsets
 * appropriate to the function's header type (normal, bridge or
 * CardBus). break statements, #undef REG and closing brace elided
 * from this excerpt.
 */
572 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
574 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
575 switch (cfg->hdrtype & PCIM_HDRTYPE) {
576 case PCIM_HDRTYPE_NORMAL:
577 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
578 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
579 cfg->nummaps = PCI_MAXMAPS_0;
581 case PCIM_HDRTYPE_BRIDGE:
582 cfg->nummaps = PCI_MAXMAPS_1;
584 case PCIM_HDRTYPE_CARDBUS:
585 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
586 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
587 cfg->nummaps = PCI_MAXMAPS_2;
593 /* read configuration header into pcicfgregs structure */
/*
 * If a function responds at (d,b,s,f) (DEVVENDOR != all-ones), allocate
 * a pci_devinfo of 'size' bytes, populate its pcicfgregs from config
 * space, parse capabilities, link it onto pci_devq and mirror key
 * fields into the pciio 'conf' snapshot. Returns the new entry or the
 * NULL initial value when the slot is empty. Several lines (domain/
 * bus/slot/func assignment, #undef REG) are elided from this excerpt.
 */
595 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
597 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
598 pcicfgregs *cfg = NULL;
599 struct pci_devinfo *devlist_entry;
600 struct devlist *devlist_head;
602 devlist_head = &pci_devq;
604 devlist_entry = NULL;
606 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
607 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
609 cfg = &devlist_entry->cfg;
615 cfg->vendor = REG(PCIR_VENDOR, 2);
616 cfg->device = REG(PCIR_DEVICE, 2);
617 cfg->cmdreg = REG(PCIR_COMMAND, 2);
618 cfg->statreg = REG(PCIR_STATUS, 2);
619 cfg->baseclass = REG(PCIR_CLASS, 1);
620 cfg->subclass = REG(PCIR_SUBCLASS, 1);
621 cfg->progif = REG(PCIR_PROGIF, 1);
622 cfg->revid = REG(PCIR_REVID, 1);
623 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
624 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
625 cfg->lattimer = REG(PCIR_LATTIMER, 1);
626 cfg->intpin = REG(PCIR_INTPIN, 1);
627 cfg->intline = REG(PCIR_INTLINE, 1);
629 cfg->mingnt = REG(PCIR_MINGNT, 1);
630 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split the multifunction flag out of the raw header type byte. */
632 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
633 cfg->hdrtype &= ~PCIM_MFDEV;
634 STAILQ_INIT(&cfg->maps);
637 pci_hdrtypedata(pcib, b, s, f, cfg);
639 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
640 pci_read_cap(pcib, cfg);
642 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror the config into the pciio(4)-visible pci_conf record. */
644 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
645 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
646 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
647 devlist_entry->conf.pc_sel.pc_func = cfg->func;
648 devlist_entry->conf.pc_hdr = cfg->hdrtype;
650 devlist_entry->conf.pc_subvendor = cfg->subvendor;
651 devlist_entry->conf.pc_subdevice = cfg->subdevice;
652 devlist_entry->conf.pc_vendor = cfg->vendor;
653 devlist_entry->conf.pc_device = cfg->device;
655 devlist_entry->conf.pc_class = cfg->baseclass;
656 devlist_entry->conf.pc_subclass = cfg->subclass;
657 devlist_entry->conf.pc_progif = cfg->progif;
658 devlist_entry->conf.pc_revid = cfg->revid;
663 return (devlist_entry);
/*
 * Walk the function's PCI capability list and record the location and
 * key registers of each recognized capability (power management,
 * HyperTransport MSI mapping, MSI, MSI-X, VPD, bridge subvendor,
 * PCI-X, PCIe) into 'cfg'. Also latches the global pcie_chipset /
 * pcix_chipset hints (assignments elided from this excerpt).
 * NOTE(review): many lines are elided throughout — break statements,
 * brace closings, variable declarations and #endif lines; do not edit
 * logic here without the complete original.
 */
668 pci_read_cap(device_t pcib, pcicfgregs *cfg)
670 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
671 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
672 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
676 int ptr, nextptr, ptrptr;
/* The capabilities pointer lives at a header-type-specific offset. */
678 switch (cfg->hdrtype & PCIM_HDRTYPE) {
679 case PCIM_HDRTYPE_NORMAL:
680 case PCIM_HDRTYPE_BRIDGE:
681 ptrptr = PCIR_CAP_PTR;
683 case PCIM_HDRTYPE_CARDBUS:
684 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
687 return; /* no extended capabilities support */
689 nextptr = REG(ptrptr, 1); /* sanity check? */
692 * Read capability entries.
694 while (nextptr != 0) {
697 printf("illegal PCI extended capability offset %d\n",
701 /* Find the next entry */
703 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
705 /* Process this entry */
706 switch (REG(ptr + PCICAP_ID, 1)) {
707 case PCIY_PMG: /* PCI power management */
708 if (cfg->pp.pp_cap == 0) {
709 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
710 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
711 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
712 if ((nextptr - ptr) > PCIR_POWER_DATA)
713 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
716 case PCIY_HT: /* HyperTransport */
717 /* Determine HT-specific capability type. */
718 val = REG(ptr + PCIR_HT_COMMAND, 2);
720 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
721 cfg->ht.ht_slave = ptr;
723 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
724 switch (val & PCIM_HTCMD_CAP_MASK) {
725 case PCIM_HTCAP_MSI_MAPPING:
726 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
727 /* Sanity check the mapping window. */
728 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
731 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
733 if (addr != MSI_INTEL_ADDR_BASE)
735 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
736 cfg->domain, cfg->bus,
737 cfg->slot, cfg->func,
740 addr = MSI_INTEL_ADDR_BASE;
742 cfg->ht.ht_msimap = ptr;
743 cfg->ht.ht_msictrl = val;
744 cfg->ht.ht_msiaddr = addr;
749 case PCIY_MSI: /* PCI MSI */
750 cfg->msi.msi_location = ptr;
751 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* Message count is encoded as a power of two in the MMC field. */
752 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
753 PCIM_MSICTRL_MMC_MASK)>>1);
755 case PCIY_MSIX: /* PCI MSI-X */
756 cfg->msix.msix_location = ptr;
757 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
758 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
759 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA each encode a BAR index plus an offset within it. */
760 val = REG(ptr + PCIR_MSIX_TABLE, 4);
761 cfg->msix.msix_table_bar = PCIR_BAR(val &
763 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
764 val = REG(ptr + PCIR_MSIX_PBA, 4);
765 cfg->msix.msix_pba_bar = PCIR_BAR(val &
767 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
769 case PCIY_VPD: /* PCI Vital Product Data */
770 cfg->vpd.vpd_reg = ptr;
773 /* Should always be true. */
774 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
775 PCIM_HDRTYPE_BRIDGE) {
776 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
777 cfg->subvendor = val & 0xffff;
778 cfg->subdevice = val >> 16;
781 case PCIY_PCIX: /* PCI-X */
783 * Assume we have a PCI-X chipset if we have
784 * at least one PCI-PCI bridge with a PCI-X
785 * capability. Note that some systems with
786 * PCI-express or HT chipsets might match on
787 * this check as well.
789 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
793 case PCIY_EXPRESS: /* PCI-express */
795 * Assume we have a PCI-express chipset if we have
796 * at least one PCI-express device.
799 cfg->pcie.pcie_location = ptr;
800 val = REG(ptr + PCIR_EXPRESS_FLAGS, 2);
801 cfg->pcie.pcie_type = val & PCIM_EXP_FLAGS_TYPE;
808 #if defined(__powerpc__)
810 * Enable the MSI mapping window for all HyperTransport
811 * slaves. PCI-PCI bridges have their windows enabled via
814 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
815 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
817 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
818 cfg->domain, cfg->bus, cfg->slot, cfg->func);
819 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
820 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
824 /* REG and WREG use carry through to next functions */
828 * PCI Vital Product Data
/* Poll budget (in 1us DELAYs) for a single VPD register transaction. */
831 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one 32-bit word of VPD at byte offset 'reg' (must be 4-byte
 * aligned): write the address with bit 15 clear, poll until the
 * hardware sets bit 15 (data ready), then read the data register.
 * The timeout decrement and return statements are elided from this
 * excerpt.
 */
834 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
836 int count = PCI_VPD_TIMEOUT;
838 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
840 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
842 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
845 DELAY(1); /* limit looping */
847 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one 32-bit word of VPD at byte offset 'reg': load the data
 * register, write the address with bit 15 set, then poll until the
 * hardware clears bit 15 (write complete). Mirror image of
 * pci_read_vpd_reg(); timeout decrement and returns elided from this
 * excerpt.
 */
854 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
856 int count = PCI_VPD_TIMEOUT;
858 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
860 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
861 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
862 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
865 DELAY(1); /* limit looping */
872 #undef PCI_VPD_TIMEOUT
/* Cursor state for streaming VPD bytes out of 32-bit register reads.
 * NOTE(review): member declarations are elided from this excerpt. */
874 struct vpd_readstate {
/*
 * Fetch the next VPD byte through the read state: refill the 4-byte
 * word buffer when empty, otherwise shift out the next byte.
 * NOTE(review): the '®' on the refill line is almost certainly a
 * mis-encoded '&reg' argument (HTML-entity mangling) — restore from
 * the original source. Error-return and bookkeeping lines elided.
 */
884 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
889 if (vrs->bytesinval == 0) {
890 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
892 vrs->val = le32toh(reg);
894 byte = vrs->val & 0xff;
897 vrs->val = vrs->val >> 8;
898 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array and the writable (VPD-W)
 * keyword array, validating the RV checksum along the way. Implemented
 * as a byte-at-a-time state machine over vpd_nextbyte():
 *   state 0 = item header, 1 = ident string, 2/3 = VPD-R header/value,
 *   5/6 = VPD-W header/value (presumably 4 handles checksum skip —
 *   elided here; confirm against the original).
 * NOTE(review): this excerpt is heavily elided (loop header, breaks,
 * state assignments, closing braces); treat as read-only reference.
 */
908 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
910 struct vpd_readstate vrs;
915 int alloc, off; /* alloc/off for RO/W arrays */
921 /* init vpd reader */
929 name = remain = i = 0; /* shut up stupid gcc */
930 alloc = off = 0; /* shut up stupid gcc */
931 dflen = 0; /* shut up stupid gcc */
934 if (vpd_nextbyte(&vrs, &byte)) {
939 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
940 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
941 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
944 case 0: /* item name */
946 if (vpd_nextbyte(&vrs, &byte2)) {
951 if (vpd_nextbyte(&vrs, &byte2)) {
955 remain |= byte2 << 8;
/* VPD may occupy at most 0x7f 4-byte words; reject larger claims. */
956 if (remain > (0x7f*4 - vrs.off)) {
959 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
960 cfg->domain, cfg->bus, cfg->slot,
966 name = (byte >> 3) & 0xf;
969 case 0x2: /* String */
970 cfg->vpd.vpd_ident = malloc(remain + 1,
978 case 0x10: /* VPD-R */
981 cfg->vpd.vpd_ros = malloc(alloc *
982 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
986 case 0x11: /* VPD-W */
989 cfg->vpd.vpd_w = malloc(alloc *
990 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
994 default: /* Invalid data, abort */
1000 case 1: /* Identifier String */
1001 cfg->vpd.vpd_ident[i++] = byte;
1004 cfg->vpd.vpd_ident[i] = '\0';
1009 case 2: /* VPD-R Keyword Header */
/* Grow the RO array geometrically when full. */
1011 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1012 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1013 M_DEVBUF, M_WAITOK | M_ZERO);
1015 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1016 if (vpd_nextbyte(&vrs, &byte2)) {
1020 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1021 if (vpd_nextbyte(&vrs, &byte2)) {
1025 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1027 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1030 * if this happens, we can't trust the rest
1034 "pci%d:%d:%d:%d: bad keyword length: %d\n",
1035 cfg->domain, cfg->bus, cfg->slot,
1040 } else if (dflen == 0) {
1041 cfg->vpd.vpd_ros[off].value = malloc(1 *
1042 sizeof(*cfg->vpd.vpd_ros[off].value),
1043 M_DEVBUF, M_WAITOK);
1044 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1046 cfg->vpd.vpd_ros[off].value = malloc(
1048 sizeof(*cfg->vpd.vpd_ros[off].value),
1049 M_DEVBUF, M_WAITOK);
1052 /* keep in sync w/ state 3's transitions */
1053 if (dflen == 0 && remain == 0)
1055 else if (dflen == 0)
1061 case 3: /* VPD-R Keyword Value */
1062 cfg->vpd.vpd_ros[off].value[i++] = byte;
1063 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1064 "RV", 2) == 0 && cksumvalid == -1) {
1070 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1071 cfg->domain, cfg->bus,
1072 cfg->slot, cfg->func,
1081 /* keep in sync w/ state 2's transitions */
1083 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1084 if (dflen == 0 && remain == 0) {
/* Done with VPD-R: trim the array to its final size. */
1085 cfg->vpd.vpd_rocnt = off;
1086 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1087 off * sizeof(*cfg->vpd.vpd_ros),
1088 M_DEVBUF, M_WAITOK | M_ZERO);
1090 } else if (dflen == 0)
1100 case 5: /* VPD-W Keyword Header */
1102 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1103 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1104 M_DEVBUF, M_WAITOK | M_ZERO);
1106 cfg->vpd.vpd_w[off].keyword[0] = byte;
1107 if (vpd_nextbyte(&vrs, &byte2)) {
1111 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1112 if (vpd_nextbyte(&vrs, &byte2)) {
1116 cfg->vpd.vpd_w[off].len = dflen = byte2;
1117 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1118 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1119 sizeof(*cfg->vpd.vpd_w[off].value),
1120 M_DEVBUF, M_WAITOK);
1123 /* keep in sync w/ state 6's transitions */
1124 if (dflen == 0 && remain == 0)
1126 else if (dflen == 0)
1132 case 6: /* VPD-W Keyword Value */
1133 cfg->vpd.vpd_w[off].value[i++] = byte;
1136 /* keep in sync w/ state 5's transitions */
1138 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1139 if (dflen == 0 && remain == 0) {
1140 cfg->vpd.vpd_wcnt = off;
1141 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1142 off * sizeof(*cfg->vpd.vpd_w),
1143 M_DEVBUF, M_WAITOK | M_ZERO);
1145 } else if (dflen == 0)
1150 printf("pci%d:%d:%d:%d: invalid state: %d\n",
1151 cfg->domain, cfg->bus, cfg->slot, cfg->func,
/* Checksum failure or parser abort: discard the read-only data. */
1158 if (cksumvalid == 0 || state < -1) {
1159 /* read-only data bad, clean up */
1160 if (cfg->vpd.vpd_ros != NULL) {
1161 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1162 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1163 free(cfg->vpd.vpd_ros, M_DEVBUF);
1164 cfg->vpd.vpd_ros = NULL;
1168 /* I/O error, clean up */
1169 printf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1170 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1171 if (cfg->vpd.vpd_ident != NULL) {
1172 free(cfg->vpd.vpd_ident, M_DEVBUF);
1173 cfg->vpd.vpd_ident = NULL;
1175 if (cfg->vpd.vpd_w != NULL) {
1176 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1177 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1178 free(cfg->vpd.vpd_w, M_DEVBUF);
1179 cfg->vpd.vpd_w = NULL;
/* Mark parsed (success or failure) so we never re-read this device. */
1182 cfg->vpd.vpd_cached = 1;
/*
 * kobj method: return the cached VPD identifier string via *identptr,
 * reading (and caching) VPD on first use. Returns ENXIO when no
 * identifier exists (return lines elided from this excerpt).
 */
1188 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1190 struct pci_devinfo *dinfo = device_get_ivars(child);
1191 pcicfgregs *cfg = &dinfo->cfg;
1193 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1194 pci_read_vpd(device_get_parent(dev), cfg);
1196 *identptr = cfg->vpd.vpd_ident;
1198 if (*identptr == NULL)
/*
 * kobj method: look up a two-character read-only VPD keyword and
 * return its value via *vptr, reading (and caching) VPD on first use.
 * Signature continuation and return lines elided from this excerpt.
 */
1205 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1208 struct pci_devinfo *dinfo = device_get_ivars(child);
1209 pcicfgregs *cfg = &dinfo->cfg;
1212 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1213 pci_read_vpd(device_get_parent(dev), cfg);
1215 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1216 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1217 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1218 *vptr = cfg->vpd.vpd_ros[i].value;
/*
 * Ensure VPD is read and cached for 'dev'. Note 'dev' here is the PCI
 * device itself (not the bus), hence the double device_get_parent()
 * to reach the pcib bridge. Return elided from this excerpt.
 */
1227 pci_fetch_vpd_list(device_t dev)
1229 struct pci_devinfo *dinfo = device_get_ivars(dev);
1230 pcicfgregs *cfg = &dinfo->cfg;
1232 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1233 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1238 * Find the requested extended capability and return the offset in
1239 * configuration space via the pointer provided. The function returns
1240 * 0 on success and error code otherwise.
/* Signature continuation (capreg pointer parameter) elided here. */
1243 pci_find_extcap_method(device_t dev, device_t child, int capability,
1246 struct pci_devinfo *dinfo = device_get_ivars(child);
1247 pcicfgregs *cfg = &dinfo->cfg;
1252 * Check the CAP_LIST bit of the PCI status register first.
1254 status = pci_read_config(child, PCIR_STATUS, 2);
1255 if (!(status & PCIM_STATUS_CAPPRESENT))
1259 * Determine the start pointer of the capabilities list.
1261 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1262 case PCIM_HDRTYPE_NORMAL:
1263 case PCIM_HDRTYPE_BRIDGE:
/* ptr = PCIR_CAP_PTR assignment elided from this excerpt. */
1266 case PCIM_HDRTYPE_CARDBUS:
1267 ptr = PCIR_CAP_PTR_2;
1271 return (ENXIO); /* no extended capabilities support */
1273 ptr = pci_read_config(child, ptr, 1);
1276 * Traverse the capabilities list.
1279 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1284 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1291 * Support for MSI-X message interrupts.
1294 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1296 struct pci_devinfo *dinfo = device_get_ivars(dev);
1297 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1300 KASSERT(msix->msix_table_len > index, ("bogus index"));
1301 offset = msix->msix_table_offset + index * 16;
1302 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1303 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1304 bus_write_4(msix->msix_table_res, offset + 8, data);
1306 /* Enable MSI -> HT mapping. */
1307 pci_ht_map_msi(dev, address);
1311 pci_mask_msix(device_t dev, u_int index)
1313 struct pci_devinfo *dinfo = device_get_ivars(dev);
1314 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1315 uint32_t offset, val;
1317 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1318 offset = msix->msix_table_offset + index * 16 + 12;
1319 val = bus_read_4(msix->msix_table_res, offset);
1320 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1321 val |= PCIM_MSIX_VCTRL_MASK;
1322 bus_write_4(msix->msix_table_res, offset, val);
1327 pci_unmask_msix(device_t dev, u_int index)
1329 struct pci_devinfo *dinfo = device_get_ivars(dev);
1330 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1331 uint32_t offset, val;
1333 KASSERT(msix->msix_table_len > index, ("bogus index"));
1334 offset = msix->msix_table_offset + index * 16 + 12;
1335 val = bus_read_4(msix->msix_table_res, offset);
1336 if (val & PCIM_MSIX_VCTRL_MASK) {
1337 val &= ~PCIM_MSIX_VCTRL_MASK;
1338 bus_write_4(msix->msix_table_res, offset, val);
1343 pci_pending_msix(device_t dev, u_int index)
1345 struct pci_devinfo *dinfo = device_get_ivars(dev);
1346 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1347 uint32_t offset, bit;
1349 KASSERT(msix->msix_table_len > index, ("bogus index"));
1350 offset = msix->msix_pba_offset + (index / 32) * 4;
1351 bit = 1 << index % 32;
1352 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1356 * Restore MSI-X registers and table during resume. If MSI-X is
1357 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * Restore MSI-X state on resume: mask every hardware vector, then
 * reprogram and unmask only the table entries that have a vector
 * assigned and at least one interrupt handler, and finally restore
 * the saved MSI-X control register.
 * NOTE(review): this extract appears to be missing lines (braces and
 * the 'continue' after the mte_vector/mte_handlers test) — verify
 * against the pristine source before building.
 */
1361 pci_resume_msix(device_t dev)
1363 struct pci_devinfo *dinfo = device_get_ivars(dev);
1364 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1365 struct msix_table_entry *mte;
1366 struct msix_vector *mv;
1369 if (msix->msix_alloc > 0) {
1370 /* First, mask all vectors. */
1371 for (i = 0; i < msix->msix_msgnum; i++)
1372 pci_mask_msix(dev, i);
1374 /* Second, program any messages with at least one handler. */
1375 for (i = 0; i < msix->msix_table_len; i++) {
1376 mte = &msix->msix_table[i];
1377 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1379 mv = &msix->msix_vectors[mte->mte_vector - 1];
1380 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1381 pci_unmask_msix(dev, i);
/* Restore the saved control register (re-enables MSI-X if it was on). */
1384 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1385 msix->msix_ctrl, 2);
1389 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1390 * returned in *count. After this function returns, each message will be
1391 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * Allocate up to *count MSI-X messages for 'child'.  Preconditions:
 * rid 0 (INTx) must not be in use, no MSI/MSI-X messages may already
 * be allocated, MSI-X must not be blacklisted, the capability must be
 * present, and the BARs holding the MSI-X table and PBA must already
 * be mapped and active.  On success the messages appear as
 * SYS_RES_IRQ resources at rids 1..N and *count is updated.
 * NOTE(review): extract is missing the early-return error paths
 * (EINVAL/ENXIO/EBUSY) and several braces — confirm against pristine
 * source.
 */
1394 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1396 struct pci_devinfo *dinfo = device_get_ivars(child);
1397 pcicfgregs *cfg = &dinfo->cfg;
1398 struct resource_list_entry *rle;
1399 int actual, error, i, irq, max;
1401 /* Don't let count == 0 get us into trouble. */
1405 /* If rid 0 is allocated, then fail. */
1406 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1407 if (rle != NULL && rle->res != NULL)
1410 /* Already have allocated messages? */
1411 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1414 /* If MSI-X is blacklisted for this system, fail. */
1415 if (pci_msix_blacklisted())
1418 /* MSI-X capability present? */
1419 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1422 /* Make sure the appropriate BARs are mapped. */
1423 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1424 cfg->msix.msix_table_bar);
1425 if (rle == NULL || rle->res == NULL ||
1426 !(rman_get_flags(rle->res) & RF_ACTIVE))
1428 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1429 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1430 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1431 cfg->msix.msix_pba_bar);
1432 if (rle == NULL || rle->res == NULL ||
1433 !(rman_get_flags(rle->res) & RF_ACTIVE))
1436 cfg->msix.msix_pba_res = rle->res;
1439 device_printf(child,
1440 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1441 *count, cfg->msix.msix_msgnum);
1442 max = min(*count, cfg->msix.msix_msgnum);
1443 for (i = 0; i < max; i++) {
1444 /* Allocate a message. */
1445 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1451 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1457 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1459 device_printf(child, "using IRQ %lu for MSI-X\n",
1465 * Be fancy and try to print contiguous runs of
1466 * IRQ values as ranges. 'irq' is the previous IRQ.
1467 * 'run' is true if we are in a range.
1469 device_printf(child, "using IRQs %lu", rle->start);
1472 for (i = 1; i < actual; i++) {
1473 rle = resource_list_find(&dinfo->resources,
1474 SYS_RES_IRQ, i + 1);
1476 /* Still in a run? */
1477 if (rle->start == irq + 1) {
1483 /* Finish previous range. */
1489 /* Start new range. */
1490 printf(",%lu", rle->start);
1494 /* Unfinished range? */
1497 printf(" for MSI-X\n");
1501 /* Mask all vectors. */
1502 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1503 pci_mask_msix(child, i);
1505 /* Allocate and initialize vector data and virtual table. */
1506 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1507 M_DEVBUF, M_WAITOK | M_ZERO);
1508 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1509 M_DEVBUF, M_WAITOK | M_ZERO);
/* Identity mapping: table entry i uses vector i + 1 (rid i + 1). */
1510 for (i = 0; i < actual; i++) {
1511 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1512 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1513 cfg->msix.msix_table[i].mte_vector = i + 1;
1516 /* Update control register to enable MSI-X. */
1517 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1518 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1519 cfg->msix.msix_ctrl, 2);
1521 /* Update counts of alloc'd messages. */
1522 cfg->msix.msix_alloc = actual;
1523 cfg->msix.msix_table_len = actual;
1529 * By default, pci_alloc_msix() will assign the allocated IRQ
1530 * resources consecutively to the first N messages in the MSI-X table.
1531 * However, device drivers may want to use different layouts if they
1532 * either receive fewer messages than they asked for, or they wish to
1533 * populate the MSI-X table sparsely. This method allows the driver
1534 * to specify what layout it wants. It must be called after a
1535 * successful pci_alloc_msix() but before any of the associated
1536 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1538 * The 'vectors' array contains 'count' message vectors. The array
1539 * maps directly to the MSI-X table in that index 0 in the array
1540 * specifies the vector for the first message in the MSI-X table, etc.
1541 * The vector value in each array index can either be 0 to indicate
1542 * that no vector should be assigned to a message slot, or it can be a
1543 * number from 1 to N (where N is the count returned from a
1544 successful call to pci_alloc_msix()) to indicate which message
1545 * vector (IRQ) to be used for the corresponding message.
1547 * On successful return, each message with a non-zero vector will have
1548 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1549 * 1. Additionally, if any of the IRQs allocated via the previous
1550 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1551 * will be freed back to the system automatically.
1553 * For example, suppose a driver has a MSI-X table with 6 messages and
1554 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1555 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1556 * C. After the call to pci_alloc_msix(), the device will be setup to
1557 * have an MSI-X table of ABC--- (where - means no vector assigned).
1558 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1559 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1560 * be freed back to the system. This device will also have valid
1561 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1563 * In any case, the SYS_RES_IRQ rid X will always map to the message
1564 * at MSI-X table index X - 1 and will only be valid if a vector is
1565 * assigned to that table entry.
/*
 * Re-layout the MSI-X table per the driver-supplied 'vectors' array
 * (see the long comment above for the contract).  Validates the
 * request, rebuilds the virtual table, frees any vectors that are no
 * longer referenced, and re-adds SYS_RES_IRQ entries for rids that
 * map a vector.
 * NOTE(review): extract is missing error returns (EINVAL/EBUSY),
 * 'continue' statements, and loop/brace lines — verify against the
 * pristine source.
 */
1568 pci_remap_msix_method(device_t dev, device_t child, int count,
1569 const u_int *vectors)
1571 struct pci_devinfo *dinfo = device_get_ivars(child);
1572 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1573 struct resource_list_entry *rle;
1574 int i, irq, j, *used;
1577 * Have to have at least one message in the table but the
1578 * table can't be bigger than the actual MSI-X table in the
1581 if (count == 0 || count > msix->msix_msgnum)
1584 /* Sanity check the vectors. */
1585 for (i = 0; i < count; i++)
1586 if (vectors[i] > msix->msix_alloc)
1590 * Make sure there aren't any holes in the vectors to be used.
1591 * It's a big pain to support it, and it doesn't really make
1592 * sense anyway. Also, at least one vector must be used.
1594 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1596 for (i = 0; i < count; i++)
1597 if (vectors[i] != 0)
1598 used[vectors[i] - 1] = 1;
/* A hole exists if an unused vector precedes a used one. */
1599 for (i = 0; i < msix->msix_alloc - 1; i++)
1600 if (used[i] == 0 && used[i + 1] == 1) {
1601 free(used, M_DEVBUF);
1605 free(used, M_DEVBUF);
1609 /* Make sure none of the resources are allocated. */
1610 for (i = 0; i < msix->msix_table_len; i++) {
1611 if (msix->msix_table[i].mte_vector == 0)
1613 if (msix->msix_table[i].mte_handlers > 0)
1615 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1616 KASSERT(rle != NULL, ("missing resource"));
1617 if (rle->res != NULL)
1621 /* Free the existing resource list entries. */
1622 for (i = 0; i < msix->msix_table_len; i++) {
1623 if (msix->msix_table[i].mte_vector == 0)
1625 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1629 * Build the new virtual table keeping track of which vectors are
1632 free(msix->msix_table, M_DEVBUF);
1633 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1634 M_DEVBUF, M_WAITOK | M_ZERO);
1635 for (i = 0; i < count; i++)
1636 msix->msix_table[i].mte_vector = vectors[i];
1637 msix->msix_table_len = count;
1639 /* Free any unused IRQs and resize the vectors array if necessary. */
1640 j = msix->msix_alloc - 1;
1642 struct msix_vector *vec;
/* Trim trailing unused vectors, releasing their IRQs. */
1644 while (used[j] == 0) {
1645 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1646 msix->msix_vectors[j].mv_irq);
1649 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1651 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1653 free(msix->msix_vectors, M_DEVBUF);
1654 msix->msix_vectors = vec;
1655 msix->msix_alloc = j + 1;
1657 free(used, M_DEVBUF);
1659 /* Map the IRQs onto the rids. */
1660 for (i = 0; i < count; i++) {
1661 if (vectors[i] == 0)
1663 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1664 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1669 device_printf(child, "Remapped MSI-X IRQs as: ");
1670 for (i = 0; i < count; i++) {
1673 if (vectors[i] == 0)
1677 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child'.  Fails if any
 * message still has a handler attached or a SYS_RES_IRQ resource
 * outstanding.  Disables MSI-X in the control register, tears down
 * the virtual table, and returns the IRQs to the parent bridge.
 * NOTE(review): extract is missing the ENODEV/EBUSY returns,
 * 'continue' statements, and closing braces — verify against the
 * pristine source.
 */
1686 pci_release_msix(device_t dev, device_t child)
1688 struct pci_devinfo *dinfo = device_get_ivars(child);
1689 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1690 struct resource_list_entry *rle;
1693 /* Do we have any messages to release? */
1694 if (msix->msix_alloc == 0)
1697 /* Make sure none of the resources are allocated. */
1698 for (i = 0; i < msix->msix_table_len; i++) {
1699 if (msix->msix_table[i].mte_vector == 0)
1701 if (msix->msix_table[i].mte_handlers > 0)
1703 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1704 KASSERT(rle != NULL, ("missing resource"));
1705 if (rle->res != NULL)
1709 /* Update control register to disable MSI-X. */
1710 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1711 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1712 msix->msix_ctrl, 2);
1714 /* Free the resource list entries. */
1715 for (i = 0; i < msix->msix_table_len; i++) {
1716 if (msix->msix_table[i].mte_vector == 0)
1718 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1720 free(msix->msix_table, M_DEVBUF);
1721 msix->msix_table_len = 0;
1723 /* Release the IRQs. */
1724 for (i = 0; i < msix->msix_alloc; i++)
1725 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1726 msix->msix_vectors[i].mv_irq);
1727 free(msix->msix_vectors, M_DEVBUF);
1728 msix->msix_alloc = 0;
1733 * Return the max supported MSI-X messages this device supports.
1734 * Basically, assuming the MD code can alloc messages, this function
1735 * should return the maximum value that pci_alloc_msix() can return.
1736 * Thus, it is subject to the tunables, etc.
1739 pci_msix_count_method(device_t dev, device_t child)
1741 struct pci_devinfo *dinfo = device_get_ivars(child);
1742 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1744 if (pci_do_msix && msix->msix_location != 0)
1745 return (msix->msix_msgnum);
1750 * HyperTransport MSI mapping control
1753 pci_ht_map_msi(device_t dev, uint64_t addr)
1755 struct pci_devinfo *dinfo = device_get_ivars(dev);
1756 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1761 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1762 ht->ht_msiaddr >> 20 == addr >> 20) {
1763 /* Enable MSI -> HT mapping. */
1764 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1765 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1769 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1770 /* Disable MSI -> HT mapping. */
1771 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1772 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1778 pci_get_max_payload(device_t dev)
1780 struct pci_devinfo *dinfo = device_get_ivars(dev);
1784 cap = dinfo->cfg.pcie.pcie_location;
1787 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1788 val &= PCIEM_CTL_MAX_PAYLOAD;
1790 return (1 << (val + 7));
1794 pci_get_max_read_req(device_t dev)
1796 struct pci_devinfo *dinfo = device_get_ivars(dev);
1800 cap = dinfo->cfg.pcie.pcie_location;
1803 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1804 val &= PCIEM_CTL_MAX_READ_REQUEST;
1806 return (1 << (val + 7));
1810 pci_set_max_read_req(device_t dev, int size)
1812 struct pci_devinfo *dinfo = device_get_ivars(dev);
1816 cap = dinfo->cfg.pcie.pcie_location;
1823 size = (1 << (fls(size) - 1));
1824 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1825 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1826 val |= (fls(size) - 8) << 12;
1827 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1832 pcie_read_config(device_t dev, int reg, int width)
1834 struct pci_devinfo *dinfo = device_get_ivars(dev);
1837 cap = dinfo->cfg.pcie.pcie_location;
1841 return (0xffffffff);
1844 return (pci_read_config(dev, cap + reg, width));
1848 pcie_write_config(device_t dev, int reg, uint32_t value, int width)
1850 struct pci_devinfo *dinfo = device_get_ivars(dev);
1853 cap = dinfo->cfg.pcie.pcie_location;
1856 pci_write_config(dev, cap + reg, value, width);
1860 * Adjusts a PCI-e capability register by clearing the bits in mask
1861 * and setting the bits in (value & mask). Bits not set in mask are
1864 * Returns the old value on success or all ones on failure.
1867 pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
1870 struct pci_devinfo *dinfo = device_get_ivars(dev);
1874 cap = dinfo->cfg.pcie.pcie_location;
1878 return (0xffffffff);
1881 old = pci_read_config(dev, cap + reg, width);
1883 new |= (value & mask);
1884 pci_write_config(dev, cap + reg, new, width);
1889 * Support for MSI message signalled interrupts.
1892 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1894 struct pci_devinfo *dinfo = device_get_ivars(dev);
1895 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1897 /* Write data and address values. */
1898 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1899 address & 0xffffffff, 4);
1900 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1901 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1903 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1906 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1909 /* Enable MSI in the control register. */
1910 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1911 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1914 /* Enable MSI -> HT mapping. */
1915 pci_ht_map_msi(dev, address);
1919 pci_disable_msi(device_t dev)
1921 struct pci_devinfo *dinfo = device_get_ivars(dev);
1922 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1924 /* Disable MSI -> HT mapping. */
1925 pci_ht_map_msi(dev, 0);
1927 /* Disable MSI in the control register. */
1928 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1929 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1934 * Restore MSI registers during resume. If MSI is enabled then
1935 * restore the data and address registers in addition to the control
1939 pci_resume_msi(device_t dev)
1941 struct pci_devinfo *dinfo = device_get_ivars(dev);
1942 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1946 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1947 address = msi->msi_addr;
1948 data = msi->msi_data;
1949 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1950 address & 0xffffffff, 4);
1951 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1952 pci_write_config(dev, msi->msi_location +
1953 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1954 pci_write_config(dev, msi->msi_location +
1955 PCIR_MSI_DATA_64BIT, data, 2);
1957 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1960 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Re-route an already-allocated MSI or MSI-X IRQ to a (possibly) new
 * address/data pair obtained from the parent bridge via PCIB_MAP_MSI.
 * Used when the MD interrupt code moves a vector (e.g. to another
 * CPU).  MSI messages are fully reprogrammed; for MSI-X, every table
 * slot using the matching vector is masked, rewritten, and unmasked.
 * NOTE(review): extract is missing local declarations (addr, data,
 * error, i, j), error-return paths, and 'continue'/'return'
 * statements — verify against the pristine source.
 */
1965 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1967 struct pci_devinfo *dinfo = device_get_ivars(dev);
1968 pcicfgregs *cfg = &dinfo->cfg;
1969 struct resource_list_entry *rle;
1970 struct msix_table_entry *mte;
1971 struct msix_vector *mv;
1977 * Handle MSI first. We try to find this IRQ among our list
1978 * of MSI IRQs. If we find it, we request updated address and
1979 * data registers and apply the results.
1981 if (cfg->msi.msi_alloc > 0) {
1983 /* If we don't have any active handlers, nothing to do. */
1984 if (cfg->msi.msi_handlers == 0)
1986 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1987 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1989 if (rle->start == irq) {
1990 error = PCIB_MAP_MSI(device_get_parent(bus),
1991 dev, irq, &addr, &data);
/* Disable, update the cached address/data, then re-enable. */
1994 pci_disable_msi(dev);
1995 dinfo->cfg.msi.msi_addr = addr;
1996 dinfo->cfg.msi.msi_data = data;
1997 pci_enable_msi(dev, addr, data);
2005 * For MSI-X, we check to see if we have this IRQ. If we do,
2006 * we request the updated mapping info. If that works, we go
2007 * through all the slots that use this IRQ and update them.
2009 if (cfg->msix.msix_alloc > 0) {
2010 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2011 mv = &cfg->msix.msix_vectors[i];
2012 if (mv->mv_irq == irq) {
2013 error = PCIB_MAP_MSI(device_get_parent(bus),
2014 dev, irq, &addr, &data);
2017 mv->mv_address = addr;
/* Rewrite every active table slot that uses vector i + 1. */
2019 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2020 mte = &cfg->msix.msix_table[j];
2021 if (mte->mte_vector != i + 1)
2023 if (mte->mte_handlers == 0)
2025 pci_mask_msix(dev, j);
2026 pci_enable_msix(dev, j, addr, data);
2027 pci_unmask_msix(dev, j);
2038 * Returns true if the specified device is blacklisted because MSI
2042 pci_msi_device_blacklisted(device_t dev)
2045 if (!pci_honor_msi_blacklist)
2048 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2052 * Determine if MSI is blacklisted globally on this system. Currently,
2053 * we just check for blacklisted chipsets as represented by the
2054 * host-PCI bridge at device 0:0:0. In the future, it may become
2055 * necessary to check other system attributes, such as the kenv values
2056 * that give the motherboard manufacturer and model number.
2059 pci_msi_blacklisted(void)
2063 if (!pci_honor_msi_blacklist)
2066 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2067 if (!(pcie_chipset || pcix_chipset)) {
2068 if (vm_guest != VM_GUEST_NO) {
2070 * Whitelist older chipsets in virtual
2071 * machines known to support MSI.
2073 dev = pci_find_bsf(0, 0, 0);
2075 return (!pci_has_quirk(pci_get_devid(dev),
2076 PCI_QUIRK_ENABLE_MSI_VM));
2081 dev = pci_find_bsf(0, 0, 0);
2083 return (pci_msi_device_blacklisted(dev));
2088 * Returns true if the specified device is blacklisted because MSI-X
2089 * doesn't work. Note that this assumes that if MSI doesn't work,
2090 * MSI-X doesn't either.
2093 pci_msix_device_blacklisted(device_t dev)
2096 if (!pci_honor_msi_blacklist)
2099 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2102 return (pci_msi_device_blacklisted(dev));
2106 * Determine if MSI-X is blacklisted globally on this system. If MSI
2107 * is blacklisted, assume that MSI-X is as well. Check for additional
2108 * chipsets where MSI works but MSI-X does not.
2111 pci_msix_blacklisted(void)
2115 if (!pci_honor_msi_blacklist)
2118 dev = pci_find_bsf(0, 0, 0);
2119 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2120 PCI_QUIRK_DISABLE_MSIX))
2123 return (pci_msi_blacklisted());
2127 * Attempt to allocate *count MSI messages. The actual number allocated is
2128 * returned in *count. After this function returns, each message will be
2129 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * Allocate up to *count MSI messages for 'child' (capped at 32 and
 * rounded to a power of two per the MSI spec), add them as
 * SYS_RES_IRQ resources at rids 1..N, program the Multiple Message
 * Enable field, and return the actual count via *count.
 * NOTE(review): extract is missing the early-return error paths,
 * the 'ctrl' declaration, and the power-of-2 rounding statement —
 * verify against the pristine source.
 */
2132 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2134 struct pci_devinfo *dinfo = device_get_ivars(child);
2135 pcicfgregs *cfg = &dinfo->cfg;
2136 struct resource_list_entry *rle;
2137 int actual, error, i, irqs[32];
2140 /* Don't let count == 0 get us into trouble. */
2144 /* If rid 0 is allocated, then fail. */
2145 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2146 if (rle != NULL && rle->res != NULL)
2149 /* Already have allocated messages? */
2150 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2153 /* If MSI is blacklisted for this system, fail. */
2154 if (pci_msi_blacklisted())
2157 /* MSI capability present? */
2158 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2162 device_printf(child,
2163 "attempting to allocate %d MSI vectors (%d supported)\n",
2164 *count, cfg->msi.msi_msgnum);
2166 /* Don't ask for more than the device supports. */
2167 actual = min(*count, cfg->msi.msi_msgnum);
2169 /* Don't ask for more than 32 messages. */
2170 actual = min(actual, 32);
2172 /* MSI requires power of 2 number of messages. */
2173 if (!powerof2(actual))
2177 /* Try to allocate N messages. */
2178 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2190 * We now have N actual messages mapped onto SYS_RES_IRQ
2191 * resources in the irqs[] array, so add new resources
2192 * starting at rid 1.
2194 for (i = 0; i < actual; i++)
2195 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2196 irqs[i], irqs[i], 1);
2200 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2205 * Be fancy and try to print contiguous runs
2206 * of IRQ values as ranges. 'run' is true if
2207 * we are in a range.
2209 device_printf(child, "using IRQs %d", irqs[0]);
2211 for (i = 1; i < actual; i++) {
2213 /* Still in a run? */
2214 if (irqs[i] == irqs[i - 1] + 1) {
2219 /* Finish previous range. */
2221 printf("-%d", irqs[i - 1]);
2225 /* Start new range. */
2226 printf(",%d", irqs[i]);
2229 /* Unfinished range? */
2231 printf("-%d", irqs[actual - 1]);
2232 printf(" for MSI\n");
2236 /* Update control register with actual count. */
2237 ctrl = cfg->msi.msi_ctrl;
2238 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* MME encodes log2(count) in bits 6:4. */
2239 ctrl |= (ffs(actual) - 1) << 4;
2240 cfg->msi.msi_ctrl = ctrl;
2241 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2243 /* Update counts of alloc'd messages. */
2244 cfg->msi.msi_alloc = actual;
2245 cfg->msi.msi_handlers = 0;
2250 /* Release the MSI messages associated with this device. */
/*
 * Release the MSI (or MSI-X, tried first) messages allocated to
 * 'child'.  Fails with EBUSY if handlers are still attached or any
 * SYS_RES_IRQ resource is outstanding; otherwise clears the MME
 * field, returns the IRQs to the parent bridge, and deletes the
 * resource list entries.
 * NOTE(review): extract is missing the ENODEV/EBUSY return
 * statements and the final msi_alloc reset — verify against the
 * pristine source.
 */
2252 pci_release_msi_method(device_t dev, device_t child)
2254 struct pci_devinfo *dinfo = device_get_ivars(child);
2255 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2256 struct resource_list_entry *rle;
2257 int error, i, irqs[32];
2259 /* Try MSI-X first. */
2260 error = pci_release_msix(dev, child);
2261 if (error != ENODEV)
2264 /* Do we have any messages to release? */
2265 if (msi->msi_alloc == 0)
2267 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2269 /* Make sure none of the resources are allocated. */
2270 if (msi->msi_handlers > 0)
2272 for (i = 0; i < msi->msi_alloc; i++) {
2273 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2274 KASSERT(rle != NULL, ("missing MSI resource"));
2275 if (rle->res != NULL)
2277 irqs[i] = rle->start;
2280 /* Update control register with 0 count. */
2281 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2282 ("%s: MSI still enabled", __func__));
2283 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2284 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2287 /* Release the messages. */
2288 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2289 for (i = 0; i < msi->msi_alloc; i++)
2290 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2292 /* Update alloc count. */
2300 * Return the max supported MSI messages this device supports.
2301 * Basically, assuming the MD code can alloc messages, this function
2302 * should return the maximum value that pci_alloc_msi() can return.
2303 * Thus, it is subject to the tunables, etc.
2306 pci_msi_count_method(device_t dev, device_t child)
2308 struct pci_devinfo *dinfo = device_get_ivars(child);
2309 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2311 if (pci_do_msi && msi->msi_location != 0)
2312 return (msi->msi_msgnum);
2316 /* free pcicfgregs structure and all depending data structures */
/*
 * Free a pci_devinfo and everything hanging off it: the cached VPD
 * strings (ident, read-only fields, writable fields), the BAR map
 * list, and the devinfo itself, then unlink it from the global
 * device queue and bump the generation count.
 * NOTE(review): extract is missing the 'int i' declaration, the body
 * of the STAILQ_FOREACH_SAFE loop, and the generation/device-count
 * statements — verify against the pristine source.
 */
2319 pci_freecfg(struct pci_devinfo *dinfo)
2321 struct devlist *devlist_head;
2322 struct pci_map *pm, *next;
2325 devlist_head = &pci_devq;
/* VPD data is only present when a VPD capability was found. */
2327 if (dinfo->cfg.vpd.vpd_reg) {
2328 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2329 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2330 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2331 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2332 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2333 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2334 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2336 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2339 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2340 free(dinfo, M_DEVBUF);
2342 /* increment the generation count */
2345 /* we're losing one device */
2351 * PCI power management
/*
 * Transition 'child' to PCI power state 'state' (D0..D3) via its
 * power-management capability.  Returns EOPNOTSUPP when the
 * capability is absent or the requested state is unsupported
 * (D1/D2 support is optional).  A no-op transition is optimized
 * away.
 * NOTE(review): extract is missing the 'status' declaration, the
 * switch/DELAY/break statements, and the final return — verify
 * against the pristine source.
 */
2354 pci_set_powerstate_method(device_t dev, device_t child, int state)
2356 struct pci_devinfo *dinfo = device_get_ivars(child);
2357 pcicfgregs *cfg = &dinfo->cfg;
2359 int result, oldstate, highest, delay;
2361 if (cfg->pp.pp_cap == 0)
2362 return (EOPNOTSUPP);
2365 * Optimize a no state change request away. While it would be OK to
2366 * write to the hardware in theory, some devices have shown odd
2367 * behavior when going from D3 -> D3.
2369 oldstate = pci_get_powerstate(child);
2370 if (oldstate == state)
2374 * The PCI power management specification states that after a state
2375 * transition between PCI power states, system software must
2376 * guarantee a minimal delay before the function accesses the device.
2377 * Compute the worst case delay that we need to guarantee before we
2378 * access the device. Many devices will be responsive much more
2379 * quickly than this delay, but there are some that don't respond
2380 * instantly to state changes. Transitions to/from D3 state require
2381 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2382 * is done below with DELAY rather than a sleeper function because
2383 * this function can be called from contexts where we cannot sleep.
2385 highest = (oldstate > state) ? oldstate : state;
2386 if (highest == PCI_POWERSTATE_D3)
2388 else if (highest == PCI_POWERSTATE_D2)
/* Preserve the non-state bits of the PM status register. */
2392 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2393 & ~PCIM_PSTAT_DMASK;
2396 case PCI_POWERSTATE_D0:
2397 status |= PCIM_PSTAT_D0;
2399 case PCI_POWERSTATE_D1:
2400 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2401 return (EOPNOTSUPP);
2402 status |= PCIM_PSTAT_D1;
2404 case PCI_POWERSTATE_D2:
2405 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2406 return (EOPNOTSUPP);
2407 status |= PCIM_PSTAT_D2;
2409 case PCI_POWERSTATE_D3:
2410 status |= PCIM_PSTAT_D3;
2417 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2420 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Return the child's current PCI power state (PCI_POWERSTATE_D0..D3
 * or _UNKNOWN) as read from the PM status register.  Devices without
 * a power-management capability are reported as D0.
 * NOTE(review): extract is missing the 'status' declaration, the
 * case labels/breaks, and the final return — verify against the
 * pristine source.
 */
2427 pci_get_powerstate_method(device_t dev, device_t child)
2429 struct pci_devinfo *dinfo = device_get_ivars(child);
2430 pcicfgregs *cfg = &dinfo->cfg;
2434 if (cfg->pp.pp_cap != 0) {
2435 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2436 switch (status & PCIM_PSTAT_DMASK) {
2438 result = PCI_POWERSTATE_D0;
2441 result = PCI_POWERSTATE_D1;
2444 result = PCI_POWERSTATE_D2;
2447 result = PCI_POWERSTATE_D3;
2450 result = PCI_POWERSTATE_UNKNOWN;
2454 /* No support, device is always at D0 */
2455 result = PCI_POWERSTATE_D0;
2461 * Some convenience functions for PCI device drivers.
2464 static __inline void
2465 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2469 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2471 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2474 static __inline void
2475 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2479 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2481 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2485 pci_enable_busmaster_method(device_t dev, device_t child)
2487 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2492 pci_disable_busmaster_method(device_t dev, device_t child)
2494 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2499 pci_enable_io_method(device_t dev, device_t child, int space)
2504 case SYS_RES_IOPORT:
2505 bit = PCIM_CMD_PORTEN;
2507 case SYS_RES_MEMORY:
2508 bit = PCIM_CMD_MEMEN;
2513 pci_set_command_bit(dev, child, bit);
2518 pci_disable_io_method(device_t dev, device_t child, int space)
2523 case SYS_RES_IOPORT:
2524 bit = PCIM_CMD_PORTEN;
2526 case SYS_RES_MEMORY:
2527 bit = PCIM_CMD_MEMEN;
2532 pci_clear_command_bit(dev, child, bit);
2537 * New style pci driver. Parent device is either a pci-host-bridge or a
2538 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a device's config header, power-management, MSI, and MSI-X
 * capability summaries to the console during probe.  Format strings
 * must not be altered — they are part of the boot-verbose output
 * that users and scripts see.
 * NOTE(review): extract is missing the bootverbose guard, local
 * declarations (status, ctrl), and several braces/else keywords —
 * verify against the pristine source.
 */
2542 pci_print_verbose(struct pci_devinfo *dinfo)
2546 pcicfgregs *cfg = &dinfo->cfg;
2548 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2549 cfg->vendor, cfg->device, cfg->revid);
2550 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2551 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2552 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2553 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2555 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2556 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2557 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2558 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2559 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2560 if (cfg->intpin > 0)
2561 printf("\tintpin=%c, irq=%d\n",
2562 cfg->intpin +'a' -1, cfg->intline);
2563 if (cfg->pp.pp_cap) {
2566 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2567 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2568 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2569 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2570 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2571 status & PCIM_PSTAT_DMASK);
2573 if (cfg->msi.msi_location) {
2576 ctrl = cfg->msi.msi_ctrl;
2577 printf("\tMSI supports %d message%s%s%s\n",
2578 cfg->msi.msi_msgnum,
2579 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2580 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2581 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2583 if (cfg->msix.msix_location) {
2584 printf("\tMSI-X supports %d message%s ",
2585 cfg->msix.msix_msgnum,
2586 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2587 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2588 printf("in map 0x%x\n",
2589 cfg->msix.msix_table_bar);
2591 printf("in maps 0x%x and 0x%x\n",
2592 cfg->msix.msix_table_bar,
2593 cfg->msix.msix_pba_bar);
/* Return non-zero if I/O port decoding is enabled in the command register. */
2599 pci_porten(device_t dev)
2601 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return non-zero if memory decoding is enabled in the command register. */
2605 pci_memen(device_t dev)
2607 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Read a BAR's current value into *mapp and its size-probe value into
 * *testvalp.  The probe writes all 1's and reads back; the low bits that
 * stick at 0 encode the BAR's size.  Decoding is disabled around the
 * probe, and the original value is restored afterwards.
 */
2611 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2613 struct pci_devinfo *dinfo;
2614 pci_addr_t map, testval;
2619 * The device ROM BAR is special. It is always a 32-bit
2620 * memory BAR. Bit 0 is special and should not be set when
2623 dinfo = device_get_ivars(dev);
2624 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
/* Probe with 0xfffffffe to keep ROM-enable bit 0 clear while sizing. */
2625 map = pci_read_config(dev, reg, 4);
2626 pci_write_config(dev, reg, 0xfffffffe, 4);
2627 testval = pci_read_config(dev, reg, 4);
2628 pci_write_config(dev, reg, map, 4);
2630 *testvalp = testval;
/* Regular BAR: a 64-bit BAR occupies two consecutive dwords. */
2634 map = pci_read_config(dev, reg, 4);
2635 ln2range = pci_maprange(map);
2637 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2640 * Disable decoding via the command register before
2641 * determining the BAR's length since we will be placing it in
2644 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2645 pci_write_config(dev, PCIR_COMMAND,
2646 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2649 * Determine the BAR's length by writing all 1's. The bottom
2650 * log_2(size) bits of the BAR will stick as 0 when we read
2653 pci_write_config(dev, reg, 0xffffffff, 4);
2654 testval = pci_read_config(dev, reg, 4);
2655 if (ln2range == 64) {
2656 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2657 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2661 * Restore the original value of the BAR. We may have reprogrammed
2662 * the BAR of the low-level console device and when booting verbose,
2663 * we need the console device addressable.
2665 pci_write_config(dev, reg, map, 4);
2667 pci_write_config(dev, reg + 4, map >> 32, 4);
/* Re-enable the decoding we turned off for the size probe. */
2668 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2671 *testvalp = testval;
/*
 * Program a BAR with a new base address and re-read it so that
 * pm->pm_value tracks what the hardware actually accepted.  A 64-bit BAR
 * gets its high dword written to reg + 4.
 */
2675 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2677 struct pci_devinfo *dinfo;
2680 /* The device ROM BAR is always a 32-bit memory BAR. */
2681 dinfo = device_get_ivars(dev);
2682 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2685 ln2range = pci_maprange(pm->pm_value);
2686 pci_write_config(dev, pm->pm_reg, base, 4);
2688 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2689 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2691 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2692 pm->pm_reg + 4, 4) << 32;
/*
 * Look up the pci_map record for config register 'reg' in the device's
 * BAR list; the visible match returns it (NULL if absent).
 */
2696 pci_find_bar(device_t dev, int reg)
2698 struct pci_devinfo *dinfo;
2701 dinfo = device_get_ivars(dev);
2702 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2703 if (pm->pm_reg == reg)
/*
 * Report whether decoding is live for a BAR: a ROM BAR additionally
 * requires PCIM_BIOS_ENABLE in the BAR itself; memory BARs (and ROM)
 * check PCIM_CMD_MEMEN, I/O BARs check PCIM_CMD_PORTEN.
 */
2710 pci_bar_enabled(device_t dev, struct pci_map *pm)
2712 struct pci_devinfo *dinfo;
2715 dinfo = device_get_ivars(dev);
2716 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2717 !(pm->pm_value & PCIM_BIOS_ENABLE))
2719 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2720 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2721 return ((cmd & PCIM_CMD_MEMEN) != 0);
2723 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Allocate a pci_map record for BAR 'reg' and insert it into the
 * device's map list, kept sorted by register offset.  Asserts that no
 * record for the same register already exists.
 */
2726 static struct pci_map *
2727 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2729 struct pci_devinfo *dinfo;
2730 struct pci_map *pm, *prev;
2732 dinfo = device_get_ivars(dev);
/* M_WAITOK: may sleep; this runs in device-attach context. */
2733 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2735 pm->pm_value = value;
/* Walk the list to find the insertion point that keeps pm_reg ordered. */
2737 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2738 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2740 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2741 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2745 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2747 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every saved BAR value back into config space (used after a
 * device loses config state, e.g. across a power transition).  64-bit
 * BARs have their high dword written to pm_reg + 4.
 */
2752 pci_restore_bars(device_t dev)
2754 struct pci_devinfo *dinfo;
2758 dinfo = device_get_ivars(dev);
2759 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2760 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2763 ln2range = pci_maprange(pm->pm_value);
2764 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2766 pci_write_config(dev, pm->pm_reg + 4,
2767 pm->pm_value >> 32, 4);
2772 * Add a resource based on a pci map register. Return 1 if the map
2773 * register is a 32bit map register or 2 if it is a 64bit register.
2776 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2777 int force, int prefetch)
2780 pci_addr_t base, map, testval;
2781 pci_addr_t start, end, count;
2782 int barlen, basezero, flags, maprange, mapsize, type;
2784 struct resource *res;
2787 * The BAR may already exist if the device is a CardBus card
2788 * whose CIS is stored in this BAR.
2790 pm = pci_find_bar(dev, reg);
2792 maprange = pci_maprange(pm->pm_value);
2793 barlen = maprange == 64 ? 2 : 1;
/* Size the BAR: 'map' is the live value, 'testval' the all-ones probe. */
2797 pci_read_bar(dev, reg, &map, &testval);
2798 if (PCI_BAR_MEM(map)) {
2799 type = SYS_RES_MEMORY;
2800 if (map & PCIM_BAR_MEM_PREFETCH)
2803 type = SYS_RES_IOPORT;
2804 mapsize = pci_mapsize(testval);
2805 base = pci_mapbase(map);
2806 #ifdef __PCI_BAR_ZERO_VALID
2809 basezero = base == 0;
2811 maprange = pci_maprange(map);
2812 barlen = maprange == 64 ? 2 : 1;
2815 * For I/O registers, if bottom bit is set, and the next bit up
2816 * isn't clear, we know we have a BAR that doesn't conform to the
2817 * spec, so ignore it. Also, sanity check the size of the data
2818 * areas to the type of memory involved. Memory must be at least
2819 * 16 bytes in size, while I/O ranges must be at least 4.
2821 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2823 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2824 (type == SYS_RES_IOPORT && mapsize < 2))
2827 /* Save a record of this BAR. */
2828 pm = pci_add_bar(dev, reg, map, mapsize);
2830 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2831 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2832 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2833 printf(", port disabled\n");
2834 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2835 printf(", memory disabled\n");
2837 printf(", enabled\n");
2841 * If base is 0, then we have problems if this architecture does
2842 * not allow that. It is best to ignore such entries for the
2843 * moment. These will be allocated later if the driver specifically
2844 * requests them. However, some removable busses look better when
2845 * all resources are allocated, so allow '0' to be overridden.
2847 * Similarly treat maps whose value is the same as the test value
2848 * read back. These maps have had all f's written to them by the
2849 * BIOS in an attempt to disable the resources.
2851 if (!force && (basezero || map == testval))
/* Reject BARs whose base doesn't fit in this architecture's u_long. */
2853 if ((u_long)base != base) {
2855 "pci%d:%d:%d:%d bar %#x too many address bits",
2856 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2857 pci_get_function(dev), reg);
2862 * This code theoretically does the right thing, but has
2863 * undesirable side effects in some cases where peripherals
2864 * respond oddly to having these bits enabled. Let the user
2865 * be able to turn them off (since pci_enable_io_modes is 1 by
2868 if (pci_enable_io_modes) {
2869 /* Turn on resources that have been left off by a lazy BIOS */
2870 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2871 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2872 cmd |= PCIM_CMD_PORTEN;
2873 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2875 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2876 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2877 cmd |= PCIM_CMD_MEMEN;
2878 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2881 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2883 if (type == SYS_RES_MEMORY && !pci_memen(dev))
/* BAR size is a power of two; alignment requirement equals size. */
2887 count = (pci_addr_t)1 << mapsize;
2888 flags = RF_ALIGNMENT_LOG2(mapsize);
2890 flags |= RF_PREFETCHABLE;
2891 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
2892 start = 0; /* Let the parent decide. */
2896 end = base + count - 1;
2898 resource_list_add(rl, type, reg, start, end, count);
2901 * Try to allocate the resource for this BAR from our parent
2902 * so that this resource range is already reserved. The
2903 * driver for this device will later inherit this resource in
2904 * pci_alloc_resource().
2906 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
2908 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2910 * If the allocation fails, try to allocate a resource for
2911 * this BAR using any available range. The firmware felt
2912 * it was important enough to assign a resource, so don't
2913 * disable decoding if we can help it.
2915 resource_list_delete(rl, type, reg);
2916 resource_list_add(rl, type, reg, 0, ~0ul, count);
2917 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0ul,
2922 * If the allocation fails, delete the resource list entry
2923 * and disable decoding for this device.
2925 * If the driver requests this resource in the future,
2926 * pci_reserve_map() will try to allocate a fresh
2929 resource_list_delete(rl, type, reg);
2930 pci_disable_io(dev, type);
2933 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2934 pci_get_domain(dev), pci_get_bus(dev),
2935 pci_get_slot(dev), pci_get_function(dev), reg);
/* Record the address the parent actually assigned back into the BAR. */
2937 start = rman_get_start(res);
2938 pci_write_bar(dev, pm, start);
2944 * For ATA devices we need to decide early what addressing mode to use.
2945 * Legacy demands that the primary and secondary ATA ports sits on the
2946 * same addresses that old ISA hardware did. This dictates that we use
2947 * those addresses and ignore the BAR's if we cannot set PCI native
2951 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2952 uint32_t prefetchmask)
2955 int rid, type, progif;
2957 /* if this device supports PCI native addressing use it */
2958 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2959 if ((progif & 0x8a) == 0x8a) {
2960 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2961 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2962 printf("Trying ATA native PCI addressing mode\n");
2963 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2967 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2968 type = SYS_RES_IOPORT;
/* Primary channel: native mode uses BARs 0/1, else legacy ISA 0x1f0/0x3f6. */
2969 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2970 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2971 prefetchmask & (1 << 0));
2972 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2973 prefetchmask & (1 << 1));
2976 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2977 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
2980 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2981 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
/* Secondary channel: native mode uses BARs 2/3, else legacy 0x170/0x376. */
2984 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2985 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2986 prefetchmask & (1 << 2));
2987 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2988 prefetchmask & (1 << 3));
2991 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2992 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
2995 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2996 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
/* BARs 4/5 (bus-master DMA and extra) are mapped normally in both modes. */
2999 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3000 prefetchmask & (1 << 4));
3001 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3002 prefetchmask & (1 << 5));
/*
 * Determine the IRQ for a device and add it as rid-0 SYS_RES_IRQ.
 * Precedence: user tunable hw.pci<d>.<b>.<s>.INT<pin>.irq, then either
 * the intline register or a bus-routed interrupt (see force_route).
 */
3006 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3008 struct pci_devinfo *dinfo = device_get_ivars(dev);
3009 pcicfgregs *cfg = &dinfo->cfg;
3010 char tunable_name[64];
3013 /* Has to have an intpin to have an interrupt. */
3014 if (cfg->intpin == 0)
3017 /* Let the user override the IRQ with a tunable. */
3018 irq = PCI_INVALID_IRQ;
3019 snprintf(tunable_name, sizeof(tunable_name),
3020 "hw.pci%d.%d.%d.INT%c.irq",
3021 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject tunable values outside the valid 1..254 range. */
3022 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3023 irq = PCI_INVALID_IRQ;
3026 * If we didn't get an IRQ via the tunable, then we either use the
3027 * IRQ value in the intline register or we ask the bus to route an
3028 * interrupt for us. If force_route is true, then we only use the
3029 * value in the intline register if the bus was unable to assign an
3032 if (!PCI_INTERRUPT_VALID(irq)) {
3033 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3034 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3035 if (!PCI_INTERRUPT_VALID(irq))
3039 /* If after all that we don't have an IRQ, just bail. */
3040 if (!PCI_INTERRUPT_VALID(irq))
3043 /* Update the config register if it changed. */
3044 if (irq != cfg->intline) {
3046 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3049 /* Add this IRQ as rid 0 interrupt resource. */
3050 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3053 /* Perform early OHCI takeover from SMM. */
3055 ohci_early_takeover(device_t self)
3057 struct resource *res;
3063 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
/* OHCI_IR set means SMM firmware still owns the controller. */
3067 ctl = bus_read_4(res, OHCI_CONTROL);
3068 if (ctl & OHCI_IR) {
3070 printf("ohci early: "
3071 "SMM active, request owner change\n");
/* Request an ownership change and poll up to 100 iterations for SMM to yield. */
3072 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3073 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3075 ctl = bus_read_4(res, OHCI_CONTROL);
/* If SMM never released the controller, force a host-controller reset. */
3077 if (ctl & OHCI_IR) {
3079 printf("ohci early: "
3080 "SMM does not respond, resetting\n");
3081 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3083 /* Disable interrupts */
3084 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3087 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3090 /* Perform early UHCI takeover from SMM. */
3092 uhci_early_takeover(device_t self)
3094 struct resource *res;
3098 * Set the PIRQD enable bit and switch off all the others. We don't
3099 * want legacy support to interfere with us XXX Does this also mean
3100 * that the BIOS won't touch the keyboard anymore if it is connected
3101 * to the ports of the root hub?
3103 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3105 /* Disable interrupts */
3106 rid = PCI_UHCI_BASE_REG;
3107 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3109 bus_write_2(res, UHCI_INTR, 0);
3110 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3114 /* Perform early EHCI takeover from SMM. */
3116 ehci_early_takeover(device_t self)
3118 struct resource *res;
3128 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3132 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3134 /* Synchronise with the BIOS if it owns the controller. */
/* Walk the EHCI extended-capability list in config space to find LEGSUP. */
3135 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3136 eecp = EHCI_EECP_NEXT(eec)) {
3137 eec = pci_read_config(self, eecp, 4);
3138 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3141 bios_sem = pci_read_config(self, eecp +
3142 EHCI_LEGSUP_BIOS_SEM, 1);
3143 if (bios_sem == 0) {
3147 printf("ehci early: "
3148 "SMM active, request owner change\n");
/* Claim the OS semaphore, then poll for the BIOS to drop its semaphore. */
3150 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3152 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3154 bios_sem = pci_read_config(self, eecp +
3155 EHCI_LEGSUP_BIOS_SEM, 1);
3158 if (bios_sem != 0) {
3160 printf("ehci early: "
3161 "SMM does not respond\n");
3163 /* Disable interrupts */
3164 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3165 bus_write_4(res, offs + EHCI_USBINTR, 0);
3167 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3170 /* Perform early XHCI takeover from SMM. */
3172 xhci_early_takeover(device_t self)
3174 struct resource *res;
3184 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3188 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3192 /* Synchronise with the BIOS if it owns the controller. */
/* Walk the xHCI extended-capability list (offsets are in dwords, hence << 2). */
3193 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3194 eecp += XHCI_XECP_NEXT(eec) << 2) {
3195 eec = bus_read_4(res, eecp);
3197 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3200 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3205 printf("xhci early: "
3206 "SMM active, request owner change\n");
/* Claim the OS semaphore, then poll for the BIOS to release ownership. */
3208 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3210 /* wait a maximum of 5 second */
3212 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3214 bios_sem = bus_read_1(res, eecp +
3215 XHCI_XECP_BIOS_SEM);
3218 if (bios_sem != 0) {
3220 printf("xhci early: "
3221 "SMM does not respond\n");
3224 /* Disable interrupts */
3225 offs = bus_read_1(res, XHCI_CAPLENGTH);
3226 bus_write_4(res, offs + XHCI_USBCMD, 0);
/* Read USBSTS to flush/acknowledge the command write. */
3227 bus_read_4(res, offs + XHCI_USBSTS);
3229 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/*
 * Populate a device's resource list: map all BARs (with quirk handling),
 * assign its interrupt, and perform early USB controller takeover from
 * SMM where applicable.
 */
3233 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3235 struct pci_devinfo *dinfo;
3237 struct resource_list *rl;
3238 const struct pci_quirk *q;
3242 dinfo = device_get_ivars(dev);
3244 rl = &dinfo->resources;
3245 devid = (cfg->device << 16) | cfg->vendor;
3247 /* ATA devices needs special map treatment */
3248 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3249 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3250 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3251 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3252 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3253 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* Map each BAR; pci_add_map's return advances i by 1 or 2 (64-bit BARs). */
3255 for (i = 0; i < cfg->nummaps;) {
3257 * Skip quirked resources.
3259 for (q = &pci_quirks[0]; q->devid != 0; q++)
3260 if (q->devid == devid &&
3261 q->type == PCI_QUIRK_UNMAP_REG &&
3262 q->arg1 == PCIR_BAR(i))
3264 if (q->devid != 0) {
3268 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3269 prefetchmask & (1 << i));
3273 * Add additional, quirked resources.
3275 for (q = &pci_quirks[0]; q->devid != 0; q++)
3276 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3277 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3279 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3280 #ifdef __PCI_REROUTE_INTERRUPT
3282 * Try to re-route interrupts. Sometimes the BIOS or
3283 * firmware may leave bogus values in these registers.
3284 * If the re-route fails, then just stick with what we
3287 pci_assign_interrupt(bus, dev, 1);
3289 pci_assign_interrupt(bus, dev, 0);
/* Wrest USB controllers away from SMM early so the BIOS stops driving them. */
3293 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3294 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3295 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3296 xhci_early_takeover(dev);
3297 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3298 ehci_early_takeover(dev);
3299 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3300 ohci_early_takeover(dev);
3301 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3302 uhci_early_takeover(dev);
/*
 * Enumerate every slot/function on bus 'busno' and add a child device
 * for each function that responds to config reads.
 */
3307 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3309 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3310 device_t pcib = device_get_parent(dev);
3311 struct pci_devinfo *dinfo;
3313 int s, f, pcifunchigh;
3316 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3317 ("dinfo_size too small"));
3318 maxslots = PCIB_MAXSLOTS(pcib);
3319 for (s = 0; s <= maxslots; s++) {
3323 hdrtype = REG(PCIR_HDRTYPE, 1);
3324 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Multi-function bit set: probe all functions, not just function 0. */
3326 if (hdrtype & PCIM_MFDEV)
3327 pcifunchigh = PCI_FUNCMAX;
3328 for (f = 0; f <= pcifunchigh; f++) {
3329 dinfo = pci_read_device(pcib, domain, busno, s, f,
3331 if (dinfo != NULL) {
3332 pci_add_child(dev, dinfo);
/*
 * Create the newbus child for a probed PCI function, save/restore its
 * config state, print verbose info, and add its resources.
 */
3340 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3342 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3343 device_set_ivars(dinfo->cfg.dev, dinfo);
3344 resource_list_init(&dinfo->resources);
3345 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3346 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3347 pci_print_verbose(dinfo);
3348 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/* Generic probe for the "pci" bus driver itself. */
3352 pci_probe(device_t dev)
3355 device_set_desc(dev, "PCI bus");
3357 /* Allow other subclasses to override this driver. */
3358 return (BUS_PROBE_GENERIC);
/*
 * Common attach work shared by pci and its subclasses: report the
 * domain/bus, and set up the bus DMA tag (optionally bounded by
 * PCI_DMA_BOUNDARY when this is the top-most PCI bus).
 */
3362 pci_attach_common(device_t dev)
3364 struct pci_softc *sc;
3366 #ifdef PCI_DMA_BOUNDARY
3367 int error, tag_valid;
3370 sc = device_get_softc(dev);
3371 domain = pcib_get_domain(dev);
3372 busno = pcib_get_bus(dev);
3374 device_printf(dev, "domain=%d, physical bus=%d\n",
3376 #ifdef PCI_DMA_BOUNDARY
/* Only create a boundary-restricted tag when the grandparent is not a PCI bus. */
3378 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3379 devclass_find("pci")) {
3380 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3381 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3382 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3383 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3385 device_printf(dev, "Failed to create DMA tag: %d\n",
/* Fallback: inherit the parent's DMA tag. */
3392 sc->sc_dma_tag = bus_get_dma_tag(dev);
/* Attach the PCI bus: common setup, then enumerate and attach children. */
3397 pci_attach(device_t dev)
3399 int busno, domain, error;
3401 error = pci_attach_common(dev);
3406 * Since there can be multiple independently numbered PCI
3407 * busses on systems with multiple PCI domains, we can't use
3408 * the unit number to decide which bus we are probing. We ask
3409 * the parent pcib what our domain and bus numbers are.
3411 domain = pcib_get_domain(dev);
3412 busno = pcib_get_bus(dev);
3413 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3414 return (bus_generic_attach(dev));
/*
 * Put each attached child in 'devlist' into the requested power state,
 * deferring to the firmware's suggested state when it provides one.
 */
3418 pci_set_power_children(device_t dev, device_t *devlist, int numdevs,
3421 device_t child, pcib;
3422 struct pci_devinfo *dinfo;
3426 * Set the device to the given state. If the firmware suggests
3427 * a different power state, use it instead. If power management
3428 * is not present, the firmware is responsible for managing
3429 * device power. Skip children who aren't attached since they
3430 * are handled separately.
3432 pcib = device_get_parent(dev);
3433 for (i = 0; i < numdevs; i++) {
3435 dinfo = device_get_ivars(child);
3437 if (device_is_attached(child) &&
3438 PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
3439 pci_set_powerstate(child, dstate);
/*
 * Bus suspend: save every child's config space, suspend children via
 * the generic method, then (optionally) power them down.
 */
3444 pci_suspend(device_t dev)
3446 device_t child, *devlist;
3447 struct pci_devinfo *dinfo;
3448 int error, i, numdevs;
3451 * Save the PCI configuration space for each child and set the
3452 * device in the appropriate power state for this sleep state.
3454 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3456 for (i = 0; i < numdevs; i++) {
3458 dinfo = device_get_ivars(child);
3459 pci_cfg_save(child, dinfo, 0);
3462 /* Suspend devices before potentially powering them down. */
3463 error = bus_generic_suspend(dev);
3465 free(devlist, M_TEMP);
/* Powering down is a tunable policy (pci_do_power_suspend). */
3468 if (pci_do_power_suspend)
3469 pci_set_power_children(dev, devlist, numdevs,
3471 free(devlist, M_TEMP);
/*
 * Bus resume: power children back up, restore their config space, then
 * resume critical device classes (e.g. base peripherals) before the rest.
 */
3476 pci_resume(device_t dev)
3478 device_t child, *devlist;
3479 struct pci_devinfo *dinfo;
3480 int error, i, numdevs;
3483 * Set each child to D0 and restore its PCI configuration space.
3485 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3487 if (pci_do_power_resume)
3488 pci_set_power_children(dev, devlist, numdevs,
3491 /* Now the device is powered up, restore its config space. */
3492 for (i = 0; i < numdevs; i++) {
3494 dinfo = device_get_ivars(child);
3496 pci_cfg_restore(child, dinfo);
/* Unattached children get their state re-saved instead of resumed. */
3497 if (!device_is_attached(child))
3498 pci_cfg_save(child, dinfo, 1);
3502 * Resume critical devices first, then everything else later.
3504 for (i = 0; i < numdevs; i++) {
3506 switch (pci_get_class(child)) {
3510 case PCIC_BASEPERIPH:
3511 DEVICE_RESUME(child);
/* Second pass: resume all remaining (non-critical) classes. */
3515 for (i = 0; i < numdevs; i++) {
3517 switch (pci_get_class(child)) {
3521 case PCIC_BASEPERIPH:
3524 DEVICE_RESUME(child);
3527 free(devlist, M_TEMP);
/*
 * Locate the preloaded "pci_vendor_data" module (if the loader provided
 * one) and publish its address/size for vendor-string lookups.
 */
3532 pci_load_vendor_data(void)
3538 data = preload_search_by_type("pci_vendor_data");
3540 ptr = preload_fetch_addr(data);
3541 sz = preload_fetch_size(data);
3542 if (ptr != NULL && sz != 0) {
3543 pci_vendordata = ptr;
3544 pci_vendordata_size = sz;
3545 /* terminate the database */
3546 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new driver is registered on the bus: re-probe every
 * child that is not yet claimed, restoring config state first and
 * re-saving it if the probe/attach fails.
 */
3552 pci_driver_added(device_t dev, driver_t *driver)
3557 struct pci_devinfo *dinfo;
3561 device_printf(dev, "driver added\n");
3562 DEVICE_IDENTIFY(driver, dev);
3563 if (device_get_children(dev, &devlist, &numdevs) != 0)
3565 for (i = 0; i < numdevs; i++) {
/* Only unclaimed children (DS_NOTPRESENT) are candidates for the new driver. */
3567 if (device_get_state(child) != DS_NOTPRESENT)
3569 dinfo = device_get_ivars(child);
3570 pci_print_verbose(dinfo);
3572 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3573 pci_cfg_restore(child, dinfo);
3574 if (device_probe_and_attach(child) != 0)
3575 pci_cfg_save(child, dinfo, 1);
3577 free(devlist, M_TEMP);
/*
 * Bus method to hook up an interrupt handler.  After the generic setup,
 * direct children get MSI/MSI-X programming: the parent bridge maps the
 * vector to an address/data pair which is written into the device, and
 * per-message handler counts are maintained so messages are enabled on
 * first use.  INTx is enabled or masked to match.
 */
3581 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3582 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3584 struct pci_devinfo *dinfo;
3585 struct msix_table_entry *mte;
3586 struct msix_vector *mv;
3592 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3597 /* If this is not a direct child, just bail out. */
3598 if (device_get_parent(child) != dev) {
3603 rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt; rid > 0 selects an MSI/MSI-X message. */
3605 /* Make sure that INTx is enabled */
3606 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3609 * Check to see if the interrupt is MSI or MSI-X.
3610 * Ask our parent to map the MSI and give
3611 * us the address and data register values.
3612 * If we fail for some reason, teardown the
3613 * interrupt handler.
3615 dinfo = device_get_ivars(child);
3616 if (dinfo->cfg.msi.msi_alloc > 0) {
/* MSI path: lazily map the vector on first handler registration. */
3617 if (dinfo->cfg.msi.msi_addr == 0) {
3618 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3619 ("MSI has handlers, but vectors not mapped"));
3620 error = PCIB_MAP_MSI(device_get_parent(dev),
3621 child, rman_get_start(irq), &addr, &data);
3624 dinfo->cfg.msi.msi_addr = addr;
3625 dinfo->cfg.msi.msi_data = data;
3627 if (dinfo->cfg.msi.msi_handlers == 0)
3628 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3629 dinfo->cfg.msi.msi_data);
3630 dinfo->cfg.msi.msi_handlers++;
/* MSI-X path: per-table-entry bookkeeping, indexed by rid - 1. */
3632 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3633 ("No MSI or MSI-X interrupts allocated"));
3634 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3635 ("MSI-X index too high"));
3636 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3637 KASSERT(mte->mte_vector != 0, ("no message vector"));
3638 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3639 KASSERT(mv->mv_irq == rman_get_start(irq),
3641 if (mv->mv_address == 0) {
3642 KASSERT(mte->mte_handlers == 0,
3643 ("MSI-X table entry has handlers, but vector not mapped"));
3644 error = PCIB_MAP_MSI(device_get_parent(dev),
3645 child, rman_get_start(irq), &addr, &data);
3648 mv->mv_address = addr;
3651 if (mte->mte_handlers == 0) {
3652 pci_enable_msix(child, rid - 1, mv->mv_address,
3654 pci_unmask_msix(child, rid - 1);
3656 mte->mte_handlers++;
3660 * Make sure that INTx is disabled if we are using MSI/MSI-X,
3661 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
3662 * in which case we "enable" INTx so MSI/MSI-X actually works.
3664 if (!pci_has_quirk(pci_get_devid(child),
3665 PCI_QUIRK_MSI_INTX_BUG))
3666 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3668 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* On failure, undo the generic setup so no stale handler remains. */
3671 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus method to remove an interrupt handler.  For direct children this
 * also decrements the MSI/MSI-X handler counts, disabling MSI or
 * masking the MSI-X message when the last handler goes away, and masks
 * INTx via PCIM_CMD_INTxDIS.
 */
3681 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3684 struct msix_table_entry *mte;
3685 struct resource_list_entry *rle;
3686 struct pci_devinfo *dinfo;
3689 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3692 /* If this isn't a direct child, just bail out */
3693 if (device_get_parent(child) != dev)
3694 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3696 rid = rman_get_rid(irq);
/* Mask INTx while the handler is being removed. */
3699 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3702 * Check to see if the interrupt is MSI or MSI-X. If so,
3703 * decrement the appropriate handlers count and mask the
3704 * MSI-X message, or disable MSI messages if the count
3707 dinfo = device_get_ivars(child);
3708 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3709 if (rle->res != irq)
3711 if (dinfo->cfg.msi.msi_alloc > 0) {
3712 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3713 ("MSI-X index too high"));
3714 if (dinfo->cfg.msi.msi_handlers == 0)
3716 dinfo->cfg.msi.msi_handlers--;
3717 if (dinfo->cfg.msi.msi_handlers == 0)
3718 pci_disable_msi(child);
3720 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3721 ("No MSI or MSI-X interrupts allocated"));
3722 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3723 ("MSI-X index too high"));
3724 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3725 if (mte->mte_handlers == 0)
3727 mte->mte_handlers--;
3728 if (mte->mte_handlers == 0)
3729 pci_mask_msix(child, rid - 1);
3732 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3735 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Print the one-line device announcement for a child: its port, memory,
 * and IRQ resources, flags, and slot.function location.  Returns the
 * number of characters printed.
 */
3740 pci_print_child(device_t dev, device_t child)
3742 struct pci_devinfo *dinfo;
3743 struct resource_list *rl;
3746 dinfo = device_get_ivars(child);
3747 rl = &dinfo->resources;
3749 retval += bus_print_child_header(dev, child);
3751 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3752 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3753 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3754 if (device_get_flags(dev))
3755 retval += printf(" flags %#x", device_get_flags(dev));
3757 retval += printf(" at device %d.%d", pci_get_slot(child),
3758 pci_get_function(child));
3760 retval += bus_print_child_footer(dev, child);
3769 int report; /* 0 = bootverbose, 1 = always */
3771 } pci_nomatch_tab[] = {
3772 {PCIC_OLD, -1, 1, "old"},
3773 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
3774 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
3775 {PCIC_STORAGE, -1, 1, "mass storage"},
3776 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
3777 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
3778 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
3779 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
3780 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
3781 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
3782 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
3783 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
3784 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
3785 {PCIC_NETWORK, -1, 1, "network"},
3786 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
3787 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
3788 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
3789 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
3790 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
3791 {PCIC_DISPLAY, -1, 1, "display"},
3792 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
3793 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
3794 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
3795 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
3796 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
3797 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
3798 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
3799 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
3800 {PCIC_MEMORY, -1, 1, "memory"},
3801 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
3802 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
3803 {PCIC_BRIDGE, -1, 1, "bridge"},
3804 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
3805 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
3806 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
3807 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
3808 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
3809 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
3810 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
3811 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
3812 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
3813 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
3814 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
3815 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
3816 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
3817 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
3818 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
3819 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
3820 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
3821 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
3822 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
3823 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
3824 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
3825 {PCIC_INPUTDEV, -1, 1, "input device"},
3826 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
3827 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
3828 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
3829 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
3830 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
3831 {PCIC_DOCKING, -1, 1, "docking station"},
3832 {PCIC_PROCESSOR, -1, 1, "processor"},
3833 {PCIC_SERIALBUS, -1, 1, "serial bus"},
3834 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
3835 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
3836 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
3837 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
3838 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
3839 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
3840 {PCIC_WIRELESS, -1, 1, "wireless controller"},
3841 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
3842 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
3843 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
3844 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
3845 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
3846 {PCIC_SATCOM, -1, 1, "satellite communication"},
3847 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
3848 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
3849 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
3850 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
3851 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
3852 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
3853 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
3854 {PCIC_DASP, -1, 0, "dasp"},
3855 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
/*
 * Bus PROBE_NOMATCH method: announce a PCI child that no driver claimed.
 * The description is taken from the loaded vendor database when available,
 * otherwise from the class/subclass name table (pci_nomatch_tab), and the
 * child's config state is then saved via pci_cfg_save().
 * NOTE(review): this extract is missing interior lines (return type,
 * braces, some statements); compare against the full source file.
 */
3860 pci_probe_nomatch(device_t dev, device_t child)
3863 const char *cp, *scp;
3867 * Look for a listing for this device in a loaded device database.
3870 if ((device = pci_describe_device(child)) != NULL) {
3871 device_printf(dev, "<%s>", device);
/* pci_describe_device() returns malloc'd storage; release it here. */
3872 free(device, M_DEVBUF);
3875 * Scan the class/subclass descriptions for a general
3880 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3881 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* subclass == -1 is the wildcard entry naming the whole class. */
3882 if (pci_nomatch_tab[i].subclass == -1) {
3883 cp = pci_nomatch_tab[i].desc;
3884 report = pci_nomatch_tab[i].report;
3885 } else if (pci_nomatch_tab[i].subclass ==
3886 pci_get_subclass(child)) {
3887 scp = pci_nomatch_tab[i].desc;
3888 report = pci_nomatch_tab[i].report;
/* Print "<class, subclass>" only if the table says to, or verbose boot. */
3892 if (report || bootverbose) {
3893 device_printf(dev, "<%s%s%s>",
3895 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3899 if (report || bootverbose) {
3900 printf(" at device %d.%d (no driver attached)\n",
3901 pci_get_slot(child), pci_get_function(child));
/* Save config state and (policy permitting) power the device down. */
3903 pci_cfg_save(child, device_get_ivars(child), 1);
3907 * Parse the PCI device database, if loaded, and return a pointer to a
3908 * description of the device.
3910 * The database is flat text formatted as follows:
3912 * Any line not in a valid format is ignored.
3913 * Lines are terminated with newline '\n' characters.
3915 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3918 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3919 * - devices cannot be listed without a corresponding VENDOR line.
3920 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3921 * another TAB, then the device name.
3925 * Assuming (ptr) points to the beginning of a line in the database,
3926 * return the vendor or device and description of the next entry.
3927 * The value of (vendor) or (device) inappropriate for the entry type
3928 * is set to -1. Returns nonzero at the end of the database.
3930 * Note that this is somewhat fragile in the face of corrupt data;
3931 * we attempt to safeguard against this by spamming the end of the
3932 * database with a newline when we initialise.
/*
 * Parse one line of the flat-text vendor database.  A vendor line
 * (hex code, TAB, name) fills in *vendor; a device line (leading TAB,
 * hex code, TAB, name) fills in *device.  Advances *ptr past the line
 * and returns nonzero at the end of the database.
 * NOTE(review): interior lines are missing from this extract.
 */
3935 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* Bytes remaining in the mapped database; bounds the scans below. */
3944 left = pci_vendordata_size - (cp - pci_vendordata);
/* %80[^\n] bounds the copy; *desc must point at a buffer of >= 81 bytes. */
3952 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3956 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3959 /* skip to next line */
3960 while (*cp != '\n' && left > 0) {
3969 /* skip to next line */
3970 while (*cp != '\n' && left > 0) {
3974 if (*cp == '\n' && left > 0)
/*
 * Build a "vendor name, device name" description for dev from the
 * loaded vendor database, or return NULL when no database is loaded
 * or on allocation failure.  The returned string is malloc'd from
 * M_DEVBUF; the caller must free it (see pci_probe_nomatch()).
 */
3981 pci_describe_device(device_t dev)
3984 char *desc, *vp, *dp, *line;
3986 desc = vp = dp = NULL;
3989 * If we have no vendor data, we can't do anything.
3991 if (pci_vendordata == NULL)
3995 * Scan the vendor data looking for this device
3997 line = pci_vendordata;
/* 80-byte scratch buffers match the %80[^\n] scanf width in the parser. */
3998 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4001 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4003 if (vendor == pci_get_vendor(dev))
4006 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4009 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4017 if (device == pci_get_device(dev))
/* No device-name match: fall back to the raw hex device ID. */
4021 snprintf(dp, 80, "0x%x", pci_get_device(dev));
/* + 3 covers the ", " separator and the terminating NUL. */
4022 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4024 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus READ_IVAR method: return a cached PCI config value for a child.
 * All values come from the pcicfgregs snapshot in the child's ivars,
 * not from live config-space reads.
 */
4034 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
4036 struct pci_devinfo *dinfo;
4039 dinfo = device_get_ivars(child);
4043 case PCI_IVAR_ETHADDR:
4045 * The generic accessor doesn't deal with failure, so
4046 * we set the return value, then return an error.
4048 *((uint8_t **) result) = NULL;
4050 case PCI_IVAR_SUBVENDOR:
4051 *result = cfg->subvendor;
4053 case PCI_IVAR_SUBDEVICE:
4054 *result = cfg->subdevice;
4056 case PCI_IVAR_VENDOR:
4057 *result = cfg->vendor;
4059 case PCI_IVAR_DEVICE:
4060 *result = cfg->device;
4062 case PCI_IVAR_DEVID:
/* Combined 32-bit ID: device in the high half, vendor in the low. */
4063 *result = (cfg->device << 16) | cfg->vendor;
4065 case PCI_IVAR_CLASS:
4066 *result = cfg->baseclass;
4068 case PCI_IVAR_SUBCLASS:
4069 *result = cfg->subclass;
4071 case PCI_IVAR_PROGIF:
4072 *result = cfg->progif;
4074 case PCI_IVAR_REVID:
4075 *result = cfg->revid;
4077 case PCI_IVAR_INTPIN:
4078 *result = cfg->intpin;
4081 *result = cfg->intline;
4083 case PCI_IVAR_DOMAIN:
4084 *result = cfg->domain;
4090 *result = cfg->slot;
4092 case PCI_IVAR_FUNCTION:
4093 *result = cfg->func;
4095 case PCI_IVAR_CMDREG:
4096 *result = cfg->cmdreg;
4098 case PCI_IVAR_CACHELNSZ:
4099 *result = cfg->cachelnsz;
4101 case PCI_IVAR_MINGNT:
4102 *result = cfg->mingnt;
4104 case PCI_IVAR_MAXLAT:
4105 *result = cfg->maxlat;
4107 case PCI_IVAR_LATTIMER:
4108 *result = cfg->lattimer;
/*
 * Bus WRITE_IVAR method: only the cached interrupt pin is writable.
 * All identity ivars (vendor, device, class, ...) are read-only and
 * fall through to EINVAL.
 */
4117 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4119 struct pci_devinfo *dinfo;
4121 dinfo = device_get_ivars(child);
4124 case PCI_IVAR_INTPIN:
4125 dinfo->cfg.intpin = value;
/* Everything below is identity data callers must not change. */
4127 case PCI_IVAR_ETHADDR:
4128 case PCI_IVAR_SUBVENDOR:
4129 case PCI_IVAR_SUBDEVICE:
4130 case PCI_IVAR_VENDOR:
4131 case PCI_IVAR_DEVICE:
4132 case PCI_IVAR_DEVID:
4133 case PCI_IVAR_CLASS:
4134 case PCI_IVAR_SUBCLASS:
4135 case PCI_IVAR_PROGIF:
4136 case PCI_IVAR_REVID:
4138 case PCI_IVAR_DOMAIN:
4141 case PCI_IVAR_FUNCTION:
4142 return (EINVAL); /* disallow for now */
4149 #include "opt_ddb.h"
4151 #include <ddb/ddb.h>
4152 #include <sys/cons.h>
4155 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print one line of identity/config data per device, stopping early if
 * the pager is quit.
 */
4158 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4160 struct pci_devinfo *dinfo;
4161 struct devlist *devlist_head;
4164 int i, error, none_count;
4167 /* get the head of the device queue */
4168 devlist_head = &pci_devq;
4171 * Go through the list of devices and print out devices
/* Bound the walk by pci_numdevs and honor db_pager_quit. */
4173 for (error = 0, i = 0,
4174 dinfo = STAILQ_FIRST(devlist_head);
4175 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4176 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4178 /* Populate pd_name and pd_unit */
4181 name = device_get_name(dinfo->cfg.dev);
4184 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4185 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4186 (name && *name) ? name : "none",
4187 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4189 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4190 p->pc_sel.pc_func, (p->pc_class << 16) |
4191 (p->pc_subclass << 8) | p->pc_progif,
4192 (p->pc_subdevice << 16) | p->pc_subvendor,
4193 (p->pc_device << 16) | p->pc_vendor,
4194 p->pc_revid, p->pc_hdr);
/*
 * Lazily reserve and program a BAR for a child that is allocating a
 * SYS_RES_MEMORY/SYS_RES_IOPORT resource.  Sizes the BAR (or reuses a
 * previously-probed size), validates that the requested type matches
 * what the BAR decodes, reserves the range on the resource list, and
 * writes the assigned base address back into the BAR.
 */
4199 static struct resource *
4200 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4201 u_long start, u_long end, u_long count, u_int flags)
4203 struct pci_devinfo *dinfo = device_get_ivars(child);
4204 struct resource_list *rl = &dinfo->resources;
4205 struct resource *res;
4207 pci_addr_t map, testval;
4211 pm = pci_find_bar(child, *rid);
4213 /* This is a BAR that we failed to allocate earlier. */
4214 mapsize = pm->pm_size;
4218 * Weed out the bogons, and figure out how large the
4219 * BAR/map is. BARs that read back 0 here are bogus
4220 * and unimplemented. Note: atapci in legacy mode are
4221 * special and handled elsewhere in the code. If you
4222 * have a atapci device in legacy mode and it fails
4223 * here, that other code is broken.
4225 pci_read_bar(child, *rid, &map, &testval);
4228 * Determine the size of the BAR and ignore BARs with a size
4229 * of 0. Device ROM BARs use a different mask value.
4231 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4232 mapsize = pci_romsize(testval);
4234 mapsize = pci_mapsize(testval);
4237 pm = pci_add_bar(child, *rid, map, mapsize);
/* Reject requests whose type disagrees with what the BAR decodes. */
4240 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4241 if (type != SYS_RES_MEMORY) {
4244 "child %s requested type %d for rid %#x,"
4245 " but the BAR says it is an memio\n",
4246 device_get_nameunit(child), type, *rid);
4250 if (type != SYS_RES_IOPORT) {
4253 "child %s requested type %d for rid %#x,"
4254 " but the BAR says it is an ioport\n",
4255 device_get_nameunit(child), type, *rid);
4261 * For real BARs, we need to override the size that
4262 * the driver requests, because that's what the BAR
4263 * actually uses and we would otherwise have a
4264 * situation where we might allocate the excess to
4265 * another driver, which won't work.
4267 count = (pci_addr_t)1 << mapsize;
/* BARs are naturally aligned to their size; bump alignment to match. */
4268 if (RF_ALIGNMENT(flags) < mapsize)
4269 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4270 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4271 flags |= RF_PREFETCHABLE;
4274 * Allocate enough resource, and then write back the
4275 * appropriate BAR for that resource.
4277 resource_list_add(rl, type, *rid, start, end, count);
4278 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
4279 count, flags & ~RF_ACTIVE);
/* Reservation failed: undo the resource_list_add() above. */
4281 resource_list_delete(rl, type, *rid);
4282 device_printf(child,
4283 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4284 count, *rid, type, start, end);
4288 device_printf(child,
4289 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4290 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address the resource manager assigned. */
4291 map = rman_get_start(res);
4292 pci_write_bar(child, pm, map);
/*
 * Bus ALLOC_RESOURCE method.  For direct children this performs lazy
 * BAR allocation: interrupts may be routed on first use, bridge window
 * rids are passed up the tree, and BAR-backed memory/ioport requests
 * are reserved via pci_reserve_map() before the normal resource-list
 * allocation.
 */
4298 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4299 u_long start, u_long end, u_long count, u_int flags)
4301 struct pci_devinfo *dinfo;
4302 struct resource_list *rl;
4303 struct resource_list_entry *rle;
4304 struct resource *res;
/* Requests for grandchildren are simply forwarded to our parent. */
4307 if (device_get_parent(child) != dev)
4308 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4309 type, rid, start, end, count, flags));
4312 * Perform lazy resource allocation
4314 dinfo = device_get_ivars(child);
4315 rl = &dinfo->resources;
4320 * Can't alloc legacy interrupt once MSI messages have
4323 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4324 cfg->msix.msix_alloc > 0))
4328 * If the child device doesn't have an interrupt
4329 * routed and is deserving of an interrupt, try to
4332 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4334 pci_assign_interrupt(dev, child, 0);
4336 case SYS_RES_IOPORT:
4337 case SYS_RES_MEMORY:
4340 * PCI-PCI bridge I/O window resources are not BARs.
4341 * For those allocations just pass the request up the
4344 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4346 case PCIR_IOBASEL_1:
4347 case PCIR_MEMBASE_1:
4348 case PCIR_PMBASEL_1:
4350 * XXX: Should we bother creating a resource
4353 return (bus_generic_alloc_resource(dev, child,
4354 type, rid, start, end, count, flags));
4358 /* Reserve resources for this BAR if needed. */
4359 rle = resource_list_find(rl, type, *rid);
4361 res = pci_reserve_map(dev, child, type, rid, start, end,
/* Hand the (now reserved) entry to the generic resource-list path. */
4367 return (resource_list_alloc(rl, dev, child, type, rid,
4368 start, end, count, flags));
/*
 * Bus RELEASE_RESOURCE method.  Mirrors pci_alloc_resource(): requests
 * for grandchildren and PCI-PCI bridge window rids go up the tree,
 * everything else is released through the child's resource list.
 */
4372 pci_release_resource(device_t dev, device_t child, int type, int rid,
4375 struct pci_devinfo *dinfo;
4376 struct resource_list *rl;
4379 if (device_get_parent(child) != dev)
4380 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4383 dinfo = device_get_ivars(child);
4387 * PCI-PCI bridge I/O window resources are not BARs. For
4388 * those allocations just pass the request up the tree.
4390 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
4391 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
4393 case PCIR_IOBASEL_1:
4394 case PCIR_MEMBASE_1:
4395 case PCIR_PMBASEL_1:
4396 return (bus_generic_release_resource(dev, child, type,
4402 rl = &dinfo->resources;
4403 return (resource_list_release(rl, dev, child, type, rid, r));
4407 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4410 struct pci_devinfo *dinfo;
4413 error = bus_generic_activate_resource(dev, child, type, rid, r);
4417 /* Enable decoding in the command register when activating BARs. */
4418 if (device_get_parent(child) == dev) {
4419 /* Device ROMs need their decoding explicitly enabled. */
4420 dinfo = device_get_ivars(child);
4421 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4422 pci_write_bar(child, pci_find_bar(child, rid),
4423 rman_get_start(r) | PCIM_BIOS_ENABLE);
4425 case SYS_RES_IOPORT:
4426 case SYS_RES_MEMORY:
4427 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus DEACTIVATE_RESOURCE method.  After generic deactivation, clear
 * the ROM enable bit for a direct child's ROM BAR by rewriting the BAR
 * without PCIM_BIOS_ENABLE.
 */
4435 pci_deactivate_resource(device_t dev, device_t child, int type,
4436 int rid, struct resource *r)
4438 struct pci_devinfo *dinfo;
4441 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4445 /* Disable decoding for device ROMs. */
4446 if (device_get_parent(child) == dev) {
4447 dinfo = device_get_ivars(child);
4448 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4449 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Detach and destroy a PCI child device: detach its driver if attached,
 * disable memory/ioport decoding, release or unreserve every resource on
 * its resource list (complaining about any still in use), and finally
 * delete the child from the tree.
 */
4456 pci_delete_child(device_t dev, device_t child)
4458 struct resource_list_entry *rle;
4459 struct resource_list *rl;
4460 struct pci_devinfo *dinfo;
4462 dinfo = device_get_ivars(child);
4463 rl = &dinfo->resources;
4465 if (device_is_attached(child))
4466 device_detach(child);
4468 /* Turn off access to resources we're about to free */
4469 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4470 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4472 /* Free all allocated resources */
4473 STAILQ_FOREACH(rle, rl, link) {
/* Active or busy entries indicate a driver bug; report then release. */
4475 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4476 resource_list_busy(rl, rle->type, rle->rid)) {
4477 pci_printf(&dinfo->cfg,
4478 "Resource still owned, oops. "
4479 "(type=%d, rid=%d, addr=%lx)\n",
4480 rle->type, rle->rid,
4481 rman_get_start(rle->res));
4482 bus_release_resource(child, rle->type, rle->rid,
4485 resource_list_unreserve(rl, dev, child, rle->type,
4489 resource_list_free(rl);
4491 device_delete_child(dev, child);
/*
 * Bus DELETE_RESOURCE method: remove one entry from a direct child's
 * resource list, unreserving it first.  Entries that are still active
 * or busy are reported and left alone.
 */
4496 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4498 struct pci_devinfo *dinfo;
4499 struct resource_list *rl;
4500 struct resource_list_entry *rle;
/* Only direct children have entries on our resource lists. */
4502 if (device_get_parent(child) != dev)
4505 dinfo = device_get_ivars(child);
4506 rl = &dinfo->resources;
4507 rle = resource_list_find(rl, type, rid);
4512 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4513 resource_list_busy(rl, type, rid)) {
4514 device_printf(dev, "delete_resource: "
4515 "Resource still owned by child, oops. "
4516 "(type=%d, rid=%d, addr=%lx)\n",
4517 type, rid, rman_get_start(rle->res));
4520 resource_list_unreserve(rl, dev, child, type, rid);
4522 resource_list_delete(rl, type, rid);
4525 struct resource_list *
4526 pci_get_resource_list (device_t dev, device_t child)
4528 struct pci_devinfo *dinfo = device_get_ivars(child);
4530 return (&dinfo->resources);
4534 pci_get_dma_tag(device_t bus, device_t dev)
4536 struct pci_softc *sc = device_get_softc(bus);
4538 return (sc->sc_dma_tag);
4542 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4544 struct pci_devinfo *dinfo = device_get_ivars(child);
4545 pcicfgregs *cfg = &dinfo->cfg;
4547 return (PCIB_READ_CONFIG(device_get_parent(dev),
4548 cfg->bus, cfg->slot, cfg->func, reg, width));
4552 pci_write_config_method(device_t dev, device_t child, int reg,
4553 uint32_t val, int width)
4555 struct pci_devinfo *dinfo = device_get_ivars(child);
4556 pcicfgregs *cfg = &dinfo->cfg;
4558 PCIB_WRITE_CONFIG(device_get_parent(dev),
4559 cfg->bus, cfg->slot, cfg->func, reg, val, width);
4563 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4567 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4568 pci_get_function(child));
4573 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4576 struct pci_devinfo *dinfo;
4579 dinfo = device_get_ivars(child);
4581 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4582 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4583 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
4589 pci_assign_interrupt_method(device_t dev, device_t child)
4591 struct pci_devinfo *dinfo = device_get_ivars(child);
4592 pcicfgregs *cfg = &dinfo->cfg;
4594 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler for the pci module.  Presumably on MOD_LOAD it
 * initializes the global device list, creates the /dev/pci control
 * device and loads the vendor database; presumably on MOD_UNLOAD it
 * destroys the cdev -- the case labels are missing from this extract,
 * so confirm against the full source.
 */
4599 pci_modevent(module_t mod, int what, void *arg)
4601 static struct cdev *pci_cdev;
4605 STAILQ_INIT(&pci_devq);
4607 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4609 pci_load_vendor_data();
4613 destroy_dev(pci_cdev);
/*
 * Restore a type-0 device's config registers from the cached copy in
 * dinfo, typically after a power-state transition or resume.  The
 * device is first brought back to D0 (BARs and other registers reset
 * when leaving D3), then the cached header fields and any MSI/MSI-X
 * state are written back.
 */
4621 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
4625 * Only do header type 0 devices. Type 1 devices are bridges,
4626 * which we know need special treatment. Type 2 devices are
4627 * cardbus bridges which also require special treatment.
4628 * Other types are unknown, and we err on the side of safety
4631 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
4635 * Restore the device to full power mode. We must do this
4636 * before we restore the registers because moving from D3 to
4637 * D0 will cause the chip's BARs and some other registers to
4638 * be reset to some unknown power on reset values. Cut down
4639 * the noise on boot by doing nothing if we are already in
4642 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
4643 pci_set_powerstate(dev, PCI_POWERSTATE_D0)
4644 pci_restore_bars(dev);
4645 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
4646 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
4647 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
4648 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
4649 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
4650 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
4651 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
4652 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
4653 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
4655 /* Restore MSI and MSI-X configurations if they are present. */
4656 if (dinfo->cfg.msi.msi_location != 0)
4657 pci_resume_msi(dev);
4658 if (dinfo->cfg.msix.msix_location != 0)
4659 pci_resume_msix(dev);
/*
 * Snapshot a type-0 device's writable config registers into dinfo so
 * pci_cfg_restore() can put them back later.  When setstate is set the
 * device may then be powered down to D3, subject to the
 * pci_do_power_nodriver policy and per-class exemptions.
 */
4663 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
4669 * Only do header type 0 devices. Type 1 devices are bridges, which
4670 * we know need special treatment. Type 2 devices are cardbus bridges
4671 * which also require special treatment. Other types are unknown, and
4672 * we err on the side of safety by ignoring them. Powering down
4673 * bridges should not be undertaken lightly.
4675 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
4679 * Some drivers apparently write to these registers w/o updating our
4680 * cached copy. No harm happens if we update the copy, so do so here
4681 * so we can restore them. The COMMAND register is modified by the
4682 * bus w/o updating the cache. This should represent the normally
4683 * writable portion of the 'defined' part of type 0 headers. In
4684 * theory we also need to save/restore the PCI capability structures
4685 * we know about, but apart from power we don't know any that are
4688 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
4689 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4690 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
4691 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
4692 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
4693 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
4694 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
4695 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
4696 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
4697 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
4698 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
4699 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
4700 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
4701 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
4702 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
4705 * don't set the state for display devices, base peripherals and
4706 * memory devices since bad things happen when they are powered down.
4707 * We should (a) have drivers that can easily detach and (b) use
4708 * generic drivers for these devices so that some device actually
4709 * attaches. We need to make sure that when we implement (a) we don't
4710 * power the device down on a reattach.
4712 cls = pci_get_class(dev);
4715 switch (pci_do_power_nodriver)
4717 case 0: /* NO powerdown at all */
4719 case 1: /* Conservative about what to power down */
4720 if (cls == PCIC_STORAGE)
4723 case 2: /* Aggressive about what to power down */
4724 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4725 cls == PCIC_BASEPERIPH)
4728 case 3: /* Power down everything */
4732 * PCI spec says we can only go into D3 state from D0 state.
4733 * Transition from D[12] into D0 before going to D3 state.
4735 ps = pci_get_powerstate(dev);
4736 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4737 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4738 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4739 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
4742 /* Wrapper APIs suitable for device driver use. */
4744 pci_save_state(device_t dev)
4746 struct pci_devinfo *dinfo;
4748 dinfo = device_get_ivars(dev);
4749 pci_cfg_save(dev, dinfo, 0);
4753 pci_restore_state(device_t dev)
4755 struct pci_devinfo *dinfo;
4757 dinfo = device_get_ivars(dev);
4758 pci_cfg_restore(dev, dinfo);
4761 /* Find the upstream port of a given PCI device in a root complex. */
4763 pci_find_pcie_root_port(device_t dev)
4765 struct pci_devinfo *dinfo;
4766 devclass_t pci_class;
4769 pci_class = devclass_find("pci");
4770 KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
4771 ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
4774 * Walk the bridge hierarchy until we find a PCI-e root
4775 * port or a non-PCI device.
4778 bus = device_get_parent(dev);
4779 KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
4780 device_get_nameunit(dev)));
4782 pcib = device_get_parent(bus);
4783 KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
4784 device_get_nameunit(bus)));
4787 * pcib's parent must be a PCI bus for this to be a
4790 if (device_get_devclass(device_get_parent(pcib)) != pci_class)
4793 dinfo = device_get_ivars(pcib);
4794 if (dinfo->cfg.pcie.pcie_location != 0 &&
4795 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)