2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
74 * XXX: Due to a limitation of the bus_dma_tag_create() API, we cannot
75 * specify a 4GB boundary on 32-bit targets. Usually this does not
76 * matter as it is ok to use a boundary of 0 on these systems.
77 * However, in the case of PAE, DMA addresses can cross a 4GB
78 * boundary, so as a workaround use a 2GB boundary.
80 #if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
82 #define PCI_DMA_BOUNDARY 0x80000000
84 #define PCI_DMA_BOUNDARY 0x100000000
88 #define PCIR_IS_BIOS(cfg, reg) \
89 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
90 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
92 static int pci_has_quirk(uint32_t devid, int quirk);
93 static pci_addr_t pci_mapbase(uint64_t mapreg);
94 static const char *pci_maptype(uint64_t mapreg);
95 static int pci_mapsize(uint64_t testval);
96 static int pci_maprange(uint64_t mapreg);
97 static pci_addr_t pci_rombase(uint64_t mapreg);
98 static int pci_romsize(uint64_t testval);
99 static void pci_fixancient(pcicfgregs *cfg);
100 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
102 static int pci_porten(device_t dev);
103 static int pci_memen(device_t dev);
104 static void pci_assign_interrupt(device_t bus, device_t dev,
106 static int pci_add_map(device_t bus, device_t dev, int reg,
107 struct resource_list *rl, int force, int prefetch);
108 static int pci_probe(device_t dev);
109 static int pci_attach(device_t dev);
110 static void pci_load_vendor_data(void);
111 static int pci_describe_parse_line(char **ptr, int *vendor,
112 int *device, char **desc);
113 static char *pci_describe_device(device_t dev);
114 static bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev);
115 static int pci_modevent(module_t mod, int what, void *arg);
116 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
118 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
119 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
120 int reg, uint32_t *data);
122 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
123 int reg, uint32_t data);
125 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
126 static void pci_disable_msi(device_t dev);
127 static void pci_enable_msi(device_t dev, uint64_t address,
129 static void pci_enable_msix(device_t dev, u_int index,
130 uint64_t address, uint32_t data);
131 static void pci_mask_msix(device_t dev, u_int index);
132 static void pci_unmask_msix(device_t dev, u_int index);
133 static int pci_msi_blacklisted(void);
134 static int pci_msix_blacklisted(void);
135 static void pci_resume_msi(device_t dev);
136 static void pci_resume_msix(device_t dev);
137 static int pci_remap_intr_method(device_t bus, device_t dev,
140 static device_method_t pci_methods[] = {
141 /* Device interface */
142 DEVMETHOD(device_probe, pci_probe),
143 DEVMETHOD(device_attach, pci_attach),
144 DEVMETHOD(device_detach, bus_generic_detach),
145 DEVMETHOD(device_shutdown, bus_generic_shutdown),
146 DEVMETHOD(device_suspend, pci_suspend),
147 DEVMETHOD(device_resume, pci_resume),
150 DEVMETHOD(bus_print_child, pci_print_child),
151 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
152 DEVMETHOD(bus_read_ivar, pci_read_ivar),
153 DEVMETHOD(bus_write_ivar, pci_write_ivar),
154 DEVMETHOD(bus_driver_added, pci_driver_added),
155 DEVMETHOD(bus_setup_intr, pci_setup_intr),
156 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
158 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
159 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
160 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
161 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
162 DEVMETHOD(bus_delete_resource, pci_delete_resource),
163 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
164 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
165 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
166 DEVMETHOD(bus_activate_resource, pci_activate_resource),
167 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
168 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
169 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
170 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
173 DEVMETHOD(pci_read_config, pci_read_config_method),
174 DEVMETHOD(pci_write_config, pci_write_config_method),
175 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
176 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
177 DEVMETHOD(pci_enable_io, pci_enable_io_method),
178 DEVMETHOD(pci_disable_io, pci_disable_io_method),
179 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
180 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
181 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
182 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
183 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
184 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
185 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
186 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
187 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
188 DEVMETHOD(pci_release_msi, pci_release_msi_method),
189 DEVMETHOD(pci_msi_count, pci_msi_count_method),
190 DEVMETHOD(pci_msix_count, pci_msix_count_method),
195 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
197 static devclass_t pci_devclass;
198 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
199 MODULE_VERSION(pci, 1);
201 static char *pci_vendordata;
202 static size_t pci_vendordata_size;
205 uint32_t devid; /* Vendor/device of the card */
207 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
208 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
209 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
210 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
211 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
216 static const struct pci_quirk pci_quirks[] = {
217 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
218 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
219 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
220 /* As does the Serverworks OSB4 (the SMBus mapping register) */
221 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
224 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
225 * or the CMIC-SL (AKA ServerWorks GC_LE).
227 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
228 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
231 * MSI doesn't work on earlier Intel chipsets including
232 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
234 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
246 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
249 * MSI-X allocation doesn't work properly for devices passed through
250 * by VMware up to at least ESXi 5.1.
252 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
253 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
256 * Some virtualization environments emulate an older chipset
257 * but support MSI just fine. QEMU uses the Intel 82440.
259 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
262 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
263 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
264 * It prevents us from attaching hpet(4) when the bit is unset.
265 * Note this quirk only affects SB600 revision A13 and earlier.
266 * For SB600 A21 and later, firmware must set the bit to hide it.
267 * For SB700 and later, it is unused and hardcoded to zero.
269 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
274 /* map register information */
275 #define PCI_MAPMEM 0x01 /* memory map */
276 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
277 #define PCI_MAPPORT 0x04 /* port map */
279 struct devlist pci_devq;
280 uint32_t pci_generation;
281 uint32_t pci_numdevs = 0;
282 static int pcie_chipset, pcix_chipset;
285 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
287 static int pci_enable_io_modes = 1;
288 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
289 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
290 &pci_enable_io_modes, 1,
291 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
292 enable these bits correctly. We'd like to do this all the time, but there\n\
293 are some peripherals that this causes problems with.");
295 static int pci_do_realloc_bars = 0;
296 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
297 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
298 &pci_do_realloc_bars, 0,
299 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
301 static int pci_do_power_nodriver = 0;
302 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
303 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
304 &pci_do_power_nodriver, 0,
305 "Place a function into D3 state when no driver attaches to it. 0 means\n\
306 disable. 1 means conservatively place devices into D3 state. 2 means\n\
307 agressively place devices into D3 state. 3 means put absolutely everything\n\
310 int pci_do_power_resume = 1;
311 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
312 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
313 &pci_do_power_resume, 1,
314 "Transition from D3 -> D0 on resume.");
316 int pci_do_power_suspend = 1;
317 TUNABLE_INT("hw.pci.do_power_suspend", &pci_do_power_suspend);
318 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RW,
319 &pci_do_power_suspend, 1,
320 "Transition from D0 -> D3 on suspend.");
322 static int pci_do_msi = 1;
323 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
324 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
325 "Enable support for MSI interrupts");
327 static int pci_do_msix = 1;
328 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
329 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
330 "Enable support for MSI-X interrupts");
332 static int pci_honor_msi_blacklist = 1;
333 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
334 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
335 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
337 #if defined(__i386__) || defined(__amd64__)
338 static int pci_usb_takeover = 1;
340 static int pci_usb_takeover = 0;
342 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
343 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
344 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
345 Disable this if you depend on BIOS emulation of USB devices, that is\n\
346 you use USB devices (like keyboard or mouse) but do not load USB drivers");
349 pci_has_quirk(uint32_t devid, int quirk)
351 const struct pci_quirk *q;
353 for (q = &pci_quirks[0]; q->devid; q++) {
354 if (q->devid == devid && q->type == quirk)
360 /* Find a device_t by bus/slot/function in domain 0 */
363 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
366 return (pci_find_dbsf(0, bus, slot, func));
369 /* Find a device_t by domain/bus/slot/function */
372 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
374 struct pci_devinfo *dinfo;
376 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
377 if ((dinfo->cfg.domain == domain) &&
378 (dinfo->cfg.bus == bus) &&
379 (dinfo->cfg.slot == slot) &&
380 (dinfo->cfg.func == func)) {
381 return (dinfo->cfg.dev);
388 /* Find a device_t by vendor/device ID */
391 pci_find_device(uint16_t vendor, uint16_t device)
393 struct pci_devinfo *dinfo;
395 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
396 if ((dinfo->cfg.vendor == vendor) &&
397 (dinfo->cfg.device == device)) {
398 return (dinfo->cfg.dev);
406 pci_find_class(uint8_t class, uint8_t subclass)
408 struct pci_devinfo *dinfo;
410 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
411 if (dinfo->cfg.baseclass == class &&
412 dinfo->cfg.subclass == subclass) {
413 return (dinfo->cfg.dev);
421 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
426 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
429 retval += vprintf(fmt, ap);
434 /* return base address of memory or port map */
437 pci_mapbase(uint64_t mapreg)
440 if (PCI_BAR_MEM(mapreg))
441 return (mapreg & PCIM_BAR_MEM_BASE);
443 return (mapreg & PCIM_BAR_IO_BASE);
446 /* return map type of memory or port map */
449 pci_maptype(uint64_t mapreg)
452 if (PCI_BAR_IO(mapreg))
454 if (mapreg & PCIM_BAR_MEM_PREFETCH)
455 return ("Prefetchable Memory");
/* return log2 of map size decoded for memory or port map */
static int
pci_mapsize(uint64_t testval)
{
	int ln2size;

	/*
	 * 'testval' is the value read back after writing all-ones to
	 * the BAR; the position of the lowest set address bit gives
	 * log2 of the decoded window size.
	 */
	testval = pci_mapbase(testval);
	ln2size = 0;
	if (testval != 0) {
		while ((testval & 1) == 0)
		{
			ln2size++;
			testval >>= 1;
		}
	}
	return (ln2size);
}
478 /* return base address of device ROM */
481 pci_rombase(uint64_t mapreg)
484 return (mapreg & PCIM_BIOS_ADDR_MASK);
/* return log2 of map size decided for device ROM */
static int
pci_romsize(uint64_t testval)
{
	int ln2size;

	/* Same lowest-set-bit sizing scheme as pci_mapsize(). */
	testval = pci_rombase(testval);
	ln2size = 0;
	if (testval != 0) {
		while ((testval & 1) == 0)
		{
			ln2size++;
			testval >>= 1;
		}
	}
	return (ln2size);
}
506 /* return log2 of address range supported by map register */
509 pci_maprange(uint64_t mapreg)
513 if (PCI_BAR_IO(mapreg))
516 switch (mapreg & PCIM_BAR_MEM_TYPE) {
517 case PCIM_BAR_MEM_32:
520 case PCIM_BAR_MEM_1MB:
523 case PCIM_BAR_MEM_64:
530 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
533 pci_fixancient(pcicfgregs *cfg)
535 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
538 /* PCI to PCI bridges use header type 1 */
539 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
540 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
543 /* extract header type specific config data */
546 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
548 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
549 switch (cfg->hdrtype & PCIM_HDRTYPE) {
550 case PCIM_HDRTYPE_NORMAL:
551 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
552 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
553 cfg->nummaps = PCI_MAXMAPS_0;
555 case PCIM_HDRTYPE_BRIDGE:
556 cfg->nummaps = PCI_MAXMAPS_1;
558 case PCIM_HDRTYPE_CARDBUS:
559 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
560 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
561 cfg->nummaps = PCI_MAXMAPS_2;
567 /* read configuration header into pcicfgregs structure */
/*
 * Probe the function at bus b / slot s / function f (domain d,
 * presumably -- the domain assignment line is not visible here) via
 * parent bridge 'pcib'.  If a device responds, allocate a pci_devinfo
 * of 'size' bytes, snapshot its config header, parse capabilities and
 * link it onto the global device list; devlist_entry stays NULL (and
 * is returned) when nothing responds.
 * NOTE(review): this extracted listing is missing interior lines (the
 * embedded original line numbers jump), so only comments are added
 * here; the code text is left exactly as found.
 */
569 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
571 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
572 pcicfgregs *cfg = NULL;
573 struct pci_devinfo *devlist_entry;
574 struct devlist *devlist_head;
576 devlist_head = &pci_devq;
578 devlist_entry = NULL;
/* An all-ones DEVVENDOR read means no device at this address. */
580 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
581 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
582 if (devlist_entry == NULL)
585 cfg = &devlist_entry->cfg;
/* Snapshot the standard configuration-space header registers. */
591 cfg->vendor = REG(PCIR_VENDOR, 2);
592 cfg->device = REG(PCIR_DEVICE, 2);
593 cfg->cmdreg = REG(PCIR_COMMAND, 2);
594 cfg->statreg = REG(PCIR_STATUS, 2);
595 cfg->baseclass = REG(PCIR_CLASS, 1);
596 cfg->subclass = REG(PCIR_SUBCLASS, 1);
597 cfg->progif = REG(PCIR_PROGIF, 1);
598 cfg->revid = REG(PCIR_REVID, 1);
599 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
600 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
601 cfg->lattimer = REG(PCIR_LATTIMER, 1);
602 cfg->intpin = REG(PCIR_INTPIN, 1);
603 cfg->intline = REG(PCIR_INTLINE, 1);
605 cfg->mingnt = REG(PCIR_MINGNT, 1);
606 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Separate the multi-function flag from the header type byte. */
608 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
609 cfg->hdrtype &= ~PCIM_MFDEV;
610 STAILQ_INIT(&cfg->maps);
/* Header-type-specific fields: subvendor IDs, number of BARs. */
613 pci_hdrtypedata(pcib, b, s, f, cfg);
615 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
616 pci_read_cap(pcib, cfg);
/* Publish the device and fill the pciio(4) conf snapshot from cfg. */
618 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
620 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
621 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
622 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
623 devlist_entry->conf.pc_sel.pc_func = cfg->func;
624 devlist_entry->conf.pc_hdr = cfg->hdrtype;
626 devlist_entry->conf.pc_subvendor = cfg->subvendor;
627 devlist_entry->conf.pc_subdevice = cfg->subdevice;
628 devlist_entry->conf.pc_vendor = cfg->vendor;
629 devlist_entry->conf.pc_device = cfg->device;
631 devlist_entry->conf.pc_class = cfg->baseclass;
632 devlist_entry->conf.pc_subclass = cfg->subclass;
633 devlist_entry->conf.pc_progif = cfg->progif;
634 devlist_entry->conf.pc_revid = cfg->revid;
639 return (devlist_entry);
644 pci_read_cap(device_t pcib, pcicfgregs *cfg)
/*
 * Walk the device's PCI capability list and record the capabilities the
 * bus code cares about: power management (pp), HyperTransport (ht),
 * MSI, MSI-X, VPD location, bridge subvendor IDs, and PCI-X /
 * PCI-express chipset presence.
 * NOTE(review): this extracted listing is missing interior lines (the
 * embedded original line numbers jump), so only comments are added
 * here; the code text is left exactly as found.
 */
646 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
647 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
648 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
652 int ptr, nextptr, ptrptr;
/* Pick the header-type-specific register holding the cap list head. */
654 switch (cfg->hdrtype & PCIM_HDRTYPE) {
655 case PCIM_HDRTYPE_NORMAL:
656 case PCIM_HDRTYPE_BRIDGE:
657 ptrptr = PCIR_CAP_PTR;
659 case PCIM_HDRTYPE_CARDBUS:
660 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
663 return; /* no extended capabilities support */
665 nextptr = REG(ptrptr, 1); /* sanity check? */
668 * Read capability entries.
670 while (nextptr != 0) {
673 printf("illegal PCI extended capability offset %d\n",
677 /* Find the next entry */
679 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
681 /* Process this entry */
682 switch (REG(ptr + PCICAP_ID, 1)) {
683 case PCIY_PMG: /* PCI power management */
684 if (cfg->pp.pp_cap == 0) {
685 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
686 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
687 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
688 if ((nextptr - ptr) > PCIR_POWER_DATA)
689 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
692 case PCIY_HT: /* HyperTransport */
693 /* Determine HT-specific capability type. */
694 val = REG(ptr + PCIR_HT_COMMAND, 2);
696 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
697 cfg->ht.ht_slave = ptr;
699 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
700 switch (val & PCIM_HTCMD_CAP_MASK) {
701 case PCIM_HTCAP_MSI_MAPPING:
702 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
703 /* Sanity check the mapping window. */
704 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
707 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
709 if (addr != MSI_INTEL_ADDR_BASE)
711 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
712 cfg->domain, cfg->bus,
713 cfg->slot, cfg->func,
716 addr = MSI_INTEL_ADDR_BASE;
718 cfg->ht.ht_msimap = ptr;
719 cfg->ht.ht_msictrl = val;
720 cfg->ht.ht_msiaddr = addr;
725 case PCIY_MSI: /* PCI MSI */
726 cfg->msi.msi_location = ptr;
727 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* Message count is encoded as a power of two in the MMC field. */
728 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
729 PCIM_MSICTRL_MMC_MASK)>>1);
731 case PCIY_MSIX: /* PCI MSI-X */
732 cfg->msix.msix_location = ptr;
733 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
734 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
735 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA registers encode a BAR index plus an offset. */
736 val = REG(ptr + PCIR_MSIX_TABLE, 4);
737 cfg->msix.msix_table_bar = PCIR_BAR(val &
739 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
740 val = REG(ptr + PCIR_MSIX_PBA, 4);
741 cfg->msix.msix_pba_bar = PCIR_BAR(val &
743 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
745 case PCIY_VPD: /* PCI Vital Product Data */
746 cfg->vpd.vpd_reg = ptr;
749 /* Should always be true. */
750 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
751 PCIM_HDRTYPE_BRIDGE) {
752 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
753 cfg->subvendor = val & 0xffff;
754 cfg->subdevice = val >> 16;
757 case PCIY_PCIX: /* PCI-X */
759 * Assume we have a PCI-X chipset if we have
760 * at least one PCI-PCI bridge with a PCI-X
761 * capability. Note that some systems with
762 * PCI-express or HT chipsets might match on
763 * this check as well.
765 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
769 case PCIY_EXPRESS: /* PCI-express */
771 * Assume we have a PCI-express chipset if we have
772 * at least one PCI-express device.
781 #if defined(__powerpc__)
783 * Enable the MSI mapping window for all HyperTransport
784 * slaves. PCI-PCI bridges have their windows enabled via
787 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
788 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
790 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
791 cfg->domain, cfg->bus, cfg->slot, cfg->func);
792 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
793 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
797 /* REG and WREG use carry through to next functions */
801 * PCI Vital Product Data
804 #define PCI_VPD_TIMEOUT 1000000
807 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
809 int count = PCI_VPD_TIMEOUT;
811 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
813 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
815 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
818 DELAY(1); /* limit looping */
820 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
827 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
829 int count = PCI_VPD_TIMEOUT;
831 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
833 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
834 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
835 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
838 DELAY(1); /* limit looping */
845 #undef PCI_VPD_TIMEOUT
847 struct vpd_readstate {
857 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
862 if (vrs->bytesinval == 0) {
863 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
865 vrs->val = le32toh(reg);
867 byte = vrs->val & 0xff;
870 vrs->val = vrs->val >> 8;
871 byte = vrs->val & 0xff;
881 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
/*
 * Read and parse the device's Vital Product Data into cfg->vpd: the
 * identifier string (vpd_ident), the read-only VPD-R keyword array
 * (vpd_ros) and the writable VPD-W keyword array (vpd_w).  Implemented
 * as a byte-at-a-time state machine over vpd_nextbyte(); on checksum
 * or I/O failure the partially built arrays are freed.
 * NOTE(review): this extracted listing is missing interior lines (the
 * embedded original line numbers jump), so only comments are added
 * here; the code text is left exactly as found.
 */
883 struct vpd_readstate vrs;
888 int alloc, off; /* alloc/off for RO/W arrays */
894 /* init vpd reader */
902 name = remain = i = 0; /* shut up stupid gcc */
903 alloc = off = 0; /* shut up stupid gcc */
904 dflen = 0; /* shut up stupid gcc */
907 if (vpd_nextbyte(&vrs, &byte)) {
912 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
913 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
914 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
/* State 0: decode a resource item header (small or large format). */
917 case 0: /* item name */
919 if (vpd_nextbyte(&vrs, &byte2)) {
924 if (vpd_nextbyte(&vrs, &byte2)) {
928 remain |= byte2 << 8;
929 if (remain > (0x7f*4 - vrs.off)) {
932 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
933 cfg->domain, cfg->bus, cfg->slot,
939 name = (byte >> 3) & 0xf;
/* Dispatch on the resource name to the matching parse state. */
942 case 0x2: /* String */
943 cfg->vpd.vpd_ident = malloc(remain + 1,
951 case 0x10: /* VPD-R */
954 cfg->vpd.vpd_ros = malloc(alloc *
955 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
959 case 0x11: /* VPD-W */
962 cfg->vpd.vpd_w = malloc(alloc *
963 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
967 default: /* Invalid data, abort */
973 case 1: /* Identifier String */
974 cfg->vpd.vpd_ident[i++] = byte;
977 cfg->vpd.vpd_ident[i] = '\0';
982 case 2: /* VPD-R Keyword Header */
/* Grow the RO array geometrically when it fills up. */
984 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
985 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
986 M_DEVBUF, M_WAITOK | M_ZERO);
988 cfg->vpd.vpd_ros[off].keyword[0] = byte;
989 if (vpd_nextbyte(&vrs, &byte2)) {
993 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
994 if (vpd_nextbyte(&vrs, &byte2)) {
1000 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1003 * if this happens, we can't trust the rest
1007 "pci%d:%d:%d:%d: bad keyword length: %d\n",
1008 cfg->domain, cfg->bus, cfg->slot,
1013 } else if (dflen == 0) {
1014 cfg->vpd.vpd_ros[off].value = malloc(1 *
1015 sizeof(*cfg->vpd.vpd_ros[off].value),
1016 M_DEVBUF, M_WAITOK);
1017 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1019 cfg->vpd.vpd_ros[off].value = malloc(
1021 sizeof(*cfg->vpd.vpd_ros[off].value),
1022 M_DEVBUF, M_WAITOK);
1025 /* keep in sync w/ state 3's transistions */
1026 if (dflen == 0 && remain == 0)
1028 else if (dflen == 0)
1034 case 3: /* VPD-R Keyword Value */
1035 cfg->vpd.vpd_ros[off].value[i++] = byte;
1036 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1037 "RV", 2) == 0 && cksumvalid == -1) {
1043 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1044 cfg->domain, cfg->bus,
1045 cfg->slot, cfg->func,
1054 /* keep in sync w/ state 2's transistions */
1056 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1057 if (dflen == 0 && remain == 0) {
/* End of VPD-R: shrink the array to its final size. */
1058 cfg->vpd.vpd_rocnt = off;
1059 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1060 off * sizeof(*cfg->vpd.vpd_ros),
1061 M_DEVBUF, M_WAITOK | M_ZERO);
1063 } else if (dflen == 0)
1073 case 5: /* VPD-W Keyword Header */
1075 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1076 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1077 M_DEVBUF, M_WAITOK | M_ZERO);
1079 cfg->vpd.vpd_w[off].keyword[0] = byte;
1080 if (vpd_nextbyte(&vrs, &byte2)) {
1084 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1085 if (vpd_nextbyte(&vrs, &byte2)) {
1089 cfg->vpd.vpd_w[off].len = dflen = byte2;
1090 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1091 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1092 sizeof(*cfg->vpd.vpd_w[off].value),
1093 M_DEVBUF, M_WAITOK);
1096 /* keep in sync w/ state 6's transistions */
1097 if (dflen == 0 && remain == 0)
1099 else if (dflen == 0)
1105 case 6: /* VPD-W Keyword Value */
1106 cfg->vpd.vpd_w[off].value[i++] = byte;
1109 /* keep in sync w/ state 5's transistions */
1111 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1112 if (dflen == 0 && remain == 0) {
1113 cfg->vpd.vpd_wcnt = off;
1114 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1115 off * sizeof(*cfg->vpd.vpd_w),
1116 M_DEVBUF, M_WAITOK | M_ZERO);
1118 } else if (dflen == 0)
1123 printf("pci%d:%d:%d:%d: invalid state: %d\n",
1124 cfg->domain, cfg->bus, cfg->slot, cfg->func,
/* Parsing done: free partial results on checksum/state or I/O error. */
1131 if (cksumvalid == 0 || state < -1) {
1132 /* read-only data bad, clean up */
1133 if (cfg->vpd.vpd_ros != NULL) {
1134 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1135 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1136 free(cfg->vpd.vpd_ros, M_DEVBUF);
1137 cfg->vpd.vpd_ros = NULL;
1141 /* I/O error, clean up */
1142 printf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1143 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1144 if (cfg->vpd.vpd_ident != NULL) {
1145 free(cfg->vpd.vpd_ident, M_DEVBUF);
1146 cfg->vpd.vpd_ident = NULL;
1148 if (cfg->vpd.vpd_w != NULL) {
1149 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1150 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1151 free(cfg->vpd.vpd_w, M_DEVBUF);
1152 cfg->vpd.vpd_w = NULL;
1155 cfg->vpd.vpd_cached = 1;
1161 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1163 struct pci_devinfo *dinfo = device_get_ivars(child);
1164 pcicfgregs *cfg = &dinfo->cfg;
1166 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1167 pci_read_vpd(device_get_parent(dev), cfg);
1169 *identptr = cfg->vpd.vpd_ident;
1171 if (*identptr == NULL)
1178 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1181 struct pci_devinfo *dinfo = device_get_ivars(child);
1182 pcicfgregs *cfg = &dinfo->cfg;
1185 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1186 pci_read_vpd(device_get_parent(dev), cfg);
1188 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1189 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1190 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1191 *vptr = cfg->vpd.vpd_ros[i].value;
1200 * Find the requested extended capability and return the offset in
1201 * configuration space via the pointer provided. The function returns
1202 * 0 on success and error code otherwise.
1205 pci_find_extcap_method(device_t dev, device_t child, int capability,
1208 struct pci_devinfo *dinfo = device_get_ivars(child);
1209 pcicfgregs *cfg = &dinfo->cfg;
1214 * Check the CAP_LIST bit of the PCI status register first.
1216 status = pci_read_config(child, PCIR_STATUS, 2);
1217 if (!(status & PCIM_STATUS_CAPPRESENT))
1221 * Determine the start pointer of the capabilities list.
1223 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1224 case PCIM_HDRTYPE_NORMAL:
1225 case PCIM_HDRTYPE_BRIDGE:
1228 case PCIM_HDRTYPE_CARDBUS:
1229 ptr = PCIR_CAP_PTR_2;
1233 return (ENXIO); /* no extended capabilities support */
1235 ptr = pci_read_config(child, ptr, 1);
1238 * Traverse the capabilities list.
1241 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1246 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1253 * Support for MSI-X message interrupts.
1256 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1258 struct pci_devinfo *dinfo = device_get_ivars(dev);
1259 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1262 KASSERT(msix->msix_table_len > index, ("bogus index"));
1263 offset = msix->msix_table_offset + index * 16;
1264 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1265 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1266 bus_write_4(msix->msix_table_res, offset + 8, data);
1268 /* Enable MSI -> HT mapping. */
1269 pci_ht_map_msi(dev, address);
1273 pci_mask_msix(device_t dev, u_int index)
1275 struct pci_devinfo *dinfo = device_get_ivars(dev);
1276 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1277 uint32_t offset, val;
1279 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1280 offset = msix->msix_table_offset + index * 16 + 12;
1281 val = bus_read_4(msix->msix_table_res, offset);
1282 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1283 val |= PCIM_MSIX_VCTRL_MASK;
1284 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Unmask the MSI-X vector at 'index': clear PCIM_MSIX_VCTRL_MASK in
 * the entry's vector-control dword (table entry offset + 12).  The
 * bus write is skipped when the mask bit is already clear.
 */
1289 pci_unmask_msix(device_t dev, u_int index)
1291 struct pci_devinfo *dinfo = device_get_ivars(dev);
1292 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1293 uint32_t offset, val;
1295 KASSERT(msix->msix_table_len > index, ("bogus index"));
1296 offset = msix->msix_table_offset + index * 16 + 12;
1297 val = bus_read_4(msix->msix_table_res, offset);
1298 if (val & PCIM_MSIX_VCTRL_MASK) {
1299 val &= ~PCIM_MSIX_VCTRL_MASK;
1300 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return (non-zero if set) the pending bit for the MSI-X vector at
 * 'index' from the Pending Bit Array: one bit per vector, 32 vectors
 * packed into each 4-byte word.
 */
1305 pci_pending_msix(device_t dev, u_int index)
1307 struct pci_devinfo *dinfo = device_get_ivars(dev);
1308 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1309 uint32_t offset, bit;
1311 KASSERT(msix->msix_table_len > index, ("bogus index"));
1312 offset = msix->msix_pba_offset + (index / 32) * 4;
1313 bit = 1 << index % 32;
1314 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1318 * Restore MSI-X registers and table during resume. If MSI-X is
1319 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * Re-program the hardware MSI-X table from the driver's virtual table
 * after resume: mask every vector, re-program and unmask only the
 * entries that have a vector assigned and at least one handler, then
 * restore the saved MSI-X control register.
 */
1323 pci_resume_msix(device_t dev)
1325 struct pci_devinfo *dinfo = device_get_ivars(dev);
1326 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1327 struct msix_table_entry *mte;
1328 struct msix_vector *mv;
1331 if (msix->msix_alloc > 0) {
1332 /* First, mask all vectors. */
1333 for (i = 0; i < msix->msix_msgnum; i++)
1334 pci_mask_msix(dev, i);
1336 /* Second, program any messages with at least one handler. */
1337 for (i = 0; i < msix->msix_table_len; i++) {
1338 mte = &msix->msix_table[i];
1339 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is a 1-based index into msix_vectors[]. */
1341 mv = &msix->msix_vectors[mte->mte_vector - 1];
1342 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1343 pci_unmask_msix(dev, i);
1346 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1347 msix->msix_ctrl, 2);
1351 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1352 * returned in *count. After this function returns, each message will be
1353 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * Bus method backing pci_alloc_msix(): validate preconditions (rid 0
 * free, no messages already allocated, MSI-X present and not
 * blacklisted, table/PBA BARs mapped and active), then allocate up to
 * *count vectors from the parent bridge, publish them as SYS_RES_IRQ
 * rids starting at 1, build the virtual vector/table arrays, and
 * enable MSI-X in the control register.
 */
1356 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1358 struct pci_devinfo *dinfo = device_get_ivars(child);
1359 pcicfgregs *cfg = &dinfo->cfg;
1360 struct resource_list_entry *rle;
1361 int actual, error, i, irq, max;
1363 /* Don't let count == 0 get us into trouble. */
1367 /* If rid 0 is allocated, then fail. */
1368 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1369 if (rle != NULL && rle->res != NULL)
1372 /* Already have allocated messages? */
1373 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1376 /* If MSI-X is blacklisted for this system, fail. */
1377 if (pci_msix_blacklisted())
1380 /* MSI-X capability present? */
1381 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1384 /* Make sure the appropriate BARs are mapped. */
1385 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1386 cfg->msix.msix_table_bar);
1387 if (rle == NULL || rle->res == NULL ||
1388 !(rman_get_flags(rle->res) & RF_ACTIVE))
1390 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1391 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1392 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1393 cfg->msix.msix_pba_bar);
1394 if (rle == NULL || rle->res == NULL ||
1395 !(rman_get_flags(rle->res) & RF_ACTIVE))
1398 cfg->msix.msix_pba_res = rle->res;
1401 device_printf(child,
1402 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1403 *count, cfg->msix.msix_msgnum);
1404 max = min(*count, cfg->msix.msix_msgnum);
1405 for (i = 0; i < max; i++) {
1406 /* Allocate a message. */
1407 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
/* SYS_RES_IRQ rids for MSI-X start at 1 (rid 0 is the INTx IRQ). */
1413 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1419 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1421 device_printf(child, "using IRQ %lu for MSI-X\n",
1427 * Be fancy and try to print contiguous runs of
1428 * IRQ values as ranges. 'irq' is the previous IRQ.
1429 * 'run' is true if we are in a range.
1431 device_printf(child, "using IRQs %lu", rle->start);
1434 for (i = 1; i < actual; i++) {
1435 rle = resource_list_find(&dinfo->resources,
1436 SYS_RES_IRQ, i + 1);
1438 /* Still in a run? */
1439 if (rle->start == irq + 1) {
1445 /* Finish previous range. */
1451 /* Start new range. */
1452 printf(",%lu", rle->start);
1456 /* Unfinished range? */
1459 printf(" for MSI-X\n");
1463 /* Mask all vectors. */
1464 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1465 pci_mask_msix(child, i);
1467 /* Allocate and initialize vector data and virtual table. */
1468 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1469 M_DEVBUF, M_WAITOK | M_ZERO);
1470 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1471 M_DEVBUF, M_WAITOK | M_ZERO);
1472 for (i = 0; i < actual; i++) {
1473 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1474 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1475 cfg->msix.msix_table[i].mte_vector = i + 1;
1478 /* Update control register to enable MSI-X. */
1479 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1480 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1481 cfg->msix.msix_ctrl, 2);
1483 /* Update counts of alloc'd messages. */
1484 cfg->msix.msix_alloc = actual;
1485 cfg->msix.msix_table_len = actual;
1491 * By default, pci_alloc_msix() will assign the allocated IRQ
1492 * resources consecutively to the first N messages in the MSI-X table.
1493 * However, device drivers may want to use different layouts if they
1494 * either receive fewer messages than they asked for, or they wish to
1495 * populate the MSI-X table sparsely. This method allows the driver
1496 * to specify what layout it wants. It must be called after a
1497 * successful pci_alloc_msix() but before any of the associated
1498 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1500 * The 'vectors' array contains 'count' message vectors. The array
1501 * maps directly to the MSI-X table in that index 0 in the array
1502 * specifies the vector for the first message in the MSI-X table, etc.
1503 * The vector value in each array index can either be 0 to indicate
1504 * that no vector should be assigned to a message slot, or it can be a
1505 * number from 1 to N (where N is the count returned from a
1506 * successful call to pci_alloc_msix()) to indicate which message
1507 * vector (IRQ) to be used for the corresponding message.
1509 * On successful return, each message with a non-zero vector will have
1510 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1511 * 1. Additionally, if any of the IRQs allocated via the previous
1512 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1513 * will be freed back to the system automatically.
1515 * For example, suppose a driver has a MSI-X table with 6 messages and
1516 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1517 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1518 * C. After the call to pci_alloc_msix(), the device will be setup to
1519 * have an MSI-X table of ABC--- (where - means no vector assigned).
1520 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1521 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1522 * be freed back to the system. This device will also have valid
1523 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1525 * In any case, the SYS_RES_IRQ rid X will always map to the message
1526 * at MSI-X table index X - 1 and will only be valid if a vector is
1527 * assigned to that table entry.
/*
 * Bus method backing pci_remap_msix(): replace the virtual MSI-X
 * table with the caller-supplied 'vectors' layout (see the long
 * comment above for the contract).  Vector values are 1-based indices
 * into msix_vectors[]; 0 means "no vector for this slot".  Unused
 * vectors are released back to the parent bridge and the vectors
 * array is shrunk accordingly.
 */
1530 pci_remap_msix_method(device_t dev, device_t child, int count,
1531 const u_int *vectors)
1533 struct pci_devinfo *dinfo = device_get_ivars(child);
1534 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1535 struct resource_list_entry *rle;
1536 int i, irq, j, *used;
1539 * Have to have at least one message in the table but the
1540 * table can't be bigger than the actual MSI-X table in the
1543 if (count == 0 || count > msix->msix_msgnum)
1546 /* Sanity check the vectors. */
1547 for (i = 0; i < count; i++)
1548 if (vectors[i] > msix->msix_alloc)
1552 * Make sure there aren't any holes in the vectors to be used.
1553 * It's a big pain to support it, and it doesn't really make
1554 * sense anyway. Also, at least one vector must be used.
1556 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1558 for (i = 0; i < count; i++)
1559 if (vectors[i] != 0)
1560 used[vectors[i] - 1] = 1;
1561 for (i = 0; i < msix->msix_alloc - 1; i++)
1562 if (used[i] == 0 && used[i + 1] == 1) {
1563 free(used, M_DEVBUF);
1567 free(used, M_DEVBUF);
1571 /* Make sure none of the resources are allocated. */
1572 for (i = 0; i < msix->msix_table_len; i++) {
1573 if (msix->msix_table[i].mte_vector == 0)
1575 if (msix->msix_table[i].mte_handlers > 0)
1577 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1578 KASSERT(rle != NULL, ("missing resource"));
1579 if (rle->res != NULL)
1583 /* Free the existing resource list entries. */
1584 for (i = 0; i < msix->msix_table_len; i++) {
1585 if (msix->msix_table[i].mte_vector == 0)
1587 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1591 * Build the new virtual table keeping track of which vectors are
1594 free(msix->msix_table, M_DEVBUF);
1595 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1596 M_DEVBUF, M_WAITOK | M_ZERO);
1597 for (i = 0; i < count; i++)
1598 msix->msix_table[i].mte_vector = vectors[i];
1599 msix->msix_table_len = count;
1601 /* Free any unused IRQs and resize the vectors array if necessary. */
1602 j = msix->msix_alloc - 1;
1604 struct msix_vector *vec;
1606 while (used[j] == 0) {
1607 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1608 msix->msix_vectors[j].mv_irq);
1611 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1613 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1615 free(msix->msix_vectors, M_DEVBUF);
1616 msix->msix_vectors = vec;
1617 msix->msix_alloc = j + 1;
1619 free(used, M_DEVBUF);
1621 /* Map the IRQs onto the rids. */
1622 for (i = 0; i < count; i++) {
1623 if (vectors[i] == 0)
/*
 * NOTE(review): vectors[i] is 1-based (cf. the 'vectors[i] - 1'
 * indexing in the hole check above and in pci_resume_msix), so
 * indexing msix_vectors[] by vectors[i] without subtracting 1
 * looks like an off-by-one that can read one past the array when
 * vectors[i] == msix_alloc — verify against the upstream source.
 */
1625 irq = msix->msix_vectors[vectors[i]].mv_irq;
1626 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1631 device_printf(child, "Remapped MSI-X IRQs as: ");
1632 for (i = 0; i < count; i++) {
1635 if (vectors[i] == 0)
/* NOTE(review): same suspected missing '- 1' as above. */
1639 msix->msix_vectors[vectors[i]].mv_irq);
/*
 * Release all MSI-X messages held by 'child': refuse if any vector
 * still has handlers or an allocated SYS_RES_IRQ resource, otherwise
 * disable MSI-X in the control register, delete the resource list
 * entries, hand the IRQs back to the parent bridge, and free the
 * virtual table and vector arrays.
 */
1648 pci_release_msix(device_t dev, device_t child)
1650 struct pci_devinfo *dinfo = device_get_ivars(child);
1651 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1652 struct resource_list_entry *rle;
1655 /* Do we have any messages to release? */
1656 if (msix->msix_alloc == 0)
1659 /* Make sure none of the resources are allocated. */
1660 for (i = 0; i < msix->msix_table_len; i++) {
1661 if (msix->msix_table[i].mte_vector == 0)
1663 if (msix->msix_table[i].mte_handlers > 0)
1665 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1666 KASSERT(rle != NULL, ("missing resource"));
1667 if (rle->res != NULL)
1671 /* Update control register to disable MSI-X. */
1672 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1673 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1674 msix->msix_ctrl, 2);
1676 /* Free the resource list entries. */
1677 for (i = 0; i < msix->msix_table_len; i++) {
1678 if (msix->msix_table[i].mte_vector == 0)
1680 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1682 free(msix->msix_table, M_DEVBUF);
1683 msix->msix_table_len = 0;
1685 /* Release the IRQs. */
1686 for (i = 0; i < msix->msix_alloc; i++)
1687 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1688 msix->msix_vectors[i].mv_irq);
1689 free(msix->msix_vectors, M_DEVBUF);
1690 msix->msix_alloc = 0;
1695 * Return the max supported MSI-X messages this device supports.
1696 * Basically, assuming the MD code can alloc messages, this function
1697 * should return the maximum value that pci_alloc_msix() can return.
1698 * Thus, it is subject to the tunables, etc.
/*
 * Report the device's MSI-X message count, or (implicitly) nothing
 * when MSI-X is disabled by tunable or the capability is absent.
 */
1701 pci_msix_count_method(device_t dev, device_t child)
1703 struct pci_devinfo *dinfo = device_get_ivars(child);
1704 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1706 if (pci_do_msix && msix->msix_location != 0)
1707 return (msix->msix_msgnum);
1712 * HyperTransport MSI mapping control
/*
 * Toggle the HyperTransport MSI address-window mapping: enable it
 * when a non-zero MSI address falls in the same 1 MB window (top 44
 * bits, i.e. addr >> 20) as the HT mapping address, disable it when
 * MSI is being torn down (addr == 0).  No-op for devices without an
 * HT MSI mapping capability (ht_msictrl state drives both branches).
 */
1715 pci_ht_map_msi(device_t dev, uint64_t addr)
1717 struct pci_devinfo *dinfo = device_get_ivars(dev);
1718 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1723 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1724 ht->ht_msiaddr >> 20 == addr >> 20) {
1725 /* Enable MSI -> HT mapping. */
1726 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1727 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1731 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1732 /* Disable MSI -> HT mapping. */
1733 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1734 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCI Express Max_Read_Request_Size in bytes, decoded from
 * the Device Control register (field encoding n maps to 128 << n, and
 * 1 << (n + 7) is the same value).  Assumes the field has been
 * shifted down to bits 0-2 before the return — the shift line is not
 * visible in this dump; confirm against the full source.
 */
1740 pci_get_max_read_req(device_t dev)
1745 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) != 0)
1747 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1748 val &= PCIEM_CTL_MAX_READ_REQUEST;
1750 return (1 << (val + 7));
/*
 * Program the PCI Express Max_Read_Request_Size.  'size' is rounded
 * down to a power of two via fls(), then encoded into bits 14:12 of
 * the Device Control register (fls(size) - 8 yields encoding 0 for
 * 128 bytes, 1 for 256, ...).
 */
1754 pci_set_max_read_req(device_t dev, int size)
1759 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) != 0)
1765 size = (1 << (fls(size) - 1));
1766 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1767 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1768 val |= (fls(size) - 8) << 12;
1769 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1774 * Support for MSI message signalled interrupts.
/*
 * Program the MSI capability with the given message address/data and
 * set the MSI enable bit.  64-bit capable devices get the high
 * address dword and use the 64-bit data register offset; 32-bit
 * devices use the 32-bit data register.  Finally update any HT
 * MSI mapping for the new address.
 */
1777 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1779 struct pci_devinfo *dinfo = device_get_ivars(dev);
1780 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1782 /* Write data and address values. */
1783 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1784 address & 0xffffffff, 4);
1785 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1786 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1788 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1791 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1794 /* Enable MSI in the control register. */
1795 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1796 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1799 /* Enable MSI -> HT mapping. */
1800 pci_ht_map_msi(dev, address);
/*
 * Disable MSI for the device: tear down any HT MSI mapping (address
 * 0), then clear the MSI enable bit in the control register.
 */
1804 pci_disable_msi(device_t dev)
1806 struct pci_devinfo *dinfo = device_get_ivars(dev);
1807 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1809 /* Disable MSI -> HT mapping. */
1810 pci_ht_map_msi(dev, 0);
1812 /* Disable MSI in the control register. */
1813 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1814 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1819 * Restore MSI registers during resume. If MSI is enabled then
1820 * restore the data and address registers in addition to the control
/*
 * Restore MSI state after resume: if MSI was enabled, rewrite the
 * saved address (and high dword / 64-bit data offset for 64-bit
 * capable devices) and data registers, then always rewrite the saved
 * control register.
 */
1824 pci_resume_msi(device_t dev)
1826 struct pci_devinfo *dinfo = device_get_ivars(dev);
1827 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1831 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1832 address = msi->msi_addr;
1833 data = msi->msi_data;
1834 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1835 address & 0xffffffff, 4);
1836 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1837 pci_write_config(dev, msi->msi_location +
1838 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1839 pci_write_config(dev, msi->msi_location +
1840 PCIR_MSI_DATA_64BIT, data, 2);
1842 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1845 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Bus method to re-target an already-allocated MSI or MSI-X IRQ
 * (e.g. after an interrupt is moved between CPUs): look the IRQ up
 * among the device's MSI then MSI-X allocations, ask the parent
 * bridge for fresh address/data values via PCIB_MAP_MSI, and
 * re-program the hardware registers with the result.
 */
1850 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1852 struct pci_devinfo *dinfo = device_get_ivars(dev);
1853 pcicfgregs *cfg = &dinfo->cfg;
1854 struct resource_list_entry *rle;
1855 struct msix_table_entry *mte;
1856 struct msix_vector *mv;
1862 * Handle MSI first. We try to find this IRQ among our list
1863 * of MSI IRQs. If we find it, we request updated address and
1864 * data registers and apply the results.
1866 if (cfg->msi.msi_alloc > 0) {
1868 /* If we don't have any active handlers, nothing to do. */
1869 if (cfg->msi.msi_handlers == 0)
1871 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1872 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1874 if (rle->start == irq) {
1875 error = PCIB_MAP_MSI(device_get_parent(bus),
1876 dev, irq, &addr, &data);
/* Disable/re-enable to apply the new address/data atomically. */
1879 pci_disable_msi(dev);
1880 dinfo->cfg.msi.msi_addr = addr;
1881 dinfo->cfg.msi.msi_data = data;
1882 pci_enable_msi(dev, addr, data);
1890 * For MSI-X, we check to see if we have this IRQ. If we do,
1891 * we request the updated mapping info. If that works, we go
1892 * through all the slots that use this IRQ and update them.
1894 if (cfg->msix.msix_alloc > 0) {
1895 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1896 mv = &cfg->msix.msix_vectors[i];
1897 if (mv->mv_irq == irq) {
1898 error = PCIB_MAP_MSI(device_get_parent(bus),
1899 dev, irq, &addr, &data);
1902 mv->mv_address = addr;
/* Re-program every table slot that uses this vector (i + 1). */
1904 for (j = 0; j < cfg->msix.msix_table_len; j++) {
1905 mte = &cfg->msix.msix_table[j];
1906 if (mte->mte_vector != i + 1)
1908 if (mte->mte_handlers == 0)
1910 pci_mask_msix(dev, j);
1911 pci_enable_msix(dev, j, addr, data);
1912 pci_unmask_msix(dev, j);
1923 * Returns true if the specified device is blacklisted because MSI
/*
 * Per-device MSI blacklist check: honored only when the
 * pci_honor_msi_blacklist tunable is set; keyed on the device ID
 * via the PCI_QUIRK_DISABLE_MSI quirk table.
 */
1927 pci_msi_device_blacklisted(device_t dev)
1930 if (!pci_honor_msi_blacklist)
1933 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
1937 * Determine if MSI is blacklisted globally on this system. Currently,
1938 * we just check for blacklisted chipsets as represented by the
1939 * host-PCI bridge at device 0:0:0. In the future, it may become
1940 * necessary to check other system attributes, such as the kenv values
1941 * that give the motherboard manufacturer and model number.
/*
 * System-wide MSI blacklist check, keyed on the host-PCI bridge at
 * 0:0:0.  Non-PCIe/PCI-X chipsets are rejected wholesale, except in
 * virtual machines where an explicit PCI_QUIRK_ENABLE_MSI_VM
 * whitelist entry for the bridge can re-enable MSI.
 */
1944 pci_msi_blacklisted(void)
1948 if (!pci_honor_msi_blacklist)
1951 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1952 if (!(pcie_chipset || pcix_chipset)) {
1953 if (vm_guest != VM_GUEST_NO) {
1955 * Whitelist older chipsets in virtual
1956 * machines known to support MSI.
1958 dev = pci_find_bsf(0, 0, 0);
1960 return (!pci_has_quirk(pci_get_devid(dev),
1961 PCI_QUIRK_ENABLE_MSI_VM));
1966 dev = pci_find_bsf(0, 0, 0);
1968 return (pci_msi_device_blacklisted(dev));
1973 * Returns true if the specified device is blacklisted because MSI-X
1974 * doesn't work. Note that this assumes that if MSI doesn't work,
1975 * MSI-X doesn't either.
/*
 * Per-device MSI-X blacklist check: an explicit
 * PCI_QUIRK_DISABLE_MSIX quirk blacklists the device, and any device
 * blacklisted for MSI is assumed broken for MSI-X too.
 */
1978 pci_msix_device_blacklisted(device_t dev)
1981 if (!pci_honor_msi_blacklist)
1984 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
1987 return (pci_msi_device_blacklisted(dev));
1991 * Determine if MSI-X is blacklisted globally on this system. If MSI
1992 * is blacklisted, assume that MSI-X is as well. Check for additional
1993 * chipsets where MSI works but MSI-X does not.
/*
 * System-wide MSI-X blacklist check: the host bridge at 0:0:0 may
 * carry a PCI_QUIRK_DISABLE_MSIX quirk, and an MSI-blacklisted
 * system is treated as MSI-X-blacklisted as well.
 */
1996 pci_msix_blacklisted(void)
2000 if (!pci_honor_msi_blacklist)
2003 dev = pci_find_bsf(0, 0, 0);
2004 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2005 PCI_QUIRK_DISABLE_MSIX))
2008 return (pci_msi_blacklisted());
2012 * Attempt to allocate *count MSI messages. The actual number allocated is
2013 * returned in *count. After this function returns, each message will be
2014 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * Bus method backing pci_alloc_msi(): validate preconditions (rid 0
 * free, nothing already allocated, MSI present and not blacklisted),
 * clamp the request to the device limit and the 32-message MSI
 * maximum (and, per MSI, a power of two), allocate the IRQs from the
 * parent bridge, publish them as SYS_RES_IRQ rids starting at 1, and
 * program the Multiple Message Enable field of the control register.
 */
2017 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2019 struct pci_devinfo *dinfo = device_get_ivars(child);
2020 pcicfgregs *cfg = &dinfo->cfg;
2021 struct resource_list_entry *rle;
2022 int actual, error, i, irqs[32];
2025 /* Don't let count == 0 get us into trouble. */
2029 /* If rid 0 is allocated, then fail. */
2030 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2031 if (rle != NULL && rle->res != NULL)
2034 /* Already have allocated messages? */
2035 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2038 /* If MSI is blacklisted for this system, fail. */
2039 if (pci_msi_blacklisted())
2042 /* MSI capability present? */
2043 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2047 device_printf(child,
2048 "attempting to allocate %d MSI vectors (%d supported)\n",
2049 *count, cfg->msi.msi_msgnum);
2051 /* Don't ask for more than the device supports. */
2052 actual = min(*count, cfg->msi.msi_msgnum);
2054 /* Don't ask for more than 32 messages. */
2055 actual = min(actual, 32);
2057 /* MSI requires power of 2 number of messages. */
2058 if (!powerof2(actual))
2062 /* Try to allocate N messages. */
2063 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2075 * We now have N actual messages mapped onto SYS_RES_IRQ
2076 * resources in the irqs[] array, so add new resources
2077 * starting at rid 1.
2079 for (i = 0; i < actual; i++)
2080 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2081 irqs[i], irqs[i], 1);
2085 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2090 * Be fancy and try to print contiguous runs
2091 * of IRQ values as ranges. 'run' is true if
2092 * we are in a range.
2094 device_printf(child, "using IRQs %d", irqs[0]);
2096 for (i = 1; i < actual; i++) {
2098 /* Still in a run? */
2099 if (irqs[i] == irqs[i - 1] + 1) {
2104 /* Finish previous range. */
2106 printf("-%d", irqs[i - 1]);
2110 /* Start new range. */
2111 printf(",%d", irqs[i]);
2114 /* Unfinished range? */
2116 printf("-%d", irqs[actual - 1]);
2117 printf(" for MSI\n");
2121 /* Update control register with actual count. */
2122 ctrl = cfg->msi.msi_ctrl;
2123 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* MME field encodes log2(count) in bits 6:4. */
2124 ctrl |= (ffs(actual) - 1) << 4;
2125 cfg->msi.msi_ctrl = ctrl;
2126 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2128 /* Update counts of alloc'd messages. */
2129 cfg->msi.msi_alloc = actual;
2130 cfg->msi.msi_handlers = 0;
2135 /* Release the MSI messages associated with this device. */
/*
 * Bus method backing pci_release_msi(): tries MSI-X first (any result
 * other than ENODEV means MSI-X owned the messages), then releases
 * plain MSI — refusing while handlers or SYS_RES_IRQ resources are
 * still outstanding, clearing the MME field, and returning the IRQs
 * to the parent bridge.
 */
2137 pci_release_msi_method(device_t dev, device_t child)
2139 struct pci_devinfo *dinfo = device_get_ivars(child);
2140 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2141 struct resource_list_entry *rle;
2142 int error, i, irqs[32];
2144 /* Try MSI-X first. */
2145 error = pci_release_msix(dev, child);
2146 if (error != ENODEV)
2149 /* Do we have any messages to release? */
2150 if (msi->msi_alloc == 0)
2152 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2154 /* Make sure none of the resources are allocated. */
2155 if (msi->msi_handlers > 0)
2157 for (i = 0; i < msi->msi_alloc; i++) {
2158 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2159 KASSERT(rle != NULL, ("missing MSI resource"));
2160 if (rle->res != NULL)
2162 irqs[i] = rle->start;
2165 /* Update control register with 0 count. */
2166 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2167 ("%s: MSI still enabled", __func__));
2168 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2169 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2172 /* Release the messages. */
2173 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2174 for (i = 0; i < msi->msi_alloc; i++)
2175 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2177 /* Update alloc count. */
2185 * Return the max supported MSI messages this device supports.
2186 * Basically, assuming the MD code can alloc messages, this function
2187 * should return the maximum value that pci_alloc_msi() can return.
2188 * Thus, it is subject to the tunables, etc.
/*
 * Report the device's MSI message count, subject to the pci_do_msi
 * tunable and the presence of the MSI capability.
 */
2191 pci_msi_count_method(device_t dev, device_t child)
2193 struct pci_devinfo *dinfo = device_get_ivars(child);
2194 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2196 if (pci_do_msi && msi->msi_location != 0)
2197 return (msi->msi_msgnum);
2201 /* free pcicfgregs structure and all depending data structures */
/*
 * Free a pci_devinfo and everything hanging off it: VPD strings
 * (identifier plus read-only and read/write keyword values), the BAR
 * map list, and the devinfo itself; the entry is also unlinked from
 * the global device list and the generation/count bookkeeping
 * updated.
 */
2204 pci_freecfg(struct pci_devinfo *dinfo)
2206 struct devlist *devlist_head;
2207 struct pci_map *pm, *next;
2210 devlist_head = &pci_devq;
2212 if (dinfo->cfg.vpd.vpd_reg) {
2213 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2214 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2215 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2216 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2217 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2218 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2219 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* _SAFE variant: each map entry is freed while iterating. */
2221 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2224 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2225 free(dinfo, M_DEVBUF);
2227 /* increment the generation count */
2230 /* we're losing one device */
2236 * PCI power management
/*
 * Bus method to move 'child' to the requested PCI power state (D0-D3)
 * via its power-management capability.  No-ops when there is no PM
 * capability or no state change; rejects D1/D2 when the capability
 * does not advertise support; writes the new state to the PM status
 * register.  The spec-mandated settling delay uses DELAY (busy-wait)
 * because callers may not be able to sleep.
 */
2239 pci_set_powerstate_method(device_t dev, device_t child, int state)
2241 struct pci_devinfo *dinfo = device_get_ivars(child);
2242 pcicfgregs *cfg = &dinfo->cfg;
2244 int result, oldstate, highest, delay;
2246 if (cfg->pp.pp_cap == 0)
2247 return (EOPNOTSUPP);
2250 * Optimize a no state change request away. While it would be OK to
2251 * write to the hardware in theory, some devices have shown odd
2252 * behavior when going from D3 -> D3.
2254 oldstate = pci_get_powerstate(child);
2255 if (oldstate == state)
2259 * The PCI power management specification states that after a state
2260 * transition between PCI power states, system software must
2261 * guarantee a minimal delay before the function accesses the device.
2262 * Compute the worst case delay that we need to guarantee before we
2263 * access the device. Many devices will be responsive much more
2264 * quickly than this delay, but there are some that don't respond
2265 * instantly to state changes. Transitions to/from D3 state require
2266 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2267 * is done below with DELAY rather than a sleeper function because
2268 * this function can be called from contexts where we cannot sleep.
2270 highest = (oldstate > state) ? oldstate : state;
2271 if (highest == PCI_POWERSTATE_D3)
2273 else if (highest == PCI_POWERSTATE_D2)
2277 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2278 & ~PCIM_PSTAT_DMASK;
2281 case PCI_POWERSTATE_D0:
2282 status |= PCIM_PSTAT_D0;
2284 case PCI_POWERSTATE_D1:
2285 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2286 return (EOPNOTSUPP);
2287 status |= PCIM_PSTAT_D1;
2289 case PCI_POWERSTATE_D2:
2290 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2291 return (EOPNOTSUPP);
2292 status |= PCIM_PSTAT_D2;
2294 case PCI_POWERSTATE_D3:
2295 status |= PCIM_PSTAT_D3;
2302 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2305 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Bus method returning the child's current PCI power state, decoded
 * from the PM status register.  Devices without a PM capability are
 * reported as always being in D0.
 */
2312 pci_get_powerstate_method(device_t dev, device_t child)
2314 struct pci_devinfo *dinfo = device_get_ivars(child);
2315 pcicfgregs *cfg = &dinfo->cfg;
2319 if (cfg->pp.pp_cap != 0) {
2320 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2321 switch (status & PCIM_PSTAT_DMASK) {
2323 result = PCI_POWERSTATE_D0;
2326 result = PCI_POWERSTATE_D1;
2329 result = PCI_POWERSTATE_D2;
2332 result = PCI_POWERSTATE_D3;
2335 result = PCI_POWERSTATE_UNKNOWN;
2339 /* No support, device is always at D0 */
2340 result = PCI_POWERSTATE_D0;
2346 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: OR 'bit' into the child's command register. */
2349 static __inline void
2350 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2354 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2356 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the child's command register. */
2359 static __inline void
2360 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2364 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2366 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Set the bus-master enable bit in the child's command register. */
2370 pci_enable_busmaster_method(device_t dev, device_t child)
2372 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Clear the bus-master enable bit in the child's command register. */
2377 pci_disable_busmaster_method(device_t dev, device_t child)
2379 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable port or memory decoding for the child, selected by the
 * SYS_RES_IOPORT / SYS_RES_MEMORY resource type in 'space'.
 */
2384 pci_enable_io_method(device_t dev, device_t child, int space)
2389 case SYS_RES_IOPORT:
2390 bit = PCIM_CMD_PORTEN;
2392 case SYS_RES_MEMORY:
2393 bit = PCIM_CMD_MEMEN;
2398 pci_set_command_bit(dev, child, bit);
/*
 * Disable port or memory decoding for the child, selected by the
 * SYS_RES_IOPORT / SYS_RES_MEMORY resource type in 'space'.
 */
2403 pci_disable_io_method(device_t dev, device_t child, int space)
2408 case SYS_RES_IOPORT:
2409 bit = PCIM_CMD_PORTEN;
2411 case SYS_RES_MEMORY:
2412 bit = PCIM_CMD_MEMEN;
2417 pci_clear_command_bit(dev, child, bit);
2422 * New style pci driver. Parent device is either a pci-host-bridge or a
2423 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a device's config-header details to the console (bootverbose
 * path): IDs, location, class, command/status, timers, interrupt pin,
 * and summaries of the power-management, MSI, and MSI-X capabilities
 * when present.
 */
2427 pci_print_verbose(struct pci_devinfo *dinfo)
2431 pcicfgregs *cfg = &dinfo->cfg;
2433 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2434 cfg->vendor, cfg->device, cfg->revid);
2435 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2436 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2437 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2438 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2440 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2441 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2442 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2443 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2444 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2445 if (cfg->intpin > 0)
2446 printf("\tintpin=%c, irq=%d\n",
2447 cfg->intpin +'a' -1, cfg->intline);
2448 if (cfg->pp.pp_cap) {
2451 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2452 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2453 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2454 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2455 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2456 status & PCIM_PSTAT_DMASK);
2458 if (cfg->msi.msi_location) {
2461 ctrl = cfg->msi.msi_ctrl;
2462 printf("\tMSI supports %d message%s%s%s\n",
2463 cfg->msi.msi_msgnum,
2464 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2465 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2466 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2468 if (cfg->msix.msix_location) {
2469 printf("\tMSI-X supports %d message%s ",
2470 cfg->msix.msix_msgnum,
2471 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2472 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2473 printf("in map 0x%x\n",
2474 cfg->msix.msix_table_bar);
2476 printf("in maps 0x%x and 0x%x\n",
2477 cfg->msix.msix_table_bar,
2478 cfg->msix.msix_pba_bar);
/* Return true if I/O port decoding is enabled in the command register. */
2484 pci_porten(device_t dev)
2486 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return true if memory decoding is enabled in the command register. */
2490 pci_memen(device_t dev)
2492 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Probe the BAR at config offset 'reg': return the current value in
 * *mapp and the size-probe readback (all-ones write) in *testvalp.
 * The ROM BAR is handled specially (always 32-bit, bit 0 left clear
 * during the probe); regular BARs may be 64-bit, and decoding is
 * disabled in the command register while the BAR temporarily holds
 * the probe pattern.
 */
2496 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2498 struct pci_devinfo *dinfo;
2499 pci_addr_t map, testval;
2504 * The device ROM BAR is special. It is always a 32-bit
2505 * memory BAR. Bit 0 is special and should not be set when
2508 dinfo = device_get_ivars(dev);
2509 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2510 map = pci_read_config(dev, reg, 4);
2511 pci_write_config(dev, reg, 0xfffffffe, 4);
2512 testval = pci_read_config(dev, reg, 4);
2513 pci_write_config(dev, reg, map, 4);
2515 *testvalp = testval;
2519 map = pci_read_config(dev, reg, 4);
2520 ln2range = pci_maprange(map);
2522 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2525 * Disable decoding via the command register before
2526 * determining the BAR's length since we will be placing it in
2529 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2530 pci_write_config(dev, PCIR_COMMAND,
2531 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2534 * Determine the BAR's length by writing all 1's. The bottom
2535 * log_2(size) bits of the BAR will stick as 0 when we read
2538 pci_write_config(dev, reg, 0xffffffff, 4);
2539 testval = pci_read_config(dev, reg, 4);
2540 if (ln2range == 64) {
2541 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2542 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2546 * Restore the original value of the BAR. We may have reprogrammed
2547 * the BAR of the low-level console device and when booting verbose,
2548 * we need the console device addressable.
2550 pci_write_config(dev, reg, map, 4);
2552 pci_write_config(dev, reg + 4, map >> 32, 4);
2553 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2556 *testvalp = testval;
/*
 * Program the BAR described by 'pm' with 'base' and re-read the
 * hardware value back into pm->pm_value (both dwords for 64-bit
 * BARs).  The ROM BAR is treated as a plain 32-bit BAR.
 */
2560 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2562 struct pci_devinfo *dinfo;
2565 /* The device ROM BAR is always a 32-bit memory BAR. */
2566 dinfo = device_get_ivars(dev);
2567 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2570 ln2range = pci_maprange(pm->pm_value);
2571 pci_write_config(dev, pm->pm_reg, base, 4);
2573 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2574 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2576 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2577 pm->pm_reg + 4, 4) << 32;
/*
 * pci_find_bar (fragment): look up the cached pci_map entry for config
 * register 'reg' on this device's BAR list.
 * NOTE(review): the tail is elided in this excerpt — presumably
 * returns the matching entry or NULL; confirm against the full source.
 */
2581 pci_find_bar(device_t dev, int reg)
2583 struct pci_devinfo *dinfo;
2586 dinfo = device_get_ivars(dev);
2587 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2588 if (pm->pm_reg == reg)
/*
 * pci_bar_enabled (fragment): report whether decoding is active for
 * the given BAR.  A ROM BAR additionally requires PCIM_BIOS_ENABLE in
 * the BAR value itself; otherwise the command register's MEMEN (for
 * memory and ROM BARs) or PORTEN (for I/O BARs) bit decides.
 * NOTE(review): some interior lines are elided in this excerpt.
 */
2595 pci_bar_enabled(device_t dev, struct pci_map *pm)
2597 struct pci_devinfo *dinfo;
2600 dinfo = device_get_ivars(dev);
2601 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2602 !(pm->pm_value & PCIM_BIOS_ENABLE))
2604 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2605 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2606 return ((cmd & PCIM_CMD_MEMEN) != 0);
2608 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * pci_add_bar (fragment): allocate a pci_map record for BAR 'reg' and
 * insert it into the device's map list, kept sorted by register
 * offset (insert-after the last entry with a smaller pm_reg, or at
 * the tail).  Duplicate registrations are a bug (KASSERT).
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
2611 static struct pci_map *
2612 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2614 struct pci_devinfo *dinfo;
2615 struct pci_map *pm, *prev;
2617 dinfo = device_get_ivars(dev);
2618 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2620 pm->pm_value = value;
2622 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2623 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2625 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2626 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2630 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2632 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * pci_restore_bars (fragment): rewrite each cached BAR value into the
 * device's config space (used e.g. when restoring config state).  The
 * ROM BAR test at the top presumably takes a separate path in the
 * elided lines — TODO confirm against the full source.
 */
2637 pci_restore_bars(device_t dev)
2639 struct pci_devinfo *dinfo;
2643 dinfo = device_get_ivars(dev);
2644 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2645 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
/* 64-bit BARs need the high dword written as well. */
2648 ln2range = pci_maprange(pm->pm_value);
2649 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2651 pci_write_config(dev, pm->pm_reg + 4,
2652 pm->pm_value >> 32, 4);
/*
 * Add a resource based on a pci map register. Return 1 if the map
 * register is a 32bit map register or 2 if it is a 64bit register.
 *
 * NOTE(review): fixed two mis-encoded "&reg" arguments to
 * resource_list_reserve() that had been HTML-entity-mangled into a
 * registered-trademark sign, plus two comment typos.  Interior lines
 * are elided in this excerpt (gaps in the embedded numbering); confirm
 * against the full source.
 */
2661 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2662 int force, int prefetch)
2665 pci_addr_t base, map, testval;
2666 pci_addr_t start, end, count;
2667 int barlen, basezero, maprange, mapsize, type;
2669 struct resource *res;
2672 * The BAR may already exist if the device is a CardBus card
2673 * whose CIS is stored in this BAR.
2675 pm = pci_find_bar(dev, reg);
2677 maprange = pci_maprange(pm->pm_value);
2678 barlen = maprange == 64 ? 2 : 1;
/* Probe the BAR's current value and size mask. */
2682 pci_read_bar(dev, reg, &map, &testval);
2683 if (PCI_BAR_MEM(map)) {
2684 type = SYS_RES_MEMORY;
2685 if (map & PCIM_BAR_MEM_PREFETCH)
2688 type = SYS_RES_IOPORT;
2689 mapsize = pci_mapsize(testval);
2690 base = pci_mapbase(map);
2691 #ifdef __PCI_BAR_ZERO_VALID
2694 basezero = base == 0;
2696 maprange = pci_maprange(map);
2697 barlen = maprange == 64 ? 2 : 1;
2700 * For I/O registers, if bottom bit is set, and the next bit up
2701 * isn't clear, we know we have a BAR that doesn't conform to the
2702 * spec, so ignore it. Also, sanity check the size of the data
2703 * areas to the type of memory involved. Memory must be at least
2704 * 16 bytes in size, while I/O ranges must be at least 4.
2706 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2708 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2709 (type == SYS_RES_IOPORT && mapsize < 2))
2712 /* Save a record of this BAR. */
2713 pm = pci_add_bar(dev, reg, map, mapsize);
2715 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2716 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2717 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2718 printf(", port disabled\n");
2719 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2720 printf(", memory disabled\n");
2722 printf(", enabled\n");
2726 * If base is 0, then we have problems if this architecture does
2727 * not allow that. It is best to ignore such entries for the
2728 * moment. These will be allocated later if the driver specifically
2729 * requests them. However, some removable busses look better when
2730 * all resources are allocated, so allow '0' to be overridden.
2732 * Similarly treat maps whose value is the same as the test value
2733 * read back. These maps have had all f's written to them by the
2734 * BIOS in an attempt to disable the resources.
2736 if (!force && (basezero || map == testval))
2738 if ((u_long)base != base) {
2740 "pci%d:%d:%d:%d bar %#x too many address bits",
2741 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2742 pci_get_function(dev), reg);
2747 * This code theoretically does the right thing, but has
2748 * undesirable side effects in some cases where peripherals
2749 * respond oddly to having these bits enabled. Let the user
2750 * be able to turn them off (since pci_enable_io_modes is 1 by
2753 if (pci_enable_io_modes) {
2754 /* Turn on resources that have been left off by a lazy BIOS */
2755 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2756 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2757 cmd |= PCIM_CMD_PORTEN;
2758 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2760 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2761 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2762 cmd |= PCIM_CMD_MEMEN;
2763 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2766 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2768 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2772 count = (pci_addr_t)1 << mapsize;
2773 if (basezero || base == pci_mapbase(testval)) {
2774 start = 0; /* Let the parent decide. */
2778 end = base + count - 1;
2780 resource_list_add(rl, type, reg, start, end, count);
2783 * Try to allocate the resource for this BAR from our parent
2784 * so that this resource range is already reserved. The
2785 * driver for this device will later inherit this resource in
2786 * pci_alloc_resource().
2788 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
2789 prefetch ? RF_PREFETCHABLE : 0);
2790 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2792 * If the allocation fails, try to allocate a resource for
2793 * this BAR using any available range. The firmware felt
2794 * it was important enough to assign a resource, so don't
2795 * disable decoding if we can help it.
2797 resource_list_delete(rl, type, reg);
2798 resource_list_add(rl, type, reg, 0, ~0ul, count);
2799 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0ul,
2800 count, prefetch ? RF_PREFETCHABLE : 0);
2804 * If the allocation fails, delete the resource list entry
2805 * and disable decoding for this device.
2807 * If the driver requests this resource in the future,
2808 * pci_reserve_map() will try to allocate a fresh
2811 resource_list_delete(rl, type, reg);
2812 pci_disable_io(dev, type);
2815 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2816 pci_get_domain(dev), pci_get_bus(dev),
2817 pci_get_slot(dev), pci_get_function(dev), reg);
2819 start = rman_get_start(res);
2820 pci_write_bar(dev, pm, start);
/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sits on the
 * same addresses that old ISA hardware did. This dictates that we use
 * those addresses and ignore the BAR's if we cannot set PCI native
 *
 * NOTE(review): several interior lines are elided in this excerpt
 * (gaps in the embedded numbering); confirm against the full source.
 */
2833 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2834 uint32_t prefetchmask)
2837 int rid, type, progif;
2839 /* if this device supports PCI native addressing use it */
2840 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2841 if ((progif & 0x8a) == 0x8a) {
2842 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2843 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2844 printf("Trying ATA native PCI addressing mode\n");
2845 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2849 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2850 type = SYS_RES_IOPORT;
/*
 * Primary channel: native mode uses BAR(0)/BAR(1); otherwise reserve
 * the legacy ISA compat ranges 0x1f0-0x1f7 and 0x3f6.
 */
2851 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2852 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2853 prefetchmask & (1 << 0));
2854 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2855 prefetchmask & (1 << 1));
2858 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2859 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
2862 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2863 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
/*
 * Secondary channel: native mode uses BAR(2)/BAR(3); otherwise the
 * legacy ranges 0x170-0x177 and 0x376.
 */
2866 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2867 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2868 prefetchmask & (1 << 2));
2869 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2870 prefetchmask & (1 << 3));
2873 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2874 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
2877 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2878 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
/* BAR(4)/BAR(5) (bus-master DMA etc.) are mapped unconditionally. */
2881 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
2882 prefetchmask & (1 << 4));
2883 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
2884 prefetchmask & (1 << 5));
/*
 * pci_assign_interrupt (fragment): work out the INTx IRQ for a device
 * and add it as rid 0 in its resource list.  Precedence: user tunable
 * hw.pci<dom>.<bus>.<slot>.INT<pin>.irq, then (depending on
 * force_route) the intline register or a routed interrupt from the
 * parent bridge.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
2888 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2890 struct pci_devinfo *dinfo = device_get_ivars(dev);
2891 pcicfgregs *cfg = &dinfo->cfg;
2892 char tunable_name[64];
2895 /* Has to have an intpin to have an interrupt. */
2896 if (cfg->intpin == 0)
2899 /* Let the user override the IRQ with a tunable. */
2900 irq = PCI_INVALID_IRQ;
2901 snprintf(tunable_name, sizeof(tunable_name),
2902 "hw.pci%d.%d.%d.INT%c.irq",
2903 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject tunable values outside the valid 1..254 range. */
2904 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2905 irq = PCI_INVALID_IRQ;
2908 * If we didn't get an IRQ via the tunable, then we either use the
2909 * IRQ value in the intline register or we ask the bus to route an
2910 * interrupt for us. If force_route is true, then we only use the
2911 * value in the intline register if the bus was unable to assign an
2914 if (!PCI_INTERRUPT_VALID(irq)) {
2915 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2916 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2917 if (!PCI_INTERRUPT_VALID(irq))
2921 /* If after all that we don't have an IRQ, just bail. */
2922 if (!PCI_INTERRUPT_VALID(irq))
2925 /* Update the config register if it changed. */
2926 if (irq != cfg->intline) {
2928 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2931 /* Add this IRQ as rid 0 interrupt resource. */
2932 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
/* Perform early OHCI takeover from SMM. */
/*
 * ohci_early_takeover (fragment): if the BIOS/SMM owns the OHCI
 * controller (OHCI_IR set), request an ownership change and poll up to
 * 100 iterations; on timeout force a host-controller reset.  Finally
 * masks all OHCI interrupts and releases the register resource.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
2937 ohci_early_takeover(device_t self)
2939 struct resource *res;
2945 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
2949 ctl = bus_read_4(res, OHCI_CONTROL);
2950 if (ctl & OHCI_IR) {
2952 printf("ohci early: "
2953 "SMM active, request owner change\n");
2954 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
2955 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
2957 ctl = bus_read_4(res, OHCI_CONTROL);
2959 if (ctl & OHCI_IR) {
2961 printf("ohci early: "
2962 "SMM does not respond, resetting\n");
2963 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
2965 /* Disable interrupts */
2966 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
2969 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/* Perform early UHCI takeover from SMM. */
/*
 * uhci_early_takeover (fragment): disable UHCI legacy/SMM support by
 * programming the LEGSUP register (keeping only PIRQD enable), then
 * mask the controller's interrupts via its I/O ports.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
2974 uhci_early_takeover(device_t self)
2976 struct resource *res;
2980 * Set the PIRQD enable bit and switch off all the others. We don't
2981 * want legacy support to interfere with us XXX Does this also mean
2982 * that the BIOS won't touch the keyboard anymore if it is connected
2983 * to the ports of the root hub?
2985 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
2987 /* Disable interrupts */
2988 rid = PCI_UHCI_BASE_REG;
2989 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
2991 bus_write_2(res, UHCI_INTR, 0);
2992 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
/* Perform early EHCI takeover from SMM. */
/*
 * ehci_early_takeover (fragment): walk the EHCI extended-capability
 * list looking for the USB legacy-support capability; if the BIOS
 * semaphore is held, set the OS semaphore and poll (up to 100
 * iterations) for the BIOS to release.  Finally disables controller
 * interrupts (USBINTR = 0) and releases the register resource.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
2998 ehci_early_takeover(device_t self)
3000 struct resource *res;
3010 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3014 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3016 /* Synchronise with the BIOS if it owns the controller. */
3017 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3018 eecp = EHCI_EECP_NEXT(eec)) {
/* EHCI extended capabilities live in PCI config space. */
3019 eec = pci_read_config(self, eecp, 4);
3020 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3023 bios_sem = pci_read_config(self, eecp +
3024 EHCI_LEGSUP_BIOS_SEM, 1);
3025 if (bios_sem == 0) {
3029 printf("ehci early: "
3030 "SMM active, request owner change\n");
3032 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3034 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3036 bios_sem = pci_read_config(self, eecp +
3037 EHCI_LEGSUP_BIOS_SEM, 1);
3040 if (bios_sem != 0) {
3042 printf("ehci early: "
3043 "SMM does not respond\n");
3045 /* Disable interrupts */
3046 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3047 bus_write_4(res, offs + EHCI_USBINTR, 0);
3049 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/* Perform early XHCI takeover from SMM. */
/*
 * xhci_early_takeover (fragment): walk the xHCI extended-capability
 * list (in MMIO space, dword-indexed) for the USB legacy-support
 * capability; if the BIOS semaphore is held, set the OS semaphore and
 * poll up to 5000 iterations (the comment says a 5 second maximum).
 * Finally clears USBCMD (stopping the controller / its interrupts)
 * and reads USBSTS, then releases the register resource.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
3054 xhci_early_takeover(device_t self)
3056 struct resource *res;
3066 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3070 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3074 /* Synchronise with the BIOS if it owns the controller. */
3075 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3076 eecp += XHCI_XECP_NEXT(eec) << 2) {
3077 eec = bus_read_4(res, eecp);
3079 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3082 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3087 printf("xhci early: "
3088 "SMM active, request owner change\n");
3090 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3092 /* wait a maximum of 5 second */
3094 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3096 bios_sem = bus_read_1(res, eecp +
3097 XHCI_XECP_BIOS_SEM);
3100 if (bios_sem != 0) {
3102 printf("xhci early: "
3103 "SMM does not respond\n");
3106 /* Disable interrupts */
3107 offs = bus_read_1(res, XHCI_CAPLENGTH);
3108 bus_write_4(res, offs + XHCI_USBCMD, 0);
3109 bus_read_4(res, offs + XHCI_USBSTS);
3111 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/*
 * pci_add_resources (fragment): populate a new child device's resource
 * list: special-case ATA controllers, add each BAR (honoring unmap /
 * extra-map quirks), assign the INTx interrupt, and perform early USB
 * controller takeover from SMM where enabled.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
3115 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3117 struct pci_devinfo *dinfo;
3119 struct resource_list *rl;
3120 const struct pci_quirk *q;
3124 dinfo = device_get_ivars(dev);
3126 rl = &dinfo->resources;
3127 devid = (cfg->device << 16) | cfg->vendor;
3129 /* ATA devices needs special map treatment */
3130 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3131 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3132 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3133 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3134 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3135 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* pci_add_map() returns 1 or 2 BAR slots consumed, advancing i. */
3137 for (i = 0; i < cfg->nummaps;) {
3139 * Skip quirked resources.
3141 for (q = &pci_quirks[0]; q->devid != 0; q++)
3142 if (q->devid == devid &&
3143 q->type == PCI_QUIRK_UNMAP_REG &&
3144 q->arg1 == PCIR_BAR(i))
3146 if (q->devid != 0) {
3150 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3151 prefetchmask & (1 << i));
3155 * Add additional, quirked resources.
3157 for (q = &pci_quirks[0]; q->devid != 0; q++)
3158 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3159 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3161 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3162 #ifdef __PCI_REROUTE_INTERRUPT
3164 * Try to re-route interrupts. Sometimes the BIOS or
3165 * firmware may leave bogus values in these registers.
3166 * If the re-route fails, then just stick with what we
3169 pci_assign_interrupt(bus, dev, 1);
3171 pci_assign_interrupt(bus, dev, 0);
3175 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3176 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3177 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3178 xhci_early_takeover(dev);
3179 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3180 ehci_early_takeover(dev);
3181 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3182 ohci_early_takeover(dev);
3183 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3184 uhci_early_takeover(dev);
/*
 * pci_add_children (fragment): enumerate every slot/function on the
 * bus via the parent bridge's config accessors, reading each present
 * device's config header and adding it as a child.  Multi-function
 * devices (PCIM_MFDEV) scan all functions; otherwise presumably only
 * function 0 — the pcifunchigh reset is in elided lines, TODO confirm.
 */
3191 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3192 device_t pcib = device_get_parent(dev);
3193 struct pci_devinfo *dinfo;
3195 int s, f, pcifunchigh;
3198 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3199 ("dinfo_size too small"));
3200 maxslots = PCIB_MAXSLOTS(pcib);
3201 for (s = 0; s <= maxslots; s++) {
3205 hdrtype = REG(PCIR_HDRTYPE, 1);
3206 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3208 if (hdrtype & PCIM_MFDEV)
3209 pcifunchigh = PCI_FUNCMAX;
3210 for (f = 0; f <= pcifunchigh; f++) {
3211 dinfo = pci_read_device(pcib, domain, busno, s, f,
3213 if (dinfo != NULL) {
3214 pci_add_child(dev, dinfo);
/*
 * pci_add_child (fragment): create the newbus child device for a
 * probed PCI function, wire up its ivars and resource list, snapshot
 * then restore its config space, and populate its resources.
 */
3222 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3224 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3225 device_set_ivars(dinfo->cfg.dev, dinfo)
3226 resource_list_init(&dinfo->resources);
/* Save config first (setstate 0), then restore to a known-good state. */
3227 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3228 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3229 pci_print_verbose(dinfo);
3230 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/*
 * pci_probe (fragment): generic PCI bus probe — set the description
 * and return BUS_PROBE_GENERIC so subclassed bus drivers can win.
 */
3234 pci_probe(device_t dev)
3237 device_set_desc(dev, "PCI bus");
3239 /* Allow other subclasses to override this driver. */
3240 return (BUS_PROBE_GENERIC);
/*
 * pci_attach_common (fragment): shared attach setup — report
 * domain/bus, and (when PCI_DMA_BOUNDARY is defined) create a
 * boundary-constrained DMA tag for the bus, but only when the
 * grandparent is not itself a PCI bus; otherwise inherit the parent's
 * DMA tag.  NOTE(review): interior lines are elided in this excerpt;
 * confirm against the full source.
 */
3244 pci_attach_common(device_t dev)
3246 struct pci_softc *sc;
3248 #ifdef PCI_DMA_BOUNDARY
3249 int error, tag_valid;
3252 sc = device_get_softc(dev);
3253 domain = pcib_get_domain(dev);
3254 busno = pcib_get_bus(dev);
3256 device_printf(dev, "domain=%d, physical bus=%d\n",
3258 #ifdef PCI_DMA_BOUNDARY
3260 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3261 devclass_find("pci")) {
3262 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3263 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3264 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3265 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3267 device_printf(dev, "Failed to create DMA tag: %d\n",
3274 sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * pci_attach (fragment): attach the PCI bus — common setup, then
 * enumerate children using the domain/bus numbers obtained from the
 * parent bridge (unit numbers are not reliable with multiple domains).
 */
3279 pci_attach(device_t dev)
3281 int busno, domain, error;
3283 error = pci_attach_common(dev);
3288 * Since there can be multiple independently numbered PCI
3289 * busses on systems with multiple PCI domains, we can't use
3290 * the unit number to decide which bus we are probing. We ask
3291 * the parent pcib what our domain and bus numbers are.
3293 domain = pcib_get_domain(dev);
3294 busno = pcib_get_bus(dev);
3295 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3296 return (bus_generic_attach(dev));
/*
 * pci_set_power_children (fragment): set each attached child device to
 * the requested power state, letting the firmware (via
 * PCIB_POWER_FOR_SLEEP) override the state; unattached children are
 * skipped.  NOTE(review): interior lines are elided in this excerpt.
 */
3300 pci_set_power_children(device_t dev, device_t *devlist, int numdevs,
3303 device_t child, pcib;
3304 struct pci_devinfo *dinfo;
3308 * Set the device to the given state. If the firmware suggests
3309 * a different power state, use it instead. If power management
3310 * is not present, the firmware is responsible for managing
3311 * device power. Skip children who aren't attached since they
3312 * are handled separately.
3314 pcib = device_get_parent(dev);
3315 for (i = 0; i < numdevs; i++) {
3317 dinfo = device_get_ivars(child);
3319 if (device_is_attached(child) &&
3320 PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
3321 pci_set_powerstate(child, dstate);
/*
 * pci_suspend (fragment): save each child's config space, suspend the
 * children generically, then (if pci_do_power_suspend) power them down
 * for sleep.  The devlist is freed on both the error and success
 * paths.  NOTE(review): interior lines are elided in this excerpt.
 */
3326 pci_suspend(device_t dev)
3328 device_t child, *devlist;
3329 struct pci_devinfo *dinfo;
3330 int error, i, numdevs;
3333 * Save the PCI configuration space for each child and set the
3334 * device in the appropriate power state for this sleep state.
3336 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3338 for (i = 0; i < numdevs; i++) {
3340 dinfo = device_get_ivars(child);
3341 pci_cfg_save(child, dinfo, 0);
3344 /* Suspend devices before potentially powering them down. */
3345 error = bus_generic_suspend(dev);
3347 free(devlist, M_TEMP);
3350 if (pci_do_power_suspend)
3351 pci_set_power_children(dev, devlist, numdevs,
3353 free(devlist, M_TEMP);
/*
 * pci_resume (fragment): power children back up (if
 * pci_do_power_resume), restore each child's config space (re-saving
 * state for unattached children), then resume children in two passes —
 * critical classes (e.g. PCIC_BASEPERIPH) first, everything else
 * second.  NOTE(review): interior lines are elided in this excerpt;
 * the full class lists in the switches are not visible here.
 */
3358 pci_resume(device_t dev)
3360 device_t child, *devlist;
3361 struct pci_devinfo *dinfo;
3362 int error, i, numdevs;
3365 * Set each child to D0 and restore its PCI configuration space.
3367 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3369 if (pci_do_power_resume)
3370 pci_set_power_children(dev, devlist, numdevs,
3373 /* Now the device is powered up, restore its config space. */
3374 for (i = 0; i < numdevs; i++) {
3376 dinfo = device_get_ivars(child);
3378 pci_cfg_restore(child, dinfo);
3379 if (!device_is_attached(child))
3380 pci_cfg_save(child, dinfo, 1);
3384 * Resume critical devices first, then everything else later.
3386 for (i = 0; i < numdevs; i++) {
3388 switch (pci_get_class(child)) {
3392 case PCIC_BASEPERIPH:
3393 DEVICE_RESUME(child);
3397 for (i = 0; i < numdevs; i++) {
3399 switch (pci_get_class(child)) {
3403 case PCIC_BASEPERIPH:
3406 DEVICE_RESUME(child);
3409 free(devlist, M_TEMP);
/*
 * pci_load_vendor_data (fragment): locate a preloaded
 * "pci_vendor_data" module and publish its address/size in
 * pci_vendordata / pci_vendordata_size, terminating the buffer with a
 * newline so the parser cannot run off the end on corrupt data.
 */
3414 pci_load_vendor_data(void)
3420 data = preload_search_by_type("pci_vendor_data");
3422 ptr = preload_fetch_addr(data);
3423 sz = preload_fetch_size(data);
3424 if (ptr != NULL && sz != 0) {
3425 pci_vendordata = ptr;
3426 pci_vendordata_size = sz;
3427 /* terminate the database */
3428 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * pci_driver_added (fragment): when a new driver is registered, walk
 * unattached children (DS_NOTPRESENT), restore their config space, and
 * reprobe; if the probe/attach fails, re-save the config state.
 */
3434 pci_driver_added(device_t dev, driver_t *driver)
3439 struct pci_devinfo *dinfo;
3443 device_printf(dev, "driver added\n");
3444 DEVICE_IDENTIFY(driver, dev);
3445 if (device_get_children(dev, &devlist, &numdevs) != 0)
3447 for (i = 0; i < numdevs; i++) {
3449 if (device_get_state(child) != DS_NOTPRESENT)
3451 dinfo = device_get_ivars(child);
3452 pci_print_verbose(dinfo);
3454 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3455 pci_cfg_restore(child, dinfo);
3456 if (device_probe_and_attach(child) != 0)
3457 pci_cfg_save(child, dinfo, 1);
3459 free(devlist, M_TEMP);
/*
 * pci_setup_intr (fragment): bus method to hook up an interrupt
 * handler for a child.  After the generic setup, direct children get
 * MSI/MSI-X bookkeeping: map the vector through the parent bridge on
 * first use, enable the message, track handler counts, and toggle the
 * INTxDIS command bit (INTx enabled for legacy, disabled for MSI).
 * On mapping failure the generic handler is torn down again.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
3463 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3464 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3466 struct pci_devinfo *dinfo;
3467 struct msix_table_entry *mte;
3468 struct msix_vector *mv;
3474 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3479 /* If this is not a direct child, just bail out. */
3480 if (device_get_parent(child) != dev) {
3485 rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt; rid > 0 is MSI/MSI-X. */
3487 /* Make sure that INTx is enabled */
3488 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3491 * Check to see if the interrupt is MSI or MSI-X.
3492 * Ask our parent to map the MSI and give
3493 * us the address and data register values.
3494 * If we fail for some reason, teardown the
3495 * interrupt handler.
3497 dinfo = device_get_ivars(child);
3498 if (dinfo->cfg.msi.msi_alloc > 0) {
3499 if (dinfo->cfg.msi.msi_addr == 0) {
3500 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3501 ("MSI has handlers, but vectors not mapped"));
3502 error = PCIB_MAP_MSI(device_get_parent(dev),
3503 child, rman_get_start(irq), &addr, &data);
3506 dinfo->cfg.msi.msi_addr = addr;
3507 dinfo->cfg.msi.msi_data = data;
/* Only program the device's MSI registers for the first handler. */
3509 if (dinfo->cfg.msi.msi_handlers == 0)
3510 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3511 dinfo->cfg.msi.msi_data);
3512 dinfo->cfg.msi.msi_handlers++;
3514 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3515 ("No MSI or MSI-X interrupts allocated"));
3516 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3517 ("MSI-X index too high"));
/* MSI-X rids are 1-based indices into the message table. */
3518 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3519 KASSERT(mte->mte_vector != 0, ("no message vector"));
3520 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3521 KASSERT(mv->mv_irq == rman_get_start(irq),
3523 if (mv->mv_address == 0) {
3524 KASSERT(mte->mte_handlers == 0,
3525 ("MSI-X table entry has handlers, but vector not mapped"));
3526 error = PCIB_MAP_MSI(device_get_parent(dev),
3527 child, rman_get_start(irq), &addr, &data);
3530 mv->mv_address = addr;
3533 if (mte->mte_handlers == 0) {
3534 pci_enable_msix(child, rid - 1, mv->mv_address,
3536 pci_unmask_msix(child, rid - 1);
3538 mte->mte_handlers++;
3541 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3542 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3545 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * pci_teardown_intr (fragment): mirror of pci_setup_intr — for direct
 * children, mask INTx, decrement the MSI / MSI-X handler count for the
 * rid, and disable the message (pci_disable_msi / pci_mask_msix) once
 * the last handler is gone, before the generic teardown.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
3555 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3558 struct msix_table_entry *mte;
3559 struct resource_list_entry *rle;
3560 struct pci_devinfo *dinfo;
3563 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3566 /* If this isn't a direct child, just bail out */
3567 if (device_get_parent(child) != dev)
3568 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3570 rid = rman_get_rid(irq);
3573 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3576 * Check to see if the interrupt is MSI or MSI-X. If so,
3577 * decrement the appropriate handlers count and mask the
3578 * MSI-X message, or disable MSI messages if the count
3581 dinfo = device_get_ivars(child);
3582 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3583 if (rle->res != irq)
3585 if (dinfo->cfg.msi.msi_alloc > 0) {
3586 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3587 ("MSI-X index too high"));
3588 if (dinfo->cfg.msi.msi_handlers == 0)
3590 dinfo->cfg.msi.msi_handlers--;
3591 if (dinfo->cfg.msi.msi_handlers == 0)
3592 pci_disable_msi(child);
3594 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3595 ("No MSI or MSI-X interrupts allocated"));
3596 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3597 ("MSI-X index too high"));
3598 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3599 if (mte->mte_handlers == 0)
3601 mte->mte_handlers--;
3602 if (mte->mte_handlers == 0)
3603 pci_mask_msix(child, rid - 1);
3606 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3609 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * pci_print_child (fragment): standard bus print method — emit the
 * child's reserved port/memory/IRQ resources, flags, and slot.function
 * location, returning the accumulated character count.
 */
3614 pci_print_child(device_t dev, device_t child)
3616 struct pci_devinfo *dinfo;
3617 struct resource_list *rl;
3620 dinfo = device_get_ivars(child);
3621 rl = &dinfo->resources;
3623 retval += bus_print_child_header(dev, child);
3625 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3626 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3627 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3628 if (device_get_flags(dev))
3629 retval += printf(" flags %#x", device_get_flags(dev));
3631 retval += printf(" at device %d.%d", pci_get_slot(child),
3632 pci_get_function(child));
3634 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass description table used by pci_probe_nomatch() to
 * print a human-readable name for devices with no attached driver.
 * Entries with subclass == -1 name the whole class; more specific
 * subclass entries follow their class entry.
 * NOTE(review): the struct declaration and the table's terminator line
 * are elided in this excerpt.
 */
3644 } pci_nomatch_tab[] = {
3645 {PCIC_OLD, -1, "old"},
3646 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3647 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3648 {PCIC_STORAGE, -1, "mass storage"},
3649 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3650 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3651 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3652 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3653 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3654 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3655 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3656 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3657 {PCIC_STORAGE, PCIS_STORAGE_NVM, "NVM"},
3658 {PCIC_NETWORK, -1, "network"},
3659 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3660 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3661 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3662 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3663 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3664 {PCIC_DISPLAY, -1, "display"},
3665 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3666 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3667 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3668 {PCIC_MULTIMEDIA, -1, "multimedia"},
3669 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3670 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3671 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3672 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3673 {PCIC_MEMORY, -1, "memory"},
3674 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3675 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3676 {PCIC_BRIDGE, -1, "bridge"},
3677 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3678 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3679 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3680 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3681 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3682 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3683 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3684 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3685 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3686 {PCIC_SIMPLECOMM, -1, "simple comms"},
3687 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3688 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3689 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3690 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3691 {PCIC_BASEPERIPH, -1, "base peripheral"},
3692 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3693 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3694 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3695 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3696 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3697 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3698 {PCIC_INPUTDEV, -1, "input device"},
3699 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3700 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3701 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3702 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3703 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3704 {PCIC_DOCKING, -1, "docking station"},
3705 {PCIC_PROCESSOR, -1, "processor"},
3706 {PCIC_SERIALBUS, -1, "serial bus"},
3707 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3708 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3709 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3710 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3711 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3712 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3713 {PCIC_WIRELESS, -1, "wireless controller"},
3714 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3715 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3716 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3717 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3718 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3719 {PCIC_SATCOM, -1, "satellite communication"},
3720 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3721 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3722 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3723 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3724 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3725 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3726 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3727 {PCIC_DASP, -1, "dasp"},
3728 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * pci_probe_nomatch (fragment): print a description for a device no
 * driver claimed — prefer a name from the loaded vendor database,
 * falling back to the class/subclass strings in pci_nomatch_tab; then
 * save the device's config state.
 * NOTE(review): interior lines are elided in this excerpt; confirm
 * against the full source.
 */
3733 pci_probe_nomatch(device_t dev, device_t child)
3736 const char *cp, *scp;
3740 * Look for a listing for this device in a loaded device database.
3742 if ((device = pci_describe_device(child)) != NULL) {
3743 device_printf(dev, "<%s>", device);
3744 free(device, M_DEVBUF);
3747 * Scan the class/subclass descriptions for a general
3752 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3753 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* subclass == -1 entries give the class name; exact matches refine. */
3754 if (pci_nomatch_tab[i].subclass == -1) {
3755 cp = pci_nomatch_tab[i].desc;
3756 } else if (pci_nomatch_tab[i].subclass ==
3757 pci_get_subclass(child)) {
3758 scp = pci_nomatch_tab[i].desc;
3762 device_printf(dev, "<%s%s%s>",
3764 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3767 printf(" at device %d.%d (no driver attached)\n",
3768 pci_get_slot(child), pci_get_function(child));
3769 pci_cfg_save(child, device_get_ivars(child), 1);
3773 * Parse the PCI device database, if loaded, and return a pointer to a
3774 * description of the device.
3776 * The database is flat text formatted as follows:
3778 * Any line not in a valid format is ignored.
3779 * Lines are terminated with newline '\n' characters.
3781 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3784 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3785 * - devices cannot be listed without a corresponding VENDOR line.
3786 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3787 * another TAB, then the device name.
3791 * Assuming (ptr) points to the beginning of a line in the database,
3792 * return the vendor or device and description of the next entry.
3793 * The value of (vendor) or (device) inappropriate for the entry type
3794 * is set to -1. Returns nonzero at the end of the database.
3796 * Note that this is somewhat fragile in the face of corrupt data;
3797 * we attempt to safeguard against this by spamming the end of the
3798 * database with a newline when we initialise.
/*
 * Parse one line of the flat-text vendor database at *ptr.  On a match,
 * store the vendor or device code (the other is left as -1 by the
 * caller's convention) and copy up to 80 chars of description into
 * *desc via sscanf.  Always advances *ptr past the current line.
 * NOTE(review): elided listing — the return statements and some of the
 * bounds bookkeeping are not visible here.
 */
3801 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* Bytes remaining in the database buffer from the current cursor. */
3810 left = pci_vendordata_size - (cp - pci_vendordata);
/* VENDOR line: "<hex vendor>\t<name>" (description capped at 80). */
3818 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* DEVICE line: leading TAB, then "<hex device>\t<name>". */
3822 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3825 /* skip to next line */
3826 while (*cp != '\n' && left > 0) {
3835 /* skip to next line */
3836 while (*cp != '\n' && left > 0) {
/* Step over the terminating newline if we still have bytes left. */
3840 if (*cp == '\n' && left > 0)
/*
 * Look up a device in the loaded pciids database and return a malloc'd
 * "vendor, device" description string, or NULL if the database is not
 * loaded or allocation fails.  The caller frees the result with
 * free(..., M_DEVBUF) (see pci_probe_nomatch).
 * NOTE(review): elided listing — loop constructs, gotos and the cleanup
 * path of the original are not visible here.
 */
3847 pci_describe_device(device_t dev)
3850 char *desc, *vp, *dp, *line;
3852 desc = vp = dp = NULL;
3855 * If we have no vendor data, we can't do anything.
3857 if (pci_vendordata == NULL)
3861 * Scan the vendor data looking for this device
3863 line = pci_vendordata;
/* 80-byte scratch buffers match the %80[^\n] cap in the line parser. */
3864 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3867 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3869 if (vendor == pci_get_vendor(dev))
3872 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3875 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3883 if (device == pci_get_device(dev))
/* No device entry found: fall back to the raw hex device ID. */
3887 snprintf(dp, 80, "0x%x", pci_get_device(dev));
/* +3 = ", " separator plus the NUL terminator. */
3888 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3890 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus read_ivar method: export cached PCI config-header fields
 * (vendor/device IDs, class codes, interrupt routing, latency values,
 * bus address) to children via the instance-variable interface.
 * NOTE(review): elided listing — break statements, the default case and
 * the final return are not visible here.
 */
3900 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3902 struct pci_devinfo *dinfo;
3905 dinfo = device_get_ivars(child);
3909 case PCI_IVAR_ETHADDR:
3911 * The generic accessor doesn't deal with failure, so
3912 * we set the return value, then return an error.
3914 *((uint8_t **) result) = NULL;
3916 case PCI_IVAR_SUBVENDOR:
3917 *result = cfg->subvendor;
3919 case PCI_IVAR_SUBDEVICE:
3920 *result = cfg->subdevice;
3922 case PCI_IVAR_VENDOR:
3923 *result = cfg->vendor;
3925 case PCI_IVAR_DEVICE:
3926 *result = cfg->device;
3928 case PCI_IVAR_DEVID:
/* Composite 32-bit ID: device in the high half, vendor in the low. */
3929 *result = (cfg->device << 16) | cfg->vendor;
3931 case PCI_IVAR_CLASS:
3932 *result = cfg->baseclass;
3934 case PCI_IVAR_SUBCLASS:
3935 *result = cfg->subclass;
3937 case PCI_IVAR_PROGIF:
3938 *result = cfg->progif;
3940 case PCI_IVAR_REVID:
3941 *result = cfg->revid;
3943 case PCI_IVAR_INTPIN:
3944 *result = cfg->intpin;
3947 *result = cfg->intline;
3949 case PCI_IVAR_DOMAIN:
3950 *result = cfg->domain;
3956 *result = cfg->slot;
3958 case PCI_IVAR_FUNCTION:
3959 *result = cfg->func;
3961 case PCI_IVAR_CMDREG:
3962 *result = cfg->cmdreg;
3964 case PCI_IVAR_CACHELNSZ:
3965 *result = cfg->cachelnsz;
3967 case PCI_IVAR_MINGNT:
3968 *result = cfg->mingnt;
3970 case PCI_IVAR_MAXLAT:
3971 *result = cfg->maxlat;
3973 case PCI_IVAR_LATTIMER:
3974 *result = cfg->lattimer;
/*
 * Bus write_ivar method: only the interrupt pin is mutable; all the
 * identity/location ivars are read-only and rejected with EINVAL so a
 * child cannot corrupt the cached config-header state.
 * NOTE(review): elided listing — break/return statements and the
 * default case are not visible here.
 */
3983 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3985 struct pci_devinfo *dinfo;
3987 dinfo = device_get_ivars(child);
3990 case PCI_IVAR_INTPIN:
/* Updates only the cached copy; no config-space write is visible here. */
3991 dinfo->cfg.intpin = value;
3993 case PCI_IVAR_ETHADDR:
3994 case PCI_IVAR_SUBVENDOR:
3995 case PCI_IVAR_SUBDEVICE:
3996 case PCI_IVAR_VENDOR:
3997 case PCI_IVAR_DEVICE:
3998 case PCI_IVAR_DEVID:
3999 case PCI_IVAR_CLASS:
4000 case PCI_IVAR_SUBCLASS:
4001 case PCI_IVAR_PROGIF:
4002 case PCI_IVAR_REVID:
4004 case PCI_IVAR_DOMAIN:
4007 case PCI_IVAR_FUNCTION:
4008 return (EINVAL); /* disallow for now */
4015 #include "opt_ddb.h"
4017 #include <ddb/ddb.h>
4018 #include <sys/cons.h>
4021 * List resources based on pci map registers, used from within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (driver name/unit, domain:bus:
 * slot:func selector, packed class code, subsystem and chip IDs,
 * revision, header type).  Stops early on pager quit.
 * NOTE(review): elided listing — loop body braces and the assignment of
 * 'p' (apparently a pointer to the device's pci_conf) are not visible.
 */
4024 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4026 struct pci_devinfo *dinfo;
4027 struct devlist *devlist_head;
4030 int i, error, none_count;
4033 /* get the head of the device queue */
4034 devlist_head = &pci_devq;
4037 * Go through the list of devices and print out devices
4039 for (error = 0, i = 0,
4040 dinfo = STAILQ_FIRST(devlist_head);
/* Bounded by pci_numdevs and the DDB pager so output stays finite. */
4041 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4042 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4044 /* Populate pd_name and pd_unit */
4047 name = device_get_name(dinfo->cfg.dev);
4050 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4051 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4052 (name && *name) ? name : "none",
4053 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4055 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
/* class=0x%06x packs baseclass:subclass:progif into one 24-bit value. */
4056 p->pc_sel.pc_func, (p->pc_class << 16) |
4057 (p->pc_subclass << 8) | p->pc_progif,
4058 (p->pc_subdevice << 16) | p->pc_subvendor,
4059 (p->pc_device << 16) | p->pc_vendor,
4060 p->pc_revid, p->pc_hdr);
/*
 * Reserve backing for a BAR on first allocation: size the BAR by
 * probing it, validate that the requested resource type matches what
 * the BAR decodes, allocate (but do not activate) the range from the
 * parent, record it in the child's resource list as RLE_RESERVED, and
 * program the BAR with the assigned address.
 * NOTE(review): elided listing — braces, early returns and some error
 * paths from the original are not visible here.
 */
4065 static struct resource *
4066 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4067 u_long start, u_long end, u_long count, u_int flags)
4069 struct pci_devinfo *dinfo = device_get_ivars(child);
4070 struct resource_list *rl = &dinfo->resources;
4071 struct resource_list_entry *rle;
4072 struct resource *res;
4074 pci_addr_t map, testval;
4078 pm = pci_find_bar(child, *rid);
4080 /* This is a BAR that we failed to allocate earlier. */
/* Reuse the size probed on the earlier attempt instead of re-probing. */
4081 mapsize = pm->pm_size;
4085 * Weed out the bogons, and figure out how large the
4086 * BAR/map is. BARs that read back 0 here are bogus
4087 * and unimplemented. Note: atapci in legacy mode are
4088 * special and handled elsewhere in the code. If you
4089 * have a atapci device in legacy mode and it fails
4090 * here, that other code is broken.
4092 pci_read_bar(child, *rid, &map, &testval);
4095 * Determine the size of the BAR and ignore BARs with a size
4096 * of 0. Device ROM BARs use a different mask value.
4098 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4099 mapsize = pci_romsize(testval);
4101 mapsize = pci_mapsize(testval);
4104 pm = pci_add_bar(child, *rid, map, mapsize);
/* Cross-check the caller's resource type against what the BAR decodes. */
4107 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4108 if (type != SYS_RES_MEMORY) {
4111 "child %s requested type %d for rid %#x,"
4112 " but the BAR says it is an memio\n",
4113 device_get_nameunit(child), type, *rid);
4117 if (type != SYS_RES_IOPORT) {
4120 "child %s requested type %d for rid %#x,"
4121 " but the BAR says it is an ioport\n",
4122 device_get_nameunit(child), type, *rid);
4128 * For real BARs, we need to override the size that
4129 * the driver requests, because that's what the BAR
4130 * actually uses and we would otherwise have a
4131 * situation where we might allocate the excess to
4132 * another driver, which won't work.
/* mapsize is log2 of the BAR size; enforce natural alignment too. */
4134 count = (pci_addr_t)1 << mapsize;
4135 if (RF_ALIGNMENT(flags) < mapsize)
4136 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4137 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4138 flags |= RF_PREFETCHABLE;
4141 * Allocate enough resource, and then write back the
4142 * appropriate BAR for that resource.
/* RF_ACTIVE stripped: this is only a reservation, activation is later. */
4144 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
4145 start, end, count, flags & ~RF_ACTIVE);
4147 device_printf(child,
4148 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4149 count, *rid, type, start, end);
4152 resource_list_add(rl, type, *rid, start, end, count);
4153 rle = resource_list_find(rl, type, *rid);
4155 panic("pci_reserve_map: unexpectedly can't find resource.");
4157 rle->start = rman_get_start(res);
4158 rle->end = rman_get_end(res);
4160 rle->flags = RLE_RESERVED;
4162 device_printf(child,
4163 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4164 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address the parent actually handed out. */
4165 map = rman_get_start(res);
4166 pci_write_bar(child, pm, map);
/*
 * Bus alloc_resource method with lazy BAR reservation.  Grandchildren
 * pass straight through to the parent.  For direct children: legacy
 * IRQ 0 is refused once MSI/MSI-X messages are allocated and is routed
 * on demand; bridge window registers bypass the BAR path; any BAR not
 * yet in the resource list is reserved via pci_reserve_map() before the
 * normal resource_list_alloc().
 * NOTE(review): elided listing — the switch statement heads, braces and
 * some returns are not visible here.
 */
4172 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4173 u_long start, u_long end, u_long count, u_int flags)
4175 struct pci_devinfo *dinfo = device_get_ivars(child);
4176 struct resource_list *rl = &dinfo->resources;
4177 struct resource_list_entry *rle;
4178 struct resource *res;
4179 pcicfgregs *cfg = &dinfo->cfg;
/* Not our immediate child: delegate the whole request upward. */
4181 if (device_get_parent(child) != dev)
4182 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4183 type, rid, start, end, count, flags));
4186 * Perform lazy resource allocation
4191 * Can't alloc legacy interrupt once MSI messages have
4194 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4195 cfg->msix.msix_alloc > 0))
4199 * If the child device doesn't have an interrupt
4200 * routed and is deserving of an interrupt, try to
4203 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4205 pci_assign_interrupt(dev, child, 0);
4207 case SYS_RES_IOPORT:
4208 case SYS_RES_MEMORY:
4211 * PCI-PCI bridge I/O window resources are not BARs.
4212 * For those allocations just pass the request up the
4215 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4217 case PCIR_IOBASEL_1:
4218 case PCIR_MEMBASE_1:
4219 case PCIR_PMBASEL_1:
4221 * XXX: Should we bother creating a resource
4224 return (bus_generic_alloc_resource(dev, child,
4225 type, rid, start, end, count, flags));
4229 /* Reserve resources for this BAR if needed. */
4230 rle = resource_list_find(rl, type, *rid);
4232 res = pci_reserve_map(dev, child, type, rid, start, end,
/* Common exit: satisfy the request from the child's resource list. */
4238 return (resource_list_alloc(rl, dev, child, type, rid,
4239 start, end, count, flags));
/*
 * Bus activate_resource method: perform the generic activation, then
 * for our own children enable decoding — device ROM BARs get their
 * explicit enable bit set, and I/O or memory decode is turned on in
 * the command register via PCI_ENABLE_IO().
 * NOTE(review): elided listing — the early-error return, switch head
 * and closing braces are not visible here.
 */
4243 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4246 struct pci_devinfo *dinfo;
4249 error = bus_generic_activate_resource(dev, child, type, rid, r);
4253 /* Enable decoding in the command register when activating BARs. */
4254 if (device_get_parent(child) == dev) {
4255 /* Device ROMs need their decoding explicitly enabled. */
4256 dinfo = device_get_ivars(child);
4257 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
/* Re-write the ROM BAR with PCIM_BIOS_ENABLE or'd into the address. */
4258 pci_write_bar(child, pci_find_bar(child, rid),
4259 rman_get_start(r) | PCIM_BIOS_ENABLE);
4261 case SYS_RES_IOPORT:
4262 case SYS_RES_MEMORY:
4263 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus deactivate_resource method: generic deactivation first, then for
 * our own children clear the ROM BAR enable bit so the device stops
 * decoding the ROM range.  Mirrors pci_activate_resource() above.
 * NOTE(review): elided listing — the error check and the value written
 * to the ROM BAR are not visible here.
 */
4271 pci_deactivate_resource(device_t dev, device_t child, int type,
4272 int rid, struct resource *r)
4274 struct pci_devinfo *dinfo;
4277 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4281 /* Disable decoding for device ROMs. */
4282 if (device_get_parent(child) == dev) {
4283 dinfo = device_get_ivars(child);
4284 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4285 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Detach and destroy a PCI child device: detach its driver if attached,
 * turn off memory/I-O decode in the command register so freed ranges
 * stop being claimed, release or unreserve every entry on its resource
 * list (complaining if a resource is still active/busy), then delete
 * the devclass child.
 * NOTE(review): elided listing — loop braces and the release arguments
 * on the busy path are not fully visible here.
 */
4292 pci_delete_child(device_t dev, device_t child)
4294 struct resource_list_entry *rle;
4295 struct resource_list *rl;
4296 struct pci_devinfo *dinfo;
4298 dinfo = device_get_ivars(child);
4299 rl = &dinfo->resources;
4301 if (device_is_attached(child))
4302 device_detach(child);
4304 /* Turn off access to resources we're about to free */
4305 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4306 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4308 /* Free all allocated resources */
4309 STAILQ_FOREACH(rle, rl, link) {
/* A still-active or busy resource means a driver leaked it; warn. */
4311 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4312 resource_list_busy(rl, rle->type, rle->rid)) {
4313 pci_printf(&dinfo->cfg,
4314 "Resource still owned, oops. "
4315 "(type=%d, rid=%d, addr=%lx)\n",
4316 rle->type, rle->rid,
4317 rman_get_start(rle->res));
4318 bus_release_resource(child, rle->type, rle->rid,
/* Otherwise it was only a lazy reservation: give it back quietly. */
4321 resource_list_unreserve(rl, dev, child, rle->type,
4325 resource_list_free(rl);
4327 device_delete_child(dev, child);
/*
 * Bus delete_resource method: remove one entry from a direct child's
 * resource list.  Refuses (with a warning) if the resource is still
 * active or checked out; otherwise unreserves any lazy reservation and
 * deletes the list entry.
 * NOTE(review): elided listing — the early returns and the NULL-entry
 * check after resource_list_find() are not visible here.
 */
4332 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4334 struct pci_devinfo *dinfo;
4335 struct resource_list *rl;
4336 struct resource_list_entry *rle;
/* Only resources we manage for our immediate children are deletable. */
4338 if (device_get_parent(child) != dev)
4341 dinfo = device_get_ivars(child);
4342 rl = &dinfo->resources;
4343 rle = resource_list_find(rl, type, rid);
4348 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4349 resource_list_busy(rl, type, rid)) {
4350 device_printf(dev, "delete_resource: "
4351 "Resource still owned by child, oops. "
4352 "(type=%d, rid=%d, addr=%lx)\n",
4353 type, rid, rman_get_start(rle->res));
4356 resource_list_unreserve(rl, dev, child, type, rid);
4358 resource_list_delete(rl, type, rid);
/* Bus get_resource_list method: hand back the child's per-device
 * resource list stored in its pci_devinfo ivars. */
4361 struct resource_list *
4362 pci_get_resource_list (device_t dev, device_t child)
4364 struct pci_devinfo *dinfo = device_get_ivars(child);
4366 return (&dinfo->resources);
/* Bus get_dma_tag method: children share the bus's DMA tag kept in the
 * softc. */
4370 pci_get_dma_tag(device_t bus, device_t dev)
4372 struct pci_softc *sc = device_get_softc(bus);
4374 return (sc->sc_dma_tag);
/* Config-space read method: forward to the parent bridge using the
 * child's cached bus/slot/function address. */
4378 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4380 struct pci_devinfo *dinfo = device_get_ivars(child);
4381 pcicfgregs *cfg = &dinfo->cfg;
4383 return (PCIB_READ_CONFIG(device_get_parent(dev),
4384 cfg->bus, cfg->slot, cfg->func, reg, width));
/* Config-space write method: forward to the parent bridge using the
 * child's cached bus/slot/function address.  Does not update the
 * cached copy of the register (see pci_cfg_save for the catch-up). */
4388 pci_write_config_method(device_t dev, device_t child, int reg,
4389 uint32_t val, int width)
4391 struct pci_devinfo *dinfo = device_get_ivars(child);
4392 pcicfgregs *cfg = &dinfo->cfg;
4394 PCIB_WRITE_CONFIG(device_get_parent(dev),
4395 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Format the child's bus location ("slot=S function=F") into buf for
 * devctl/devinfo consumers. */
4399 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4403 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4404 pci_get_function(child));
/* Format the child's plug-and-play identity (vendor/device/subsystem
 * IDs plus the packed class bytes) into buf for driver matching. */
4409 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4412 struct pci_devinfo *dinfo;
4415 dinfo = device_get_ivars(child);
4417 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4418 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4419 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/* Ask the parent bridge to route the child's legacy interrupt.
 * NOTE(review): the intpin argument on the elided line 4431 is not
 * visible here. */
4425 pci_assign_interrupt_method(device_t dev, device_t child)
4427 struct pci_devinfo *dinfo = device_get_ivars(child);
4428 pcicfgregs *cfg = &dinfo->cfg;
4430 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialise the global device queue,
 * create the /dev control node (mode 0644, root:wheel) and pull in the
 * vendor database; on unload, destroy the control node.
 * NOTE(review): elided listing — the switch head, case labels and
 * return are not visible here.
 */
4435 pci_modevent(module_t mod, int what, void *arg)
/* static: the cdev handle must survive between load and unload events. */
4437 static struct cdev *pci_cdev;
4441 STAILQ_INIT(&pci_devq);
4443 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4445 pci_load_vendor_data();
4449 destroy_dev(pci_cdev);
/*
 * Restore a type-0 device's config space from the cached copy in
 * dinfo: power up to D0 first (D3->D0 resets BARs), then rewrite BARs,
 * command register, interrupt routing, latency/cache-line settings and
 * re-establish MSI/MSI-X state.  Counterpart of pci_cfg_save().
 */
4457 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
4461 * Only do header type 0 devices. Type 1 devices are bridges,
4462 * which we know need special treatment. Type 2 devices are
4463 * cardbus bridges which also require special treatment.
4464 * Other types are unknown, and we err on the side of safety
4467 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
4471 * Restore the device to full power mode. We must do this
4472 * before we restore the registers because moving from D3 to
4473 * D0 will cause the chip's BARs and some other registers to
4474 * be reset to some unknown power on reset values. Cut down
4475 * the noise on boot by doing nothing if we are already in
4478 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
4479 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4480 pci_restore_bars(dev);
4481 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
4482 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
4483 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
4484 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
4485 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
4486 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
4487 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
4488 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
4489 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
4491 /* Restore MSI and MSI-X configurations if they are present. */
4492 if (dinfo->cfg.msi.msi_location != 0)
4493 pci_resume_msi(dev);
4494 if (dinfo->cfg.msix.msix_location != 0)
4495 pci_resume_msix(dev);
/*
 * Snapshot a type-0 device's writable config-header registers into the
 * cached copy so pci_cfg_restore() can replay them, then — when
 * setstate is nonzero and policy (pci_do_power_nodriver) allows —
 * power the device down to D3, transitioning through D0 as the PCI
 * spec requires.
 * NOTE(review): elided listing — the setstate early-out, returns and
 * switch braces are not visible here.
 */
4499 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
4505 * Only do header type 0 devices. Type 1 devices are bridges, which
4506 * we know need special treatment. Type 2 devices are cardbus bridges
4507 * which also require special treatment. Other types are unknown, and
4508 * we err on the side of safety by ignoring them. Powering down
4509 * bridges should not be undertaken lightly.
4511 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
4515 * Some drivers apparently write to these registers w/o updating our
4516 * cached copy. No harm happens if we update the copy, so do so here
4517 * so we can restore them. The COMMAND register is modified by the
4518 * bus w/o updating the cache. This should represent the normally
4519 * writable portion of the 'defined' part of type 0 headers. In
4520 * theory we also need to save/restore the PCI capability structures
4521 * we know about, but apart from power we don't know any that are
4524 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
4525 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4526 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
4527 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
4528 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
4529 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
4530 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
4531 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
4532 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
4533 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
4534 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
4535 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
4536 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
4537 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
4538 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
4541 * don't set the state for display devices, base peripherals and
4542 * memory devices since bad things happen when they are powered down.
4543 * We should (a) have drivers that can easily detach and (b) use
4544 * generic drivers for these devices so that some device actually
4545 * attaches. We need to make sure that when we implement (a) we don't
4546 * power the device down on a reattach.
4548 cls = pci_get_class(dev);
/* Policy levels 0-3: each level exempts fewer device classes. */
4551 switch (pci_do_power_nodriver)
4553 case 0: /* NO powerdown at all */
4555 case 1: /* Conservative about what to power down */
4556 if (cls == PCIC_STORAGE)
4559 case 2: /* Aggressive about what to power down */
4560 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4561 cls == PCIC_BASEPERIPH)
4564 case 3: /* Power down everything */
4568 * PCI spec says we can only go into D3 state from D0 state.
4569 * Transition from D[12] into D0 before going to D3 state.
4571 ps = pci_get_powerstate(dev);
4572 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4573 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4574 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4575 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
4578 /* Wrapper APIs suitable for device driver use. */
/* Save config state only (setstate=0 — never powers the device down). */
4580 pci_save_state(device_t dev)
4582 struct pci_devinfo *dinfo;
4584 dinfo = device_get_ivars(dev);
4585 pci_cfg_save(dev, dinfo, 0);
4589 pci_restore_state(device_t dev)
4591 struct pci_devinfo *dinfo;
4593 dinfo = device_get_ivars(dev);
4594 pci_cfg_restore(dev, dinfo);