2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
/*
 * True when 'reg' is the expansion-ROM BAR register for the given
 * header type: PCIR_BIOS for type-0 (normal) functions, PCIR_BIOS_1
 * for type-1 (PCI-PCI bridge) functions.  'reg' is parenthesized to
 * keep the comparison well-formed for expression arguments.
 */
#define	PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && (reg) == PCIR_BIOS) || \
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && (reg) == PCIR_BIOS_1))
77 static int pci_has_quirk(uint32_t devid, int quirk);
78 static pci_addr_t pci_mapbase(uint64_t mapreg);
79 static const char *pci_maptype(uint64_t mapreg);
80 static int pci_mapsize(uint64_t testval);
81 static int pci_maprange(uint64_t mapreg);
82 static pci_addr_t pci_rombase(uint64_t mapreg);
83 static int pci_romsize(uint64_t testval);
84 static void pci_fixancient(pcicfgregs *cfg);
85 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
87 static int pci_porten(device_t dev);
88 static int pci_memen(device_t dev);
89 static void pci_assign_interrupt(device_t bus, device_t dev,
91 static int pci_add_map(device_t bus, device_t dev, int reg,
92 struct resource_list *rl, int force, int prefetch);
93 static int pci_probe(device_t dev);
94 static int pci_attach(device_t dev);
95 static void pci_load_vendor_data(void);
96 static int pci_describe_parse_line(char **ptr, int *vendor,
97 int *device, char **desc);
98 static char *pci_describe_device(device_t dev);
99 static bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev);
100 static int pci_modevent(module_t mod, int what, void *arg);
101 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
103 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
104 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
105 int reg, uint32_t *data);
107 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
108 int reg, uint32_t data);
110 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
111 static void pci_disable_msi(device_t dev);
112 static void pci_enable_msi(device_t dev, uint64_t address,
114 static void pci_enable_msix(device_t dev, u_int index,
115 uint64_t address, uint32_t data);
116 static void pci_mask_msix(device_t dev, u_int index);
117 static void pci_unmask_msix(device_t dev, u_int index);
118 static int pci_msi_blacklisted(void);
119 static int pci_msix_blacklisted(void);
120 static void pci_resume_msi(device_t dev);
121 static void pci_resume_msix(device_t dev);
122 static int pci_remap_intr_method(device_t bus, device_t dev,
125 static device_method_t pci_methods[] = {
126 /* Device interface */
127 DEVMETHOD(device_probe, pci_probe),
128 DEVMETHOD(device_attach, pci_attach),
129 DEVMETHOD(device_detach, bus_generic_detach),
130 DEVMETHOD(device_shutdown, bus_generic_shutdown),
131 DEVMETHOD(device_suspend, pci_suspend),
132 DEVMETHOD(device_resume, pci_resume),
135 DEVMETHOD(bus_print_child, pci_print_child),
136 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
137 DEVMETHOD(bus_read_ivar, pci_read_ivar),
138 DEVMETHOD(bus_write_ivar, pci_write_ivar),
139 DEVMETHOD(bus_driver_added, pci_driver_added),
140 DEVMETHOD(bus_setup_intr, pci_setup_intr),
141 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
143 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
144 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
145 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
146 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
147 DEVMETHOD(bus_delete_resource, pci_delete_resource),
148 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
149 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
150 DEVMETHOD(bus_release_resource, pci_release_resource),
151 DEVMETHOD(bus_activate_resource, pci_activate_resource),
152 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
153 DEVMETHOD(bus_child_detached, pci_child_detached),
154 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
155 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
156 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
159 DEVMETHOD(pci_read_config, pci_read_config_method),
160 DEVMETHOD(pci_write_config, pci_write_config_method),
161 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
162 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
163 DEVMETHOD(pci_enable_io, pci_enable_io_method),
164 DEVMETHOD(pci_disable_io, pci_disable_io_method),
165 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
166 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
167 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
168 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
169 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
170 DEVMETHOD(pci_find_cap, pci_find_cap_method),
171 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
172 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
173 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
174 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
175 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
176 DEVMETHOD(pci_release_msi, pci_release_msi_method),
177 DEVMETHOD(pci_msi_count, pci_msi_count_method),
178 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Declare the pci driver class; per-instance softc is struct pci_softc. */
DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
static devclass_t pci_devclass;
/* Attach pci buses under pcib (PCI bridge) parents; pci_modevent handles load/unload. */
DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
MODULE_VERSION(pci, 1);
/* In-memory vendor/device description database; presumably filled by pci_load_vendor_data() — confirm in that function. */
static char	*pci_vendordata;
static size_t	pci_vendordata_size;
193 uint32_t devid; /* Vendor/device of the card */
195 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
196 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
197 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
198 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
199 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
204 static const struct pci_quirk pci_quirks[] = {
205 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
206 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
207 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
208 /* As does the Serverworks OSB4 (the SMBus mapping register) */
209 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
212 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
213 * or the CMIC-SL (AKA ServerWorks GC_LE).
215 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
216 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
219 * MSI doesn't work on earlier Intel chipsets including
220 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
222 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
223 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
224 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
225 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
226 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
227 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
228 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
231 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
234 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
237 * MSI-X allocation doesn't work properly for devices passed through
238 * by VMware up to at least ESXi 5.1.
240 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
241 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
244 * Some virtualization environments emulate an older chipset
245 * but support MSI just fine. QEMU uses the Intel 82440.
247 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
250 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
251 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
252 * It prevents us from attaching hpet(4) when the bit is unset.
253 * Note this quirk only affects SB600 revision A13 and earlier.
254 * For SB600 A21 and later, firmware must set the bit to hide it.
255 * For SB700 and later, it is unused and hardcoded to zero.
257 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
/* map register information */
#define	PCI_MAPMEM	0x01	/* memory map */
#define	PCI_MAPMEMP	0x02	/* prefetchable memory map */
#define	PCI_MAPPORT	0x04	/* port map */
struct devlist pci_devq;		/* global list of enumerated PCI functions */
uint32_t pci_generation;		/* presumably bumped as pci_devq changes — confirm in pci_read_device */
uint32_t pci_numdevs = 0;		/* count of entries on pci_devq */
static int pcie_chipset, pcix_chipset;	/* set once a PCIe / PCI-X capable device is seen */
273 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
275 static int pci_enable_io_modes = 1;
276 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
277 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
278 &pci_enable_io_modes, 1,
279 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
280 enable these bits correctly. We'd like to do this all the time, but there\n\
281 are some peripherals that this causes problems with.");
283 static int pci_do_realloc_bars = 0;
284 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
285 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
286 &pci_do_realloc_bars, 0,
287 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
289 static int pci_do_power_nodriver = 0;
290 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
291 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
292 &pci_do_power_nodriver, 0,
293 "Place a function into D3 state when no driver attaches to it. 0 means\n\
294 disable. 1 means conservatively place devices into D3 state. 2 means\n\
295 agressively place devices into D3 state. 3 means put absolutely everything\n\
298 int pci_do_power_resume = 1;
299 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
300 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
301 &pci_do_power_resume, 1,
302 "Transition from D3 -> D0 on resume.");
304 int pci_do_power_suspend = 1;
305 TUNABLE_INT("hw.pci.do_power_suspend", &pci_do_power_suspend);
306 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RW,
307 &pci_do_power_suspend, 1,
308 "Transition from D0 -> D3 on suspend.");
310 static int pci_do_msi = 1;
311 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
312 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
313 "Enable support for MSI interrupts");
315 static int pci_do_msix = 1;
316 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
317 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
318 "Enable support for MSI-X interrupts");
320 static int pci_honor_msi_blacklist = 1;
321 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
322 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
323 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
325 #if defined(__i386__) || defined(__amd64__)
326 static int pci_usb_takeover = 1;
328 static int pci_usb_takeover = 0;
330 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
331 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
332 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
333 Disable this if you depend on BIOS emulation of USB devices, that is\n\
334 you use USB devices (like keyboard or mouse) but do not load USB drivers");
337 pci_has_quirk(uint32_t devid, int quirk)
339 const struct pci_quirk *q;
341 for (q = &pci_quirks[0]; q->devid; q++) {
342 if (q->devid == devid && q->type == quirk)
348 /* Find a device_t by bus/slot/function in domain 0 */
351 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
354 return (pci_find_dbsf(0, bus, slot, func));
357 /* Find a device_t by domain/bus/slot/function */
360 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
362 struct pci_devinfo *dinfo;
364 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
365 if ((dinfo->cfg.domain == domain) &&
366 (dinfo->cfg.bus == bus) &&
367 (dinfo->cfg.slot == slot) &&
368 (dinfo->cfg.func == func)) {
369 return (dinfo->cfg.dev);
376 /* Find a device_t by vendor/device ID */
379 pci_find_device(uint16_t vendor, uint16_t device)
381 struct pci_devinfo *dinfo;
383 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
384 if ((dinfo->cfg.vendor == vendor) &&
385 (dinfo->cfg.device == device)) {
386 return (dinfo->cfg.dev);
394 pci_find_class(uint8_t class, uint8_t subclass)
396 struct pci_devinfo *dinfo;
398 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
399 if (dinfo->cfg.baseclass == class &&
400 dinfo->cfg.subclass == subclass) {
401 return (dinfo->cfg.dev);
409 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
414 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
417 retval += vprintf(fmt, ap);
422 /* return base address of memory or port map */
425 pci_mapbase(uint64_t mapreg)
428 if (PCI_BAR_MEM(mapreg))
429 return (mapreg & PCIM_BAR_MEM_BASE);
431 return (mapreg & PCIM_BAR_IO_BASE);
434 /* return map type of memory or port map */
437 pci_maptype(uint64_t mapreg)
440 if (PCI_BAR_IO(mapreg))
442 if (mapreg & PCIM_BAR_MEM_PREFETCH)
443 return ("Prefetchable Memory");
447 /* return log2 of map size decoded for memory or port map */
450 pci_mapsize(uint64_t testval)
454 testval = pci_mapbase(testval);
457 while ((testval & 1) == 0)
466 /* return base address of device ROM */
469 pci_rombase(uint64_t mapreg)
472 return (mapreg & PCIM_BIOS_ADDR_MASK);
475 /* return log2 of map size decided for device ROM */
478 pci_romsize(uint64_t testval)
482 testval = pci_rombase(testval);
485 while ((testval & 1) == 0)
494 /* return log2 of address range supported by map register */
497 pci_maprange(uint64_t mapreg)
501 if (PCI_BAR_IO(mapreg))
504 switch (mapreg & PCIM_BAR_MEM_TYPE) {
505 case PCIM_BAR_MEM_32:
508 case PCIM_BAR_MEM_1MB:
511 case PCIM_BAR_MEM_64:
518 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
521 pci_fixancient(pcicfgregs *cfg)
523 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
526 /* PCI to PCI bridges use header type 1 */
527 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
528 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
531 /* extract header type specific config data */
534 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
536 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
537 switch (cfg->hdrtype & PCIM_HDRTYPE) {
538 case PCIM_HDRTYPE_NORMAL:
539 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
540 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
541 cfg->nummaps = PCI_MAXMAPS_0;
543 case PCIM_HDRTYPE_BRIDGE:
544 cfg->nummaps = PCI_MAXMAPS_1;
546 case PCIM_HDRTYPE_CARDBUS:
547 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
548 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
549 cfg->nummaps = PCI_MAXMAPS_2;
555 /* read configuration header into pcicfgregs structure */
557 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
559 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
560 pcicfgregs *cfg = NULL;
561 struct pci_devinfo *devlist_entry;
562 struct devlist *devlist_head;
564 devlist_head = &pci_devq;
566 devlist_entry = NULL;
568 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
569 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
570 if (devlist_entry == NULL)
573 cfg = &devlist_entry->cfg;
579 cfg->vendor = REG(PCIR_VENDOR, 2);
580 cfg->device = REG(PCIR_DEVICE, 2);
581 cfg->cmdreg = REG(PCIR_COMMAND, 2);
582 cfg->statreg = REG(PCIR_STATUS, 2);
583 cfg->baseclass = REG(PCIR_CLASS, 1);
584 cfg->subclass = REG(PCIR_SUBCLASS, 1);
585 cfg->progif = REG(PCIR_PROGIF, 1);
586 cfg->revid = REG(PCIR_REVID, 1);
587 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
588 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
589 cfg->lattimer = REG(PCIR_LATTIMER, 1);
590 cfg->intpin = REG(PCIR_INTPIN, 1);
591 cfg->intline = REG(PCIR_INTLINE, 1);
593 cfg->mingnt = REG(PCIR_MINGNT, 1);
594 cfg->maxlat = REG(PCIR_MAXLAT, 1);
596 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
597 cfg->hdrtype &= ~PCIM_MFDEV;
598 STAILQ_INIT(&cfg->maps);
601 pci_hdrtypedata(pcib, b, s, f, cfg);
603 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
604 pci_read_cap(pcib, cfg);
606 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
608 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
609 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
610 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
611 devlist_entry->conf.pc_sel.pc_func = cfg->func;
612 devlist_entry->conf.pc_hdr = cfg->hdrtype;
614 devlist_entry->conf.pc_subvendor = cfg->subvendor;
615 devlist_entry->conf.pc_subdevice = cfg->subdevice;
616 devlist_entry->conf.pc_vendor = cfg->vendor;
617 devlist_entry->conf.pc_device = cfg->device;
619 devlist_entry->conf.pc_class = cfg->baseclass;
620 devlist_entry->conf.pc_subclass = cfg->subclass;
621 devlist_entry->conf.pc_progif = cfg->progif;
622 devlist_entry->conf.pc_revid = cfg->revid;
627 return (devlist_entry);
632 pci_read_cap(device_t pcib, pcicfgregs *cfg)
634 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
635 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
636 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
640 int ptr, nextptr, ptrptr;
642 switch (cfg->hdrtype & PCIM_HDRTYPE) {
643 case PCIM_HDRTYPE_NORMAL:
644 case PCIM_HDRTYPE_BRIDGE:
645 ptrptr = PCIR_CAP_PTR;
647 case PCIM_HDRTYPE_CARDBUS:
648 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
651 return; /* no extended capabilities support */
653 nextptr = REG(ptrptr, 1); /* sanity check? */
656 * Read capability entries.
658 while (nextptr != 0) {
661 printf("illegal PCI extended capability offset %d\n",
665 /* Find the next entry */
667 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
669 /* Process this entry */
670 switch (REG(ptr + PCICAP_ID, 1)) {
671 case PCIY_PMG: /* PCI power management */
672 if (cfg->pp.pp_cap == 0) {
673 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
674 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
675 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
676 if ((nextptr - ptr) > PCIR_POWER_DATA)
677 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
680 case PCIY_HT: /* HyperTransport */
681 /* Determine HT-specific capability type. */
682 val = REG(ptr + PCIR_HT_COMMAND, 2);
684 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
685 cfg->ht.ht_slave = ptr;
687 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
688 switch (val & PCIM_HTCMD_CAP_MASK) {
689 case PCIM_HTCAP_MSI_MAPPING:
690 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
691 /* Sanity check the mapping window. */
692 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
695 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
697 if (addr != MSI_INTEL_ADDR_BASE)
699 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
700 cfg->domain, cfg->bus,
701 cfg->slot, cfg->func,
704 addr = MSI_INTEL_ADDR_BASE;
706 cfg->ht.ht_msimap = ptr;
707 cfg->ht.ht_msictrl = val;
708 cfg->ht.ht_msiaddr = addr;
713 case PCIY_MSI: /* PCI MSI */
714 cfg->msi.msi_location = ptr;
715 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
716 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
717 PCIM_MSICTRL_MMC_MASK)>>1);
719 case PCIY_MSIX: /* PCI MSI-X */
720 cfg->msix.msix_location = ptr;
721 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
722 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
723 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
724 val = REG(ptr + PCIR_MSIX_TABLE, 4);
725 cfg->msix.msix_table_bar = PCIR_BAR(val &
727 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
728 val = REG(ptr + PCIR_MSIX_PBA, 4);
729 cfg->msix.msix_pba_bar = PCIR_BAR(val &
731 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
733 case PCIY_VPD: /* PCI Vital Product Data */
734 cfg->vpd.vpd_reg = ptr;
737 /* Should always be true. */
738 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
739 PCIM_HDRTYPE_BRIDGE) {
740 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
741 cfg->subvendor = val & 0xffff;
742 cfg->subdevice = val >> 16;
745 case PCIY_PCIX: /* PCI-X */
747 * Assume we have a PCI-X chipset if we have
748 * at least one PCI-PCI bridge with a PCI-X
749 * capability. Note that some systems with
750 * PCI-express or HT chipsets might match on
751 * this check as well.
753 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
756 cfg->pcix.pcix_location = ptr;
758 case PCIY_EXPRESS: /* PCI-express */
760 * Assume we have a PCI-express chipset if we have
761 * at least one PCI-express device.
764 cfg->pcie.pcie_location = ptr;
765 val = REG(ptr + PCIER_FLAGS, 2);
766 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
773 #if defined(__powerpc__)
775 * Enable the MSI mapping window for all HyperTransport
776 * slaves. PCI-PCI bridges have their windows enabled via
779 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
780 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
782 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
783 cfg->domain, cfg->bus, cfg->slot, cfg->func);
784 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
785 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
789 /* REG and WREG use carry through to next functions */
793 * PCI Vital Product Data
796 #define PCI_VPD_TIMEOUT 1000000
799 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
801 int count = PCI_VPD_TIMEOUT;
803 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
805 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
807 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
810 DELAY(1); /* limit looping */
812 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
819 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
821 int count = PCI_VPD_TIMEOUT;
823 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
825 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
826 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
827 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
830 DELAY(1); /* limit looping */
837 #undef PCI_VPD_TIMEOUT
839 struct vpd_readstate {
849 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
854 if (vrs->bytesinval == 0) {
855 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
857 vrs->val = le32toh(reg);
859 byte = vrs->val & 0xff;
862 vrs->val = vrs->val >> 8;
863 byte = vrs->val & 0xff;
/*
 * Parse the function's VPD (Vital Product Data) into cfg->vpd: the
 * identifier string (vpd_ident), the read-only keyword array (vpd_ros,
 * VPD-R section) and the writable keyword array (vpd_w, VPD-W section),
 * validating the "RV" checksum keyword as it goes, and finally marking
 * the result cached (vpd_cached = 1) so later queries need no re-read.
 * Implemented as a byte-at-a-time state machine over vpd_nextbyte().
 *
 * NOTE(review): this region of the file has lost many short lines in
 * extraction (declarations, braces, break statements, state
 * transitions); the surviving lines are preserved verbatim below and
 * should be reconciled against the canonical source before compiling.
 */
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
	struct vpd_readstate vrs;
	int alloc, off;		/* alloc/off for RO/W arrays */
	/* init vpd reader */
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
		if (vpd_nextbyte(&vrs, &byte)) {
		printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
		case 0:		/* item name */
				if (vpd_nextbyte(&vrs, &byte2)) {
				if (vpd_nextbyte(&vrs, &byte2)) {
				remain |= byte2 << 8;
				/* remain may not exceed the VPD address space */
				if (remain > (0x7f*4 - vrs.off)) {
					    "invalid VPD data, remain %#x\n",
				name = (byte >> 3) & 0xf;
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = malloc(remain + 1,
			case 0x10:	/* VPD-R */
				cfg->vpd.vpd_ros = malloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
			case 0x11:	/* VPD-W */
				cfg->vpd.vpd_w = malloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
			default:	/* Invalid data, abort */
		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
				cfg->vpd.vpd_ident[i] = '\0';
		case 2:	/* VPD-R Keyword Header */
				cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
				 * if this happens, we can't trust the rest
				pci_printf(cfg, "bad keyword length: %d\n",
			} else if (dflen == 0) {
				cfg->vpd.vpd_ros[off].value = malloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
				cfg->vpd.vpd_ros[off].value = malloc(
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			/* keep in sync w/ state 3's transistions */
			if (dflen == 0 && remain == 0)
			else if (dflen == 0)
		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
					    "bad VPD cksum, remain %hhu\n",
			/* keep in sync w/ state 2's transistions */
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
				if (dflen == 0 && remain == 0) {
					cfg->vpd.vpd_rocnt = off;
					cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
					    off * sizeof(*cfg->vpd.vpd_ros),
					    M_DEVBUF, M_WAITOK | M_ZERO);
				} else if (dflen == 0)
		case 5:	/* VPD-W Keyword Header */
				cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			/* keep in sync w/ state 6's transistions */
			if (dflen == 0 && remain == 0)
			else if (dflen == 0)
		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			/* keep in sync w/ state 5's transistions */
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
				if (dflen == 0 && remain == 0) {
					cfg->vpd.vpd_wcnt = off;
					cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
					    off * sizeof(*cfg->vpd.vpd_w),
					    M_DEVBUF, M_WAITOK | M_ZERO);
				} else if (dflen == 0)
			pci_printf(cfg, "invalid state: %d\n", state);
	/* Checksum failure invalidates only the read-only section. */
	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			free(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		/* I/O error, clean up */
		pci_printf(cfg, "failed to read VPD data.\n");
		if (cfg->vpd.vpd_ident != NULL) {
			free(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			free(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
	cfg->vpd.vpd_cached = 1;
1145 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1147 struct pci_devinfo *dinfo = device_get_ivars(child);
1148 pcicfgregs *cfg = &dinfo->cfg;
1150 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1151 pci_read_vpd(device_get_parent(dev), cfg);
1153 *identptr = cfg->vpd.vpd_ident;
1155 if (*identptr == NULL)
1162 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1165 struct pci_devinfo *dinfo = device_get_ivars(child);
1166 pcicfgregs *cfg = &dinfo->cfg;
1169 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1170 pci_read_vpd(device_get_parent(dev), cfg);
1172 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1173 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1174 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1175 *vptr = cfg->vpd.vpd_ros[i].value;
1184 * Find the requested HyperTransport capability and return the offset
1185 * in configuration space via the pointer provided. The function
1186 * returns 0 on success and an error code otherwise.
1189 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1194 error = pci_find_cap(child, PCIY_HT, &ptr);
1199 * Traverse the capabilities list checking each HT capability
1200 * to see if it matches the requested HT capability.
1203 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1204 if (capability == PCIM_HTCAP_SLAVE ||
1205 capability == PCIM_HTCAP_HOST)
1208 val &= PCIM_HTCMD_CAP_MASK;
1209 if (val == capability) {
1215 /* Skip to the next HT capability. */
1217 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1218 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1227 * Find the requested capability and return the offset in
1228 * configuration space via the pointer provided. The function returns
1229 * 0 on success and an error code otherwise.
1232 pci_find_cap_method(device_t dev, device_t child, int capability,
1235 struct pci_devinfo *dinfo = device_get_ivars(child);
1236 pcicfgregs *cfg = &dinfo->cfg;
1241 * Check the CAP_LIST bit of the PCI status register first.
1243 status = pci_read_config(child, PCIR_STATUS, 2);
1244 if (!(status & PCIM_STATUS_CAPPRESENT))
1248 * Determine the start pointer of the capabilities list.
1250 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1251 case PCIM_HDRTYPE_NORMAL:
1252 case PCIM_HDRTYPE_BRIDGE:
1255 case PCIM_HDRTYPE_CARDBUS:
1256 ptr = PCIR_CAP_PTR_2;
1260 return (ENXIO); /* no extended capabilities support */
1262 ptr = pci_read_config(child, ptr, 1);
1265 * Traverse the capabilities list.
1268 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1273 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1280 * Find the requested extended capability and return the offset in
1281 * configuration space via the pointer provided. The function returns
1282 * 0 on success and an error code otherwise.
1285 pci_find_extcap_method(device_t dev, device_t child, int capability,
1288 struct pci_devinfo *dinfo = device_get_ivars(child);
1289 pcicfgregs *cfg = &dinfo->cfg;
1293 /* Only supported for PCI-express devices. */
1294 if (cfg->pcie.pcie_location == 0)
1298 ecap = pci_read_config(child, ptr, 4);
1299 if (ecap == 0xffffffff || ecap == 0)
1302 if (PCI_EXTCAP_ID(ecap) == capability) {
1307 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1310 ecap = pci_read_config(child, ptr, 4);
1317 * Support for MSI-X message interrupts.
1320 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1322 struct pci_devinfo *dinfo = device_get_ivars(dev);
1323 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1326 KASSERT(msix->msix_table_len > index, ("bogus index"));
1327 offset = msix->msix_table_offset + index * 16;
1328 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1329 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1330 bus_write_4(msix->msix_table_res, offset + 8, data);
1332 /* Enable MSI -> HT mapping. */
1333 pci_ht_map_msi(dev, address);
1337 pci_mask_msix(device_t dev, u_int index)
1339 struct pci_devinfo *dinfo = device_get_ivars(dev);
1340 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1341 uint32_t offset, val;
1343 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1344 offset = msix->msix_table_offset + index * 16 + 12;
1345 val = bus_read_4(msix->msix_table_res, offset);
1346 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1347 val |= PCIM_MSIX_VCTRL_MASK;
1348 bus_write_4(msix->msix_table_res, offset, val);
1353 pci_unmask_msix(device_t dev, u_int index)
1355 struct pci_devinfo *dinfo = device_get_ivars(dev);
1356 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1357 uint32_t offset, val;
1359 KASSERT(msix->msix_table_len > index, ("bogus index"));
1360 offset = msix->msix_table_offset + index * 16 + 12;
1361 val = bus_read_4(msix->msix_table_res, offset);
1362 if (val & PCIM_MSIX_VCTRL_MASK) {
1363 val &= ~PCIM_MSIX_VCTRL_MASK;
1364 bus_write_4(msix->msix_table_res, offset, val);
1369 pci_pending_msix(device_t dev, u_int index)
1371 struct pci_devinfo *dinfo = device_get_ivars(dev);
1372 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1373 uint32_t offset, bit;
1375 KASSERT(msix->msix_table_len > index, ("bogus index"));
1376 offset = msix->msix_pba_offset + (index / 32) * 4;
1377 bit = 1 << index % 32;
1378 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1382 * Restore MSI-X registers and table during resume. If MSI-X is
1383 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * Restore MSI-X state on resume: re-mask every hardware vector, then
 * re-program and unmask only the table slots that have an assigned
 * vector with at least one interrupt handler, and finally rewrite the
 * saved MSI-X control register.
 * NOTE(review): braces/continue statements are elided in this excerpt.
 */
1387 pci_resume_msix(device_t dev)
1389 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1390 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1391 	struct msix_table_entry *mte;
1392 	struct msix_vector *mv;
1395 	if (msix->msix_alloc > 0) {
1396 		/* First, mask all vectors. */
1397 		for (i = 0; i < msix->msix_msgnum; i++)
1398 			pci_mask_msix(dev, i);
1400 		/* Second, program any messages with at least one handler. */
1401 		for (i = 0; i < msix->msix_table_len; i++) {
1402 			mte = &msix->msix_table[i];
/* Slots with no vector or no handlers stay masked across resume. */
1403 			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1405 			mv = &msix->msix_vectors[mte->mte_vector - 1];
1406 			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1407 			pci_unmask_msix(dev, i);
/* Restore the cached control register (enable bit included). */
1410 	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1411 	    msix->msix_ctrl, 2);
1415 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1416 * returned in *count. After this function returns, each message will be
1417 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * Bus method backing pci_alloc_msix(): validate preconditions, allocate
 * up to *count MSI-X messages from the parent bridge, expose them as
 * SYS_RES_IRQ rids 1..actual, build the virtual vector/table arrays and
 * enable MSI-X in the capability's control register.
 * NOTE(review): this excerpt elides error returns, braces, and the
 * bootverbose conditionals; 'actual' is presumably set from the number
 * of successful PCIB_ALLOC_MSIX calls — confirm against the full source.
 */
1420 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1422 	struct pci_devinfo *dinfo = device_get_ivars(child);
1423 	pcicfgregs *cfg = &dinfo->cfg;
1424 	struct resource_list_entry *rle;
1425 	int actual, error, i, irq, max;
1427 	/* Don't let count == 0 get us into trouble. */
1431 	/* If rid 0 is allocated, then fail. */
1432 	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1433 	if (rle != NULL && rle->res != NULL)
1436 	/* Already have allocated messages? */
1437 	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1440 	/* If MSI-X is blacklisted for this system, fail. */
1441 	if (pci_msix_blacklisted())
1444 	/* MSI-X capability present? */
1445 	if (cfg->msix.msix_location == 0 || !pci_do_msix)
/* The table and PBA BARs must already be mapped and active by the driver. */
1448 	/* Make sure the appropriate BARs are mapped. */
1449 	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1450 	    cfg->msix.msix_table_bar);
1451 	if (rle == NULL || rle->res == NULL ||
1452 	    !(rman_get_flags(rle->res) & RF_ACTIVE))
1454 	cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1455 	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1456 		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1457 		    cfg->msix.msix_pba_bar);
1458 		if (rle == NULL || rle->res == NULL ||
1459 		    !(rman_get_flags(rle->res) & RF_ACTIVE))
1462 	cfg->msix.msix_pba_res = rle->res;
1465 		device_printf(child,
1466 		    "attempting to allocate %d MSI-X vectors (%d supported)\n",
1467 		    *count, cfg->msix.msix_msgnum);
/* Never ask the bridge for more vectors than the device advertises. */
1468 	max = min(*count, cfg->msix.msix_msgnum);
1469 	for (i = 0; i < max; i++) {
1470 		/* Allocate a message. */
1471 		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
/* Each allocated IRQ becomes a 1-length SYS_RES_IRQ entry at rid i+1. */
1477 		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1483 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1485 			device_printf(child, "using IRQ %lu for MSI-X\n",
1491 			 * Be fancy and try to print contiguous runs of
1492 			 * IRQ values as ranges.  'irq' is the previous IRQ.
1493 			 * 'run' is true if we are in a range.
1495 			device_printf(child, "using IRQs %lu", rle->start);
1498 			for (i = 1; i < actual; i++) {
1499 				rle = resource_list_find(&dinfo->resources,
1500 				    SYS_RES_IRQ, i + 1);
1502 				/* Still in a run? */
1503 				if (rle->start == irq + 1) {
1509 				/* Finish previous range. */
1515 				/* Start new range. */
1516 				printf(",%lu", rle->start);
1520 			/* Unfinished range? */
1523 			printf(" for MSI-X\n");
/* Start with every hardware vector masked; handlers unmask later. */
1527 	/* Mask all vectors. */
1528 	for (i = 0; i < cfg->msix.msix_msgnum; i++)
1529 		pci_mask_msix(child, i);
1531 	/* Allocate and initialize vector data and virtual table. */
1532 	cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1533 	    M_DEVBUF, M_WAITOK | M_ZERO);
1534 	cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1535 	    M_DEVBUF, M_WAITOK | M_ZERO);
/* Default 1:1 mapping: table slot i uses vector i+1. */
1536 	for (i = 0; i < actual; i++) {
1537 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1538 		cfg->msix.msix_vectors[i].mv_irq = rle->start;
1539 		cfg->msix.msix_table[i].mte_vector = i + 1;
1542 	/* Update control register to enable MSI-X. */
1543 	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1544 	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1545 	    cfg->msix.msix_ctrl, 2);
1547 	/* Update counts of alloc'd messages. */
1548 	cfg->msix.msix_alloc = actual;
1549 	cfg->msix.msix_table_len = actual;
1555 * By default, pci_alloc_msix() will assign the allocated IRQ
1556 * resources consecutively to the first N messages in the MSI-X table.
1557 * However, device drivers may want to use different layouts if they
1558 * either receive fewer messages than they asked for, or they wish to
1559 * populate the MSI-X table sparsely. This method allows the driver
1560 * to specify what layout it wants. It must be called after a
1561 * successful pci_alloc_msix() but before any of the associated
1562 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1564 * The 'vectors' array contains 'count' message vectors. The array
1565 * maps directly to the MSI-X table in that index 0 in the array
1566 * specifies the vector for the first message in the MSI-X table, etc.
1567 * The vector value in each array index can either be 0 to indicate
1568 * that no vector should be assigned to a message slot, or it can be a
1569 * number from 1 to N (where N is the count returned from a
1570 * successful call to pci_alloc_msix()) to indicate which message
1571 * vector (IRQ) to be used for the corresponding message.
1573 * On successful return, each message with a non-zero vector will have
1574 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1575 * 1. Additionally, if any of the IRQs allocated via the previous
1576 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1577 * will be freed back to the system automatically.
1579 * For example, suppose a driver has a MSI-X table with 6 messages and
1580 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1581 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1582 * C. After the call to pci_alloc_msix(), the device will be setup to
1583 * have an MSI-X table of ABC--- (where - means no vector assigned).
1584 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1585 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1586 * be freed back to the system. This device will also have valid
1587 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1589 * In any case, the SYS_RES_IRQ rid X will always map to the message
1590 * at MSI-X table index X - 1 and will only be valid if a vector is
1591 * assigned to that table entry.
/*
 * Bus method backing pci_remap_msix(): install a driver-supplied
 * slot->vector layout (see the block comment above), releasing any
 * vectors the new layout no longer uses and rebuilding the virtual
 * table and resource list rids accordingly.
 * NOTE(review): error returns, braces, and the bootverbose printing
 * details are elided in this excerpt.
 */
1594 pci_remap_msix_method(device_t dev, device_t child, int count,
1595     const u_int *vectors)
1597 	struct pci_devinfo *dinfo = device_get_ivars(child);
1598 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1599 	struct resource_list_entry *rle;
1600 	int i, irq, j, *used;
1603 	 * Have to have at least one message in the table but the
1604 	 * table can't be bigger than the actual MSI-X table in the
1607 	if (count == 0 || count > msix->msix_msgnum)
/* Every referenced vector number must have been previously allocated. */
1610 	/* Sanity check the vectors. */
1611 	for (i = 0; i < count; i++)
1612 		if (vectors[i] > msix->msix_alloc)
1616 	 * Make sure there aren't any holes in the vectors to be used.
1617 	 * It's a big pain to support it, and it doesn't really make
1618 	 * sense anyway.  Also, at least one vector must be used.
1620 	used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1622 	for (i = 0; i < count; i++)
1623 		if (vectors[i] != 0)
1624 			used[vectors[i] - 1] = 1;
/* A used vector following an unused one is a hole; reject it. */
1625 	for (i = 0; i < msix->msix_alloc - 1; i++)
1626 		if (used[i] == 0 && used[i + 1] == 1) {
1627 			free(used, M_DEVBUF);
1631 		free(used, M_DEVBUF);
/* Remapping is illegal once handlers or IRQ resources are in use. */
1635 	/* Make sure none of the resources are allocated. */
1636 	for (i = 0; i < msix->msix_table_len; i++) {
1637 		if (msix->msix_table[i].mte_vector == 0)
1639 		if (msix->msix_table[i].mte_handlers > 0)
1641 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1642 		KASSERT(rle != NULL, ("missing resource"));
1643 		if (rle->res != NULL)
1647 	/* Free the existing resource list entries. */
1648 	for (i = 0; i < msix->msix_table_len; i++) {
1649 		if (msix->msix_table[i].mte_vector == 0)
1651 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1655 	 * Build the new virtual table keeping track of which vectors are
1658 	free(msix->msix_table, M_DEVBUF);
1659 	msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1660 	    M_DEVBUF, M_WAITOK | M_ZERO);
1661 	for (i = 0; i < count; i++)
1662 		msix->msix_table[i].mte_vector = vectors[i];
1663 	msix->msix_table_len = count;
/*
 * Because holes were rejected above, the unused vectors form a suffix
 * of the array; release them and shrink msix_vectors to j + 1 entries.
 */
1665 	/* Free any unused IRQs and resize the vectors array if necessary. */
1666 	j = msix->msix_alloc - 1;
1668 		struct msix_vector *vec;
1670 		while (used[j] == 0) {
1671 			PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1672 			    msix->msix_vectors[j].mv_irq);
1675 		vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1677 		bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1679 		free(msix->msix_vectors, M_DEVBUF);
1680 		msix->msix_vectors = vec;
1681 		msix->msix_alloc = j + 1;
1683 	free(used, M_DEVBUF);
/*
 * NOTE(review): the visible index 'vectors[i]' is 1-based elsewhere in
 * this function; an elided adjustment (- 1) presumably applies here —
 * confirm against the full source.
 */
1685 	/* Map the IRQs onto the rids. */
1686 	for (i = 0; i < count; i++) {
1687 		if (vectors[i] == 0)
1689 		irq = msix->msix_vectors[vectors[i]].mv_irq;
1690 		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1695 		device_printf(child, "Remapped MSI-X IRQs as: ");
1696 		for (i = 0; i < count; i++) {
1699 			if (vectors[i] == 0)
1703 				    msix->msix_vectors[vectors[i]].mv_irq);
/*
 * Release all MSI-X state for 'child': verify no handlers or IRQ
 * resources remain in use, disable MSI-X in the control register, tear
 * down the virtual table and resource-list entries, and hand every
 * allocated IRQ back to the parent bridge.
 * NOTE(review): return statements/braces are elided in this excerpt;
 * the visible flow implies EBUSY-style failures when resources are busy.
 */
1712 pci_release_msix(device_t dev, device_t child)
1714 	struct pci_devinfo *dinfo = device_get_ivars(child);
1715 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1716 	struct resource_list_entry *rle;
1719 	/* Do we have any messages to release? */
1720 	if (msix->msix_alloc == 0)
1723 	/* Make sure none of the resources are allocated. */
1724 	for (i = 0; i < msix->msix_table_len; i++) {
1725 		if (msix->msix_table[i].mte_vector == 0)
1727 		if (msix->msix_table[i].mte_handlers > 0)
1729 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1730 		KASSERT(rle != NULL, ("missing resource"));
1731 		if (rle->res != NULL)
1735 	/* Update control register to disable MSI-X. */
1736 	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1737 	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1738 	    msix->msix_ctrl, 2);
1740 	/* Free the resource list entries. */
1741 	for (i = 0; i < msix->msix_table_len; i++) {
1742 		if (msix->msix_table[i].mte_vector == 0)
1744 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1746 	free(msix->msix_table, M_DEVBUF);
1747 	msix->msix_table_len = 0;
1749 	/* Release the IRQs. */
1750 	for (i = 0; i < msix->msix_alloc; i++)
1751 		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1752 		    msix->msix_vectors[i].mv_irq);
1753 	free(msix->msix_vectors, M_DEVBUF);
1754 	msix->msix_alloc = 0;
1759 * Return the max supported MSI-X messages this device supports.
1760 * Basically, assuming the MD code can alloc messages, this function
1761 * should return the maximum value that pci_alloc_msix() can return.
1762 * Thus, it is subject to the tunables, etc.
1765 pci_msix_count_method(device_t dev, device_t child)
1767 struct pci_devinfo *dinfo = device_get_ivars(child);
1768 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1770 if (pci_do_msix && msix->msix_location != 0)
1771 return (msix->msix_msgnum);
1776 * HyperTransport MSI mapping control
/*
 * Toggle the HyperTransport MSI-mapping capability to match the MSI
 * address in use: enable the mapping when an address in the HT MSI
 * window is programmed, disable it when MSI is being turned off
 * (addr == 0).
 * NOTE(review): the value/width arguments of both pci_write_config
 * calls are elided in this excerpt; an early return when no HT MSI-map
 * capability exists is presumably also elided — confirm.
 */
1779 pci_ht_map_msi(device_t dev, uint64_t addr)
1781 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1782 	struct pcicfg_ht *ht = &dinfo->cfg.ht;
/* Only act when the target address falls in the same 1MB-aligned window. */
1787 	if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1788 	    ht->ht_msiaddr >> 20 == addr >> 20) {
1789 		/* Enable MSI -> HT mapping. */
1790 		ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1791 		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1795 	if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1796 		/* Disable MSI -> HT mapping. */
1797 		ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1798 		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1804 pci_get_max_read_req(device_t dev)
1806 struct pci_devinfo *dinfo = device_get_ivars(dev);
1810 cap = dinfo->cfg.pcie.pcie_location;
1813 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1814 val &= PCIEM_CTL_MAX_READ_REQUEST;
1816 return (1 << (val + 7));
/*
 * Set the device's PCIe Max Read Request Size.  'size' is rounded down
 * to a power of two and encoded into bits 14:12 of the device control
 * register (encoding n <=> 128 << n bytes, hence the "- 8").
 * NOTE(review): the cap == 0 early return and the clamping of 'size'
 * to the legal 128..4096 range are elided from this excerpt — confirm
 * against the full source.
 */
1820 pci_set_max_read_req(device_t dev, int size)
1822 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1826 	cap = dinfo->cfg.pcie.pcie_location;
/* Round down to the nearest power of two before encoding. */
1833 	size = (1 << (fls(size) - 1));
1834 	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1835 	val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1836 	val |= (fls(size) - 8) << 12;
1837 	pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1842 * Support for MSI message signalled interrupts.
/*
 * Program the MSI capability with the given message address/data and
 * set the enable bit.  64-bit capable functions use the high-address
 * register and the shifted data register offset.
 * NOTE(review): some argument continuations (address >> 32, widths) and
 * braces are elided in this excerpt.
 */
1845 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1847 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1848 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1850 	/* Write data and address values. */
1851 	pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1852 	    address & 0xffffffff, 4);
/* The data register moves when the 64-bit address registers exist. */
1853 	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1854 		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1856 		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1859 		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1862 	/* Enable MSI in the control register. */
1863 	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1864 	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1867 	/* Enable MSI -> HT mapping. */
1868 	pci_ht_map_msi(dev, address);
1872 pci_disable_msi(device_t dev)
1874 struct pci_devinfo *dinfo = device_get_ivars(dev);
1875 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1877 /* Disable MSI -> HT mapping. */
1878 pci_ht_map_msi(dev, 0);
1880 /* Disable MSI in the control register. */
1881 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1882 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1887 * Restore MSI registers during resume. If MSI is enabled then
1888 * restore the data and address registers in addition to the control
/*
 * Restore MSI registers on resume: when MSI was enabled, rewrite the
 * saved address (and high half / data register for 64-bit functions)
 * before restoring the control register itself.
 * NOTE(review): braces and some argument continuations are elided here.
 */
1892 pci_resume_msi(device_t dev)
1894 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1895 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1899 	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1900 		address = msi->msi_addr;
1901 		data = msi->msi_data;
1902 		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1903 		    address & 0xffffffff, 4);
1904 		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1905 			pci_write_config(dev, msi->msi_location +
1906 			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1907 			pci_write_config(dev, msi->msi_location +
1908 			    PCIR_MSI_DATA_64BIT, data, 2);
1910 			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* Control register last, so the enable bit lands after addr/data. */
1913 	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Bus method for remapping an already-allocated MSI or MSI-X IRQ, e.g.
 * after an interrupt is moved between CPUs.  Looks the IRQ up in the
 * device's MSI list first, then the MSI-X vector list; on a match it
 * asks the bridge for fresh address/data and reprograms the hardware.
 * NOTE(review): returns, braces, and error-path lines are elided in
 * this excerpt.
 */
1918 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1920 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1921 	pcicfgregs *cfg = &dinfo->cfg;
1922 	struct resource_list_entry *rle;
1923 	struct msix_table_entry *mte;
1924 	struct msix_vector *mv;
1930 	 * Handle MSI first.  We try to find this IRQ among our list
1931 	 * of MSI IRQs.  If we find it, we request updated address and
1932 	 * data registers and apply the results.
1934 	if (cfg->msi.msi_alloc > 0) {
1936 		/* If we don't have any active handlers, nothing to do. */
1937 		if (cfg->msi.msi_handlers == 0)
1939 		for (i = 0; i < cfg->msi.msi_alloc; i++) {
1940 			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1942 			if (rle->start == irq) {
1943 				error = PCIB_MAP_MSI(device_get_parent(bus),
1944 				    dev, irq, &addr, &data);
/* Disable, update the cached address/data, then re-enable atomically. */
1947 				pci_disable_msi(dev);
1948 				dinfo->cfg.msi.msi_addr = addr;
1949 				dinfo->cfg.msi.msi_data = data;
1950 				pci_enable_msi(dev, addr, data);
1958 	 * For MSI-X, we check to see if we have this IRQ.  If we do,
1959 	 * we request the updated mapping info.  If that works, we go
1960 	 * through all the slots that use this IRQ and update them.
1962 	if (cfg->msix.msix_alloc > 0) {
1963 		for (i = 0; i < cfg->msix.msix_alloc; i++) {
1964 			mv = &cfg->msix.msix_vectors[i];
1965 			if (mv->mv_irq == irq) {
1966 				error = PCIB_MAP_MSI(device_get_parent(bus),
1967 				    dev, irq, &addr, &data);
1970 				mv->mv_address = addr;
/* Reprogram every table slot bound to this vector that has handlers. */
1972 				for (j = 0; j < cfg->msix.msix_table_len; j++) {
1973 					mte = &cfg->msix.msix_table[j];
1974 					if (mte->mte_vector != i + 1)
1976 					if (mte->mte_handlers == 0)
1978 					pci_mask_msix(dev, j);
1979 					pci_enable_msix(dev, j, addr, data);
1980 					pci_unmask_msix(dev, j);
1991 * Returns true if the specified device is blacklisted because MSI
1995 pci_msi_device_blacklisted(device_t dev)
1998 if (!pci_honor_msi_blacklist)
2001 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2005 * Determine if MSI is blacklisted globally on this system. Currently,
2006 * we just check for blacklisted chipsets as represented by the
2007 * host-PCI bridge at device 0:0:0. In the future, it may become
2008 * necessary to check other system attributes, such as the kenv values
2009 * that give the motherboard manufacturer and model number.
/*
 * Decide whether MSI should be avoided system-wide, keying off the
 * host-PCI bridge at 0:0:0.  Pre-PCIe/PCI-X chipsets are rejected
 * except for virtual machines whose emulated chipset carries the
 * ENABLE_MSI_VM quirk.
 * NOTE(review): return statements, braces, and the NULL-check on 'dev'
 * before the final call are elided in this excerpt.
 */
2012 pci_msi_blacklisted(void)
2016 	if (!pci_honor_msi_blacklist)
2019 	/* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2020 	if (!(pcie_chipset || pcix_chipset)) {
2021 		if (vm_guest != VM_GUEST_NO) {
2023 			 * Whitelist older chipsets in virtual
2024 			 * machines known to support MSI.
2026 			dev = pci_find_bsf(0, 0, 0);
2028 				return (!pci_has_quirk(pci_get_devid(dev),
2029 				    PCI_QUIRK_ENABLE_MSI_VM));
/* Otherwise consult the per-device quirk list for the host bridge. */
2034 	dev = pci_find_bsf(0, 0, 0);
2036 		return (pci_msi_device_blacklisted(dev));
2041 * Returns true if the specified device is blacklisted because MSI-X
2042 * doesn't work. Note that this assumes that if MSI doesn't work,
2043 * MSI-X doesn't either.
2046 pci_msix_device_blacklisted(device_t dev)
2049 if (!pci_honor_msi_blacklist)
2052 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2055 return (pci_msi_device_blacklisted(dev));
2059 * Determine if MSI-X is blacklisted globally on this system. If MSI
2060 * is blacklisted, assume that MSI-X is as well. Check for additional
2061 * chipsets where MSI works but MSI-X does not.
2064 pci_msix_blacklisted(void)
2068 if (!pci_honor_msi_blacklist)
2071 dev = pci_find_bsf(0, 0, 0);
2072 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2073 PCI_QUIRK_DISABLE_MSIX))
2076 return (pci_msi_blacklisted());
2080 * Attempt to allocate *count MSI messages. The actual number allocated is
2081 * returned in *count. After this function returns, each message will be
2082 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * Bus method backing pci_alloc_msi(): validate preconditions, round the
 * request to a supported power-of-two count (max 32), allocate the IRQs
 * from the parent bridge, expose them as SYS_RES_IRQ rids 1..actual,
 * and program the Multiple Message Enable field.
 * NOTE(review): error returns, braces, and the bootverbose conditionals
 * are elided in this excerpt; 'actual' halving on partial failure is
 * presumably among the elided lines — confirm against the full source.
 */
2085 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2087 	struct pci_devinfo *dinfo = device_get_ivars(child);
2088 	pcicfgregs *cfg = &dinfo->cfg;
2089 	struct resource_list_entry *rle;
2090 	int actual, error, i, irqs[32];
2093 	/* Don't let count == 0 get us into trouble. */
2097 	/* If rid 0 is allocated, then fail. */
2098 	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2099 	if (rle != NULL && rle->res != NULL)
2102 	/* Already have allocated messages? */
2103 	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2106 	/* If MSI is blacklisted for this system, fail. */
2107 	if (pci_msi_blacklisted())
2110 	/* MSI capability present? */
2111 	if (cfg->msi.msi_location == 0 || !pci_do_msi)
2115 		device_printf(child,
2116 		    "attempting to allocate %d MSI vectors (%d supported)\n",
2117 		    *count, cfg->msi.msi_msgnum);
2119 	/* Don't ask for more than the device supports. */
2120 	actual = min(*count, cfg->msi.msi_msgnum);
2122 	/* Don't ask for more than 32 messages. */
2123 	actual = min(actual, 32);
/* MSI hardware can only be told about power-of-two message counts. */
2125 	/* MSI requires power of 2 number of messages. */
2126 	if (!powerof2(actual))
2130 	/* Try to allocate N messages. */
2131 	error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2143 	 * We now have N actual messages mapped onto SYS_RES_IRQ
2144 	 * resources in the irqs[] array, so add new resources
2145 	 * starting at rid 1.
2147 	for (i = 0; i < actual; i++)
2148 		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2149 		    irqs[i], irqs[i], 1);
2153 			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2158 			 * Be fancy and try to print contiguous runs
2159 			 * of IRQ values as ranges.  'run' is true if
2160 			 * we are in a range.
2162 			device_printf(child, "using IRQs %d", irqs[0]);
2164 			for (i = 1; i < actual; i++) {
2166 				/* Still in a run? */
2167 				if (irqs[i] == irqs[i - 1] + 1) {
2172 				/* Finish previous range. */
2174 					printf("-%d", irqs[i - 1]);
2178 				/* Start new range. */
2179 				printf(",%d", irqs[i]);
2182 			/* Unfinished range? */
2184 				printf("-%d", irqs[actual - 1]);
2185 			printf(" for MSI\n");
/* Encode log2(actual) into the Multiple Message Enable field (bits 6:4). */
2189 	/* Update control register with actual count. */
2190 	ctrl = cfg->msi.msi_ctrl;
2191 	ctrl &= ~PCIM_MSICTRL_MME_MASK;
2192 	ctrl |= (ffs(actual) - 1) << 4;
2193 	cfg->msi.msi_ctrl = ctrl;
2194 	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2196 	/* Update counts of alloc'd messages. */
2197 	cfg->msi.msi_alloc = actual;
2198 	cfg->msi.msi_handlers = 0;
2203 /* Release the MSI messages associated with this device. */
/*
 * Bus method backing pci_release_msi(): delegate to pci_release_msix()
 * first (it returns ENODEV when MSI-X was not in use), then verify no
 * MSI handlers or IRQ resources remain, clear the MME field, and hand
 * the IRQs back to the parent bridge.
 * NOTE(review): return statements and braces are elided here; the
 * visible checks imply EBUSY-style failures when resources are in use.
 */
2205 pci_release_msi_method(device_t dev, device_t child)
2207 	struct pci_devinfo *dinfo = device_get_ivars(child);
2208 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
2209 	struct resource_list_entry *rle;
2210 	int error, i, irqs[32];
2212 	/* Try MSI-X first. */
2213 	error = pci_release_msix(dev, child);
2214 	if (error != ENODEV)
2217 	/* Do we have any messages to release? */
2218 	if (msi->msi_alloc == 0)
2220 	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2222 	/* Make sure none of the resources are allocated. */
2223 	if (msi->msi_handlers > 0)
2225 	for (i = 0; i < msi->msi_alloc; i++) {
2226 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2227 		KASSERT(rle != NULL, ("missing MSI resource"));
2228 		if (rle->res != NULL)
/* Collect the IRQ numbers so they can be released in one bridge call. */
2230 		irqs[i] = rle->start;
2233 	/* Update control register with 0 count. */
2234 	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2235 	    ("%s: MSI still enabled", __func__));
2236 	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2237 	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2240 	/* Release the messages. */
2241 	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2242 	for (i = 0; i < msi->msi_alloc; i++)
2243 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2245 	/* Update alloc count. */
2253 * Return the max supported MSI messages this device supports.
2254 * Basically, assuming the MD code can alloc messages, this function
2255 * should return the maximum value that pci_alloc_msi() can return.
2256 * Thus, it is subject to the tunables, etc.
2259 pci_msi_count_method(device_t dev, device_t child)
2261 struct pci_devinfo *dinfo = device_get_ivars(child);
2262 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2264 if (pci_do_msi && msi->msi_location != 0)
2265 return (msi->msi_msgnum);
2269 /* free pcicfgregs structure and all depending data structures */
/*
 * Free a pci_devinfo and everything hanging off it: VPD strings and
 * read-only/read-write keyword arrays, the BAR map list, and the
 * devinfo itself, then unlink it from the global device queue.
 * NOTE(review): the free(pm, ...) inside the STAILQ loop and the
 * generation/device counters referenced by the trailing comments are
 * elided in this excerpt.
 */
2272 pci_freecfg(struct pci_devinfo *dinfo)
2274 	struct devlist *devlist_head;
2275 	struct pci_map *pm, *next;
2278 	devlist_head = &pci_devq;
/* VPD data is only present when a VPD capability was parsed. */
2280 	if (dinfo->cfg.vpd.vpd_reg) {
2281 		free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2282 		for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2283 			free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2284 		free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2285 		for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2286 			free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2287 		free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* _SAFE variant: each pci_map is freed while walking the list. */
2289 	STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2292 	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2293 	free(dinfo, M_DEVBUF);
2295 	/* increment the generation count */
2298 	/* we're losing one device */
2304 * PCI power management
/*
 * Bus method to move 'child' to PCI power state D0..D3.  Validates that
 * the requested state is supported, writes PCIM_PSTAT_* into the power
 * status register, and honors the spec-mandated settle delay (10ms for
 * D3 transitions, 200us for D2) — the delay lines themselves are elided
 * in this excerpt.
 * NOTE(review): the EOPNOTSUPP/EINVAL default case, break statements,
 * and the DELAY() call are among the elided lines.
 */
2307 pci_set_powerstate_method(device_t dev, device_t child, int state)
2309 	struct pci_devinfo *dinfo = device_get_ivars(child);
2310 	pcicfgregs *cfg = &dinfo->cfg;
2312 	int result, oldstate, highest, delay;
/* No power-management capability: nothing we can program. */
2314 	if (cfg->pp.pp_cap == 0)
2315 		return (EOPNOTSUPP);
2318 	 * Optimize a no state change request away.  While it would be OK to
2319 	 * write to the hardware in theory, some devices have shown odd
2320 	 * behavior when going from D3 -> D3.
2322 	oldstate = pci_get_powerstate(child);
2323 	if (oldstate == state)
2327 	 * The PCI power management specification states that after a state
2328 	 * transition between PCI power states, system software must
2329 	 * guarantee a minimal delay before the function accesses the device.
2330 	 * Compute the worst case delay that we need to guarantee before we
2331 	 * access the device.  Many devices will be responsive much more
2332 	 * quickly than this delay, but there are some that don't respond
2333 	 * instantly to state changes.  Transitions to/from D3 state require
2334 	 * 10ms, while D2 requires 200us, and D0/1 require none.  The delay
2335 	 * is done below with DELAY rather than a sleeper function because
2336 	 * this function can be called from contexts where we cannot sleep.
2338 	highest = (oldstate > state) ? oldstate : state;
2339 	if (highest == PCI_POWERSTATE_D3)
2341 	else if (highest == PCI_POWERSTATE_D2)
/* Preserve everything but the 2-bit power-state field. */
2345 	status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2346 	    & ~PCIM_PSTAT_DMASK;
2349 	case PCI_POWERSTATE_D0:
2350 		status |= PCIM_PSTAT_D0;
2352 	case PCI_POWERSTATE_D1:
/* D1 and D2 are optional states; refuse them unless advertised. */
2353 		if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2354 			return (EOPNOTSUPP);
2355 		status |= PCIM_PSTAT_D1;
2357 	case PCI_POWERSTATE_D2:
2358 		if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2359 			return (EOPNOTSUPP);
2360 		status |= PCIM_PSTAT_D2;
2362 	case PCI_POWERSTATE_D3:
2363 		status |= PCIM_PSTAT_D3;
2370 		pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2373 	PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Bus method returning the child's current PCI power state, decoded
 * from the 2-bit field in the power status register.  Devices without
 * a power-management capability are reported as D0.
 * NOTE(review): the PCIM_PSTAT_* case labels and break statements are
 * elided in this excerpt; the visible result assignments imply the
 * usual D0/D1/D2/D3/default ordering.
 */
2380 pci_get_powerstate_method(device_t dev, device_t child)
2382 	struct pci_devinfo *dinfo = device_get_ivars(child);
2383 	pcicfgregs *cfg = &dinfo->cfg;
2387 	if (cfg->pp.pp_cap != 0) {
2388 		status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2389 		switch (status & PCIM_PSTAT_DMASK) {
2391 			result = PCI_POWERSTATE_D0;
2394 			result = PCI_POWERSTATE_D1;
2397 			result = PCI_POWERSTATE_D2;
2400 			result = PCI_POWERSTATE_D3;
2403 			result = PCI_POWERSTATE_UNKNOWN;
2407 		/* No support, device is always at D0 */
2408 		result = PCI_POWERSTATE_D0;
2414 * Some convenience functions for PCI device drivers.
2417 static __inline void
2418 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2422 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2424 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2427 static __inline void
2428 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2432 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2434 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2438 pci_enable_busmaster_method(device_t dev, device_t child)
2440 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2445 pci_disable_busmaster_method(device_t dev, device_t child)
2447 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2452 pci_enable_io_method(device_t dev, device_t child, int space)
2457 case SYS_RES_IOPORT:
2458 bit = PCIM_CMD_PORTEN;
2460 case SYS_RES_MEMORY:
2461 bit = PCIM_CMD_MEMEN;
2466 pci_set_command_bit(dev, child, bit);
2471 pci_disable_io_method(device_t dev, device_t child, int space)
2476 case SYS_RES_IOPORT:
2477 bit = PCIM_CMD_PORTEN;
2479 case SYS_RES_MEMORY:
2480 bit = PCIM_CMD_MEMEN;
2485 pci_clear_command_bit(dev, child, bit);
2490 * New style pci driver. Parent device is either a pci-host-bridge or a
2491 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a multi-line description of a newly found device: IDs, location,
 * class, command/status, timing fields, interrupt routing, and the
 * power-management / MSI / MSI-X capabilities when present.
 * NOTE(review): the bootverbose gate and some braces are elided in this
 * excerpt.
 */
2495 pci_print_verbose(struct pci_devinfo *dinfo)
2499 	pcicfgregs *cfg = &dinfo->cfg;
2501 	printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2502 	    cfg->vendor, cfg->device, cfg->revid);
2503 	printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2504 	    cfg->domain, cfg->bus, cfg->slot, cfg->func);
2505 	printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2506 	    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2508 	printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2509 	    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
/* lattimer is in units of 30ns; mingnt/maxlat are in units of 250ns. */
2510 	printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2511 	    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2512 	    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
/* intpin is 1-based (1 == INTA#), hence the +'a' - 1 to print a letter. */
2513 	if (cfg->intpin > 0)
2514 		printf("\tintpin=%c, irq=%d\n",
2515 		    cfg->intpin +'a' -1, cfg->intline);
2516 	if (cfg->pp.pp_cap) {
2519 		status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2520 		printf("\tpowerspec %d  supports D0%s%s D3  current D%d\n",
2521 		    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2522 		    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2523 		    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2524 		    status & PCIM_PSTAT_DMASK);
2526 	if (cfg->msi.msi_location) {
2529 		ctrl = cfg->msi.msi_ctrl;
2530 		printf("\tMSI supports %d message%s%s%s\n",
2531 		    cfg->msi.msi_msgnum,
2532 		    (cfg->msi.msi_msgnum == 1) ? "" : "s",
2533 		    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2534 		    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2536 	if (cfg->msix.msix_location) {
2537 		printf("\tMSI-X supports %d message%s ",
2538 		    cfg->msix.msix_msgnum,
2539 		    (cfg->msix.msix_msgnum == 1) ? "" : "s");
/* Table and PBA may share one BAR or live in two different ones. */
2540 		if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2541 			printf("in map 0x%x\n",
2542 			    cfg->msix.msix_table_bar);
2544 			printf("in maps 0x%x and 0x%x\n",
2545 			    cfg->msix.msix_table_bar,
2546 			    cfg->msix.msix_pba_bar);
2552 pci_porten(device_t dev)
2554 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
2558 pci_memen(device_t dev)
2560 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Size a BAR by the standard probe: save the current value, write all
 * ones, read back the sticky mask, and restore the original value.
 * Results are returned through *mapp (current value) and *testvalp
 * (readback mask).  Decoding is disabled around the probe so the
 * temporarily-invalid BAR cannot match stray bus cycles.
 * NOTE(review): return type, some declarations, and early returns are
 * elided in this excerpt.
 */
2564 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2566 	struct pci_devinfo *dinfo;
2567 	pci_addr_t map, testval;
2572 	 * The device ROM BAR is special.  It is always a 32-bit
2573 	 * memory BAR.  Bit 0 is special and should not be set when
2576 	dinfo = device_get_ivars(dev);
2577 	if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2578 		map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps the ROM-enable bit (bit 0) clear during the probe. */
2579 		pci_write_config(dev, reg, 0xfffffffe, 4);
2580 		testval = pci_read_config(dev, reg, 4);
2581 		pci_write_config(dev, reg, map, 4);
2583 		*testvalp = testval;
2587 	map = pci_read_config(dev, reg, 4);
2588 	ln2range = pci_maprange(map);
/* 64-bit BARs consume two consecutive dwords. */
2590 		map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2593 	 * Disable decoding via the command register before
2594 	 * determining the BAR's length since we will be placing it in
2597 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2598 	pci_write_config(dev, PCIR_COMMAND,
2599 	    cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2602 	 * Determine the BAR's length by writing all 1's.  The bottom
2603 	 * log_2(size) bits of the BAR will stick as 0 when we read
2606 	pci_write_config(dev, reg, 0xffffffff, 4);
2607 	testval = pci_read_config(dev, reg, 4);
2608 	if (ln2range == 64) {
2609 		pci_write_config(dev, reg + 4, 0xffffffff, 4);
2610 		testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2614 	 * Restore the original value of the BAR.  We may have reprogrammed
2615 	 * the BAR of the low-level console device and when booting verbose,
2616 	 * we need the console device addressable.
2618 	pci_write_config(dev, reg, map, 4);
2620 		pci_write_config(dev, reg + 4, map >> 32, 4);
2621 	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2624 	*testvalp = testval;
/*
 * Program the BAR described by 'pm' with the address 'base' and refresh
 * pm->pm_value from the hardware (both dwords for a 64-bit BAR).
 * The ROM BAR is always written as a single 32-bit register.
 */
2628 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2630 struct pci_devinfo *dinfo;
2633 /* The device ROM BAR is always a 32-bit memory BAR. */
2634 dinfo = device_get_ivars(dev);
2635 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2638 ln2range = pci_maprange(pm->pm_value);
2639 pci_write_config(dev, pm->pm_reg, base, 4);
/* For a 64-bit BAR, write the upper half and read back both dwords. */
2641 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2642 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2644 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2645 pm->pm_reg + 4, 4) << 32;
/*
 * Look up the saved BAR record for config offset 'reg' in the device's
 * map list; returns the matching entry (NULL when absent — the tail of
 * this function is not visible in this excerpt).
 */
2649 pci_find_bar(device_t dev, int reg)
2651 struct pci_devinfo *dinfo;
2654 dinfo = device_get_ivars(dev);
2655 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2656 if (pm->pm_reg == reg)
/*
 * Report whether decoding is currently active for BAR 'pm': the ROM BAR
 * additionally requires its own enable bit (PCIM_BIOS_ENABLE); otherwise
 * memory BARs are gated by PCIM_CMD_MEMEN and I/O BARs by PCIM_CMD_PORTEN.
 */
2663 pci_bar_enabled(device_t dev, struct pci_map *pm)
2665 struct pci_devinfo *dinfo;
2668 dinfo = device_get_ivars(dev);
2669 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2670 !(pm->pm_value & PCIM_BIOS_ENABLE))
2672 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2673 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2674 return ((cmd & PCIM_CMD_MEMEN) != 0);
2676 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Allocate and record a new BAR entry (register offset, value, size),
 * inserting it into the device's map list sorted by register offset.
 * Duplicate registrations for the same offset are a KASSERT violation.
 */
2679 static struct pci_map *
2680 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2682 struct pci_devinfo *dinfo;
2683 struct pci_map *pm, *prev;
2685 dinfo = device_get_ivars(dev);
2686 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2688 pm->pm_value = value;
/* Walk the list to find the insertion point that keeps it sorted. */
2690 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2691 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2693 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2694 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2698 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2700 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every saved BAR value into the hardware (e.g. after a reset or
 * power transition), writing both halves of 64-bit BARs.  The ROM BAR
 * appears to be skipped here (the branch body is outside this excerpt).
 */
2705 pci_restore_bars(device_t dev)
2707 struct pci_devinfo *dinfo;
2711 dinfo = device_get_ivars(dev);
2712 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2713 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2716 ln2range = pci_maprange(pm->pm_value);
2717 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2719 pci_write_config(dev, pm->pm_reg + 4,
2720 pm->pm_value >> 32, 4);
/*
 * Add a resource based on a pci map register. Return 1 if the map
 * register is a 32bit map register or 2 if it is a 64bit register.
 *
 * Probes the BAR, validates it against the spec, records it via
 * pci_add_bar(), optionally enables decoding, and reserves the address
 * range from the parent bus so child drivers inherit it later.
 */
2729 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2730 int force, int prefetch)
2733 pci_addr_t base, map, testval;
2734 pci_addr_t start, end, count;
2735 int barlen, basezero, maprange, mapsize, type;
2737 struct resource *res;
2740 * The BAR may already exist if the device is a CardBus card
2741 * whose CIS is stored in this BAR.
2743 pm = pci_find_bar(dev, reg);
2745 maprange = pci_maprange(pm->pm_value);
2746 barlen = maprange == 64 ? 2 : 1;
2750 pci_read_bar(dev, reg, &map, &testval);
2751 if (PCI_BAR_MEM(map)) {
2752 type = SYS_RES_MEMORY;
2753 if (map & PCIM_BAR_MEM_PREFETCH)
2756 type = SYS_RES_IOPORT;
2757 mapsize = pci_mapsize(testval);
2758 base = pci_mapbase(map);
2759 #ifdef __PCI_BAR_ZERO_VALID
2762 basezero = base == 0;
2764 maprange = pci_maprange(map);
2765 barlen = maprange == 64 ? 2 : 1;
2768 * For I/O registers, if bottom bit is set, and the next bit up
2769 * isn't clear, we know we have a BAR that doesn't conform to the
2770 * spec, so ignore it. Also, sanity check the size of the data
2771 * areas to the type of memory involved. Memory must be at least
2772 * 16 bytes in size, while I/O ranges must be at least 4.
2774 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2776 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2777 (type == SYS_RES_IOPORT && mapsize < 2))
2780 /* Save a record of this BAR. */
2781 pm = pci_add_bar(dev, reg, map, mapsize)
2783 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2784 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2785 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2786 printf(", port disabled\n");
2787 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2788 printf(", memory disabled\n");
2790 printf(", enabled\n");
2794 * If base is 0, then we have problems if this architecture does
2795 * not allow that. It is best to ignore such entries for the
2796 * moment. These will be allocated later if the driver specifically
2797 * requests them. However, some removable busses look better when
2798 * all resources are allocated, so allow '0' to be overridden.
2800 * Similarly treat maps whose value is the same as the test value
2801 * read back. These maps have had all f's written to them by the
2802 * BIOS in an attempt to disable the resources.
2804 if (!force && (basezero || map == testval))
2806 if ((u_long)base != base) {
2808 "pci%d:%d:%d:%d bar %#x too many address bits",
2809 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2810 pci_get_function(dev), reg);
2815 * This code theoretically does the right thing, but has
2816 * undesirable side effects in some cases where peripherals
2817 * respond oddly to having these bits enabled. Let the user
2818 * be able to turn them off (since pci_enable_io_modes is 1 by
2821 if (pci_enable_io_modes) {
2822 /* Turn on resources that have been left off by a lazy BIOS */
2823 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2824 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2825 cmd |= PCIM_CMD_PORTEN;
2826 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2828 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2829 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2830 cmd |= PCIM_CMD_MEMEN;
2831 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2834 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2836 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2840 count = (pci_addr_t)1 << mapsize;
2841 if (basezero || base == pci_mapbase(testval)) {
2842 start = 0; /* Let the parent decide. */
2846 end = base + count - 1;
2848 resource_list_add(rl, type, reg, start, end, count);
2851 * Try to allocate the resource for this BAR from our parent
2852 * so that this resource range is already reserved. The
2853 * driver for this device will later inherit this resource in
2854 * pci_alloc_resource().
2856 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
2857 prefetch ? RF_PREFETCHABLE : 0);
2858 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2860 * If the allocation fails, try to allocate a resource for
2861 * this BAR using any available range. The firmware felt
2862 * it was important enough to assign a resource, so don't
2863 * disable decoding if we can help it.
2865 resource_list_delete(rl, type, reg);
2866 resource_list_add(rl, type, reg, 0, ~0ul, count);
2867 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0ul,
2868 count, prefetch ? RF_PREFETCHABLE : 0);
2872 * If the allocation fails, delete the resource list entry
2873 * and disable decoding for this device.
2875 * If the driver requests this resource in the future,
2876 * pci_reserve_map() will try to allocate a fresh
2879 resource_list_delete(rl, type, reg);
2880 pci_disable_io(dev, type);
2883 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2884 pci_get_domain(dev), pci_get_bus(dev),
2885 pci_get_slot(dev), pci_get_function(dev), reg);
2887 start = rman_get_start(res);
2888 pci_write_bar(dev, pm, start);
/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sit on the
 * same addresses that old ISA hardware did. This dictates that we use
 * those addresses and ignore the BAR's if we cannot set PCI native
 * addressing mode; in that case the fixed legacy I/O ranges
 * (0x1f0/0x3f6 and 0x170/0x376) are reserved instead of BARs 0-3.
 */
2901 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2902 uint32_t prefetchmask)
2905 int rid, type, progif;
2907 /* if this device supports PCI native addressing use it */
2908 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2909 if ((progif & 0x8a) == 0x8a) {
2910 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2911 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2912 printf("Trying ATA native PCI addressing mode\n");
2913 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2917 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2918 type = SYS_RES_IOPORT;
2919 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
/* Primary channel in native mode: use BARs 0 and 1. */
2920 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2921 prefetchmask & (1 << 0));
2922 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2923 prefetchmask & (1 << 1));
2926 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2927 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
2930 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2931 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
2934 if (progif & PCIP_STORAGE_IDE_MODESEC) {
/* Secondary channel in native mode: use BARs 2 and 3. */
2935 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2936 prefetchmask & (1 << 2));
2937 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2938 prefetchmask & (1 << 3));
2941 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2942 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
2945 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2946 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
/* BARs 4 and 5 (bus-master DMA, etc.) are mapped in either mode. */
2949 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
2950 prefetchmask & (1 << 4));
2951 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
2952 prefetchmask & (1 << 5));
/*
 * Determine the INTx IRQ for a device — from the hw.pci%d.%d.%d.INT%c.irq
 * tunable, the intline config register, or bus routing — write it back to
 * PCIR_INTLINE if changed, and add it as the rid 0 SYS_RES_IRQ resource.
 */
2956 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2958 struct pci_devinfo *dinfo = device_get_ivars(dev);
2959 pcicfgregs *cfg = &dinfo->cfg;
2960 char tunable_name[64];
2963 /* Has to have an intpin to have an interrupt. */
2964 if (cfg->intpin == 0)
2967 /* Let the user override the IRQ with a tunable. */
2968 irq = PCI_INVALID_IRQ;
2969 snprintf(tunable_name, sizeof(tunable_name),
2970 "hw.pci%d.%d.%d.INT%c.irq",
2971 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject out-of-range tunable values (valid IRQs are 1..254 here). */
2972 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2973 irq = PCI_INVALID_IRQ;
2976 * If we didn't get an IRQ via the tunable, then we either use the
2977 * IRQ value in the intline register or we ask the bus to route an
2978 * interrupt for us. If force_route is true, then we only use the
2979 * value in the intline register if the bus was unable to assign an
2982 if (!PCI_INTERRUPT_VALID(irq)) {
2983 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2984 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2985 if (!PCI_INTERRUPT_VALID(irq))
2989 /* If after all that we don't have an IRQ, just bail. */
2990 if (!PCI_INTERRUPT_VALID(irq))
2993 /* Update the config register if it changed. */
2994 if (irq != cfg->intline) {
2996 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2999 /* Add this IRQ as rid 0 interrupt resource. */
3000 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3003 /* Perform early OHCI takeover from SMM. */
/*
 * If the BIOS/SMM owns the OHCI controller (OHCI_IR set), request an
 * ownership change and poll for it; reset the controller if SMM never
 * responds, then mask all OHCI interrupts and release the BAR mapping.
 */
3005 ohci_early_takeover(device_t self)
3007 struct resource *res;
3013 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3017 ctl = bus_read_4(res, OHCI_CONTROL);
3018 if (ctl & OHCI_IR) {
3020 printf("ohci early: "
3021 "SMM active, request owner change\n");
3022 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
/* Poll up to 100 iterations for SMM to drop OHCI_IR. */
3023 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3025 ctl = bus_read_4(res, OHCI_CONTROL);
3027 if (ctl & OHCI_IR) {
3029 printf("ohci early: "
3030 "SMM does not respond, resetting\n");
3031 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3033 /* Disable interrupts */
3034 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3037 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3040 /* Perform early UHCI takeover from SMM. */
/*
 * Disable UHCI legacy (keyboard/mouse emulation) support except PIRQD,
 * then clear the controller's interrupt-enable register via its I/O BAR.
 */
3042 uhci_early_takeover(device_t self)
3044 struct resource *res;
3048 * Set the PIRQD enable bit and switch off all the others. We don't
3049 * want legacy support to interfere with us XXX Does this also mean
3050 * that the BIOS won't touch the keyboard anymore if it is connected
3051 * to the ports of the root hub?
3053 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3055 /* Disable interrupts */
3056 rid = PCI_UHCI_BASE_REG;
3057 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3059 bus_write_2(res, UHCI_INTR, 0);
3060 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3064 /* Perform early EHCI takeover from SMM. */
/*
 * Walk the EHCI extended-capability list for the legacy-support entry;
 * if the BIOS semaphore is held, set the OS semaphore and poll for the
 * hand-off, then mask controller interrupts and release the BAR.
 */
3066 ehci_early_takeover(device_t self)
3068 struct resource *res;
3078 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3082 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3084 /* Synchronise with the BIOS if it owns the controller. */
3085 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3086 eecp = EHCI_EECP_NEXT(eec)) {
3087 eec = pci_read_config(self, eecp, 4);
3088 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3091 bios_sem = pci_read_config(self, eecp +
3092 EHCI_LEGSUP_BIOS_SEM, 1);
3093 if (bios_sem == 0) {
3097 printf("ehci early: "
3098 "SMM active, request owner change\n");
/* Claim the OS-ownership semaphore, then poll the BIOS one. */
3100 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3102 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3104 bios_sem = pci_read_config(self, eecp +
3105 EHCI_LEGSUP_BIOS_SEM, 1);
3108 if (bios_sem != 0) {
3110 printf("ehci early: "
3111 "SMM does not respond\n");
3113 /* Disable interrupts */
3114 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3115 bus_write_4(res, offs + EHCI_USBINTR, 0);
3117 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3120 /* Perform early XHCI takeover from SMM. */
/*
 * Walk the xHCI extended-capability list (in MMIO space, unlike EHCI's
 * config-space list) for the USB-legacy entry; claim the OS semaphore,
 * wait up to ~5 s for the BIOS to release its semaphore, then stop the
 * controller (USBCMD = 0) and release the BAR.
 */
3122 xhci_early_takeover(device_t self)
3124 struct resource *res;
3134 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3138 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3142 /* Synchronise with the BIOS if it owns the controller. */
3143 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3144 eecp += XHCI_XECP_NEXT(eec) << 2) {
3145 eec = bus_read_4(res, eecp);
3147 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3150 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3155 printf("xhci early: "
3156 "SMM active, request owner change\n");
3158 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3160 /* wait a maximum of 5 second */
3162 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3164 bios_sem = bus_read_1(res, eecp +
3165 XHCI_XECP_BIOS_SEM);
3168 if (bios_sem != 0) {
3170 printf("xhci early: "
3171 "SMM does not respond\n");
3174 /* Disable interrupts */
3175 offs = bus_read_1(res, XHCI_CAPLENGTH);
3176 bus_write_4(res, offs + XHCI_USBCMD, 0);
3177 bus_read_4(res, offs + XHCI_USBSTS);
3179 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/*
 * Populate the device's resource list: handle ATA legacy/native mapping,
 * add each BAR (honoring UNMAP_REG / MAP_REG quirks), route the INTx
 * interrupt, and perform early USB controller takeover from SMM.
 */
3183 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3185 struct pci_devinfo *dinfo;
3187 struct resource_list *rl;
3188 const struct pci_quirk *q;
3192 dinfo = device_get_ivars(dev);
3194 rl = &dinfo->resources;
3195 devid = (cfg->device << 16) | cfg->vendor;
3197 /* ATA devices needs special map treatment */
3198 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3199 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3200 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3201 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3202 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3203 pci_ata_maps(bus, dev, rl, force, prefetchmask);
3205 for (i = 0; i < cfg->nummaps;) {
3207 * Skip quirked resources.
3209 for (q = &pci_quirks[0]; q->devid != 0; q++)
3210 if (q->devid == devid &&
3211 q->type == PCI_QUIRK_UNMAP_REG &&
3212 q->arg1 == PCIR_BAR(i))
3214 if (q->devid != 0) {
/* pci_add_map returns 1 (32-bit) or 2 (64-bit) BAR slots consumed. */
3218 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3219 prefetchmask & (1 << i));
3223 * Add additional, quirked resources.
3225 for (q = &pci_quirks[0]; q->devid != 0; q++)
3226 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3227 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3229 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3230 #ifdef __PCI_REROUTE_INTERRUPT
3232 * Try to re-route interrupts. Sometimes the BIOS or
3233 * firmware may leave bogus values in these registers.
3234 * If the re-route fails, then just stick with what we
3237 pci_assign_interrupt(bus, dev, 1);
3239 pci_assign_interrupt(bus, dev, 0);
3243 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3244 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3245 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3246 xhci_early_takeover(dev);
3247 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3248 ehci_early_takeover(dev);
3249 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3250 ohci_early_takeover(dev);
3251 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3252 uhci_early_takeover(dev);
/*
 * Enumerate every slot/function on the given bus, reading config space
 * through the parent bridge, and add a child device for each function
 * found.  Single-function devices (no PCIM_MFDEV) only probe function 0.
 */
3257 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3259 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3260 device_t pcib = device_get_parent(dev);
3261 struct pci_devinfo *dinfo;
3263 int s, f, pcifunchigh;
3266 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3267 ("dinfo_size too small"));
3268 maxslots = PCIB_MAXSLOTS(pcib);
3269 for (s = 0; s <= maxslots; s++) {
3273 hdrtype = REG(PCIR_HDRTYPE, 1);
3274 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3276 if (hdrtype & PCIM_MFDEV)
3277 pcifunchigh = PCI_FUNCMAX;
3278 for (f = 0; f <= pcifunchigh; f++) {
3279 dinfo = pci_read_device(pcib, domain, busno, s, f,
3281 if (dinfo != NULL) {
3282 pci_add_child(dev, dinfo);
/*
 * Create the newbus child for a discovered PCI function, wire its ivars,
 * snapshot/restore its config space, and populate its resource list.
 */
3290 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3292 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3293 device_set_ivars(dinfo->cfg.dev, dinfo);
3294 resource_list_init(&dinfo->resources);
3295 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3296 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3297 pci_print_verbose(dinfo);
3298 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/*
 * Generic PCI bus probe: always matches, at generic priority so more
 * specific bus subclasses can take over.
 */
3302 pci_probe(device_t dev)
3305 device_set_desc(dev, "PCI bus");
3307 /* Allow other subclasses to override this driver. */
3308 return (BUS_PROBE_GENERIC);
/*
 * Shared attach work: report domain/bus, and (when PCI_DMA_BOUNDARY is
 * configured) create a boundary-constrained DMA tag for top-level PCI
 * buses; nested PCI buses inherit the parent's tag instead.
 */
3312 pci_attach_common(device_t dev)
3314 struct pci_softc *sc;
3316 #ifdef PCI_DMA_BOUNDARY
3317 int error, tag_valid;
3320 sc = device_get_softc(dev);
3321 domain = pcib_get_domain(dev);
3322 busno = pcib_get_bus(dev);
3324 device_printf(dev, "domain=%d, physical bus=%d\n",
3326 #ifdef PCI_DMA_BOUNDARY
/* Only the outermost PCI bus (parent is not another "pci") gets a new tag. */
3328 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3329 devclass_find("pci")) {
3330 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3331 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3332 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3333 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3335 device_printf(dev, "Failed to create DMA tag: %d\n",
3342 sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Attach the PCI bus: common setup, then enumerate children using the
 * domain/bus numbers supplied by the parent bridge.
 */
3347 pci_attach(device_t dev)
3349 int busno, domain, error;
3351 error = pci_attach_common(dev);
3356 * Since there can be multiple independently numbered PCI
3357 * busses on systems with multiple PCI domains, we can't use
3358 * the unit number to decide which bus we are probing. We ask
3359 * the parent pcib what our domain and bus numbers are.
3361 domain = pcib_get_domain(dev);
3362 busno = pcib_get_bus(dev);
3363 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3364 return (bus_generic_attach(dev));
/*
 * Move each attached child in 'devlist' to the requested power state,
 * deferring to the firmware's suggestion via PCIB_POWER_FOR_SLEEP.
 */
3368 pci_set_power_children(device_t dev, device_t *devlist, int numdevs,
3371 device_t child, pcib;
3372 struct pci_devinfo *dinfo;
3376 * Set the device to the given state. If the firmware suggests
3377 * a different power state, use it instead. If power management
3378 * is not present, the firmware is responsible for managing
3379 * device power. Skip children who aren't attached since they
3380 * are handled separately.
3382 pcib = device_get_parent(dev);
3383 for (i = 0; i < numdevs; i++) {
3385 dinfo = device_get_ivars(child);
3387 if (device_is_attached(child) &&
3388 PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
3389 pci_set_powerstate(child, dstate);
/*
 * Bus suspend: save each child's config space, suspend the children,
 * then (optionally) power them down.
 * NOTE(review): two free(devlist) calls appear below; in this excerpt
 * the intervening error-return lines are missing — presumably the first
 * free is on the bus_generic_suspend() failure path.  Verify against
 * the full source.
 */
3394 pci_suspend(device_t dev)
3396 device_t child, *devlist;
3397 struct pci_devinfo *dinfo;
3398 int error, i, numdevs;
3401 * Save the PCI configuration space for each child and set the
3402 * device in the appropriate power state for this sleep state.
3404 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3406 for (i = 0; i < numdevs; i++) {
3408 dinfo = device_get_ivars(child);
3409 pci_cfg_save(child, dinfo, 0);
3412 /* Suspend devices before potentially powering them down. */
3413 error = bus_generic_suspend(dev);
3415 free(devlist, M_TEMP);
3418 if (pci_do_power_suspend)
3419 pci_set_power_children(dev, devlist, numdevs,
3421 free(devlist, M_TEMP);
/*
 * Bus resume: power children back to D0 (if enabled), restore their
 * config space, then resume critical device classes first and the rest
 * in a second pass.
 */
3426 pci_resume(device_t dev)
3428 device_t child, *devlist;
3429 struct pci_devinfo *dinfo;
3430 int error, i, numdevs;
3433 * Set each child to D0 and restore its PCI configuration space.
3435 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3437 if (pci_do_power_resume)
3438 pci_set_power_children(dev, devlist, numdevs,
3441 /* Now the device is powered up, restore its config space. */
3442 for (i = 0; i < numdevs; i++) {
3444 dinfo = device_get_ivars(child);
3446 pci_cfg_restore(child, dinfo);
3447 if (!device_is_attached(child))
3448 pci_cfg_save(child, dinfo, 1);
3452 * Resume critical devices first, then everything else later.
3454 for (i = 0; i < numdevs; i++) {
3456 switch (pci_get_class(child)) {
3460 case PCIC_BASEPERIPH:
3461 DEVICE_RESUME(child);
/* Second pass: everything that is not in the critical classes above. */
3465 for (i = 0; i < numdevs; i++) {
3467 switch (pci_get_class(child)) {
3471 case PCIC_BASEPERIPH:
3474 DEVICE_RESUME(child);
3477 free(devlist, M_TEMP);
/*
 * Locate a preloaded "pci_vendor_data" module and point the global
 * vendor database at it, newline-terminating the buffer in place.
 */
3482 pci_load_vendor_data(void)
3488 data = preload_search_by_type("pci_vendor_data");
3490 ptr = preload_fetch_addr(data);
3491 sz = preload_fetch_size(data);
3492 if (ptr != NULL && sz != 0) {
3493 pci_vendordata = ptr;
3494 pci_vendordata_size = sz;
3495 /* terminate the database */
3496 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Newbus driver_added hook: re-identify and re-probe every child that is
 * not yet claimed (DS_NOTPRESENT), restoring its config space first; if
 * the probe/attach fails, clean up via pci_child_detached().
 */
3502 pci_driver_added(device_t dev, driver_t *driver)
3507 struct pci_devinfo *dinfo;
3511 device_printf(dev, "driver added\n");
3512 DEVICE_IDENTIFY(driver, dev);
3513 if (device_get_children(dev, &devlist, &numdevs) != 0)
3515 for (i = 0; i < numdevs; i++) {
3517 if (device_get_state(child) != DS_NOTPRESENT)
3519 dinfo = device_get_ivars(child);
3520 pci_print_verbose(dinfo);
3522 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3523 pci_cfg_restore(child, dinfo);
3524 if (device_probe_and_attach(child) != 0)
3525 pci_child_detached(dev, child);
3527 free(devlist, M_TEMP);
/*
 * Bus setup_intr method: install the handler generically, then for MSI
 * or MSI-X interrupts ask the parent bridge to map the message (lazily,
 * on first handler) and program/unmask the vector; INTx is enabled for
 * legacy interrupts and disabled when MSI/MSI-X is in use.  On mapping
 * failure the generic handler is torn down again.
 */
3531 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3532 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3534 struct pci_devinfo *dinfo;
3535 struct msix_table_entry *mte;
3536 struct msix_vector *mv;
3542 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3547 /* If this is not a direct child, just bail out. */
3548 if (device_get_parent(child) != dev) {
3553 rid = rman_get_rid(irq);
3555 /* Make sure that INTx is enabled */
3556 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3559 * Check to see if the interrupt is MSI or MSI-X.
3560 * Ask our parent to map the MSI and give
3561 * us the address and data register values.
3562 * If we fail for some reason, teardown the
3563 * interrupt handler.
3565 dinfo = device_get_ivars(child);
3566 if (dinfo->cfg.msi.msi_alloc > 0) {
/* MSI: map address/data lazily on the first handler for this device. */
3567 if (dinfo->cfg.msi.msi_addr == 0) {
3568 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3569 ("MSI has handlers, but vectors not mapped"));
3570 error = PCIB_MAP_MSI(device_get_parent(dev),
3571 child, rman_get_start(irq), &addr, &data);
3574 dinfo->cfg.msi.msi_addr = addr;
3575 dinfo->cfg.msi.msi_data = data;
3577 if (dinfo->cfg.msi.msi_handlers == 0)
3578 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3579 dinfo->cfg.msi.msi_data);
3580 dinfo->cfg.msi.msi_handlers++;
/* MSI-X: rid indexes the table; map and unmask the per-entry vector. */
3582 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3583 ("No MSI or MSI-X interrupts allocated"));
3584 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3585 ("MSI-X index too high"));
3586 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3587 KASSERT(mte->mte_vector != 0, ("no message vector"));
3588 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3589 KASSERT(mv->mv_irq == rman_get_start(irq),
3591 if (mv->mv_address == 0) {
3592 KASSERT(mte->mte_handlers == 0,
3593 ("MSI-X table entry has handlers, but vector not mapped"));
3594 error = PCIB_MAP_MSI(device_get_parent(dev),
3595 child, rman_get_start(irq), &addr, &data);
3598 mv->mv_address = addr;
3601 if (mte->mte_handlers == 0) {
3602 pci_enable_msix(child, rid - 1, mv->mv_address,
3604 pci_unmask_msix(child, rid - 1);
3606 mte->mte_handlers++;
3609 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3610 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3613 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus teardown_intr method: mirror of pci_setup_intr — decrement the
 * MSI/MSI-X handler count, disabling MSI or masking the MSI-X entry when
 * the last handler goes away, then perform the generic teardown.
 */
3623 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3626 struct msix_table_entry *mte;
3627 struct resource_list_entry *rle;
3628 struct pci_devinfo *dinfo;
3631 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3634 /* If this isn't a direct child, just bail out */
3635 if (device_get_parent(child) != dev)
3636 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3638 rid = rman_get_rid(irq);
3641 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3644 * Check to see if the interrupt is MSI or MSI-X. If so,
3645 * decrement the appropriate handlers count and mask the
3646 * MSI-X message, or disable MSI messages if the count
3649 dinfo = device_get_ivars(child);
3650 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3651 if (rle->res != irq)
3653 if (dinfo->cfg.msi.msi_alloc > 0) {
3654 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3655 ("MSI-X index too high"));
3656 if (dinfo->cfg.msi.msi_handlers == 0)
3658 dinfo->cfg.msi.msi_handlers--;
3659 if (dinfo->cfg.msi.msi_handlers == 0)
3660 pci_disable_msi(child);
3662 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3663 ("No MSI or MSI-X interrupts allocated"));
3664 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3665 ("MSI-X index too high"));
3666 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3667 if (mte->mte_handlers == 0)
3669 mte->mte_handlers--;
3670 if (mte->mte_handlers == 0)
3671 pci_mask_msix(child, rid - 1);
3674 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3677 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus print_child method: emit the standard header plus the child's
 * port/memory/IRQ resources, flags, and slot.function location.
 * Returns the number of characters printed.
 */
3682 pci_print_child(device_t dev, device_t child)
3684 struct pci_devinfo *dinfo;
3685 struct resource_list *rl;
3688 dinfo = device_get_ivars(child);
3689 rl = &dinfo->resources;
3691 retval += bus_print_child_header(dev, child);
3693 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3694 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3695 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3696 if (device_get_flags(dev))
3697 retval += printf(" flags %#x", device_get_flags(dev));
3699 retval += printf(" at device %d.%d", pci_get_slot(child),
3700 pci_get_function(child));
3702 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass description table consulted by pci_probe_nomatch() to
 * print a human-readable name for devices without an attached driver.
 * A subclass of -1 is the class-wide fallback entry; entries are ordered
 * class fallback first, then its subclasses.
 */
3712 } pci_nomatch_tab[] = {
3713 {PCIC_OLD, -1, "old"},
3714 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3715 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3716 {PCIC_STORAGE, -1, "mass storage"},
3717 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3718 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3719 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3720 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3721 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3722 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3723 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3724 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3725 {PCIC_STORAGE, PCIS_STORAGE_NVM, "NVM"},
3726 {PCIC_NETWORK, -1, "network"},
3727 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3728 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3729 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3730 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3731 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3732 {PCIC_DISPLAY, -1, "display"},
3733 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3734 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3735 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3736 {PCIC_MULTIMEDIA, -1, "multimedia"},
3737 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3738 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3739 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3740 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3741 {PCIC_MEMORY, -1, "memory"},
3742 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3743 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3744 {PCIC_BRIDGE, -1, "bridge"},
3745 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3746 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3747 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3748 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3749 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3750 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3751 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3752 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3753 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3754 {PCIC_SIMPLECOMM, -1, "simple comms"},
3755 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3756 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3757 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3758 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3759 {PCIC_BASEPERIPH, -1, "base peripheral"},
3760 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3761 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3762 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3763 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3764 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3765 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3766 {PCIC_INPUTDEV, -1, "input device"},
3767 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3768 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3769 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3770 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3771 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3772 {PCIC_DOCKING, -1, "docking station"},
3773 {PCIC_PROCESSOR, -1, "processor"},
3774 {PCIC_SERIALBUS, -1, "serial bus"},
3775 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3776 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3777 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3778 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3779 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3780 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3781 {PCIC_WIRELESS, -1, "wireless controller"},
3782 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3783 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3784 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3785 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3786 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3787 {PCIC_SATCOM, -1, "satellite communication"},
3788 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3789 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3790 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3791 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3792 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3793 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3794 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3795 {PCIC_DASP, -1, "dasp"},
3796 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Bus probe_nomatch hook: describe an unclaimed device, either from the
 * loaded vendor database or from the class/subclass table above, then
 * save its config space (and power it down per policy — the omitted
 * lines around pci_cfg_save's third argument handle that).
 */
3801 pci_probe_nomatch(device_t dev, device_t child)
3804 const char *cp, *scp;
3808 * Look for a listing for this device in a loaded device database.
3810 if ((device = pci_describe_device(child)) != NULL) {
3811 device_printf(dev, "<%s>", device);
3812 free(device, M_DEVBUF);
3815 * Scan the class/subclass descriptions for a general
3820 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3821 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3822 if (pci_nomatch_tab[i].subclass == -1) {
3823 cp = pci_nomatch_tab[i].desc;
3824 } else if (pci_nomatch_tab[i].subclass ==
3825 pci_get_subclass(child)) {
3826 scp = pci_nomatch_tab[i].desc;
3830 device_printf(dev, "<%s%s%s>",
3832 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3835 printf(" at device %d.%d (no driver attached)\n",
3836 pci_get_slot(child), pci_get_function(child));
3837 pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Bus callback invoked when a child driver detaches: reclaim any
 * resources the driver leaked, complaining about each kind, then save
 * the device's config state.  Ordering matters (see comment below).
 */
3841 pci_child_detached(device_t dev, device_t child)
3843 struct pci_devinfo *dinfo;
3844 struct resource_list *rl;
3846 dinfo = device_get_ivars(child);
3847 rl = &dinfo->resources;
3850 * Have to deallocate IRQs before releasing any MSI messages and
3851 * have to release MSI messages before deallocating any memory
/* IRQs first (see ordering comment above). */
3854 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
3855 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
/* Then any MSI/MSI-X vectors still allocated to the device. */
3856 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
3857 pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
3858 (void)pci_release_msi(child);
/* Finally memory and I/O port ranges. */
3860 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
3861 pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
3862 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
3863 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
3865 pci_cfg_save(child, dinfo, 1);
3869 * Parse the PCI device database, if loaded, and return a pointer to a
3870 * description of the device.
3872 * The database is flat text formatted as follows:
3874 * Any line not in a valid format is ignored.
3875 * Lines are terminated with newline '\n' characters.
3877 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3880 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3881 * - devices cannot be listed without a corresponding VENDOR line.
3882 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3883 * another TAB, then the device name.
3887 * Assuming (ptr) points to the beginning of a line in the database,
3888 * return the vendor or device and description of the next entry.
3889 * The value of (vendor) or (device) inappropriate for the entry type
3890 * is set to -1. Returns nonzero at the end of the database.
3892 * Note that this is slightly unrobust in the face of corrupt data;
3893 * we attempt to safeguard against this by spamming the end of the
3894 * database with a newline when we initialise.
/*
 * Parse one line of the flat-text PCI vendor database (format described
 * above): a VENDOR line fills *vendor, a DEVICE line fills *device, and
 * *desc receives the (up to 80 char) name.  'left' bounds all scanning
 * to the loaded data so a corrupt database cannot run off the end.
 * NOTE(review): several original lines are elided in this extract.
 */
3897 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3906 left = pci_vendordata_size - (cp - pci_vendordata);
/* VENDOR line: hex code, TAB, description. */
3914 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* DEVICE line: same shape, but stores into *device instead. */
3918 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3921 /* skip to next line */
3922 while (*cp != '\n' && left > 0) {
3931 /* skip to next line */
3932 while (*cp != '\n' && left > 0) {
/* Step past the newline only if data remains. */
3936 if (*cp == '\n' && left > 0)
/*
 * Look up a human-readable "vendor, device" description for 'dev' in the
 * loaded vendor database.  Returns an M_DEVBUF allocation the caller must
 * free, or NULL if no data is loaded or allocation fails.
 */
3943 pci_describe_device(device_t dev)
3946 char *desc, *vp, *dp, *line;
3948 desc = vp = dp = NULL;
3951 * If we have no vendor data, we can't do anything.
3953 if (pci_vendordata == NULL)
3957 * Scan the vendor data looking for this device
3959 line = pci_vendordata;
/* 80-byte buffers match the %80[^\n] scan width in the line parser. */
3960 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3963 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3965 if (vendor == pci_get_vendor(dev))
3968 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3971 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3975 if (device == pci_get_device(dev))
/* No database match for the device id: fall back to a hex string. */
3983 snprintf(dp, 80, "0x%x", pci_get_device(dev));
/* +3: ", " separator plus trailing NUL. */
3984 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3986 sprintf(desc, "%s, %s", vp, dp);
/*
 * BUS_READ_IVAR method: return cached PCI config values (from the
 * devinfo's pcicfgregs snapshot) for the requested instance variable.
 */
3996 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3998 struct pci_devinfo *dinfo;
4001 dinfo = device_get_ivars(child);
4005 case PCI_IVAR_ETHADDR:
4007 * The generic accessor doesn't deal with failure, so
4008 * we set the return value, then return an error.
/* No ethernet address stored here; report NULL and fail. */
4010 *((uint8_t **) result) = NULL;
4012 case PCI_IVAR_SUBVENDOR:
4013 *result = cfg->subvendor;
4015 case PCI_IVAR_SUBDEVICE:
4016 *result = cfg->subdevice;
4018 case PCI_IVAR_VENDOR:
4019 *result = cfg->vendor;
4021 case PCI_IVAR_DEVICE:
4022 *result = cfg->device;
4024 case PCI_IVAR_DEVID:
/* Combined 32-bit id: device in the high word, vendor in the low. */
4025 *result = (cfg->device << 16) | cfg->vendor;
4027 case PCI_IVAR_CLASS:
4028 *result = cfg->baseclass;
4030 case PCI_IVAR_SUBCLASS:
4031 *result = cfg->subclass;
4033 case PCI_IVAR_PROGIF:
4034 *result = cfg->progif;
4036 case PCI_IVAR_REVID:
4037 *result = cfg->revid;
4039 case PCI_IVAR_INTPIN:
4040 *result = cfg->intpin;
4043 *result = cfg->intline;
4045 case PCI_IVAR_DOMAIN:
4046 *result = cfg->domain;
4052 *result = cfg->slot;
4054 case PCI_IVAR_FUNCTION:
4055 *result = cfg->func;
4057 case PCI_IVAR_CMDREG:
4058 *result = cfg->cmdreg;
4060 case PCI_IVAR_CACHELNSZ:
4061 *result = cfg->cachelnsz;
4063 case PCI_IVAR_MINGNT:
4064 *result = cfg->mingnt;
4066 case PCI_IVAR_MAXLAT:
4067 *result = cfg->maxlat;
4069 case PCI_IVAR_LATTIMER:
4070 *result = cfg->lattimer;
/*
 * BUS_WRITE_IVAR method: only PCI_IVAR_INTPIN may be updated; all
 * identification and addressing ivars are read-only and return EINVAL.
 */
4079 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
4081 struct pci_devinfo *dinfo;
4083 dinfo = device_get_ivars(child);
4086 case PCI_IVAR_INTPIN:
/* Update the cached interrupt-pin value only. */
4087 dinfo->cfg.intpin = value;
/* All of the following are immutable through this interface. */
4089 case PCI_IVAR_ETHADDR:
4090 case PCI_IVAR_SUBVENDOR:
4091 case PCI_IVAR_SUBDEVICE:
4092 case PCI_IVAR_VENDOR:
4093 case PCI_IVAR_DEVICE:
4094 case PCI_IVAR_DEVID:
4095 case PCI_IVAR_CLASS:
4096 case PCI_IVAR_SUBCLASS:
4097 case PCI_IVAR_PROGIF:
4098 case PCI_IVAR_REVID:
4100 case PCI_IVAR_DOMAIN:
4103 case PCI_IVAR_FUNCTION:
4104 return (EINVAL); /* disallow for now */
4111 #include "opt_ddb.h"
4113 #include <ddb/ddb.h>
4114 #include <sys/cons.h>
4117 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global PCI device queue and print
 * one summary line per device (name/unit, selector, class, subsystem,
 * chip id, revision, header type).  Stops early on error, on reaching
 * pci_numdevs entries, or when the debugger pager quits.
 */
4120 DB_SHOW_COMMAND(pciregs, db_pci_dump)
4122 struct pci_devinfo *dinfo;
4123 struct devlist *devlist_head;
4126 int i, error, none_count;
4129 /* get the head of the device queue */
4130 devlist_head = &pci_devq;
4133 * Go through the list of devices and print out devices
4135 for (error = 0, i = 0,
4136 dinfo = STAILQ_FIRST(devlist_head);
/* db_pager_quit lets the user abort long listings from the pager. */
4137 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
4138 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
4140 /* Populate pd_name and pd_unit */
4143 name = device_get_name(dinfo->cfg.dev);
4146 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
4147 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
/* Devices with no attached driver print as "none". */
4148 (name && *name) ? name : "none",
4149 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4151 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4152 p->pc_sel.pc_func, (p->pc_class << 16) |
4153 (p->pc_subclass << 8) | p->pc_progif,
4154 (p->pc_subdevice << 16) | p->pc_subvendor,
4155 (p->pc_device << 16) | p->pc_vendor,
4156 p->pc_revid, p->pc_hdr);
/*
 * Lazily reserve the resource backing a BAR when a driver first
 * allocates it: size the BAR (using a previously recorded pci_map if we
 * have one, else by probing it), sanity-check the requested type against
 * the BAR's mem/io kind, allocate a suitably sized and aligned range
 * from the parent, record it in the child's resource list as RLE_RESERVED,
 * and program the BAR with the assigned address.
 * NOTE(review): this extract elides some lines (locals, else-branches).
 */
4161 static struct resource *
4162 pci_reserve_map(device_t dev, device_t child, int type, int *rid,
4163 u_long start, u_long end, u_long count, u_int flags)
4165 struct pci_devinfo *dinfo = device_get_ivars(child);
4166 struct resource_list *rl = &dinfo->resources;
4167 struct resource_list_entry *rle;
4168 struct resource *res;
4170 pci_addr_t map, testval;
4174 pm = pci_find_bar(child, *rid);
4176 /* This is a BAR that we failed to allocate earlier. */
/* Reuse the size determined when the BAR was first probed. */
4177 mapsize = pm->pm_size;
4181 * Weed out the bogons, and figure out how large the
4182 * BAR/map is. BARs that read back 0 here are bogus
4183 * and unimplemented. Note: atapci in legacy mode are
4184 * special and handled elsewhere in the code. If you
4185 * have a atapci device in legacy mode and it fails
4186 * here, that other code is broken.
4188 pci_read_bar(child, *rid, &map, &testval);
4191 * Determine the size of the BAR and ignore BARs with a size
4192 * of 0. Device ROM BARs use a different mask value.
4194 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4195 mapsize = pci_romsize(testval);
4197 mapsize = pci_mapsize(testval);
/* Record the newly probed BAR so later lookups find it. */
4200 pm = pci_add_bar(child, *rid, map, mapsize);
/* Cross-check requested resource type against the BAR's actual kind. */
4203 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4204 if (type != SYS_RES_MEMORY) {
4207 "child %s requested type %d for rid %#x,"
4208 " but the BAR says it is an memio\n",
4209 device_get_nameunit(child), type, *rid);
4213 if (type != SYS_RES_IOPORT) {
4216 "child %s requested type %d for rid %#x,"
4217 " but the BAR says it is an ioport\n",
4218 device_get_nameunit(child), type, *rid);
4224 * For real BARs, we need to override the size that
4225 * the driver requests, because that's what the BAR
4226 * actually uses and we would otherwise have a
4227 * situation where we might allocate the excess to
4228 * another driver, which won't work.
/* BAR decodes a naturally aligned power-of-two window of 2^mapsize. */
4230 count = (pci_addr_t)1 << mapsize;
4231 if (RF_ALIGNMENT(flags) < mapsize)
4232 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4233 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4234 flags |= RF_PREFETCHABLE;
4237 * Allocate enough resource, and then write back the
4238 * appropriate BAR for that resource.
/* Allocate inactive; activation happens when the driver maps it. */
4240 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
4241 start, end, count, flags & ~RF_ACTIVE);
4243 device_printf(child,
4244 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4245 count, *rid, type, start, end);
4248 resource_list_add(rl, type, *rid, start, end, count);
4249 rle = resource_list_find(rl, type, *rid);
4251 panic("pci_reserve_map: unexpectedly can't find resource.");
4253 rle->start = rman_get_start(res);
4254 rle->end = rman_get_end(res);
/* Mark reserved so pci_alloc_resource() hands it out from the list. */
4256 rle->flags = RLE_RESERVED;
4258 device_printf(child,
4259 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4260 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address we actually got. */
4261 map = rman_get_start(res);
4262 pci_write_bar(child, pm, map);
/*
 * BUS_ALLOC_RESOURCE method for the PCI bus.  For grandchildren the
 * request is simply passed up.  For direct children: legacy INTx may be
 * routed on demand (but never while MSI/MSI-X is active), bridge window
 * rids bypass BAR handling, and memory/ioport BARs are lazily reserved
 * via pci_reserve_map() before being satisfied from the resource list.
 */
4268 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4269 u_long start, u_long end, u_long count, u_int flags)
4271 struct pci_devinfo *dinfo;
4272 struct resource_list *rl;
4273 struct resource_list_entry *rle;
4274 struct resource *res;
/* Requests from grandchildren are forwarded straight up the tree. */
4277 if (device_get_parent(child) != dev)
4278 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4279 type, rid, start, end, count, flags));
4282 * Perform lazy resource allocation
4284 dinfo = device_get_ivars(child);
4285 rl = &dinfo->resources;
4290 * Can't alloc legacy interrupt once MSI messages have
4293 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4294 cfg->msix.msix_alloc > 0))
4298 * If the child device doesn't have an interrupt
4299 * routed and is deserving of an interrupt, try to
4302 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4304 pci_assign_interrupt(dev, child, 0);
4306 case SYS_RES_IOPORT:
4307 case SYS_RES_MEMORY:
4310 * PCI-PCI bridge I/O window resources are not BARs.
4311 * For those allocations just pass the request up the
4314 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4316 case PCIR_IOBASEL_1:
4317 case PCIR_MEMBASE_1:
4318 case PCIR_PMBASEL_1:
4320 * XXX: Should we bother creating a resource
4323 return (bus_generic_alloc_resource(dev, child,
4324 type, rid, start, end, count, flags));
4328 /* Reserve resources for this BAR if needed. */
4329 rle = resource_list_find(rl, type, *rid);
4331 res = pci_reserve_map(dev, child, type, rid, start, end,
/* Final allocation always comes from the child's resource list. */
4337 return (resource_list_alloc(rl, dev, child, type, rid,
4338 start, end, count, flags));
/*
 * BUS_RELEASE_RESOURCE method: mirror of pci_alloc_resource().
 * Grandchild requests go up the tree; bridge window rids take the
 * generic path; everything else is released via the resource list.
 */
4342 pci_release_resource(device_t dev, device_t child, int type, int rid,
4345 struct pci_devinfo *dinfo;
4346 struct resource_list *rl;
4349 if (device_get_parent(child) != dev)
4350 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4353 dinfo = device_get_ivars(child);
4357 * PCI-PCI bridge I/O window resources are not BARs. For
4358 * those allocations just pass the request up the tree.
4360 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
4361 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
4363 case PCIR_IOBASEL_1:
4364 case PCIR_MEMBASE_1:
4365 case PCIR_PMBASEL_1:
4366 return (bus_generic_release_resource(dev, child, type,
4372 rl = &dinfo->resources;
4373 return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * BUS_ACTIVATE_RESOURCE method: after generic activation, enable the
 * matching decode for direct children — ROM BARs need their enable bit
 * set explicitly, and memory/ioport activation turns on the command
 * register decode via PCI_ENABLE_IO.
 */
4377 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4380 struct pci_devinfo *dinfo;
4383 error = bus_generic_activate_resource(dev, child, type, rid, r);
4387 /* Enable decoding in the command register when activating BARs. */
4388 if (device_get_parent(child) == dev) {
4389 /* Device ROMs need their decoding explicitly enabled. */
4390 dinfo = device_get_ivars(child);
4391 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4392 pci_write_bar(child, pci_find_bar(child, rid),
4393 rman_get_start(r) | PCIM_BIOS_ENABLE);
4395 case SYS_RES_IOPORT:
4396 case SYS_RES_MEMORY:
4397 error = PCI_ENABLE_IO(dev, child, type);
/*
 * BUS_DEACTIVATE_RESOURCE method: generic deactivation, plus clearing
 * the ROM enable bit for a device-ROM BAR on direct children.
 */
4405 pci_deactivate_resource(device_t dev, device_t child, int type,
4406 int rid, struct resource *r)
4408 struct pci_devinfo *dinfo;
4411 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4415 /* Disable decoding for device ROMs. */
4416 if (device_get_parent(child) == dev) {
4417 dinfo = device_get_ivars(child);
4418 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
/* Rewrite the BAR without PCIM_BIOS_ENABLE set. */
4419 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Detach and destroy a PCI child device: detach its driver, disable
 * memory/ioport decode, release or unreserve every resource-list entry
 * (complaining about any a driver still holds), then delete the device.
 */
4426 pci_delete_child(device_t dev, device_t child)
4428 struct resource_list_entry *rle;
4429 struct resource_list *rl;
4430 struct pci_devinfo *dinfo;
4432 dinfo = device_get_ivars(child);
4433 rl = &dinfo->resources;
4435 if (device_is_attached(child))
4436 device_detach(child);
4438 /* Turn off access to resources we're about to free */
4439 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
4440 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
4442 /* Free all allocated resources */
4443 STAILQ_FOREACH(rle, rl, link) {
/* Active or busy entries were leaked by the driver; force-release. */
4445 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4446 resource_list_busy(rl, rle->type, rle->rid)) {
4447 pci_printf(&dinfo->cfg,
4448 "Resource still owned, oops. "
4449 "(type=%d, rid=%d, addr=%lx)\n",
4450 rle->type, rle->rid,
4451 rman_get_start(rle->res));
4452 bus_release_resource(child, rle->type, rle->rid,
/* Reserved-but-unowned entries are simply unreserved. */
4455 resource_list_unreserve(rl, dev, child, rle->type,
4459 resource_list_free(rl);
4461 device_delete_child(dev, child);
/*
 * BUS_DELETE_RESOURCE method: remove one entry from a direct child's
 * resource list, refusing while the resource is active or busy, and
 * unreserving it first if it was lazily reserved.
 */
4466 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4468 struct pci_devinfo *dinfo;
4469 struct resource_list *rl;
4470 struct resource_list_entry *rle;
/* Only handle direct children. */
4472 if (device_get_parent(child) != dev)
4475 dinfo = device_get_ivars(child);
4476 rl = &dinfo->resources;
4477 rle = resource_list_find(rl, type, rid);
/* Refuse to delete a resource a driver still owns. */
4482 if (rman_get_flags(rle->res) & RF_ACTIVE ||
4483 resource_list_busy(rl, type, rid)) {
4484 device_printf(dev, "delete_resource: "
4485 "Resource still owned by child, oops. "
4486 "(type=%d, rid=%d, addr=%lx)\n",
4487 type, rid, rman_get_start(rle->res));
4490 resource_list_unreserve(rl, dev, child, type, rid);
4492 resource_list_delete(rl, type, rid);
/*
 * BUS_GET_RESOURCE_LIST method: hand back the child's per-device
 * resource list stored in its devinfo.
 */
struct resource_list *
pci_get_resource_list (device_t dev, device_t child)
4498 struct pci_devinfo *dinfo = device_get_ivars(child);
4500 return (&dinfo->resources);
/*
 * BUS_GET_DMA_TAG method: all children share the bus softc's DMA tag.
 */
4504 pci_get_dma_tag(device_t bus, device_t dev)
4506 struct pci_softc *sc = device_get_softc(bus);
4508 return (sc->sc_dma_tag);
/*
 * PCI_READ_CONFIG method: read 'width' bytes at config-space offset
 * 'reg' of the child, delegating to the parent bridge's PCIB method
 * with the child's cached bus/slot/function address.
 */
4512 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4514 struct pci_devinfo *dinfo = device_get_ivars(child);
4515 pcicfgregs *cfg = &dinfo->cfg;
4517 return (PCIB_READ_CONFIG(device_get_parent(dev),
4518 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * PCI_WRITE_CONFIG method: write 'val' ('width' bytes) at config-space
 * offset 'reg' of the child via the parent bridge's PCIB method.
 */
4522 pci_write_config_method(device_t dev, device_t child, int reg,
4523 uint32_t val, int width)
4525 struct pci_devinfo *dinfo = device_get_ivars(child);
4526 pcicfgregs *cfg = &dinfo->cfg;
4528 PCIB_WRITE_CONFIG(device_get_parent(dev),
4529 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * BUS_CHILD_LOCATION_STR method: format "slot=%d function=%d" for the
 * child into the caller-supplied buffer.
 */
4533 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4537 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4538 pci_get_function(child));
/*
 * BUS_CHILD_PNPINFO_STR method: format the child's plug-and-play
 * identification (vendor/device/subvendor/subdevice/class) into the
 * caller-supplied buffer, from the cached config registers.
 */
4543 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4546 struct pci_devinfo *dinfo;
4549 dinfo = device_get_ivars(child);
/* class is printed as baseclass/subclass/progif packed into 3 bytes. */
4551 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4552 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4553 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * PCI_ASSIGN_INTERRUPT method: ask the parent bridge to route an
 * interrupt for this child.
 */
4559 pci_assign_interrupt_method(device_t dev, device_t child)
4561 struct pci_devinfo *dinfo = device_get_ivars(child);
4562 pcicfgregs *cfg = &dinfo->cfg;
4564 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev pci control node, and load the vendor database; on
 * unload, destroy the device node.
 */
4569 pci_modevent(module_t mod, int what, void *arg)
/* Single control device shared across load/unload events. */
4571 static struct cdev *pci_cdev;
4575 STAILQ_INIT(&pci_devq);
4577 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4579 pci_load_vendor_data();
4583 destroy_dev(pci_cdev);
/*
 * Restore the saved PCI Express capability control registers.  Which
 * registers exist depends on the capability version and port type, so
 * each write is gated accordingly (mirrors pci_cfg_save_pcie()).
 */
4591 pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
/* Helper: 16-bit write at offset n within the PCIe capability. */
4593 #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
4594 struct pcicfg_pcie *cfg;
4597 cfg = &dinfo->cfg.pcie;
4598 pos = cfg->pcie_location;
4600 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
/* Device control exists for every PCIe device. */
4602 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
/* Link control: v2+ always has it; v1 only for these port types. */
4604 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4605 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
4606 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
4607 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
/* Slot control: root ports, or downstream ports with a slot. */
4609 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4610 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
4611 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
4612 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
/* Root control: root ports and root-complex event collectors. */
4614 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4615 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
4616 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
/* "2" registers were introduced with capability version 2. */
4619 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
4620 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
4621 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
/*
 * Restore the saved PCI-X command register (counterpart of
 * pci_cfg_save_pcix()).
 */
4627 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
4629 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
4630 dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore a device's configuration from the cached devinfo snapshot:
 * power it to D0 first (BARs reset on D3->D0), then rewrite BARs, the
 * standard writable type-0 header registers, and the PCIe/PCI-X/MSI/
 * MSI-X capability state.  Bridges and cardbus devices are skipped.
 */
4634 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
4638 * Only do header type 0 devices. Type 1 devices are bridges,
4639 * which we know need special treatment. Type 2 devices are
4640 * cardbus bridges which also require special treatment.
4641 * Other types are unknown, and we err on the side of safety
4644 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
4648 * Restore the device to full power mode. We must do this
4649 * before we restore the registers because moving from D3 to
4650 * D0 will cause the chip's BARs and some other registers to
4651 * be reset to some unknown power on reset values. Cut down
4652 * the noise on boot by doing nothing if we are already in
4655 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
4656 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4657 pci_restore_bars(dev);
4658 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
4659 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
4660 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
4661 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
4662 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
4663 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
4664 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
4665 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
4666 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
4669 * Restore extended capabilities for PCI-Express and PCI-X
/* A capability location of 0 means the capability is absent. */
4671 if (dinfo->cfg.pcie.pcie_location != 0)
4672 pci_cfg_restore_pcie(dev, dinfo);
4673 if (dinfo->cfg.pcix.pcix_location != 0)
4674 pci_cfg_restore_pcix(dev, dinfo);
4676 /* Restore MSI and MSI-X configurations if they are present. */
4677 if (dinfo->cfg.msi.msi_location != 0)
4678 pci_resume_msi(dev);
4679 if (dinfo->cfg.msix.msix_location != 0)
4680 pci_resume_msix(dev);
/*
 * Snapshot the PCI Express capability control registers into the
 * devinfo cache.  Register presence depends on capability version and
 * port type; the gating here matches pci_cfg_restore_pcie().
 */
4684 pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
/* Helper: 16-bit read at offset n within the PCIe capability. */
4686 #define RREG(n) pci_read_config(dev, pos + (n), 2)
4687 struct pcicfg_pcie *cfg;
4690 cfg = &dinfo->cfg.pcie;
4691 pos = cfg->pcie_location;
4693 cfg->pcie_flags = RREG(PCIER_FLAGS);
4695 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
4697 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
/* Link control: v2+ always; v1 only for these port types. */
4699 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4700 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
4701 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
4702 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
/* Slot control: root ports, or downstream ports with a slot. */
4704 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4705 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
4706 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
4707 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
/* Root control: root ports and root-complex event collectors. */
4709 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
4710 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
4711 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
/* "2" registers were introduced with capability version 2. */
4714 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
4715 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
4716 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
/*
 * Snapshot the PCI-X command register (counterpart of
 * pci_cfg_restore_pcix()).
 */
4722 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
4724 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
4725 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * Snapshot a device's writable configuration registers into the devinfo
 * cache so pci_cfg_restore() can replay them later, and optionally
 * (setstate != 0) power the device down to D3 according to the
 * pci_do_power_nodriver policy.  Bridges and cardbus devices are skipped.
 */
4729 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
4735 * Only do header type 0 devices. Type 1 devices are bridges, which
4736 * we know need special treatment. Type 2 devices are cardbus bridges
4737 * which also require special treatment. Other types are unknown, and
4738 * we err on the side of safety by ignoring them. Powering down
4739 * bridges should not be undertaken lightly.
4741 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
4745 * Some drivers apparently write to these registers w/o updating our
4746 * cached copy. No harm happens if we update the copy, so do so here
4747 * so we can restore them. The COMMAND register is modified by the
4748 * bus w/o updating the cache. This should represent the normally
4749 * writable portion of the 'defined' part of type 0 headers. In
4750 * theory we also need to save/restore the PCI capability structures
4751 * we know about, but apart from power we don't know any that are
4754 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
4755 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4756 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
4757 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
4758 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
4759 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
4760 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
4761 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
4762 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
4763 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
4764 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
4765 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
4766 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
4767 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
4768 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
/* Save capability state when the capability exists (location != 0). */
4770 if (dinfo->cfg.pcie.pcie_location != 0)
4771 pci_cfg_save_pcie(dev, dinfo);
4773 if (dinfo->cfg.pcix.pcix_location != 0)
4774 pci_cfg_save_pcix(dev, dinfo);
4777 * don't set the state for display devices, base peripherals and
4778 * memory devices since bad things happen when they are powered down.
4779 * We should (a) have drivers that can easily detach and (b) use
4780 * generic drivers for these devices so that some device actually
4781 * attaches. We need to make sure that when we implement (a) we don't
4782 * power the device down on a reattach.
4784 cls = pci_get_class(dev);
/* Policy knob: 0 = never, 1 = conservative, 2 = aggressive, 3 = always. */
4787 switch (pci_do_power_nodriver)
4789 case 0: /* NO powerdown at all */
4791 case 1: /* Conservative about what to power down */
4792 if (cls == PCIC_STORAGE)
4795 case 2: /* Agressive about what to power down */
4796 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4797 cls == PCIC_BASEPERIPH)
4800 case 3: /* Power down everything */
4804 * PCI spec says we can only go into D3 state from D0 state.
4805 * Transition from D[12] into D0 before going to D3 state.
4807 ps = pci_get_powerstate(dev);
4808 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4809 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4810 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4811 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
4814 /* Wrapper APIs suitable for device driver use. */
/*
 * Driver-facing wrapper around pci_cfg_save(): snapshot the device's
 * config registers without changing its power state (setstate = 0).
 */
4816 pci_save_state(device_t dev)
4818 struct pci_devinfo *dinfo;
4820 dinfo = device_get_ivars(dev);
4821 pci_cfg_save(dev, dinfo, 0);
/*
 * Driver-facing wrapper around pci_cfg_restore(): replay the saved
 * config registers (typically after a reset or resume).
 */
4825 pci_restore_state(device_t dev)
4827 struct pci_devinfo *dinfo;
4829 dinfo = device_get_ivars(dev);
4830 pci_cfg_restore(dev, dinfo);