2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
74 #include <contrib/dev/acpica/include/acpi.h>
/*
 * NOTE(review): this extract is missing interleaved source lines (the
 * embedded original line numbers are discontinuous), so conditional
 * branches such as the #else/#endif of the PCI_DMA_BOUNDARY block are
 * absent here.  Code kept byte-identical.
 */
/* Stub: ACPI power hooks compile away when ACPI support is not built in. */
77 #define ACPI_PWR_FOR_SLEEP(x, y, z)
81 * XXX: Due to a limitation of the bus_dma_tag_create() API, we cannot
82 * specify a 4GB boundary on 32-bit targets. Usually this does not
83 * matter as it is ok to use a boundary of 0 on these systems.
84 * However, in the case of PAE, DMA addresses can cross a 4GB
85 * boundary, so as a workaround use a 2GB boundary.
87 #if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
/* NOTE(review): both #define arms appear below but the intervening
 * #else (and the closing #endif) are missing from this extract; in the
 * original, the 0x100000000 boundary belongs to the >4GB branch. */
89 #define PCI_DMA_BOUNDARY 0x80000000
91 #define PCI_DMA_BOUNDARY 0x100000000
/* True when 'reg' is the expansion-ROM BAR for the given header type
 * (type 0 devices use PCIR_BIOS, type 1 bridges use PCIR_BIOS_1). */
95 #define PCIR_IS_BIOS(cfg, reg) \
96 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
97 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
/*
 * Forward declarations for the file-local (static) helpers of the PCI
 * bus driver.  NOTE(review): several multi-line prototypes are missing
 * their continuation lines in this extract (original line numbers jump,
 * e.g. after pci_assign_interrupt and pci_hdrtypedata).
 */
99 static pci_addr_t pci_mapbase(uint64_t mapreg);
100 static const char *pci_maptype(uint64_t mapreg);
101 static int pci_mapsize(uint64_t testval);
102 static int pci_maprange(uint64_t mapreg);
103 static pci_addr_t pci_rombase(uint64_t mapreg);
104 static int pci_romsize(uint64_t testval);
105 static void pci_fixancient(pcicfgregs *cfg);
106 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
108 static int pci_porten(device_t dev);
109 static int pci_memen(device_t dev);
110 static void pci_assign_interrupt(device_t bus, device_t dev,
112 static int pci_add_map(device_t bus, device_t dev, int reg,
113 struct resource_list *rl, int force, int prefetch);
114 static int pci_probe(device_t dev);
115 static int pci_attach(device_t dev);
116 static void pci_load_vendor_data(void);
117 static int pci_describe_parse_line(char **ptr, int *vendor,
118 int *device, char **desc);
119 static char *pci_describe_device(device_t dev);
120 static bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev);
121 static int pci_modevent(module_t mod, int what, void *arg);
122 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
124 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
125 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
126 int reg, uint32_t *data);
128 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
129 int reg, uint32_t data);
131 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
132 static void pci_disable_msi(device_t dev);
133 static void pci_enable_msi(device_t dev, uint64_t address,
135 static void pci_enable_msix(device_t dev, u_int index,
136 uint64_t address, uint32_t data);
137 static void pci_mask_msix(device_t dev, u_int index);
138 static void pci_unmask_msix(device_t dev, u_int index);
139 static int pci_msi_blacklisted(void);
140 static void pci_resume_msi(device_t dev);
141 static void pci_resume_msix(device_t dev);
142 static int pci_remap_intr_method(device_t bus, device_t dev,
/*
 * newbus method dispatch table for the PCI bus driver: device-interface
 * methods, bus-interface methods, and pci-interface (kobj) methods.
 * NOTE(review): the table's terminating entry and closing brace are
 * missing from this extract (original line numbers jump past them).
 */
145 static device_method_t pci_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, pci_probe),
148 DEVMETHOD(device_attach, pci_attach),
149 DEVMETHOD(device_detach, bus_generic_detach),
150 DEVMETHOD(device_shutdown, bus_generic_shutdown),
151 DEVMETHOD(device_suspend, pci_suspend),
152 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
155 DEVMETHOD(bus_print_child, pci_print_child),
156 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
157 DEVMETHOD(bus_read_ivar, pci_read_ivar),
158 DEVMETHOD(bus_write_ivar, pci_write_ivar),
159 DEVMETHOD(bus_driver_added, pci_driver_added),
160 DEVMETHOD(bus_setup_intr, pci_setup_intr),
161 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
163 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
164 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
165 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
166 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
167 DEVMETHOD(bus_delete_resource, pci_delete_resource),
168 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
169 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
170 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
171 DEVMETHOD(bus_activate_resource, pci_activate_resource),
172 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
173 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
174 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
175 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
/* PCI interface */
178 DEVMETHOD(pci_read_config, pci_read_config_method),
179 DEVMETHOD(pci_write_config, pci_write_config_method),
180 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
181 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
182 DEVMETHOD(pci_enable_io, pci_enable_io_method),
183 DEVMETHOD(pci_disable_io, pci_disable_io_method),
184 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
185 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
186 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
187 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
188 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
189 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
190 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
191 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
192 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
193 DEVMETHOD(pci_release_msi, pci_release_msi_method),
194 DEVMETHOD(pci_msi_count, pci_msi_count_method),
195 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Register the driver class and attach it under pcib (PCI bridges). */
200 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
202 static devclass_t pci_devclass;
203 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
204 MODULE_VERSION(pci, 1);
/* Loaded copy of the pci_vendor_data linker file (device name database). */
206 static char *pci_vendordata;
207 static size_t pci_vendordata_size;
/* NOTE(review): the struct pci_quirk declaration surrounding this field
 * is missing from the extract; 'devid' is its vendor<<16|device key. */
210 uint32_t devid; /* Vendor/device of the card */
212 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
213 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
214 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
215 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
/* Table of known-broken/special devices, keyed by devid.
 * NOTE(review): the table terminator and closing brace are not visible. */
220 static const struct pci_quirk pci_quirks[] = {
221 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
222 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
223 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
224 /* As does the Serverworks OSB4 (the SMBus mapping register) */
225 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
228 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
229 * or the CMIC-SL (AKA ServerWorks GC_LE).
231 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
232 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
235 * MSI doesn't work on earlier Intel chipsets including
236 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
238 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
244 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
247 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
250 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
253 * MSI-X allocation doesn't work properly for devices passed through
254 * by VMware up to at least ESXi 5.1.
256 { 0x079015ad, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* PCI/PCI-X */
257 { 0x07a015ad, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* PCIe */
260 * Some virtualization environments emulate an older chipset
261 * but support MSI just fine. QEMU uses the Intel 82440.
263 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
266 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
267 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
268 * It prevents us from attaching hpet(4) when the bit is unset.
269 * Note this quirk only affects SB600 revision A13 and earlier.
270 * For SB600 A21 and later, firmware must set the bit to hide it.
271 * For SB700 and later, it is unused and hardcoded to zero.
273 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
278 /* map register information */
279 #define PCI_MAPMEM 0x01 /* memory map */
280 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
281 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of all discovered PCI functions plus generation counters. */
283 struct devlist pci_devq;
284 uint32_t pci_generation;
285 uint32_t pci_numdevs = 0;
/* Set when at least one PCIe / PCI-X capable bridge or device is seen. */
286 static int pcie_chipset, pcix_chipset;
/*
 * Tunables/sysctls under hw.pci.* controlling driver policy.
 * NOTE(review): this extract drops some lines (e.g. the trailing text and
 * closing of the do_power_nodriver string, and the #else/#endif around the
 * two pci_usb_takeover defaults at original lines 336/338).
 */
289 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
291 static int pci_enable_io_modes = 1;
292 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
293 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
294 &pci_enable_io_modes, 1,
295 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
296 enable these bits correctly. We'd like to do this all the time, but there\n\
297 are some peripherals that this causes problems with.");
299 static int pci_do_realloc_bars = 0;
300 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
301 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
302 &pci_do_realloc_bars, 0,
303 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
305 static int pci_do_power_nodriver = 0;
306 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
307 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
308 &pci_do_power_nodriver, 0,
309 "Place a function into D3 state when no driver attaches to it. 0 means\n\
310 disable. 1 means conservatively place devices into D3 state. 2 means\n\
311 agressively place devices into D3 state. 3 means put absolutely everything\n\
314 static int pci_do_power_resume = 1;
315 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
316 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
317 &pci_do_power_resume, 1,
318 "Transition from D3 -> D0 on resume.");
320 static int pci_do_msi = 1;
321 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
322 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
323 "Enable support for MSI interrupts");
325 static int pci_do_msix = 1;
326 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
327 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
328 "Enable support for MSI-X interrupts");
330 static int pci_honor_msi_blacklist = 1;
331 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
332 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
333 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
/* Default on for x86 (BIOS legacy USB emulation), off elsewhere. */
335 #if defined(__i386__) || defined(__amd64__)
336 static int pci_usb_takeover = 1;
338 static int pci_usb_takeover = 0;
340 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
341 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
342 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
343 Disable this if you depend on BIOS emulation of USB devices, that is\n\
344 you use USB devices (like keyboard or mouse) but do not load USB drivers");
/*
 * Device lookup helpers over the global pci_devq list.
 * NOTE(review): return-type lines, braces, and the fall-through
 * "return (NULL);" lines are missing from this extract; the embedded
 * original line numbers are discontinuous.  Code kept byte-identical.
 */
346 /* Find a device_t by bus/slot/function in domain 0 */
/* Thin wrapper: delegates to pci_find_dbsf() with domain 0. */
349 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
352 return (pci_find_dbsf(0, bus, slot, func));
355 /* Find a device_t by domain/bus/slot/function */
/* Linear scan of pci_devq for an exact domain/bus/slot/func match. */
358 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
360 struct pci_devinfo *dinfo;
362 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
363 if ((dinfo->cfg.domain == domain) &&
364 (dinfo->cfg.bus == bus) &&
365 (dinfo->cfg.slot == slot) &&
366 (dinfo->cfg.func == func)) {
367 return (dinfo->cfg.dev);
374 /* Find a device_t by vendor/device ID */
/* Returns the first device whose vendor/device IDs match. */
377 pci_find_device(uint16_t vendor, uint16_t device)
379 struct pci_devinfo *dinfo;
381 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
382 if ((dinfo->cfg.vendor == vendor) &&
383 (dinfo->cfg.device == device)) {
384 return (dinfo->cfg.dev);
/* Returns the first device whose base class/subclass match. */
392 pci_find_class(uint8_t class, uint8_t subclass)
394 struct pci_devinfo *dinfo;
396 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
397 if (dinfo->cfg.baseclass == class &&
398 dinfo->cfg.subclass == subclass) {
399 return (dinfo->cfg.dev);
/*
 * pci_printf() prefixes a printf with the device's domain:bus:slot:func,
 * followed by BAR-decoding helpers.  NOTE(review): braces, va_start/va_end,
 * return statements, and several case bodies are missing from this extract
 * (original line numbers jump); code kept byte-identical.
 */
407 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
412 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
415 retval += vprintf(fmt, ap);
420 /* return base address of memory or port map */
/* Masks off the low type bits of a BAR to yield the base address. */
423 pci_mapbase(uint64_t mapreg)
426 if (PCI_BAR_MEM(mapreg))
427 return (mapreg & PCIM_BAR_MEM_BASE);
429 return (mapreg & PCIM_BAR_IO_BASE);
432 /* return map type of memory or port map */
/* Human-readable BAR type; I/O-port and plain-memory returns are in
 * lines missing from this extract. */
435 pci_maptype(uint64_t mapreg)
438 if (PCI_BAR_IO(mapreg))
440 if (mapreg & PCIM_BAR_MEM_PREFETCH)
441 return ("Prefetchable Memory");
445 /* return log2 of map size decoded for memory or port map */
/* Counts trailing zero bits of the sizing value written back by the
 * device; loop body/return are missing from this extract. */
448 pci_mapsize(uint64_t testval)
452 testval = pci_mapbase(testval);
455 while ((testval & 1) == 0)
464 /* return base address of device ROM */
467 pci_rombase(uint64_t mapreg)
470 return (mapreg & PCIM_BIOS_ADDR_MASK);
473 /* return log2 of map size decided for device ROM */
476 pci_romsize(uint64_t testval)
480 testval = pci_rombase(testval);
483 while ((testval & 1) == 0)
492 /* return log2 of address range supported by map register */
/* 32 for I/O or 32-bit memory BARs, 20 for <1MB, 64 for 64-bit BARs
 * (the per-case return lines are missing from this extract). */
495 pci_maprange(uint64_t mapreg)
499 if (PCI_BAR_IO(mapreg))
502 switch (mapreg & PCIM_BAR_MEM_TYPE) {
503 case PCIM_BAR_MEM_32:
506 case PCIM_BAR_MEM_1MB:
509 case PCIM_BAR_MEM_64:
516 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/* NOTE(review): braces, the early return, and the hdrtype assignment are
 * missing from this extract; code kept byte-identical. */
519 pci_fixancient(pcicfgregs *cfg)
521 if (cfg->hdrtype != 0)
524 /* PCI to PCI bridges use header type 1 */
525 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
529 /* extract header type specific config data */
/* Reads subvendor/subdevice and BAR count per header type (normal /
 * bridge / cardbus); the case labels and #undef REG are in missing lines. */
532 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
534 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
535 switch (cfg->hdrtype) {
537 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
538 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
539 cfg->nummaps = PCI_MAXMAPS_0;
542 cfg->nummaps = PCI_MAXMAPS_1;
545 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
546 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
547 cfg->nummaps = PCI_MAXMAPS_2;
553 /* read configuration header into pcicfgregs structure */
/*
 * Allocates a pci_devinfo of 'size' bytes for the function at
 * domain d / bus b / slot s / func f (if it responds to config reads),
 * fills in the standard header fields, reads capabilities, and links
 * the entry onto the global device list.  Returns the new entry.
 * NOTE(review): braces, domain/bus/slot/func assignments, pci_fixancient
 * call, and the #undef REG are among the lines missing from this extract.
 */
555 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
557 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
558 pcicfgregs *cfg = NULL;
559 struct pci_devinfo *devlist_entry;
560 struct devlist *devlist_head;
562 devlist_head = &pci_devq;
564 devlist_entry = NULL;
/* 0xffffffff in DEVVENDOR means no function present at this address. */
566 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
567 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
568 if (devlist_entry == NULL)
571 cfg = &devlist_entry->cfg;
577 cfg->vendor = REG(PCIR_VENDOR, 2);
578 cfg->device = REG(PCIR_DEVICE, 2);
579 cfg->cmdreg = REG(PCIR_COMMAND, 2);
580 cfg->statreg = REG(PCIR_STATUS, 2);
581 cfg->baseclass = REG(PCIR_CLASS, 1);
582 cfg->subclass = REG(PCIR_SUBCLASS, 1);
583 cfg->progif = REG(PCIR_PROGIF, 1);
584 cfg->revid = REG(PCIR_REVID, 1);
585 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
586 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
587 cfg->lattimer = REG(PCIR_LATTIMER, 1);
588 cfg->intpin = REG(PCIR_INTPIN, 1);
589 cfg->intline = REG(PCIR_INTLINE, 1);
591 cfg->mingnt = REG(PCIR_MINGNT, 1);
592 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Bit 7 of hdrtype flags a multi-function device; strip it so hdrtype
 * holds only the layout type. */
594 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
595 cfg->hdrtype &= ~PCIM_MFDEV;
596 STAILQ_INIT(&cfg->maps);
599 pci_hdrtypedata(pcib, b, s, f, cfg);
601 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
602 pci_read_cap(pcib, cfg);
604 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror the cfg fields into the pciio(4) conf structure for userland. */
606 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
607 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
608 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
609 devlist_entry->conf.pc_sel.pc_func = cfg->func;
610 devlist_entry->conf.pc_hdr = cfg->hdrtype;
612 devlist_entry->conf.pc_subvendor = cfg->subvendor;
613 devlist_entry->conf.pc_subdevice = cfg->subdevice;
614 devlist_entry->conf.pc_vendor = cfg->vendor;
615 devlist_entry->conf.pc_device = cfg->device;
617 devlist_entry->conf.pc_class = cfg->baseclass;
618 devlist_entry->conf.pc_subclass = cfg->subclass;
619 devlist_entry->conf.pc_progif = cfg->progif;
620 devlist_entry->conf.pc_revid = cfg->revid;
625 return (devlist_entry);
/*
 * Walk the device's PCI capability list and record per-capability state
 * (power management, HyperTransport, MSI, MSI-X, VPD, subvendor,
 * PCI-X/PCIe chipset detection) into *cfg.
 * NOTE(review): many interleaved lines are missing from this extract —
 * braces, case labels, local declarations (val/addr), break statements,
 * and #endif lines; the embedded original line numbers are discontinuous.
 * Code kept byte-identical.
 */
630 pci_read_cap(device_t pcib, pcicfgregs *cfg)
632 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
633 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
634 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
638 int ptr, nextptr, ptrptr;
/* The capability pointer register location depends on the header type. */
640 switch (cfg->hdrtype & PCIM_HDRTYPE) {
643 ptrptr = PCIR_CAP_PTR;
646 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
649 return; /* no extended capabilities support */
651 nextptr = REG(ptrptr, 1); /* sanity check? */
654 * Read capability entries.
656 while (nextptr != 0) {
659 printf("illegal PCI extended capability offset %d\n",
663 /* Find the next entry */
665 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
667 /* Process this entry */
668 switch (REG(ptr + PCICAP_ID, 1)) {
669 case PCIY_PMG: /* PCI power management */
670 if (cfg->pp.pp_cap == 0) {
671 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
672 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
673 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
674 if ((nextptr - ptr) > PCIR_POWER_DATA)
675 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
678 case PCIY_HT: /* HyperTransport */
679 /* Determine HT-specific capability type. */
680 val = REG(ptr + PCIR_HT_COMMAND, 2);
682 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
683 cfg->ht.ht_slave = ptr;
685 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
686 switch (val & PCIM_HTCMD_CAP_MASK) {
687 case PCIM_HTCAP_MSI_MAPPING:
688 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
689 /* Sanity check the mapping window. */
690 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
693 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
695 if (addr != MSI_INTEL_ADDR_BASE)
697 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
698 cfg->domain, cfg->bus,
699 cfg->slot, cfg->func,
702 addr = MSI_INTEL_ADDR_BASE;
704 cfg->ht.ht_msimap = ptr;
705 cfg->ht.ht_msictrl = val;
706 cfg->ht.ht_msiaddr = addr;
711 case PCIY_MSI: /* PCI MSI */
712 cfg->msi.msi_location = ptr;
713 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* MMC field encodes log2 of the supported message count. */
714 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
715 PCIM_MSICTRL_MMC_MASK)>>1);
717 case PCIY_MSIX: /* PCI MSI-X */
718 cfg->msix.msix_location = ptr;
719 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
720 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
721 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table/PBA registers pack a BAR index (BIR) in the low bits and an
 * offset within that BAR in the remaining bits. */
722 val = REG(ptr + PCIR_MSIX_TABLE, 4);
723 cfg->msix.msix_table_bar = PCIR_BAR(val &
725 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
726 val = REG(ptr + PCIR_MSIX_PBA, 4);
727 cfg->msix.msix_pba_bar = PCIR_BAR(val &
729 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
731 case PCIY_VPD: /* PCI Vital Product Data */
732 cfg->vpd.vpd_reg = ptr;
735 /* Should always be true. */
736 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
737 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
738 cfg->subvendor = val & 0xffff;
739 cfg->subdevice = val >> 16;
742 case PCIY_PCIX: /* PCI-X */
744 * Assume we have a PCI-X chipset if we have
745 * at least one PCI-PCI bridge with a PCI-X
746 * capability. Note that some systems with
747 * PCI-express or HT chipsets might match on
748 * this check as well.
750 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
753 case PCIY_EXPRESS: /* PCI-express */
755 * Assume we have a PCI-express chipset if we have
756 * at least one PCI-express device.
765 #if defined(__powerpc__)
767 * Enable the MSI mapping window for all HyperTransport
768 * slaves. PCI-PCI bridges have their windows enabled via
771 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
772 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
774 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
775 cfg->domain, cfg->bus, cfg->slot, cfg->func);
776 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
777 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
781 /* REG and WREG use carry through to next functions */
785 * PCI Vital Product Data
/* Spin limit for the VPD address/data handshake (iterations of DELAY(1)). */
788 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one aligned 32-bit word of VPD: write the address, poll bit 15 of
 * the VPD address register until the device sets it (data ready), then
 * read the data register.  NOTE(review): braces, the timeout check/return,
 * and the success return are in lines missing from this extract.
 */
791 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
793 int count = PCI_VPD_TIMEOUT;
795 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
797 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
799 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
802 DELAY(1); /* limit looping */
804 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one aligned 32-bit word of VPD: write data, then the address
 * with bit 15 set; the device clears bit 15 when the write completes.
 */
811 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
813 int count = PCI_VPD_TIMEOUT;
815 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
817 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
818 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
819 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
822 DELAY(1); /* limit looping */
829 #undef PCI_VPD_TIMEOUT
/* Cursor state for streaming VPD bytes out of 32-bit word reads;
 * the member declarations are in lines missing from this extract. */
831 struct vpd_readstate {
/* Return the next VPD byte via *data, refilling the 32-bit word buffer
 * from pci_read_vpd_reg() when empty. */
841 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
846 if (vrs->bytesinval == 0) {
/* NOTE(review): the "®" below is a mis-encoded "&reg" (address-of the
 * local uint32_t reg) mangled during extraction — needs repair when the
 * full source is restored. */
847 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
849 vrs->val = le32toh(reg);
851 byte = vrs->val & 0xff;
854 vrs->val = vrs->val >> 8;
855 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array, and the read/write
 * (VPD-W) keyword array, validating the RV checksum along the way.
 * Implemented as a byte-at-a-time state machine:
 *   state 0  = resource item name/header
 *   state 1  = identifier string bytes
 *   state 2  = VPD-R keyword header, state 3 = VPD-R keyword value
 *   state 5  = VPD-W keyword header, state 6 = VPD-W keyword value
 * Negative states mark parse/checksum/I-O failure paths.
 * NOTE(review): this extract is heavily decimated — braces, the
 * while/switch scaffolding, several state transitions, and local
 * declarations (state, byte, byte2, cksum, remain, name, i, dflen) are
 * in missing lines.  Code kept byte-identical.
 */
865 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
867 struct vpd_readstate vrs;
872 int alloc, off; /* alloc/off for RO/W arrays */
878 /* init vpd reader */
886 name = remain = i = 0; /* shut up stupid gcc */
887 alloc = off = 0; /* shut up stupid gcc */
888 dflen = 0; /* shut up stupid gcc */
891 if (vpd_nextbyte(&vrs, &byte)) {
896 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
897 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
898 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
901 case 0: /* item name */
903 if (vpd_nextbyte(&vrs, &byte2)) {
908 if (vpd_nextbyte(&vrs, &byte2)) {
912 remain |= byte2 << 8;
/* VPD space is at most 0x7f 32-bit words; reject lengths past it. */
913 if (remain > (0x7f*4 - vrs.off)) {
916 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
917 cfg->domain, cfg->bus, cfg->slot,
923 name = (byte >> 3) & 0xf;
926 case 0x2: /* String */
927 cfg->vpd.vpd_ident = malloc(remain + 1,
935 case 0x10: /* VPD-R */
938 cfg->vpd.vpd_ros = malloc(alloc *
939 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
943 case 0x11: /* VPD-W */
946 cfg->vpd.vpd_w = malloc(alloc *
947 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
951 default: /* Invalid data, abort */
957 case 1: /* Identifier String */
958 cfg->vpd.vpd_ident[i++] = byte;
961 cfg->vpd.vpd_ident[i] = '\0';
966 case 2: /* VPD-R Keyword Header */
/* Grow the RO-keyword array geometrically when full. */
968 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
969 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
970 M_DEVBUF, M_WAITOK | M_ZERO);
972 cfg->vpd.vpd_ros[off].keyword[0] = byte;
973 if (vpd_nextbyte(&vrs, &byte2)) {
977 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
978 if (vpd_nextbyte(&vrs, &byte2)) {
982 cfg->vpd.vpd_ros[off].len = dflen = byte2;
984 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
987 * if this happens, we can't trust the rest
991 "pci%d:%d:%d:%d: bad keyword length: %d\n",
992 cfg->domain, cfg->bus, cfg->slot,
997 } else if (dflen == 0) {
/* Zero-length keyword still gets a one-byte value buffer. */
998 cfg->vpd.vpd_ros[off].value = malloc(1 *
999 sizeof(*cfg->vpd.vpd_ros[off].value),
1000 M_DEVBUF, M_WAITOK);
1001 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1003 cfg->vpd.vpd_ros[off].value = malloc(
1005 sizeof(*cfg->vpd.vpd_ros[off].value),
1006 M_DEVBUF, M_WAITOK);
1009 /* keep in sync w/ state 3's transistions */
1010 if (dflen == 0 && remain == 0)
1012 else if (dflen == 0)
1018 case 3: /* VPD-R Keyword Value */
1019 cfg->vpd.vpd_ros[off].value[i++] = byte;
/* "RV" keyword carries the checksum byte for the RO section. */
1020 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1021 "RV", 2) == 0 && cksumvalid == -1) {
1027 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1028 cfg->domain, cfg->bus,
1029 cfg->slot, cfg->func,
1038 /* keep in sync w/ state 2's transistions */
1040 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1041 if (dflen == 0 && remain == 0) {
/* RO section complete: shrink the array to its final size. */
1042 cfg->vpd.vpd_rocnt = off;
1043 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1044 off * sizeof(*cfg->vpd.vpd_ros),
1045 M_DEVBUF, M_WAITOK | M_ZERO);
1047 } else if (dflen == 0)
1057 case 5: /* VPD-W Keyword Header */
1059 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1060 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1061 M_DEVBUF, M_WAITOK | M_ZERO);
1063 cfg->vpd.vpd_w[off].keyword[0] = byte;
1064 if (vpd_nextbyte(&vrs, &byte2)) {
1068 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1069 if (vpd_nextbyte(&vrs, &byte2)) {
1073 cfg->vpd.vpd_w[off].len = dflen = byte2;
/* Record the config-space offset of this writable field. */
1074 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1075 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1076 sizeof(*cfg->vpd.vpd_w[off].value),
1077 M_DEVBUF, M_WAITOK);
1080 /* keep in sync w/ state 6's transistions */
1081 if (dflen == 0 && remain == 0)
1083 else if (dflen == 0)
1089 case 6: /* VPD-W Keyword Value */
1090 cfg->vpd.vpd_w[off].value[i++] = byte;
1093 /* keep in sync w/ state 5's transistions */
1095 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1096 if (dflen == 0 && remain == 0) {
1097 cfg->vpd.vpd_wcnt = off;
1098 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1099 off * sizeof(*cfg->vpd.vpd_w),
1100 M_DEVBUF, M_WAITOK | M_ZERO);
1102 } else if (dflen == 0)
1107 printf("pci%d:%d:%d:%d: invalid state: %d\n",
1108 cfg->domain, cfg->bus, cfg->slot, cfg->func,
/* Failure cleanup: bad checksum or parse failure frees the RO data. */
1115 if (cksumvalid == 0 || state < -1) {
1116 /* read-only data bad, clean up */
1117 if (cfg->vpd.vpd_ros != NULL) {
1118 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1119 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1120 free(cfg->vpd.vpd_ros, M_DEVBUF);
1121 cfg->vpd.vpd_ros = NULL;
1125 /* I/O error, clean up */
1126 printf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1127 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1128 if (cfg->vpd.vpd_ident != NULL) {
1129 free(cfg->vpd.vpd_ident, M_DEVBUF);
1130 cfg->vpd.vpd_ident = NULL;
1132 if (cfg->vpd.vpd_w != NULL) {
1133 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1134 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1135 free(cfg->vpd.vpd_w, M_DEVBUF);
1136 cfg->vpd.vpd_w = NULL;
/* Mark VPD parsed (even on failure) so we never re-read it. */
1139 cfg->vpd.vpd_cached = 1;
/*
 * VPD accessor methods: each lazily parses the VPD on first use
 * (vpd_cached flag), then serves from the cached arrays.
 * NOTE(review): braces and return statements are in lines missing from
 * this extract; code kept byte-identical.
 */
/* Return the cached VPD identifier string via *identptr. */
1145 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1147 struct pci_devinfo *dinfo = device_get_ivars(child);
1148 pcicfgregs *cfg = &dinfo->cfg;
1150 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1151 pci_read_vpd(device_get_parent(dev), cfg);
1153 *identptr = cfg->vpd.vpd_ident;
1155 if (*identptr == NULL)
/* Look up a two-character read-only VPD keyword and return its value. */
1162 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1165 struct pci_devinfo *dinfo = device_get_ivars(child);
1166 pcicfgregs *cfg = &dinfo->cfg;
1169 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1170 pci_read_vpd(device_get_parent(dev), cfg);
1172 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1173 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1174 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1175 *vptr = cfg->vpd.vpd_ros[i].value;
/* Ensure the VPD cache for 'dev' itself is populated; note the double
 * device_get_parent() — dev here is the PCI device, not the bus child. */
1184 pci_fetch_vpd_list(device_t dev)
1186 struct pci_devinfo *dinfo = device_get_ivars(dev);
1187 pcicfgregs *cfg = &dinfo->cfg;
1189 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1190 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1195 * Find the requested extended capability and return the offset in
1196 * configuration space via the pointer provided. The function returns
1197 * 0 on success and error code otherwise.
/* NOTE(review): braces, local declarations (ptr, status), the ENXIO
 * return on CAP_LIST absent, the PCIR_CAP_PTR case, the loop header,
 * and the *capreg store are in lines missing from this extract. */
1200 pci_find_extcap_method(device_t dev, device_t child, int capability,
1203 struct pci_devinfo *dinfo = device_get_ivars(child);
1204 pcicfgregs *cfg = &dinfo->cfg;
1209 * Check the CAP_LIST bit of the PCI status register first.
1211 status = pci_read_config(child, PCIR_STATUS, 2);
1212 if (!(status & PCIM_STATUS_CAPPRESENT))
1216 * Determine the start pointer of the capabilities list.
1218 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1224 ptr = PCIR_CAP_PTR_2;
1228 return (ENXIO); /* no extended capabilities support */
1230 ptr = pci_read_config(child, ptr, 1);
1233 * Traverse the capabilities list.
1236 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1241 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1248 * Support for MSI-X message interrupts.
/*
 * Program one MSI-X table entry: 64-bit message address (lo/hi) and
 * 32-bit data word.  Each table entry is 16 bytes; the vector-control
 * word sits at offset 12 of the entry.
 * NOTE(review): braces and the 'offset' declaration are in lines
 * missing from this extract; code kept byte-identical.
 */
1251 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1253 struct pci_devinfo *dinfo = device_get_ivars(dev);
1254 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1257 KASSERT(msix->msix_table_len > index, ("bogus index"));
1258 offset = msix->msix_table_offset + index * 16;
1259 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1260 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1261 bus_write_4(msix->msix_table_res, offset + 8, data);
1263 /* Enable MSI -> HT mapping. */
1264 pci_ht_map_msi(dev, address);
/* Set the per-vector mask bit (read-modify-write, skip if already set). */
1268 pci_mask_msix(device_t dev, u_int index)
1270 struct pci_devinfo *dinfo = device_get_ivars(dev);
1271 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1272 uint32_t offset, val;
1274 KASSERT(msix->msix_msgnum > index, ("bogus index"))
1275 offset = msix->msix_table_offset + index * 16 + 12;
1276 val = bus_read_4(msix->msix_table_res, offset);
1277 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1278 val |= PCIM_MSIX_VCTRL_MASK;
1279 bus_write_4(msix->msix_table_res, offset, val);
/* Clear the per-vector mask bit (read-modify-write, skip if clear). */
1284 pci_unmask_msix(device_t dev, u_int index)
1286 struct pci_devinfo *dinfo = device_get_ivars(dev);
1287 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1288 uint32_t offset, val;
1290 KASSERT(msix->msix_table_len > index, ("bogus index"));
1291 offset = msix->msix_table_offset + index * 16 + 12;
1292 val = bus_read_4(msix->msix_table_res, offset);
1293 if (val & PCIM_MSIX_VCTRL_MASK) {
1294 val &= ~PCIM_MSIX_VCTRL_MASK;
1295 bus_write_4(msix->msix_table_res, offset, val);
/* Test the vector's bit in the Pending Bit Array (one bit per vector). */
1300 pci_pending_msix(device_t dev, u_int index)
1302 struct pci_devinfo *dinfo = device_get_ivars(dev);
1303 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1304 uint32_t offset, bit;
1306 KASSERT(msix->msix_table_len > index, ("bogus index"));
1307 offset = msix->msix_pba_offset + (index / 32) * 4;
1308 bit = 1 << index % 32;
1309 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1313 * Restore MSI-X registers and table during resume. If MSI-X is
1314 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * Reprogram hardware MSI-X state after a suspend/resume cycle from the
 * driver's cached table: mask everything, re-enable only entries that
 * have a vector and at least one handler, then restore the control reg.
 * NOTE(review): braces, the 'i' declaration, and the 'continue' inside
 * the second loop are in lines missing from this extract.
 */
1318 pci_resume_msix(device_t dev)
1320 struct pci_devinfo *dinfo = device_get_ivars(dev);
1321 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1322 struct msix_table_entry *mte;
1323 struct msix_vector *mv;
1326 if (msix->msix_alloc > 0) {
1327 /* First, mask all vectors. */
1328 for (i = 0; i < msix->msix_msgnum; i++)
1329 pci_mask_msix(dev, i);
1331 /* Second, program any messages with at least one handler. */
1332 for (i = 0; i < msix->msix_table_len; i++) {
1333 mte = &msix->msix_table[i];
1334 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1336 mv = &msix->msix_vectors[mte->mte_vector - 1];
1337 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1338 pci_unmask_msix(dev, i);
/* Finally, restore the saved MSI-X control register value. */
1341 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1342 msix->msix_ctrl, 2);
1346 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1347 * returned in *count. After this function returns, each message will be
1348 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1351 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1353 struct pci_devinfo *dinfo = device_get_ivars(child);
1354 pcicfgregs *cfg = &dinfo->cfg;
1355 struct resource_list_entry *rle;
1356 int actual, error, i, irq, max;
1358 /* Don't let count == 0 get us into trouble. */
1362 /* If rid 0 is allocated, then fail. */
1363 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1364 if (rle != NULL && rle->res != NULL)
1367 /* Already have allocated messages? */
1368 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1371 /* If MSI is blacklisted for this system, fail. */
1372 if (pci_msi_blacklisted())
1375 /* MSI-X capability present? */
1376 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1379 /* Make sure the appropriate BARs are mapped. */
1380 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1381 cfg->msix.msix_table_bar);
/* The table BAR must already be allocated AND activated by the driver. */
1382 if (rle == NULL || rle->res == NULL ||
1383 !(rman_get_flags(rle->res) & RF_ACTIVE))
1385 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table; check it too. */
1386 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1387 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1388 cfg->msix.msix_pba_bar);
1389 if (rle == NULL || rle->res == NULL ||
1390 !(rman_get_flags(rle->res) & RF_ACTIVE))
1393 cfg->msix.msix_pba_res = rle->res;
1396 device_printf(child,
1397 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1398 *count, cfg->msix.msix_msgnum);
1399 max = min(*count, cfg->msix.msix_msgnum);
1400 for (i = 0; i < max; i++) {
1401 /* Allocate a message. */
1402 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
/* Each allocated IRQ is exposed as a SYS_RES_IRQ resource at rid i+1. */
1408 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Verbose-boot reporting of the IRQs actually obtained. */
1414 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1416 device_printf(child, "using IRQ %lu for MSI-X\n",
1422 * Be fancy and try to print contiguous runs of
1423 * IRQ values as ranges. 'irq' is the previous IRQ.
1424 * 'run' is true if we are in a range.
1426 device_printf(child, "using IRQs %lu", rle->start);
1429 for (i = 1; i < actual; i++) {
1430 rle = resource_list_find(&dinfo->resources,
1431 SYS_RES_IRQ, i + 1);
1433 /* Still in a run? */
1434 if (rle->start == irq + 1) {
1440 /* Finish previous range. */
1446 /* Start new range. */
1447 printf(",%lu", rle->start);
1451 /* Unfinished range? */
1454 printf(" for MSI-X\n");
1458 /* Mask all vectors. */
1459 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1460 pci_mask_msix(child, i);
1462 /* Allocate and initialize vector data and virtual table. */
1463 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1464 M_DEVBUF, M_WAITOK | M_ZERO);
1465 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1466 M_DEVBUF, M_WAITOK | M_ZERO);
/* Initially map vector N to table slot N-1 (1:1, dense layout). */
1467 for (i = 0; i < actual; i++) {
1468 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1469 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1470 cfg->msix.msix_table[i].mte_vector = i + 1;
1473 /* Update control register to enable MSI-X. */
1474 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1475 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1476 cfg->msix.msix_ctrl, 2);
1478 /* Update counts of alloc'd messages. */
1479 cfg->msix.msix_alloc = actual;
1480 cfg->msix.msix_table_len = actual;
1486 * By default, pci_alloc_msix() will assign the allocated IRQ
1487 * resources consecutively to the first N messages in the MSI-X table.
1488 * However, device drivers may want to use different layouts if they
1489 * either receive fewer messages than they asked for, or they wish to
1490 * populate the MSI-X table sparsely. This method allows the driver
1491 * to specify what layout it wants. It must be called after a
1492 * successful pci_alloc_msix() but before any of the associated
1493 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1495 * The 'vectors' array contains 'count' message vectors. The array
1496 * maps directly to the MSI-X table in that index 0 in the array
1497 * specifies the vector for the first message in the MSI-X table, etc.
1498 * The vector value in each array index can either be 0 to indicate
1499 * that no vector should be assigned to a message slot, or it can be a
1500 * number from 1 to N (where N is the count returned from a
1501 * successful call to pci_alloc_msix()) to indicate which message
1502 * vector (IRQ) to be used for the corresponding message.
1504 * On successful return, each message with a non-zero vector will have
1505 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1506 * 1. Additionally, if any of the IRQs allocated via the previous
1507 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1508 * will be freed back to the system automatically.
1510 * For example, suppose a driver has a MSI-X table with 6 messages and
1511 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1512 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1513 * C. After the call to pci_alloc_msix(), the device will be setup to
1514 * have an MSI-X table of ABC--- (where - means no vector assigned).
1515 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1516 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1517 * be freed back to the system. This device will also have valid
1518 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1520 * In any case, the SYS_RES_IRQ rid X will always map to the message
1521 * at MSI-X table index X - 1 and will only be valid if a vector is
1522 * assigned to that table entry.
1525 pci_remap_msix_method(device_t dev, device_t child, int count,
1526 const u_int *vectors)
1528 struct pci_devinfo *dinfo = device_get_ivars(child);
1529 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1530 struct resource_list_entry *rle;
1531 int i, irq, j, *used;
1534 * Have to have at least one message in the table but the
1535 * table can't be bigger than the actual MSI-X table in the
/* (comment continues: "...device", elided from this chunk) */
1538 if (count == 0 || count > msix->msix_msgnum)
1541 /* Sanity check the vectors. */
1542 for (i = 0; i < count; i++)
1543 if (vectors[i] > msix->msix_alloc)
1547 * Make sure there aren't any holes in the vectors to be used.
1548 * It's a big pain to support it, and it doesn't really make
1549 * sense anyway. Also, at least one vector must be used.
1551 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1553 for (i = 0; i < count; i++)
1554 if (vectors[i] != 0)
1555 used[vectors[i] - 1] = 1;
/* A hole exists if an unused vector precedes a used one. */
1556 for (i = 0; i < msix->msix_alloc - 1; i++)
1557 if (used[i] == 0 && used[i + 1] == 1) {
1558 free(used, M_DEVBUF);
1562 free(used, M_DEVBUF);
1566 /* Make sure none of the resources are allocated. */
1567 for (i = 0; i < msix->msix_table_len; i++) {
1568 if (msix->msix_table[i].mte_vector == 0)
1570 if (msix->msix_table[i].mte_handlers > 0)
1572 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1573 KASSERT(rle != NULL, ("missing resource"));
1574 if (rle->res != NULL)
1578 /* Free the existing resource list entries. */
1579 for (i = 0; i < msix->msix_table_len; i++) {
1580 if (msix->msix_table[i].mte_vector == 0)
1582 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1586 * Build the new virtual table keeping track of which vectors are
/* (comment continues: "...used", elided from this chunk) */
1589 free(msix->msix_table, M_DEVBUF);
1590 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1591 M_DEVBUF, M_WAITOK | M_ZERO);
1592 for (i = 0; i < count; i++)
1593 msix->msix_table[i].mte_vector = vectors[i];
1594 msix->msix_table_len = count;
1596 /* Free any unused IRQs and resize the vectors array if necessary. */
1597 j = msix->msix_alloc - 1;
1599 struct msix_vector *vec;
/* Trailing unused vectors are released back to the parent bridge. */
1601 while (used[j] == 0) {
1602 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1603 msix->msix_vectors[j].mv_irq);
/* Shrink msix_vectors[] to the j+1 vectors still in use. */
1606 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1608 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1610 free(msix->msix_vectors, M_DEVBUF);
1611 msix->msix_vectors = vec;
1612 msix->msix_alloc = j + 1;
1614 free(used, M_DEVBUF);
1616 /* Map the IRQs onto the rids. */
1617 for (i = 0; i < count; i++) {
1618 if (vectors[i] == 0)
1620 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1621 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Verbose-boot report of the final table-slot -> IRQ layout. */
1626 device_printf(child, "Remapped MSI-X IRQs as: ");
1627 for (i = 0; i < count; i++) {
1630 if (vectors[i] == 0)
1634 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X resources for 'child': disable MSI-X in the
 * capability, delete the SYS_RES_IRQ entries, return the IRQs to the
 * parent bridge, and free the virtual table/vector arrays.
 * Fails if any message still has handlers or an allocated resource.
 */
1643 pci_release_msix(device_t dev, device_t child)
1645 struct pci_devinfo *dinfo = device_get_ivars(child);
1646 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1647 struct resource_list_entry *rle;
1650 /* Do we have any messages to release? */
1651 if (msix->msix_alloc == 0)
1654 /* Make sure none of the resources are allocated. */
1655 for (i = 0; i < msix->msix_table_len; i++) {
1656 if (msix->msix_table[i].mte_vector == 0)
1658 if (msix->msix_table[i].mte_handlers > 0)
1660 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1661 KASSERT(rle != NULL, ("missing resource"));
1662 if (rle->res != NULL)
1666 /* Update control register to disable MSI-X. */
1667 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1668 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1669 msix->msix_ctrl, 2);
1671 /* Free the resource list entries. */
1672 for (i = 0; i < msix->msix_table_len; i++) {
1673 if (msix->msix_table[i].mte_vector == 0)
1675 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1677 free(msix->msix_table, M_DEVBUF);
1678 msix->msix_table_len = 0;
1680 /* Release the IRQs. */
1681 for (i = 0; i < msix->msix_alloc; i++)
1682 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1683 msix->msix_vectors[i].mv_irq);
1684 free(msix->msix_vectors, M_DEVBUF);
1685 msix->msix_alloc = 0;
1690 * Return the max supported MSI-X messages this device supports.
1691 * Basically, assuming the MD code can alloc messages, this function
1692 * should return the maximum value that pci_alloc_msix() can return.
1693 * Thus, it is subject to the tunables, etc.
1696 pci_msix_count_method(device_t dev, device_t child)
1698 struct pci_devinfo *dinfo = device_get_ivars(child);
1699 struct pcicfg_msix *msix = &dinfo->cfg.msix;
/* Only report messages if MSI-X is enabled by tunable and present. */
1701 if (pci_do_msix && msix->msix_location != 0)
1702 return (msix->msix_msgnum);
1707 * HyperTransport MSI mapping control
/*
 * Enable the HT MSI mapping window when a non-zero MSI address that
 * falls inside the window is programmed; disable it when addr == 0.
 */
1710 pci_ht_map_msi(device_t dev, uint64_t addr)
1712 struct pci_devinfo *dinfo = device_get_ivars(dev);
1713 struct pcicfg_ht *ht = &dinfo->cfg.ht;
/* Compare the top bits only: the HT MSI window is 1MB (20-bit) aligned. */
1718 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1719 ht->ht_msiaddr >> 20 == addr >> 20) {
1720 /* Enable MSI -> HT mapping. */
1721 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1722 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1726 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1727 /* Disable MSI -> HT mapping. */
1728 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1729 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCI-express Max Read Request Size in bytes (128 << n),
 * decoded from the Device Control register; 0 if not a PCIe device.
 */
1735 pci_get_max_read_req(device_t dev)
1740 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) != 0)
1742 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2)
1743 val &= PCIEM_CTL_MAX_READ_REQUEST;
/* Field value n encodes 2^(n+7) bytes, i.e. 128..4096. */
1745 return (1 << (val + 7));
/*
 * Set the PCIe Max Read Request Size.  The requested size is rounded
 * down to a power of two before being encoded into Device Control.
 */
1749 pci_set_max_read_req(device_t dev, int size)
1754 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) != 0)
/* Round down to the largest power of two <= size. */
1760 size = (1 << (fls(size) - 1));
1761 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1762 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
/* Encode log2(size) - 7 into bits 14:12 of Device Control. */
1763 val |= (fls(size) - 8) << 12;
1764 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1769 * Support for MSI message signalled interrupts.
/*
 * Program the MSI capability's address/data registers and set the MSI
 * enable bit.  The data register location differs for 64-bit-capable
 * devices, hence the PCIM_MSICTRL_64BIT branch.
 */
1772 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1774 struct pci_devinfo *dinfo = device_get_ivars(dev);
1775 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1777 /* Write data and address values. */
1778 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1779 address & 0xffffffff, 4);
1780 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1781 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1783 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1786 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1789 /* Enable MSI in the control register. */
1790 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1791 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1794 /* Enable MSI -> HT mapping. */
1795 pci_ht_map_msi(dev, address);
/*
 * Disable MSI: tear down any HT mapping first (addr 0 == disable),
 * then clear the MSI enable bit in the capability's control register.
 */
1799 pci_disable_msi(device_t dev)
1801 struct pci_devinfo *dinfo = device_get_ivars(dev);
1802 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1804 /* Disable MSI -> HT mapping. */
1805 pci_ht_map_msi(dev, 0);
1807 /* Disable MSI in the control register. */
1808 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1809 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1814 * Restore MSI registers during resume. If MSI is enabled then
1815 * restore the data and address registers in addition to the control
/* (comment continues: "...register", elided from this chunk) */
1819 pci_resume_msi(device_t dev)
1821 struct pci_devinfo *dinfo = device_get_ivars(dev);
1822 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Replay the cached address/data only if MSI was enabled pre-suspend. */
1826 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1827 address = msi->msi_addr;
1828 data = msi->msi_data;
1829 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1830 address & 0xffffffff, 4);
1831 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1832 pci_write_config(dev, msi->msi_location +
1833 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1834 pci_write_config(dev, msi->msi_location +
1835 PCIR_MSI_DATA_64BIT, data, 2);
1837 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* The control register is always restored, enabled or not. */
1840 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Re-target an already-allocated MSI or MSI-X IRQ (e.g. after it has
 * been moved to a different CPU): look the IRQ up in our MSI or MSI-X
 * state, ask the parent bridge for fresh address/data values via
 * PCIB_MAP_MSI(), and reprogram the device accordingly.
 */
1845 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1847 struct pci_devinfo *dinfo = device_get_ivars(dev);
1848 pcicfgregs *cfg = &dinfo->cfg;
1849 struct resource_list_entry *rle;
1850 struct msix_table_entry *mte;
1851 struct msix_vector *mv;
1857 * Handle MSI first. We try to find this IRQ among our list
1858 * of MSI IRQs. If we find it, we request updated address and
1859 * data registers and apply the results.
1861 if (cfg->msi.msi_alloc > 0) {
1863 /* If we don't have any active handlers, nothing to do. */
1864 if (cfg->msi.msi_handlers == 0)
1866 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1867 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1869 if (rle->start == irq) {
1870 error = PCIB_MAP_MSI(device_get_parent(bus),
1871 dev, irq, &addr, &data);
/* Disable, update the cached values, then re-enable with new ones. */
1874 pci_disable_msi(dev);
1875 dinfo->cfg.msi.msi_addr = addr;
1876 dinfo->cfg.msi.msi_data = data;
1877 pci_enable_msi(dev, addr, data);
1885 * For MSI-X, we check to see if we have this IRQ. If we do,
1886 * we request the updated mapping info. If that works, we go
1887 * through all the slots that use this IRQ and update them.
1889 if (cfg->msix.msix_alloc > 0) {
1890 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1891 mv = &cfg->msix.msix_vectors[i];
1892 if (mv->mv_irq == irq) {
1893 error = PCIB_MAP_MSI(device_get_parent(bus),
1894 dev, irq, &addr, &data);
1897 mv->mv_address = addr;
/* A vector may back multiple table slots; update each active one. */
1899 for (j = 0; j < cfg->msix.msix_table_len; j++) {
1900 mte = &cfg->msix.msix_table[j];
1901 if (mte->mte_vector != i + 1)
1903 if (mte->mte_handlers == 0)
/* Mask while reprogramming to avoid a partially-written message. */
1905 pci_mask_msix(dev, j);
1906 pci_enable_msix(dev, j, addr, data);
1907 pci_unmask_msix(dev, j);
1918 * Returns true if the specified device is blacklisted because MSI
/* (comment continues: "...doesn't work", elided from this chunk) */
1922 pci_msi_device_blacklisted(device_t dev)
1924 const struct pci_quirk *q;
/* Honor the administrator's override of the blacklist. */
1926 if (!pci_honor_msi_blacklist)
/* Scan the quirk table for a DISABLE_MSI entry matching this devid. */
1929 for (q = &pci_quirks[0]; q->devid; q++) {
1930 if (q->devid == pci_get_devid(dev) &&
1931 q->type == PCI_QUIRK_DISABLE_MSI)
1938 * Returns true if a specified chipset supports MSI when it is
1939 * emulated hardware in a virtual machine.
1942 pci_msi_vm_chipset(device_t dev)
1944 const struct pci_quirk *q;
/* Look for an ENABLE_MSI_VM quirk matching this device ID. */
1946 for (q = &pci_quirks[0]; q->devid; q++) {
1947 if (q->devid == pci_get_devid(dev) &&
1948 q->type == PCI_QUIRK_ENABLE_MSI_VM)
1955 * Determine if MSI is blacklisted globally on this system. Currently,
1956 * we just check for blacklisted chipsets as represented by the
1957 * host-PCI bridge at device 0:0:0. In the future, it may become
1958 * necessary to check other system attributes, such as the kenv values
1959 * that give the motherboard manufacturer and model number.
1962 pci_msi_blacklisted(void)
1966 if (!pci_honor_msi_blacklist)
1969 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1970 if (!(pcie_chipset || pcix_chipset)) {
/* In a VM, allow MSI only for chipsets explicitly quirked as OK. */
1971 if (vm_guest != VM_GUEST_NO) {
1972 dev = pci_find_bsf(0, 0, 0);
1974 return (pci_msi_vm_chipset(dev) == 0);
/* Otherwise consult the per-device quirk list for host bridge 0:0:0. */
1979 dev = pci_find_bsf(0, 0, 0);
1981 return (pci_msi_device_blacklisted(dev));
1986 * Attempt to allocate *count MSI messages. The actual number allocated is
1987 * returned in *count. After this function returns, each message will be
1988 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
1991 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1993 struct pci_devinfo *dinfo = device_get_ivars(child);
1994 pcicfgregs *cfg = &dinfo->cfg;
1995 struct resource_list_entry *rle;
1996 int actual, error, i, irqs[32];
1999 /* Don't let count == 0 get us into trouble. */
2003 /* If rid 0 is allocated, then fail. */
2004 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2005 if (rle != NULL && rle->res != NULL)
2008 /* Already have allocated messages? */
2009 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2012 /* If MSI is blacklisted for this system, fail. */
2013 if (pci_msi_blacklisted())
2016 /* MSI capability present? */
2017 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2021 device_printf(child,
2022 "attempting to allocate %d MSI vectors (%d supported)\n",
2023 *count, cfg->msi.msi_msgnum);
2025 /* Don't ask for more than the device supports. */
2026 actual = min(*count, cfg->msi.msi_msgnum);
2028 /* Don't ask for more than 32 messages. */
2029 actual = min(actual, 32);
2031 /* MSI requires power of 2 number of messages. */
2032 if (!powerof2(actual))
2036 /* Try to allocate N messages. */
2037 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2049 * We now have N actual messages mapped onto SYS_RES_IRQ
2050 * resources in the irqs[] array, so add new resources
2051 * starting at rid 1.
2053 for (i = 0; i < actual; i++)
2054 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2055 irqs[i], irqs[i], 1);
/* Verbose-boot reporting, printing runs of contiguous IRQs as ranges. */
2059 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2064 * Be fancy and try to print contiguous runs
2065 * of IRQ values as ranges. 'run' is true if
2066 * we are in a range.
2068 device_printf(child, "using IRQs %d", irqs[0]);
2070 for (i = 1; i < actual; i++) {
2072 /* Still in a run? */
2073 if (irqs[i] == irqs[i - 1] + 1) {
2078 /* Finish previous range. */
2080 printf("-%d", irqs[i - 1]);
2084 /* Start new range. */
2085 printf(",%d", irqs[i]);
2088 /* Unfinished range? */
2090 printf("-%d", irqs[actual - 1]);
2091 printf(" for MSI\n");
2095 /* Update control register with actual count. */
2096 ctrl = cfg->msi.msi_ctrl;
2097 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* Multiple Message Enable field holds log2(actual) in bits 6:4. */
2098 ctrl |= (ffs(actual) - 1) << 4;
2099 cfg->msi.msi_ctrl = ctrl;
2100 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2102 /* Update counts of alloc'd messages. */
2103 cfg->msi.msi_alloc = actual;
2104 cfg->msi.msi_handlers = 0;
2109 /* Release the MSI messages associated with this device. */
2111 pci_release_msi_method(device_t dev, device_t child)
2113 struct pci_devinfo *dinfo = device_get_ivars(child);
2114 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2115 struct resource_list_entry *rle;
2116 int error, i, irqs[32];
2118 /* Try MSI-X first. */
/* pci_release_msix() returns ENODEV only if no MSI-X was allocated. */
2119 error = pci_release_msix(dev, child);
2120 if (error != ENODEV)
2123 /* Do we have any messages to release? */
2124 if (msi->msi_alloc == 0)
2126 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2128 /* Make sure none of the resources are allocated. */
2129 if (msi->msi_handlers > 0)
2131 for (i = 0; i < msi->msi_alloc; i++) {
2132 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2133 KASSERT(rle != NULL, ("missing MSI resource"));
2134 if (rle->res != NULL)
2136 irqs[i] = rle->start;
2139 /* Update control register with 0 count. */
2140 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2141 ("%s: MSI still enabled", __func__));
2142 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2143 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2146 /* Release the messages. */
2147 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2148 for (i = 0; i < msi->msi_alloc; i++)
2149 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2151 /* Update alloc count. */
2159 * Return the max supported MSI messages this device supports.
2160 * Basically, assuming the MD code can alloc messages, this function
2161 * should return the maximum value that pci_alloc_msi() can return.
2162 * Thus, it is subject to the tunables, etc.
2165 pci_msi_count_method(device_t dev, device_t child)
2167 struct pci_devinfo *dinfo = device_get_ivars(child);
2168 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Only report messages if MSI is enabled by tunable and present. */
2170 if (pci_do_msi && msi->msi_location != 0)
2171 return (msi->msi_msgnum);
2175 /* free pcicfgregs structure and all depending data structures */
2178 pci_freecfg(struct pci_devinfo *dinfo)
2180 struct devlist *devlist_head;
2181 struct pci_map *pm, *next;
2184 devlist_head = &pci_devq;
/* Free VPD identifier plus each read-only and read/write keyword value. */
2186 if (dinfo->cfg.vpd.vpd_reg) {
2187 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2188 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2189 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2190 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2191 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2192 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2193 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* Free the BAR map list; _SAFE variant since entries are freed in-loop. */
2195 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
/* Unlink the device from the global device list before freeing it. */
2198 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2199 free(dinfo, M_DEVBUF);
2201 /* increment the generation count */
2204 /* we're losing one device */
2210 * PCI power management
/*
 * Transition 'child' to the requested PCI power state (D0..D3) via its
 * power-management capability, honoring the spec-mandated settle delays.
 */
2213 pci_set_powerstate_method(device_t dev, device_t child, int state)
2215 struct pci_devinfo *dinfo = device_get_ivars(child);
2216 pcicfgregs *cfg = &dinfo->cfg;
2218 int result, oldstate, highest, delay;
/* No PM capability: nothing we can do. */
2220 if (cfg->pp.pp_cap == 0)
2221 return (EOPNOTSUPP);
2224 * Optimize a no state change request away. While it would be OK to
2225 * write to the hardware in theory, some devices have shown odd
2226 * behavior when going from D3 -> D3.
2228 oldstate = pci_get_powerstate(child);
2229 if (oldstate == state)
2233 * The PCI power management specification states that after a state
2234 * transition between PCI power states, system software must
2235 * guarantee a minimal delay before the function accesses the device.
2236 * Compute the worst case delay that we need to guarantee before we
2237 * access the device. Many devices will be responsive much more
2238 * quickly than this delay, but there are some that don't respond
2239 * instantly to state changes. Transitions to/from D3 state require
2240 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2241 * is done below with DELAY rather than a sleeper function because
2242 * this function can be called from contexts where we cannot sleep.
2244 highest = (oldstate > state) ? oldstate : state;
2245 if (highest == PCI_POWERSTATE_D3)
2247 else if (highest == PCI_POWERSTATE_D2)
/* Preserve the non-state bits of PMCSR; replace only the state field. */
2251 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2252 & ~PCIM_PSTAT_DMASK;
2255 case PCI_POWERSTATE_D0:
2256 status |= PCIM_PSTAT_D0;
2258 case PCI_POWERSTATE_D1:
/* D1 and D2 are optional states; reject if the device lacks them. */
2259 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2260 return (EOPNOTSUPP);
2261 status |= PCIM_PSTAT_D1;
2263 case PCI_POWERSTATE_D2:
2264 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2265 return (EOPNOTSUPP);
2266 status |= PCIM_PSTAT_D2;
2268 case PCI_POWERSTATE_D3:
2269 status |= PCIM_PSTAT_D3;
2276 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2279 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the current power state of 'child' from the PMCSR register of
 * its power-management capability; devices without one are always D0.
 */
2286 pci_get_powerstate_method(device_t dev, device_t child)
2288 struct pci_devinfo *dinfo = device_get_ivars(child);
2289 pcicfgregs *cfg = &dinfo->cfg;
2293 if (cfg->pp.pp_cap != 0) {
2294 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2295 switch (status & PCIM_PSTAT_DMASK) {
2297 result = PCI_POWERSTATE_D0;
2300 result = PCI_POWERSTATE_D1;
2303 result = PCI_POWERSTATE_D2;
2306 result = PCI_POWERSTATE_D3;
2309 result = PCI_POWERSTATE_UNKNOWN;
2313 /* No support, device is always at D0 */
2314 result = PCI_POWERSTATE_D0;
2320 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the PCI command register. */
2323 static __inline void
2324 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2328 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2330 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the PCI command register. */
2333 static __inline void
2334 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2338 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2340 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable bus mastering for 'child' via the command register. */
2344 pci_enable_busmaster_method(device_t dev, device_t child)
2346 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable bus mastering for 'child' via the command register. */
2351 pci_disable_busmaster_method(device_t dev, device_t child)
2353 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable I/O-port or memory decoding for 'child', selected by the
 * resource type in 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2358 pci_enable_io_method(device_t dev, device_t child, int space)
2363 case SYS_RES_IOPORT:
2364 bit = PCIM_CMD_PORTEN;
2366 case SYS_RES_MEMORY:
2367 bit = PCIM_CMD_MEMEN;
2372 pci_set_command_bit(dev, child, bit);
/*
 * Disable I/O-port or memory decoding for 'child'; mirror image of
 * pci_enable_io_method().
 */
2377 pci_disable_io_method(device_t dev, device_t child, int space)
2382 case SYS_RES_IOPORT:
2383 bit = PCIM_CMD_PORTEN;
2385 case SYS_RES_MEMORY:
2386 bit = PCIM_CMD_MEMEN;
2391 pci_clear_command_bit(dev, child, bit);
2396 * New style pci driver. Parent device is either a pci-host-bridge or a
2397 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump the interesting fields of a device's config header plus its
 * power-management, MSI and MSI-X capabilities (bootverbose output).
 */
2401 pci_print_verbose(struct pci_devinfo *dinfo)
2405 pcicfgregs *cfg = &dinfo->cfg;
2407 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2408 cfg->vendor, cfg->device, cfg->revid);
2409 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2410 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2411 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2412 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2414 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2415 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2416 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2417 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2418 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2419 if (cfg->intpin > 0)
2420 printf("\tintpin=%c, irq=%d\n",
2421 cfg->intpin +'a' -1, cfg->intline);
2422 if (cfg->pp.pp_cap) {
2425 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2426 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2427 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2428 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2429 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2430 status & PCIM_PSTAT_DMASK);
2432 if (cfg->msi.msi_location) {
2435 ctrl = cfg->msi.msi_ctrl;
2436 printf("\tMSI supports %d message%s%s%s\n",
2437 cfg->msi.msi_msgnum,
2438 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2439 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2440 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2442 if (cfg->msix.msix_location) {
2443 printf("\tMSI-X supports %d message%s ",
2444 cfg->msix.msix_msgnum,
2445 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2446 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2447 printf("in map 0x%x\n",
2448 cfg->msix.msix_table_bar);
2450 printf("in maps 0x%x and 0x%x\n",
2451 cfg->msix.msix_table_bar,
2452 cfg->msix.msix_pba_bar);
/* Return true if I/O-port decoding is enabled in the command register. */
2458 pci_porten(device_t dev)
2460 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return true if memory decoding is enabled in the command register. */
2464 pci_memen(device_t dev)
2466 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Read a BAR's current value into *mapp and size-probe it into
 * *testvalp by writing all-ones and reading back (the low log2(size)
 * bits read as zero).  Decoding is disabled around the probe and the
 * original value is restored afterwards.
 */
2470 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2472 struct pci_devinfo *dinfo;
2473 pci_addr_t map, testval;
2478 * The device ROM BAR is special. It is always a 32-bit
2479 * memory BAR. Bit 0 is special and should not be set when
/* (comment continues: "...sizing the BAR", elided from this chunk) */
2482 dinfo = device_get_ivars(dev);
2483 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2484 map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps ROM-enable (bit 0) clear while sizing. */
2485 pci_write_config(dev, reg, 0xfffffffe, 4);
2486 testval = pci_read_config(dev, reg, 4);
2487 pci_write_config(dev, reg, map, 4);
2489 *testvalp = testval;
2493 map = pci_read_config(dev, reg, 4);
2494 ln2range = pci_maprange(map);
/* For a 64-bit BAR, the upper half lives in the next dword register. */
2496 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2499 * Disable decoding via the command register before
2500 * determining the BAR's length since we will be placing it in
/* (comment continues: "...a weird state", elided from this chunk) */
2503 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2504 pci_write_config(dev, PCIR_COMMAND,
2505 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2508 * Determine the BAR's length by writing all 1's. The bottom
2509 * log_2(size) bits of the BAR will stick as 0 when we read
/* (comment continues: "...the value back", elided from this chunk) */
2512 pci_write_config(dev, reg, 0xffffffff, 4);
2513 testval = pci_read_config(dev, reg, 4);
2514 if (ln2range == 64) {
2515 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2516 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2520 * Restore the original value of the BAR. We may have reprogrammed
2521 * the BAR of the low-level console device and when booting verbose,
2522 * we need the console device addressable.
2524 pci_write_config(dev, reg, map, 4);
2526 pci_write_config(dev, reg + 4, map >> 32, 4);
2527 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2530 *testvalp = testval;
/*
 * Program a BAR with 'base' (both halves for a 64-bit BAR) and refresh
 * the cached pm_value from what the hardware actually latched.
 */
2534 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2536 struct pci_devinfo *dinfo;
2539 /* The device ROM BAR is always a 32-bit memory BAR. */
2540 dinfo = device_get_ivars(dev);
2541 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2544 ln2range = pci_maprange(pm->pm_value);
2545 pci_write_config(dev, pm->pm_reg, base, 4);
2547 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
/* Re-read so pm_value reflects read-only bits the device enforces. */
2548 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2550 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2551 pm->pm_reg + 4, 4) << 32;
/* Look up the cached pci_map entry for BAR register 'reg', if any. */
2555 pci_find_bar(device_t dev, int reg)
2557 struct pci_devinfo *dinfo;
2560 dinfo = device_get_ivars(dev);
2561 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2562 if (pm->pm_reg == reg)
/*
 * Return true if the given BAR currently decodes: the ROM BAR needs
 * both its enable bit and memory decoding; memory BARs need MEMEN;
 * I/O BARs need PORTEN.
 */
2569 pci_bar_enabled(device_t dev, struct pci_map *pm)
2571 struct pci_devinfo *dinfo;
2574 dinfo = device_get_ivars(dev);
2575 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2576 !(pm->pm_value & PCIM_BIOS_ENABLE))
2578 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2579 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2580 return ((cmd & PCIM_CMD_MEMEN) != 0);
2582 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Allocate a pci_map entry for BAR 'reg' and insert it into the
 * device's map list, kept sorted by register offset.
 */
2585 static struct pci_map *
2586 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2588 struct pci_devinfo *dinfo;
2589 struct pci_map *pm, *prev;
2591 dinfo = device_get_ivars(dev);
2592 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2594 pm->pm_value = value;
/* Walk the list to find the insertion point that keeps it sorted. */
2596 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2597 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2599 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2600 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2604 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2606 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Re-program every saved BAR value into the device's config space,
 * e.g. after the device has lost its configuration across a reset or
 * power-state transition.
 */
2611 pci_restore_bars(device_t dev)
2613 struct pci_devinfo *dinfo;
2617 dinfo = device_get_ivars(dev);
2618 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
/* ROM BAR handled specially; that branch body is elided in this excerpt. */
2619 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2622 ln2range = pci_maprange(pm->pm_value);
2623 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
/* Upper dword of a 64-bit BAR (guard for ln2range == 64 is elided). */
2625 pci_write_config(dev, pm->pm_reg + 4,
2626 pm->pm_value >> 32, 4);
2631 * Add a resource based on a pci map register. Return 1 if the map
2632 * register is a 32bit map register or 2 if it is a 64bit register.
2635 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2636 int force, int prefetch)
2639 pci_addr_t base, map, testval;
2640 pci_addr_t start, end, count;
2641 int barlen, basezero, flags, maprange, mapsize, type;
2643 struct resource *res;
2646 * The BAR may already exist if the device is a CardBus card
2647 * whose CIS is stored in this BAR.
2649 pm = pci_find_bar(dev, reg);
2651 maprange = pci_maprange(pm->pm_value);
2652 barlen = maprange == 64 ? 2 : 1;
/* Size the BAR by writing all 1s and reading back (pci_read_bar). */
2656 pci_read_bar(dev, reg, &map, &testval);
2657 if (PCI_BAR_MEM(map)) {
2658 type = SYS_RES_MEMORY;
2659 if (map & PCIM_BAR_MEM_PREFETCH)
2662 type = SYS_RES_IOPORT;
2663 mapsize = pci_mapsize(testval);
2664 base = pci_mapbase(map);
2665 #ifdef __PCI_BAR_ZERO_VALID
2668 basezero = base == 0;
2670 maprange = pci_maprange(map);
2671 barlen = maprange == 64 ? 2 : 1;
2674 * For I/O registers, if bottom bit is set, and the next bit up
2675 * isn't clear, we know we have a BAR that doesn't conform to the
2676 * spec, so ignore it. Also, sanity check the size of the data
2677 * areas to the type of memory involved. Memory must be at least
2678 * 16 bytes in size, while I/O ranges must be at least 4.
2680 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2682 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2683 (type == SYS_RES_IOPORT && mapsize < 2))
2686 /* Save a record of this BAR. */
2687 pm = pci_add_bar(dev, reg, map, mapsize);
2689 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2690 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2691 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2692 printf(", port disabled\n");
2693 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2694 printf(", memory disabled\n");
2696 printf(", enabled\n");
2700 * If base is 0, then we have problems if this architecture does
2701 * not allow that. It is best to ignore such entries for the
2702 * moment. These will be allocated later if the driver specifically
2703 * requests them. However, some removable busses look better when
2704 * all resources are allocated, so allow '0' to be overridden.
2706 * Similarly treat maps whose values is the same as the test value
2707 * read back. These maps have had all f's written to them by the
2708 * BIOS in an attempt to disable the resources.
2710 if (!force && (basezero || map == testval))
2712 if ((u_long)base != base) {
2714 "pci%d:%d:%d:%d bar %#x too many address bits",
2715 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2716 pci_get_function(dev), reg);
2721 * This code theoretically does the right thing, but has
2722 * undesirable side effects in some cases where peripherals
2723 * respond oddly to having these bits enabled. Let the user
2724 * be able to turn them off (since pci_enable_io_modes is 1 by
2727 if (pci_enable_io_modes) {
2728 /* Turn on resources that have been left off by a lazy BIOS */
2729 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2730 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2731 cmd |= PCIM_CMD_PORTEN;
2732 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2734 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2735 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2736 cmd |= PCIM_CMD_MEMEN;
2737 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2740 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2742 if (type == SYS_RES_MEMORY && !pci_memen(dev))
/* BAR sizes are powers of two; mapsize is log2 of the size. */
2746 count = (pci_addr_t)1 << mapsize;
2747 flags = RF_ALIGNMENT_LOG2(mapsize);
2749 flags |= RF_PREFETCHABLE;
2750 if (basezero || base == pci_mapbase(testval)) {
2751 start = 0; /* Let the parent decide. */
2755 end = base + count - 1;
2757 resource_list_add(rl, type, reg, start, end, count);
2760 * Try to allocate the resource for this BAR from our parent
2761 * so that this resource range is already reserved. The
2762 * driver for this device will later inherit this resource in
2763 * pci_alloc_resource().
/* FIX: "&reg" had been corrupted to the "(R)" sign by HTML-entity decoding. */
2765 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
2767 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2769 * If the allocation fails, try to allocate a resource for
2770 * this BAR using any available range. The firmware felt
2771 * it was important enough to assign a resource, so don't
2772 * disable decoding if we can help it.
2774 resource_list_delete(rl, type, reg);
2775 resource_list_add(rl, type, reg, 0, ~0ul, count);
2776 res = resource_list_alloc(rl, bus, dev, type, &reg, 0, ~0ul,
2781 * If the allocation fails, delete the resource list entry
2782 * and disable decoding for this device.
2784 * If the driver requests this resource in the future,
2785 * pci_reserve_map() will try to allocate a fresh
2788 resource_list_delete(rl, type, reg);
2789 pci_disable_io(dev, type);
2792 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2793 pci_get_domain(dev), pci_get_bus(dev),
2794 pci_get_slot(dev), pci_get_function(dev), reg);
2796 start = rman_get_start(res);
/* Re-program the BAR with the address the parent actually granted. */
2797 pci_write_bar(dev, pm, start);
2798 rman_set_device(res, bus);
2804 * For ATA devices we need to decide early what addressing mode to use.
2805 * Legacy demands that the primary and secondary ATA ports sits on the
2806 * same addresses that old ISA hardware did. This dictates that we use
2807 * those addresses and ignore the BAR's if we cannot set PCI native
2811 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2812 uint32_t prefetchmask)
2815 int rid, type, progif;
2817 /* if this device supports PCI native addressing use it */
2818 progif = pci_read_config(dev, PCIR_PROGIF, 1);
/* 0x8a: both channels report their mode as programmable. */
2819 if ((progif & 0x8a) == 0x8a) {
2820 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2821 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2822 printf("Trying ATA native PCI addressing mode\n");
/* 0x05: request native mode on both the primary and secondary channel. */
2823 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2827 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2828 type = SYS_RES_IOPORT;
/* Primary channel: native mode uses BAR 0/1, else legacy ISA 0x1f0/0x3f6. */
2829 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2830 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2831 prefetchmask & (1 << 0));
2832 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2833 prefetchmask & (1 << 1));
2836 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2837 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7,
2839 rman_set_device(r, bus);
2841 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2842 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6,
2844 rman_set_device(r, bus);
/* Secondary channel: native mode uses BAR 2/3, else legacy 0x170/0x376. */
2846 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2847 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2848 prefetchmask & (1 << 2));
2849 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2850 prefetchmask & (1 << 3));
2853 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2854 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177,
2856 rman_set_device(r, bus);
2858 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2859 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376,
2861 rman_set_device(r, bus);
/* BARs 4/5 (bus-master DMA etc.) are mapped normally in either mode. */
2863 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
2864 prefetchmask & (1 << 4));
2865 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
2866 prefetchmask & (1 << 5));
/*
 * Determine the legacy INTx IRQ for a device and register it as the
 * rid 0 SYS_RES_IRQ resource.  Priority: user tunable, then (depending
 * on force_route) the intline register or a routed interrupt from the
 * parent bridge.
 */
2870 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2872 struct pci_devinfo *dinfo = device_get_ivars(dev);
2873 pcicfgregs *cfg = &dinfo->cfg;
2874 char tunable_name[64];
2877 /* Has to have an intpin to have an interrupt. */
2878 if (cfg->intpin == 0)
2881 /* Let the user override the IRQ with a tunable. */
2882 irq = PCI_INVALID_IRQ;
2883 snprintf(tunable_name, sizeof(tunable_name),
2884 "hw.pci%d.%d.%d.INT%c.irq",
2885 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject out-of-range tunable values (valid IRQs are 1..254 here). */
2886 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2887 irq = PCI_INVALID_IRQ;
2890 * If we didn't get an IRQ via the tunable, then we either use the
2891 * IRQ value in the intline register or we ask the bus to route an
2892 * interrupt for us. If force_route is true, then we only use the
2893 * value in the intline register if the bus was unable to assign an
2896 if (!PCI_INTERRUPT_VALID(irq)) {
2897 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2898 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2899 if (!PCI_INTERRUPT_VALID(irq))
2903 /* If after all that we don't have an IRQ, just bail. */
2904 if (!PCI_INTERRUPT_VALID(irq))
2907 /* Update the config register if it changed. */
2908 if (irq != cfg->intline) {
2910 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2913 /* Add this IRQ as rid 0 interrupt resource. */
2914 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
2917 /* Perform early OHCI takeover from SMM. */
2919 ohci_early_takeover(device_t self)
2921 struct resource *res;
2927 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
/* If the interrupt-routing bit is set, SMM firmware owns the controller. */
2931 ctl = bus_read_4(res, OHCI_CONTROL);
2932 if (ctl & OHCI_IR) {
2934 printf("ohci early: "
2935 "SMM active, request owner change\n");
/* Request ownership change, then poll up to 100 iterations for release. */
2936 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
2937 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
2939 ctl = bus_read_4(res, OHCI_CONTROL);
/* SMM did not release the controller: force a host-controller reset. */
2941 if (ctl & OHCI_IR) {
2943 printf("ohci early: "
2944 "SMM does not respond, resetting\n");
2945 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
2947 /* Disable interrupts */
2948 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
2951 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
2954 /* Perform early UHCI takeover from SMM. */
2956 uhci_early_takeover(device_t self)
2958 struct resource *res;
2962 * Set the PIRQD enable bit and switch off all the others. We don't
2963 * want legacy support to interfere with us XXX Does this also mean
2964 * that the BIOS won't touch the keyboard anymore if it is connected
2965 * to the ports of the root hub?
2967 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
2969 /* Disable interrupts */
2970 rid = PCI_UHCI_BASE_REG;
2971 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
2973 bus_write_2(res, UHCI_INTR, 0);
2974 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
2978 /* Perform early EHCI takeover from SMM. */
2980 ehci_early_takeover(device_t self)
2982 struct resource *res;
2992 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
2996 cparams = bus_read_4(res, EHCI_HCCPARAMS);
2998 /* Synchronise with the BIOS if it owns the controller. */
/* Walk the extended-capability list in config space (EECP pointers). */
2999 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3000 eecp = EHCI_EECP_NEXT(eec)) {
3001 eec = pci_read_config(self, eecp, 4);
3002 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3005 bios_sem = pci_read_config(self, eecp +
3006 EHCI_LEGSUP_BIOS_SEM, 1);
/* BIOS semaphore clear: the BIOS does not own this controller. */
3007 if (bios_sem == 0) {
3011 printf("ehci early: "
3012 "SMM active, request owner change\n");
/* Set the OS semaphore and poll for the BIOS to drop its semaphore. */
3014 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3016 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3018 bios_sem = pci_read_config(self, eecp +
3019 EHCI_LEGSUP_BIOS_SEM, 1);
3022 if (bios_sem != 0) {
3024 printf("ehci early: "
3025 "SMM does not respond\n");
3027 /* Disable interrupts */
3028 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3029 bus_write_4(res, offs + EHCI_USBINTR, 0);
3031 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3034 /* Perform early XHCI takeover from SMM. */
3036 xhci_early_takeover(device_t self)
3038 struct resource *res;
3048 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3052 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3056 /* Synchronise with the BIOS if it owns the controller. */
/* Walk the xHCI extended-capability list in MMIO (dword offsets << 2). */
3057 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3058 eecp += XHCI_XECP_NEXT(eec) << 2) {
3059 eec = bus_read_4(res, eecp);
3061 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3064 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3069 printf("xhci early: "
3070 "SMM active, request owner change\n");
/* Claim the OS semaphore, then poll the BIOS semaphore until released. */
3072 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3074 /* wait a maximum of 5 second */
3076 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3078 bios_sem = bus_read_1(res, eecp +
3079 XHCI_XECP_BIOS_SEM);
3082 if (bios_sem != 0) {
3084 printf("xhci early: "
3085 "SMM does not respond\n");
3088 /* Disable interrupts */
3089 offs = bus_read_1(res, XHCI_CAPLENGTH);
3090 bus_write_4(res, offs + XHCI_USBCMD, 0);
/* Read back USBSTS to flush the posted write before releasing the BAR. */
3091 bus_read_4(res, offs + XHCI_USBSTS);
3093 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/*
 * Populate a new child device's resource list: map all BARs (with
 * quirk handling), assign its INTx interrupt, and perform early USB
 * controller takeover from SMM firmware where applicable.
 */
3097 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3099 struct pci_devinfo *dinfo;
3101 struct resource_list *rl;
3102 const struct pci_quirk *q;
3106 dinfo = device_get_ivars(dev);
3108 rl = &dinfo->resources;
3109 devid = (cfg->device << 16) | cfg->vendor;
3111 /* ATA devices needs special map treatment */
3112 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3113 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3114 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3115 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3116 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3117 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* pci_add_map() returns the BAR width in registers (1 or 2), advancing i. */
3119 for (i = 0; i < cfg->nummaps;) {
3121 * Skip quirked resources.
3123 for (q = &pci_quirks[0]; q->devid != 0; q++)
3124 if (q->devid == devid &&
3125 q->type == PCI_QUIRK_UNMAP_REG &&
3126 q->arg1 == PCIR_BAR(i))
3128 if (q->devid != 0) {
3132 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3133 prefetchmask & (1 << i));
3137 * Add additional, quirked resources.
3139 for (q = &pci_quirks[0]; q->devid != 0; q++)
3140 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3141 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3143 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3144 #ifdef __PCI_REROUTE_INTERRUPT
3146 * Try to re-route interrupts. Sometimes the BIOS or
3147 * firmware may leave bogus values in these registers.
3148 * If the re-route fails, then just stick with what we
3151 pci_assign_interrupt(bus, dev, 1);
3153 pci_assign_interrupt(bus, dev, 0);
/* Kick SMM firmware off USB controllers before their drivers attach. */
3157 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3158 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3159 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3160 xhci_early_takeover(dev);
3161 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3162 ehci_early_takeover(dev);
3163 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3164 ohci_early_takeover(dev);
3165 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3166 uhci_early_takeover(dev);
/*
 * Enumerate all slot/function combinations on one PCI bus, reading
 * config headers via the parent bridge and adding a child device for
 * each function that responds.
 */
3171 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3173 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3174 device_t pcib = device_get_parent(dev);
3175 struct pci_devinfo *dinfo;
3177 int s, f, pcifunchigh;
3180 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3181 ("dinfo_size too small"));
3182 maxslots = PCIB_MAXSLOTS(pcib);
3183 for (s = 0; s <= maxslots; s++) {
3187 hdrtype = REG(PCIR_HDRTYPE, 1);
3188 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Only scan functions 1..7 on devices advertising multi-function. */
3190 if (hdrtype & PCIM_MFDEV)
3191 pcifunchigh = PCI_FUNCMAX;
3192 for (f = 0; f <= pcifunchigh; f++) {
3193 dinfo = pci_read_device(pcib, domain, busno, s, f,
3195 if (dinfo != NULL) {
3196 pci_add_child(dev, dinfo);
/*
 * Attach one enumerated PCI function as a newbus child: create the
 * device, hook up its ivars, snapshot then restore its config state,
 * and add its resources.
 */
3204 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3206 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3207 device_set_ivars(dinfo->cfg.dev, dinfo);
3208 resource_list_init(&dinfo->resources);
/* Save, then immediately restore config: normalizes device state. */
3209 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3210 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3211 pci_print_verbose(dinfo);
3212 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/* Generic probe for the PCI bus device itself. */
3216 pci_probe(device_t dev)
3219 device_set_desc(dev, "PCI bus");
3221 /* Allow other subclasses to override this driver. */
3222 return (BUS_PROBE_GENERIC);
/*
 * Shared attach logic for PCI bus drivers: report domain/bus numbers
 * and set up the softc's DMA tag (bounded by PCI_DMA_BOUNDARY only for
 * the top-level bus; nested PCI buses inherit the parent's tag).
 */
3226 pci_attach_common(device_t dev)
3228 struct pci_softc *sc;
3230 #ifdef PCI_DMA_BOUNDARY
3231 int error, tag_valid;
3234 sc = device_get_softc(dev);
3235 domain = pcib_get_domain(dev);
3236 busno = pcib_get_bus(dev);
3238 device_printf(dev, "domain=%d, physical bus=%d\n",
3240 #ifdef PCI_DMA_BOUNDARY
/* Grandparent not "pci" => this is the top-level bus; make a bounded tag. */
3242 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3243 devclass_find("pci")) {
3244 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3245 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3246 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3247 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3249 device_printf(dev, "Failed to create DMA tag: %d\n",
3256 sc->sc_dma_tag = bus_get_dma_tag(dev);
/* Attach the PCI bus: common setup, then enumerate and attach children. */
3261 pci_attach(device_t dev)
3263 int busno, domain, error;
3265 error = pci_attach_common(dev);
3270 * Since there can be multiple independently numbered PCI
3271 * busses on systems with multiple PCI domains, we can't use
3272 * the unit number to decide which bus we are probing. We ask
3273 * the parent pcib what our domain and bus numbers are.
3275 domain = pcib_get_domain(dev);
3276 busno = pcib_get_bus(dev);
3277 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3278 return (bus_generic_attach(dev));
/*
 * Suspend the bus: save each child's config space, suspend the
 * children, then (when ACPI is present and power management enabled)
 * place attached type 0 children into D3 or the ACPI-suggested state.
 */
3282 pci_suspend(device_t dev)
3284 int dstate, error, i, numdevs;
3285 device_t acpi_dev, child, *devlist;
3286 struct pci_devinfo *dinfo;
3289 * Save the PCI configuration space for each child and set the
3290 * device in the appropriate power state for this sleep state.
3293 if (pci_do_power_resume)
3294 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
3295 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3297 for (i = 0; i < numdevs; i++) {
3299 dinfo = device_get_ivars(child);
3300 pci_cfg_save(child, dinfo, 0);
3303 /* Suspend devices before potentially powering them down. */
3304 error = bus_generic_suspend(dev);
3306 free(devlist, M_TEMP);
3311 * Always set the device to D3. If ACPI suggests a different
3312 * power state, use it instead. If ACPI is not present, the
3313 * firmware is responsible for managing device power. Skip
3314 * children who aren't attached since they are powered down
3315 * separately. Only manage type 0 devices for now.
3317 for (i = 0; acpi_dev && i < numdevs; i++) {
3319 dinfo = (struct pci_devinfo *) device_get_ivars(child);
3320 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
3321 dstate = PCI_POWERSTATE_D3;
3322 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
3323 pci_set_powerstate(child, dstate);
3326 free(devlist, M_TEMP);
/*
 * Resume the bus: power each child back to D0 (with ACPI notification
 * where available), restore its saved config space, then resume the
 * children generically.
 */
3331 pci_resume(device_t dev)
3333 int i, numdevs, error;
3334 device_t acpi_dev, child, *devlist;
3335 struct pci_devinfo *dinfo;
3338 * Set each child to D0 and restore its PCI configuration space.
3341 if (pci_do_power_resume)
3342 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
3343 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3345 for (i = 0; i < numdevs; i++) {
3347 * Notify ACPI we're going to D0 but ignore the result. If
3348 * ACPI is not present, the firmware is responsible for
3349 * managing device power. Only manage type 0 devices for now.
3352 dinfo = (struct pci_devinfo *) device_get_ivars(child);
3353 if (acpi_dev && device_is_attached(child) &&
3354 dinfo->cfg.hdrtype == 0) {
3355 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
3356 pci_set_powerstate(child, PCI_POWERSTATE_D0);
3359 /* Now the device is powered up, restore its config space. */
3360 pci_cfg_restore(child, dinfo);
3362 free(devlist, M_TEMP);
3363 return (bus_generic_resume(dev));
/*
 * Locate the preloaded PCI vendor database (pciids) and record its
 * address and size in the pci_vendordata globals.
 */
3367 pci_load_vendor_data(void)
3369 caddr_t vendordata, info;
3371 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
3372 info = preload_search_info(vendordata, MODINFO_ADDR);
3373 pci_vendordata = *(char **)info;
3374 info = preload_search_info(vendordata, MODINFO_SIZE);
3375 pci_vendordata_size = *(size_t *)info;
3376 /* terminate the database */
/* Guards the parser against a database that lacks a trailing newline. */
3377 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Bus callback invoked when a new PCI driver is loaded: re-probe any
 * children that currently lack a driver, restoring config state before
 * the probe and re-saving it if no driver attaches.
 */
3382 pci_driver_added(device_t dev, driver_t *driver)
3387 struct pci_devinfo *dinfo;
3391 device_printf(dev, "driver added\n");
3392 DEVICE_IDENTIFY(driver, dev);
3393 if (device_get_children(dev, &devlist, &numdevs) != 0)
3395 for (i = 0; i < numdevs; i++) {
/* Only devices with no driver attached are candidates for re-probe. */
3397 if (device_get_state(child) != DS_NOTPRESENT)
3399 dinfo = device_get_ivars(child);
3400 pci_print_verbose(dinfo);
3402 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3403 pci_cfg_restore(child, dinfo);
3404 if (device_probe_and_attach(child) != 0)
3405 pci_cfg_save(child, dinfo, 1);
3407 free(devlist, M_TEMP);
/*
 * Bus method to install an interrupt handler on a child.  After the
 * generic setup, program the MSI or MSI-X message (mapping the vector
 * via the parent bridge on first use) and flip PCIM_CMD_INTxDIS
 * accordingly; plain INTx children only get INTx (re-)enabled.
 */
3411 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3412 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3414 struct pci_devinfo *dinfo;
3415 struct msix_table_entry *mte;
3416 struct msix_vector *mv;
3422 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3427 /* If this is not a direct child, just bail out. */
3428 if (device_get_parent(child) != dev) {
3433 rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt; rid >= 1 are MSI/MSI-X messages. */
3435 /* Make sure that INTx is enabled */
3436 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3439 * Check to see if the interrupt is MSI or MSI-X.
3440 * Ask our parent to map the MSI and give
3441 * us the address and data register values.
3442 * If we fail for some reason, teardown the
3443 * interrupt handler.
3445 dinfo = device_get_ivars(child);
3446 if (dinfo->cfg.msi.msi_alloc > 0) {
/* First handler for this MSI: map the vector through the bridge. */
3447 if (dinfo->cfg.msi.msi_addr == 0) {
3448 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3449 ("MSI has handlers, but vectors not mapped"));
3450 error = PCIB_MAP_MSI(device_get_parent(dev),
3451 child, rman_get_start(irq), &addr, &data);
3454 dinfo->cfg.msi.msi_addr = addr;
3455 dinfo->cfg.msi.msi_data = data;
3457 if (dinfo->cfg.msi.msi_handlers == 0)
3458 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3459 dinfo->cfg.msi.msi_data);
3460 dinfo->cfg.msi.msi_handlers++;
3462 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3463 ("No MSI or MSI-X interrupts allocated"));
3464 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3465 ("MSI-X index too high"));
3466 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3467 KASSERT(mte->mte_vector != 0, ("no message vector"));
3468 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3469 KASSERT(mv->mv_irq == rman_get_start(irq),
/* First handler for this MSI-X vector: map it through the bridge. */
3471 if (mv->mv_address == 0) {
3472 KASSERT(mte->mte_handlers == 0,
3473 ("MSI-X table entry has handlers, but vector not mapped"));
3474 error = PCIB_MAP_MSI(device_get_parent(dev),
3475 child, rman_get_start(irq), &addr, &data);
3478 mv->mv_address = addr;
3481 if (mte->mte_handlers == 0) {
3482 pci_enable_msix(child, rid - 1, mv->mv_address,
3484 pci_unmask_msix(child, rid - 1);
3486 mte->mte_handlers++;
3489 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3490 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Error path: undo the generic setup so the handler is not left armed. */
3493 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus method to remove an interrupt handler from a child: decrement
 * the MSI/MSI-X handler count and disable/mask the message when it
 * drops to zero, then perform the generic teardown.
 */
3503 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3506 struct msix_table_entry *mte;
3507 struct resource_list_entry *rle;
3508 struct pci_devinfo *dinfo;
3511 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3514 /* If this isn't a direct child, just bail out */
3515 if (device_get_parent(child) != dev)
3516 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3518 rid = rman_get_rid(irq);
/* Re-disable INTx; pci_setup_intr() had cleared this for INTx users. */
3521 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3524 * Check to see if the interrupt is MSI or MSI-X. If so,
3525 * decrement the appropriate handlers count and mask the
3526 * MSI-X message, or disable MSI messages if the count
3529 dinfo = device_get_ivars(child);
3530 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3531 if (rle->res != irq)
3533 if (dinfo->cfg.msi.msi_alloc > 0) {
3534 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3535 ("MSI-X index too high"));
3536 if (dinfo->cfg.msi.msi_handlers == 0)
3538 dinfo->cfg.msi.msi_handlers--;
3539 if (dinfo->cfg.msi.msi_handlers == 0)
3540 pci_disable_msi(child);
3542 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3543 ("No MSI or MSI-X interrupts allocated"));
3544 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3545 ("MSI-X index too high"));
3546 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3547 if (mte->mte_handlers == 0)
3549 mte->mte_handlers--;
3550 if (mte->mte_handlers == 0)
3551 pci_mask_msix(child, rid - 1);
3554 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3557 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus method to print a one-line description of a child device and
 * its port/memory/IRQ resources; returns the character count printed.
 */
3562 pci_print_child(device_t dev, device_t child)
3564 struct pci_devinfo *dinfo;
3565 struct resource_list *rl;
3568 dinfo = device_get_ivars(child);
3569 rl = &dinfo->resources;
3571 retval += bus_print_child_header(dev, child);
3573 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3574 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3575 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3576 if (device_get_flags(dev))
3577 retval += printf(" flags %#x", device_get_flags(dev));
3579 retval += printf(" at device %d.%d", pci_get_slot(child),
3580 pci_get_function(child));
3582 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable name table used by
 * pci_probe_nomatch() to describe devices with no attached driver.
 * A subclass of -1 is the generic entry for the whole class; the
 * table is terminated by a NULL desc (terminator elided in this view).
 */
3592 } pci_nomatch_tab[] = {
3593 {PCIC_OLD, -1, "old"},
3594 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3595 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3596 {PCIC_STORAGE, -1, "mass storage"},
3597 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3598 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3599 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3600 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3601 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3602 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3603 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3604 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3605 {PCIC_STORAGE, PCIS_STORAGE_NVM, "NVM"},
3606 {PCIC_NETWORK, -1, "network"},
3607 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3608 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3609 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3610 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3611 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3612 {PCIC_DISPLAY, -1, "display"},
3613 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3614 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3615 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3616 {PCIC_MULTIMEDIA, -1, "multimedia"},
3617 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3618 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3619 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3620 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3621 {PCIC_MEMORY, -1, "memory"},
3622 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3623 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3624 {PCIC_BRIDGE, -1, "bridge"},
3625 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3626 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3627 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3628 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3629 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3630 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3631 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3632 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3633 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3634 {PCIC_SIMPLECOMM, -1, "simple comms"},
3635 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3636 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3637 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3638 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3639 {PCIC_BASEPERIPH, -1, "base peripheral"},
3640 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3641 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3642 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3643 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3644 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3645 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3646 {PCIC_INPUTDEV, -1, "input device"},
3647 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3648 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3649 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3650 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3651 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3652 {PCIC_DOCKING, -1, "docking station"},
3653 {PCIC_PROCESSOR, -1, "processor"},
3654 {PCIC_SERIALBUS, -1, "serial bus"},
3655 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3656 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3657 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3658 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3659 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3660 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3661 {PCIC_WIRELESS, -1, "wireless controller"},
3662 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3663 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3664 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3665 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3666 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3667 {PCIC_SATCOM, -1, "satellite communication"},
3668 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3669 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3670 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3671 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3672 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3673 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3674 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3675 {PCIC_DASP, -1, "dasp"},
3676 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Bus method called when no driver claims a child: print the best
 * description available (vendor database first, then the class/
 * subclass table) and power the device back down via pci_cfg_save().
 */
3681 pci_probe_nomatch(device_t dev, device_t child)
3684 const char *cp, *scp;
3688 * Look for a listing for this device in a loaded device database.
3690 if ((device = pci_describe_device(child)) != NULL) {
3691 device_printf(dev, "<%s>", device);
3692 free(device, M_DEVBUF);
3695 * Scan the class/subclass descriptions for a general
3700 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3701 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* cp holds the generic class name, scp the specific subclass name. */
3702 if (pci_nomatch_tab[i].subclass == -1) {
3703 cp = pci_nomatch_tab[i].desc;
3704 } else if (pci_nomatch_tab[i].subclass ==
3705 pci_get_subclass(child)) {
3706 scp = pci_nomatch_tab[i].desc;
3710 device_printf(dev, "<%s%s%s>",
3712 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3715 printf(" at device %d.%d (no driver attached)\n",
3716 pci_get_slot(child), pci_get_function(child));
3717 pci_cfg_save(child, device_get_ivars(child), 1);
3721 * Parse the PCI device database, if loaded, and return a pointer to a
3722 * description of the device.
3724 * The database is flat text formatted as follows:
3726 * Any line not in a valid format is ignored.
3727 * Lines are terminated with newline '\n' characters.
3729 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3732 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3733 * - devices cannot be listed without a corresponding VENDOR line.
3734 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3735 * another TAB, then the device name.
3739 * Assuming (ptr) points to the beginning of a line in the database,
3740 * return the vendor or device and description of the next entry.
3741 * The value of (vendor) or (device) inappropriate for the entry type
3742 * is set to -1. Returns nonzero at the end of the database.
3744 * Note that this is slightly unrobust in the face of corrupt data;
3745 * we attempt to safeguard against this by spamming the end of the
3746 * database with a newline when we initialise.
/*
 * Parse one line of the vendor database at *ptr, filling in either
 * *vendor or *device (the other is set to -1) and up to 80 chars of
 * *desc; advances *ptr past the line.  Much of the parsing logic is
 * elided in this excerpt — consult the full source before modifying.
 */
3749 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3758 left = pci_vendordata_size - (cp - pci_vendordata);
/* Vendor lines start at column 0; device lines are TAB-indented. */
3766 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3770 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3773 /* skip to next line */
3774 while (*cp != '\n' && left > 0) {
3783 /* skip to next line */
3784 while (*cp != '\n' && left > 0) {
3788 if (*cp == '\n' && left > 0)
/*
 * Build a "vendor, device" description string for (dev) from the loaded
 * vendor database.  Returns a string allocated from M_DEVBUF that the
 * caller must free(), or NULL if no database is loaded or on failure.
 */
3795 pci_describe_device(device_t dev)
3798 char *desc, *vp, *dp, *line;
3800 desc = vp = dp = NULL;
3803 * If we have no vendor data, we can't do anything.
3805 if (pci_vendordata == NULL)
3809 * Scan the vendor data looking for this device
3811 line = pci_vendordata;
/*
 * NOTE(review): pci_describe_parse_line() scans with "%80[^\n]", which
 * may write 80 chars plus a NUL (81 bytes) into these 80-byte buffers —
 * looks one byte short; verify against the parse routine.
 */
3812 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3815 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3817 if (vendor == pci_get_vendor(dev))
3820 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3823 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3831 if (device == pci_get_device(dev))
/* No DEVICE entry matched: fall back to the raw hex device id. */
3835 snprintf(dp, 80, "0x%x", pci_get_device(dev));
/* +3 covers the ", " separator and the NUL terminator. */
3836 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3838 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus method: return instance-variable (ivar) values for a PCI child.
 * All identity values come from the cached config registers in
 * dinfo->cfg rather than fresh config-space reads.
 */
3848 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3850 struct pci_devinfo *dinfo;
3853 dinfo = device_get_ivars(child);
/* Generic PCI has no Ethernet address; report NULL plus an error. */
3857 case PCI_IVAR_ETHADDR:
3859 * The generic accessor doesn't deal with failure, so
3860 * we set the return value, then return an error.
3862 *((uint8_t **) result) = NULL;
3864 case PCI_IVAR_SUBVENDOR:
3865 *result = cfg->subvendor;
3867 case PCI_IVAR_SUBDEVICE:
3868 *result = cfg->subdevice;
3870 case PCI_IVAR_VENDOR:
3871 *result = cfg->vendor;
3873 case PCI_IVAR_DEVICE:
3874 *result = cfg->device;
/* Combined id: device in the high 16 bits, vendor in the low 16. */
3876 case PCI_IVAR_DEVID:
3877 *result = (cfg->device << 16) | cfg->vendor;
3879 case PCI_IVAR_CLASS:
3880 *result = cfg->baseclass;
3882 case PCI_IVAR_SUBCLASS:
3883 *result = cfg->subclass;
3885 case PCI_IVAR_PROGIF:
3886 *result = cfg->progif;
3888 case PCI_IVAR_REVID:
3889 *result = cfg->revid;
3891 case PCI_IVAR_INTPIN:
3892 *result = cfg->intpin;
3895 *result = cfg->intline;
3897 case PCI_IVAR_DOMAIN:
3898 *result = cfg->domain;
3904 *result = cfg->slot;
3906 case PCI_IVAR_FUNCTION:
3907 *result = cfg->func;
3909 case PCI_IVAR_CMDREG:
3910 *result = cfg->cmdreg;
3912 case PCI_IVAR_CACHELNSZ:
3913 *result = cfg->cachelnsz;
3915 case PCI_IVAR_MINGNT:
3916 *result = cfg->mingnt;
3918 case PCI_IVAR_MAXLAT:
3919 *result = cfg->maxlat;
3921 case PCI_IVAR_LATTIMER:
3922 *result = cfg->lattimer;
/*
 * Bus method: set instance-variable values for a PCI child.  Only the
 * interrupt pin is writable here; the device-identity ivars are
 * read-only and explicitly rejected with EINVAL.
 */
3931 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3933 struct pci_devinfo *dinfo;
3935 dinfo = device_get_ivars(child);
3938 case PCI_IVAR_INTPIN:
3939 dinfo->cfg.intpin = value;
/* Identity/topology ivars below deliberately fall through to EINVAL. */
3941 case PCI_IVAR_ETHADDR:
3942 case PCI_IVAR_SUBVENDOR:
3943 case PCI_IVAR_SUBDEVICE:
3944 case PCI_IVAR_VENDOR:
3945 case PCI_IVAR_DEVICE:
3946 case PCI_IVAR_DEVID:
3947 case PCI_IVAR_CLASS:
3948 case PCI_IVAR_SUBCLASS:
3949 case PCI_IVAR_PROGIF:
3950 case PCI_IVAR_REVID:
3952 case PCI_IVAR_DOMAIN:
3955 case PCI_IVAR_FUNCTION:
3956 return (EINVAL); /* disallow for now */
3963 #include "opt_ddb.h"
3965 #include <ddb/ddb.h>
3966 #include <sys/cons.h>
3969 * List resources based on pci map registers, used for within ddb
/*
 * ddb "show pciregs" command: walk the global pci_devq device list and
 * print one summary line (selector, class, card/chip ids, revision,
 * header type) per device.  Honors db_pager_quit so the user can abort.
 */
3972 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3974 struct pci_devinfo *dinfo;
3975 struct devlist *devlist_head;
3978 int i, error, none_count;
3981 /* get the head of the device queue */
3982 devlist_head = &pci_devq;
3985 * Go through the list of devices and print out devices
/* Bound the walk by pci_numdevs as a safeguard against list corruption. */
3987 for (error = 0, i = 0,
3988 dinfo = STAILQ_FIRST(devlist_head);
3989 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3990 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3992 /* Populate pd_name and pd_unit */
3995 name = device_get_name(dinfo->cfg.dev);
3998 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3999 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
4000 (name && *name) ? name : "none",
4001 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
4003 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
4004 p->pc_sel.pc_func, (p->pc_class << 16) |
4005 (p->pc_subclass << 8) | p->pc_progif,
4006 (p->pc_subdevice << 16) | p->pc_subvendor,
4007 (p->pc_device << 16) | p->pc_vendor,
4008 p->pc_revid, p->pc_hdr);
/*
 * Lazily size and allocate the resource backing a BAR (or device ROM)
 * for a child that was not given one at enumeration time.  Probes the
 * BAR to determine its size/type, allocates a matching range from the
 * parent, records it in the child's resource list, and programs the
 * BAR with the assigned address.
 */
static struct resource *
4014 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
4015 u_long start, u_long end, u_long count, u_int flags)
4017 struct pci_devinfo *dinfo = device_get_ivars(child);
4018 struct resource_list *rl = &dinfo->resources;
4019 struct resource_list_entry *rle;
4020 struct resource *res;
4022 pci_addr_t map, testval;
4026 pm = pci_find_bar(child, *rid);
4028 /* This is a BAR that we failed to allocate earlier. */
4029 mapsize = pm->pm_size;
4033 * Weed out the bogons, and figure out how large the
4034 * BAR/map is. BARs that read back 0 here are bogus
4035 * and unimplemented. Note: atapci in legacy mode are
4036 * special and handled elsewhere in the code. If you
4037 * have an atapci device in legacy mode and it fails
4038 * here, that other code is broken.
4040 pci_read_bar(child, *rid, &map, &testval);
4043 * Determine the size of the BAR and ignore BARs with a size
4044 * of 0. Device ROM BARs use a different mask value.
4046 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
4047 mapsize = pci_romsize(testval);
4049 mapsize = pci_mapsize(testval);
4052 pm = pci_add_bar(child, *rid, map, mapsize);
/* Sanity-check that the requested resource type matches the BAR type. */
4055 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
4056 if (type != SYS_RES_MEMORY) {
4059 "child %s requested type %d for rid %#x,"
4060 " but the BAR says it is an memio\n",
4061 device_get_nameunit(child), type, *rid);
4065 if (type != SYS_RES_IOPORT) {
4068 "child %s requested type %d for rid %#x,"
4069 " but the BAR says it is an ioport\n",
4070 device_get_nameunit(child), type, *rid);
4076 * For real BARs, we need to override the size that
4077 * the driver requests, because that's what the BAR
4078 * actually uses and we would otherwise have a
4079 * situation where we might allocate the excess to
4080 * another driver, which won't work.
/* mapsize is log2 of the BAR size; enforce natural alignment too. */
4082 count = (pci_addr_t)1 << mapsize;
4083 if (RF_ALIGNMENT(flags) < mapsize)
4084 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4085 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4086 flags |= RF_PREFETCHABLE;
4089 * Allocate enough resource, and then write back the
4090 * appropriate BAR for that resource.
/* Allocate inactive; activation happens later in pci_alloc_resource(). */
4092 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
4093 start, end, count, flags & ~RF_ACTIVE);
4095 device_printf(child,
4096 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4097 count, *rid, type, start, end);
/* The bus owns the reserved range until it is handed to the child. */
4100 rman_set_device(res, dev);
4101 resource_list_add(rl, type, *rid, start, end, count);
4102 rle = resource_list_find(rl, type, *rid);
4104 panic("pci_alloc_map: unexpectedly can't find resource.");
4106 rle->start = rman_get_start(res);
4107 rle->end = rman_get_end(res);
4110 device_printf(child,
4111 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4112 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address we actually got. */
4113 map = rman_get_start(res);
4114 pci_write_bar(child, pm, map);
/*
 * Bus method: allocate a resource for a child, performing lazy BAR
 * allocation for memory/ioport rids and interrupt routing for IRQ 0.
 * Requests from grandchildren are passed straight up the tree.
 */
4120 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4121 u_long start, u_long end, u_long count, u_int flags)
4123 struct pci_devinfo *dinfo = device_get_ivars(child);
4124 struct resource_list *rl = &dinfo->resources;
4125 struct resource_list_entry *rle;
4126 struct resource *res;
4127 pcicfgregs *cfg = &dinfo->cfg;
4129 if (device_get_parent(child) != dev)
4130 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4131 type, rid, start, end, count, flags));
4134 * Perform lazy resource allocation
4139 * Can't alloc legacy interrupt once MSI messages have
/* Legacy INTx (rid 0) and MSI/MSI-X are mutually exclusive. */
4142 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4143 cfg->msix.msix_alloc > 0))
4147 * If the child device doesn't have an interrupt
4148 * routed and is deserving of an interrupt, try to
4151 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4153 pci_assign_interrupt(dev, child, 0);
4155 case SYS_RES_IOPORT:
4156 case SYS_RES_MEMORY:
4159 * PCI-PCI bridge I/O window resources are not BARs.
4160 * For those allocations just pass the request up the
4163 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4165 case PCIR_IOBASEL_1:
4166 case PCIR_MEMBASE_1:
4167 case PCIR_PMBASEL_1:
4169 * XXX: Should we bother creating a resource
4172 return (bus_generic_alloc_resource(dev, child,
4173 type, rid, start, end, count, flags));
4177 /* Allocate resources for this BAR if needed. */
4178 rle = resource_list_find(rl, type, *rid);
4180 res = pci_alloc_map(dev, child, type, rid, start, end,
4184 rle = resource_list_find(rl, type, *rid);
4188 * If the resource belongs to the bus, then give it to
4189 * the child. We need to activate it if requested
4190 * since the bus always allocates inactive resources.
4192 if (rle != NULL && rle->res != NULL &&
4193 rman_get_device(rle->res) == dev) {
4195 device_printf(child,
4196 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
4197 rman_get_size(rle->res), *rid, type,
4198 rman_get_start(rle->res));
/* Transfer ownership of the reserved range to the child. */
4199 rman_set_device(rle->res, child);
4200 if ((flags & RF_ACTIVE) &&
4201 bus_activate_resource(child, type, *rid,
/* Fall back to the generic resource-list allocator. */
4207 return (resource_list_alloc(rl, dev, child, type, rid,
4208 start, end, count, flags));
/*
 * Bus method: release a child's resource.  BAR-backed memory/ioport
 * resources are not truly released — they are deactivated and handed
 * back to the bus so the address range stays reserved for the device.
 */
4212 pci_release_resource(device_t dev, device_t child, int type, int rid,
4217 if (device_get_parent(child) != dev)
4218 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4222 * For BARs we don't actually want to release the resource.
4223 * Instead, we deactivate the resource if needed and then give
4224 * ownership of the BAR back to the bus.
4227 case SYS_RES_IOPORT:
4228 case SYS_RES_MEMORY:
4229 if (rman_get_device(r) != child)
4231 if (rman_get_flags(r) & RF_ACTIVE) {
4232 error = bus_deactivate_resource(child, type, rid, r);
/* Return ownership of the reserved range to the bus. */
4236 rman_set_device(r, dev);
4239 return (bus_generic_rl_release_resource(dev, child, type, rid, r));
/*
 * Bus method: activate a child's resource, then enable the matching
 * decoding (I/O or memory) in the device's command register.  Device
 * ROM BARs additionally need their enable bit set explicitly.
 */
4243 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4246 struct pci_devinfo *dinfo;
4249 error = bus_generic_activate_resource(dev, child, type, rid, r);
4253 /* Enable decoding in the command register when activating BARs. */
4254 if (device_get_parent(child) == dev) {
4255 /* Device ROMs need their decoding explicitly enabled. */
4256 dinfo = device_get_ivars(child);
4257 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4258 pci_write_bar(child, pci_find_bar(child, rid),
4259 rman_get_start(r) | PCIM_BIOS_ENABLE);
4261 case SYS_RES_IOPORT:
4262 case SYS_RES_MEMORY:
4263 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus method: deactivate a child's resource.  For device ROM BARs the
 * decode enable bit is cleared by rewriting the BAR without it.
 */
4271 pci_deactivate_resource(device_t dev, device_t child, int type,
4272 int rid, struct resource *r)
4274 struct pci_devinfo *dinfo;
4277 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4281 /* Disable decoding for device ROMs. */
4282 if (device_get_parent(child) == dev) {
4283 dinfo = device_get_ivars(child);
4284 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4285 pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Bus method: delete a resource-list entry for a child, refusing if
 * the underlying resource is still owned by the child or active.
 */
4292 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4294 struct pci_devinfo *dinfo;
4295 struct resource_list *rl;
4296 struct resource_list_entry *rle;
4298 if (device_get_parent(child) != dev)
4301 dinfo = device_get_ivars(child);
4302 rl = &dinfo->resources;
4303 rle = resource_list_find(rl, type, rid);
/* Refuse to delete while the child still holds or uses the resource. */
4308 if (rman_get_device(rle->res) != dev ||
4309 rman_get_flags(rle->res) & RF_ACTIVE) {
4310 device_printf(dev, "delete_resource: "
4311 "Resource still owned by child, oops. "
4312 "(type=%d, rid=%d, addr=%lx)\n",
4313 rle->type, rle->rid,
4314 rman_get_start(rle->res));
4317 bus_release_resource(dev, type, rid, rle->res);
4319 resource_list_delete(rl, type, rid);
/* Bus method: return the child's per-device resource list. */
struct resource_list *
4323 pci_get_resource_list (device_t dev, device_t child)
4325 struct pci_devinfo *dinfo = device_get_ivars(child);
4327 return (&dinfo->resources);
/* Bus method: return the bus's DMA tag for use by child devices. */
4331 pci_get_dma_tag(device_t bus, device_t dev)
4333 struct pci_softc *sc = device_get_softc(bus);
4335 return (sc->sc_dma_tag);
/*
 * Bus method: read `width` bytes of the child's config space at `reg`,
 * delegating to the parent bridge via PCIB_READ_CONFIG.
 */
4339 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4341 struct pci_devinfo *dinfo = device_get_ivars(child);
4342 pcicfgregs *cfg = &dinfo->cfg;
4344 return (PCIB_READ_CONFIG(device_get_parent(dev),
4345 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus method: write `width` bytes of `val` to the child's config space
 * at `reg`, delegating to the parent bridge via PCIB_WRITE_CONFIG.
 */
4349 pci_write_config_method(device_t dev, device_t child, int reg,
4350 uint32_t val, int width)
4352 struct pci_devinfo *dinfo = device_get_ivars(child);
4353 pcicfgregs *cfg = &dinfo->cfg;
4355 PCIB_WRITE_CONFIG(device_get_parent(dev),
4356 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Bus method: format the child's location ("slot=%d function=%d") into buf. */
4360 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4364 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4365 pci_get_function(child));
/*
 * Bus method: format the child's plug-and-play identity (vendor,
 * device, subvendor, subdevice, class bytes) into buf for use by
 * devd/driver matching.
 */
4370 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4373 struct pci_devinfo *dinfo;
4376 dinfo = device_get_ivars(child);
4378 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4379 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4380 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/* Bus method: ask the parent bridge to route the child's interrupt. */
4386 pci_assign_interrupt_method(device_t dev, device_t child)
4388 struct pci_devinfo *dinfo = device_get_ivars(child);
4389 pcicfgregs *cfg = &dinfo->cfg;
4391 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device list,
 * create the /dev control node, and load the vendor database; on
 * unload, destroy the control node.
 */
4396 pci_modevent(module_t mod, int what, void *arg)
4398 static struct cdev *pci_cdev;
4402 STAILQ_INIT(&pci_devq);
4404 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4406 pci_load_vendor_data();
4410 destroy_dev(pci_cdev);
/*
 * Restore a device's saved config registers (typically after resume),
 * first returning it to D0 so the writes stick, then restoring BARs,
 * the command register, and the remaining writable type-0 header
 * fields, and finally any MSI/MSI-X configuration.
 */
4418 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
4422 * Only do header type 0 devices. Type 1 devices are bridges,
4423 * which we know need special treatment. Type 2 devices are
4424 * cardbus bridges which also require special treatment.
4425 * Other types are unknown, and we err on the side of safety
4428 if (dinfo->cfg.hdrtype != 0)
4432 * Restore the device to full power mode. We must do this
4433 * before we restore the registers because moving from D3 to
4434 * D0 will cause the chip's BARs and some other registers to
4435 * be reset to some unknown power on reset values. Cut down
4436 * the noise on boot by doing nothing if we are already in
4439 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
4440 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4441 pci_restore_bars(dev);
4442 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
4443 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
4444 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
4445 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
4446 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
4447 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
4448 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
4449 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
4450 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
4452 /* Restore MSI and MSI-X configurations if they are present. */
4453 if (dinfo->cfg.msi.msi_location != 0)
4454 pci_resume_msi(dev);
4455 if (dinfo->cfg.msix.msix_location != 0)
4456 pci_resume_msix(dev);
/*
 * Snapshot a device's writable type-0 config registers into the cached
 * copy (so pci_cfg_restore() can replay them) and, if `setstate` is
 * set and policy allows, power the device down to D3.
 */
4460 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
4466 * Only do header type 0 devices. Type 1 devices are bridges, which
4467 * we know need special treatment. Type 2 devices are cardbus bridges
4468 * which also require special treatment. Other types are unknown, and
4469 * we err on the side of safety by ignoring them. Powering down
4470 * bridges should not be undertaken lightly.
4472 if (dinfo->cfg.hdrtype != 0)
4476 * Some drivers apparently write to these registers w/o updating our
4477 * cached copy. No harm happens if we update the copy, so do so here
4478 * so we can restore them. The COMMAND register is modified by the
4479 * bus w/o updating the cache. This should represent the normally
4480 * writable portion of the 'defined' part of type 0 headers. In
4481 * theory we also need to save/restore the PCI capability structures
4482 * we know about, but apart from power we don't know any that are
4485 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
4486 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4487 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
4488 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
4489 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
4490 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
4491 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
4492 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
4493 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
4494 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
4495 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
4496 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
4497 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
4498 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
4499 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
4502 * don't set the state for display devices, base peripherals and
4503 * memory devices since bad things happen when they are powered down.
4504 * We should (a) have drivers that can easily detach and (b) use
4505 * generic drivers for these devices so that some device actually
4506 * attaches. We need to make sure that when we implement (a) we don't
4507 * power the device down on a reattach.
4509 cls = pci_get_class(dev);
/* pci_do_power_nodriver is a sysctl-style policy knob: 0..3. */
4512 switch (pci_do_power_nodriver)
4514 case 0: /* NO powerdown at all */
4516 case 1: /* Conservative about what to power down */
4517 if (cls == PCIC_STORAGE)
4520 case 2: /* Aggressive about what to power down */
4521 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4522 cls == PCIC_BASEPERIPH)
4525 case 3: /* Power down everything */
4529 * PCI spec says we can only go into D3 state from D0 state.
4530 * Transition from D[12] into D0 before going to D3 state.
4532 ps = pci_get_powerstate(dev);
4533 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4534 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4535 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4536 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
4539 /* Wrapper APIs suitable for device driver use. */
/* Save the device's config state without changing its power state. */
4541 pci_save_state(device_t dev)
4543 struct pci_devinfo *dinfo;
4545 dinfo = device_get_ivars(dev);
4546 pci_cfg_save(dev, dinfo, 0);
4550 pci_restore_state(device_t dev)
4552 struct pci_devinfo *dinfo;
4554 dinfo = device_get_ivars(dev);
4555 pci_cfg_restore(dev, dinfo);