2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/ehcireg.h>
66 #include <dev/usb/controller/ohcireg.h>
67 #include <dev/usb/controller/uhcireg.h>
73 #include <contrib/dev/acpica/include/acpi.h>
/*
 * Configuration macros. NOTE: intervening lines (including the #else/#endif of
 * the BUS_SPACE_MAXADDR conditional) are elided in this view.
 */
76 #define ACPI_PWR_FOR_SLEEP(x, y, z)
80 * XXX: Due to a limitation of the bus_dma_tag_create() API, we cannot
81 * specify a 4GB boundary on 32-bit targets. Usually this does not
82 * matter as it is ok to use a boundary of 0 on these systems.
83 * However, in the case of PAE, DMA addresses can cross a 4GB
84 * boundary, so as a workaround use a 2GB boundary.
86 #if (BUS_SPACE_MAXADDR > 0xFFFFFFFF)
/*
 * NOTE(review): with the #else line elided it looks like the 2GB value is
 * attached to the 64-bit branch and 4GB to the 32-bit branch, which would
 * contradict the comment above — confirm against upstream before trusting
 * this ordering.
 */
88 #define PCI_DMA_BOUNDARY 0x80000000
90 #define PCI_DMA_BOUNDARY 0x100000000
/* True when (cfg, reg) names the expansion-ROM BAR for this header type. */
94 #define PCIR_IS_BIOS(cfg, reg) \
95 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
96 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
/*
 * Forward declarations for this file's static helpers. Several prototypes'
 * continuation lines are elided in this view.
 */
98 static pci_addr_t pci_mapbase(uint64_t mapreg);
99 static const char *pci_maptype(uint64_t mapreg);
100 static int pci_mapsize(uint64_t testval);
101 static int pci_maprange(uint64_t mapreg);
102 static pci_addr_t pci_rombase(uint64_t mapreg);
103 static int pci_romsize(uint64_t testval);
104 static void pci_fixancient(pcicfgregs *cfg);
105 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
107 static int pci_porten(device_t dev);
108 static int pci_memen(device_t dev);
109 static void pci_assign_interrupt(device_t bus, device_t dev,
111 static int pci_add_map(device_t bus, device_t dev, int reg,
112 struct resource_list *rl, int force, int prefetch);
113 static int pci_probe(device_t dev);
114 static int pci_attach(device_t dev);
115 static void pci_load_vendor_data(void);
116 static int pci_describe_parse_line(char **ptr, int *vendor,
117 int *device, char **desc);
118 static char *pci_describe_device(device_t dev);
119 static bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev);
120 static int pci_modevent(module_t mod, int what, void *arg);
121 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
123 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
124 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
125 int reg, uint32_t *data);
127 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
128 int reg, uint32_t data);
130 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
131 static void pci_disable_msi(device_t dev);
132 static void pci_enable_msi(device_t dev, uint64_t address,
134 static void pci_enable_msix(device_t dev, u_int index,
135 uint64_t address, uint32_t data);
136 static void pci_mask_msix(device_t dev, u_int index);
137 static void pci_unmask_msix(device_t dev, u_int index);
138 static int pci_msi_blacklisted(void);
139 static void pci_resume_msi(device_t dev);
140 static void pci_resume_msix(device_t dev);
141 static int pci_remap_intr_method(device_t bus, device_t dev,
/*
 * Newbus method dispatch table for the PCI bus driver, followed by the
 * driver class definition and module registration. The table's closing
 * brace / DEVMETHOD_END line is elided in this view.
 */
144 static device_method_t pci_methods[] = {
145 /* Device interface */
146 DEVMETHOD(device_probe, pci_probe),
147 DEVMETHOD(device_attach, pci_attach),
148 DEVMETHOD(device_detach, bus_generic_detach),
149 DEVMETHOD(device_shutdown, bus_generic_shutdown),
150 DEVMETHOD(device_suspend, pci_suspend),
151 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
154 DEVMETHOD(bus_print_child, pci_print_child),
155 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
156 DEVMETHOD(bus_read_ivar, pci_read_ivar),
157 DEVMETHOD(bus_write_ivar, pci_write_ivar),
158 DEVMETHOD(bus_driver_added, pci_driver_added),
159 DEVMETHOD(bus_setup_intr, pci_setup_intr),
160 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
162 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
163 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
164 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
165 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
166 DEVMETHOD(bus_delete_resource, pci_delete_resource),
167 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
168 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
169 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
170 DEVMETHOD(bus_activate_resource, pci_activate_resource),
171 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
172 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
173 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
174 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
/* PCI interface */
177 DEVMETHOD(pci_read_config, pci_read_config_method),
178 DEVMETHOD(pci_write_config, pci_write_config_method),
179 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
180 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
181 DEVMETHOD(pci_enable_io, pci_enable_io_method),
182 DEVMETHOD(pci_disable_io, pci_disable_io_method),
183 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
184 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
185 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
186 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
187 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
188 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
189 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
190 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
191 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
192 DEVMETHOD(pci_release_msi, pci_release_msi_method),
193 DEVMETHOD(pci_msi_count, pci_msi_count_method),
194 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Register the "pci" driver class and attach it beneath pcib. */
199 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
201 static devclass_t pci_devclass;
202 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
203 MODULE_VERSION(pci, 1);
/*
 * Vendor-description database storage plus the quirk table that matches
 * devices by combined device/vendor ID. The `devid` field line below is
 * presumably a member of struct pci_quirk whose declaration is elided in
 * this view; the table's closing brace is likewise elided.
 */
205 static char *pci_vendordata;
206 static size_t pci_vendordata_size;
209 uint32_t devid; /* Vendor/device of the card */
211 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
212 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
213 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
214 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
219 static const struct pci_quirk pci_quirks[] = {
220 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
221 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
222 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
223 /* As does the Serverworks OSB4 (the SMBus mapping register) */
224 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
227 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
228 * or the CMIC-SL (AKA ServerWorks GC_LE).
230 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
231 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
234 * MSI doesn't work on earlier Intel chipsets including
235 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
237 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
238 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
239 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
240 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
241 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
242 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
243 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
246 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
249 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
252 * MSI-X allocation doesn't work properly for devices passed through
253 * by VMware up to at least ESXi 5.1.
255 { 0x079015ad, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* PCI/PCI-X */
256 { 0x07a015ad, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* PCIe */
259 * Some virtualization environments emulate an older chipset
260 * but support MSI just fine. QEMU uses the Intel 82440.
262 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
265 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
266 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
267 * It prevents us from attaching hpet(4) when the bit is unset.
268 * Note this quirk only affects SB600 revision A13 and earlier.
269 * For SB600 A21 and later, firmware must set the bit to hide it.
270 * For SB700 and later, it is unused and hardcoded to zero.
272 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
277 /* map register information */
278 #define PCI_MAPMEM 0x01 /* memory map */
279 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
280 #define PCI_MAPPORT 0x04 /* port map */
/*
 * Global list of all discovered PCI devices (see pci_read_device), a
 * generation counter, and flags recording whether a PCIe / PCI-X chipset
 * was observed during capability scanning (set in pci_read_cap).
 */
282 struct devlist pci_devq;
283 uint32_t pci_generation;
284 uint32_t pci_numdevs = 0;
285 static int pcie_chipset, pcix_chipset;
/*
 * hw.pci.* loader tunables and sysctl knobs controlling BAR handling,
 * power management policy, MSI/MSI-X enablement, and early USB takeover.
 */
288 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
290 static int pci_enable_io_modes = 1;
291 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
292 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
293 &pci_enable_io_modes, 1,
294 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
295 enable these bits correctly. We'd like to do this all the time, but there\n\
296 are some peripherals that this causes problems with.");
298 static int pci_do_realloc_bars = 0;
299 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
300 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
301 &pci_do_realloc_bars, 0,
302 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
304 static int pci_do_power_nodriver = 0;
305 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
306 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
307 &pci_do_power_nodriver, 0,
308 "Place a function into D3 state when no driver attaches to it. 0 means\n\
309 disable. 1 means conservatively place devices into D3 state. 2 means\n\
310 agressively place devices into D3 state. 3 means put absolutely everything\n\
313 static int pci_do_power_resume = 1;
314 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
315 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
316 &pci_do_power_resume, 1,
317 "Transition from D3 -> D0 on resume.");
319 static int pci_do_msi = 1;
320 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
321 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
322 "Enable support for MSI interrupts");
324 static int pci_do_msix = 1;
325 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
326 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
327 "Enable support for MSI-X interrupts");
329 static int pci_honor_msi_blacklist = 1;
330 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
331 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
332 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
/* USB takeover defaults on for x86 (line 335), off elsewhere (line 337). */
334 #if defined(__i386__) || defined(__amd64__)
335 static int pci_usb_takeover = 1;
337 static int pci_usb_takeover = 0;
339 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
340 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
341 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
342 Disable this if you depend on BIOS emulation of USB devices, that is\n\
343 you use USB devices (like keyboard or mouse) but do not load USB drivers");
345 /* Find a device_t by bus/slot/function in domain 0 */
/* Thin convenience wrapper around pci_find_dbsf() with domain fixed to 0. */
348 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
351 return (pci_find_dbsf(0, bus, slot, func));
354 /* Find a device_t by domain/bus/slot/function */
/*
 * Linear scan of the global pci_devq list; returns the matching device_t
 * (the not-found return path is elided in this view).
 */
357 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
359 struct pci_devinfo *dinfo;
361 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
362 if ((dinfo->cfg.domain == domain) &&
363 (dinfo->cfg.bus == bus) &&
364 (dinfo->cfg.slot == slot) &&
365 (dinfo->cfg.func == func)) {
366 return (dinfo->cfg.dev);
373 /* Find a device_t by vendor/device ID */
/* Returns the first device on pci_devq whose vendor/device IDs match. */
376 pci_find_device(uint16_t vendor, uint16_t device)
378 struct pci_devinfo *dinfo;
380 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
381 if ((dinfo->cfg.vendor == vendor) &&
382 (dinfo->cfg.device == device)) {
383 return (dinfo->cfg.dev);
/* Returns the first device on pci_devq with the given base class/subclass. */
391 pci_find_class(uint8_t class, uint8_t subclass)
393 struct pci_devinfo *dinfo;
395 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
396 if (dinfo->cfg.baseclass == class &&
397 dinfo->cfg.subclass == subclass) {
398 return (dinfo->cfg.dev);
/*
 * printf() with a "pciD:B:S:F: " selector prefix derived from cfg; returns
 * the total character count (va_start/va_end lines elided in this view).
 */
406 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
411 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
414 retval += vprintf(fmt, ap);
419 /* return base address of memory or port map */
/* Strips the type/flag bits from a BAR value, leaving only the base address. */
422 pci_mapbase(uint64_t mapreg)
425 if (PCI_BAR_MEM(mapreg))
426 return (mapreg & PCIM_BAR_MEM_BASE);
428 return (mapreg & PCIM_BAR_IO_BASE);
431 /* return map type of memory or port map */
/* Human-readable BAR type; the I/O-port and plain-memory returns are elided. */
434 pci_maptype(uint64_t mapreg)
437 if (PCI_BAR_IO(mapreg))
439 if (mapreg & PCIM_BAR_MEM_PREFETCH)
440 return ("Prefetchable Memory");
444 /* return log2 of map size decoded for memory or port map */
/* Counts trailing zero bits of the sizing value written back by the device. */
447 pci_mapsize(uint64_t testval)
451 testval = pci_mapbase(testval);
454 while ((testval & 1) == 0)
463 /* return base address of device ROM */
466 pci_rombase(uint64_t mapreg)
469 return (mapreg & PCIM_BIOS_ADDR_MASK);
472 /* return log2 of map size decided for device ROM */
/* Same trailing-zero count as pci_mapsize(), applied to the ROM BAR. */
475 pci_romsize(uint64_t testval)
479 testval = pci_rombase(testval);
482 while ((testval & 1) == 0)
491 /* return log2 of address range supported by map register */
/*
 * Decodes the BAR's addressing width: I/O and 1MB/32-bit/64-bit memory
 * types (the per-case return values are elided in this view).
 */
494 pci_maprange(uint64_t mapreg)
498 if (PCI_BAR_IO(mapreg))
501 switch (mapreg & PCIM_BAR_MEM_TYPE) {
502 case PCIM_BAR_MEM_32:
505 case PCIM_BAR_MEM_1MB:
508 case PCIM_BAR_MEM_64:
515 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/* Only header type 0 is patched; PCI-PCI bridges get hdrtype forced to 1. */
518 pci_fixancient(pcicfgregs *cfg)
520 if (cfg->hdrtype != 0)
523 /* PCI to PCI bridges use header type 1 */
524 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
528 /* extract header type specific config data */
/*
 * Fills subvendor/subdevice and the BAR count according to header type
 * (normal / bridge / cardbus); the case labels themselves are elided in
 * this view. The REG macro is #undef'd further down in the original file.
 */
531 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
533 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
534 switch (cfg->hdrtype) {
536 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
537 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
538 cfg->nummaps = PCI_MAXMAPS_0;
541 cfg->nummaps = PCI_MAXMAPS_1;
544 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
545 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
546 cfg->nummaps = PCI_MAXMAPS_2;
552 /* read configuration header into pcicfgregs structure */
/*
 * Allocates a pci_devinfo of `size` bytes (M_ZERO), populates its cfg from
 * config space, reads header-type-specific data and capabilities, links the
 * entry onto the global pci_devq, and returns it. Returns with no entry if
 * the slot reads all-ones (no device present). Several lines, including the
 * domain/bus/slot/func assignments, are elided in this view.
 */
554 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
556 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
557 pcicfgregs *cfg = NULL;
558 struct pci_devinfo *devlist_entry;
559 struct devlist *devlist_head;
561 devlist_head = &pci_devq;
563 devlist_entry = NULL;
565 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
/* NOTE(review): M_WAITOK cannot return NULL, so this check is dead code. */
566 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
567 if (devlist_entry == NULL)
570 cfg = &devlist_entry->cfg;
576 cfg->vendor = REG(PCIR_VENDOR, 2);
577 cfg->device = REG(PCIR_DEVICE, 2);
578 cfg->cmdreg = REG(PCIR_COMMAND, 2);
579 cfg->statreg = REG(PCIR_STATUS, 2);
580 cfg->baseclass = REG(PCIR_CLASS, 1);
581 cfg->subclass = REG(PCIR_SUBCLASS, 1);
582 cfg->progif = REG(PCIR_PROGIF, 1);
583 cfg->revid = REG(PCIR_REVID, 1);
584 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
585 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
586 cfg->lattimer = REG(PCIR_LATTIMER, 1);
587 cfg->intpin = REG(PCIR_INTPIN, 1);
588 cfg->intline = REG(PCIR_INTLINE, 1);
590 cfg->mingnt = REG(PCIR_MINGNT, 1);
591 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Multifunction flag is split out of hdrtype so hdrtype is the bare type. */
593 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
594 cfg->hdrtype &= ~PCIM_MFDEV;
595 STAILQ_INIT(&cfg->maps);
598 pci_hdrtypedata(pcib, b, s, f, cfg);
600 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
601 pci_read_cap(pcib, cfg);
603 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror the cfg into the pciio(4)-visible pci_conf for the /dev/pci ioctl. */
605 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
606 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
607 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
608 devlist_entry->conf.pc_sel.pc_func = cfg->func;
609 devlist_entry->conf.pc_hdr = cfg->hdrtype;
611 devlist_entry->conf.pc_subvendor = cfg->subvendor;
612 devlist_entry->conf.pc_subdevice = cfg->subdevice;
613 devlist_entry->conf.pc_vendor = cfg->vendor;
614 devlist_entry->conf.pc_device = cfg->device;
616 devlist_entry->conf.pc_class = cfg->baseclass;
617 devlist_entry->conf.pc_subclass = cfg->subclass;
618 devlist_entry->conf.pc_progif = cfg->progif;
619 devlist_entry->conf.pc_revid = cfg->revid;
624 return (devlist_entry);
/*
 * Walks the classic (pre-PCIe) capability list rooted at the header-type-
 * specific capability pointer, recording power-management, HyperTransport,
 * MSI, MSI-X, VPD, subvendor, PCI-X and PCIe capabilities into cfg. Many
 * intermediate lines (case labels, break statements, closing braces) are
 * elided in this view.
 */
629 pci_read_cap(device_t pcib, pcicfgregs *cfg)
631 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
632 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
633 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
637 int ptr, nextptr, ptrptr;
/* Pick the list head register based on header type; cardbus differs. */
639 switch (cfg->hdrtype & PCIM_HDRTYPE) {
642 ptrptr = PCIR_CAP_PTR;
645 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
648 return; /* no extended capabilities support */
650 nextptr = REG(ptrptr, 1); /* sanity check? */
653 * Read capability entries.
655 while (nextptr != 0) {
658 printf("illegal PCI extended capability offset %d\n",
662 /* Find the next entry */
664 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
666 /* Process this entry */
667 switch (REG(ptr + PCICAP_ID, 1)) {
668 case PCIY_PMG: /* PCI power management */
669 if (cfg->pp.pp_cap == 0) {
670 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
671 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
672 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
673 if ((nextptr - ptr) > PCIR_POWER_DATA)
674 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
677 case PCIY_HT: /* HyperTransport */
678 /* Determine HT-specific capability type. */
679 val = REG(ptr + PCIR_HT_COMMAND, 2);
681 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
682 cfg->ht.ht_slave = ptr;
684 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
685 switch (val & PCIM_HTCMD_CAP_MASK) {
686 case PCIM_HTCAP_MSI_MAPPING:
687 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
688 /* Sanity check the mapping window. */
689 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
692 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
694 if (addr != MSI_INTEL_ADDR_BASE)
696 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
697 cfg->domain, cfg->bus,
698 cfg->slot, cfg->func,
701 addr = MSI_INTEL_ADDR_BASE;
703 cfg->ht.ht_msimap = ptr;
704 cfg->ht.ht_msictrl = val;
705 cfg->ht.ht_msiaddr = addr;
710 case PCIY_MSI: /* PCI MSI */
711 cfg->msi.msi_location = ptr;
712 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* MMC field encodes log2 of the supported message count. */
713 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
714 PCIM_MSICTRL_MMC_MASK)>>1);
716 case PCIY_MSIX: /* PCI MSI-X */
717 cfg->msix.msix_location = ptr;
718 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
719 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
720 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table/PBA locations are a BAR index (BIR) plus an offset within it. */
721 val = REG(ptr + PCIR_MSIX_TABLE, 4);
722 cfg->msix.msix_table_bar = PCIR_BAR(val &
724 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
725 val = REG(ptr + PCIR_MSIX_PBA, 4);
726 cfg->msix.msix_pba_bar = PCIR_BAR(val &
728 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
730 case PCIY_VPD: /* PCI Vital Product Data */
731 cfg->vpd.vpd_reg = ptr;
734 /* Should always be true. */
735 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
736 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
737 cfg->subvendor = val & 0xffff;
738 cfg->subdevice = val >> 16;
741 case PCIY_PCIX: /* PCI-X */
743 * Assume we have a PCI-X chipset if we have
744 * at least one PCI-PCI bridge with a PCI-X
745 * capability. Note that some systems with
746 * PCI-express or HT chipsets might match on
747 * this check as well.
749 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
752 case PCIY_EXPRESS: /* PCI-express */
754 * Assume we have a PCI-express chipset if we have
755 * at least one PCI-express device.
764 #if defined(__powerpc__)
766 * Enable the MSI mapping window for all HyperTransport
767 * slaves. PCI-PCI bridges have their windows enabled via
770 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
771 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
773 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
774 cfg->domain, cfg->bus, cfg->slot, cfg->func);
775 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
776 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
780 /* REG and WREG use carry through to next functions */
784 * PCI Vital Product Data
787 #define PCI_VPD_TIMEOUT 1000000
/*
 * Reads one 32-bit VPD word: writes the address, then polls the VPD flag
 * bit (0x8000) until the device sets it, bounded by PCI_VPD_TIMEOUT spins
 * of DELAY(1). The timeout-exhausted error return is elided in this view.
 */
790 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
792 int count = PCI_VPD_TIMEOUT;
794 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
796 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
798 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
801 DELAY(1); /* limit looping */
803 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Writes one 32-bit VPD word: data first, then address with the flag bit
 * (0x8000) set; polls until the device clears the flag to signal completion,
 * bounded by PCI_VPD_TIMEOUT. Mirror image of pci_read_vpd_reg().
 */
810 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
812 int count = PCI_VPD_TIMEOUT;
814 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
816 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
817 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
818 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
821 DELAY(1); /* limit looping */
828 #undef PCI_VPD_TIMEOUT
/*
 * Streaming reader state for VPD: buffers one 32-bit word (val) and hands
 * it out a byte at a time. Struct members other than those referenced
 * below are elided in this view.
 */
830 struct vpd_readstate {
/* Returns the next VPD byte via *data; refills the word buffer as needed. */
840 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
845 if (vrs->bytesinval == 0) {
846 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
848 vrs->val = le32toh(reg);
850 byte = vrs->val & 0xff;
853 vrs->val = vrs->val >> 8;
854 byte = vrs->val & 0xff;
/*
 * Parses the device's entire VPD image with a byte-at-a-time state machine:
 * state 0 decodes resource-item headers (Identifier String 0x2, VPD-R 0x10,
 * VPD-W 0x11); states 1/2-3/5-6 consume the identifier, read-only keywords,
 * and read-write keywords respectively, growing the vpd_ros/vpd_w arrays
 * with reallocf() and validating the "RV" checksum keyword. On bad
 * read-only data or an I/O error, partially-built arrays are freed. Many
 * intermediate lines (state transitions, break statements) are elided in
 * this view.
 */
864 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
866 struct vpd_readstate vrs;
871 int alloc, off; /* alloc/off for RO/W arrays */
877 /* init vpd reader */
885 name = remain = i = 0; /* shut up stupid gcc */
886 alloc = off = 0; /* shut up stupid gcc */
887 dflen = 0; /* shut up stupid gcc */
890 if (vpd_nextbyte(&vrs, &byte)) {
895 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
896 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
897 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
900 case 0: /* item name */
902 if (vpd_nextbyte(&vrs, &byte2)) {
907 if (vpd_nextbyte(&vrs, &byte2)) {
911 remain |= byte2 << 8;
/* VPD space is at most 0x7f 4-byte words; reject lengths past the end. */
912 if (remain > (0x7f*4 - vrs.off)) {
915 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
916 cfg->domain, cfg->bus, cfg->slot,
922 name = (byte >> 3) & 0xf;
925 case 0x2: /* String */
926 cfg->vpd.vpd_ident = malloc(remain + 1,
934 case 0x10: /* VPD-R */
937 cfg->vpd.vpd_ros = malloc(alloc *
938 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
942 case 0x11: /* VPD-W */
945 cfg->vpd.vpd_w = malloc(alloc *
946 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
950 default: /* Invalid data, abort */
956 case 1: /* Identifier String */
957 cfg->vpd.vpd_ident[i++] = byte;
960 cfg->vpd.vpd_ident[i] = '\0';
965 case 2: /* VPD-R Keyword Header */
/* Grow the array geometrically; M_ZERO keeps new slots NULL-valued. */
967 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
968 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
969 M_DEVBUF, M_WAITOK | M_ZERO);
971 cfg->vpd.vpd_ros[off].keyword[0] = byte;
972 if (vpd_nextbyte(&vrs, &byte2)) {
976 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
977 if (vpd_nextbyte(&vrs, &byte2)) {
981 cfg->vpd.vpd_ros[off].len = dflen = byte2;
983 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
986 * if this happens, we can't trust the rest
990 "pci%d:%d:%d:%d: bad keyword length: %d\n",
991 cfg->domain, cfg->bus, cfg->slot,
996 } else if (dflen == 0) {
997 cfg->vpd.vpd_ros[off].value = malloc(1 *
998 sizeof(*cfg->vpd.vpd_ros[off].value),
1000 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1002 cfg->vpd.vpd_ros[off].value = malloc(
1004 sizeof(*cfg->vpd.vpd_ros[off].value),
1005 M_DEVBUF, M_WAITOK);
1008 /* keep in sync w/ state 3's transistions */
1009 if (dflen == 0 && remain == 0)
1011 else if (dflen == 0)
1017 case 3: /* VPD-R Keyword Value */
1018 cfg->vpd.vpd_ros[off].value[i++] = byte;
/* The "RV" keyword's first byte completes the section checksum. */
1019 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1020 "RV", 2) == 0 && cksumvalid == -1) {
1026 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
1027 cfg->domain, cfg->bus,
1028 cfg->slot, cfg->func,
1037 /* keep in sync w/ state 2's transistions */
1039 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1040 if (dflen == 0 && remain == 0) {
1041 cfg->vpd.vpd_rocnt = off;
/* Shrink the array to its final element count. */
1042 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1043 off * sizeof(*cfg->vpd.vpd_ros),
1044 M_DEVBUF, M_WAITOK | M_ZERO);
1046 } else if (dflen == 0)
1056 case 5: /* VPD-W Keyword Header */
1058 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1059 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1060 M_DEVBUF, M_WAITOK | M_ZERO);
1062 cfg->vpd.vpd_w[off].keyword[0] = byte;
1063 if (vpd_nextbyte(&vrs, &byte2)) {
1067 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1068 if (vpd_nextbyte(&vrs, &byte2)) {
1072 cfg->vpd.vpd_w[off].len = dflen = byte2;
1073 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1074 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1075 sizeof(*cfg->vpd.vpd_w[off].value),
1076 M_DEVBUF, M_WAITOK);
1079 /* keep in sync w/ state 6's transistions */
1080 if (dflen == 0 && remain == 0)
1082 else if (dflen == 0)
1088 case 6: /* VPD-W Keyword Value */
1089 cfg->vpd.vpd_w[off].value[i++] = byte;
1092 /* keep in sync w/ state 5's transistions */
1094 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1095 if (dflen == 0 && remain == 0) {
1096 cfg->vpd.vpd_wcnt = off;
1097 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1098 off * sizeof(*cfg->vpd.vpd_w),
1099 M_DEVBUF, M_WAITOK | M_ZERO);
1101 } else if (dflen == 0)
1106 printf("pci%d:%d:%d:%d: invalid state: %d\n",
1107 cfg->domain, cfg->bus, cfg->slot, cfg->func,
/* Cleanup: bad read-only data invalidates the vpd_ros array only. */
1114 if (cksumvalid == 0 || state < -1) {
1115 /* read-only data bad, clean up */
1116 if (cfg->vpd.vpd_ros != NULL) {
1117 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1118 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1119 free(cfg->vpd.vpd_ros, M_DEVBUF);
1120 cfg->vpd.vpd_ros = NULL;
1124 /* I/O error, clean up */
1125 printf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1126 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1127 if (cfg->vpd.vpd_ident != NULL) {
1128 free(cfg->vpd.vpd_ident, M_DEVBUF);
1129 cfg->vpd.vpd_ident = NULL;
1131 if (cfg->vpd.vpd_w != NULL) {
1132 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1133 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1134 free(cfg->vpd.vpd_w, M_DEVBUF);
1135 cfg->vpd.vpd_w = NULL;
/* Mark VPD as parsed so subsequent queries skip the hardware read. */
1138 cfg->vpd.vpd_cached = 1;
/*
 * Returns the cached VPD identifier string via *identptr, lazily parsing
 * VPD on first use; the ENXIO/0 return lines are elided in this view.
 */
1144 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1146 struct pci_devinfo *dinfo = device_get_ivars(child);
1147 pcicfgregs *cfg = &dinfo->cfg;
1149 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1150 pci_read_vpd(device_get_parent(dev), cfg);
1152 *identptr = cfg->vpd.vpd_ident;
1154 if (*identptr == NULL)
/*
 * Looks up a two-character read-only VPD keyword (e.g. "PN", "SN") and
 * returns its value via *vptr, lazily parsing VPD on first use. The
 * not-found return path is elided in this view.
 */
1161 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1164 struct pci_devinfo *dinfo = device_get_ivars(child);
1165 pcicfgregs *cfg = &dinfo->cfg;
1168 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1169 pci_read_vpd(device_get_parent(dev), cfg);
1171 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1172 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1173 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1174 *vptr = cfg->vpd.vpd_ros[i].value;
/*
 * Ensures the device's VPD has been parsed. Note: called on the device
 * itself, so the host bridge is two parents up (grandparent), unlike the
 * *_method variants above which are invoked on the bus.
 */
1183 pci_fetch_vpd_list(device_t dev)
1185 struct pci_devinfo *dinfo = device_get_ivars(dev);
1186 pcicfgregs *cfg = &dinfo->cfg;
1188 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1189 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1194 * Find the requested extended capability and return the offset in
1195 * configuration space via the pointer provided. The function returns
1196 * 0 on success and error code otherwise.
/*
 * Classic capability-list walk (same traversal as pci_read_cap, but over
 * live config reads). Case labels and the loop termination/ENOENT return
 * are elided in this view.
 */
1199 pci_find_extcap_method(device_t dev, device_t child, int capability,
1202 struct pci_devinfo *dinfo = device_get_ivars(child);
1203 pcicfgregs *cfg = &dinfo->cfg;
1208 * Check the CAP_LIST bit of the PCI status register first.
1210 status = pci_read_config(child, PCIR_STATUS, 2);
1211 if (!(status & PCIM_STATUS_CAPPRESENT))
1215 * Determine the start pointer of the capabilities list.
1217 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1223 ptr = PCIR_CAP_PTR_2;
1227 return (ENXIO); /* no extended capabilities support */
1229 ptr = pci_read_config(child, ptr, 1);
1232 * Traverse the capabilities list.
1235 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1240 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1247 * Support for MSI-X message interrupts.
/*
 * Programs one 16-byte MSI-X table entry (address low/high, data) in the
 * memory-mapped vector table; vector control word at offset +12 is left to
 * pci_mask_msix/pci_unmask_msix.
 */
1250 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1252 struct pci_devinfo *dinfo = device_get_ivars(dev);
1253 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1256 KASSERT(msix->msix_table_len > index, ("bogus index"));
1257 offset = msix->msix_table_offset + index * 16;
1258 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1259 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1260 bus_write_4(msix->msix_table_res, offset + 8, data);
1262 /* Enable MSI -> HT mapping. */
1263 pci_ht_map_msi(dev, address);
/*
 * Sets the per-vector mask bit in the MSI-X table entry's vector control
 * word (offset +12); read-modify-write avoids a redundant store when the
 * vector is already masked.
 * NOTE(review): the bounds KASSERT here checks msix_msgnum while
 * pci_enable_msix/pci_unmask_msix check msix_table_len — confirm whether
 * the asymmetry is intentional (resume masks all msgnum vectors).
 */
1267 pci_mask_msix(device_t dev, u_int index)
1269 struct pci_devinfo *dinfo = device_get_ivars(dev);
1270 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1271 uint32_t offset, val;
1273 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1274 offset = msix->msix_table_offset + index * 16 + 12;
1275 val = bus_read_4(msix->msix_table_res, offset);
1276 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1277 val |= PCIM_MSIX_VCTRL_MASK;
1278 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Clears the per-vector mask bit in the MSI-X vector control word;
 * exact inverse of pci_mask_msix().
 */
1283 pci_unmask_msix(device_t dev, u_int index)
1285 struct pci_devinfo *dinfo = device_get_ivars(dev);
1286 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1287 uint32_t offset, val;
1289 KASSERT(msix->msix_table_len > index, ("bogus index"));
1290 offset = msix->msix_table_offset + index * 16 + 12;
1291 val = bus_read_4(msix->msix_table_res, offset);
1292 if (val & PCIM_MSIX_VCTRL_MASK) {
1293 val &= ~PCIM_MSIX_VCTRL_MASK;
1294 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Tests the vector's bit in the Pending Bit Array: one bit per vector,
 * packed 32 per 4-byte word.
 */
1299 pci_pending_msix(device_t dev, u_int index)
1301 struct pci_devinfo *dinfo = device_get_ivars(dev);
1302 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1303 uint32_t offset, bit;
1305 KASSERT(msix->msix_table_len > index, ("bogus index"));
1306 offset = msix->msix_pba_offset + (index / 32) * 4;
1307 bit = 1 << index % 32;
1308 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1312 * Restore MSI-X registers and table during resume. If MSI-X is
1313 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * ... (continuation of the comment above is elided). Masks every vector,
 * then reprograms and unmasks only those with a bound handler, and finally
 * restores the MSI-X control register from the cached copy.
 */
1317 pci_resume_msix(device_t dev)
1319 struct pci_devinfo *dinfo = device_get_ivars(dev);
1320 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1321 struct msix_table_entry *mte;
1322 struct msix_vector *mv;
1325 if (msix->msix_alloc > 0) {
1326 /* First, mask all vectors. */
1327 for (i = 0; i < msix->msix_msgnum; i++)
1328 pci_mask_msix(dev, i);
1330 /* Second, program any messages with at least one handler. */
1331 for (i = 0; i < msix->msix_table_len; i++) {
1332 mte = &msix->msix_table[i];
1333 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is 1-based; index into msix_vectors accordingly. */
1335 mv = &msix->msix_vectors[mte->mte_vector - 1];
1336 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1337 pci_unmask_msix(dev, i);
1340 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1341 msix->msix_ctrl, 2);
1345 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1346 * returned in *count. After this function returns, each message will be
1347 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1350 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1352 struct pci_devinfo *dinfo = device_get_ivars(child);
1353 pcicfgregs *cfg = &dinfo->cfg;
1354 struct resource_list_entry *rle;
1355 int actual, error, i, irq, max;
1357 /* Don't let count == 0 get us into trouble. */
1361 /* If rid 0 is allocated, then fail. */
1362 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1363 if (rle != NULL && rle->res != NULL)
1366 /* Already have allocated messages? */
1367 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1370 /* If MSI is blacklisted for this system, fail. */
1371 if (pci_msi_blacklisted())
1374 /* MSI-X capability present? */
1375 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1378 /* Make sure the appropriate BARs are mapped. */
1379 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1380 cfg->msix.msix_table_bar);
/* The table BAR must already be allocated and active by the driver. */
1381 if (rle == NULL || rle->res == NULL ||
1382 !(rman_get_flags(rle->res) & RF_ACTIVE))
1384 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1385 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1386 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1387 cfg->msix.msix_pba_bar);
1388 if (rle == NULL || rle->res == NULL ||
1389 !(rman_get_flags(rle->res) & RF_ACTIVE))
1392 cfg->msix.msix_pba_res = rle->res;
1395 device_printf(child,
1396 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1397 *count, cfg->msix.msix_msgnum);
/* Never ask the parent bridge for more than the device supports. */
1398 max = min(*count, cfg->msix.msix_msgnum);
1399 for (i = 0; i < max; i++) {
1400 /* Allocate a message. */
1401 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1407 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1413 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1415 device_printf(child, "using IRQ %lu for MSI-X\n",
1421 * Be fancy and try to print contiguous runs of
1422 * IRQ values as ranges. 'irq' is the previous IRQ.
1423 * 'run' is true if we are in a range.
1425 device_printf(child, "using IRQs %lu", rle->start);
1428 for (i = 1; i < actual; i++) {
1429 rle = resource_list_find(&dinfo->resources,
1430 SYS_RES_IRQ, i + 1);
1432 /* Still in a run? */
1433 if (rle->start == irq + 1) {
1439 /* Finish previous range. */
1445 /* Start new range. */
1446 printf(",%lu", rle->start);
1450 /* Unfinished range? */
1453 printf(" for MSI-X\n");
1457 /* Mask all vectors. */
1458 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1459 pci_mask_msix(child, i);
1461 /* Allocate and initialize vector data and virtual table. */
1462 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1463 M_DEVBUF, M_WAITOK | M_ZERO);
1464 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1465 M_DEVBUF, M_WAITOK | M_ZERO);
/* Initially the virtual table maps message i to vector i + 1 (1-based). */
1466 for (i = 0; i < actual; i++) {
1467 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1468 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1469 cfg->msix.msix_table[i].mte_vector = i + 1;
1472 /* Update control register to enable MSI-X. */
1473 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1474 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1475 cfg->msix.msix_ctrl, 2);
1477 /* Update counts of alloc'd messages. */
1478 cfg->msix.msix_alloc = actual;
1479 cfg->msix.msix_table_len = actual;
1485 * By default, pci_alloc_msix() will assign the allocated IRQ
1486 * resources consecutively to the first N messages in the MSI-X table.
1487 * However, device drivers may want to use different layouts if they
1488 * either receive fewer messages than they asked for, or they wish to
1489 * populate the MSI-X table sparsely. This method allows the driver
1490 * to specify what layout it wants. It must be called after a
1491 * successful pci_alloc_msix() but before any of the associated
1492 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1494 * The 'vectors' array contains 'count' message vectors. The array
1495 * maps directly to the MSI-X table in that index 0 in the array
1496 * specifies the vector for the first message in the MSI-X table, etc.
1497 * The vector value in each array index can either be 0 to indicate
1498 * that no vector should be assigned to a message slot, or it can be a
1499 * number from 1 to N (where N is the count returned from a
1500 * successful call to pci_alloc_msix()) to indicate which message
1501 * vector (IRQ) to be used for the corresponding message.
1503 * On successful return, each message with a non-zero vector will have
1504 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1505 * 1. Additionally, if any of the IRQs allocated via the previous
1506 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1507 * will be freed back to the system automatically.
1509 * For example, suppose a driver has a MSI-X table with 6 messages and
1510 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1511 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1512 * C. After the call to pci_alloc_msix(), the device will be setup to
1513 * have an MSI-X table of ABC--- (where - means no vector assigned).
1514 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1515 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1516 * be freed back to the system. This device will also have valid
1517 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1519 * In any case, the SYS_RES_IRQ rid X will always map to the message
1520 * at MSI-X table index X - 1 and will only be valid if a vector is
1521 * assigned to that table entry.
/*
 * Remap the virtual MSI-X table according to 'vectors' (see the block
 * comment above for the full contract).  Vector numbers in 'vectors'
 * are 1-based; 0 means "no vector for this message slot".
 */
1524 pci_remap_msix_method(device_t dev, device_t child, int count,
1525     const u_int *vectors)
1527 struct pci_devinfo *dinfo = device_get_ivars(child);
1528 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1529 struct resource_list_entry *rle;
1530 int i, irq, j, *used;
1533 * Have to have at least one message in the table but the
1534 * table can't be bigger than the actual MSI-X table in the
1537 if (count == 0 || count > msix->msix_msgnum)
1540 /* Sanity check the vectors. */
1541 for (i = 0; i < count; i++)
1542 if (vectors[i] > msix->msix_alloc)
1546 * Make sure there aren't any holes in the vectors to be used.
1547 * It's a big pain to support it, and it doesn't really make
1548 * sense anyway. Also, at least one vector must be used.
1550 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1552 for (i = 0; i < count; i++)
1553 if (vectors[i] != 0)
1554 used[vectors[i] - 1] = 1;
/* A hole exists if an unused vector is followed by a used one. */
1555 for (i = 0; i < msix->msix_alloc - 1; i++)
1556 if (used[i] == 0 && used[i + 1] == 1) {
1557 free(used, M_DEVBUF);
1561 free(used, M_DEVBUF);
1565 /* Make sure none of the resources are allocated. */
1566 for (i = 0; i < msix->msix_table_len; i++) {
1567 if (msix->msix_table[i].mte_vector == 0)
1569 if (msix->msix_table[i].mte_handlers > 0)
1571 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1572 KASSERT(rle != NULL, ("missing resource"));
1573 if (rle->res != NULL)
1577 /* Free the existing resource list entries. */
1578 for (i = 0; i < msix->msix_table_len; i++) {
1579 if (msix->msix_table[i].mte_vector == 0)
1581 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1585 * Build the new virtual table keeping track of which vectors are
1588 free(msix->msix_table, M_DEVBUF);
1589 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1590 M_DEVBUF, M_WAITOK | M_ZERO);
1591 for (i = 0; i < count; i++)
1592 msix->msix_table[i].mte_vector = vectors[i];
1593 msix->msix_table_len = count;
1595 /* Free any unused IRQs and resize the vectors array if necessary. */
1596 j = msix->msix_alloc - 1;
1598 struct msix_vector *vec;
/* Release trailing unused vectors back to the parent bridge. */
1600 while (used[j] == 0) {
1601 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1602 msix->msix_vectors[j].mv_irq);
1605 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1607 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1609 free(msix->msix_vectors, M_DEVBUF);
1610 msix->msix_vectors = vec;
1611 msix->msix_alloc = j + 1;
1613 free(used, M_DEVBUF);
1615 /* Map the IRQs onto the rids. */
1616 for (i = 0; i < count; i++) {
1617 if (vectors[i] == 0)
/* vectors[i] is 1-based; subtract 1 to index msix_vectors[]. */
1619 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1620 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1625 device_printf(child, "Remapped MSI-X IRQs as: ");
1626 for (i = 0; i < count; i++) {
1629 if (vectors[i] == 0)
1633 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Release all MSI-X messages allocated to 'child': disable MSI-X in the
 * control register, delete the SYS_RES_IRQ resource list entries, and
 * hand the IRQs back to the parent bridge.  Fails if any message still
 * has a handler or an allocated resource.
 */
1642 pci_release_msix(device_t dev, device_t child)
1644 struct pci_devinfo *dinfo = device_get_ivars(child);
1645 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1646 struct resource_list_entry *rle;
1649 /* Do we have any messages to release? */
1650 if (msix->msix_alloc == 0)
1653 /* Make sure none of the resources are allocated. */
1654 for (i = 0; i < msix->msix_table_len; i++) {
1655 if (msix->msix_table[i].mte_vector == 0)
1657 if (msix->msix_table[i].mte_handlers > 0)
1659 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1660 KASSERT(rle != NULL, ("missing resource"));
1661 if (rle->res != NULL)
1665 /* Update control register to disable MSI-X. */
1666 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1667 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1668 msix->msix_ctrl, 2);
1670 /* Free the resource list entries. */
1671 for (i = 0; i < msix->msix_table_len; i++) {
1672 if (msix->msix_table[i].mte_vector == 0)
1674 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1676 free(msix->msix_table, M_DEVBUF);
1677 msix->msix_table_len = 0;
1679 /* Release the IRQs. */
1680 for (i = 0; i < msix->msix_alloc; i++)
1681 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1682 msix->msix_vectors[i].mv_irq);
1683 free(msix->msix_vectors, M_DEVBUF);
1684 msix->msix_alloc = 0;
1689 * Return the max number of MSI-X messages this device supports.
1690 * Basically, assuming the MD code can alloc messages, this function
1691 * should return the maximum value that pci_alloc_msix() can return.
1692 * Thus, it is subject to the tunables, etc.
1695 pci_msix_count_method(device_t dev, device_t child)
1697 struct pci_devinfo *dinfo = device_get_ivars(child);
1698 struct pcicfg_msix *msix = &dinfo->cfg.msix;
/* Only report messages when MSI-X is enabled and the capability exists. */
1700 if (pci_do_msix && msix->msix_location != 0)
1701 return (msix->msix_msgnum);
1706 * HyperTransport MSI mapping control
/*
 * Enable the HT MSI mapping window when a non-zero MSI address matching
 * the window (compared at 1MB granularity via the >> 20 shift) is
 * programmed, and disable it again when MSI is turned off (addr == 0).
 */
1709 pci_ht_map_msi(device_t dev, uint64_t addr)
1711 struct pci_devinfo *dinfo = device_get_ivars(dev);
1712 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1717 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1718 ht->ht_msiaddr >> 20 == addr >> 20) {
1719 /* Enable MSI -> HT mapping. */
1720 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1721 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1725 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1726 /* Disable MSI -> HT mapping. */
1727 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1728 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
/*
 * Return the PCI Express maximum read request size in bytes, decoded
 * from the Device Control register (field value v maps to 2^(v+7)
 * bytes, i.e. 128 << v).  Non-PCIe devices take the pci_find_cap()
 * failure path.
 */
1734 pci_get_max_read_req(device_t dev)
1739 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) != 0)
1741 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1742 val &= PCIEM_CTL_MAX_READ_REQUEST;
/* NOTE(review): the field shift before use appears elided here — the
 * encoded value must be moved to bit 0 before the expression below. */
1744 return (1 << (val + 7));
/*
 * Set the PCI Express maximum read request size.  'size' is rounded
 * down to a power of two and encoded into bits 14:12 of the Device
 * Control register (encoding fls(size) - 8).
 */
1748 pci_set_max_read_req(device_t dev, int size)
1753 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) != 0)
/* Round size down to the nearest power of two. */
1759 size = (1 << (fls(size) - 1));
1760 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1761 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1762 val |= (fls(size) - 8) << 12;
1763 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1768 * Support for MSI message signalled interrupts.
/*
 * Program the MSI address/data registers and enable MSI in the message
 * control register.  64-bit capable functions use the separate
 * high-address and 64-bit data register offsets.
 */
1771 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1773 struct pci_devinfo *dinfo = device_get_ivars(dev);
1774 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1776 /* Write data and address values. */
1777 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1778 address & 0xffffffff, 4);
1779 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1780 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1782 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1785 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1788 /* Enable MSI in the control register. */
1789 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1790 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1793 /* Enable MSI -> HT mapping. */
1794 pci_ht_map_msi(dev, address);
/*
 * Disable MSI: tear down any HT mapping first (addr == 0 requests
 * disable in pci_ht_map_msi()), then clear the MSI enable bit.
 */
1798 pci_disable_msi(device_t dev)
1800 struct pci_devinfo *dinfo = device_get_ivars(dev);
1801 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1803 /* Disable MSI -> HT mapping. */
1804 pci_ht_map_msi(dev, 0);
1806 /* Disable MSI in the control register. */
1807 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1808 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1813 * Restore MSI registers during resume. If MSI is enabled then
1814 * restore the data and address registers in addition to the control
1818 pci_resume_msi(device_t dev)
1820 struct pci_devinfo *dinfo = device_get_ivars(dev);
1821 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Replay the saved address/data only if MSI was enabled at suspend. */
1825 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1826 address = msi->msi_addr;
1827 data = msi->msi_data;
1828 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1829 address & 0xffffffff, 4);
1830 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1831 pci_write_config(dev, msi->msi_location +
1832 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1833 pci_write_config(dev, msi->msi_location +
1834 PCIR_MSI_DATA_64BIT, data, 2);
1836 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
/* The control register is restored unconditionally. */
1839 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Re-program a moved MSI or MSI-X IRQ: look up 'irq' among this
 * device's allocated messages, ask the parent bridge for fresh
 * address/data values, and rewrite the affected registers/table slots.
 */
1844 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1846 struct pci_devinfo *dinfo = device_get_ivars(dev);
1847 pcicfgregs *cfg = &dinfo->cfg;
1848 struct resource_list_entry *rle;
1849 struct msix_table_entry *mte;
1850 struct msix_vector *mv;
1856 * Handle MSI first. We try to find this IRQ among our list
1857 * of MSI IRQs. If we find it, we request updated address and
1858 * data registers and apply the results.
1860 if (cfg->msi.msi_alloc > 0) {
1862 /* If we don't have any active handlers, nothing to do. */
1863 if (cfg->msi.msi_handlers == 0)
1865 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1866 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1868 if (rle->start == irq) {
1869 error = PCIB_MAP_MSI(device_get_parent(bus),
1870 dev, irq, &addr, &data);
/* Disable, update the cached values, then re-enable with new ones. */
1873 pci_disable_msi(dev);
1874 dinfo->cfg.msi.msi_addr = addr;
1875 dinfo->cfg.msi.msi_data = data;
1876 pci_enable_msi(dev, addr, data);
1884 * For MSI-X, we check to see if we have this IRQ. If we do,
1885 * we request the updated mapping info. If that works, we go
1886 * through all the slots that use this IRQ and update them.
1888 if (cfg->msix.msix_alloc > 0) {
1889 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1890 mv = &cfg->msix.msix_vectors[i];
1891 if (mv->mv_irq == irq) {
1892 error = PCIB_MAP_MSI(device_get_parent(bus),
1893 dev, irq, &addr, &data);
1896 mv->mv_address = addr;
/* Rewrite every table slot that maps to this vector (1-based i + 1). */
1898 for (j = 0; j < cfg->msix.msix_table_len; j++) {
1899 mte = &cfg->msix.msix_table[j];
1900 if (mte->mte_vector != i + 1)
1902 if (mte->mte_handlers == 0)
1904 pci_mask_msix(dev, j);
1905 pci_enable_msix(dev, j, addr, data);
1906 pci_unmask_msix(dev, j);
1917 * Returns true if the specified device is blacklisted because MSI
/*
 * The check walks the static pci_quirks table looking for a
 * PCI_QUIRK_DISABLE_MSI entry matching this device's devid.
 */
1921 pci_msi_device_blacklisted(device_t dev)
1923 const struct pci_quirk *q;
/* Honor the global tunable that disables blacklist processing. */
1925 if (!pci_honor_msi_blacklist)
1928 for (q = &pci_quirks[0]; q->devid; q++) {
1929 if (q->devid == pci_get_devid(dev) &&
1930 q->type == PCI_QUIRK_DISABLE_MSI)
1937 * Returns true if a specified chipset supports MSI when it is
1938 * emulated hardware in a virtual machine.
1941 pci_msi_vm_chipset(device_t dev)
1943 const struct pci_quirk *q;
/* Look for a PCI_QUIRK_ENABLE_MSI_VM entry for this devid. */
1945 for (q = &pci_quirks[0]; q->devid; q++) {
1946 if (q->devid == pci_get_devid(dev) &&
1947 q->type == PCI_QUIRK_ENABLE_MSI_VM)
1954 * Determine if MSI is blacklisted globally on this system. Currently,
1955 * we just check for blacklisted chipsets as represented by the
1956 * host-PCI bridge at device 0:0:0. In the future, it may become
1957 * necessary to check other system attributes, such as the kenv values
1958 * that give the motherboard manufacturer and model number.
1961 pci_msi_blacklisted(void)
1965 if (!pci_honor_msi_blacklist)
1968 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1969 if (!(pcie_chipset || pcix_chipset)) {
/* Inside a VM, allow MSI only for chipsets quirked as VM-safe. */
1970 if (vm_guest != VM_GUEST_NO) {
1971 dev = pci_find_bsf(0, 0, 0)
1973 return (pci_msi_vm_chipset(dev) == 0);
/* Otherwise consult the per-device quirk list for the host bridge. */
1978 dev = pci_find_bsf(0, 0, 0);
1980 return (pci_msi_device_blacklisted(dev));
1985 * Attempt to allocate *count MSI messages. The actual number allocated is
1986 * returned in *count. After this function returns, each message will be
1987 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
1990 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1992 struct pci_devinfo *dinfo = device_get_ivars(child);
1993 pcicfgregs *cfg = &dinfo->cfg;
1994 struct resource_list_entry *rle;
1995 int actual, error, i, irqs[32];
1998 /* Don't let count == 0 get us into trouble. */
2002 /* If rid 0 is allocated, then fail. */
2003 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2004 if (rle != NULL && rle->res != NULL)
2007 /* Already have allocated messages? */
2008 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2011 /* If MSI is blacklisted for this system, fail. */
2012 if (pci_msi_blacklisted())
2015 /* MSI capability present? */
2016 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2020 device_printf(child,
2021 "attempting to allocate %d MSI vectors (%d supported)\n",
2022 *count, cfg->msi.msi_msgnum);
2024 /* Don't ask for more than the device supports. */
2025 actual = min(*count, cfg->msi.msi_msgnum);
2027 /* Don't ask for more than 32 messages. */
2028 actual = min(actual, 32);
2030 /* MSI requires power of 2 number of messages. */
2031 if (!powerof2(actual))
2035 /* Try to allocate N messages. */
2036 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2048 * We now have N actual messages mapped onto SYS_RES_IRQ
2049 * resources in the irqs[] array, so add new resources
2050 * starting at rid 1.
2052 for (i = 0; i < actual; i++)
2053 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2054 irqs[i], irqs[i], 1);
2058 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2063 * Be fancy and try to print contiguous runs
2064 * of IRQ values as ranges. 'run' is true if
2065 * we are in a range.
2067 device_printf(child, "using IRQs %d", irqs[0]);
2069 for (i = 1; i < actual; i++) {
2071 /* Still in a run? */
2072 if (irqs[i] == irqs[i - 1] + 1) {
2077 /* Finish previous range. */
2079 printf("-%d", irqs[i - 1]);
2083 /* Start new range. */
2084 printf(",%d", irqs[i]);
2087 /* Unfinished range? */
2089 printf("-%d", irqs[actual - 1]);
2090 printf(" for MSI\n");
2094 /* Update control register with actual count. */
2095 ctrl = cfg->msi.msi_ctrl;
2096 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* Multiple Message Enable is log2(count) in bits 6:4 of MSI control. */
2097 ctrl |= (ffs(actual) - 1) << 4;
2098 cfg->msi.msi_ctrl = ctrl;
2099 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2101 /* Update counts of alloc'd messages. */
2102 cfg->msi.msi_alloc = actual;
2103 cfg->msi.msi_handlers = 0;
2108 /* Release the MSI messages associated with this device. */
2110 pci_release_msi_method(device_t dev, device_t child)
2112 struct pci_devinfo *dinfo = device_get_ivars(child);
2113 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2114 struct resource_list_entry *rle;
2115 int error, i, irqs[32];
2117 /* Try MSI-X first. */
2118 error = pci_release_msix(dev, child);
2119 if (error != ENODEV)
2122 /* Do we have any messages to release? */
2123 if (msi->msi_alloc == 0)
/* irqs[] below is fixed at 32 entries; the allocator enforces this cap. */
2125 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2127 /* Make sure none of the resources are allocated. */
2128 if (msi->msi_handlers > 0)
2130 for (i = 0; i < msi->msi_alloc; i++) {
2131 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2132 KASSERT(rle != NULL, ("missing MSI resource"));
2133 if (rle->res != NULL)
2135 irqs[i] = rle->start;
2138 /* Update control register with 0 count. */
2139 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2140 ("%s: MSI still enabled", __func__));
2141 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2142 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2145 /* Release the messages. */
2146 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2147 for (i = 0; i < msi->msi_alloc; i++)
2148 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2150 /* Update alloc count. */
2158 * Return the max supported MSI messages this device supports.
2159 * Basically, assuming the MD code can alloc messages, this function
2160 * should return the maximum value that pci_alloc_msi() can return.
2161 * Thus, it is subject to the tunables, etc.
2164 pci_msi_count_method(device_t dev, device_t child)
2166 struct pci_devinfo *dinfo = device_get_ivars(child);
2167 struct pcicfg_msi *msi = &dinfo->cfg.msi;
/* Only report messages when MSI is enabled and the capability exists. */
2169 if (pci_do_msi && msi->msi_location != 0)
2170 return (msi->msi_msgnum);
2174 /* free pcicfgregs structure and all depending data structures */
2177 pci_freecfg(struct pci_devinfo *dinfo)
2179 struct devlist *devlist_head;
2180 struct pci_map *pm, *next;
2183 devlist_head = &pci_devq;
/* Free the VPD identifier plus every read-only and read/write keyword. */
2185 if (dinfo->cfg.vpd.vpd_reg) {
2186 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2187 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2188 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2189 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2190 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2191 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2192 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* Use the _SAFE variant since entries are freed during the walk. */
2194 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2197 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2198 free(dinfo, M_DEVBUF);
2200 /* increment the generation count */
2203 /* we're losing one device */
2209 * PCI power management
/*
 * Move 'child' to the requested D-state.  Returns EOPNOTSUPP when the
 * device lacks the PM capability or does not implement the target
 * state (D1/D2 support is optional per the capability bits).
 */
2212 pci_set_powerstate_method(device_t dev, device_t child, int state)
2214 struct pci_devinfo *dinfo = device_get_ivars(child);
2215 pcicfgregs *cfg = &dinfo->cfg;
2217 int result, oldstate, highest, delay;
2219 if (cfg->pp.pp_cap == 0)
2220 return (EOPNOTSUPP);
2223 * Optimize a no state change request away. While it would be OK to
2224 * write to the hardware in theory, some devices have shown odd
2225 * behavior when going from D3 -> D3.
2227 oldstate = pci_get_powerstate(child);
2228 if (oldstate == state)
2232 * The PCI power management specification states that after a state
2233 * transition between PCI power states, system software must
2234 * guarantee a minimal delay before the function accesses the device.
2235 * Compute the worst case delay that we need to guarantee before we
2236 * access the device. Many devices will be responsive much more
2237 * quickly than this delay, but there are some that don't respond
2238 * instantly to state changes. Transitions to/from D3 state require
2239 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2240 * is done below with DELAY rather than a sleeper function because
2241 * this function can be called from contexts where we cannot sleep.
2243 highest = (oldstate > state) ? oldstate : state;
2244 if (highest == PCI_POWERSTATE_D3)
2246 else if (highest == PCI_POWERSTATE_D2)
/* Preserve the other status bits; only the D-state field changes. */
2250 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2251 & ~PCIM_PSTAT_DMASK;
2254 case PCI_POWERSTATE_D0:
2255 status |= PCIM_PSTAT_D0;
2257 case PCI_POWERSTATE_D1:
2258 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2259 return (EOPNOTSUPP);
2260 status |= PCIM_PSTAT_D1;
2262 case PCI_POWERSTATE_D2:
2263 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2264 return (EOPNOTSUPP);
2265 status |= PCIM_PSTAT_D2;
2267 case PCI_POWERSTATE_D3:
2268 status |= PCIM_PSTAT_D3;
2275 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2278 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Return the child's current PCI power state (PCI_POWERSTATE_D0..D3),
 * decoded from the PM status register.  Devices without the PM
 * capability are reported as always being in D0.
 */
2285 pci_get_powerstate_method(device_t dev, device_t child)
2287 struct pci_devinfo *dinfo = device_get_ivars(child);
2288 pcicfgregs *cfg = &dinfo->cfg;
2292 if (cfg->pp.pp_cap != 0) {
2293 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2294 switch (status & PCIM_PSTAT_DMASK) {
2296 result = PCI_POWERSTATE_D0;
2299 result = PCI_POWERSTATE_D1;
2302 result = PCI_POWERSTATE_D2;
2305 result = PCI_POWERSTATE_D3;
2308 result = PCI_POWERSTATE_UNKNOWN;
2312 /* No support, device is always at D0 */
2313 result = PCI_POWERSTATE_D0;
2319 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the child's command register. */
2322 static __inline void
2323 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2327 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2329 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the child's command register. */
2332 static __inline void
2333 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2337 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2339 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable bus mastering for the child device. */
2343 pci_enable_busmaster_method(device_t dev, device_t child)
2345 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable bus mastering for the child device. */
2350 pci_disable_busmaster_method(device_t dev, device_t child)
2352 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Enable I/O port or memory decoding for the child, selected by
 * 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2357 pci_enable_io_method(device_t dev, device_t child, int space)
2362 case SYS_RES_IOPORT:
2363 bit = PCIM_CMD_PORTEN;
2365 case SYS_RES_MEMORY:
2366 bit = PCIM_CMD_MEMEN;
2371 pci_set_command_bit(dev, child, bit);
/*
 * Disable I/O port or memory decoding for the child, selected by
 * 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2376 pci_disable_io_method(device_t dev, device_t child, int space)
2381 case SYS_RES_IOPORT:
2382 bit = PCIM_CMD_PORTEN;
2384 case SYS_RES_MEMORY:
2385 bit = PCIM_CMD_MEMEN;
2390 pci_clear_command_bit(dev, child, bit);
2395 * New style pci driver. Parent device is either a pci-host-bridge or a
2396 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a device's config-space summary to the console: IDs, location,
 * class, command/status, timers, interrupt routing, and the power
 * management, MSI, and MSI-X capabilities if present.
 */
2400 pci_print_verbose(struct pci_devinfo *dinfo)
2404 pcicfgregs *cfg = &dinfo->cfg;
2406 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2407 cfg->vendor, cfg->device, cfg->revid);
2408 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2409 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2410 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2411 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2413 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2414 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2415 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2416 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2417 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2418 if (cfg->intpin > 0)
2419 printf("\tintpin=%c, irq=%d\n",
2420 cfg->intpin +'a' -1, cfg->intline);
2421 if (cfg->pp.pp_cap) {
2424 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2425 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2426 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2427 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2428 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2429 status & PCIM_PSTAT_DMASK);
2431 if (cfg->msi.msi_location) {
2434 ctrl = cfg->msi.msi_ctrl;
2435 printf("\tMSI supports %d message%s%s%s\n",
2436 cfg->msi.msi_msgnum,
2437 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2438 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2439 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2441 if (cfg->msix.msix_location) {
2442 printf("\tMSI-X supports %d message%s ",
2443 cfg->msix.msix_msgnum,
2444 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2445 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2446 printf("in map 0x%x\n",
2447 cfg->msix.msix_table_bar);
2449 printf("in maps 0x%x and 0x%x\n",
2450 cfg->msix.msix_table_bar,
2451 cfg->msix.msix_pba_bar);
/* Return non-zero if I/O port decoding is enabled in the command register. */
2457 pci_porten(device_t dev)
2459 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Return non-zero if memory decoding is enabled in the command register. */
2463 pci_memen(device_t dev)
2465 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Probe the BAR at 'reg': return its current value in *mapp and the
 * size-probe readback (all-ones write) in *testvalp.  Decoding is
 * temporarily disabled via the command register during the probe and
 * the original BAR contents are restored afterwards.
 */
2469 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2471 struct pci_devinfo *dinfo;
2472 pci_addr_t map, testval;
2477 * The device ROM BAR is special. It is always a 32-bit
2478 * memory BAR. Bit 0 is special and should not be set when
2481 dinfo = device_get_ivars(dev);
2482 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
2483 map = pci_read_config(dev, reg, 4);
/* 0xfffffffe keeps the ROM enable bit (bit 0) clear during the probe. */
2484 pci_write_config(dev, reg, 0xfffffffe, 4);
2485 testval = pci_read_config(dev, reg, 4);
2486 pci_write_config(dev, reg, map, 4);
2488 *testvalp = testval;
2492 map = pci_read_config(dev, reg, 4);
2493 ln2range = pci_maprange(map);
/* 64-bit BARs occupy two consecutive dwords. */
2495 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2498 * Disable decoding via the command register before
2499 * determining the BAR's length since we will be placing it in
2502 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2503 pci_write_config(dev, PCIR_COMMAND,
2504 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2507 * Determine the BAR's length by writing all 1's. The bottom
2508 * log_2(size) bits of the BAR will stick as 0 when we read
2511 pci_write_config(dev, reg, 0xffffffff, 4);
2512 testval = pci_read_config(dev, reg, 4);
2513 if (ln2range == 64) {
2514 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2515 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2519 * Restore the original value of the BAR. We may have reprogrammed
2520 * the BAR of the low-level console device and when booting verbose,
2521 * we need the console device addressable.
2523 pci_write_config(dev, reg, map, 4);
2525 pci_write_config(dev, reg + 4, map >> 32, 4);
2526 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2529 *testvalp = testval;
/*
 * Program the BAR described by 'pm' with 'base' and refresh the cached
 * pm_value from the hardware (both dwords for 64-bit BARs).
 */
2533 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2535 struct pci_devinfo *dinfo;
2538 /* The device ROM BAR is always a 32-bit memory BAR. */
2539 dinfo = device_get_ivars(dev);
2540 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2543 ln2range = pci_maprange(pm->pm_value);
2544 pci_write_config(dev, pm->pm_reg, base, 4);
2546 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2547 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2549 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2550 pm->pm_reg + 4, 4) << 32;
/* Look up the struct pci_map tracking BAR register 'reg', if any. */
2554 pci_find_bar(device_t dev, int reg)
2556 struct pci_devinfo *dinfo;
2559 dinfo = device_get_ivars(dev);
2560 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2561 if (pm->pm_reg == reg)
/*
 * Return non-zero if decoding is enabled for the BAR 'pm': a ROM BAR
 * additionally needs its own enable bit set; memory BARs need MEMEN
 * and I/O BARs need PORTEN in the command register.
 */
2568 pci_bar_enabled(device_t dev, struct pci_map *pm)
2570 struct pci_devinfo *dinfo;
2573 dinfo = device_get_ivars(dev);
2574 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2575 !(pm->pm_value & PCIM_BIOS_ENABLE))
2577 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2578 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2579 return ((cmd & PCIM_CMD_MEMEN) != 0);
2581 return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Record a new BAR in the device's map list, keeping the list sorted
 * by config-space register offset, and return the new entry.
 */
2584 static struct pci_map *
2585 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2587 struct pci_devinfo *dinfo;
2588 struct pci_map *pm, *prev;
2590 dinfo = device_get_ivars(dev);
2591 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2593 pm->pm_value = value;
/* Find the insertion point that keeps pm_reg values ascending. */
2595 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2596 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
2598 if (STAILQ_NEXT(prev, pm_link) == NULL ||
2599 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2603 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2605 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every saved BAR value back to the device (64-bit BARs include
 * the upper dword).  Used to restore decoding after the device loses its
 * configuration, e.g. across a power-state transition.
 */
2610 pci_restore_bars(device_t dev)
2612 struct pci_devinfo *dinfo;
2616 dinfo = device_get_ivars(dev);
2617 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2618 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2621 ln2range = pci_maprange(pm->pm_value);
2622 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2624 pci_write_config(dev, pm->pm_reg + 4,
2625 pm->pm_value >> 32, 4);
2630 * Add a resource based on a pci map register. Return 1 if the map
2631 * register is a 32bit map register or 2 if it is a 64bit register.
2634 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2635 int force, int prefetch)
2638 pci_addr_t base, map, testval;
2639 pci_addr_t start, end, count;
2640 int barlen, basezero, maprange, mapsize, type;
2642 struct resource *res;
2645 * The BAR may already exist if the device is a CardBus card
2646 * whose CIS is stored in this BAR.
2648 pm = pci_find_bar(dev, reg);
2650 maprange = pci_maprange(pm->pm_value);
2651 barlen = maprange == 64 ? 2 : 1;
2655 pci_read_bar(dev, reg, &map, &testval);
2656 if (PCI_BAR_MEM(map)) {
2657 type = SYS_RES_MEMORY;
2658 if (map & PCIM_BAR_MEM_PREFETCH)
2661 type = SYS_RES_IOPORT;
2662 mapsize = pci_mapsize(testval);
2663 base = pci_mapbase(map);
2664 #ifdef __PCI_BAR_ZERO_VALID
2667 basezero = base == 0;
2669 maprange = pci_maprange(map);
2670 barlen = maprange == 64 ? 2 : 1;
2673 * For I/O registers, if bottom bit is set, and the next bit up
2674 * isn't clear, we know we have a BAR that doesn't conform to the
2675 * spec, so ignore it. Also, sanity check the size of the data
2676 * areas to the type of memory involved. Memory must be at least
2677 * 16 bytes in size, while I/O ranges must be at least 4.
2679 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2681 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2682 (type == SYS_RES_IOPORT && mapsize < 2))
2685 /* Save a record of this BAR. */
2686 pm = pci_add_bar(dev, reg, map, mapsize)
2688 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2689 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2690 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2691 printf(", port disabled\n");
2692 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2693 printf(", memory disabled\n");
2695 printf(", enabled\n");
2699 * If base is 0, then we have problems if this architecture does
2700 * not allow that. It is best to ignore such entries for the
2701 * moment. These will be allocated later if the driver specifically
2702 * requests them. However, some removable busses look better when
2703 * all resources are allocated, so allow '0' to be overridden.
2705 * Similarly treat maps whose value is the same as the test value
2706 * read back. These maps have had all f's written to them by the
2707 * BIOS in an attempt to disable the resources.
2709 if (!force && (basezero || map == testval))
2711 if ((u_long)base != base) {
2713 "pci%d:%d:%d:%d bar %#x too many address bits",
2714 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2715 pci_get_function(dev), reg);
2720 * This code theoretically does the right thing, but has
2721 * undesirable side effects in some cases where peripherals
2722 * respond oddly to having these bits enabled. Let the user
2723 * be able to turn them off (since pci_enable_io_modes is 1 by
2726 if (pci_enable_io_modes) {
2727 /* Turn on resources that have been left off by a lazy BIOS */
2728 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2729 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2730 cmd |= PCIM_CMD_PORTEN;
2731 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2733 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2734 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2735 cmd |= PCIM_CMD_MEMEN;
2736 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2739 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2741 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2745 count = (pci_addr_t)1 << mapsize;
2746 if (basezero || base == pci_mapbase(testval)) {
2747 start = 0; /* Let the parent decide. */
2751 end = base + count - 1;
2753 resource_list_add(rl, type, reg, start, end, count);
2756 * Try to allocate the resource for this BAR from our parent
2757 * so that this resource range is already reserved. The
2758 * driver for this device will later inherit this resource in
2759 * pci_alloc_resource().
2761 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
2762 prefetch ? RF_PREFETCHABLE : 0);
2763 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2765 * If the allocation fails, try to allocate a resource for
2766 * this BAR using any available range. The firmware felt
2767 * it was important enough to assign a resource, so don't
2768 * disable decoding if we can help it.
2770 resource_list_delete(rl, type, reg);
2771 resource_list_add(rl, type, reg, 0, ~0ul, count);
2772 res = resource_list_alloc(rl, bus, dev, type, &reg, 0, ~0ul,
2773 count, prefetch ? RF_PREFETCHABLE : 0);
2777 * If the allocation fails, delete the resource list entry
2778 * and disable decoding for this device.
2780 * If the driver requests this resource in the future,
2781 * pci_reserve_map() will try to allocate a fresh
2784 resource_list_delete(rl, type, reg);
2785 pci_disable_io(dev, type);
2788 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2789 pci_get_domain(dev), pci_get_bus(dev),
2790 pci_get_slot(dev), pci_get_function(dev), reg);
2792 start = rman_get_start(res);
2793 pci_write_bar(dev, pm, start);
2794 rman_set_device(res, bus);
2800 * For ATA devices we need to decide early what addressing mode to use.
2801 * Legacy demands that the primary and secondary ATA ports sit on the
2802 * same addresses that old ISA hardware did. This dictates that we use
2803 * those addresses and ignore the BAR's if we cannot set PCI native
2807 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2808 uint32_t prefetchmask)
2811 int rid, type, progif;
2813 /* if this device supports PCI native addressing use it */
2814 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2815 if ((progif & 0x8a) == 0x8a) {
2816 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2817 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2818 printf("Trying ATA native PCI addressing mode\n");
2819 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2823 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2824 type = SYS_RES_IOPORT;
/* Primary channel: native mode uses BAR(0)/BAR(1), else legacy ISA ports. */
2825 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2826 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2827 prefetchmask & (1 << 0));
2828 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2829 prefetchmask & (1 << 1));
2832 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2833 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7,
2835 rman_set_device(r, bus);
2837 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2838 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6,
2840 rman_set_device(r, bus);
/* Secondary channel: native mode uses BAR(2)/BAR(3), else legacy ports. */
2842 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2843 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2844 prefetchmask & (1 << 2));
2845 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2846 prefetchmask & (1 << 3));
2849 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2850 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177,
2852 rman_set_device(r, bus);
2854 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2855 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376,
2857 rman_set_device(r, bus);
/* Bus-master DMA (BAR 4) and any extra BAR (5) are mapped unconditionally. */
2859 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
2860 prefetchmask & (1 << 4));
2861 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
2862 prefetchmask & (1 << 5));
/*
 * Determine the IRQ for a device's INTx pin — tunable override first,
 * then the intline register or a routed interrupt from the parent bus —
 * write it back to PCIR_INTLINE if changed, and register it as the rid 0
 * SYS_RES_IRQ resource.
 */
2866 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2868 struct pci_devinfo *dinfo = device_get_ivars(dev);
2869 pcicfgregs *cfg = &dinfo->cfg;
2870 char tunable_name[64];
2873 /* Has to have an intpin to have an interrupt. */
2874 if (cfg->intpin == 0)
2877 /* Let the user override the IRQ with a tunable. */
2878 irq = PCI_INVALID_IRQ;
2879 snprintf(tunable_name, sizeof(tunable_name),
2880 "hw.pci%d.%d.%d.INT%c.irq",
2881 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
2882 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2883 irq = PCI_INVALID_IRQ;
2886 * If we didn't get an IRQ via the tunable, then we either use the
2887 * IRQ value in the intline register or we ask the bus to route an
2888 * interrupt for us. If force_route is true, then we only use the
2889 * value in the intline register if the bus was unable to assign an
2892 if (!PCI_INTERRUPT_VALID(irq)) {
2893 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2894 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2895 if (!PCI_INTERRUPT_VALID(irq))
2899 /* If after all that we don't have an IRQ, just bail. */
2900 if (!PCI_INTERRUPT_VALID(irq))
2903 /* Update the config register if it changed. */
2904 if (irq != cfg->intline) {
2906 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2909 /* Add this IRQ as rid 0 interrupt resource. */
2910 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
2913 /* Perform early OHCI takeover from SMM. */
2915 ohci_early_takeover(device_t self)
2917 struct resource *res;
2923 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
2927 ctl = bus_read_4(res, OHCI_CONTROL);
/* OHCI_IR set means SMM firmware currently owns the controller. */
2928 if (ctl & OHCI_IR) {
2930 printf("ohci early: "
2931 "SMM active, request owner change\n");
2932 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
/* Poll up to 100 times for the firmware to release ownership. */
2933 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
2935 ctl = bus_read_4(res, OHCI_CONTROL);
2937 if (ctl & OHCI_IR) {
2939 printf("ohci early: "
2940 "SMM does not respond, resetting\n");
2941 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
2943 /* Disable interrupts */
2944 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
2947 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
2950 /* Perform early UHCI takeover from SMM. */
2952 uhci_early_takeover(device_t self)
2954 struct resource *res;
2958 * Set the PIRQD enable bit and switch off all the others. We don't
2959 * want legacy support to interfere with us XXX Does this also mean
2960 * that the BIOS won't touch the keyboard anymore if it is connected
2961 * to the ports of the root hub?
2963 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
2965 /* Disable interrupts */
2966 rid = PCI_UHCI_BASE_REG;
2967 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
2969 bus_write_2(res, UHCI_INTR, 0);
2970 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
2974 /* Perform early EHCI takeover from SMM. */
2976 ehci_early_takeover(device_t self)
2978 struct resource *res;
2988 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
2992 cparams = bus_read_4(res, EHCI_HCCPARAMS);
2994 /* Synchronise with the BIOS if it owns the controller. */
/* Walk the extended-capability list looking for the legacy-support cap. */
2995 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
2996 eecp = EHCI_EECP_NEXT(eec)) {
2997 eec = pci_read_config(self, eecp, 4);
2998 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3001 bios_sem = pci_read_config(self, eecp +
3002 EHCI_LEGSUP_BIOS_SEM, 1);
/* A zero BIOS semaphore means firmware does not own the controller. */
3003 if (bios_sem == 0) {
3007 printf("ehci early: "
3008 "SMM active, request owner change\n");
3010 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
/* Poll up to 100 times for the BIOS to drop its semaphore. */
3012 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3014 bios_sem = pci_read_config(self, eecp +
3015 EHCI_LEGSUP_BIOS_SEM, 1);
3018 if (bios_sem != 0) {
3020 printf("ehci early: "
3021 "SMM does not respond\n");
3023 /* Disable interrupts */
3024 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3025 bus_write_4(res, offs + EHCI_USBINTR, 0);
3027 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/*
 * Populate a device's resource list from its BARs, honoring per-device
 * quirks, give ATA devices their special legacy/native treatment, assign
 * the INTx interrupt, and perform early USB controller takeover from SMM.
 */
3031 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3033 struct pci_devinfo *dinfo;
3035 struct resource_list *rl;
3036 const struct pci_quirk *q;
3040 dinfo = device_get_ivars(dev);
3042 rl = &dinfo->resources;
3043 devid = (cfg->device << 16) | cfg->vendor;
3045 /* ATA devices need special map treatment */
3046 if ((pci_get_class(dev) == PCIC_STORAGE) &&
3047 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3048 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3049 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3050 !pci_read_config(dev, PCIR_BAR(2), 4))) )
3051 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* 'i' is advanced by pci_add_map()'s return (1 or 2 registers). */
3053 for (i = 0; i < cfg->nummaps;) {
3055 * Skip quirked resources.
3057 for (q = &pci_quirks[0]; q->devid != 0; q++)
3058 if (q->devid == devid &&
3059 q->type == PCI_QUIRK_UNMAP_REG &&
3060 q->arg1 == PCIR_BAR(i))
3062 if (q->devid != 0) {
3066 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3067 prefetchmask & (1 << i));
3071 * Add additional, quirked resources.
3073 for (q = &pci_quirks[0]; q->devid != 0; q++)
3074 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3075 pci_add_map(bus, dev, q->arg1, rl, force, 0);
3077 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3078 #ifdef __PCI_REROUTE_INTERRUPT
3080 * Try to re-route interrupts. Sometimes the BIOS or
3081 * firmware may leave bogus values in these registers.
3082 * If the re-route fails, then just stick with what we
3085 pci_assign_interrupt(bus, dev, 1);
3087 pci_assign_interrupt(bus, dev, 0);
/* Wrestle USB host controllers away from SMM firmware early. */
3091 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3092 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3093 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3094 ehci_early_takeover(dev);
3095 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3096 ohci_early_takeover(dev);
3097 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3098 uhci_early_takeover(dev);
/*
 * Enumerate every slot/function on the bus, read each present device's
 * config header via the parent bridge, and add a child device for it.
 * Multi-function scanning is enabled when the header's MFDEV bit is set.
 */
3103 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3105 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3106 device_t pcib = device_get_parent(dev);
3107 struct pci_devinfo *dinfo;
3109 int s, f, pcifunchigh;
3112 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3113 ("dinfo_size too small"));
3114 maxslots = PCIB_MAXSLOTS(pcib);
3115 for (s = 0; s <= maxslots; s++) {
3119 hdrtype = REG(PCIR_HDRTYPE, 1);
3120 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3122 if (hdrtype & PCIM_MFDEV)
3123 pcifunchigh = PCI_FUNCMAX;
3124 for (f = 0; f <= pcifunchigh; f++) {
3125 dinfo = pci_read_device(pcib, domain, busno, s, f,
3127 if (dinfo != NULL) {
3128 pci_add_child(dev, dinfo);
/*
 * Create a newbus child for a discovered PCI function, attach the
 * devinfo as its ivars, snapshot then restore its config space, and
 * populate its resource list.
 */
3136 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3138 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3139 device_set_ivars(dinfo->cfg.dev, dinfo);
3140 resource_list_init(&dinfo->resources);
3141 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3142 pci_cfg_restore(dinfo->cfg.dev, dinfo);
3143 pci_print_verbose(dinfo);
3144 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/* Generic probe: always matches, but at low priority. */
3148 pci_probe(device_t dev)
3151 device_set_desc(dev, "PCI bus");
3153 /* Allow other subclasses to override this driver. */
3154 return (BUS_PROBE_GENERIC);
/*
 * Common attach work shared by pci and its subclasses: report the
 * domain/bus numbers and, with PCI_DMA_BOUNDARY, create a boundary-
 * constrained DMA tag for top-level (non-nested) PCI buses; otherwise
 * the parent's tag is inherited.
 */
3158 pci_attach_common(device_t dev)
3160 struct pci_softc *sc;
3162 #ifdef PCI_DMA_BOUNDARY
3163 int error, tag_valid;
3166 sc = device_get_softc(dev);
3167 domain = pcib_get_domain(dev);
3168 busno = pcib_get_bus(dev);
3170 device_printf(dev, "domain=%d, physical bus=%d\n",
3172 #ifdef PCI_DMA_BOUNDARY
/* Only top-level buses (grandparent not itself a pci bus) get a new tag. */
3174 if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3175 devclass_find("pci")) {
3176 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3177 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3178 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3179 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3181 device_printf(dev, "Failed to create DMA tag: %d\n",
3188 sc->sc_dma_tag = bus_get_dma_tag(dev);
/* Attach: common setup, then enumerate children and attach drivers. */
3193 pci_attach(device_t dev)
3195 int busno, domain, error;
3197 error = pci_attach_common(dev);
3202 * Since there can be multiple independently numbered PCI
3203 * busses on systems with multiple PCI domains, we can't use
3204 * the unit number to decide which bus we are probing. We ask
3205 * the parent pcib what our domain and bus numbers are.
3207 domain = pcib_get_domain(dev);
3208 busno = pcib_get_bus(dev);
3209 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3210 return (bus_generic_attach(dev));
3214 pci_suspend(device_t dev)
3216 int dstate, error, i, numdevs;
3217 device_t acpi_dev, child, *devlist;
3218 struct pci_devinfo *dinfo;
3221 * Save the PCI configuration space for each child and set the
3222 * device in the appropriate power state for this sleep state.
3225 if (pci_do_power_resume)
3226 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
3227 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3229 for (i = 0; i < numdevs; i++) {
3231 dinfo = device_get_ivars(child);
3232 pci_cfg_save(child, dinfo, 0);
3235 /* Suspend devices before potentially powering them down. */
3236 error = bus_generic_suspend(dev);
3238 free(devlist, M_TEMP);
3243 * Always set the device to D3. If ACPI suggests a different
3244 * power state, use it instead. If ACPI is not present, the
3245 * firmware is responsible for managing device power. Skip
3246 * children who aren't attached since they are powered down
3247 * separately. Only manage type 0 devices for now.
3249 for (i = 0; acpi_dev && i < numdevs; i++) {
3251 dinfo = (struct pci_devinfo *) device_get_ivars(child);
3252 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
3253 dstate = PCI_POWERSTATE_D3;
/* Let ACPI override the default D3 target for this sleep state. */
3254 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
3255 pci_set_powerstate(child, dstate);
3258 free(devlist, M_TEMP);
3263 pci_resume(device_t dev)
3265 int i, numdevs, error;
3266 device_t acpi_dev, child, *devlist;
3267 struct pci_devinfo *dinfo;
3270 * Set each child to D0 and restore its PCI configuration space.
3273 if (pci_do_power_resume)
3274 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
3275 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3277 for (i = 0; i < numdevs; i++) {
3279 * Notify ACPI we're going to D0 but ignore the result. If
3280 * ACPI is not present, the firmware is responsible for
3281 * managing device power. Only manage type 0 devices for now.
3284 dinfo = (struct pci_devinfo *) device_get_ivars(child);
3285 if (acpi_dev && device_is_attached(child) &&
3286 dinfo->cfg.hdrtype == 0) {
3287 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
3288 pci_set_powerstate(child, PCI_POWERSTATE_D0);
3291 /* Now the device is powered up, restore its config space. */
3292 pci_cfg_restore(child, dinfo);
3294 free(devlist, M_TEMP);
3295 return (bus_generic_resume(dev));
/*
 * Locate a preloaded PCI vendor database module and record its address
 * and size; the database is newline-terminated so the parser can never
 * run off the end.
 */
3299 pci_load_vendor_data(void)
3301 caddr_t vendordata, info;
3303 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
3304 info = preload_search_info(vendordata, MODINFO_ADDR);
3305 pci_vendordata = *(char **)info;
3306 info = preload_search_info(vendordata, MODINFO_SIZE);
3307 pci_vendordata_size = *(size_t *)info;
3308 /* terminate the database */
3309 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * A new driver was registered: reprobe all driverless children.  Config
 * space is restored before the probe and re-saved (with powerdown) if
 * the probe/attach fails.
 */
3314 pci_driver_added(device_t dev, driver_t *driver)
3319 struct pci_devinfo *dinfo;
3323 device_printf(dev, "driver added\n");
3324 DEVICE_IDENTIFY(driver, dev);
3325 if (device_get_children(dev, &devlist, &numdevs) != 0)
3327 for (i = 0; i < numdevs; i++) {
3329 if (device_get_state(child) != DS_NOTPRESENT)
3331 dinfo = device_get_ivars(child);
3332 pci_print_verbose(dinfo);
3334 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3335 pci_cfg_restore(child, dinfo);
3336 if (device_probe_and_attach(child) != 0)
3337 pci_cfg_save(child, dinfo, 1);
3339 free(devlist, M_TEMP);
/*
 * Set up an interrupt handler for a child.  After the generic setup,
 * direct children get MSI/MSI-X bookkeeping: the vector is mapped via
 * the parent bridge on first use, the message is programmed/unmasked,
 * per-vector handler counts are maintained, and INTx is disabled while
 * message-signaled interrupts are in use.
 */
3343 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3344 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3346 struct pci_devinfo *dinfo;
3347 struct msix_table_entry *mte;
3348 struct msix_vector *mv;
3354 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3359 /* If this is not a direct child, just bail out. */
3360 if (device_get_parent(child) != dev) {
3365 rid = rman_get_rid(irq);
3367 /* Make sure that INTx is enabled */
3368 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3371 * Check to see if the interrupt is MSI or MSI-X.
3372 * Ask our parent to map the MSI and give
3373 * us the address and data register values.
3374 * If we fail for some reason, teardown the
3375 * interrupt handler.
3377 dinfo = device_get_ivars(child);
3378 if (dinfo->cfg.msi.msi_alloc > 0) {
/* MSI: lazily map the vector the first time a handler is attached. */
3379 if (dinfo->cfg.msi.msi_addr == 0) {
3380 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3381 ("MSI has handlers, but vectors not mapped"));
3382 error = PCIB_MAP_MSI(device_get_parent(dev),
3383 child, rman_get_start(irq), &addr, &data);
3386 dinfo->cfg.msi.msi_addr = addr;
3387 dinfo->cfg.msi.msi_data = data;
3389 if (dinfo->cfg.msi.msi_handlers == 0)
3390 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3391 dinfo->cfg.msi.msi_data);
3392 dinfo->cfg.msi.msi_handlers++;
/* MSI-X: the rid indexes the table; mte_vector links to the vector. */
3394 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3395 ("No MSI or MSI-X interrupts allocated"));
3396 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3397 ("MSI-X index too high"));
3398 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3399 KASSERT(mte->mte_vector != 0, ("no message vector"));
3400 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3401 KASSERT(mv->mv_irq == rman_get_start(irq),
3403 if (mv->mv_address == 0) {
3404 KASSERT(mte->mte_handlers == 0,
3405 ("MSI-X table entry has handlers, but vector not mapped"));
3406 error = PCIB_MAP_MSI(device_get_parent(dev),
3407 child, rman_get_start(irq), &addr, &data);
3410 mv->mv_address = addr;
3413 if (mte->mte_handlers == 0) {
3414 pci_enable_msix(child, rid - 1, mv->mv_address,
3416 pci_unmask_msix(child, rid - 1);
3418 mte->mte_handlers++;
3421 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3422 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Mapping failed: undo the generic setup so no stale handler remains. */
3425 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Tear down an interrupt handler.  For direct children using MSI/MSI-X,
 * decrement the handler count and disable the MSI message or mask the
 * MSI-X entry once the last handler is gone, then do the generic
 * teardown.
 */
3435 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3438 struct msix_table_entry *mte;
3439 struct resource_list_entry *rle;
3440 struct pci_devinfo *dinfo;
3443 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3446 /* If this isn't a direct child, just bail out */
3447 if (device_get_parent(child) != dev)
3448 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3450 rid = rman_get_rid(irq);
3453 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3456 * Check to see if the interrupt is MSI or MSI-X. If so,
3457 * decrement the appropriate handlers count and mask the
3458 * MSI-X message, or disable MSI messages if the count
3461 dinfo = device_get_ivars(child);
3462 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3463 if (rle->res != irq)
3465 if (dinfo->cfg.msi.msi_alloc > 0) {
3466 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3467 ("MSI-X index too high"));
3468 if (dinfo->cfg.msi.msi_handlers == 0)
3470 dinfo->cfg.msi.msi_handlers--;
3471 if (dinfo->cfg.msi.msi_handlers == 0)
3472 pci_disable_msi(child);
3474 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3475 ("No MSI or MSI-X interrupts allocated"));
3476 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3477 ("MSI-X index too high"));
3478 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3479 if (mte->mte_handlers == 0)
3481 mte->mte_handlers--;
3482 if (mte->mte_handlers == 0)
3483 pci_mask_msix(child, rid - 1);
3486 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3489 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Print a one-line summary of a child: its port/mem/irq resources,
 * flags, and slot.function location.  Returns the character count, per
 * the bus print-child convention.
 */
3494 pci_print_child(device_t dev, device_t child)
3496 struct pci_devinfo *dinfo;
3497 struct resource_list *rl;
3500 dinfo = device_get_ivars(child);
3501 rl = &dinfo->resources;
3503 retval += bus_print_child_header(dev, child);
3505 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3506 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3507 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3508 if (device_get_flags(dev))
3509 retval += printf(" flags %#x", device_get_flags(dev));
3511 retval += printf(" at device %d.%d", pci_get_slot(child),
3512 pci_get_function(child));
3514 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass description table consulted by pci_probe_nomatch().
 * A subclass of -1 is the class-level fallback description; more
 * specific subclass entries follow their class entry.  (Iteration in
 * pci_probe_nomatch() stops at a NULL desc terminator — presumably the
 * elided final entry; confirm against the full source.)
 */
3524 } pci_nomatch_tab[] = {
3525 {PCIC_OLD, -1, "old"},
3526 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3527 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3528 {PCIC_STORAGE, -1, "mass storage"},
3529 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3530 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3531 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3532 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3533 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3534 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3535 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3536 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3537 {PCIC_STORAGE, PCIS_STORAGE_NVM, "NVM"},
3538 {PCIC_NETWORK, -1, "network"},
3539 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3540 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3541 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3542 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3543 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3544 {PCIC_DISPLAY, -1, "display"},
3545 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3546 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3547 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3548 {PCIC_MULTIMEDIA, -1, "multimedia"},
3549 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3550 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3551 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3552 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3553 {PCIC_MEMORY, -1, "memory"},
3554 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3555 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3556 {PCIC_BRIDGE, -1, "bridge"},
3557 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3558 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3559 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3560 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3561 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3562 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3563 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3564 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3565 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3566 {PCIC_SIMPLECOMM, -1, "simple comms"},
3567 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3568 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3569 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3570 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3571 {PCIC_BASEPERIPH, -1, "base peripheral"},
3572 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3573 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3574 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3575 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3576 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3577 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3578 {PCIC_INPUTDEV, -1, "input device"},
3579 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3580 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3581 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3582 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3583 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3584 {PCIC_DOCKING, -1, "docking station"},
3585 {PCIC_PROCESSOR, -1, "processor"},
3586 {PCIC_SERIALBUS, -1, "serial bus"},
3587 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3588 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3589 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3590 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3591 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3592 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3593 {PCIC_WIRELESS, -1, "wireless controller"},
3594 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3595 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3596 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3597 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3598 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3599 {PCIC_SATCOM, -1, "satellite communication"},
3600 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3601 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3602 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3603 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3604 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3605 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3606 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3607 {PCIC_DASP, -1, "dasp"},
3608 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Report a device that no driver claimed.  Prefer a name from the
 * loaded vendor database; otherwise print the class and (if known)
 * subclass descriptions from pci_nomatch_tab.  Finally re-save config
 * space with powerdown enabled.
 */
3613 pci_probe_nomatch(device_t dev, device_t child)
3616 const char *cp, *scp;
3620 * Look for a listing for this device in a loaded device database.
3622 if ((device = pci_describe_device(child)) != NULL) {
3623 device_printf(dev, "<%s>", device);
3624 free(device, M_DEVBUF);
3627 * Scan the class/subclass descriptions for a general
3632 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3633 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3634 if (pci_nomatch_tab[i].subclass == -1) {
3635 cp = pci_nomatch_tab[i].desc;
3636 } else if (pci_nomatch_tab[i].subclass ==
3637 pci_get_subclass(child)) {
3638 scp = pci_nomatch_tab[i].desc;
3642 device_printf(dev, "<%s%s%s>",
3644 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3647 printf(" at device %d.%d (no driver attached)\n",
3648 pci_get_slot(child), pci_get_function(child));
3649 pci_cfg_save(child, device_get_ivars(child), 1);
3653 * Parse the PCI device database, if loaded, and return a pointer to a
3654 * description of the device.
3656 * The database is flat text formatted as follows:
3658 * Any line not in a valid format is ignored.
3659 * Lines are terminated with newline '\n' characters.
3661 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3664 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3665 * - devices cannot be listed without a corresponding VENDOR line.
3666 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3667 * another TAB, then the device name.
3671 * Assuming (ptr) points to the beginning of a line in the database,
3672 * return the vendor or device and description of the next entry.
3673 * The value of (vendor) or (device) inappropriate for the entry type
3674 * is set to -1. Returns nonzero at the end of the database.
3676 * Note that this is slightly unrobust in the face of corrupt data;
3677 * we attempt to safeguard against this by spamming the end of the
3678 * database with a newline when we initialise.
/*
 * Parse the next vendor-database entry at *ptr (see the format comment
 * above): a line starting at column 0 yields a vendor id, a TAB-indented
 * line yields a device id; the description (up to 80 chars) goes into
 * *desc.  Advances *ptr past the line; 'left' bounds every scan so a
 * truncated database cannot be overrun.
 */
3681 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3690 left = pci_vendordata_size - (cp - pci_vendordata);
3698 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3702 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3705 /* skip to next line */
3706 while (*cp != '\n' && left > 0) {
3715 /* skip to next line */
3716 while (*cp != '\n' && left > 0) {
3720 if (*cp == '\n' && left > 0)
/*
 * Build a malloc'd "vendor, device" description string for a device by
 * scanning the preloaded vendor database.  Falls back to the raw hex
 * device id when only the vendor is listed.  Caller frees the result
 * (M_DEVBUF); returns NULL when no database or no vendor match.
 */
3727 pci_describe_device(device_t dev)
3730 char *desc, *vp, *dp, *line;
3732 desc = vp = dp = NULL;
3735 * If we have no vendor data, we can't do anything.
3737 if (pci_vendordata == NULL)
3741 * Scan the vendor data looking for this device
3743 line = pci_vendordata;
3744 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3747 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3749 if (vendor == pci_get_vendor(dev))
3752 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3755 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3763 if (device == pci_get_device(dev))
/* Vendor known but device not listed: show the raw device id. */
3767 snprintf(dp, 80, "0x%x", pci_get_device(dev));
3768 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3770 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus read-ivar method: expose the cached config-header fields of a
 * child to accessor functions (pci_get_vendor() etc.).
 */
3780 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3782 struct pci_devinfo *dinfo;
3785 dinfo = device_get_ivars(child);
3789 case PCI_IVAR_ETHADDR:
3791 * The generic accessor doesn't deal with failure, so
3792 * we set the return value, then return an error.
3794 *((uint8_t **) result) = NULL;
3796 case PCI_IVAR_SUBVENDOR:
3797 *result = cfg->subvendor;
3799 case PCI_IVAR_SUBDEVICE:
3800 *result = cfg->subdevice;
3802 case PCI_IVAR_VENDOR:
3803 *result = cfg->vendor;
3805 case PCI_IVAR_DEVICE:
3806 *result = cfg->device;
3808 case PCI_IVAR_DEVID:
3809 *result = (cfg->device << 16) | cfg->vendor;
3811 case PCI_IVAR_CLASS:
3812 *result = cfg->baseclass;
3814 case PCI_IVAR_SUBCLASS:
3815 *result = cfg->subclass;
3817 case PCI_IVAR_PROGIF:
3818 *result = cfg->progif;
3820 case PCI_IVAR_REVID:
3821 *result = cfg->revid;
3823 case PCI_IVAR_INTPIN:
3824 *result = cfg->intpin;
3827 *result = cfg->intline;
3829 case PCI_IVAR_DOMAIN:
3830 *result = cfg->domain;
3836 *result = cfg->slot;
3838 case PCI_IVAR_FUNCTION:
3839 *result = cfg->func;
3841 case PCI_IVAR_CMDREG:
3842 *result = cfg->cmdreg;
3844 case PCI_IVAR_CACHELNSZ:
3845 *result = cfg->cachelnsz;
3847 case PCI_IVAR_MINGNT:
3848 *result = cfg->mingnt;
3850 case PCI_IVAR_MAXLAT:
3851 *result = cfg->maxlat;
3853 case PCI_IVAR_LATTIMER:
3854 *result = cfg->lattimer;
/*
 * Bus method: write an instance variable of a PCI child device.  Only
 * the interrupt pin is writable; identity/config values are read-only
 * and return EINVAL.
 */
3863 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3865 	struct pci_devinfo *dinfo;
3867 	dinfo = device_get_ivars(child);
3870 	case PCI_IVAR_INTPIN:
3871 		dinfo->cfg.intpin = value;
	/* All remaining ivars describe hardware identity: reject writes. */
3873 	case PCI_IVAR_ETHADDR:
3874 	case PCI_IVAR_SUBVENDOR:
3875 	case PCI_IVAR_SUBDEVICE:
3876 	case PCI_IVAR_VENDOR:
3877 	case PCI_IVAR_DEVICE:
3878 	case PCI_IVAR_DEVID:
3879 	case PCI_IVAR_CLASS:
3880 	case PCI_IVAR_SUBCLASS:
3881 	case PCI_IVAR_PROGIF:
3882 	case PCI_IVAR_REVID:
3884 	case PCI_IVAR_DOMAIN:
3887 	case PCI_IVAR_FUNCTION:
3888 		return (EINVAL);	/* disallow for now */
3895 #include "opt_ddb.h"
3897 #include <ddb/ddb.h>
3898 #include <sys/cons.h>
3901 * List resources based on pci map registers, for use within ddb
/*
 * ddb "show pciregs" command: walk the global PCI device list and print
 * one summary line per device (location, class, card/chip IDs, revision,
 * header type).  NOTE(review): interior lines are elided in this view.
 */
3904 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3906 	struct pci_devinfo *dinfo;
3907 	struct devlist *devlist_head;
3910 	int i, error, none_count;
3913 	/* get the head of the device queue */
3914 	devlist_head = &pci_devq;
3917 	 * Go through the list of devices and print out devices
	/* Stop early if the pager was quit or pci_numdevs is exhausted. */
3919 	for (error = 0, i = 0,
3920 	     dinfo = STAILQ_FIRST(devlist_head);
3921 	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3922 	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3924 		/* Populate pd_name and pd_unit */
3927 			name = device_get_name(dinfo->cfg.dev);
3930 		db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3931 		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
3932 		    (name && *name) ? name : "none",
3933 		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3935 		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3936 		    p->pc_sel.pc_func, (p->pc_class << 16) |
3937 		    (p->pc_subclass << 8) | p->pc_progif,
3938 		    (p->pc_subdevice << 16) | p->pc_subvendor,
3939 		    (p->pc_device << 16) | p->pc_vendor,
3940 		    p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate the resource backing a BAR (or device ROM) the first
 * time a child asks for it: size the BAR by probing it, validate that
 * the requested type matches the BAR type, allocate from the parent bus,
 * record the result in the child's resource list, and program the BAR
 * with the assigned address.  NOTE(review): interior lines are elided
 * in this view; comments describe only the visible logic.
 */
3945 static struct resource *
3946 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3947     u_long start, u_long end, u_long count, u_int flags)
3949 	struct pci_devinfo *dinfo = device_get_ivars(child);
3950 	struct resource_list *rl = &dinfo->resources;
3951 	struct resource_list_entry *rle;
3952 	struct resource *res;
3954 	pci_addr_t map, testval;
3958 	pm = pci_find_bar(child, *rid);
3960 		/* This is a BAR that we failed to allocate earlier. */
3961 		mapsize = pm->pm_size;
3965 		 * Weed out the bogons, and figure out how large the
3966 		 * BAR/map is.  BARs that read back 0 here are bogus
3967 		 * and unimplemented.  Note: atapci in legacy mode are
3968 		 * special and handled elsewhere in the code.  If you
3969 		 * have a atapci device in legacy mode and it fails
3970 		 * here, that other code is broken.
3972 		pci_read_bar(child, *rid, &map, &testval);
3975 		 * Determine the size of the BAR and ignore BARs with a size
3976 		 * of 0.  Device ROM BARs use a different mask value.
3978 		if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
3979 			mapsize = pci_romsize(testval);
3981 			mapsize = pci_mapsize(testval);
3984 		pm = pci_add_bar(child, *rid, map, mapsize);
	/* Reject type mismatches: memory BAR vs. SYS_RES_IOPORT and vice versa. */
3987 	if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
3988 		if (type != SYS_RES_MEMORY) {
3991 			    "child %s requested type %d for rid %#x,"
3992 			    " but the BAR says it is an memio\n",
3993 			    device_get_nameunit(child), type, *rid);
3997 		if (type != SYS_RES_IOPORT) {
4000 			    "child %s requested type %d for rid %#x,"
4001 			    " but the BAR says it is an ioport\n",
4002 			    device_get_nameunit(child), type, *rid);
4008 	 * For real BARs, we need to override the size that
4009 	 * the driver requests, because that's what the BAR
4010 	 * actually uses and we would otherwise have a
4011 	 * situation where we might allocate the excess to
4012 	 * another driver, which won't work.
	/* mapsize is log2 of the BAR size; alignment must be at least that. */
4014 	count = (pci_addr_t)1 << mapsize;
4015 	if (RF_ALIGNMENT(flags) < mapsize)
4016 		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
4017 	if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
4018 		flags |= RF_PREFETCHABLE;
4021 	 * Allocate enough resource, and then write back the
4022 	 * appropriate BAR for that resource.
	/* Allocate inactive; the child activates it when it is ready. */
4024 	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
4025 	    start, end, count, flags & ~RF_ACTIVE);
4027 		device_printf(child,
4028 		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
4029 		    count, *rid, type, start, end);
	/* The bus owns the resource until the child claims it. */
4032 	rman_set_device(res, dev);
4033 	resource_list_add(rl, type, *rid, start, end, count);
4034 	rle = resource_list_find(rl, type, *rid);
4036 		panic("pci_alloc_map: unexpectedly can't find resource.");
4038 	rle->start = rman_get_start(res);
4039 	rle->end = rman_get_end(res);
4042 		device_printf(child,
4043 		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
4044 		    count, *rid, type, rman_get_start(res));
	/* Program the BAR with the address the resource manager chose. */
4045 	map = rman_get_start(res);
4046 	pci_write_bar(child, pm, map);
/*
 * Bus method: allocate a resource for a child.  Handles lazy interrupt
 * routing, refuses legacy INTx once MSI/MSI-X is in use, passes bridge
 * window requests up, lazily sizes/allocates BARs via pci_alloc_map(),
 * and hands pre-reserved BAR resources over to the child.
 * NOTE(review): interior lines are elided in this view.
 */
4052 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
4053     u_long start, u_long end, u_long count, u_int flags)
4055 	struct pci_devinfo *dinfo = device_get_ivars(child);
4056 	struct resource_list *rl = &dinfo->resources;
4057 	struct resource_list_entry *rle;
4058 	struct resource *res;
4059 	pcicfgregs *cfg = &dinfo->cfg;
	/* Only handle direct children; otherwise pass the request up. */
4061 	if (device_get_parent(child) != dev)
4062 		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
4063 		    type, rid, start, end, count, flags));
4066 	 * Perform lazy resource allocation
4071 		 * Can't alloc legacy interrupt once MSI messages have
4074 		if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
4075 		    cfg->msix.msix_alloc > 0))
4079 		 * If the child device doesn't have an interrupt
4080 		 * routed and is deserving of an interrupt, try to
4083 		if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
4085 			pci_assign_interrupt(dev, child, 0);
4087 	case SYS_RES_IOPORT:
4088 	case SYS_RES_MEMORY:
4091 		 * PCI-PCI bridge I/O window resources are not BARs.
4092 		 * For those allocations just pass the request up the
4095 		if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
4097 			case PCIR_IOBASEL_1:
4098 			case PCIR_MEMBASE_1:
4099 			case PCIR_PMBASEL_1:
4101 				 * XXX: Should we bother creating a resource
4104 				return (bus_generic_alloc_resource(dev, child,
4105 				    type, rid, start, end, count, flags));
4109 		/* Allocate resources for this BAR if needed. */
4110 		rle = resource_list_find(rl, type, *rid);
4112 			res = pci_alloc_map(dev, child, type, rid, start, end,
4116 			rle = resource_list_find(rl, type, *rid);
4120 		 * If the resource belongs to the bus, then give it to
4121 		 * the child.  We need to activate it if requested
4122 		 * since the bus always allocates inactive resources.
4124 		if (rle != NULL && rle->res != NULL &&
4125 		    rman_get_device(rle->res) == dev) {
4127 				device_printf(child,
4128 			    "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
4129 				    rman_get_size(rle->res), *rid, type,
4130 				    rman_get_start(rle->res));
			/* Transfer ownership from the bus to the child. */
4131 			rman_set_device(rle->res, child);
4132 			if ((flags & RF_ACTIVE) &&
4133 			    bus_activate_resource(child, type, *rid,
	/* Fall back to the generic resource-list allocator. */
4139 	return (resource_list_alloc(rl, dev, child, type, rid,
4140 	    start, end, count, flags));
/*
 * Bus method: release a child's resource.  BAR-backed I/O and memory
 * resources are not truly released; they are deactivated if needed and
 * ownership is returned to the bus so they stay reserved.
 * NOTE(review): interior lines are elided in this view.
 */
4144 pci_release_resource(device_t dev, device_t child, int type, int rid,
	/* Only handle direct children; otherwise pass the request up. */
4149 	if (device_get_parent(child) != dev)
4150 		return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
4154 	 * For BARs we don't actually want to release the resource.
4155 	 * Instead, we deactivate the resource if needed and then give
4156 	 * ownership of the BAR back to the bus.
4159 	case SYS_RES_IOPORT:
4160 	case SYS_RES_MEMORY:
4161 		if (rman_get_device(r) != child)
4163 		if (rman_get_flags(r) & RF_ACTIVE) {
4164 			error = bus_deactivate_resource(child, type, rid, r);
		/* Hand the (still-allocated) BAR resource back to the bus. */
4168 		rman_set_device(r, dev);
4171 	return (bus_generic_rl_release_resource(dev, child, type, rid, r));
/*
 * Bus method: activate a child's resource.  After generic activation,
 * enable decoding: device ROM BARs get PCIM_BIOS_ENABLE written into
 * the BAR, and I/O-port/memory decoding is enabled in the command
 * register via PCI_ENABLE_IO().
 */
4175 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4178 	struct pci_devinfo *dinfo;
4181 	error = bus_generic_activate_resource(dev, child, type, rid, r);
4185 	/* Enable decoding in the command register when activating BARs. */
4186 	if (device_get_parent(child) == dev) {
4187 		/* Device ROMs need their decoding explicitly enabled. */
4188 		dinfo = device_get_ivars(child);
4189 		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4190 			pci_write_bar(child, pci_find_bar(child, rid),
4191 			    rman_get_start(r) | PCIM_BIOS_ENABLE);
4193 		case SYS_RES_IOPORT:
4194 		case SYS_RES_MEMORY:
4195 			error = PCI_ENABLE_IO(dev, child, type);
/*
 * Bus method: deactivate a child's resource.  After generic
 * deactivation, device ROM decoding is disabled by rewriting the
 * ROM BAR (clearing PCIM_BIOS_ENABLE).
 */
4203 pci_deactivate_resource(device_t dev, device_t child, int type,
4204     int rid, struct resource *r)
4206 	struct pci_devinfo *dinfo;
4209 	error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4213 	/* Disable decoding for device ROMs. */
4214 	if (device_get_parent(child) == dev) {
4215 		dinfo = device_get_ivars(child);
4216 		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4217 			pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Bus method: delete a resource from a child's resource list.  Refuses
 * (with a diagnostic) if the child still owns or has activated the
 * underlying resource; otherwise releases it back to the bus and drops
 * the resource-list entry.  NOTE(review): interior lines are elided.
 */
4224 pci_delete_resource(device_t dev, device_t child, int type, int rid)
4226 	struct pci_devinfo *dinfo;
4227 	struct resource_list *rl;
4228 	struct resource_list_entry *rle;
	/* Only handle direct children. */
4230 	if (device_get_parent(child) != dev)
4233 	dinfo = device_get_ivars(child);
4234 	rl = &dinfo->resources;
4235 	rle = resource_list_find(rl, type, rid);
		/* Still owned/active by the child: complain, don't delete. */
4240 		if (rman_get_device(rle->res) != dev ||
4241 		    rman_get_flags(rle->res) & RF_ACTIVE) {
4242 			device_printf(dev, "delete_resource: "
4243 			    "Resource still owned by child, oops. "
4244 			    "(type=%d, rid=%d, addr=%lx)\n",
4245 			    rle->type, rle->rid,
4246 			    rman_get_start(rle->res));
4249 		bus_release_resource(dev, type, rid, rle->res);
4251 	resource_list_delete(rl, type, rid);
/*
 * Bus method: return the child's per-device resource list, stored in
 * its pci_devinfo ivars.
 */
4254 struct resource_list *
4255 pci_get_resource_list (device_t dev, device_t child)
4257 	struct pci_devinfo *dinfo = device_get_ivars(child);
4259 	return (&dinfo->resources);
/*
 * Bus method: return the DMA tag shared by all children of this PCI
 * bus, kept in the bus softc.
 */
4263 pci_get_dma_tag(device_t bus, device_t dev)
4265 	struct pci_softc *sc = device_get_softc(bus);
4267 	return (sc->sc_dma_tag);
/*
 * Bus method: read (width) bytes of the child's config space at (reg),
 * delegating to the parent bridge via PCIB_READ_CONFIG using the
 * child's cached bus/slot/function address.
 */
4271 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4273 	struct pci_devinfo *dinfo = device_get_ivars(child);
4274 	pcicfgregs *cfg = &dinfo->cfg;
4276 	return (PCIB_READ_CONFIG(device_get_parent(dev),
4277 	    cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus method: write (width) bytes of (val) to the child's config space
 * at (reg), delegating to the parent bridge via PCIB_WRITE_CONFIG.
 */
4281 pci_write_config_method(device_t dev, device_t child, int reg,
4282     uint32_t val, int width)
4284 	struct pci_devinfo *dinfo = device_get_ivars(child);
4285 	pcicfgregs *cfg = &dinfo->cfg;
4287 	PCIB_WRITE_CONFIG(device_get_parent(dev),
4288 	    cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * Bus method: format the child's location ("slot=S function=F") into
 * (buf) for devctl/devinfo consumers.
 */
4292 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4296 	snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4297 	    pci_get_function(child));
/*
 * Bus method: format the child's plug-and-play identification (vendor,
 * device, subvendor, subdevice, class triple) into (buf) from the
 * cached config registers.
 */
4302 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4305 	struct pci_devinfo *dinfo;
4308 	dinfo = device_get_ivars(child);
4310 	snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4311 	    "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4312 	    cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus method: route the child's interrupt by asking the parent bridge
 * via PCIB_ROUTE_INTERRUPT.
 */
4318 pci_assign_interrupt_method(device_t dev, device_t child)
4320 	struct pci_devinfo *dinfo = device_get_ivars(child);
4321 	pcicfgregs *cfg = &dinfo->cfg;
4323 	return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev pci control node (mode 0644, root:wheel), and load
 * the vendor database; on unload, destroy the device node.
 * NOTE(review): interior lines are elided in this view.
 */
4328 pci_modevent(module_t mod, int what, void *arg)
4330 	static struct cdev *pci_cdev;
4334 		STAILQ_INIT(&pci_devq);
4336 		pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
4338 		pci_load_vendor_data();
4342 		destroy_dev(pci_cdev);
/*
 * Restore a type-0 device's config registers from the cached copy in
 * (dinfo), typically after a power-state transition or resume.  Powers
 * the device to D0 first (D3->D0 resets BARs and other registers), then
 * rewrites BARs, command/interrupt/timing registers, and re-arms any
 * MSI/MSI-X state that was previously configured.
 */
4350 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
4354 	 * Only do header type 0 devices.  Type 1 devices are bridges,
4355 	 * which we know need special treatment.  Type 2 devices are
4356 	 * cardbus bridges which also require special treatment.
4357 	 * Other types are unknown, and we err on the side of safety
4360 	if (dinfo->cfg.hdrtype != 0)
4364 	 * Restore the device to full power mode.  We must do this
4365 	 * before we restore the registers because moving from D3 to
4366 	 * D0 will cause the chip's BARs and some other registers to
4367 	 * be reset to some unknown power on reset values.  Cut down
4368 	 * the noise on boot by doing nothing if we are already in
4371 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
4372 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4373 	pci_restore_bars(dev);
4374 	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
4375 	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
4376 	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
4377 	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
4378 	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
4379 	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
4380 	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
4381 	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
4382 	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
4384 	/* Restore MSI and MSI-X configurations if they are present. */
4385 	if (dinfo->cfg.msi.msi_location != 0)
4386 		pci_resume_msi(dev);
4387 	if (dinfo->cfg.msix.msix_location != 0)
4388 		pci_resume_msix(dev);
/*
 * Snapshot a type-0 device's writable config registers into (dinfo) so
 * pci_cfg_restore() can replay them, then (if setstate and policy
 * permits) power the device down to D3.  The pci_do_power_nodriver
 * policy decides which device classes may be powered down when no
 * driver is attached.  NOTE(review): some lines are elided in this view.
 */
4392 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
4398 	 * Only do header type 0 devices.  Type 1 devices are bridges, which
4399 	 * we know need special treatment.  Type 2 devices are cardbus bridges
4400 	 * which also require special treatment.  Other types are unknown, and
4401 	 * we err on the side of safety by ignoring them.  Powering down
4402 	 * bridges should not be undertaken lightly.
4404 	if (dinfo->cfg.hdrtype != 0)
4408 	 * Some drivers apparently write to these registers w/o updating our
4409 	 * cached copy.  No harm happens if we update the copy, so do so here
4410 	 * so we can restore them.  The COMMAND register is modified by the
4411 	 * bus w/o updating the cache.  This should represent the normally
4412 	 * writable portion of the 'defined' part of type 0 headers.  In
4413 	 * theory we also need to save/restore the PCI capability structures
4414 	 * we know about, but apart from power we don't know any that are
4417 	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
4418 	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
4419 	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
4420 	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
4421 	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
4422 	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
4423 	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
4424 	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
4425 	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
4426 	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
4427 	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
4428 	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
4429 	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
4430 	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
4431 	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
4434 	 * don't set the state for display devices, base peripherals and
4435 	 * memory devices since bad things happen when they are powered down.
4436 	 * We should (a) have drivers that can easily detach and (b) use
4437 	 * generic drivers for these devices so that some device actually
4438 	 * attaches.  We need to make sure that when we implement (a) we don't
4439 	 * power the device down on a reattach.
4441 	cls = pci_get_class(dev);
4444 	switch (pci_do_power_nodriver)
4446 	case 0:		/* NO powerdown at all */
4448 	case 1:		/* Conservative about what to power down */
4449 		if (cls == PCIC_STORAGE)
4452 	case 2:		/* Aggressive about what to power down */
4453 		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4454 		    cls == PCIC_BASEPERIPH)
4457 	case 3:		/* Power down everything */
4461 	 * PCI spec says we can only go into D3 state from D0 state.
4462 	 * Transition from D[12] into D0 before going to D3 state.
4464 	ps = pci_get_powerstate(dev);
4465 	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4466 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4467 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4468 		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
4471 /* Wrapper APIs suitable for device driver use. */
/*
 * Driver-facing wrapper: save the device's config state without
 * changing its power state (setstate == 0).
 */
4473 pci_save_state(device_t dev)
4475 	struct pci_devinfo *dinfo;
4477 	dinfo = device_get_ivars(dev);
4478 	pci_cfg_save(dev, dinfo, 0);
/*
 * Driver-facing wrapper: restore the device's config state previously
 * saved by pci_save_state()/pci_cfg_save().
 */
4482 pci_restore_state(device_t dev)
4484 	struct pci_devinfo *dinfo;
4486 	dinfo = device_get_ivars(dev);
4487 	pci_cfg_restore(dev, dinfo);