2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
55 #if defined(__i386__) || defined(__amd64__)
56 #include <machine/intr_machdep.h>
59 #include <sys/pciio.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
68 #include <contrib/dev/acpica/acpi.h>
/*
 * NOTE(review): this chunk is a decimated extraction of FreeBSD's
 * sys/dev/pci/pci.c.  The number at the start of each line is the
 * original file's line number; gaps in that numbering mean lines were
 * dropped, so several declarations below appear cut mid-parameter-list.
 */
/* Stub out ACPI power hooks when ACPI support is not compiled in. */
71 #define ACPI_PWR_FOR_SLEEP(x, y, z)
/* Forward declarations for the static helpers defined later in this file. */
74 static uint32_t pci_mapbase(unsigned mapreg);
75 static const char *pci_maptype(unsigned mapreg);
76 static int pci_mapsize(unsigned testval);
77 static int pci_maprange(unsigned mapreg);
78 static void pci_fixancient(pcicfgregs *cfg);
80 static int pci_porten(device_t pcib, int b, int s, int f);
81 static int pci_memen(device_t pcib, int b, int s, int f);
82 static void pci_assign_interrupt(device_t bus, device_t dev,
84 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
85 int b, int s, int f, int reg,
86 struct resource_list *rl, int force, int prefetch);
87 static int pci_probe(device_t dev);
88 static int pci_attach(device_t dev);
89 static void pci_load_vendor_data(void);
90 static int pci_describe_parse_line(char **ptr, int *vendor,
91 int *device, char **desc);
92 static char *pci_describe_device(device_t dev);
93 static int pci_modevent(module_t mod, int what, void *arg);
94 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
96 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
/* VPD (Vital Product Data) register access helpers. */
97 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
98 int reg, uint32_t *data);
100 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
101 int reg, uint32_t data);
103 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
/* MSI / MSI-X interrupt management helpers. */
104 static void pci_disable_msi(device_t dev);
105 static void pci_enable_msi(device_t dev, uint64_t address,
107 static void pci_enable_msix(device_t dev, u_int index,
108 uint64_t address, uint32_t data);
109 static void pci_mask_msix(device_t dev, u_int index);
110 static void pci_unmask_msix(device_t dev, u_int index);
111 static int pci_msi_blacklisted(void);
112 static void pci_resume_msi(device_t dev);
113 static void pci_resume_msix(device_t dev);
/*
 * New-bus method table for the PCI bus driver: device_* methods handle
 * the driver's own lifecycle, bus_* methods service child devices, and
 * pci_* methods implement the PCI-specific kobj interface.
 */
115 static device_method_t pci_methods[] = {
116 /* Device interface */
117 DEVMETHOD(device_probe, pci_probe),
118 DEVMETHOD(device_attach, pci_attach),
119 DEVMETHOD(device_detach, bus_generic_detach),
120 DEVMETHOD(device_shutdown, bus_generic_shutdown),
121 DEVMETHOD(device_suspend, pci_suspend),
122 DEVMETHOD(device_resume, pci_resume),
/* Bus interface (services provided to child devices) */
125 DEVMETHOD(bus_print_child, pci_print_child),
126 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
127 DEVMETHOD(bus_read_ivar, pci_read_ivar),
128 DEVMETHOD(bus_write_ivar, pci_write_ivar),
129 DEVMETHOD(bus_driver_added, pci_driver_added),
130 DEVMETHOD(bus_setup_intr, pci_setup_intr),
131 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
133 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
134 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
135 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
136 DEVMETHOD(bus_delete_resource, pci_delete_resource),
137 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
138 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
139 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
140 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
141 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
142 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface (config space, power, VPD, MSI/MSI-X) */
145 DEVMETHOD(pci_read_config, pci_read_config_method),
146 DEVMETHOD(pci_write_config, pci_write_config_method),
147 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
148 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
149 DEVMETHOD(pci_enable_io, pci_enable_io_method),
150 DEVMETHOD(pci_disable_io, pci_disable_io_method),
151 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
152 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
153 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
154 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
155 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
156 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
157 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
158 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
159 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
160 DEVMETHOD(pci_release_msi, pci_release_msi_method),
161 DEVMETHOD(pci_msi_count, pci_msi_count_method),
162 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Register the "pci" driver class and attach it under pcib (PCI bridges). */
167 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
169 static devclass_t pci_devclass;
170 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
171 MODULE_VERSION(pci, 1);
/* In-memory copy of the pci_vendor_data file loaded by pci_load_vendor_data(). */
173 static char *pci_vendordata;
174 static size_t pci_vendordata_size;
/* Quirk-table entry field (enclosing struct declaration elided in this extraction). */
178 uint32_t devid; /* Vendor/device of the card */
180 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
181 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
/*
 * Table of known-broken devices, keyed on the 32-bit device/vendor ID
 * (device in the high 16 bits, vendor in the low 16 bits).
 */
186 struct pci_quirk pci_quirks[] = {
187 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
188 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
189 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
190 /* As does the Serverworks OSB4 (the SMBus mapping register) */
191 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
194 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
195 * or the CMIC-SL (AKA ServerWorks GC_LE).
197 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
198 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
201 * MSI doesn't work on earlier Intel chipsets including
202 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
204 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
205 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
206 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
207 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
208 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
209 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
210 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
213 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
216 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
/* Flag bits describing what a BAR decodes (used by the map helpers below). */
221 /* map register information */
222 #define PCI_MAPMEM 0x01 /* memory map */
223 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
224 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of all discovered PCI devices, plus bookkeeping counters. */
226 struct devlist pci_devq;
227 uint32_t pci_generation;
228 uint32_t pci_numdevs = 0;
/* Set when at least one PCIe / PCI-X capable bridge or device is found. */
229 static int pcie_chipset, pcix_chipset;
/* hw.pci sysctl tree: loader tunables and runtime knobs for this driver. */
232 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
/* Whether to turn on a device's I/O and memory decode bits ourselves. */
234 static int pci_enable_io_modes = 1;
235 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
236 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
237 &pci_enable_io_modes, 1,
238 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
239 enable these bits correctly. We'd like to do this all the time, but there\n\
240 are some peripherals that this causes problems with.");
/* Power-down policy for devices with no attached driver (0..3, see text). */
242 static int pci_do_power_nodriver = 0;
243 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
244 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
245 &pci_do_power_nodriver, 0,
246 "Place a function into D3 state when no driver attaches to it. 0 means\n\
247 disable. 1 means conservatively place devices into D3 state. 2 means\n\
248 agressively place devices into D3 state. 3 means put absolutely everything\n\
/* Whether to restore devices from D3 to D0 on system resume. */
251 static int pci_do_power_resume = 1;
252 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
253 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
254 &pci_do_power_resume, 1,
255 "Transition from D3 -> D0 on resume.");
/* Global enable/disable switches for MSI and MSI-X allocation. */
257 static int pci_do_msi = 1;
258 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
259 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
260 "Enable support for MSI interrupts");
262 static int pci_do_msix = 1;
263 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
264 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
265 "Enable support for MSI-X interrupts");
/* Read-only at runtime: whether pci_msi_blacklisted() is consulted at all. */
267 static int pci_honor_msi_blacklist = 1;
268 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
269 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
270 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
272 /* Find a device_t by bus/slot/function in domain 0 */
/* Convenience wrapper: legacy lookup that assumes PCI domain (segment) 0. */
275 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
278 return (pci_find_dbsf(0, bus, slot, func));
281 /* Find a device_t by domain/bus/slot/function */
284 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
286 struct pci_devinfo *dinfo;
/* Linear scan of the global device list for an exact D/B/S/F match. */
288 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
289 if ((dinfo->cfg.domain == domain) &&
290 (dinfo->cfg.bus == bus) &&
291 (dinfo->cfg.slot == slot) &&
292 (dinfo->cfg.func == func)) {
293 return (dinfo->cfg.dev);
300 /* Find a device_t by vendor/device ID */
/*
 * Linear scan of the global device list; returns the first device whose
 * vendor and device IDs both match (list order decides ties).
 */
303 pci_find_device(uint16_t vendor, uint16_t device)
305 struct pci_devinfo *dinfo;
307 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
308 if ((dinfo->cfg.vendor == vendor) &&
309 (dinfo->cfg.device == device)) {
310 return (dinfo->cfg.dev);
317 /* return base address of memory or port map */
/* Strip the type bits from a raw BAR value, leaving only the base address. */
320 pci_mapbase(uint32_t mapreg)
323 if (PCI_BAR_MEM(mapreg))
324 return (mapreg & PCIM_BAR_MEM_BASE);
326 return (mapreg & PCIM_BAR_IO_BASE);
329 /* return map type of memory or port map */
/* Human-readable BAR type string, used in boot-time diagnostics. */
332 pci_maptype(unsigned mapreg)
335 if (PCI_BAR_IO(mapreg))
337 if (mapreg & PCIM_BAR_MEM_PREFETCH)
338 return ("Prefetchable Memory");
342 /* return log2 of map size decoded for memory or port map */
/*
 * Given the value read back after writing all-ones to a BAR, count the
 * hardwired-zero low bits to recover log2 of the decoded window size.
 */
345 pci_mapsize(uint32_t testval)
349 testval = pci_mapbase(testval);
352 while ((testval & 1) == 0)
361 /* return log2 of address range supported by map register */
/* 32-bit, <1MB, or 64-bit memory BAR; I/O BARs handled separately above. */
364 pci_maprange(unsigned mapreg)
368 if (PCI_BAR_IO(mapreg))
371 switch (mapreg & PCIM_BAR_MEM_TYPE) {
372 case PCIM_BAR_MEM_32:
375 case PCIM_BAR_MEM_1MB:
378 case PCIM_BAR_MEM_64:
385 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/*
 * Pre-2.0 devices may report header type 0 even for bridges; only
 * devices still claiming hdrtype 0 are candidates for fix-up.
 */
388 pci_fixancient(pcicfgregs *cfg)
390 if (cfg->hdrtype != 0)
393 /* PCI to PCI bridges use header type 1 */
394 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
398 /* extract header type specific config data */
/*
 * Fill in the hdrtype-dependent fields of *cfg: the number of BARs and,
 * for header types 0 and 2, the subsystem vendor/device IDs (which live
 * at different offsets per header type).
 */
401 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
403 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
404 switch (cfg->hdrtype) {
406 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
407 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
408 cfg->nummaps = PCI_MAXMAPS_0;
/* Type 1 (PCI-PCI bridge): no subsystem IDs at a standard offset. */
411 cfg->nummaps = PCI_MAXMAPS_1;
414 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
415 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
416 cfg->nummaps = PCI_MAXMAPS_2;
422 /* read configuration header into pcicfgregs structure */
/*
 * Probe d:b:s:f and, if a device responds (DEVVENDOR != all-ones),
 * allocate a pci_devinfo of 'size' bytes (callers may embed it in a
 * larger structure), populate cfg from the standard config header,
 * read capabilities, and link the entry onto the global pci_devq.
 * Returns the new entry, or NULL when no device is present.
 */
424 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
426 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
427 pcicfgregs *cfg = NULL;
428 struct pci_devinfo *devlist_entry;
429 struct devlist *devlist_head;
431 devlist_head = &pci_devq;
433 devlist_entry = NULL;
/* All-ones from DEVVENDOR means no function at this address. */
435 if (REG(PCIR_DEVVENDOR, 4) != -1) {
436 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
437 if (devlist_entry == NULL)
440 cfg = &devlist_entry->cfg;
/* Common (header-type-independent) part of the config header. */
446 cfg->vendor = REG(PCIR_VENDOR, 2);
447 cfg->device = REG(PCIR_DEVICE, 2);
448 cfg->cmdreg = REG(PCIR_COMMAND, 2);
449 cfg->statreg = REG(PCIR_STATUS, 2);
450 cfg->baseclass = REG(PCIR_CLASS, 1);
451 cfg->subclass = REG(PCIR_SUBCLASS, 1);
452 cfg->progif = REG(PCIR_PROGIF, 1);
453 cfg->revid = REG(PCIR_REVID, 1);
454 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
455 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
456 cfg->lattimer = REG(PCIR_LATTIMER, 1);
457 cfg->intpin = REG(PCIR_INTPIN, 1);
458 cfg->intline = REG(PCIR_INTLINE, 1);
460 cfg->mingnt = REG(PCIR_MINGNT, 1);
461 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split the multi-function bit out of hdrtype so the rest of the
 * driver can compare hdrtype against plain header-type values. */
463 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
464 cfg->hdrtype &= ~PCIM_MFDEV;
467 pci_hdrtypedata(pcib, b, s, f, cfg);
/* Walk the capability list only if the device advertises one. */
469 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
470 pci_read_extcap(pcib, cfg);
472 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror cfg into the pciio(4)-visible struct pci_conf. */
474 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
475 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
476 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
477 devlist_entry->conf.pc_sel.pc_func = cfg->func;
478 devlist_entry->conf.pc_hdr = cfg->hdrtype;
480 devlist_entry->conf.pc_subvendor = cfg->subvendor;
481 devlist_entry->conf.pc_subdevice = cfg->subdevice;
482 devlist_entry->conf.pc_vendor = cfg->vendor;
483 devlist_entry->conf.pc_device = cfg->device;
485 devlist_entry->conf.pc_class = cfg->baseclass;
486 devlist_entry->conf.pc_subclass = cfg->subclass;
487 devlist_entry->conf.pc_progif = cfg->progif;
488 devlist_entry->conf.pc_revid = cfg->revid;
493 return (devlist_entry);
/*
 * Walk the device's PCI capability list and cache what we care about
 * into *cfg: power management, HyperTransport MSI mapping (x86 only),
 * MSI, MSI-X, VPD, subvendor IDs on bridges, and PCI-X / PCI-express
 * presence (used to set the chipset-type globals).
 */
498 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
500 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
501 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
502 #if defined(__i386__) || defined(__amd64__)
506 int ptr, nextptr, ptrptr;
/* The offset of the first-capability pointer depends on header type. */
508 switch (cfg->hdrtype & PCIM_HDRTYPE) {
511 ptrptr = PCIR_CAP_PTR;
514 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
517 return; /* no extended capabilities support */
519 nextptr = REG(ptrptr, 1); /* sanity check? */
522 * Read capability entries.
524 while (nextptr != 0) {
527 printf("illegal PCI extended capability offset %d\n",
531 /* Find the next entry */
533 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
535 /* Process this entry */
536 switch (REG(ptr + PCICAP_ID, 1)) {
537 case PCIY_PMG: /* PCI power management */
/* Only record the first PM capability encountered. */
538 if (cfg->pp.pp_cap == 0) {
539 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
540 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
541 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
542 if ((nextptr - ptr) > PCIR_POWER_DATA)
543 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
546 #if defined(__i386__) || defined(__amd64__)
547 case PCIY_HT: /* HyperTransport */
548 /* Determine HT-specific capability type. */
549 val = REG(ptr + PCIR_HT_COMMAND, 2);
550 switch (val & PCIM_HTCMD_CAP_MASK) {
551 case PCIM_HTCAP_MSI_MAPPING:
552 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
553 /* Sanity check the mapping window. */
554 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
557 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
559 if (addr != MSI_INTEL_ADDR_BASE)
561 "HT Bridge at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
562 cfg->domain, cfg->bus,
563 cfg->slot, cfg->func,
566 addr = MSI_INTEL_ADDR_BASE;
568 cfg->ht.ht_msimap = ptr;
569 cfg->ht.ht_msictrl = val;
570 cfg->ht.ht_msiaddr = addr;
575 case PCIY_MSI: /* PCI MSI */
576 cfg->msi.msi_location = ptr;
577 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* Message count is encoded as a power of two in the MMC field. */
578 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
579 PCIM_MSICTRL_MMC_MASK)>>1);
581 case PCIY_MSIX: /* PCI MSI-X */
582 cfg->msix.msix_location = ptr;
583 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
584 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
585 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA locations are a BAR index plus a byte offset. */
586 val = REG(ptr + PCIR_MSIX_TABLE, 4);
587 cfg->msix.msix_table_bar = PCIR_BAR(val &
589 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
590 val = REG(ptr + PCIR_MSIX_PBA, 4);
591 cfg->msix.msix_pba_bar = PCIR_BAR(val &
593 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
595 case PCIY_VPD: /* PCI Vital Product Data */
596 cfg->vpd.vpd_reg = ptr;
599 /* Should always be true. */
600 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
601 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
602 cfg->subvendor = val & 0xffff;
603 cfg->subdevice = val >> 16;
606 case PCIY_PCIX: /* PCI-X */
608 * Assume we have a PCI-X chipset if we have
609 * at least one PCI-PCI bridge with a PCI-X
610 * capability. Note that some systems with
611 * PCI-express or HT chipsets might match on
612 * this check as well.
614 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
617 case PCIY_EXPRESS: /* PCI-express */
619 * Assume we have a PCI-express chipset if we have
620 * at least one PCI-express device.
628 /* REG and WREG use carry through to next functions */
632 * PCI Vital Product Data
/* Max iterations to wait for the VPD address register's flag bit to flip. */
635 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one 32-bit VPD word: write the (4-byte-aligned) address with the
 * flag bit clear, then poll bit 15 until the hardware sets it to signal
 * that the data register holds valid data.
 */
638 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
640 int count = PCI_VPD_TIMEOUT;
642 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
644 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
646 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
649 DELAY(1); /* limit looping */
651 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one 32-bit VPD word: load the data register, then write the
 * address with the flag bit SET and poll until hardware clears it,
 * indicating the write completed.
 */
658 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
660 int count = PCI_VPD_TIMEOUT;
662 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
664 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
665 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
666 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
669 DELAY(1); /* limit looping */
676 #undef PCI_VPD_TIMEOUT
/* Cursor state for streaming VPD bytes out of 32-bit register reads. */
678 struct vpd_readstate {
/*
 * Return the next VPD byte through *data, refilling the 32-bit buffer
 * from pci_read_vpd_reg() (little-endian) whenever it runs dry, and
 * otherwise shifting the cached value right by one byte.
 */
688 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
693 if (vrs->bytesinval == 0) {
694 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
696 vrs->val = le32toh(reg);
698 byte = vrs->val & 0xff;
701 vrs->val = vrs->val >> 8;
702 byte = vrs->val & 0xff;
/*
 * Parse the device's entire VPD image into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array, and the read/write
 * (VPD-W) keyword array.  Implemented as a byte-at-a-time state
 * machine (state 0 = resource-tag header, 1 = ident string, 2/3 =
 * VPD-R keyword header/value, 5/6 = VPD-W keyword header/value).
 * The "RV" keyword carries a checksum covering the read-only section;
 * a bad checksum discards the VPD-R data.  On any I/O error the state
 * goes negative and all partially-parsed data is freed.
 * NOTE(review): many lines are elided in this extraction, so several
 * state transitions are not visible here.
 */
712 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
714 struct vpd_readstate vrs;
719 int alloc, off; /* alloc/off for RO/W arrays */
725 /* init vpd reader */
733 name = remain = i = 0; /* shut up stupid gcc */
734 alloc = off = 0; /* shut up stupid gcc */
735 dflen = 0; /* shut up stupid gcc */
738 if (vpd_nextbyte(&vrs, &byte)) {
/* Verbose per-byte trace (debug only). */
743 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
744 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
745 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
748 case 0: /* item name */
/* Large resource tag: 16-bit little-endian length follows. */
750 if (vpd_nextbyte(&vrs, &byte2)) {
755 if (vpd_nextbyte(&vrs, &byte2)) {
759 remain |= byte2 << 8;
/* VPD space is capped at 0x7f 4-byte words; longer is corrupt. */
760 if (remain > (0x7f*4 - vrs.off)) {
763 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
764 cfg->domain, cfg->bus, cfg->slot,
/* Small resource tag: name in bits 3..6. */
770 name = (byte >> 3) & 0xf;
773 case 0x2: /* String */
774 cfg->vpd.vpd_ident = malloc(remain + 1,
782 case 0x10: /* VPD-R */
785 cfg->vpd.vpd_ros = malloc(alloc *
786 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
790 case 0x11: /* VPD-W */
793 cfg->vpd.vpd_w = malloc(alloc *
794 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
798 default: /* Invalid data, abort */
804 case 1: /* Identifier String */
805 cfg->vpd.vpd_ident[i++] = byte;
808 cfg->vpd.vpd_ident[i] = '\0';
813 case 2: /* VPD-R Keyword Header */
/* Grow the keyword array geometrically as needed. */
815 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
816 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
817 M_DEVBUF, M_WAITOK | M_ZERO);
819 cfg->vpd.vpd_ros[off].keyword[0] = byte;
820 if (vpd_nextbyte(&vrs, &byte2)) {
824 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
825 if (vpd_nextbyte(&vrs, &byte2)) {
/* "RV" (checksum) keyword with a bad length invalidates everything. */
831 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
834 * if this happens, we can't trust the rest
838 "pci%d:%d:%d:%d: bad keyword length: %d\n",
839 cfg->domain, cfg->bus, cfg->slot,
844 } else if (dflen == 0) {
/* Zero-length value still gets a one-byte NUL buffer. */
845 cfg->vpd.vpd_ros[off].value = malloc(1 *
846 sizeof(*cfg->vpd.vpd_ros[off].value),
848 cfg->vpd.vpd_ros[off].value[0] = '\x00';
850 cfg->vpd.vpd_ros[off].value = malloc(
852 sizeof(*cfg->vpd.vpd_ros[off].value),
856 /* keep in sync w/ state 3's transistions */
857 if (dflen == 0 && remain == 0)
865 case 3: /* VPD-R Keyword Value */
866 cfg->vpd.vpd_ros[off].value[i++] = byte;
/* Accumulate/verify the "RV" checksum byte as it streams past. */
867 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
868 "RV", 2) == 0 && cksumvalid == -1) {
874 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
875 cfg->domain, cfg->bus,
876 cfg->slot, cfg->func,
885 /* keep in sync w/ state 2's transistions */
887 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
888 if (dflen == 0 && remain == 0) {
/* Section complete: shrink the array to the final count. */
889 cfg->vpd.vpd_rocnt = off;
890 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
891 off * sizeof(*cfg->vpd.vpd_ros),
892 M_DEVBUF, M_WAITOK | M_ZERO);
894 } else if (dflen == 0)
904 case 5: /* VPD-W Keyword Header */
906 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
907 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
908 M_DEVBUF, M_WAITOK | M_ZERO);
910 cfg->vpd.vpd_w[off].keyword[0] = byte;
911 if (vpd_nextbyte(&vrs, &byte2)) {
915 cfg->vpd.vpd_w[off].keyword[1] = byte2;
916 if (vpd_nextbyte(&vrs, &byte2)) {
920 cfg->vpd.vpd_w[off].len = dflen = byte2;
/* Record where this writable field lives so callers can update it. */
921 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
922 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
923 sizeof(*cfg->vpd.vpd_w[off].value),
927 /* keep in sync w/ state 6's transistions */
928 if (dflen == 0 && remain == 0)
936 case 6: /* VPD-W Keyword Value */
937 cfg->vpd.vpd_w[off].value[i++] = byte;
940 /* keep in sync w/ state 5's transistions */
942 cfg->vpd.vpd_w[off++].value[i++] = '\0';
943 if (dflen == 0 && remain == 0) {
944 cfg->vpd.vpd_wcnt = off;
945 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
946 off * sizeof(*cfg->vpd.vpd_w),
947 M_DEVBUF, M_WAITOK | M_ZERO);
949 } else if (dflen == 0)
954 printf("pci%d:%d:%d:%d: invalid state: %d\n",
955 cfg->domain, cfg->bus, cfg->slot, cfg->func,
/* Bad checksum or parse failure: the read-only data is untrustworthy. */
962 if (cksumvalid == 0 || state < -1) {
963 /* read-only data bad, clean up */
964 if (cfg->vpd.vpd_ros != NULL) {
965 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
966 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
967 free(cfg->vpd.vpd_ros, M_DEVBUF);
968 cfg->vpd.vpd_ros = NULL;
972 /* I/O error, clean up */
973 printf("pci%d:%d:%d:%d: failed to read VPD data.\n",
974 cfg->domain, cfg->bus, cfg->slot, cfg->func);
975 if (cfg->vpd.vpd_ident != NULL) {
976 free(cfg->vpd.vpd_ident, M_DEVBUF);
977 cfg->vpd.vpd_ident = NULL;
979 if (cfg->vpd.vpd_w != NULL) {
980 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
981 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
982 free(cfg->vpd.vpd_w, M_DEVBUF);
983 cfg->vpd.vpd_w = NULL;
/* Mark the cache valid so we never re-parse for this device. */
986 cfg->vpd.vpd_cached = 1;
/*
 * PCI_GET_VPD_IDENT method: return the cached VPD identifier string,
 * lazily parsing the device's VPD on first use (only if the device
 * actually has a VPD capability).
 */
992 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
994 struct pci_devinfo *dinfo = device_get_ivars(child);
995 pcicfgregs *cfg = &dinfo->cfg;
997 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
998 pci_read_vpd(device_get_parent(dev), cfg);
1000 *identptr = cfg->vpd.vpd_ident;
1002 if (*identptr == NULL)
/*
 * PCI_GET_VPD_READONLY method: look up a two-character keyword (e.g.
 * "PN", "SN") in the cached VPD read-only section and return a pointer
 * to its value.  Parses the VPD lazily like the ident method above.
 */
1009 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1012 struct pci_devinfo *dinfo = device_get_ivars(child);
1013 pcicfgregs *cfg = &dinfo->cfg;
1016 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1017 pci_read_vpd(device_get_parent(dev), cfg);
1019 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1020 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1021 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1022 *vptr = cfg->vpd.vpd_ros[i].value;
/* Loop fell through without a break => keyword was found above. */
1025 if (i != cfg->vpd.vpd_rocnt)
1033 * Return the offset in configuration space of the requested extended
1034 * capability entry or 0 if the specified capability was not found.
1037 pci_find_extcap_method(device_t dev, device_t child, int capability,
1040 struct pci_devinfo *dinfo = device_get_ivars(child);
1041 pcicfgregs *cfg = &dinfo->cfg;
1046 * Check the CAP_LIST bit of the PCI status register first.
1048 status = pci_read_config(child, PCIR_STATUS, 2);
1049 if (!(status & PCIM_STATUS_CAPPRESENT))
1053 * Determine the start pointer of the capabilities list.
/* Same header-type dispatch as pci_read_extcap() above. */
1055 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1061 ptr = PCIR_CAP_PTR_2;
1065 return (ENXIO); /* no extended capabilities support */
1067 ptr = pci_read_config(child, ptr, 1);
1070 * Traverse the capabilities list.
1073 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1078 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1085 * Support for MSI-X message interrupts.
/*
 * Program one 16-byte MSI-X table entry (64-bit message address in the
 * first two dwords, message data in the third) through the mapped
 * table BAR resource.
 */
1088 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1090 struct pci_devinfo *dinfo = device_get_ivars(dev);
1091 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1094 KASSERT(msix->msix_table_len > index, ("bogus index"));
1095 offset = msix->msix_table_offset + index * 16;
1096 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1097 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1098 bus_write_4(msix->msix_table_res, offset + 8, data);
1100 /* Enable MSI -> HT mapping. */
1101 pci_ht_map_msi(dev, address);
/*
 * Set the per-vector mask bit in the vector-control dword (offset 12
 * within the 16-byte table entry).  Read-modify-write only when the
 * bit is not already set, to avoid a redundant device write.
 */
1105 pci_mask_msix(device_t dev, u_int index)
1107 struct pci_devinfo *dinfo = device_get_ivars(dev);
1108 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1109 uint32_t offset, val;
1111 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1112 offset = msix->msix_table_offset + index * 16 + 12;
1113 val = bus_read_4(msix->msix_table_res, offset);
1114 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1115 val |= PCIM_MSIX_VCTRL_MASK;
1116 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Clear the per-vector mask bit in the vector-control dword; the exact
 * inverse of pci_mask_msix() above, again skipping the write when the
 * bit is already clear.
 */
1121 pci_unmask_msix(device_t dev, u_int index)
1123 struct pci_devinfo *dinfo = device_get_ivars(dev);
1124 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1125 uint32_t offset, val;
1127 KASSERT(msix->msix_table_len > index, ("bogus index"));
1128 offset = msix->msix_table_offset + index * 16 + 12;
1129 val = bus_read_4(msix->msix_table_res, offset);
1130 if (val & PCIM_MSIX_VCTRL_MASK) {
1131 val &= ~PCIM_MSIX_VCTRL_MASK;
1132 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Test the vector's bit in the MSI-X Pending Bit Array (one bit per
 * vector, packed 32 per dword).  Non-zero return means an interrupt
 * is pending for that vector.
 */
1137 pci_pending_msix(device_t dev, u_int index)
1139 struct pci_devinfo *dinfo = device_get_ivars(dev);
1140 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1141 uint32_t offset, bit;
1143 KASSERT(msix->msix_table_len > index, ("bogus index"));
1144 offset = msix->msix_pba_offset + (index / 32) * 4;
1145 bit = 1 << index % 32;
1146 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1150 * Restore MSI-X registers and table during resume. If MSI-X is
1151 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * The hardware table is volatile across suspend; rebuild it from the
 * driver's shadow copies (msix_table / msix_vectors) and re-write the
 * capability's control register.
 */
1155 pci_resume_msix(device_t dev)
1157 struct pci_devinfo *dinfo = device_get_ivars(dev);
1158 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1159 struct msix_table_entry *mte;
1160 struct msix_vector *mv;
1163 if (msix->msix_alloc > 0) {
1164 /* First, mask all vectors. */
1165 for (i = 0; i < msix->msix_msgnum; i++)
1166 pci_mask_msix(dev, i);
1168 /* Second, program any messages with at least one handler. */
1169 for (i = 0; i < msix->msix_table_len; i++) {
1170 mte = &msix->msix_table[i];
/* Skip table slots with no vector or no registered handler. */
1171 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1173 mv = &msix->msix_vectors[mte->mte_vector - 1];
1174 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1175 pci_unmask_msix(dev, i);
1178 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1179 msix->msix_ctrl, 2);
1183 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1184 * returned in *count. After this function returns, each message will be
1185 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * Preconditions checked, in order: no legacy IRQ (rid 0) allocated, no
 * MSI/MSI-X messages already allocated, chipset not MSI-blacklisted,
 * device has an MSI-X capability and MSI-X is globally enabled, and the
 * BARs holding the MSI-X table and PBA are mapped and active.
 * NOTE(review): several lines (error returns, bootverbose guards,
 * closing braces) are elided in this extraction.
 */
1188 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1190 struct pci_devinfo *dinfo = device_get_ivars(child);
1191 pcicfgregs *cfg = &dinfo->cfg;
1192 struct resource_list_entry *rle;
1193 int actual, error, i, irq, max;
1195 /* Don't let count == 0 get us into trouble. */
1199 /* If rid 0 is allocated, then fail. */
1200 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1201 if (rle != NULL && rle->res != NULL)
1204 /* Already have allocated messages? */
1205 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1208 /* If MSI is blacklisted for this system, fail. */
1209 if (pci_msi_blacklisted())
1212 /* MSI-X capability present? */
1213 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1216 /* Make sure the appropriate BARs are mapped. */
1217 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1218 cfg->msix.msix_table_bar);
1219 if (rle == NULL || rle->res == NULL ||
1220 !(rman_get_flags(rle->res) & RF_ACTIVE))
1222 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1223 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1224 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1225 cfg->msix.msix_pba_bar);
1226 if (rle == NULL || rle->res == NULL ||
1227 !(rman_get_flags(rle->res) & RF_ACTIVE))
1230 cfg->msix.msix_pba_res = rle->res;
1233 device_printf(child,
1234 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1235 *count, cfg->msix.msix_msgnum);
/* Ask the parent bridge for up to min(requested, supported) IRQs. */
1236 max = min(*count, cfg->msix.msix_msgnum);
1237 for (i = 0; i < max; i++) {
1238 /* Allocate a message. */
1239 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1242 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Report the result: a single IRQ, or ranges of contiguous IRQs. */
1248 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1250 device_printf(child, "using IRQ %lu for MSI-X\n",
1256 * Be fancy and try to print contiguous runs of
1257 * IRQ values as ranges. 'irq' is the previous IRQ.
1258 * 'run' is true if we are in a range.
1260 device_printf(child, "using IRQs %lu", rle->start);
1263 for (i = 1; i < actual; i++) {
1264 rle = resource_list_find(&dinfo->resources,
1265 SYS_RES_IRQ, i + 1);
1267 /* Still in a run? */
1268 if (rle->start == irq + 1) {
1274 /* Finish previous range. */
1280 /* Start new range. */
1281 printf(",%lu", rle->start);
1285 /* Unfinished range? */
1288 printf(" for MSI-X\n");
1292 /* Mask all vectors. */
1293 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1294 pci_mask_msix(child, i);
1296 /* Allocate and initialize vector data and virtual table. */
1297 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1298 M_DEVBUF, M_WAITOK | M_ZERO);
1299 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1300 M_DEVBUF, M_WAITOK | M_ZERO);
/* Default 1:1 mapping of table entries to vectors (see pci_remap_msix). */
1301 for (i = 0; i < actual; i++) {
1302 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1303 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1304 cfg->msix.msix_table[i].mte_vector = i + 1;
1307 /* Update control register to enable MSI-X. */
1308 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1309 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1310 cfg->msix.msix_ctrl, 2);
1312 /* Update counts of alloc'd messages. */
1313 cfg->msix.msix_alloc = actual;
1314 cfg->msix.msix_table_len = actual;
1320 * By default, pci_alloc_msix() will assign the allocated IRQ
1321 * resources consecutively to the first N messages in the MSI-X table.
1322 * However, device drivers may want to use different layouts if they
1323 * either receive fewer messages than they asked for, or they wish to
1324 * populate the MSI-X table sparsely. This method allows the driver
1325 * to specify what layout it wants. It must be called after a
1326 * successful pci_alloc_msix() but before any of the associated
1327 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1329 * The 'vectors' array contains 'count' message vectors. The array
1330 * maps directly to the MSI-X table in that index 0 in the array
1331 * specifies the vector for the first message in the MSI-X table, etc.
1332 * The vector value in each array index can either be 0 to indicate
1333 * that no vector should be assigned to a message slot, or it can be a
1334 * number from 1 to N (where N is the count returned from a
1335 * successful call to pci_alloc_msix()) to indicate which message
1336 * vector (IRQ) to be used for the corresponding message.
1338 * On successful return, each message with a non-zero vector will have
1339 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1340 * 1. Additionally, if any of the IRQs allocated via the previous
1341 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1342 * will be freed back to the system automatically.
1344 * For example, suppose a driver has a MSI-X table with 6 messages and
1345 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1346 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1347 * C. After the call to pci_alloc_msix(), the device will be setup to
1348 * have an MSI-X table of ABC--- (where - means no vector assigned).
1349 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1350 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1351 * be freed back to the system. This device will also have valid
1352 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1354 * In any case, the SYS_RES_IRQ rid X will always map to the message
1355 * at MSI-X table index X - 1 and will only be valid if a vector is
1356 * assigned to that table entry.
/*
 * Rearrange the already-allocated MSI-X vectors over the message table
 * as described by 'vectors' (semantics in the block comment above):
 * validate the request, tear down and rebuild the virtual table, free
 * vectors no longer referenced, and re-add SYS_RES_IRQ entries.
 */
1359 pci_remap_msix_method(device_t dev, device_t child, int count,
1360 const u_int *vectors)
1362 struct pci_devinfo *dinfo = device_get_ivars(child);
1363 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1364 struct resource_list_entry *rle;
1365 int i, irq, j, *used;
1368 * Have to have at least one message in the table but the
1369 * table can't be bigger than the actual MSI-X table in the
1372 if (count == 0 || count > msix->msix_msgnum)
1375 /* Sanity check the vectors: each entry must be 0 or 1..msix_alloc. */
1376 for (i = 0; i < count; i++)
1377 if (vectors[i] > msix->msix_alloc)
1381 * Make sure there aren't any holes in the vectors to be used.
1382 * It's a big pain to support it, and it doesn't really make
1383 * sense anyway. Also, at least one vector must be used.
1385 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1387 for (i = 0; i < count; i++)
1388 if (vectors[i] != 0)
1389 used[vectors[i] - 1] = 1;
1390 for (i = 0; i < msix->msix_alloc - 1; i++)
1391 if (used[i] == 0 && used[i + 1] == 1) {
1392 free(used, M_DEVBUF);
1396 free(used, M_DEVBUF);
1400 /* Make sure none of the resources are allocated. */
1401 for (i = 0; i < msix->msix_table_len; i++) {
1402 if (msix->msix_table[i].mte_vector == 0)
1404 if (msix->msix_table[i].mte_handlers > 0)
1406 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1407 KASSERT(rle != NULL, ("missing resource"));
1408 if (rle->res != NULL)
1412 /* Free the existing resource list entries. */
1413 for (i = 0; i < msix->msix_table_len; i++) {
1414 if (msix->msix_table[i].mte_vector == 0)
1416 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1420 * Build the new virtual table keeping track of which vectors are
1423 free(msix->msix_table, M_DEVBUF);
1424 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1425 M_DEVBUF, M_WAITOK | M_ZERO);
1426 for (i = 0; i < count; i++)
1427 msix->msix_table[i].mte_vector = vectors[i];
1428 msix->msix_table_len = count;
1430 /* Free any unused IRQs and resize the vectors array if necessary. */
1431 j = msix->msix_alloc - 1;
1433 struct msix_vector *vec;
1435 while (used[j] == 0) {
1436 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1437 msix->msix_vectors[j].mv_irq);
1440 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1442 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1444 free(msix->msix_vectors, M_DEVBUF);
1445 msix->msix_vectors = vec;
1446 msix->msix_alloc = j + 1;
1448 free(used, M_DEVBUF);
1450 /* Map the IRQs onto the rids. */
1451 for (i = 0; i < count; i++) {
1452 if (vectors[i] == 0)
/*
 * NOTE(review): vectors[] entries are 1-based (the hole check above
 * indexes used[vectors[i] - 1]) while msix_vectors[] is populated
 * 0-based in pci_alloc_msix(), so this lookup looks off by one;
 * expected msix_vectors[vectors[i] - 1].  Confirm against upstream
 * before changing.
 */
1454 irq = msix->msix_vectors[vectors[i]].mv_irq;
1455 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1460 device_printf(child, "Remapped MSI-X IRQs as: ");
1461 for (i = 0; i < count; i++) {
1464 if (vectors[i] == 0)
/* NOTE(review): same suspected off-by-one indexing as above. */
1468 msix->msix_vectors[vectors[i]].mv_irq);
/*
 * Release all MSI-X state for 'child': refuses (via the checks below)
 * while any message still has an active handler or an allocated
 * SYS_RES_IRQ resource, then disables MSI-X in the control register,
 * tears down the virtual table and hands each IRQ back to the parent
 * bridge.
 */
1477 pci_release_msix(device_t dev, device_t child)
1479 struct pci_devinfo *dinfo = device_get_ivars(child);
1480 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1481 struct resource_list_entry *rle;
1484 /* Do we have any messages to release? */
1485 if (msix->msix_alloc == 0)
1488 /* Make sure none of the resources are allocated. */
1489 for (i = 0; i < msix->msix_table_len; i++) {
1490 if (msix->msix_table[i].mte_vector == 0)
1492 if (msix->msix_table[i].mte_handlers > 0)
1494 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1495 KASSERT(rle != NULL, ("missing resource"));
1496 if (rle->res != NULL)
1500 /* Update control register to disable MSI-X. */
1501 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1502 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1503 msix->msix_ctrl, 2);
1505 /* Free the resource list entries. */
1506 for (i = 0; i < msix->msix_table_len; i++) {
1507 if (msix->msix_table[i].mte_vector == 0)
1509 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1511 free(msix->msix_table, M_DEVBUF);
1512 msix->msix_table_len = 0;
1514 /* Release the IRQs back to the parent bridge. */
1515 for (i = 0; i < msix->msix_alloc; i++)
1516 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1517 msix->msix_vectors[i].mv_irq);
1518 free(msix->msix_vectors, M_DEVBUF);
1519 msix->msix_alloc = 0;
1524 * Return the max supported MSI-X messages this device supports.
1525 * Basically, assuming the MD code can alloc messages, this function
1526 * should return the maximum value that pci_alloc_msix() can return.
1527 * Thus, it is subject to the tunables, etc.
/* See comment above: honors the pci_do_msix tunable and requires a
 * discovered MSI-X capability (msix_location != 0). */
1530 pci_msix_count_method(device_t dev, device_t child)
1532 struct pci_devinfo *dinfo = device_get_ivars(child);
1533 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1535 if (pci_do_msix && msix->msix_location != 0)
1536 return (msix->msix_msgnum);
1541 * HyperTransport MSI mapping control
/*
 * Toggle the HyperTransport MSI mapping window on 'dev'.  A non-zero
 * 'addr' whose top bits (>> 20) match the window's ht_msiaddr enables
 * the mapping; addr == 0 disables it.  No-op if the state already
 * matches.
 */
1544 pci_ht_map_msi(device_t dev, uint64_t addr)
1546 struct pci_devinfo *dinfo = device_get_ivars(dev);
1547 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1552 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1553 ht->ht_msiaddr >> 20 == addr >> 20) {
1554 /* Enable MSI -> HT mapping. */
1555 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1556 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1560 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1561 /* Disable MSI -> HT mapping. */
1562 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1563 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1569 * Support for MSI message signalled interrupts.
/*
 * Program the MSI capability of 'dev' with the given address/data pair
 * (writing the high address dword only for 64-bit capable devices),
 * set the MSI enable bit, and update any HT MSI mapping window.
 */
1572 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1574 struct pci_devinfo *dinfo = device_get_ivars(dev);
1575 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1577 /* Write data and address values. */
1578 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1579 address & 0xffffffff, 4);
1580 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1581 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1583 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1586 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1589 /* Enable MSI in the control register. */
1590 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1591 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1594 /* Enable MSI -> HT mapping. */
1595 pci_ht_map_msi(dev, address);
/* Inverse of pci_enable_msi(): tear down the HT mapping first, then
 * clear the MSI enable bit in the capability's control register. */
1599 pci_disable_msi(device_t dev)
1601 struct pci_devinfo *dinfo = device_get_ivars(dev);
1602 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1604 /* Disable MSI -> HT mapping. */
1605 pci_ht_map_msi(dev, 0);
1607 /* Disable MSI in the control register. */
1608 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1609 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1614 * Restore MSI registers during resume. If MSI is enabled then
1615 * restore the data and address registers in addition to the control
/* See comment above: rewrites address/data from the saved softc copies
 * only when MSI was enabled at suspend time; control register is always
 * restored. */
1619 pci_resume_msi(device_t dev)
1621 struct pci_devinfo *dinfo = device_get_ivars(dev);
1622 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1626 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1627 address = msi->msi_addr;
1628 data = msi->msi_data;
1629 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1630 address & 0xffffffff, 4);
1631 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1632 pci_write_config(dev, msi->msi_location +
1633 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1634 pci_write_config(dev, msi->msi_location +
1635 PCIR_MSI_DATA_64BIT, data, 2);
1637 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1640 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Ask the MD code for a fresh address/data pair for 'irq' (via
 * PCIB_MAP_MSI on the parent bridge) and reprogram whichever MSI
 * message or MSI-X table slots currently use that IRQ.
 */
1645 pci_remap_msi_irq(device_t dev, u_int irq)
1647 struct pci_devinfo *dinfo = device_get_ivars(dev);
1648 pcicfgregs *cfg = &dinfo->cfg;
1649 struct resource_list_entry *rle;
1650 struct msix_table_entry *mte;
1651 struct msix_vector *mv;
1657 bus = device_get_parent(dev);
1660 * Handle MSI first. We try to find this IRQ among our list
1661 * of MSI IRQs. If we find it, we request updated address and
1662 * data registers and apply the results.
1664 if (cfg->msi.msi_alloc > 0) {
1666 /* If we don't have any active handlers, nothing to do. */
1667 if (cfg->msi.msi_handlers == 0)
1669 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1670 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1672 if (rle->start == irq) {
1673 error = PCIB_MAP_MSI(device_get_parent(bus),
1674 dev, irq, &addr, &data);
/* Cycle MSI off/on so the new address/data pair takes effect. */
1677 pci_disable_msi(dev);
1678 dinfo->cfg.msi.msi_addr = addr;
1679 dinfo->cfg.msi.msi_data = data;
1680 pci_enable_msi(dev, addr, data);
1688 * For MSI-X, we check to see if we have this IRQ. If we do,
1689 * we request the updated mapping info. If that works, we go
1690 * through all the slots that use this IRQ and update them.
1692 if (cfg->msix.msix_alloc > 0) {
1693 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1694 mv = &cfg->msix.msix_vectors[i];
1695 if (mv->mv_irq == irq) {
1696 error = PCIB_MAP_MSI(device_get_parent(bus),
1697 dev, irq, &addr, &data);
1700 mv->mv_address = addr;
1702 for (j = 0; j < cfg->msix.msix_table_len; j++) {
1703 mte = &cfg->msix.msix_table[j];
1704 if (mte->mte_vector != i + 1)
1706 if (mte->mte_handlers == 0)
/* Mask while rewriting the table entry, then unmask. */
1708 pci_mask_msix(dev, j);
1709 pci_enable_msix(dev, j, addr, data);
1710 pci_unmask_msix(dev, j);
1721 * Returns true if the specified device is blacklisted because MSI
/* Per-device quirk check: scan pci_quirks[] for a
 * PCI_QUIRK_DISABLE_MSI entry matching this device's devid.
 * Short-circuits when the pci_honor_msi_blacklist tunable is off. */
1725 pci_msi_device_blacklisted(device_t dev)
1727 struct pci_quirk *q;
1729 if (!pci_honor_msi_blacklist)
1732 for (q = &pci_quirks[0]; q->devid; q++) {
1733 if (q->devid == pci_get_devid(dev) &&
1734 q->type == PCI_QUIRK_DISABLE_MSI)
1741 * Determine if MSI is blacklisted globally on this system. Currently,
1742 * we just check for blacklisted chipsets as represented by the
1743 * host-PCI bridge at device 0:0:0. In the future, it may become
1744 * necessary to check other system attributes, such as the kenv values
1745 * that give the motherboard manufacturer and model number.
/* See comment above: system-wide check keyed off the host-PCI bridge
 * at 0:0:0; non-PCIe/non-PCI-X chipsets are rejected wholesale. */
1748 pci_msi_blacklisted(void)
1752 if (!pci_honor_msi_blacklist)
1755 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1756 if (!(pcie_chipset || pcix_chipset))
1759 dev = pci_find_bsf(0, 0, 0);
1761 return (pci_msi_device_blacklisted(dev));
1766 * Attempt to allocate *count MSI messages. The actual number allocated is
1767 * returned in *count. After this function returns, each message will be
1768 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/* See comment above.  Side effects on success: SYS_RES_IRQ rids
 * 1..actual are added, MME is programmed into the control register,
 * and msi_alloc/msi_handlers are updated. */
1771 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1773 struct pci_devinfo *dinfo = device_get_ivars(child);
1774 pcicfgregs *cfg = &dinfo->cfg;
1775 struct resource_list_entry *rle;
1776 int actual, error, i, irqs[32];
1779 /* Don't let count == 0 get us into trouble. */
1783 /* If rid 0 is allocated, then fail. */
1784 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1785 if (rle != NULL && rle->res != NULL)
1788 /* Already have allocated messages? */
1789 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1792 /* If MSI is blacklisted for this system, fail. */
1793 if (pci_msi_blacklisted())
1796 /* MSI capability present? */
1797 if (cfg->msi.msi_location == 0 || !pci_do_msi)
1801 device_printf(child,
1802 "attempting to allocate %d MSI vectors (%d supported)\n",
1803 *count, cfg->msi.msi_msgnum);
1805 /* Don't ask for more than the device supports. */
1806 actual = min(*count, cfg->msi.msi_msgnum);
1808 /* Don't ask for more than 32 messages (irqs[] is sized to match). */
1809 actual = min(actual, 32);
1811 /* MSI requires power of 2 number of messages. */
1812 if (!powerof2(actual))
1816 /* Try to allocate N messages. */
1817 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
1818 cfg->msi.msi_msgnum, irqs);
1829 * We now have N actual messages mapped onto SYS_RES_IRQ
1830 * resources in the irqs[] array, so add new resources
1831 * starting at rid 1.
1833 for (i = 0; i < actual; i++)
1834 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1835 irqs[i], irqs[i], 1);
1839 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
1844 * Be fancy and try to print contiguous runs
1845 * of IRQ values as ranges. 'run' is true if
1846 * we are in a range.
1848 device_printf(child, "using IRQs %d", irqs[0]);
1850 for (i = 1; i < actual; i++) {
1852 /* Still in a run? */
1853 if (irqs[i] == irqs[i - 1] + 1) {
1858 /* Finish previous range. */
1860 printf("-%d", irqs[i - 1]);
1864 /* Start new range. */
1865 printf(",%d", irqs[i]);
1868 /* Unfinished range? */
1870 printf("-%d", irqs[actual - 1]);
1871 printf(" for MSI\n");
1875 /* Update control register with actual count (log2 encoded in MME). */
1876 ctrl = cfg->msi.msi_ctrl;
1877 ctrl &= ~PCIM_MSICTRL_MME_MASK;
1878 ctrl |= (ffs(actual) - 1) << 4;
1879 cfg->msi.msi_ctrl = ctrl;
1880 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1882 /* Update counts of alloc'd messages. */
1883 cfg->msi.msi_alloc = actual;
1884 cfg->msi.msi_handlers = 0;
1889 /* Release the MSI messages associated with this device. */
/* Release MSI (or MSI-X, tried first) messages for 'child'.  Refuses
 * (via the checks below) while handlers are registered or resources
 * are still allocated. */
1891 pci_release_msi_method(device_t dev, device_t child)
1893 struct pci_devinfo *dinfo = device_get_ivars(child);
1894 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1895 struct resource_list_entry *rle;
1896 int error, i, irqs[32];
1898 /* Try MSI-X first; ENODEV means no MSI-X was allocated. */
1899 error = pci_release_msix(dev, child);
1900 if (error != ENODEV)
1903 /* Do we have any messages to release? */
1904 if (msi->msi_alloc == 0)
1906 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
1908 /* Make sure none of the resources are allocated. */
1909 if (msi->msi_handlers > 0)
1911 for (i = 0; i < msi->msi_alloc; i++) {
1912 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1913 KASSERT(rle != NULL, ("missing MSI resource"));
1914 if (rle->res != NULL)
1916 irqs[i] = rle->start;
1919 /* Update control register with 0 count. */
1920 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
1921 ("%s: MSI still enabled", __func__));
1922 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
1923 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
1926 /* Release the messages. */
1927 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
1928 for (i = 0; i < msi->msi_alloc; i++)
1929 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1931 /* Update alloc count. */
1939 * Return the max supported MSI messages this device supports.
1940 * Basically, assuming the MD code can alloc messages, this function
1941 * should return the maximum value that pci_alloc_msi() can return.
1942 * Thus, it is subject to the tunables, etc.
/* See comment above: honors the pci_do_msi tunable and requires a
 * discovered MSI capability (msi_location != 0). */
1945 pci_msi_count_method(device_t dev, device_t child)
1947 struct pci_devinfo *dinfo = device_get_ivars(child);
1948 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1950 if (pci_do_msi && msi->msi_location != 0)
1951 return (msi->msi_msgnum);
1955 /* free pcicfgregs structure and all depending data structures */
/* Free a pci_devinfo and everything hanging off it (VPD strings,
 * read-only and writable keyword arrays), unlink it from the global
 * device list, and bump the generation count. */
1958 pci_freecfg(struct pci_devinfo *dinfo)
1960 struct devlist *devlist_head;
1963 devlist_head = &pci_devq;
1965 if (dinfo->cfg.vpd.vpd_reg) {
1966 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
1967 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
1968 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
1969 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
1970 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
1971 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
1972 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
1974 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
1975 free(dinfo, M_DEVBUF);
1977 /* increment the generation count */
1980 /* we're losing one device */
1986 * PCI power management
/*
 * Transition 'child' to PCI power state 'state' (PCI_POWERSTATE_D0..D3).
 * Returns EOPNOTSUPP when the device has no PM capability or does not
 * support the requested state.
 */
1989 pci_set_powerstate_method(device_t dev, device_t child, int state)
1991 struct pci_devinfo *dinfo = device_get_ivars(child);
1992 pcicfgregs *cfg = &dinfo->cfg;
1994 int result, oldstate, highest, delay;
1996 if (cfg->pp.pp_cap == 0)
1997 return (EOPNOTSUPP);
2000 * Optimize a no state change request away. While it would be OK to
2001 * write to the hardware in theory, some devices have shown odd
2002 * behavior when going from D3 -> D3.
2004 oldstate = pci_get_powerstate(child);
2005 if (oldstate == state)
2009 * The PCI power management specification states that after a state
2010 * transition between PCI power states, system software must
2011 * guarantee a minimal delay before the function accesses the device.
2012 * Compute the worst case delay that we need to guarantee before we
2013 * access the device. Many devices will be responsive much more
2014 * quickly than this delay, but there are some that don't respond
2015 * instantly to state changes. Transitions to/from D3 state require
2016 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2017 * is done below with DELAY rather than a sleeper function because
2018 * this function can be called from contexts where we cannot sleep.
2020 highest = (oldstate > state) ? oldstate : state;
2021 if (highest == PCI_POWERSTATE_D3)
2023 else if (highest == PCI_POWERSTATE_D2)
2027 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2028 & ~PCIM_PSTAT_DMASK;
2031 case PCI_POWERSTATE_D0:
2032 status |= PCIM_PSTAT_D0;
2034 case PCI_POWERSTATE_D1:
2035 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2036 return (EOPNOTSUPP);
2037 status |= PCIM_PSTAT_D1;
2039 case PCI_POWERSTATE_D2:
2040 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2041 return (EOPNOTSUPP);
2042 status |= PCIM_PSTAT_D2;
2044 case PCI_POWERSTATE_D3:
2045 status |= PCIM_PSTAT_D3;
2053 "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
2054 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2055 dinfo->cfg.func, oldstate, state);
2057 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/* Read the current power state from the PM status register; devices
 * without a PM capability are reported as always being in D0. */
2064 pci_get_powerstate_method(device_t dev, device_t child)
2066 struct pci_devinfo *dinfo = device_get_ivars(child);
2067 pcicfgregs *cfg = &dinfo->cfg;
2071 if (cfg->pp.pp_cap != 0) {
2072 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2073 switch (status & PCIM_PSTAT_DMASK) {
2075 result = PCI_POWERSTATE_D0;
2078 result = PCI_POWERSTATE_D1;
2081 result = PCI_POWERSTATE_D2;
2084 result = PCI_POWERSTATE_D3;
2087 result = PCI_POWERSTATE_UNKNOWN;
2091 /* No support, device is always at D0 */
2092 result = PCI_POWERSTATE_D0;
2098 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the PCI command register. */
static __inline void
2102 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2106 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2108 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the PCI command register. */
static __inline void
2112 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2116 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2118 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Set the bus-master enable bit in the child's command register. */
2122 pci_enable_busmaster_method(device_t dev, device_t child)
2124 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Clear the bus-master enable bit in the child's command register. */
2129 pci_disable_busmaster_method(device_t dev, device_t child)
2131 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Enable I/O port or memory decoding ('space' is SYS_RES_IOPORT or
 * SYS_RES_MEMORY) and read the command register back to confirm the
 * bit stuck. */
2136 pci_enable_io_method(device_t dev, device_t child, int space)
2146 case SYS_RES_IOPORT:
2147 bit = PCIM_CMD_PORTEN;
2150 case SYS_RES_MEMORY:
2151 bit = PCIM_CMD_MEMEN;
2157 pci_set_command_bit(dev, child, bit);
2158 /* Some devices seem to need a brief stall here; what to do? */
2159 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2162 device_printf(child, "failed to enable %s mapping!\n", error);
/* Inverse of pci_enable_io_method(): disable I/O port or memory
 * decoding and verify the bit actually cleared. */
2167 pci_disable_io_method(device_t dev, device_t child, int space)
2177 case SYS_RES_IOPORT:
2178 bit = PCIM_CMD_PORTEN;
2181 case SYS_RES_MEMORY:
2182 bit = PCIM_CMD_MEMEN;
2188 pci_clear_command_bit(dev, child, bit);
2189 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2190 if (command & bit) {
2191 device_printf(child, "failed to disable %s mapping!\n", error);
2198 * New style pci driver. Parent device is either a pci-host-bridge or a
2199 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/* Dump a multi-line description of a newly found device: IDs, location,
 * class, command/status, timing, interrupt, and any power-management,
 * MSI and MSI-X capability details. */
2203 pci_print_verbose(struct pci_devinfo *dinfo)
2207 pcicfgregs *cfg = &dinfo->cfg;
2209 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2210 cfg->vendor, cfg->device, cfg->revid);
2211 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2212 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2213 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2214 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2216 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2217 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2218 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2219 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2220 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2221 if (cfg->intpin > 0)
2222 printf("\tintpin=%c, irq=%d\n",
2223 cfg->intpin +'a' -1, cfg->intline);
2224 if (cfg->pp.pp_cap) {
2227 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2228 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2229 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2230 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2231 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2232 status & PCIM_PSTAT_DMASK);
2234 if (cfg->msi.msi_location) {
2237 ctrl = cfg->msi.msi_ctrl;
2238 printf("\tMSI supports %d message%s%s%s\n",
2239 cfg->msi.msi_msgnum,
2240 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2241 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2242 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2244 if (cfg->msix.msix_location) {
2245 printf("\tMSI-X supports %d message%s ",
2246 cfg->msix.msix_msgnum,
2247 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2248 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2249 printf("in map 0x%x\n",
2250 cfg->msix.msix_table_bar);
2252 printf("in maps 0x%x and 0x%x\n",
2253 cfg->msix.msix_table_bar,
2254 cfg->msix.msix_pba_bar);
/* Is I/O port decoding enabled in the device's command register? */
2260 pci_porten(device_t pcib, int b, int s, int f)
2262 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2263 & PCIM_CMD_PORTEN) != 0;
/* Is memory decoding enabled in the device's command register? */
2267 pci_memen(device_t pcib, int b, int s, int f)
2269 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2270 & PCIM_CMD_MEMEN) != 0;
2274 * Add a resource based on a pci map register. Return 1 if the map
2275 * register is a 32bit map register or 2 if it is a 64bit register.
/* See comment above.  Probes the BAR at 'reg' by writing all-ones and
 * reading back the size mask, adds a matching resource-list entry, and
 * pre-reserves it from the parent. */
2278 pci_add_map(device_t pcib, device_t bus, device_t dev,
2279 int b, int s, int f, int reg, struct resource_list *rl, int force,
2284 pci_addr_t start, end, count;
2291 struct resource *res;
2293 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2294 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
2295 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2296 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
2298 if (PCI_BAR_MEM(map)) {
2299 type = SYS_RES_MEMORY;
2300 if (map & PCIM_BAR_MEM_PREFETCH)
2303 type = SYS_RES_IOPORT;
2304 ln2size = pci_mapsize(testval);
2305 ln2range = pci_maprange(testval);
2306 base = pci_mapbase(map);
2307 barlen = ln2range == 64 ? 2 : 1;
2310 * For I/O registers, if bottom bit is set, and the next bit up
2311 * isn't clear, we know we have a BAR that doesn't conform to the
2312 * spec, so ignore it. Also, sanity check the size of the data
2313 * areas to the type of memory involved. Memory must be at least
2314 * 16 bytes in size, while I/O ranges must be at least 4.
2316 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2318 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
2319 (type == SYS_RES_IOPORT && ln2size < 2))
2323 /* Read the other half of a 64bit map register */
2324 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
2326 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2327 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
2328 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2329 printf(", port disabled\n");
2330 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2331 printf(", memory disabled\n");
2333 printf(", enabled\n");
2337 * If base is 0, then we have problems. It is best to ignore
2338 * such entries for the moment. These will be allocated later if
2339 * the driver specifically requests them. However, some
2340 * removable busses look better when all resources are allocated,
2341 * so allow '0' to be overridden.
2343 * Similarly treat maps whose value is the same as the test value
2344 * read back. These maps have had all f's written to them by the
2345 * BIOS in an attempt to disable the resources.
2347 if (!force && (base == 0 || map == testval))
2349 if ((u_long)base != base) {
2351 "pci%d:%d:%d:%d bar %#x too many address bits",
2352 pci_get_domain(dev), b, s, f, reg);
2357 * This code theoretically does the right thing, but has
2358 * undesirable side effects in some cases where peripherals
2359 * respond oddly to having these bits enabled. Let the user
2360 * be able to turn them off (since pci_enable_io_modes is 1 by
2363 if (pci_enable_io_modes) {
2364 /* Turn on resources that have been left off by a lazy BIOS */
2365 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
2366 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2367 cmd |= PCIM_CMD_PORTEN;
2368 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2370 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
2371 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2372 cmd |= PCIM_CMD_MEMEN;
2373 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2376 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2378 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2382 count = 1 << ln2size;
2383 if (base == 0 || base == pci_mapbase(testval)) {
2384 start = 0; /* Let the parent decide. */
2388 end = base + (1 << ln2size) - 1;
2390 resource_list_add(rl, type, reg, start, end, count);
2393 * Try to allocate the resource for this BAR from our parent
2394 * so that this resource range is already reserved. The
2395 * driver for this device will later inherit this resource in
2396 * pci_alloc_resource().
2398 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
2399 prefetch ? RF_PREFETCHABLE : 0);
2402 * If the allocation fails, clear the BAR and delete
2403 * the resource list entry to force
2404 * pci_alloc_resource() to allocate resources from the
2407 resource_list_delete(rl, type, reg);
2410 start = rman_get_start(res);
2411 pci_write_config(dev, reg, start, 4);
2413 pci_write_config(dev, reg + 4, start >> 32, 4);
2418 * For ATA devices we need to decide early what addressing mode to use.
2419 * Legacy demands that the primary and secondary ATA ports sits on the
2420 * same addresses that old ISA hardware did. This dictates that we use
2421 * those addresses and ignore the BAR's if we cannot set PCI native
/* See comment above.  BARs 0-3 are used only in native mode; legacy
 * mode hard-codes the ISA-compatible port ranges.  BAR 4/5 (bus master
 * DMA) are probed unconditionally. */
2425 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
2426 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
2428 int rid, type, progif;
2430 /* if this device supports PCI native addressing use it */
2431 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2432 if ((progif & 0x8a) == 0x8a) {
2433 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2434 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2435 printf("Trying ATA native PCI addressing mode\n");
2436 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2440 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2441 type = SYS_RES_IOPORT;
2442 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2443 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
2444 prefetchmask & (1 << 0));
2445 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
2446 prefetchmask & (1 << 1));
2449 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2450 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
2453 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2454 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
2457 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2458 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
2459 prefetchmask & (1 << 2));
2460 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
2461 prefetchmask & (1 << 3));
2464 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2465 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
2468 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2469 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
2472 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
2473 prefetchmask & (1 << 4));
2474 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2475 prefetchmask & (1 << 5));
/*
 * Pick an IRQ for 'dev' in priority order: a user tunable
 * (hw.pci<dom>.<bus>.<slot>.INT<pin>.irq), the bus's router (or the
 * existing intline, depending on force_route), then record it as the
 * rid-0 SYS_RES_IRQ entry and keep the INTLINE register in sync.
 */
2479 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2481 struct pci_devinfo *dinfo = device_get_ivars(dev);
2482 pcicfgregs *cfg = &dinfo->cfg;
2483 char tunable_name[64];
2486 /* Has to have an intpin to have an interrupt. */
2487 if (cfg->intpin == 0)
2490 /* Let the user override the IRQ with a tunable. */
2491 irq = PCI_INVALID_IRQ;
2492 snprintf(tunable_name, sizeof(tunable_name),
2493 "hw.pci%d.%d.%d.INT%c.irq",
2494 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
2495 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2496 irq = PCI_INVALID_IRQ;
2499 * If we didn't get an IRQ via the tunable, then we either use the
2500 * IRQ value in the intline register or we ask the bus to route an
2501 * interrupt for us. If force_route is true, then we only use the
2502 * value in the intline register if the bus was unable to assign an
2505 if (!PCI_INTERRUPT_VALID(irq)) {
2506 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2507 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2508 if (!PCI_INTERRUPT_VALID(irq))
2512 /* If after all that we don't have an IRQ, just bail. */
2513 if (!PCI_INTERRUPT_VALID(irq))
2516 /* Update the config register if it changed. */
2517 if (irq != cfg->intline) {
2519 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2522 /* Add this IRQ as rid 0 interrupt resource. */
2523 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
/* Walk a device's BARs (with ATA special-casing), add quirked map
 * registers, and set up its interrupt resource. */
2527 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
2530 struct pci_devinfo *dinfo = device_get_ivars(dev);
2531 pcicfgregs *cfg = &dinfo->cfg;
2532 struct resource_list *rl = &dinfo->resources;
2533 struct pci_quirk *q;
2536 pcib = device_get_parent(bus);
2542 /* ATA devices need special map treatment */
2543 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2544 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
2545 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2546 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2547 !pci_read_config(dev, PCIR_BAR(2), 4))) )
2548 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
/* pci_add_map() returns 1 or 2 (64-bit BARs consume two slots). */
2550 for (i = 0; i < cfg->nummaps;)
2551 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2552 rl, force, prefetchmask & (1 << i));
2555 * Add additional, quirked resources.
2557 for (q = &pci_quirks[0]; q->devid; q++) {
2558 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2559 && q->type == PCI_QUIRK_MAP_REG)
2560 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2564 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2565 #ifdef __PCI_REROUTE_INTERRUPT
2567 * Try to re-route interrupts. Sometimes the BIOS or
2568 * firmware may leave bogus values in these registers.
2569 * If the re-route fails, then just stick with what we
2572 pci_assign_interrupt(bus, dev, 1);
2574 pci_assign_interrupt(bus, dev, 0);
/*
 * Scan every slot/function on the given bus and add a child device for
 * each function that responds to config-space reads.
 * NOTE(review): extract is missing lines (loop braces, pcifunchigh = 0
 * reset, 'continue' bodies); treat as a partial view.
 */
2580 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
2582 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2583 device_t pcib = device_get_parent(dev);
2584 struct pci_devinfo *dinfo;
2586 int s, f, pcifunchigh;
2589 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2590 ("dinfo_size too small"));
2591 maxslots = PCIB_MAXSLOTS(pcib);
2592 for (s = 0; s <= maxslots; s++) {
2596 hdrtype = REG(PCIR_HDRTYPE, 1);
/* Skip slots whose header type is unknown/invalid. */
2597 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Multi-function bit set: probe functions 1..PCI_FUNCMAX as well. */
2599 if (hdrtype & PCIM_MFDEV)
2600 pcifunchigh = PCI_FUNCMAX;
2601 for (f = 0; f <= pcifunchigh; f++) {
2602 dinfo = pci_read_device(pcib, domain, busno, s, f,
2604 if (dinfo != NULL) {
2605 pci_add_child(dev, dinfo);
/*
 * Attach one discovered PCI function to the newbus tree: create the
 * device_t, hook up its ivars and resource list, snapshot then restore
 * its config space, and enumerate its resources.
 */
2613 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2615 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2616 device_set_ivars(dinfo->cfg.dev, dinfo);
2617 resource_list_init(&dinfo->resources);
/* Save config now so a later restore has a known-good baseline. */
2618 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2619 pci_cfg_restore(dinfo->cfg.dev, dinfo);
2620 pci_print_verbose(dinfo);
2621 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/*
 * Generic probe for the PCI bus driver; sets the description and (per
 * the comment below) returns a low priority so subclasses can win.
 */
2625 pci_probe(device_t dev)
2628 device_set_desc(dev, "PCI bus");
2630 /* Allow other subclasses to override this driver. */
/*
 * Attach the PCI bus: ask the parent bridge for our domain/bus numbers
 * (unit numbers are unreliable with multiple domains), enumerate the
 * children, then let the generic bus code attach them.
 */
2635 pci_attach(device_t dev)
2640 * Since there can be multiple independantly numbered PCI
2641 * busses on systems with multiple PCI domains, we can't use
2642 * the unit number to decide which bus we are probing. We ask
2643 * the parent pcib what our domain and bus numbers are.
2645 domain = pcib_get_domain(dev);
2646 busno = pcib_get_bus(dev);
2648 device_printf(dev, "domain=%d, physical bus=%d\n",
2651 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
2653 return (bus_generic_attach(dev));
/*
 * Suspend the bus: save each child's config space, suspend the
 * children, then (when power management is enabled and ACPI is
 * present) put attached type-0 devices into D3 or the ACPI-suggested
 * sleep state.
 */
2657 pci_suspend(device_t dev)
2659 int dstate, error, i, numdevs;
2660 device_t acpi_dev, child, *devlist;
2661 struct pci_devinfo *dinfo;
2664 * Save the PCI configuration space for each child and set the
2665 * device in the appropriate power state for this sleep state.
/* acpi_dev stays NULL when pci_do_power_resume is off, skipping the
 * power-down loop below. */
2668 if (pci_do_power_resume)
2669 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2670 device_get_children(dev, &devlist, &numdevs);
2671 for (i = 0; i < numdevs; i++) {
2673 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2674 pci_cfg_save(child, dinfo, 0);
2677 /* Suspend devices before potentially powering them down. */
2678 error = bus_generic_suspend(dev);
/* NOTE(review): extract is missing the error-return path after this
 * free(); presumably it bails out on suspend failure. */
2680 free(devlist, M_TEMP);
2685 * Always set the device to D3. If ACPI suggests a different
2686 * power state, use it instead. If ACPI is not present, the
2687 * firmware is responsible for managing device power. Skip
2688 * children who aren't attached since they are powered down
2689 * separately. Only manage type 0 devices for now.
2691 for (i = 0; acpi_dev && i < numdevs; i++) {
2693 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2694 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2695 dstate = PCI_POWERSTATE_D3;
2696 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2697 pci_set_powerstate(child, dstate);
2700 free(devlist, M_TEMP);
/*
 * Resume the bus: power each attached type-0 child back to D0 (via
 * ACPI when present), restore its saved config space, then resume the
 * children generically.
 */
2705 pci_resume(device_t dev)
2708 device_t acpi_dev, child, *devlist;
2709 struct pci_devinfo *dinfo;
2712 * Set each child to D0 and restore its PCI configuration space.
2715 if (pci_do_power_resume)
2716 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2717 device_get_children(dev, &devlist, &numdevs);
2718 for (i = 0; i < numdevs; i++) {
2720 * Notify ACPI we're going to D0 but ignore the result. If
2721 * ACPI is not present, the firmware is responsible for
2722 * managing device power. Only manage type 0 devices for now.
2725 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2726 if (acpi_dev && device_is_attached(child) &&
2727 dinfo->cfg.hdrtype == 0) {
2728 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2729 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2732 /* Now the device is powered up, restore its config space. */
2733 pci_cfg_restore(child, dinfo);
2735 free(devlist, M_TEMP);
2736 return (bus_generic_resume(dev));
/*
 * Locate the preloaded "pci_vendor_data" module (the flat-text vendor/
 * device name database) and record its address and size, terminating
 * it with a newline so the parser cannot run off the end.
 * NOTE(review): the preload_search_info() results are dereferenced
 * without a NULL check in this extract — confirm against full source.
 */
2740 pci_load_vendor_data(void)
2742 caddr_t vendordata, info;
2744 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2745 info = preload_search_info(vendordata, MODINFO_ADDR);
2746 pci_vendordata = *(char **)info;
2747 info = preload_search_info(vendordata, MODINFO_SIZE);
2748 pci_vendordata_size = *(size_t *)info;
2749 /* terminate the database */
2750 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new driver is registered: reprobe every child that has
 * no driver yet, restoring config space first (the device may have
 * been powered down) and saving it again if the probe/attach fails.
 */
2755 pci_driver_added(device_t dev, driver_t *driver)
2760 struct pci_devinfo *dinfo;
2764 device_printf(dev, "driver added\n");
2765 DEVICE_IDENTIFY(driver, dev);
2766 device_get_children(dev, &devlist, &numdevs);
2767 for (i = 0; i < numdevs; i++) {
/* Only reprobe children that currently have no driver attached. */
2769 if (device_get_state(child) != DS_NOTPRESENT)
2771 dinfo = device_get_ivars(child);
2772 pci_print_verbose(dinfo);
2774 printf("pci%d:%d:%d:%d: reprobing on driver added\n",
2775 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2777 pci_cfg_restore(child, dinfo);
/* On failed attach, power the device back down (setstate = 1). */
2778 if (device_probe_and_attach(child) != 0)
2779 pci_cfg_save(child, dinfo, 1);
2781 free(devlist, M_TEMP);
/*
 * Bus method: hook up an interrupt handler for a child. After the
 * generic setup succeeds, program the MSI or MSI-X message for the
 * vector (mapping it via the parent bridge on first use) and flip the
 * INTxDIS command bit accordingly. Handler reference counts
 * (msi_handlers / mte_handlers) track when messages can be disabled.
 * NOTE(review): extract is missing error-path lines and several braces.
 */
2785 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
2786 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
2788 struct pci_devinfo *dinfo;
2789 struct msix_table_entry *mte;
2790 struct msix_vector *mv;
2796 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
2801 /* If this is not a direct child, just bail out. */
2802 if (device_get_parent(child) != dev) {
2807 rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt; rid > 0 is an MSI/MSI-X vector. */
2809 /* Make sure that INTx is enabled */
2810 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
2813 * Check to see if the interrupt is MSI or MSI-X.
2814 * Ask our parent to map the MSI and give
2815 * us the address and data register values.
2816 * If we fail for some reason, teardown the
2817 * interrupt handler.
2819 dinfo = device_get_ivars(child);
2820 if (dinfo->cfg.msi.msi_alloc > 0) {
/* First handler for MSI: map the vector and program the device. */
2821 if (dinfo->cfg.msi.msi_addr == 0) {
2822 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
2823 ("MSI has handlers, but vectors not mapped"));
2824 error = PCIB_MAP_MSI(device_get_parent(dev),
2825 child, rman_get_start(irq), &addr, &data);
2828 dinfo->cfg.msi.msi_addr = addr;
2829 dinfo->cfg.msi.msi_data = data;
2830 pci_enable_msi(child, addr, data);
2832 dinfo->cfg.msi.msi_handlers++;
2834 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
2835 ("No MSI or MSI-X interrupts allocated"));
2836 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
2837 ("MSI-X index too high"));
/* MSI-X: table entries/vectors are 1-based by rid, 0-based in arrays. */
2838 mte = &dinfo->cfg.msix.msix_table[rid - 1];
2839 KASSERT(mte->mte_vector != 0, ("no message vector"));
2840 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
2841 KASSERT(mv->mv_irq == rman_get_start(irq),
2843 if (mv->mv_address == 0) {
2844 KASSERT(mte->mte_handlers == 0,
2845 ("MSI-X table entry has handlers, but vector not mapped"));
2846 error = PCIB_MAP_MSI(device_get_parent(dev),
2847 child, rman_get_start(irq), &addr, &data);
2850 mv->mv_address = addr;
2853 if (mte->mte_handlers == 0) {
2854 pci_enable_msix(child, rid - 1, mv->mv_address,
2856 pci_unmask_msix(child, rid - 1);
2858 mte->mte_handlers++;
2861 /* Make sure that INTx is disabled if we are using MSI/MSIX */
2862 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Mapping failed: undo the generic setup done above. */
2865 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus method: tear down a child's interrupt handler. Mirrors
 * pci_setup_intr(): re-disable INTx, decrement the MSI/MSI-X handler
 * refcount, and disable/mask the message when the count hits zero,
 * before delegating to the generic teardown.
 */
2875 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
2878 struct msix_table_entry *mte;
2879 struct resource_list_entry *rle;
2880 struct pci_devinfo *dinfo;
2883 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
2886 /* If this isn't a direct child, just bail out */
2887 if (device_get_parent(child) != dev)
2888 return(bus_generic_teardown_intr(dev, child, irq, cookie));
2890 rid = rman_get_rid(irq);
2893 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
2896 * Check to see if the interrupt is MSI or MSI-X. If so,
2897 * decrement the appropriate handlers count and mask the
2898 * MSI-X message, or disable MSI messages if the count
2901 dinfo = device_get_ivars(child);
/* Sanity: the resource being torn down must be the one we track. */
2902 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
2903 if (rle->res != irq)
2905 if (dinfo->cfg.msi.msi_alloc > 0) {
2906 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
2907 ("MSI-X index too high"));
2908 if (dinfo->cfg.msi.msi_handlers == 0)
2910 dinfo->cfg.msi.msi_handlers--;
2911 if (dinfo->cfg.msi.msi_handlers == 0)
2912 pci_disable_msi(child);
2914 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
2915 ("No MSI or MSI-X interrupts allocated"));
2916 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
2917 ("MSI-X index too high"));
2918 mte = &dinfo->cfg.msix.msix_table[rid - 1];
2919 if (mte->mte_handlers == 0)
2921 mte->mte_handlers--;
2922 if (mte->mte_handlers == 0)
2923 pci_mask_msix(child, rid - 1);
2926 error = bus_generic_teardown_intr(dev, child, irq, cookie);
2929 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus method: print the boot-time one-liner for a child, listing its
 * port/memory/IRQ resources, flags, and slot.function location.
 * Returns the accumulated character count, per bus_print_child
 * convention.
 */
2934 pci_print_child(device_t dev, device_t child)
2936 struct pci_devinfo *dinfo;
2937 struct resource_list *rl;
2940 dinfo = device_get_ivars(child);
2941 rl = &dinfo->resources;
2943 retval += bus_print_child_header(dev, child);
2945 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
2946 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
2947 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
2948 if (device_get_flags(dev))
2949 retval += printf(" flags %#x", device_get_flags(dev));
2951 retval += printf(" at device %d.%d", pci_get_slot(child),
2952 pci_get_function(child));
2954 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable name table used by
 * pci_probe_nomatch() to describe devices no driver claimed.
 * A subclass of -1 is the catch-all description for the whole class.
 * NOTE(review): the struct declaration head is not visible in this
 * extract; only the initializer appears below.
 */
2964 } pci_nomatch_tab[] = {
2965 {PCIC_OLD, -1, "old"},
2966 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
2967 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
2968 {PCIC_STORAGE, -1, "mass storage"},
2969 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
2970 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
2971 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
2972 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
2973 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
2974 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
2975 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
2976 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
2977 {PCIC_NETWORK, -1, "network"},
2978 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
2979 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
2980 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
2981 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
2982 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
2983 {PCIC_DISPLAY, -1, "display"},
2984 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
2985 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
2986 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
2987 {PCIC_MULTIMEDIA, -1, "multimedia"},
2988 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
2989 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
2990 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
2991 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
2992 {PCIC_MEMORY, -1, "memory"},
2993 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
2994 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
2995 {PCIC_BRIDGE, -1, "bridge"},
2996 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
2997 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
2998 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
2999 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3000 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3001 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3002 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3003 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3004 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3005 {PCIC_SIMPLECOMM, -1, "simple comms"},
3006 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3007 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3008 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3009 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3010 {PCIC_BASEPERIPH, -1, "base peripheral"},
3011 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3012 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3013 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3014 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3015 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3016 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3017 {PCIC_INPUTDEV, -1, "input device"},
3018 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3019 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3020 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3021 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3022 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3023 {PCIC_DOCKING, -1, "docking station"},
3024 {PCIC_PROCESSOR, -1, "processor"},
3025 {PCIC_SERIALBUS, -1, "serial bus"},
3026 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3027 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3028 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3029 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3030 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3031 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3032 {PCIC_WIRELESS, -1, "wireless controller"},
3033 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3034 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3035 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3036 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3037 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3038 {PCIC_SATCOM, -1, "satellite communication"},
3039 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3040 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3041 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3042 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3043 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3044 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3045 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3046 {PCIC_DASP, -1, "dasp"},
3047 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Bus method: announce a child no driver claimed. Prefer a name from
 * the loaded vendor database; otherwise fall back to the class (cp)
 * and subclass (scp) strings from pci_nomatch_tab. Finally save
 * config space and potentially power the device down (setstate = 1).
 */
3052 pci_probe_nomatch(device_t dev, device_t child)
3055 char *cp, *scp, *device;
3058 * Look for a listing for this device in a loaded device database.
3060 if ((device = pci_describe_device(child)) != NULL) {
3061 device_printf(dev, "<%s>", device);
3062 free(device, M_DEVBUF);
3065 * Scan the class/subclass descriptions for a general
3070 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3071 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* subclass == -1 entries carry the generic class description. */
3072 if (pci_nomatch_tab[i].subclass == -1) {
3073 cp = pci_nomatch_tab[i].desc;
3074 } else if (pci_nomatch_tab[i].subclass ==
3075 pci_get_subclass(child)) {
3076 scp = pci_nomatch_tab[i].desc;
3080 device_printf(dev, "<%s%s%s>",
3082 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3085 printf(" at device %d.%d (no driver attached)\n",
3086 pci_get_slot(child), pci_get_function(child));
3087 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
3092 * Parse the PCI device database, if loaded, and return a pointer to a
3093 * description of the device.
3095 * The database is flat text formatted as follows:
3097 * Any line not in a valid format is ignored.
3098 * Lines are terminated with newline '\n' characters.
3100 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3103 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3104 * - devices cannot be listed without a corresponding VENDOR line.
3105 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3106 * another TAB, then the device name.
3110 * Assuming (ptr) points to the beginning of a line in the database,
3111 * return the vendor or device and description of the next entry.
3112 * The value of (vendor) or (device) inappropriate for the entry type
3113 * is set to -1. Returns nonzero at the end of the database.
3115 * Note that this is slightly unrobust in the face of corrupt data;
3116 * we attempt to safeguard against this by spamming the end of the
3117 * database with a newline when we initialise.
/*
 * Parse one line of the flat-text vendor database (format documented
 * in the comment block above): a vendor line sets *vendor, a
 * TAB-indented device line sets *device; the unused out-param is -1.
 * Returns nonzero at end of database.
 * NOTE(review): extract is missing lines (initial cp setup, the
 * leading-TAB test before the device sscanf, loop bodies).
 */
3120 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3129 left = pci_vendordata_size - (cp - pci_vendordata)
/* %80[^\n] bounds the description copy to the 80-byte desc buffer. */
3137 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3141 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3144 /* skip to next line */
3145 while (*cp != '\n' && left > 0) {
3154 /* skip to next line */
3155 while (*cp != '\n' && left > 0) {
3159 if (*cp == '\n' && left > 0)
/*
 * Return a malloc'd "vendor, device" description for dev from the
 * loaded vendor database, or NULL if unavailable. Caller frees with
 * M_DEVBUF. Falls back to a hex device id string when the device
 * entry is missing.
 */
3166 pci_describe_device(device_t dev)
3169 char *desc, *vp, *dp, *line;
3171 desc = vp = dp = NULL;
3174 * If we have no vendor data, we can't do anything.
3176 if (pci_vendordata == NULL)
3180 * Scan the vendor data looking for this device
3182 line = pci_vendordata;
3183 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3186 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3188 if (vendor == pci_get_vendor(dev))
3191 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3194 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3202 if (device == pci_get_device(dev))
/* No device entry found: describe it by raw hex id instead. */
3206 snprintf(dp, 80, "0x%x", pci_get_device(dev));
3207 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3209 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus method: return a child's instance variable (PCI_IVAR_*) from the
 * cached config registers in its pci_devinfo. ETHADDR is unsupported
 * here and reports failure after nulling the result.
 */
3219 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3221 struct pci_devinfo *dinfo;
3224 dinfo = device_get_ivars(child);
3228 case PCI_IVAR_ETHADDR:
3230 * The generic accessor doesn't deal with failure, so
3231 * we set the return value, then return an error.
3233 *((uint8_t **) result) = NULL;
3235 case PCI_IVAR_SUBVENDOR:
3236 *result = cfg->subvendor;
3238 case PCI_IVAR_SUBDEVICE:
3239 *result = cfg->subdevice;
3241 case PCI_IVAR_VENDOR:
3242 *result = cfg->vendor;
3244 case PCI_IVAR_DEVICE:
3245 *result = cfg->device;
3247 case PCI_IVAR_DEVID:
/* devid packs device in the high 16 bits, vendor in the low 16. */
3248 *result = (cfg->device << 16) | cfg->vendor;
3250 case PCI_IVAR_CLASS:
3251 *result = cfg->baseclass;
3253 case PCI_IVAR_SUBCLASS:
3254 *result = cfg->subclass;
3256 case PCI_IVAR_PROGIF:
3257 *result = cfg->progif;
3259 case PCI_IVAR_REVID:
3260 *result = cfg->revid;
3262 case PCI_IVAR_INTPIN:
3263 *result = cfg->intpin;
3266 *result = cfg->intline;
3268 case PCI_IVAR_DOMAIN:
3269 *result = cfg->domain;
3275 *result = cfg->slot;
3277 case PCI_IVAR_FUNCTION:
3278 *result = cfg->func;
3280 case PCI_IVAR_CMDREG:
3281 *result = cfg->cmdreg;
3283 case PCI_IVAR_CACHELNSZ:
3284 *result = cfg->cachelnsz;
3286 case PCI_IVAR_MINGNT:
3287 *result = cfg->mingnt;
3289 case PCI_IVAR_MAXLAT:
3290 *result = cfg->maxlat;
3292 case PCI_IVAR_LATTIMER:
3293 *result = cfg->lattimer;
/*
 * Bus method: set a child's instance variable. Only INTPIN is
 * writable; all identification/config ivars are read-only and return
 * EINVAL.
 */
3302 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3304 struct pci_devinfo *dinfo;
3306 dinfo = device_get_ivars(child);
3309 case PCI_IVAR_INTPIN:
3310 dinfo->cfg.intpin = value;
3312 case PCI_IVAR_ETHADDR:
3313 case PCI_IVAR_SUBVENDOR:
3314 case PCI_IVAR_SUBDEVICE:
3315 case PCI_IVAR_VENDOR:
3316 case PCI_IVAR_DEVICE:
3317 case PCI_IVAR_DEVID:
3318 case PCI_IVAR_CLASS:
3319 case PCI_IVAR_SUBCLASS:
3320 case PCI_IVAR_PROGIF:
3321 case PCI_IVAR_REVID:
3323 case PCI_IVAR_DOMAIN:
3326 case PCI_IVAR_FUNCTION:
3327 return (EINVAL); /* disallow for now */
3335 #include "opt_ddb.h"
3337 #include <ddb/ddb.h>
3338 #include <sys/cons.h>
3341 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq and print one
 * identification line per device (class, card id, chip id, rev, hdr).
 * Honors db_pager_quit so output can be aborted from the pager.
 */
3344 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3346 struct pci_devinfo *dinfo;
3347 struct devlist *devlist_head;
3350 int i, error, none_count;
3353 /* get the head of the device queue */
3354 devlist_head = &pci_devq;
3357 * Go through the list of devices and print out devices
3359 for (error = 0, i = 0,
3360 dinfo = STAILQ_FIRST(devlist_head);
3361 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3362 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3364 /* Populate pd_name and pd_unit */
3367 name = device_get_name(dinfo->cfg.dev);
3370 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3371 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
3372 (name && *name) ? name : "none",
3373 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3375 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3376 p->pc_sel.pc_func, (p->pc_class << 16) |
3377 (p->pc_subclass << 8) | p->pc_progif,
3378 (p->pc_subdevice << 16) | p->pc_subvendor,
3379 (p->pc_device << 16) | p->pc_vendor,
3380 p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate a resource backing a BAR that the firmware did not
 * program: size the BAR by the write-ones probe, allocate a suitably
 * aligned range from the parent, record it in the resource list, and
 * program the BAR (both halves for 64-bit BARs) with the new address.
 * NOTE(review): extract is missing several lines (error returns,
 * else-branch text, some braces).
 */
3385 static struct resource *
3386 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3387 u_long start, u_long end, u_long count, u_int flags)
3389 struct pci_devinfo *dinfo = device_get_ivars(child);
3390 struct resource_list *rl = &dinfo->resources;
3391 struct resource_list_entry *rle;
3392 struct resource *res;
3393 pci_addr_t map, testval;
3397 * Weed out the bogons, and figure out how large the BAR/map
3398 * is. Bars that read back 0 here are bogus and unimplemented.
3399 * Note: atapci in legacy mode are special and handled elsewhere
3400 * in the code. If you have a atapci device in legacy mode and
3401 * it fails here, that other code is broken.
/* Classic BAR sizing: save, write all-ones, read back, restore. */
3404 map = pci_read_config(child, *rid, 4);
3405 pci_write_config(child, *rid, 0xffffffff, 4);
3406 testval = pci_read_config(child, *rid, 4);
3407 if (pci_maprange(testval) == 64)
3408 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
3409 if (pci_mapbase(testval) == 0)
3413 * Restore the original value of the BAR. We may have reprogrammed
3414 * the BAR of the low-level console device and when booting verbose,
3415 * we need the console device addressable.
3417 pci_write_config(child, *rid, map, 4);
/* Reject requests whose type disagrees with the BAR's mem/io bit. */
3419 if (PCI_BAR_MEM(testval)) {
3420 if (type != SYS_RES_MEMORY) {
3423 "child %s requested type %d for rid %#x,"
3424 " but the BAR says it is an memio\n",
3425 device_get_nameunit(child), type, *rid);
3429 if (type != SYS_RES_IOPORT) {
3432 "child %s requested type %d for rid %#x,"
3433 " but the BAR says it is an ioport\n",
3434 device_get_nameunit(child), type, *rid);
3439 * For real BARs, we need to override the size that
3440 * the driver requests, because that's what the BAR
3441 * actually uses and we would otherwise have a
3442 * situation where we might allocate the excess to
3443 * another driver, which won't work.
3445 mapsize = pci_mapsize(testval);
3446 count = 1UL << mapsize;
/* BARs must be naturally aligned to their size. */
3447 if (RF_ALIGNMENT(flags) < mapsize)
3448 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3449 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3450 flags |= RF_PREFETCHABLE;
3453 * Allocate enough resource, and then write back the
3454 * appropriate bar for that resource.
3456 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
3457 start, end, count, flags);
3459 device_printf(child,
3460 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3461 count, *rid, type, start, end);
3464 resource_list_add(rl, type, *rid, start, end, count);
3465 rle = resource_list_find(rl, type, *rid);
3467 panic("pci_alloc_map: unexpectedly can't find resource.");
3469 rle->start = rman_get_start(res);
3470 rle->end = rman_get_end(res);
3473 device_printf(child,
3474 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3475 count, *rid, type, rman_get_start(res));
3476 map = rman_get_start(res);
3478 pci_write_config(child, *rid, map, 4);
3479 if (pci_maprange(testval) == 64)
3480 pci_write_config(child, *rid + 4, map >> 32, 4);
/*
 * Bus method: allocate a resource for a child. For direct children
 * this layer adds PCI-specific behavior: refuses rid-0 INTx when
 * MSI/MSI-X is in use, routes a missing interrupt on demand, enables
 * I/O decode for BAR ranges, falls back to pci_alloc_map() for BARs
 * with no recorded resource, and reuses/reactivates already-allocated
 * entries. Everything else delegates to resource_list_alloc().
 */
3486 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
3487 u_long start, u_long end, u_long count, u_int flags)
3489 struct pci_devinfo *dinfo = device_get_ivars(child);
3490 struct resource_list *rl = &dinfo->resources;
3491 struct resource_list_entry *rle;
3492 pcicfgregs *cfg = &dinfo->cfg;
3495 * Perform lazy resource allocation
3497 if (device_get_parent(child) == dev) {
3501 * Can't alloc legacy interrupt once MSI messages
3502 * have been allocated.
3504 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3505 cfg->msix.msix_alloc > 0))
3508 * If the child device doesn't have an
3509 * interrupt routed and is deserving of an
3510 * interrupt, try to assign it one.
3512 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3514 pci_assign_interrupt(dev, child, 0);
3516 case SYS_RES_IOPORT:
3517 case SYS_RES_MEMORY:
/* rids below PCIR_BAR(nummaps) correspond to real BARs. */
3518 if (*rid < PCIR_BAR(cfg->nummaps)) {
3520 * Enable the I/O mode. We should
3521 * also be assigning resources too
3522 * when none are present. The
3523 * resource_list_alloc kind of sorta does
3526 if (PCI_ENABLE_IO(dev, child, type))
3529 rle = resource_list_find(rl, type, *rid);
3531 return (pci_alloc_map(dev, child, type, rid,
3532 start, end, count, flags));
3536 * If we've already allocated the resource, then
3537 * return it now. But first we may need to activate
3538 * it, since we don't allocate the resource as active
3539 * above. Normally this would be done down in the
3540 * nexus, but since we short-circuit that path we have
3541 * to do its job here. Not sure if we should free the
3542 * resource if it fails to activate.
3544 rle = resource_list_find(rl, type, *rid);
3545 if (rle != NULL && rle->res != NULL) {
3547 device_printf(child,
3548 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3549 rman_get_size(rle->res), *rid, type,
3550 rman_get_start(rle->res));
3551 if ((flags & RF_ACTIVE) &&
3552 bus_generic_activate_resource(dev, child, type,
3553 *rid, rle->res) != 0)
3558 return (resource_list_alloc(rl, dev, child, type, rid,
3559 start, end, count, flags));
/*
 * Bus method: delete a child's resource. Refuses (with a warning) if
 * the resource is still active or owned elsewhere; otherwise releases
 * it, removes the list entry, clears the BAR, and forwards the delete
 * upward.
 */
3563 pci_delete_resource(device_t dev, device_t child, int type, int rid)
3565 struct pci_devinfo *dinfo;
3566 struct resource_list *rl;
3567 struct resource_list_entry *rle;
3569 if (device_get_parent(child) != dev)
3572 dinfo = device_get_ivars(child);
3573 rl = &dinfo->resources;
3574 rle = resource_list_find(rl, type, rid);
3577 if (rman_get_device(rle->res) != dev ||
3578 rman_get_flags(rle->res) & RF_ACTIVE) {
3579 device_printf(dev, "delete_resource: "
3580 "Resource still owned by child, oops. "
3581 "(type=%d, rid=%d, addr=%lx)\n",
3582 rle->type, rle->rid,
3583 rman_get_start(rle->res));
3586 bus_release_resource(dev, type, rid, rle->res);
3588 resource_list_delete(rl, type, rid);
3591 * Why do we turn off the PCI configuration BAR when we delete a
/* NOTE(review): writing 0 assumes rid is a BAR offset — the original
 * comment itself questions this; confirm against full source. */
3594 pci_write_config(child, rid, 0, 4);
3595 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
/* Bus method: hand back the child's per-device resource list. */
3598 struct resource_list *
3599 pci_get_resource_list (device_t dev, device_t child)
3601 struct pci_devinfo *dinfo = device_get_ivars(child);
3603 return (&dinfo->resources);
/*
 * Bus method: read `width` bytes of the child's config space at `reg`
 * by forwarding to the parent bridge with the cached bus/slot/func.
 */
3607 pci_read_config_method(device_t dev, device_t child, int reg, int width)
3609 struct pci_devinfo *dinfo = device_get_ivars(child);
3610 pcicfgregs *cfg = &dinfo->cfg;
3612 return (PCIB_READ_CONFIG(device_get_parent(dev),
3613 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus method: write `width` bytes of `val` into the child's config
 * space at `reg` via the parent bridge.
 */
3617 pci_write_config_method(device_t dev, device_t child, int reg,
3618 uint32_t val, int width)
3620 struct pci_devinfo *dinfo = device_get_ivars(child);
3621 pcicfgregs *cfg = &dinfo->cfg;
3623 PCIB_WRITE_CONFIG(device_get_parent(dev),
3624 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Bus method: format the child's location ("slot=%d function=%d"). */
3628 pci_child_location_str_method(device_t dev, device_t child, char *buf,
3632 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
3633 pci_get_function(child));
/*
 * Bus method: format the child's plug-and-play info string (vendor,
 * device, subvendor, subdevice, packed class) for devmatch/devd.
 */
3638 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
3641 struct pci_devinfo *dinfo;
3644 dinfo = device_get_ivars(child);
3646 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
3647 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3648 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus method: ask the parent bridge to route an interrupt for the
 * child (based on its intpin, per the truncated argument list).
 */
3654 pci_assign_interrupt_method(device_t dev, device_t child)
3656 struct pci_devinfo *dinfo = device_get_ivars(child);
3657 pcicfgregs *cfg = &dinfo->cfg;
3659 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev control node, and load the vendor database; on
 * unload, destroy the control node.
 */
3664 pci_modevent(module_t mod, int what, void *arg)
3666 static struct cdev *pci_cdev;
3670 STAILQ_INIT(&pci_devq);
3672 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
3674 pci_load_vendor_data();
3678 destroy_dev(pci_cdev);
/*
 * Restore a type-0 device's config space from the copy cached in
 * dinfo: power to D0 first (D3->D0 resets BARs), then rewrite BARs,
 * expansion ROM, command, interrupt, timing registers, and finally
 * re-arm any MSI/MSI-X configuration.
 */
3686 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3691 * Only do header type 0 devices. Type 1 devices are bridges,
3692 * which we know need special treatment. Type 2 devices are
3693 * cardbus bridges which also require special treatment.
3694 * Other types are unknown, and we err on the side of safety
3697 if (dinfo->cfg.hdrtype != 0)
3701 * Restore the device to full power mode. We must do this
3702 * before we restore the registers because moving from D3 to
3703 * D0 will cause the chip's BARs and some other registers to
3704 * be reset to some unknown power on reset values. Cut down
3705 * the noise on boot by doing nothing if we are already in
3708 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3709 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3711 for (i = 0; i < dinfo->cfg.nummaps; i++)
3712 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3713 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3714 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3715 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3716 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3717 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3718 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3719 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3720 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3721 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3722 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3724 /* Restore MSI and MSI-X configurations if they are present. */
3725 if (dinfo->cfg.msi.msi_location != 0)
3726 pci_resume_msi(dev);
3727 if (dinfo->cfg.msix.msix_location != 0)
3728 pci_resume_msix(dev);
3732 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3739 * Only do header type 0 devices. Type 1 devices are bridges, which
3740 * we know need special treatment. Type 2 devices are cardbus bridges
3741 * which also require special treatment. Other types are unknown, and
3742 * we err on the side of safety by ignoring them. Powering down
3743 * bridges should not be undertaken lightly.
3745 if (dinfo->cfg.hdrtype != 0)
3747 for (i = 0; i < dinfo->cfg.nummaps; i++)
3748 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3749 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3752 * Some drivers apparently write to these registers w/o updating our
3753 * cached copy. No harm happens if we update the copy, so do so here
3754 * so we can restore them. The COMMAND register is modified by the
3755 * bus w/o updating the cache. This should represent the normally
3756 * writable portion of the 'defined' part of type 0 headers. In
3757 * theory we also need to save/restore the PCI capability structures
3758 * we know about, but apart from power we don't know any that are
3761 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3762 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3763 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3764 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3765 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3766 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3767 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3768 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3769 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3770 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3771 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3772 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3773 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3774 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3775 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
3778 * don't set the state for display devices, base peripherals and
3779 * memory devices since bad things happen when they are powered down.
3780 * We should (a) have drivers that can easily detach and (b) use
3781 * generic drivers for these devices so that some device actually
3782 * attaches. We need to make sure that when we implement (a) we don't
3783 * power the device down on a reattach.
3785 cls = pci_get_class(dev);
3788 switch (pci_do_power_nodriver)
3790 case 0: /* NO powerdown at all */
3792 case 1: /* Conservative about what to power down */
3793 if (cls == PCIC_STORAGE)
3796 case 2: /* Agressive about what to power down */
3797 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
3798 cls == PCIC_BASEPERIPH)
3801 case 3: /* Power down everything */
3805 * PCI spec says we can only go into D3 state from D0 state.
3806 * Transition from D[12] into D0 before going to D3 state.
3808 ps = pci_get_powerstate(dev);
3809 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
3810 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3811 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
3812 pci_set_powerstate(dev, PCI_POWERSTATE_D3);