2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
55 #if defined(__i386__) || defined(__amd64__)
56 #include <machine/intr_machdep.h>
59 #include <sys/pciio.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
68 #include <contrib/dev/acpica/acpi.h>
/*
 * Stub: with no ACPI support the power-for-sleep hook expands to nothing.
 * NOTE(review): the surrounding #if/#else that selects the real ACPI
 * version is not present in this extract — confirm against the full file.
 */
71 #define ACPI_PWR_FOR_SLEEP(x, y, z)
/*
 * Forward declarations for functions private to this file.
 * NOTE(review): this extract drops original source lines (the embedded
 * numbering skips), so several prototypes below are visibly missing
 * parameter/continuation lines.
 */
74 static uint32_t pci_mapbase(unsigned mapreg);
75 static const char *pci_maptype(unsigned mapreg);
76 static int pci_mapsize(unsigned testval);
77 static int pci_maprange(unsigned mapreg);
78 static void pci_fixancient(pcicfgregs *cfg);
80 static int pci_porten(device_t pcib, int b, int s, int f);
81 static int pci_memen(device_t pcib, int b, int s, int f);
82 static void pci_assign_interrupt(device_t bus, device_t dev,
84 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
85 int b, int s, int f, int reg,
86 struct resource_list *rl, int force, int prefetch);
87 static int pci_probe(device_t dev);
88 static int pci_attach(device_t dev);
89 static void pci_load_vendor_data(void);
90 static int pci_describe_parse_line(char **ptr, int *vendor,
91 int *device, char **desc);
92 static char *pci_describe_device(device_t dev);
93 static int pci_modevent(module_t mod, int what, void *arg);
94 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
96 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
97 static uint32_t pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
100 static void pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
101 int reg, uint32_t data);
103 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
104 static void pci_disable_msi(device_t dev);
105 static void pci_enable_msi(device_t dev, uint64_t address,
107 static void pci_enable_msix(device_t dev, u_int index,
108 uint64_t address, uint32_t data);
109 static void pci_mask_msix(device_t dev, u_int index);
110 static void pci_unmask_msix(device_t dev, u_int index);
111 static int pci_msi_blacklisted(void);
112 static void pci_resume_msi(device_t dev);
113 static void pci_resume_msix(device_t dev);
/*
 * Kernel-object method dispatch table for the "pci" bus driver class:
 * generic device interface, newbus bus interface, and the PCI-specific
 * interface (config-space access, power states, VPD, MSI/MSI-X).
 * NOTE(review): this extract omits some original lines, so entries and
 * the table's terminating sentinel may be missing here.
 */
115 static device_method_t pci_methods[] = {
116 /* Device interface */
117 DEVMETHOD(device_probe, pci_probe),
118 DEVMETHOD(device_attach, pci_attach),
119 DEVMETHOD(device_detach, bus_generic_detach),
120 DEVMETHOD(device_shutdown, bus_generic_shutdown),
121 DEVMETHOD(device_suspend, pci_suspend),
122 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
125 DEVMETHOD(bus_print_child, pci_print_child),
126 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
127 DEVMETHOD(bus_read_ivar, pci_read_ivar),
128 DEVMETHOD(bus_write_ivar, pci_write_ivar),
129 DEVMETHOD(bus_driver_added, pci_driver_added),
130 DEVMETHOD(bus_setup_intr, pci_setup_intr),
131 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
133 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
134 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
135 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
136 DEVMETHOD(bus_delete_resource, pci_delete_resource),
137 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
138 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
139 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
140 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
141 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
142 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface */
145 DEVMETHOD(pci_read_config, pci_read_config_method),
146 DEVMETHOD(pci_write_config, pci_write_config_method),
147 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
148 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
149 DEVMETHOD(pci_enable_io, pci_enable_io_method),
150 DEVMETHOD(pci_disable_io, pci_disable_io_method),
151 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
152 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
153 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
154 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
155 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
156 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
157 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
158 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
159 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
160 DEVMETHOD(pci_release_msi, pci_release_msi_method),
161 DEVMETHOD(pci_msi_count, pci_msi_count_method),
162 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Declare the "pci" driver class using the method table above. */
167 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
/* Register the pci driver on the pcib (PCI bridge) bus. */
169 static devclass_t pci_devclass;
170 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
171 MODULE_VERSION(pci, 1);
/* Vendor/device description database loaded by pci_load_vendor_data(). */
173 static char *pci_vendordata;
174 static size_t pci_vendordata_size;
/*
 * NOTE(review): the line below is a field of struct pci_quirk whose
 * enclosing declaration is missing from this extract.
 */
178 uint32_t devid; /* Vendor/device of the card */
180 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
181 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
/*
 * Table of device-specific quirks, keyed by devid (device ID in the
 * high 16 bits, vendor ID in the low 16 bits — e.g. 0x71138086 is
 * Intel vendor 0x8086, device 0x7113).
 * NOTE(review): this extract drops lines, including some comment
 * delimiters and, presumably, the table's terminating { 0 } entry.
 */
186 struct pci_quirk pci_quirks[] = {
187 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
188 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
189 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
190 /* As does the Serverworks OSB4 (the SMBus mapping register) */
191 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
194 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
195 * or the CMIC-SL (AKA ServerWorks GC_LE).
197 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
198 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
201 * MSI doesn't work on earlier Intel chipsets including
202 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
204 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
205 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
206 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
207 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
208 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
209 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
210 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
213 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
216 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
221 /* map register information */
222 #define PCI_MAPMEM 0x01 /* memory map */
223 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
224 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of all probed PCI devices plus generation/count bookkeeping. */
226 struct devlist pci_devq;
227 uint32_t pci_generation;
228 uint32_t pci_numdevs = 0;
/* Set when pci_read_extcap() finds a PCIe root port / PCI-X bridge. */
229 static int pcie_chipset, pcix_chipset;
/* hw.pci sysctl tree; each knob below is also a loader tunable. */
232 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
234 static int pci_enable_io_modes = 1;
235 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
236 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
237 &pci_enable_io_modes, 1,
238 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
239 enable these bits correctly. We'd like to do this all the time, but there\n\
240 are some peripherals that this causes problems with.");
242 static int pci_do_power_nodriver = 0;
243 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
244 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
245 &pci_do_power_nodriver, 0,
246 "Place a function into D3 state when no driver attaches to it. 0 means\n\
247 disable. 1 means conservatively place devices into D3 state. 2 means\n\
248 agressively place devices into D3 state. 3 means put absolutely everything\n\
/* Controls whether devices are brought back to D0 on system resume. */
251 static int pci_do_power_resume = 1;
252 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
253 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
254 &pci_do_power_resume, 1,
255 "Transition from D3 -> D0 on resume.");
257 static int pci_do_vpd = 1;
258 TUNABLE_INT("hw.pci.enable_vpd", &pci_do_vpd);
259 SYSCTL_INT(_hw_pci, OID_AUTO, enable_vpd, CTLFLAG_RW, &pci_do_vpd, 1,
260 "Enable support for VPD.");
262 static int pci_do_msi = 1;
263 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
264 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
265 "Enable support for MSI interrupts");
267 static int pci_do_msix = 1;
268 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
269 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
270 "Enable support for MSI-X interrupts");
/* Read-only after boot: whether the chipset quirk blacklist disables MSI. */
272 static int pci_honor_msi_blacklist = 1;
273 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
274 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
275 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
277 /* Find a device_t by bus/slot/function in domain 0 */
/* Convenience wrapper around pci_find_dbsf() with the domain fixed at 0. */
280 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
283 return (pci_find_dbsf(0, bus, slot, func));
286 /* Find a device_t by domain/bus/slot/function */
/*
 * Walk the global pci_devq list and return the device_t whose
 * config-space address matches all four of domain/bus/slot/func.
 * The no-match return (presumably NULL) falls in lines missing
 * from this extract.
 */
289 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
291 struct pci_devinfo *dinfo;
293 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
294 if ((dinfo->cfg.domain == domain) &&
295 (dinfo->cfg.bus == bus) &&
296 (dinfo->cfg.slot == slot) &&
297 (dinfo->cfg.func == func)) {
298 return (dinfo->cfg.dev);
305 /* Find a device_t by vendor/device ID */
/*
 * Return the first device on pci_devq whose vendor and device IDs both
 * match; only the first match is returned even if several exist.
 */
308 pci_find_device(uint16_t vendor, uint16_t device)
310 struct pci_devinfo *dinfo;
312 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
313 if ((dinfo->cfg.vendor == vendor) &&
314 (dinfo->cfg.device == device)) {
315 return (dinfo->cfg.dev);
322 /* return base address of memory or port map */
/* Strip the BAR's low flag bits, leaving only the base-address bits. */
325 pci_mapbase(uint32_t mapreg)
328 if (PCI_BAR_MEM(mapreg))
329 return (mapreg & PCIM_BAR_MEM_BASE);
331 return (mapreg & PCIM_BAR_IO_BASE);
334 /* return map type of memory or port map */
/*
 * Classify a BAR as I/O port, prefetchable memory, or plain memory.
 * The return statements for the I/O and plain-memory cases fall in
 * lines missing from this extract.
 */
337 pci_maptype(unsigned mapreg)
340 if (PCI_BAR_IO(mapreg))
342 if (mapreg & PCIM_BAR_MEM_PREFETCH)
343 return ("Prefetchable Memory");
347 /* return log2 of map size decoded for memory or port map */
/*
 * Count trailing zero bits of the sized BAR probe value (after masking
 * off the flag bits); that count is log2 of the decoded window size.
 */
350 pci_mapsize(uint32_t testval)
354 testval = pci_mapbase(testval);
357 while ((testval & 1) == 0)
366 /* return log2 of address range supported by map register */
/*
 * Distinguish I/O BARs from the three memory BAR address-range types
 * (32-bit, below-1MB, 64-bit). The per-case return values are among
 * the lines missing from this extract.
 */
369 pci_maprange(unsigned mapreg)
373 if (PCI_BAR_IO(mapreg))
376 switch (mapreg & PCIM_BAR_MEM_TYPE) {
377 case PCIM_BAR_MEM_32:
380 case PCIM_BAR_MEM_1MB:
383 case PCIM_BAR_MEM_64:
390 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/*
 * Only devices whose header type reads 0 are candidates for fixups;
 * the actual assignment performed for PCI-PCI bridges falls in lines
 * missing from this extract.
 */
393 pci_fixancient(pcicfgregs *cfg)
395 if (cfg->hdrtype != 0)
398 /* PCI to PCI bridges use header type 1 */
399 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
403 /* extract header type specific config data */
/*
 * Fill in the cfg fields whose config-space location depends on the
 * header type: type 0 (normal device), type 1 (PCI-PCI bridge — has no
 * subvendor registers), type 2 (cardbus). The switch's case labels are
 * among the lines missing from this extract.
 */
406 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
408 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
409 switch (cfg->hdrtype) {
411 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
412 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
413 cfg->nummaps = PCI_MAXMAPS_0;
416 cfg->nummaps = PCI_MAXMAPS_1;
419 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
420 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
421 cfg->nummaps = PCI_MAXMAPS_2;
427 /* read configuration header into pcicfgregs structure */
/*
 * Allocate a pci_devinfo of 'size' bytes, populate its pcicfgregs from
 * the function's config header, parse the capability list, link the
 * entry onto the global pci_devq, and mirror the data into the
 * userland-visible pci_conf. Returns the new entry.
 */
429 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
431 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
432 pcicfgregs *cfg = NULL;
433 struct pci_devinfo *devlist_entry;
434 struct devlist *devlist_head;
436 devlist_head = &pci_devq;
438 devlist_entry = NULL;
/* An all-ones vendor/device read means no function at this b/s/f. */
440 if (REG(PCIR_DEVVENDOR, 4) != -1) {
441 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
442 if (devlist_entry == NULL)
445 cfg = &devlist_entry->cfg;
451 cfg->vendor = REG(PCIR_VENDOR, 2);
452 cfg->device = REG(PCIR_DEVICE, 2);
453 cfg->cmdreg = REG(PCIR_COMMAND, 2);
454 cfg->statreg = REG(PCIR_STATUS, 2);
455 cfg->baseclass = REG(PCIR_CLASS, 1);
456 cfg->subclass = REG(PCIR_SUBCLASS, 1);
457 cfg->progif = REG(PCIR_PROGIF, 1);
458 cfg->revid = REG(PCIR_REVID, 1);
459 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
460 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
461 cfg->lattimer = REG(PCIR_LATTIMER, 1);
462 cfg->intpin = REG(PCIR_INTPIN, 1);
463 cfg->intline = REG(PCIR_INTLINE, 1);
465 cfg->mingnt = REG(PCIR_MINGNT, 1);
466 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* The multi-function bit shares the header-type byte; strip it out. */
468 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
469 cfg->hdrtype &= ~PCIM_MFDEV;
472 pci_hdrtypedata(pcib, b, s, f, cfg);
/* Only walk the capability list if the status register advertises one. */
474 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
475 pci_read_extcap(pcib, cfg);
477 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Copy the parsed config into the pci_conf view used by pciio ioctls. */
479 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
480 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
481 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
482 devlist_entry->conf.pc_sel.pc_func = cfg->func;
483 devlist_entry->conf.pc_hdr = cfg->hdrtype;
485 devlist_entry->conf.pc_subvendor = cfg->subvendor;
486 devlist_entry->conf.pc_subdevice = cfg->subdevice;
487 devlist_entry->conf.pc_vendor = cfg->vendor;
488 devlist_entry->conf.pc_device = cfg->device;
490 devlist_entry->conf.pc_class = cfg->baseclass;
491 devlist_entry->conf.pc_subclass = cfg->subclass;
492 devlist_entry->conf.pc_progif = cfg->progif;
493 devlist_entry->conf.pc_revid = cfg->revid;
498 return (devlist_entry);
/*
 * Walk the device's PCI capability list and record the capabilities this
 * driver cares about (power management, HyperTransport MSI mapping, MSI,
 * MSI-X, VPD, subvendor, PCI-X, PCI-express) into *cfg. Also sets the
 * pcix_chipset/pcie_chipset hints used by the MSI blacklist logic.
 */
503 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
505 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
506 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
507 #if defined(__i386__) || defined(__amd64__)
511 int ptr, nextptr, ptrptr;
/* The capability-pointer register location depends on the header type. */
513 switch (cfg->hdrtype & PCIM_HDRTYPE) {
516 ptrptr = PCIR_CAP_PTR;
519 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
522 return; /* no extended capabilities support */
524 nextptr = REG(ptrptr, 1); /* sanity check? */
527 * Read capability entries.
529 while (nextptr != 0) {
532 printf("illegal PCI extended capability offset %d\n",
536 /* Find the next entry */
538 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
540 /* Process this entry */
541 switch (REG(ptr + PCICAP_ID, 1)) {
542 case PCIY_PMG: /* PCI power management */
543 if (cfg->pp.pp_cap == 0) {
544 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
545 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
546 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
547 if ((nextptr - ptr) > PCIR_POWER_DATA)
548 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
551 #if defined(__i386__) || defined(__amd64__)
552 case PCIY_HT: /* HyperTransport */
553 /* Determine HT-specific capability type. */
554 val = REG(ptr + PCIR_HT_COMMAND, 2);
555 switch (val & PCIM_HTCMD_CAP_MASK) {
556 case PCIM_HTCAP_MSI_MAPPING:
557 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
558 /* Sanity check the mapping window. */
559 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
562 addr = REG(ptr + PCIR_HTMSI_ADDRESS_LO,
564 if (addr != MSI_INTEL_ADDR_BASE)
566 "HT Bridge at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
567 cfg->domain, cfg->bus,
568 cfg->slot, cfg->func,
572 /* Enable MSI -> HT mapping. */
573 val |= PCIM_HTCMD_MSI_ENABLE;
574 WREG(ptr + PCIR_HT_COMMAND, val, 2);
579 case PCIY_MSI: /* PCI MSI */
580 cfg->msi.msi_location = ptr;
581 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* The MMC field encodes log2 of the supported message count. */
582 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
583 PCIM_MSICTRL_MMC_MASK)>>1);
585 case PCIY_MSIX: /* PCI MSI-X */
586 cfg->msix.msix_location = ptr;
587 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
588 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
589 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA registers each encode a BAR index plus an offset. */
590 val = REG(ptr + PCIR_MSIX_TABLE, 4);
591 cfg->msix.msix_table_bar = PCIR_BAR(val &
593 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
594 val = REG(ptr + PCIR_MSIX_PBA, 4);
595 cfg->msix.msix_pba_bar = PCIR_BAR(val &
597 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
599 case PCIY_VPD: /* PCI Vital Product Data */
600 cfg->vpd.vpd_reg = ptr;
603 /* Should always be true. */
604 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
605 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
606 cfg->subvendor = val & 0xffff;
607 cfg->subdevice = val >> 16;
610 case PCIY_PCIX: /* PCI-X */
612 * Assume we have a PCI-X chipset if we have
613 * at least one PCI-PCI bridge with a PCI-X
614 * capability. Note that some systems with
615 * PCI-express or HT chipsets might match on
616 * this check as well.
618 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
621 case PCIY_EXPRESS: /* PCI-express */
623 * Assume we have a PCI-express chipset if we have
624 * at least one PCI-express root port.
626 val = REG(ptr + PCIR_EXPRESS_FLAGS, 2);
627 if ((val & PCIM_EXP_FLAGS_TYPE) ==
628 PCIM_EXP_TYPE_ROOT_PORT)
635 /* REG and WREG use carry through to next functions */
639 * PCI Vital Product Data
/*
 * Read one 32-bit word of VPD at byte offset 'reg': write the address
 * to the VPD address register, then poll until the hardware sets the
 * flag bit (0x8000) to indicate the data register is valid.
 */
642 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg)
645 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
647 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
648 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000)
649 DELAY(1); /* limit looping */
651 return (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one 32-bit word of VPD at byte offset 'reg': load the data
 * register, write the address with the flag bit (0x8000) set, then
 * poll until the hardware clears the flag to signal completion.
 */
656 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
658 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
660 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
661 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
662 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000)
663 DELAY(1); /* limit looping */
/* Cursor state for streaming VPD one byte at a time (fields cut from extract). */
669 struct vpd_readstate {
/*
 * Return the next byte of the VPD stream, refilling the 32-bit buffer
 * via pci_read_vpd_reg() (byte-swapped with le32toh) when it runs dry;
 * otherwise shift the buffer right and consume the low byte.
 */
679 vpd_nextbyte(struct vpd_readstate *vrs)
683 if (vrs->bytesinval == 0) {
684 vrs->val = le32toh(pci_read_vpd_reg(vrs->pcib, vrs->cfg,
687 byte = vrs->val & 0xff;
690 vrs->val = vrs->val >> 8;
691 byte = vrs->val & 0xff;
/*
 * Parse the device's VPD into cfg->vpd via a byte-at-a-time state
 * machine: state 0 = item header, 1 = identifier string, 2/3 = VPD-R
 * keyword header/value, 5/6 = VPD-W keyword header/value. The parsed
 * ident string and RO/W keyword arrays are cached; a failed "RV"
 * checksum discards the read-only data.
 * NOTE(review): many interior lines are missing from this extract
 * (error paths, state transitions, the main loop header).
 */
700 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
702 struct vpd_readstate vrs;
709 int alloc, off; /* alloc/off for RO/W arrays */
/* Mark cached up front so a parse abort does not retrigger forever. */
714 cfg->vpd.vpd_cached = 1;
718 /* init vpd reader */
726 name = remain = i = 0; /* shut up stupid gcc */
727 alloc = off = 0; /* shut up stupid gcc */
728 dflen = 0; /* shut up stupid gcc */
732 byte = vpd_nextbyte(&vrs);
734 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
735 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
736 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
739 case 0: /* item name */
/* Large-format items carry a 16-bit little-endian length. */
741 remain = vpd_nextbyte(&vrs);
742 remain |= vpd_nextbyte(&vrs) << 8;
743 if (remain > (0x7f*4 - vrs.off)) {
746 "pci%d:%d:%d:%d: invalid vpd data, remain %#x\n",
747 cfg->domain, cfg->bus, cfg->slot,
753 name = (byte >> 3) & 0xf;
756 case 0x2: /* String */
757 cfg->vpd.vpd_ident = malloc(remain + 1,
766 case 0x10: /* VPD-R */
769 cfg->vpd.vpd_ros = malloc(alloc *
770 sizeof *cfg->vpd.vpd_ros, M_DEVBUF,
774 case 0x11: /* VPD-W */
777 cfg->vpd.vpd_w = malloc(alloc *
778 sizeof *cfg->vpd.vpd_w, M_DEVBUF,
782 default: /* Invalid data, abort */
788 case 1: /* Identifier String */
789 cfg->vpd.vpd_ident[i++] = byte;
792 cfg->vpd.vpd_ident[i] = '\0';
797 case 2: /* VPD-R Keyword Header */
/* Grow the RO array geometrically as keywords arrive. */
799 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
800 (alloc *= 2) * sizeof *cfg->vpd.vpd_ros,
803 cfg->vpd.vpd_ros[off].keyword[0] = byte;
804 cfg->vpd.vpd_ros[off].keyword[1] = vpd_nextbyte(&vrs);
805 dflen = vpd_nextbyte(&vrs);
806 if (dflen == 0 &&
807 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
810 * if this happens, we can't trust the rest
814 "pci%d:%d:%d:%d: bad keyword length: %d\n",
815 cfg->domain, cfg->bus, cfg->slot,
820 } else if (dflen == 0) {
821 cfg->vpd.vpd_ros[off].value = malloc(1 *
822 sizeof *cfg->vpd.vpd_ros[off].value,
824 cfg->vpd.vpd_ros[off].value[0] = '\x00';
826 cfg->vpd.vpd_ros[off].value = malloc(
828 sizeof *cfg->vpd.vpd_ros[off].value,
832 /* keep in sync w/ state 3's transitions */
833 if (dflen == 0 && remain == 0)
841 case 3: /* VPD-R Keyword Value */
842 cfg->vpd.vpd_ros[off].value[i++] = byte;
843 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
844 "RV", 2) == 0 && cksumvalid == -1) {
849 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
850 cfg->domain, cfg->bus, cfg->slot,
851 cfg->func, vrs.cksum);
859 /* keep in sync w/ state 2's transitions */
861 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
862 if (dflen == 0 && remain == 0) {
/* All RO keywords read: shrink the array to its final size. */
863 cfg->vpd.vpd_rocnt = off;
864 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
865 off * sizeof *cfg->vpd.vpd_ros,
868 } else if (dflen == 0)
878 case 5: /* VPD-W Keyword Header */
880 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
881 (alloc *= 2) * sizeof *cfg->vpd.vpd_w,
884 cfg->vpd.vpd_w[off].keyword[0] = byte;
885 cfg->vpd.vpd_w[off].keyword[1] = vpd_nextbyte(&vrs);
886 cfg->vpd.vpd_w[off].len = dflen = vpd_nextbyte(&vrs);
887 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
888 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
889 sizeof *cfg->vpd.vpd_w[off].value,
893 /* keep in sync w/ state 6's transitions */
894 if (dflen == 0 && remain == 0)
902 case 6: /* VPD-W Keyword Value */
903 cfg->vpd.vpd_w[off].value[i++] = byte;
906 /* keep in sync w/ state 5's transitions */
908 cfg->vpd.vpd_w[off++].value[i++] = '\0';
909 if (dflen == 0 && remain == 0) {
910 cfg->vpd.vpd_wcnt = off;
911 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
912 off * sizeof *cfg->vpd.vpd_w,
915 } else if (dflen == 0)
920 printf("pci%d:%d:%d:%d: invalid state: %d\n",
921 cfg->domain, cfg->bus, cfg->slot, cfg->func,
928 if (cksumvalid == 0) {
929 /* read-only data bad, clean up */
931 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
933 free(cfg->vpd.vpd_ros, M_DEVBUF);
934 cfg->vpd.vpd_ros = NULL;
936 cfg->vpd.vpd_cached = 1;
/*
 * pci_get_vpd_ident method: lazily parse and cache the VPD on first
 * use, then return the cached identifier string through *identptr
 * (the NULL-ident error return falls in lines missing from extract).
 */
942 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
944 struct pci_devinfo *dinfo = device_get_ivars(child);
945 pcicfgregs *cfg = &dinfo->cfg;
947 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
948 pci_read_vpd(device_get_parent(dev), cfg);
950 *identptr = cfg->vpd.vpd_ident;
952 if (*identptr == NULL)
/*
 * pci_get_vpd_readonly method: lazily parse/cache the VPD, then linear-
 * search the read-only keyword array for 'kw' and hand back its value.
 */
959 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
962 struct pci_devinfo *dinfo = device_get_ivars(child);
963 pcicfgregs *cfg = &dinfo->cfg;
966 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
967 pci_read_vpd(device_get_parent(dev), cfg);
969 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
970 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
971 sizeof cfg->vpd.vpd_ros[i].keyword) == 0) {
972 *vptr = cfg->vpd.vpd_ros[i].value;
/* i == rocnt here means the keyword was not found. */
975 if (i != cfg->vpd.vpd_rocnt)
983 * Return the offset in configuration space of the requested extended
984 * capability entry or 0 if the specified capability was not found.
/*
 * Returns 0 on success (offset delivered through an out parameter cut
 * from this extract) or ENXIO when capabilities are unsupported.
 */
987 pci_find_extcap_method(device_t dev, device_t child, int capability,
990 struct pci_devinfo *dinfo = device_get_ivars(child);
991 pcicfgregs *cfg = &dinfo->cfg;
996 * Check the CAP_LIST bit of the PCI status register first.
998 status = pci_read_config(child, PCIR_STATUS, 2);
999 if (!(status & PCIM_STATUS_CAPPRESENT))
1003 * Determine the start pointer of the capabilities list.
1005 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1011 ptr = PCIR_CAP_PTR_2;
1015 return (ENXIO); /* no extended capabilities support */
1017 ptr = pci_read_config(child, ptr, 1);
1020 * Traverse the capabilities list.
1023 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1028 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1035 * Support for MSI-X message interrupts.
/*
 * Program one MSI-X table entry (16 bytes per vector: address low,
 * address high, data) via the memory-mapped table resource.
 */
1038 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1040 struct pci_devinfo *dinfo = device_get_ivars(dev);
1041 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1044 KASSERT(msix->msix_table_len > index, ("bogus index"));
1045 offset = msix->msix_table_offset + index * 16;
1046 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1047 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1048 bus_write_4(msix->msix_table_res, offset + 8, data);
/*
 * Set the per-vector mask bit in the entry's vector-control word;
 * skips the write when the vector is already masked.
 */
1052 pci_mask_msix(device_t dev, u_int index)
1054 struct pci_devinfo *dinfo = device_get_ivars(dev);
1055 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1056 uint32_t offset, val;
1058 KASSERT(msix->msix_msgnum > index, ("bogus index"));
/* Vector control is the fourth dword of the 16-byte table entry. */
1059 offset = msix->msix_table_offset + index * 16 + 12;
1060 val = bus_read_4(msix->msix_table_res, offset);
1061 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1062 val |= PCIM_MSIX_VCTRL_MASK;
1063 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Clear the per-vector mask bit in the entry's vector-control word;
 * skips the write when the vector is already unmasked.
 */
1068 pci_unmask_msix(device_t dev, u_int index)
1070 struct pci_devinfo *dinfo = device_get_ivars(dev);
1071 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1072 uint32_t offset, val;
1074 KASSERT(msix->msix_table_len > index, ("bogus index"));
1075 offset = msix->msix_table_offset + index * 16 + 12;
1076 val = bus_read_4(msix->msix_table_res, offset);
1077 if (val & PCIM_MSIX_VCTRL_MASK) {
1078 val &= ~PCIM_MSIX_VCTRL_MASK;
1079 bus_write_4(msix->msix_table_res, offset, val);
/*
 * Return non-zero if the given vector's bit is set in the MSI-X
 * pending-bit array (one bit per vector, packed 32 per dword).
 */
1084 pci_pending_msix(device_t dev, u_int index)
1086 struct pci_devinfo *dinfo = device_get_ivars(dev);
1087 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1088 uint32_t offset, bit;
1090 KASSERT(msix->msix_table_len > index, ("bogus index"));
1091 offset = msix->msix_pba_offset + (index / 32) * 4;
1092 bit = 1 << index % 32;
1093 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1097 * Restore MSI-X registers and table during resume. If MSI-X is
1098 * enabled then walk the virtual table to restore the actual MSI-X
/*
 * Mask everything, reprogram only vectors that have at least one
 * handler attached, then write back the saved MSI-X control register.
 */
1102 pci_resume_msix(device_t dev)
1104 struct pci_devinfo *dinfo = device_get_ivars(dev);
1105 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1106 struct msix_table_entry *mte;
1107 struct msix_vector *mv;
1110 if (msix->msix_alloc > 0) {
1111 /* First, mask all vectors. */
1112 for (i = 0; i < msix->msix_msgnum; i++)
1113 pci_mask_msix(dev, i);
1115 /* Second, program any messages with at least one handler. */
1116 for (i = 0; i < msix->msix_table_len; i++) {
1117 mte = &msix->msix_table[i];
1118 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
/* mte_vector is 1-based into the vectors array. */
1120 mv = &msix->msix_vectors[mte->mte_vector - 1];
1121 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1122 pci_unmask_msix(dev, i);
/* Finally, restore the saved control register (enable state, masks). */
1125 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1126 msix->msix_ctrl, 2);
1130 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1131 * returned in *count. After this function returns, each message will be
1132 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
/*
 * Implementation of the pci_alloc_msix method. Preconditions checked:
 * rid 0 (INTx) not allocated, no MSI/MSI-X already allocated, chipset
 * not on the MSI blacklist, MSI-X capability present and enabled, and
 * the BARs holding the MSI-X table and PBA mapped with RF_ACTIVE.
 * Then allocates up to msix_msgnum vectors from the parent bridge,
 * records them as SYS_RES_IRQ rids 1..actual, masks every vector,
 * builds the vector/virtual-table arrays, and sets the MSI-X enable
 * bit in the capability's control register.
 */
1135 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1137 struct pci_devinfo *dinfo = device_get_ivars(child);
1138 pcicfgregs *cfg = &dinfo->cfg;
1139 struct resource_list_entry *rle;
1140 int actual, error, i, irq, max;
1142 /* Don't let count == 0 get us into trouble. */
1146 /* If rid 0 is allocated, then fail. */
1147 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1148 if (rle != NULL && rle->res != NULL)
1151 /* Already have allocated messages? */
1152 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1155 /* If MSI is blacklisted for this system, fail. */
1156 if (pci_msi_blacklisted())
1159 /* MSI-X capability present? */
1160 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1163 /* Make sure the appropriate BARs are mapped. */
1164 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1165 cfg->msix.msix_table_bar);
1166 if (rle == NULL || rle->res == NULL ||
1167 !(rman_get_flags(rle->res) & RF_ACTIVE))
1169 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1170 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1171 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1172 cfg->msix.msix_pba_bar);
1173 if (rle == NULL || rle->res == NULL ||
1174 !(rman_get_flags(rle->res) & RF_ACTIVE))
1177 cfg->msix.msix_pba_res = rle->res;
1180 device_printf(child,
1181 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1182 *count, cfg->msix.msix_msgnum);
1183 max = min(*count, cfg->msix.msix_msgnum);
1184 for (i = 0; i < max; i++) {
1185 /* Allocate a message. */
1186 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1189 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1195 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1197 device_printf(child, "using IRQ %lu for MSI-X\n",
1203 * Be fancy and try to print contiguous runs of
1204 * IRQ values as ranges. 'irq' is the previous IRQ.
1205 * 'run' is true if we are in a range.
1207 device_printf(child, "using IRQs %lu", rle->start);
1210 for (i = 1; i < actual; i++) {
1211 rle = resource_list_find(&dinfo->resources,
1212 SYS_RES_IRQ, i + 1);
1214 /* Still in a run? */
1215 if (rle->start == irq + 1) {
1221 /* Finish previous range. */
1227 /* Start new range. */
1228 printf(",%lu", rle->start);
1232 /* Unfinished range? */
1235 printf(" for MSI-X\n");
1239 /* Mask all vectors. */
1240 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1241 pci_mask_msix(child, i);
1243 /* Allocate and initialize vector data and virtual table. */
1244 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1245 M_DEVBUF, M_WAITOK | M_ZERO);
1246 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1247 M_DEVBUF, M_WAITOK | M_ZERO);
1248 for (i = 0; i < actual; i++) {
1249 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1250 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1251 cfg->msix.msix_table[i].mte_vector = i + 1;
1254 /* Update control register to enable MSI-X. */
1255 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1256 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1257 cfg->msix.msix_ctrl, 2);
1259 /* Update counts of alloc'd messages. */
1260 cfg->msix.msix_alloc = actual;
1261 cfg->msix.msix_table_len = actual;
1267 * By default, pci_alloc_msix() will assign the allocated IRQ
1268 * resources consecutively to the first N messages in the MSI-X table.
1269 * However, device drivers may want to use different layouts if they
1270 * either receive fewer messages than they asked for, or they wish to
1271 * populate the MSI-X table sparsely. This method allows the driver
1272 * to specify what layout it wants. It must be called after a
1273 * successful pci_alloc_msix() but before any of the associated
1274 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1276 * The 'vectors' array contains 'count' message vectors. The array
1277 * maps directly to the MSI-X table in that index 0 in the array
1278 * specifies the vector for the first message in the MSI-X table, etc.
1279 * The vector value in each array index can either be 0 to indicate
1280 * that no vector should be assigned to a message slot, or it can be a
1281 * number from 1 to N (where N is the count returned from a
1282 * succcessful call to pci_alloc_msix()) to indicate which message
1283 * vector (IRQ) to be used for the corresponding message.
1285 * On successful return, each message with a non-zero vector will have
1286 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1287 * 1. Additionally, if any of the IRQs allocated via the previous
1288 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1289 * will be freed back to the system automatically.
1291 * For example, suppose a driver has a MSI-X table with 6 messages and
1292 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1293 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1294 * C. After the call to pci_alloc_msix(), the device will be setup to
1295 * have an MSI-X table of ABC--- (where - means no vector assigned).
1296 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1297 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1298 * be freed back to the system. This device will also have valid
1299 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1301 * In any case, the SYS_RES_IRQ rid X will always map to the message
1302 * at MSI-X table index X - 1 and will only be valid if a vector is
1303 * assigned to that table entry.
1306 pci_remap_msix_method(device_t dev, device_t child, int count,
1307 const u_int *vectors)
1309 struct pci_devinfo *dinfo = device_get_ivars(child);
1310 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1311 struct resource_list_entry *rle;
1312 int i, irq, j, *used;
1315 * Have to have at least one message in the table but the
1316 * table can't be bigger than the actual MSI-X table in the
1319 if (count == 0 || count > msix->msix_msgnum)
1322 /* Sanity check the vectors. */
1323 for (i = 0; i < count; i++)
1324 if (vectors[i] > msix->msix_alloc)
1328 * Make sure there aren't any holes in the vectors to be used.
1329 * It's a big pain to support it, and it doesn't really make
1330 * sense anyway. Also, at least one vector must be used.
1332 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1334 for (i = 0; i < count; i++)
1335 if (vectors[i] != 0)
1336 used[vectors[i] - 1] = 1;
1337 for (i = 0; i < msix->msix_alloc - 1; i++)
1338 if (used[i] == 0 && used[i + 1] == 1) {
1339 free(used, M_DEVBUF);
1343 free(used, M_DEVBUF);
1347 /* Make sure none of the resources are allocated. */
1348 for (i = 0; i < msix->msix_table_len; i++) {
1349 if (msix->msix_table[i].mte_vector == 0)
1351 if (msix->msix_table[i].mte_handlers > 0)
1353 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1354 KASSERT(rle != NULL, ("missing resource"));
1355 if (rle->res != NULL)
1359 /* Free the existing resource list entries. */
1360 for (i = 0; i < msix->msix_table_len; i++) {
1361 if (msix->msix_table[i].mte_vector == 0)
1363 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1367 * Build the new virtual table keeping track of which vectors are
1370 free(msix->msix_table, M_DEVBUF);
1371 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1372 M_DEVBUF, M_WAITOK | M_ZERO);
1373 for (i = 0; i < count; i++)
1374 msix->msix_table[i].mte_vector = vectors[i];
1375 msix->msix_table_len = count;
1377 /* Free any unused IRQs and resize the vectors array if necessary. */
1378 j = msix->msix_alloc - 1;
1380 struct msix_vector *vec;
1382 while (used[j] == 0) {
1383 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1384 msix->msix_vectors[j].mv_irq);
1387 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1389 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1391 free(msix->msix_vectors, M_DEVBUF);
1392 msix->msix_vectors = vec;
1393 msix->msix_alloc = j + 1;
1395 free(used, M_DEVBUF);
1397 /* Map the IRQs onto the rids. */
1398 for (i = 0; i < count; i++) {
1399 if (vectors[i] == 0)
1401 irq = msix->msix_vectors[vectors[i]].mv_irq;
1402 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1407 device_printf(child, "Remapped MSI-X IRQs as: ");
1408 for (i = 0; i < count; i++) {
1411 if (vectors[i] == 0)
1415 msix->msix_vectors[vectors[i]].mv_irq);
/*
 * Tear down a device's MSI-X state: verify no message still has a
 * handler or an allocated SYS_RES_IRQ, clear the MSI-X enable bit,
 * delete the resource list entries, and hand every IRQ back to the
 * parent bridge.  (Error-return paths are elided in this chunk.)
 */
1424 pci_release_msix(device_t dev, device_t child)
1426 	struct pci_devinfo *dinfo = device_get_ivars(child);
1427 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1428 	struct resource_list_entry *rle;
1431 	/* Do we have any messages to release? */
1432 	if (msix->msix_alloc == 0)
1435 	/* Make sure none of the resources are allocated. */
1436 	for (i = 0; i < msix->msix_table_len; i++) {
1437 		if (msix->msix_table[i].mte_vector == 0)
1439 		if (msix->msix_table[i].mte_handlers > 0)
1441 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1442 		KASSERT(rle != NULL, ("missing resource"));
1443 		if (rle->res != NULL)
1447 	/* Update control register to disable MSI-X. */
1448 	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1449 	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1450 	    msix->msix_ctrl, 2);
1452 	/* Free the resource list entries. */
1453 	for (i = 0; i < msix->msix_table_len; i++) {
1454 		if (msix->msix_table[i].mte_vector == 0)
1456 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1458 	free(msix->msix_table, M_DEVBUF);
1459 	msix->msix_table_len = 0;
1461 	/* Release the IRQs. */
1462 	for (i = 0; i < msix->msix_alloc; i++)
1463 		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1464 		    msix->msix_vectors[i].mv_irq);
1465 	free(msix->msix_vectors, M_DEVBUF);
1466 	msix->msix_alloc = 0;
1471 * Return the max supported MSI-X messages this device supports.
1472 * Basically, assuming the MD code can alloc messages, this function
1473 * should return the maximum value that pci_alloc_msix() can return.
1474 * Thus, it is subject to the tunables, etc.
/* Report how many MSI-X messages the device can use (0 if MSI-X is
 * disabled by tunable or the capability is absent). */
1477 pci_msix_count_method(device_t dev, device_t child)
1479 	struct pci_devinfo *dinfo = device_get_ivars(child);
1480 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1482 	if (pci_do_msix && msix->msix_location != 0)
1483 		return (msix->msix_msgnum);
1488 * Support for MSI message signalled interrupts.
/*
 * Program the MSI capability with the given message address/data and
 * set the MSI enable bit.  For 64-bit capable functions the upper
 * address dword and the 64-bit data register offset are used.
 */
1491 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1493 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1494 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1496 	/* Write data and address values. */
1497 	pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1498 	    address & 0xffffffff, 4);
1499 	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1500 		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1502 		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1505 		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1508 	/* Enable MSI in the control register. */
1509 	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1510 	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/* Clear the MSI enable bit in the device's MSI control register. */
1515 pci_disable_msi(device_t dev)
1517 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1518 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1520 	/* Disable MSI in the control register. */
1521 	msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1522 	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1527 * Restore MSI registers during resume. If MSI is enabled then
1528 * restore the data and address registers in addition to the control
/*
 * Restore the MSI registers from the cached softc state after resume.
 * Address/data are only rewritten when MSI was enabled; the control
 * register is always restored.
 */
1532 pci_resume_msi(device_t dev)
1534 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1535 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1539 	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1540 		address = msi->msi_addr;
1541 		data = msi->msi_data;
1542 		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1543 		    address & 0xffffffff, 4);
1544 		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1545 			pci_write_config(dev, msi->msi_location +
1546 			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1547 			pci_write_config(dev, msi->msi_location +
1548 			    PCIR_MSI_DATA_64BIT, data, 2);
1550 			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1553 	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Re-fetch the address/data pair for 'irq' from the parent bridge
 * (via PCIB_MAP_MSI) and reprogram whichever MSI message or MSI-X
 * table slots currently use that IRQ.  Used when interrupt routing
 * changes underneath an active handler.
 */
1558 pci_remap_msi_irq(device_t dev, u_int irq)
1560 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1561 	pcicfgregs *cfg = &dinfo->cfg;
1562 	struct resource_list_entry *rle;
1563 	struct msix_table_entry *mte;
1564 	struct msix_vector *mv;
1570 	bus = device_get_parent(dev);
1573 	 * Handle MSI first.  We try to find this IRQ among our list
1574 	 * of MSI IRQs.  If we find it, we request updated address and
1575 	 * data registers and apply the results.
1577 	if (cfg->msi.msi_alloc > 0) {
1579 		/* If we don't have any active handlers, nothing to do. */
1580 		if (cfg->msi.msi_handlers == 0)
1582 		for (i = 0; i < cfg->msi.msi_alloc; i++) {
1583 			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1585 			if (rle->start == irq) {
1586 				error = PCIB_MAP_MSI(device_get_parent(bus),
1587 				    dev, irq, &addr, &data);
/* Cycle MSI off and back on so the new address/data take effect atomically. */
1590 				pci_disable_msi(dev);
1591 				dinfo->cfg.msi.msi_addr = addr;
1592 				dinfo->cfg.msi.msi_data = data;
1593 				pci_enable_msi(dev, addr, data);
1601 	 * For MSI-X, we check to see if we have this IRQ.  If we do,
1602 	 * we request the updated mapping info.  If that works, we go
1603 	 * through all the slots that use this IRQ and update them.
1605 	if (cfg->msix.msix_alloc > 0) {
1606 		for (i = 0; i < cfg->msix.msix_alloc; i++) {
1607 			mv = &cfg->msix.msix_vectors[i];
1608 			if (mv->mv_irq == irq) {
1609 				error = PCIB_MAP_MSI(device_get_parent(bus),
1610 				    dev, irq, &addr, &data);
1613 				mv->mv_address = addr;
/* Update every table slot bound to vector i+1 that has an active handler. */
1615 				for (j = 0; j < cfg->msix.msix_table_len; j++) {
1616 					mte = &cfg->msix.msix_table[j];
1617 					if (mte->mte_vector != i + 1)
1619 					if (mte->mte_handlers == 0)
/* Mask the entry while rewriting its address/data, per the MSI-X spec. */
1621 					pci_mask_msix(dev, j);
1622 					pci_enable_msix(dev, j, addr, data);
1623 					pci_unmask_msix(dev, j);
1634 * Returns true if the specified device is blacklisted because MSI
/* Return non-zero if this device's vendor/device ID appears in the
 * quirk table with PCI_QUIRK_DISABLE_MSI (and the blacklist is honored). */
1638 pci_msi_device_blacklisted(device_t dev)
1640 	struct pci_quirk *q;
1642 	if (!pci_honor_msi_blacklist)
/* Quirk table is terminated by a zero devid entry. */
1645 	for (q = &pci_quirks[0]; q->devid; q++) {
1646 		if (q->devid == pci_get_devid(dev) &&
1647 		    q->type == PCI_QUIRK_DISABLE_MSI)
1654 * Determine if MSI is blacklisted globally on this system. Currently,
1655 * we just check for blacklisted chipsets as represented by the
1656 * host-PCI bridge at device 0:0:0. In the future, it may become
1657 * necessary to check other system attributes, such as the kenv values
1658 * that give the motherboard manufacturer and model number.
/* System-wide MSI blacklist check: non-PCIe/PCI-X chipsets are refused
 * outright, otherwise the host bridge at 0:0:0 is checked for quirks. */
1661 pci_msi_blacklisted(void)
1665 	if (!pci_honor_msi_blacklist)
1668 	/* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1669 	if (!(pcie_chipset || pcix_chipset))
1672 	dev = pci_find_bsf(0, 0, 0);
/* NOTE(review): pci_find_bsf() may return NULL; the elided lines presumably guard this -- confirm. */
1674 		return (pci_msi_device_blacklisted(dev));
1679 * Attempt to allocate *count MSI messages. The actual number allocated is
1680 * returned in *count. After this function returns, each message will be
1681 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * Allocate up to *count MSI messages for 'child'.  The granted count
 * (a power of two, at most 32, at most what the device advertises) is
 * returned through *count and the IRQs become SYS_RES_IRQ rids 1..N.
 */
1684 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1686 	struct pci_devinfo *dinfo = device_get_ivars(child);
1687 	pcicfgregs *cfg = &dinfo->cfg;
1688 	struct resource_list_entry *rle;
1689 	int actual, error, i, irqs[32];
1692 	/* Don't let count == 0 get us into trouble. */
1696 	/* If rid 0 is allocated, then fail. */
1697 	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1698 	if (rle != NULL && rle->res != NULL)
1701 	/* Already have allocated messages? */
1702 	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1705 	/* If MSI is blacklisted for this system, fail. */
1706 	if (pci_msi_blacklisted())
1709 	/* MSI capability present? */
1710 	if (cfg->msi.msi_location == 0 || !pci_do_msi)
1714 		device_printf(child,
1715 		    "attempting to allocate %d MSI vectors (%d supported)\n",
1716 		    *count, cfg->msi.msi_msgnum);
1718 	/* Don't ask for more than the device supports. */
1719 	actual = min(*count, cfg->msi.msi_msgnum);
1721 	/* Don't ask for more than 32 messages. */
1722 	actual = min(actual, 32);
1724 	/* MSI requires power of 2 number of messages. */
1725 	if (!powerof2(actual))
1729 	/* Try to allocate N messages. */
1730 	error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
1731 	    cfg->msi.msi_msgnum, irqs);
1742 	 * We now have N actual messages mapped onto SYS_RES_IRQ
1743 	 * resources in the irqs[] array, so add new resources
1744 	 * starting at rid 1.
1746 	for (i = 0; i < actual; i++)
1747 		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1748 		    irqs[i], irqs[i], 1);
1752 			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
1757 			 * Be fancy and try to print contiguous runs
1758 			 * of IRQ values as ranges.  'run' is true if
1759 			 * we are in a range.
1761 			device_printf(child, "using IRQs %d", irqs[0]);
1763 			for (i = 1; i < actual; i++) {
1765 				/* Still in a run? */
1766 				if (irqs[i] == irqs[i - 1] + 1) {
1771 				/* Finish previous range. */
1773 					printf("-%d", irqs[i - 1]);
1777 				/* Start new range. */
1778 				printf(",%d", irqs[i]);
1781 			/* Unfinished range? */
1783 				printf("-%d", irqs[actual - 1]);
1784 			printf(" for MSI\n");
1788 	/* Update control register with actual count. */
1789 	ctrl = cfg->msi.msi_ctrl;
1790 	ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* Multiple Message Enable field encodes log2(count) in bits 6:4. */
1791 	ctrl |= (ffs(actual) - 1) << 4;
1792 	cfg->msi.msi_ctrl = ctrl;
1793 	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1795 	/* Update counts of alloc'd messages. */
1796 	cfg->msi.msi_alloc = actual;
1797 	cfg->msi.msi_handlers = 0;
1802 /* Release the MSI messages associated with this device. */
/*
 * Release a child's message-signalled interrupts.  MSI-X is tried
 * first; ENODEV from pci_release_msix() means "no MSI-X allocated"
 * and we fall through to plain MSI teardown.
 */
1804 pci_release_msi_method(device_t dev, device_t child)
1806 	struct pci_devinfo *dinfo = device_get_ivars(child);
1807 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1808 	struct resource_list_entry *rle;
1809 	int error, i, irqs[32];
1811 	/* Try MSI-X first. */
1812 	error = pci_release_msix(dev, child);
1813 	if (error != ENODEV)
1816 	/* Do we have any messages to release? */
1817 	if (msi->msi_alloc == 0)
1819 	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
1821 	/* Make sure none of the resources are allocated. */
1822 	if (msi->msi_handlers > 0)
1824 	for (i = 0; i < msi->msi_alloc; i++) {
1825 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1826 		KASSERT(rle != NULL, ("missing MSI resource"));
1827 		if (rle->res != NULL)
1829 		irqs[i] = rle->start;
1832 	/* Update control register with 0 count. */
1833 	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
1834 	    ("%s: MSI still enabled", __func__));
1835 	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
1836 	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
1839 	/* Release the messages. */
1840 	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
1841 	for (i = 0; i < msi->msi_alloc; i++)
1842 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1844 	/* Update alloc count. */
1852 * Return the max supported MSI messages this device supports.
1853 * Basically, assuming the MD code can alloc messages, this function
1854 * should return the maximum value that pci_alloc_msi() can return.
1855 * Thus, it is subject to the tunables, etc.
/* Report how many MSI messages the device can use (0 if MSI is
 * disabled by tunable or the capability is absent). */
1858 pci_msi_count_method(device_t dev, device_t child)
1860 	struct pci_devinfo *dinfo = device_get_ivars(child);
1861 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1863 	if (pci_do_msi && msi->msi_location != 0)
1864 		return (msi->msi_msgnum);
1868 /* free pcicfgregs structure and all depending data structures */
/*
 * Free a pci_devinfo and everything hanging off it (VPD identifier,
 * read-only and writable VPD keyword values), unlink it from the
 * global device queue, and adjust the generation/device counters.
 */
1871 pci_freecfg(struct pci_devinfo *dinfo)
1873 	struct devlist *devlist_head;
1876 	devlist_head = &pci_devq;
1878 	if (dinfo->cfg.vpd.vpd_reg) {
1879 		free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
1880 		for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
1881 			free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
1882 		free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
1883 		for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
1884 			free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
1885 		free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
1887 	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
1888 	free(dinfo, M_DEVBUF);
1890 	/* increment the generation count */
1893 	/* we're losing one device */
1899 * PCI power management
/*
 * Put 'child' into the requested PCI power state (D0-D3) via its
 * power-management capability.  No-ops if the state is unchanged,
 * rejects D1/D2 when unsupported, and honors the spec-mandated
 * settle delays (10ms for D3 transitions, 200us for D2).
 */
1902 pci_set_powerstate_method(device_t dev, device_t child, int state)
1904 	struct pci_devinfo *dinfo = device_get_ivars(child);
1905 	pcicfgregs *cfg = &dinfo->cfg;
1907 	int result, oldstate, highest, delay;
1909 	if (cfg->pp.pp_cap == 0)
1910 		return (EOPNOTSUPP);
1913 	 * Optimize a no state change request away.  While it would be OK to
1914 	 * write to the hardware in theory, some devices have shown odd
1915 	 * behavior when going from D3 -> D3.
1917 	oldstate = pci_get_powerstate(child);
1918 	if (oldstate == state)
1922 	 * The PCI power management specification states that after a state
1923 	 * transition between PCI power states, system software must
1924 	 * guarantee a minimal delay before the function accesses the device.
1925 	 * Compute the worst case delay that we need to guarantee before we
1926 	 * access the device.  Many devices will be responsive much more
1927 	 * quickly than this delay, but there are some that don't respond
1928 	 * instantly to state changes.  Transitions to/from D3 state require
1929 	 * 10ms, while D2 requires 200us, and D0/1 require none.  The delay
1930 	 * is done below with DELAY rather than a sleeper function because
1931 	 * this function can be called from contexts where we cannot sleep.
1933 	highest = (oldstate > state) ? oldstate : state;
1934 	if (highest == PCI_POWERSTATE_D3)
1936 	else if (highest == PCI_POWERSTATE_D2)
/* Preserve the rest of the PM status register, replacing only the state field. */
1940 	status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
1941 	    & ~PCIM_PSTAT_DMASK;
1944 	case PCI_POWERSTATE_D0:
1945 		status |= PCIM_PSTAT_D0;
1947 	case PCI_POWERSTATE_D1:
1948 		if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
1949 			return (EOPNOTSUPP);
1950 		status |= PCIM_PSTAT_D1;
1952 	case PCI_POWERSTATE_D2:
1953 		if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
1954 			return (EOPNOTSUPP);
1955 		status |= PCIM_PSTAT_D2;
1957 	case PCI_POWERSTATE_D3:
1958 		status |= PCIM_PSTAT_D3;
1966 		    "pci%d:%d:%d:%d: Transition from D%d to D%d\n",
1967 		    dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
1968 		    dinfo->cfg.func, oldstate, state);
1970 	PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the child's current PCI power state from its power-management
 * status register.  Devices without the PM capability are always D0.
 */
1977 pci_get_powerstate_method(device_t dev, device_t child)
1979 	struct pci_devinfo *dinfo = device_get_ivars(child);
1980 	pcicfgregs *cfg = &dinfo->cfg;
1984 	if (cfg->pp.pp_cap != 0) {
1985 		status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
1986 		switch (status & PCIM_PSTAT_DMASK) {
1988 			result = PCI_POWERSTATE_D0;
1991 			result = PCI_POWERSTATE_D1;
1994 			result = PCI_POWERSTATE_D2;
1997 			result = PCI_POWERSTATE_D3;
2000 			result = PCI_POWERSTATE_UNKNOWN;
2004 		/* No support, device is always at D0 */
2005 		result = PCI_POWERSTATE_D0;
2011 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the PCI command register. */
2014 static __inline void
2015 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2019 	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2021 	PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the PCI command register. */
2024 static __inline void
2025 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2029 	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2031 	PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable bus mastering for the child device. */
2035 pci_enable_busmaster_method(device_t dev, device_t child)
2037 	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable bus mastering for the child device. */
2042 pci_disable_busmaster_method(device_t dev, device_t child)
2044 	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Turn on I/O port or memory decoding for the child and verify the
 * bit actually stuck by reading the command register back.
 * ('error' here is a string naming the space -- declaration elided.)
 */
2049 pci_enable_io_method(device_t dev, device_t child, int space)
2059 	case SYS_RES_IOPORT:
2060 		bit = PCIM_CMD_PORTEN;
2063 	case SYS_RES_MEMORY:
2064 		bit = PCIM_CMD_MEMEN;
2070 	pci_set_command_bit(dev, child, bit);
2071 	/* Some devices seem to need a brief stall here, what do to? */
2072 	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2075 	device_printf(child, "failed to enable %s mapping!\n", error);
/*
 * Turn off I/O port or memory decoding for the child and verify the
 * bit actually cleared by reading the command register back.
 */
2080 pci_disable_io_method(device_t dev, device_t child, int space)
2090 	case SYS_RES_IOPORT:
2091 		bit = PCIM_CMD_PORTEN;
2094 	case SYS_RES_MEMORY:
2095 		bit = PCIM_CMD_MEMEN;
2101 	pci_clear_command_bit(dev, child, bit);
2102 	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2103 	if (command & bit) {
2104 		device_printf(child, "failed to disable %s mapping!\n", error);
2111 * New style pci driver. Parent device is either a pci-host-bridge or a
2112 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a device's configuration header to the console when booting
 * verbose: IDs, location, class, command/status, timing parameters,
 * interrupt pin/line, and any power-management, MSI, and MSI-X
 * capability summaries.
 */
2116 pci_print_verbose(struct pci_devinfo *dinfo)
2120 		pcicfgregs *cfg = &dinfo->cfg;
2122 		printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2123 		    cfg->vendor, cfg->device, cfg->revid);
2124 		printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2125 		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
2126 		printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2127 		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2129 		printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2130 		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2131 		printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2132 		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2133 		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2134 		if (cfg->intpin > 0)
/* intpin is 1-based (1 == INTA), hence the 'a' - 1 adjustment. */
2135 			printf("\tintpin=%c, irq=%d\n",
2136 			    cfg->intpin +'a' -1, cfg->intline);
2137 		if (cfg->pp.pp_cap) {
2140 			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2141 			printf("\tpowerspec %d  supports D0%s%s D3  current D%d\n",
2142 			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2143 			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2144 			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2145 			    status & PCIM_PSTAT_DMASK);
2147 		if (cfg->msi.msi_location) {
2150 			ctrl = cfg->msi.msi_ctrl;
2151 			printf("\tMSI supports %d message%s%s%s\n",
2152 			    cfg->msi.msi_msgnum,
2153 			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
2154 			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2155 			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2157 		if (cfg->msix.msix_location) {
2158 			printf("\tMSI-X supports %d message%s ",
2159 			    cfg->msix.msix_msgnum,
2160 			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
2161 			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2162 				printf("in map 0x%x\n",
2163 				    cfg->msix.msix_table_bar);
2165 				printf("in maps 0x%x and 0x%x\n",
2166 				    cfg->msix.msix_table_bar,
2167 				    cfg->msix.msix_pba_bar);
/* Non-zero if I/O port decoding is enabled in the command register. */
2173 pci_porten(device_t pcib, int b, int s, int f)
2175 	return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2176 		& PCIM_CMD_PORTEN) != 0;
/* Non-zero if memory decoding is enabled in the command register. */
2180 pci_memen(device_t pcib, int b, int s, int f)
2182 	return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
2183 		& PCIM_CMD_MEMEN) != 0;
2187 * Add a resource based on a pci map register. Return 1 if the map
2188 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * Size a BAR by the write-all-ones probe, add it to the resource list,
 * and pre-allocate it.  Returns 1 for a 32-bit BAR or 2 for a 64-bit
 * BAR so the caller can advance past the consumed registers.
 */
2191 pci_add_map(device_t pcib, device_t bus, device_t dev,
2192     int b, int s, int f, int reg, struct resource_list *rl, int force,
2197 	pci_addr_t start, end, count;
2204 	struct resource *res;
/* Classic BAR sizing: save, write ~0, read back the size mask, restore. */
2206 	map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2207 	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
2208 	testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
2209 	PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
2211 	if (PCI_BAR_MEM(map))
2212 		type = SYS_RES_MEMORY;
2214 		type = SYS_RES_IOPORT;
2215 	ln2size = pci_mapsize(testval);
2216 	ln2range = pci_maprange(testval);
2217 	base = pci_mapbase(map);
2218 	barlen = ln2range == 64 ? 2 : 1;
2221 	 * For I/O registers, if bottom bit is set, and the next bit up
2222 	 * isn't clear, we know we have a BAR that doesn't conform to the
2223 	 * spec, so ignore it.  Also, sanity check the size of the data
2224 	 * areas to the type of memory involved.  Memory must be at least
2225 	 * 16 bytes in size, while I/O ranges must be at least 4.
2227 	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2229 	if ((type == SYS_RES_MEMORY && ln2size < 4) ||
2230 	    (type == SYS_RES_IOPORT && ln2size < 2))
2234 		/* Read the other half of a 64bit map register */
2235 		base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
2237 		printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2238 		    reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
2239 		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2240 			printf(", port disabled\n");
2241 		else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2242 			printf(", memory disabled\n");
2244 			printf(", enabled\n");
2248 	 * If base is 0, then we have problems.  It is best to ignore
2249 	 * such entries for the moment.  These will be allocated later if
2250 	 * the driver specifically requests them.  However, some
2251 	 * removable busses look better when all resources are allocated,
2252 	 * so allow '0' to be overridden.
2254 	 * Similarly treat maps whose value is the same as the test value
2255 	 * read back.  These maps have had all f's written to them by the
2256 	 * BIOS in an attempt to disable the resources.
2258 	if (!force && (base == 0 || map == testval))
/* Reject a base that does not fit in this platform's u_long address space. */
2260 	if ((u_long)base != base) {
2262 		    "pci%d:%d:%d:%d bar %#x too many address bits",
2263 		    pci_get_domain(dev), b, s, f, reg);
2268 	 * This code theoretically does the right thing, but has
2269 	 * undesirable side effects in some cases where peripherals
2270 	 * respond oddly to having these bits enabled.  Let the user
2271 	 * be able to turn them off (since pci_enable_io_modes is 1 by
2274 	if (pci_enable_io_modes) {
2275 		/* Turn on resources that have been left off by a lazy BIOS */
2276 		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
2277 			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2278 			cmd |= PCIM_CMD_PORTEN;
2279 			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2281 		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
2282 			cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
2283 			cmd |= PCIM_CMD_MEMEN;
2284 			PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
2287 		if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
2289 		if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
2293 	count = 1 << ln2size;
2294 	if (base == 0 || base == pci_mapbase(testval)) {
2295 		start = 0;	/* Let the parent decide */
2299 		end = base + (1 << ln2size) - 1;
2301 	resource_list_add(rl, type, reg, start, end, count);
2304 	 * Not quite sure what to do on failure of allocating the resource
2305 	 * since I can postulate several right answers.
/*
 * NOTE(review): the '®' below is mojibake for '&reg' (HTML entity
 * "&reg;" swallowed by an encoding pass) -- restore '&reg' before
 * this file is compiled.
 */
2307 	res = resource_list_alloc(rl, bus, dev, type, ®, start, end, count,
2308 	    prefetch ? RF_PREFETCHABLE : 0);
2311 	start = rman_get_start(res);
2312 	if ((u_long)start != start) {
2313 		/* Wait a minute!  this platform can't do this address. */
2315 		    "pci%d:%d.%d.%x bar %#x start %#jx, too many bits.",
2316 		    pci_get_domain(dev), b, s, f, reg, (uintmax_t)start);
2317 		resource_list_release(rl, bus, dev, type, reg, res);
/* Write the (possibly parent-chosen) base back into the BAR. */
2320 	pci_write_config(dev, reg, start, 4);
2322 		pci_write_config(dev, reg + 4, start >> 32, 4);
2327 * For ATA devices we need to decide early what addressing mode to use.
2328 * Legacy demands that the primary and secondary ATA ports sits on the
2329 * same addresses that old ISA hardware did. This dictates that we use
2330 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Special-case BAR handling for ATA controllers: prefer PCI native
 * addressing when the progif says both channels support it and the
 * BARs are populated; otherwise fall back to the legacy ISA-compatible
 * fixed port ranges (0x1f0/0x3f6 primary, 0x170/0x376 secondary).
 * BAR(4)/BAR(5) (bus-master DMA etc.) are always mapped normally.
 */
2334 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
2335     int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
2337 	int rid, type, progif;
2339 	/* if this device supports PCI native addressing use it */
2340 	progif = pci_read_config(dev, PCIR_PROGIF, 1);
2341 	if ((progif & 0x8a) == 0x8a) {
2342 		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2343 		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2344 			printf("Trying ATA native PCI addressing mode\n");
/* Flip both channels' mode bits to native. */
2345 			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2349 	progif = pci_read_config(dev, PCIR_PROGIF, 1);
2350 	type = SYS_RES_IOPORT;
2351 	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2352 		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
2353 		    prefetchmask & (1 << 0));
2354 		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
2355 		    prefetchmask & (1 << 1));
2358 		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2359 		resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
2362 		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2363 		resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
2366 	if (progif & PCIP_STORAGE_IDE_MODESEC) {
2367 		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
2368 		    prefetchmask & (1 << 2));
2369 		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
2370 		    prefetchmask & (1 << 3));
2373 		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2374 		resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
2377 		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2378 		resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
2381 	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
2382 	    prefetchmask & (1 << 4));
2383 	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2384 	    prefetchmask & (1 << 5));
/*
 * Pick an INTx IRQ for a device and record it as SYS_RES_IRQ rid 0.
 * Priority: user tunable hw.pci<dom>.<bus>.<slot>.INT<pin>.irq, then
 * the intline register or a bus-routed interrupt (forced routing
 * when 'force_route' is set).
 */
2388 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2390 	struct pci_devinfo *dinfo = device_get_ivars(dev);
2391 	pcicfgregs *cfg = &dinfo->cfg;
2392 	char tunable_name[64];
2395 	/* Has to have an intpin to have an interrupt. */
2396 	if (cfg->intpin == 0)
2399 	/* Let the user override the IRQ with a tunable. */
2400 	irq = PCI_INVALID_IRQ;
2401 	snprintf(tunable_name, sizeof(tunable_name),
2402 	    "hw.pci%d.%d.%d.INT%c.irq",
2403 	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject out-of-range tunable values (valid IRQs are 1..254 here). */
2404 	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2405 		irq = PCI_INVALID_IRQ;
2408 	 * If we didn't get an IRQ via the tunable, then we either use the
2409 	 * IRQ value in the intline register or we ask the bus to route an
2410 	 * interrupt for us.  If force_route is true, then we only use the
2411 	 * value in the intline register if the bus was unable to assign an
2414 	if (!PCI_INTERRUPT_VALID(irq)) {
2415 		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2416 			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2417 		if (!PCI_INTERRUPT_VALID(irq))
2421 	/* If after all that we don't have an IRQ, just bail. */
2422 	if (!PCI_INTERRUPT_VALID(irq))
2425 	/* Update the config register if it changed. */
2426 	if (irq != cfg->intline) {
2428 		pci_write_config(dev, PCIR_INTLINE, irq, 1);
2431 	/* Add this IRQ as rid 0 interrupt resource. */
2432 	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
/*
 * Populate a new child's resource list: BARs (with ATA special
 * casing), quirk-added map registers, and the INTx interrupt.
 */
2436 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
2439 	struct pci_devinfo *dinfo = device_get_ivars(dev);
2440 	pcicfgregs *cfg = &dinfo->cfg;
2441 	struct resource_list *rl = &dinfo->resources;
2442 	struct pci_quirk *q;
2445 	pcib = device_get_parent(bus);
2451 	/* ATA devices needs special map treatment */
2452 	if ((pci_get_class(dev) == PCIC_STORAGE) &&
2453 	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
2454 	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2455 	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2456 	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
2457 		pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
/* pci_add_map() returns 1 or 2 (64-bit BARs consume two registers). */
2459 		for (i = 0; i < cfg->nummaps;)
2460 			i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2461 			    rl, force, prefetchmask & (1 << i));
2464 	 * Add additional, quirked resources.
2466 	for (q = &pci_quirks[0]; q->devid; q++) {
2467 		if (q->devid == ((cfg->device << 16) | cfg->vendor)
2468 		    && q->type == PCI_QUIRK_MAP_REG)
2469 			pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2473 	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2474 #ifdef __PCI_REROUTE_INTERRUPT
2476 		 * Try to re-route interrupts.  Sometimes the BIOS or
2477 		 * firmware may leave bogus values in these registers.
2478 		 * If the re-route fails, then just stick with what we
2481 		pci_assign_interrupt(bus, dev, 1);
2483 		pci_assign_interrupt(bus, dev, 0);
/*
 * Enumerate every slot/function on the bus and add a child device for
 * each function that responds.  Functions beyond 0 are only probed on
 * multi-function devices (PCIM_MFDEV in the header type).
 */
2489 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
2491 #define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2492 	device_t pcib = device_get_parent(dev);
2493 	struct pci_devinfo *dinfo;
2495 	int s, f, pcifunchigh;
2498 	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2499 	    ("dinfo_size too small"));
2500 	maxslots = PCIB_MAXSLOTS(pcib);
2501 	for (s = 0; s <= maxslots; s++) {
2505 		hdrtype = REG(PCIR_HDRTYPE, 1);
2506 		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
2508 		if (hdrtype & PCIM_MFDEV)
2509 			pcifunchigh = PCI_FUNCMAX;
2510 		for (f = 0; f <= pcifunchigh; f++) {
2511 			dinfo = pci_read_device(pcib, domain, busno, s, f,
2513 			if (dinfo != NULL) {
2514 				pci_add_child(dev, dinfo);
/*
 * Attach one enumerated function to the bus: create the newbus child,
 * link its ivars, initialize its resource list, snapshot/restore its
 * config space, and add its BAR/interrupt resources.
 */
2522 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2524 	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2525 	device_set_ivars(dinfo->cfg.dev, dinfo);
2526 	resource_list_init(&dinfo->resources);
2527 	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2528 	pci_cfg_restore(dinfo->cfg.dev, dinfo);
2529 	pci_print_verbose(dinfo);
2530 	pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/*
 * Generic probe for the PCI bus driver: always matches, but at a
 * priority that lets more specific subclasses win.
 */
2534 pci_probe(device_t dev)
2537 device_set_desc(dev, "PCI bus");
2539 /* Allow other subclasses to override this driver. */
/*
 * Attach the PCI bus: ask the parent bridge for our domain and bus
 * numbers, enumerate all children, then attach them generically.
 */
2544 pci_attach(device_t dev)
2549 * Since there can be multiple independently numbered PCI
2550 * busses on systems with multiple PCI domains, we can't use
2551 * the unit number to decide which bus we are probing. We ask
2552 * the parent pcib what our domain and bus numbers are.
2554 domain = pcib_get_domain(dev);
2555 busno = pcib_get_bus(dev);
2557 device_printf(dev, "domain=%d, physical bus=%d\n",
2560 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
2562 return (bus_generic_attach(dev));
/*
 * Bus suspend method: snapshot each child's config space, suspend the
 * children, then (if ACPI-driven power management is enabled) put each
 * attached type-0 device into the sleep-appropriate D-state.
 * NOTE(review): acpi_dev is presumably initialized to NULL on a line
 * not shown here before the pci_do_power_resume test -- confirm.
 */
2566 pci_suspend(device_t dev)
2568 int dstate, error, i, numdevs;
2569 device_t acpi_dev, child, *devlist;
2570 struct pci_devinfo *dinfo;
2573 * Save the PCI configuration space for each child and set the
2574 * device in the appropriate power state for this sleep state.
2577 if (pci_do_power_resume)
2578 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2579 device_get_children(dev, &devlist, &numdevs);
2580 for (i = 0; i < numdevs; i++) {
2582 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2583 pci_cfg_save(child, dinfo, 0);
2586 /* Suspend devices before potentially powering them down. */
2587 error = bus_generic_suspend(dev);
/* On suspend failure the device list is released and the error returned. */
2589 free(devlist, M_TEMP);
2594 * Always set the device to D3. If ACPI suggests a different
2595 * power state, use it instead. If ACPI is not present, the
2596 * firmware is responsible for managing device power. Skip
2597 * children who aren't attached since they are powered down
2598 * separately. Only manage type 0 devices for now.
/* Loop is a no-op when acpi_dev is NULL (no ACPI-directed powerdown). */
2600 for (i = 0; acpi_dev && i < numdevs; i++) {
2602 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2603 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2604 dstate = PCI_POWERSTATE_D3;
/* Let ACPI override the default D3 target for this sleep state. */
2605 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2606 pci_set_powerstate(child, dstate);
2609 free(devlist, M_TEMP);
/*
 * Bus resume method: power each attached type-0 child back to D0 (when
 * ACPI power management is enabled), restore its saved config space,
 * then resume the children generically.
 */
2614 pci_resume(device_t dev)
2617 device_t acpi_dev, child, *devlist;
2618 struct pci_devinfo *dinfo;
2621 * Set each child to D0 and restore its PCI configuration space.
2624 if (pci_do_power_resume)
2625 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2626 device_get_children(dev, &devlist, &numdevs);
2627 for (i = 0; i < numdevs; i++) {
2629 * Notify ACPI we're going to D0 but ignore the result. If
2630 * ACPI is not present, the firmware is responsible for
2631 * managing device power. Only manage type 0 devices for now.
2634 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2635 if (acpi_dev && device_is_attached(child) &&
2636 dinfo->cfg.hdrtype == 0) {
2637 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2638 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2641 /* Now the device is powered up, restore its config space. */
2642 pci_cfg_restore(child, dinfo);
2644 free(devlist, M_TEMP);
2645 return (bus_generic_resume(dev));
/*
 * Locate the preloaded PCI vendor/device description database (loaded
 * by the boot loader as type "pci_vendor_data") and record its address
 * and size for pci_describe_device().
 */
2649 pci_load_vendor_data(void)
2651 caddr_t vendordata, info;
2653 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2654 info = preload_search_info(vendordata, MODINFO_ADDR);
2655 pci_vendordata = *(char **)info;
2656 info = preload_search_info(vendordata, MODINFO_SIZE);
2657 pci_vendordata_size = *(size_t *)info;
2658 /* terminate the database */
/*
 * NOTE(review): writes one byte at offset pci_vendordata_size --
 * assumes the preload area has room past the reported size; confirm.
 */
2659 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new driver is added to the PCI devclass: give unclaimed
 * children another chance to attach.  Each candidate's config space is
 * restored first (it may have been powered down after the last failed
 * probe) and saved again (with powerdown allowed) if it still fails.
 */
2664 pci_driver_added(device_t dev, driver_t *driver)
2669 struct pci_devinfo *dinfo;
2673 device_printf(dev, "driver added\n");
2674 DEVICE_IDENTIFY(driver, dev);
2675 device_get_children(dev, &devlist, &numdevs);
2676 for (i = 0; i < numdevs; i++) {
/* Skip children that already have a driver attached. */
2678 if (device_get_state(child) != DS_NOTPRESENT)
2680 dinfo = device_get_ivars(child);
2681 pci_print_verbose(dinfo);
2683 printf("pci%d:%d:%d:%d: reprobing on driver added\n",
2684 dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot,
2686 pci_cfg_restore(child, dinfo);
2687 if (device_probe_and_attach(child) != 0)
2688 pci_cfg_save(child, dinfo, 1);
2690 free(devlist, M_TEMP);
/*
 * Bus setup_intr method.  After the generic handler setup succeeds,
 * direct children using MSI or MSI-X get their message address/data
 * programmed on first use (via the parent bridge's PCIB_MAP_MSI) and
 * per-message handler counts maintained so messages can be unmasked
 * lazily.  On any mapping failure the handler is torn down again.
 */
2694 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
2695 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
2697 struct pci_devinfo *dinfo;
2698 struct msix_table_entry *mte;
2699 struct msix_vector *mv;
2705 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
2711 * If this is a direct child, check to see if the interrupt is
2712 * MSI or MSI-X. If so, ask our parent to map the MSI and give
2713 * us the address and data register values. If we fail for some
2714 * reason, teardown the interrupt handler.
/* rid 0 is the legacy INTx interrupt; MSI/MSI-X use rid > 0. */
2716 rid = rman_get_rid(irq);
2717 if (device_get_parent(child) == dev && rid > 0) {
2718 dinfo = device_get_ivars(child);
2719 if (dinfo->cfg.msi.msi_alloc > 0) {
/* First handler for MSI: map the message and enable it. */
2720 if (dinfo->cfg.msi.msi_addr == 0) {
2721 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
2722 ("MSI has handlers, but vectors not mapped"));
2723 error = PCIB_MAP_MSI(device_get_parent(dev),
2724 child, rman_get_start(irq), &addr, &data);
2727 dinfo->cfg.msi.msi_addr = addr;
2728 dinfo->cfg.msi.msi_data = data;
2729 pci_enable_msi(child, addr, data);
2731 dinfo->cfg.msi.msi_handlers++;
/* Otherwise this rid must be an MSI-X table entry. */
2733 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
2734 ("No MSI or MSI-X interrupts allocated"));
2735 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
2736 ("MSI-X index too high"));
/* rid N maps to MSI-X table entry N-1. */
2737 mte = &dinfo->cfg.msix.msix_table[rid - 1];
2738 KASSERT(mte->mte_vector != 0, ("no message vector"));
2739 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
2740 KASSERT(mv->mv_irq == rman_get_start(irq),
2742 if (mv->mv_address == 0) {
2743 KASSERT(mte->mte_handlers == 0,
2744 ("MSI-X table entry has handlers, but vector not mapped"));
2745 error = PCIB_MAP_MSI(device_get_parent(dev),
2746 child, rman_get_start(irq), &addr, &data);
2749 mv->mv_address = addr;
/* Program and unmask the table entry on its first handler. */
2752 if (mte->mte_handlers == 0) {
2753 pci_enable_msix(child, rid - 1, mv->mv_address,
2755 pci_unmask_msix(child, rid - 1);
2757 mte->mte_handlers++;
/* Mapping failed: undo the generic setup performed above. */
2761 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus teardown_intr method: the inverse of pci_setup_intr().  For
 * direct MSI/MSI-X children, drop the per-message handler count and
 * disable MSI / mask the MSI-X entry when the count reaches zero,
 * then perform the generic teardown.
 */
2771 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
2774 struct msix_table_entry *mte;
2775 struct resource_list_entry *rle;
2776 struct pci_devinfo *dinfo;
2780 * If this is a direct child, check to see if the interrupt is
2781 * MSI or MSI-X. If so, decrement the appropriate handlers
2782 * count and mask the MSI-X message, or disable MSI messages
2783 * if the count drops to 0.
/* Nothing to do for a NULL or never-activated interrupt resource. */
2785 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
2787 rid = rman_get_rid(irq);
2788 if (device_get_parent(child) == dev && rid > 0) {
2789 dinfo = device_get_ivars(child);
2790 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
/* The resource being torn down must be the one we track for this rid. */
2791 if (rle->res != irq)
2793 if (dinfo->cfg.msi.msi_alloc > 0) {
2794 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
2795 ("MSI-X index too high"))
2798 dinfo->cfg.msi.msi_handlers--;
2799 if (dinfo->cfg.msi.msi_handlers == 0)
2800 pci_disable_msi(child);
2802 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
2803 ("No MSI or MSI-X interrupts allocated"));
2804 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
2805 ("MSI-X index too high"));
2806 mte = &dinfo->cfg.msix.msix_table[rid - 1];
2807 if (mte->mte_handlers == 0)
2809 mte->mte_handlers--;
/* Last handler gone: mask this MSI-X table entry. */
2810 if (mte->mte_handlers == 0)
2811 pci_mask_msix(child, rid - 1);
2814 error = bus_generic_teardown_intr(dev, child, irq, cookie);
2815 if (device_get_parent(child) == dev && rid > 0)
2817 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Bus print_child method: print the standard child announcement line,
 * including its port/memory/IRQ resources, flags, and slot.function
 * location.  Returns the number of characters printed.
 */
2822 pci_print_child(device_t dev, device_t child)
2824 struct pci_devinfo *dinfo;
2825 struct resource_list *rl;
2828 dinfo = device_get_ivars(child);
2829 rl = &dinfo->resources;
2831 retval += bus_print_child_header(dev, child);
2833 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
2834 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
2835 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
2836 if (device_get_flags(dev))
2837 retval += printf(" flags %#x", device_get_flags(dev));
2839 retval += printf(" at device %d.%d", pci_get_slot(child),
2840 pci_get_function(child));
2842 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable description table used by
 * pci_probe_nomatch() to label devices no driver claimed.  An entry
 * with subclass -1 is the generic fallback name for its class.
 */
2852 } pci_nomatch_tab[] = {
2853 {PCIC_OLD, -1, "old"},
2854 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
2855 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
2856 {PCIC_STORAGE, -1, "mass storage"},
2857 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
2858 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
2859 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
2860 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
2861 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
2862 {PCIC_NETWORK, -1, "network"},
2863 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
2864 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
2865 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
2866 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
2867 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
2868 {PCIC_DISPLAY, -1, "display"},
2869 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
2870 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
2871 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
2872 {PCIC_MULTIMEDIA, -1, "multimedia"},
2873 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
2874 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
2875 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
2876 {PCIC_MEMORY, -1, "memory"},
2877 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
2878 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
2879 {PCIC_BRIDGE, -1, "bridge"},
2880 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
2881 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
2882 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
2883 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
2884 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
2885 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
2886 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
2887 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
2888 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
2889 {PCIC_SIMPLECOMM, -1, "simple comms"},
2890 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
2891 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
2892 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
2893 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
2894 {PCIC_BASEPERIPH, -1, "base peripheral"},
2895 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
2896 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
2897 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
2898 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
2899 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
2900 {PCIC_INPUTDEV, -1, "input device"},
2901 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
2902 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
2903 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
2904 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
2905 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
2906 {PCIC_DOCKING, -1, "docking station"},
2907 {PCIC_PROCESSOR, -1, "processor"},
2908 {PCIC_SERIALBUS, -1, "serial bus"},
2909 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
2910 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
2911 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
2912 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
2913 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
2914 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
2915 {PCIC_WIRELESS, -1, "wireless controller"},
2916 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
2917 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
2918 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
2919 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
2920 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
2921 {PCIC_SATCOM, -1, "satellite communication"},
2922 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
2923 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
2924 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
2925 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
2926 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
2927 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
2928 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
2929 {PCIC_DASP, -1, "dasp"},
2930 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Bus probe_nomatch method: announce a child no driver claimed.  Prefer
 * an exact description from the loaded vendor database; otherwise fall
 * back to a generic class/subclass name from pci_nomatch_tab.  Finally
 * save config state, allowing the unclaimed device to be powered down.
 * NOTE(review): cp and scp are presumably NULL-initialized on a line
 * not shown here -- confirm before relying on the <%s%s%s> format.
 */
2935 pci_probe_nomatch(device_t dev, device_t child)
2938 char *cp, *scp, *device;
2941 * Look for a listing for this device in a loaded device database.
2943 if ((device = pci_describe_device(child)) != NULL) {
2944 device_printf(dev, "<%s>", device);
2945 free(device, M_DEVBUF);
2948 * Scan the class/subclass descriptions for a general
/* cp gets the class fallback; scp the more specific subclass name. */
2953 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
2954 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
2955 if (pci_nomatch_tab[i].subclass == -1) {
2956 cp = pci_nomatch_tab[i].desc;
2957 } else if (pci_nomatch_tab[i].subclass ==
2958 pci_get_subclass(child)) {
2959 scp = pci_nomatch_tab[i].desc;
2963 device_printf(dev, "<%s%s%s>",
2965 ((cp != NULL) && (scp != NULL)) ? ", " : "",
2968 printf(" at device %d.%d (no driver attached)\n",
2969 pci_get_slot(child), pci_get_function(child));
2970 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
2975 * Parse the PCI device database, if loaded, and return a pointer to a
2976 * description of the device.
2978 * The database is flat text formatted as follows:
2980 * Any line not in a valid format is ignored.
2981 * Lines are terminated with newline '\n' characters.
2983 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
2986 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
2987 * - devices cannot be listed without a corresponding VENDOR line.
2988 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
2989 * another TAB, then the device name.
2993 * Assuming (ptr) points to the beginning of a line in the database,
2994 * return the vendor or device and description of the next entry.
2995 * The value of (vendor) or (device) inappropriate for the entry type
2996 * is set to -1. Returns nonzero at the end of the database.
2998 * Note that this is not fully robust in the face of corrupt data;
2999 * we attempt to safeguard against this by spamming the end of the
3000 * database with a newline when we initialise.
/*
 * Parse one line of the preloaded vendor database starting at *ptr.
 * On a VENDOR line, *vendor and *desc are filled in; on a DEVICE line,
 * *device and *desc.  The field not applicable to the line type is set
 * to -1.  *ptr is advanced past the line; bounds are tracked via 'left'
 * so parsing never runs off the end of the database.
 */
3003 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3012 left = pci_vendordata_size - (cp - pci_vendordata);
/* Vendor entries start at column 0: "XXXX<TAB>name". */
3020 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* Device entries are TAB-indented: "<TAB>XXXX<TAB>name". */
3024 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3027 /* skip to next line */
3028 while (*cp != '\n' && left > 0) {
3037 /* skip to next line */
3038 while (*cp != '\n' && left > 0) {
3042 if (*cp == '\n' && left > 0)
/*
 * Look up 'dev' in the loaded vendor database and return a malloc'd
 * "vendor, device" description string (caller frees with M_DEVBUF),
 * or NULL if no database is loaded or allocation fails.  An unknown
 * device under a known vendor is described by its hex device ID.
 */
3049 pci_describe_device(device_t dev)
3052 char *desc, *vp, *dp, *line;
3054 desc = vp = dp = NULL;
3057 * If we have no vendor data, we can't do anything.
3059 if (pci_vendordata == NULL)
3063 * Scan the vendor data looking for this device
3065 line = pci_vendordata;
/* 80-byte scratch buffers match the %80[^\n] parse width. */
3066 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3069 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3071 if (vendor == pci_get_vendor(dev))
3074 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3077 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3085 if (device == pci_get_device(dev))
/* Vendor matched but device did not: fall back to the raw device ID. */
3089 snprintf(dp, 80, "0x%x", pci_get_device(dev));
3090 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3092 sprintf(desc, "%s, %s", vp, dp);
/*
 * Bus read_ivar method: export cached config-header fields from the
 * child's pcicfgregs to callers of the pci_get_*() accessors.
 */
3102 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3104 struct pci_devinfo *dinfo;
3107 dinfo = device_get_ivars(child);
3111 case PCI_IVAR_ETHADDR:
3113 * The generic accessor doesn't deal with failure, so
3114 * we set the return value, then return an error.
3116 *((uint8_t **) result) = NULL;
3118 case PCI_IVAR_SUBVENDOR:
3119 *result = cfg->subvendor;
3121 case PCI_IVAR_SUBDEVICE:
3122 *result = cfg->subdevice;
3124 case PCI_IVAR_VENDOR:
3125 *result = cfg->vendor;
3127 case PCI_IVAR_DEVICE:
3128 *result = cfg->device;
3130 case PCI_IVAR_DEVID:
/* devid packs device into the high 16 bits, vendor into the low. */
3131 *result = (cfg->device << 16) | cfg->vendor;
3133 case PCI_IVAR_CLASS:
3134 *result = cfg->baseclass;
3136 case PCI_IVAR_SUBCLASS:
3137 *result = cfg->subclass;
3139 case PCI_IVAR_PROGIF:
3140 *result = cfg->progif;
3142 case PCI_IVAR_REVID:
3143 *result = cfg->revid;
3145 case PCI_IVAR_INTPIN:
3146 *result = cfg->intpin;
3149 *result = cfg->intline;
3151 case PCI_IVAR_DOMAIN:
3152 *result = cfg->domain;
3158 *result = cfg->slot;
3160 case PCI_IVAR_FUNCTION:
3161 *result = cfg->func;
3163 case PCI_IVAR_CMDREG:
3164 *result = cfg->cmdreg;
3166 case PCI_IVAR_CACHELNSZ:
3167 *result = cfg->cachelnsz;
3169 case PCI_IVAR_MINGNT:
3170 *result = cfg->mingnt;
3172 case PCI_IVAR_MAXLAT:
3173 *result = cfg->maxlat;
3175 case PCI_IVAR_LATTIMER:
3176 *result = cfg->lattimer;
/*
 * Bus write_ivar method: only the interrupt pin is writable; all other
 * config-header ivars are read-only and rejected with EINVAL.
 */
3185 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3187 struct pci_devinfo *dinfo;
3189 dinfo = device_get_ivars(child);
3192 case PCI_IVAR_INTPIN:
3193 dinfo->cfg.intpin = value;
3195 case PCI_IVAR_ETHADDR:
3196 case PCI_IVAR_SUBVENDOR:
3197 case PCI_IVAR_SUBDEVICE:
3198 case PCI_IVAR_VENDOR:
3199 case PCI_IVAR_DEVICE:
3200 case PCI_IVAR_DEVID:
3201 case PCI_IVAR_CLASS:
3202 case PCI_IVAR_SUBCLASS:
3203 case PCI_IVAR_PROGIF:
3204 case PCI_IVAR_REVID:
3206 case PCI_IVAR_DOMAIN:
3209 case PCI_IVAR_FUNCTION:
3210 return (EINVAL); /* disallow for now */
3218 #include "opt_ddb.h"
3220 #include <ddb/ddb.h>
3221 #include <sys/cons.h>
3224 * List resources based on pci map registers, used for within ddb
/*
 * ddb "show pciregs" command: walk the global pci_devq and print one
 * summary line (driver name/unit, location, class, subsystem, chip id,
 * revision, header type) per known PCI device.  Honors the ddb pager.
 */
3227 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3229 struct pci_devinfo *dinfo;
3230 struct devlist *devlist_head;
3233 int i, error, none_count;
3236 /* get the head of the device queue */
3237 devlist_head = &pci_devq;
3240 * Go through the list of devices and print out devices
3242 for (error = 0, i = 0,
3243 dinfo = STAILQ_FIRST(devlist_head);
3244 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3245 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3247 /* Populate pd_name and pd_unit */
3250 name = device_get_name(dinfo->cfg.dev);
3253 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3254 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
3255 (name && *name) ? name : "none",
3256 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3258 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3259 p->pc_sel.pc_func, (p->pc_class << 16) |
3260 (p->pc_subclass << 8) | p->pc_progif,
3261 (p->pc_subdevice << 16) | p->pc_subvendor,
3262 (p->pc_device << 16) | p->pc_vendor,
3263 p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate a resource for a BAR that has no entry in the child's
 * resource list yet: size the BAR by the write-ones/read-back probe,
 * validate the requested type against the BAR type, allocate a
 * suitably aligned range from the parent, and program the BAR with the
 * address actually obtained.
 */
3268 static struct resource *
3269 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3270 u_long start, u_long end, u_long count, u_int flags)
3272 struct pci_devinfo *dinfo = device_get_ivars(child);
3273 struct resource_list *rl = &dinfo->resources;
3274 struct resource_list_entry *rle;
3275 struct resource *res;
3276 pci_addr_t map, testval;
3280 * Weed out the bogons, and figure out how large the BAR/map
3281 * is. Bars that read back 0 here are bogus and unimplemented.
3282 * Note: atapci in legacy mode are special and handled elsewhere
3283 * in the code. If you have a atapci device in legacy mode and
3284 * it fails here, that other code is broken.
3287 map = pci_read_config(child, *rid, 4);
/* Classic BAR sizing: write all-ones, read back the size mask. */
3288 pci_write_config(child, *rid, 0xffffffff, 4);
3289 testval = pci_read_config(child, *rid, 4);
/* 64-bit memory BAR: the upper half lives in the next dword. */
3290 if (pci_maprange(testval) == 64)
3291 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
3292 if (pci_mapbase(testval) == 0)
3296 * Restore the original value of the BAR. We may have reprogrammed
3297 * the BAR of the low-level console device and when booting verbose,
3298 * we need the console device addressable.
3300 pci_write_config(child, *rid, map, 4);
/* Reject a request whose type disagrees with what the BAR decodes. */
3302 if (PCI_BAR_MEM(testval)) {
3303 if (type != SYS_RES_MEMORY) {
3306 "child %s requested type %d for rid %#x,"
3307 " but the BAR says it is an memio\n",
3308 device_get_nameunit(child), type, *rid);
3312 if (type != SYS_RES_IOPORT) {
3315 "child %s requested type %d for rid %#x,"
3316 " but the BAR says it is an ioport\n",
3317 device_get_nameunit(child), type, *rid);
3322 * For real BARs, we need to override the size that
3323 * the driver requests, because that's what the BAR
3324 * actually uses and we would otherwise have a
3325 * situation where we might allocate the excess to
3326 * another driver, which won't work.
3328 mapsize = pci_mapsize(testval);
3329 count = 1UL << mapsize;
/* BARs must be naturally aligned to their size. */
3330 if (RF_ALIGNMENT(flags) < mapsize)
3331 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3334 * Allocate enough resource, and then write back the
3335 * appropriate bar for that resource.
3337 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
3338 start, end, count, flags);
3340 device_printf(child,
3341 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3342 count, *rid, type, start, end);
/* Record the allocation in the child's resource list. */
3345 resource_list_add(rl, type, *rid, start, end, count);
3346 rle = resource_list_find(rl, type, *rid);
3348 panic("pci_alloc_map: unexpectedly can't find resource.");
3350 rle->start = rman_get_start(res);
3351 rle->end = rman_get_end(res);
3354 device_printf(child,
3355 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3356 count, *rid, type, rman_get_start(res));
/* Program the BAR (both halves if 64-bit) with the assigned address. */
3357 map = rman_get_start(res);
3359 pci_write_config(child, *rid, map, 4);
3360 if (pci_maprange(testval) == 64)
3361 pci_write_config(child, *rid + 4, map >> 32, 4);
/*
 * Bus alloc_resource method.  For direct children this performs lazy
 * allocation: legacy interrupts may be routed on demand (but are
 * refused once MSI/MSI-X messages exist), and BAR-backed port/memory
 * ranges with no list entry are sized and allocated via
 * pci_alloc_map().  Already-reserved entries are returned (and
 * activated if requested); everything else falls through to
 * resource_list_alloc().
 */
3367 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
3368 u_long start, u_long end, u_long count, u_int flags)
3370 struct pci_devinfo *dinfo = device_get_ivars(child);
3371 struct resource_list *rl = &dinfo->resources;
3372 struct resource_list_entry *rle;
3373 pcicfgregs *cfg = &dinfo->cfg;
3376 * Perform lazy resource allocation
3378 if (device_get_parent(child) == dev) {
3382 * Can't alloc legacy interrupt once MSI messages
3383 * have been allocated.
3385 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3386 cfg->msix.msix_alloc > 0))
3389 * If the child device doesn't have an
3390 * interrupt routed and is deserving of an
3391 * interrupt, try to assign it one.
3393 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3395 pci_assign_interrupt(dev, child, 0);
3397 case SYS_RES_IOPORT:
3398 case SYS_RES_MEMORY:
/* Only rids that correspond to real BARs get the lazy path. */
3399 if (*rid < PCIR_BAR(cfg->nummaps)) {
3401 * Enable the I/O mode. We should
3402 * also be assigning resources too
3403 * when none are present. The
3404 * resource_list_alloc kind of sorta does
3407 if (PCI_ENABLE_IO(dev, child, type))
3410 rle = resource_list_find(rl, type, *rid);
3412 return (pci_alloc_map(dev, child, type, rid,
3413 start, end, count, flags));
3417 * If we've already allocated the resource, then
3418 * return it now. But first we may need to activate
3419 * it, since we don't allocate the resource as active
3420 * above. Normally this would be done down in the
3421 * nexus, but since we short-circuit that path we have
3422 * to do its job here. Not sure if we should free the
3423 * resource if it fails to activate.
3425 rle = resource_list_find(rl, type, *rid);
3426 if (rle != NULL && rle->res != NULL) {
3428 device_printf(child,
3429 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3430 rman_get_size(rle->res), *rid, type,
3431 rman_get_start(rle->res));
3432 if ((flags & RF_ACTIVE) &&
3433 bus_generic_activate_resource(dev, child, type,
3434 *rid, rle->res) != 0)
3439 return (resource_list_alloc(rl, dev, child, type, rid,
3440 start, end, count, flags));
/*
 * Bus delete_resource method for direct children: release the tracked
 * resource (refusing if the child still owns or has activated it),
 * remove it from the resource list, clear the corresponding BAR, and
 * propagate the deletion to the parent bus.
 */
3444 pci_delete_resource(device_t dev, device_t child, int type, int rid)
3446 struct pci_devinfo *dinfo;
3447 struct resource_list *rl;
3448 struct resource_list_entry *rle;
3450 if (device_get_parent(child) != dev)
3453 dinfo = device_get_ivars(child);
3454 rl = &dinfo->resources;
3455 rle = resource_list_find(rl, type, rid);
/* Refuse to delete a resource the child still holds or has active. */
3458 if (rman_get_device(rle->res) != dev ||
3459 rman_get_flags(rle->res) & RF_ACTIVE) {
3460 device_printf(dev, "delete_resource: "
3461 "Resource still owned by child, oops. "
3462 "(type=%d, rid=%d, addr=%lx)\n",
3463 rle->type, rle->rid,
3464 rman_get_start(rle->res));
3467 bus_release_resource(dev, type, rid, rle->res);
3469 resource_list_delete(rl, type, rid);
3472 * Why do we turn off the PCI configuration BAR when we delete a
/* Clear the BAR so the device stops decoding the deleted range. */
3475 pci_write_config(child, rid, 0, 4);
3476 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
3479 struct resource_list *
3480 pci_get_resource_list (device_t dev, device_t child)
3482 struct pci_devinfo *dinfo = device_get_ivars(child);
3484 return (&dinfo->resources);
3488 pci_read_config_method(device_t dev, device_t child, int reg, int width)
3490 struct pci_devinfo *dinfo = device_get_ivars(child);
3491 pcicfgregs *cfg = &dinfo->cfg;
3493 return (PCIB_READ_CONFIG(device_get_parent(dev),
3494 cfg->bus, cfg->slot, cfg->func, reg, width));
3498 pci_write_config_method(device_t dev, device_t child, int reg,
3499 uint32_t val, int width)
3501 struct pci_devinfo *dinfo = device_get_ivars(child);
3502 pcicfgregs *cfg = &dinfo->cfg;
3504 PCIB_WRITE_CONFIG(device_get_parent(dev),
3505 cfg->bus, cfg->slot, cfg->func, reg, val, width);
3509 pci_child_location_str_method(device_t dev, device_t child, char *buf,
3513 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
3514 pci_get_function(child));
/*
 * Bus child_pnpinfo_str method: format the child's plug-and-play
 * identification (vendor/device/subsystem IDs and full class code)
 * into buf for devd matching.
 */
3519 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
3522 struct pci_devinfo *dinfo;
3525 dinfo = device_get_ivars(child);
3527 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
3528 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3529 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus assign_interrupt method: ask the parent bridge to route an
 * interrupt for this child.
 */
3535 pci_assign_interrupt_method(device_t dev, device_t child)
3537 struct pci_devinfo *dinfo = device_get_ivars(child);
3538 pcicfgregs *cfg = &dinfo->cfg;
3540 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev/pci control device, and pull in the preloaded vendor
 * database; on unload, destroy the control device.
 */
3545 pci_modevent(module_t mod, int what, void *arg)
3547 static struct cdev *pci_cdev;
3551 STAILQ_INIT(&pci_devq);
3553 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
3555 pci_load_vendor_data();
3559 destroy_dev(pci_cdev);
/*
 * Restore a type-0 device's saved config header: power it up to D0
 * first (so BARs survive the transition), then rewrite BARs, command,
 * interrupt, timing, and class registers, and re-arm any MSI/MSI-X
 * state that was previously configured.
 */
3567 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3572 * Only do header type 0 devices. Type 1 devices are bridges,
3573 * which we know need special treatment. Type 2 devices are
3574 * cardbus bridges which also require special treatment.
3575 * Other types are unknown, and we err on the side of safety
3578 if (dinfo->cfg.hdrtype != 0)
3582 * Restore the device to full power mode. We must do this
3583 * before we restore the registers because moving from D3 to
3584 * D0 will cause the chip's BARs and some other registers to
3585 * be reset to some unknown power on reset values. Cut down
3586 * the noise on boot by doing nothing if we are already in
3589 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3590 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3592 for (i = 0; i < dinfo->cfg.nummaps; i++)
3593 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3594 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3595 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3596 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3597 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3598 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3599 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3600 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3601 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3602 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3603 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3605 /* Restore MSI and MSI-X configurations if they are present. */
3606 if (dinfo->cfg.msi.msi_location != 0)
3607 pci_resume_msi(dev);
3608 if (dinfo->cfg.msix.msix_location != 0)
3609 pci_resume_msix(dev);
/*
 * Save a type-0 device's writable config-header registers into its
 * devinfo so pci_cfg_restore() can replay them.  When 'setstate' is
 * set, optionally power the device down to D3 according to the
 * pci_do_power_nodriver policy (skipping device classes known to
 * misbehave when powered off).
 */
3613 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3620 * Only do header type 0 devices. Type 1 devices are bridges, which
3621 * we know need special treatment. Type 2 devices are cardbus bridges
3622 * which also require special treatment. Other types are unknown, and
3623 * we err on the side of safety by ignoring them. Powering down
3624 * bridges should not be undertaken lightly.
3626 if (dinfo->cfg.hdrtype != 0)
3628 for (i = 0; i < dinfo->cfg.nummaps; i++)
3629 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3630 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3633 * Some drivers apparently write to these registers w/o updating our
3634 * cached copy. No harm happens if we update the copy, so do so here
3635 * so we can restore them. The COMMAND register is modified by the
3636 * bus w/o updating the cache. This should represent the normally
3637 * writable portion of the 'defined' part of type 0 headers. In
3638 * theory we also need to save/restore the PCI capability structures
3639 * we know about, but apart from power we don't know any that are
3642 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3643 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3644 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3645 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3646 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3647 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3648 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3649 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3650 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3651 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3652 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3653 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3654 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3655 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3656 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
3659 * don't set the state for display devices, base peripherals and
3660 * memory devices since bad things happen when they are powered down.
3661 * We should (a) have drivers that can easily detach and (b) use
3662 * generic drivers for these devices so that some device actually
3663 * attaches. We need to make sure that when we implement (a) we don't
3664 * power the device down on a reattach.
3666 cls = pci_get_class(dev);
3669 switch (pci_do_power_nodriver)
3671 case 0: /* NO powerdown at all */
3673 case 1: /* Conservative about what to power down */
3674 if (cls == PCIC_STORAGE)
3677 case 2: /* Aggressive about what to power down */
3678 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
3679 cls == PCIC_BASEPERIPH)
3682 case 3: /* Power down everything */
3686 * PCI spec says we can only go into D3 state from D0 state.
3687 * Transition from D[12] into D0 before going to D3 state.
3689 ps = pci_get_powerstate(dev);
3690 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
3691 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3692 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
3693 pci_set_powerstate(dev, PCI_POWERSTATE_D3);