2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
55 #include <sys/pciio.h>
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58 #include <dev/pci/pci_private.h>
64 #include <contrib/dev/acpica/acpi.h>
67 #define ACPI_PWR_FOR_SLEEP(x, y, z)
/*
 * Forward declarations of file-local helpers: BAR decoding, device
 * enumeration/attach, vendor-data parsing, and VPD/capability access.
 * NOTE(review): several prototypes are truncated mid-parameter-list in
 * this extraction (original numbering gaps).
 */
70 static uint32_t pci_mapbase(unsigned mapreg);
71 static int pci_maptype(unsigned mapreg);
72 static int pci_mapsize(unsigned testval);
73 static int pci_maprange(unsigned mapreg);
74 static void pci_fixancient(pcicfgregs *cfg);
76 static int pci_porten(device_t pcib, int b, int s, int f);
77 static int pci_memen(device_t pcib, int b, int s, int f);
78 static void pci_assign_interrupt(device_t bus, device_t dev,
80 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
81 int b, int s, int f, int reg,
82 struct resource_list *rl, int force, int prefetch);
83 static int pci_probe(device_t dev);
84 static int pci_attach(device_t dev);
85 static void pci_load_vendor_data(void);
86 static int pci_describe_parse_line(char **ptr, int *vendor,
87 int *device, char **desc);
88 static char *pci_describe_device(device_t dev);
89 static int pci_modevent(module_t mod, int what, void *arg);
90 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
92 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
93 static uint32_t pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
96 static void pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
97 int reg, uint32_t data);
99 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
/*
 * Newbus method table for the PCI bus driver.  Generic bus_generic_*
 * implementations are used where PCI needs no special handling.
 */
101 static device_method_t pci_methods[] = {
102 /* Device interface */
103 DEVMETHOD(device_probe, pci_probe),
104 DEVMETHOD(device_attach, pci_attach),
105 DEVMETHOD(device_detach, bus_generic_detach),
106 DEVMETHOD(device_shutdown, bus_generic_shutdown),
107 DEVMETHOD(device_suspend, pci_suspend),
108 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
111 DEVMETHOD(bus_print_child, pci_print_child),
112 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
113 DEVMETHOD(bus_read_ivar, pci_read_ivar),
114 DEVMETHOD(bus_write_ivar, pci_write_ivar),
115 DEVMETHOD(bus_driver_added, pci_driver_added),
116 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
117 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
119 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
120 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
121 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
122 DEVMETHOD(bus_delete_resource, pci_delete_resource),
123 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
124 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
125 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
126 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
127 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
128 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface */
131 DEVMETHOD(pci_read_config, pci_read_config_method),
132 DEVMETHOD(pci_write_config, pci_write_config_method),
133 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
134 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
135 DEVMETHOD(pci_enable_io, pci_enable_io_method),
136 DEVMETHOD(pci_disable_io, pci_disable_io_method),
137 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
138 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
139 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
140 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
141 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
142 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
143 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
144 DEVMETHOD(pci_release_msi, pci_release_msi_method),
145 DEVMETHOD(pci_msi_count, pci_msi_count_method),
/* NOTE(review): the { 0, 0 } array terminator is not visible in this view. */
/* Declare the pci driver class and hook it under pcib (PCI bridge) parents. */
150 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
152 static devclass_t pci_devclass;
153 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
154 MODULE_VERSION(pci, 1);
/*
 * In-memory copy of the vendor/device description database; loaded by
 * pci_load_vendor_data() and parsed by pci_describe_device().
 */
156 static char *pci_vendordata;
157 static size_t pci_vendordata_size;
/*
 * NOTE(review): this is a field of struct pci_quirk; the struct header and
 * remaining fields are not visible in this view (numbering gaps).
 */
161 uint32_t devid; /* Vendor/device of the card */
163 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
/* Table of known devices that need quirk handling, keyed by devid. */
168 struct pci_quirk pci_quirks[] = {
169 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
170 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
171 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
172 /* As does the Serverworks OSB4 (the SMBus mapping register) */
173 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
178 /* map register information */
179 #define PCI_MAPMEM 0x01 /* memory map */
180 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
181 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of all enumerated PCI devices, plus generation/count stats. */
183 struct devlist pci_devq;
184 uint32_t pci_generation;
185 uint32_t pci_numdevs = 0;
/* sysctl/tunable knobs under hw.pci controlling bus-wide policy. */
188 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
/* Whether to turn on the I/O and memory decode bits in PCIR_COMMAND. */
190 static int pci_enable_io_modes = 1;
191 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
192 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
193 &pci_enable_io_modes, 1,
194 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
195 enable these bits correctly. We'd like to do this all the time, but there\n\
196 are some peripherals that this causes problems with.");
/* Power-down policy for devices with no attached driver (0-3, see text). */
198 static int pci_do_power_nodriver = 0;
199 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
200 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
201 &pci_do_power_nodriver, 0,
202 "Place a function into D3 state when no driver attaches to it. 0 means\n\
203 disable. 1 means conservatively place devices into D3 state. 2 means\n\
204 agressively place devices into D3 state. 3 means put absolutely everything\n\
/* Whether devices are powered back to D0 on system resume. */
207 static int pci_do_power_resume = 1;
208 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
209 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
210 &pci_do_power_resume, 1,
211 "Transition from D3 -> D0 on resume.");
/* Global enables for MSI and MSI-X; checked by the alloc paths below. */
213 static int pci_do_msi = 1;
214 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
215 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
216 "Enable support for MSI interrupts");
218 static int pci_do_msix = 1;
219 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
220 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
221 "Enable support for MSI-X interrupts");
223 /* Find a device_t by bus/slot/function */
/*
 * Linear scan of the global pci_devq list.
 * NOTE(review): the not-found return path and closing braces are missing
 * from this view (numbering gaps).
 */
226 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
228 struct pci_devinfo *dinfo;
230 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
231 if ((dinfo->cfg.bus == bus) &&
232 (dinfo->cfg.slot == slot) &&
233 (dinfo->cfg.func == func)) {
234 return (dinfo->cfg.dev);
241 /* Find a device_t by vendor/device ID */
/* Same scan keyed on vendor/device; returns the first match on the list. */
244 pci_find_device(uint16_t vendor, uint16_t device)
246 struct pci_devinfo *dinfo;
248 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
249 if ((dinfo->cfg.vendor == vendor) &&
250 (dinfo->cfg.device == device)) {
251 return (dinfo->cfg.dev);
258 /* return base address of memory or port map */
/*
 * Bit 0 of a BAR distinguishes memory (0) from I/O (1) maps; the low bits
 * are type/flag bits masked off to yield the base address.
 * NOTE(review): the mask computation for the I/O case is not visible here.
 */
261 pci_mapbase(uint32_t mapreg)
264 if ((mapreg & 0x01) == 0)
266 return (mapreg & ~mask);
269 /* return map type of memory or port map */
/* Table lookup on the BAR's low 4 bits -> PCI_MAPMEM/PCI_MAPMEMP/PCI_MAPPORT. */
272 pci_maptype(unsigned mapreg)
274 static uint8_t maptype[0x10] = {
275 PCI_MAPMEM, PCI_MAPPORT,
277 PCI_MAPMEM, PCI_MAPPORT,
279 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
280 PCI_MAPMEM|PCI_MAPMEMP, 0,
281 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
285 return maptype[mapreg & 0x0f];
288 /* return log2 of map size decoded for memory or port map */
/*
 * testval is the value read back after writing all-ones to the BAR; the
 * position of the lowest set bit in the base field gives the decode size.
 */
291 pci_mapsize(uint32_t testval)
295 testval = pci_mapbase(testval);
298 while ((testval & 1) == 0)
307 /* return log2 of address range supported by map register */
/* Decodes the BAR type bits (32-bit / <1MB / 64-bit address ranges). */
310 pci_maprange(unsigned mapreg)
313 switch (mapreg & 0x07) {
329 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
332 pci_fixancient(pcicfgregs *cfg)
334 if (cfg->hdrtype != 0)
337 /* PCI to PCI bridges use header type 1 */
338 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
342 /* extract header type specific config data */
/*
 * Subvendor/subdevice registers and BAR counts live at different offsets
 * for type 0 (device), type 1 (PCI-PCI bridge) and type 2 (cardbus) headers.
 * NOTE(review): the case labels of this switch are missing from this view.
 */
345 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
347 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
348 switch (cfg->hdrtype) {
350 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
351 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
352 cfg->nummaps = PCI_MAXMAPS_0;
355 cfg->subvendor = REG(PCIR_SUBVEND_1, 2);
356 cfg->subdevice = REG(PCIR_SUBDEV_1, 2);
357 cfg->nummaps = PCI_MAXMAPS_1;
360 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
361 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
362 cfg->nummaps = PCI_MAXMAPS_2;
368 /* read configuration header into pcicfgregs structure */
/*
 * Allocates a pci_devinfo of the caller-supplied size (callers may embed
 * pci_devinfo in a larger structure), fills cfg from the config header,
 * scans capabilities, links the entry onto pci_devq, and mirrors the data
 * into the pciio pc_conf structure.  Returns NULL for an absent function
 * (PCIR_DEVVENDOR reads as all-ones).
 */
370 pci_read_device(device_t pcib, int b, int s, int f, size_t size)
372 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
373 pcicfgregs *cfg = NULL;
374 struct pci_devinfo *devlist_entry;
375 struct devlist *devlist_head;
377 devlist_head = &pci_devq;
379 devlist_entry = NULL;
/* All-ones device/vendor means no function responds at this b/s/f. */
381 if (REG(PCIR_DEVVENDOR, 4) != -1) {
382 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
383 if (devlist_entry == NULL)
386 cfg = &devlist_entry->cfg;
391 cfg->vendor = REG(PCIR_VENDOR, 2);
392 cfg->device = REG(PCIR_DEVICE, 2);
393 cfg->cmdreg = REG(PCIR_COMMAND, 2);
394 cfg->statreg = REG(PCIR_STATUS, 2);
395 cfg->baseclass = REG(PCIR_CLASS, 1);
396 cfg->subclass = REG(PCIR_SUBCLASS, 1);
397 cfg->progif = REG(PCIR_PROGIF, 1);
398 cfg->revid = REG(PCIR_REVID, 1);
399 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
400 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
401 cfg->lattimer = REG(PCIR_LATTIMER, 1);
402 cfg->intpin = REG(PCIR_INTPIN, 1);
403 cfg->intline = REG(PCIR_INTLINE, 1);
405 cfg->mingnt = REG(PCIR_MINGNT, 1);
406 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split the multifunction flag out of the raw header type byte. */
408 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
409 cfg->hdrtype &= ~PCIM_MFDEV;
412 pci_hdrtypedata(pcib, b, s, f, cfg);
/* Walk the capability list only if the status register advertises one. */
414 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
415 pci_read_extcap(pcib, cfg);
417 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror cfg into the pciio-visible conf structure for pciconf(8) etc. */
419 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
420 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
421 devlist_entry->conf.pc_sel.pc_func = cfg->func;
422 devlist_entry->conf.pc_hdr = cfg->hdrtype;
424 devlist_entry->conf.pc_subvendor = cfg->subvendor;
425 devlist_entry->conf.pc_subdevice = cfg->subdevice;
426 devlist_entry->conf.pc_vendor = cfg->vendor;
427 devlist_entry->conf.pc_device = cfg->device;
429 devlist_entry->conf.pc_class = cfg->baseclass;
430 devlist_entry->conf.pc_subclass = cfg->subclass;
431 devlist_entry->conf.pc_progif = cfg->progif;
432 devlist_entry->conf.pc_revid = cfg->revid;
437 return (devlist_entry);
/*
 * Walk the PCI capability linked list and record the location/contents of
 * the capabilities this driver understands: power management, MSI, MSI-X
 * and VPD.  Unknown capability IDs are skipped.
 */
442 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
444 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
446 int ptr, nextptr, ptrptr;
/* The capability pointer register differs by header type. */
448 switch (cfg->hdrtype & PCIM_HDRTYPE) {
451 ptrptr = PCIR_CAP_PTR;
454 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
457 return; /* no extended capabilities support */
459 nextptr = REG(ptrptr, 1); /* sanity check? */
462 * Read capability entries.
464 while (nextptr != 0) {
467 printf("illegal PCI extended capability offset %d\n",
471 /* Find the next entry */
473 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
475 /* Process this entry */
476 switch (REG(ptr + PCICAP_ID, 1)) {
477 case PCIY_PMG: /* PCI power management */
/* Record only the first PM capability found. */
478 if (cfg->pp.pp_cap == 0) {
479 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
480 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
481 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
482 if ((nextptr - ptr) > PCIR_POWER_DATA)
483 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
486 case PCIY_MSI: /* PCI MSI */
487 cfg->msi.msi_location = ptr;
488 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* MMC field encodes log2 of the supported message count. */
489 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
490 PCIM_MSICTRL_MMC_MASK)>>1);
492 case PCIY_MSIX: /* PCI MSI-X */
493 cfg->msix.msix_location = ptr;
494 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
495 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
496 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA locations are a BAR index plus an offset within it. */
497 val = REG(ptr + PCIR_MSIX_TABLE, 4);
498 cfg->msix.msix_table_bar = PCIR_BAR(val &
500 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
501 val = REG(ptr + PCIR_MSIX_PBA, 4);
502 cfg->msix.msix_pba_bar = PCIR_BAR(val &
504 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
506 case PCIY_VPD: /* PCI Vital Product Data */
507 cfg->vpd.vpd_reg = ptr;
508 pci_read_vpd(pcib, cfg);
514 /* REG use carry through to next functions */
518 * PCI Vital Product Data
/*
 * Read one 32-bit VPD word: write the address (flag bit clear), spin until
 * the hardware sets the flag bit to signal completion, then read the data.
 * NOTE(review): the busy-wait has no visible timeout bound in this view.
 */
521 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg)
523 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
525 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
527 WREG(cfg->vpd.vpd_reg + 2, reg, 2);
528 while ((REG(cfg->vpd.vpd_reg + 2, 2) & 0x8000) != 0x8000)
529 DELAY(1); /* limit looping */
531 return REG(cfg->vpd.vpd_reg + 4, 4);
/*
 * Write one 32-bit VPD word: write data, then the address with the flag
 * bit set; the hardware clears the flag when the write completes.
 */
536 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
538 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
540 WREG(cfg->vpd.vpd_reg + 4, data, 4);
541 WREG(cfg->vpd.vpd_reg + 2, reg | 0x8000, 2);
542 while ((REG(cfg->vpd.vpd_reg + 2, 2) & 0x8000) == 0x8000)
543 DELAY(1); /* limit looping */
/* Iterator state for streaming VPD bytes out of the 32-bit word reads. */
550 struct vpd_readstate {
/*
 * Return the next VPD byte, refilling the 4-byte buffer (little-endian)
 * from pci_read_vpd_reg() when it runs dry.
 */
560 vpd_nextbyte(struct vpd_readstate *vrs)
564 if (vrs->bytesinval == 0) {
565 vrs->val = le32toh(pci_read_vpd_reg(vrs->pcib, vrs->cfg,
568 byte = vrs->val & 0xff;
571 vrs->val = vrs->val >> 8;
572 byte = vrs->val & 0xff;
/*
 * Parse the device's VPD into cfg->vpd: the Identifier String, the
 * read-only (VPD-R) keyword array, and the read/write (VPD-W) keyword
 * array.  Implemented as a byte-at-a-time state machine over
 * vpd_nextbyte(); the RV keyword's checksum validates the read-only data.
 * NOTE(review): many interior lines (state transitions, break statements)
 * are missing from this view — treat state numbering comments as the
 * authoritative map of the machine.
 */
581 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
583 struct vpd_readstate vrs;
590 int alloc, off; /* alloc/off for RO/W arrays */
594 /* init vpd reader */
602 name = remain = i = 0; /* shut up stupid gcc */
603 alloc = off = 0; /* shut up stupid gcc */
604 dflen = 0; /* shut up stupid gcc */
608 byte = vpd_nextbyte(&vrs);
610 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
611 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
612 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
615 case 0: /* item name */
/* Large resource: 16-bit little-endian length follows the tag byte. */
617 remain = vpd_nextbyte(&vrs);
618 remain |= vpd_nextbyte(&vrs) << 8;
/* VPD address space is 0x7f words of 4 bytes; reject lengths past it. */
619 if (remain > (0x7f*4 - vrs.off)) {
622 "pci%d:%d:%d: invalid vpd data, remain %#x\n",
623 cfg->bus, cfg->slot, cfg->func,
629 name = (byte >> 3) & 0xf;
632 case 0x2: /* String */
633 cfg->vpd.vpd_ident = malloc(remain + 1,
642 case 0x10: /* VPD-R */
645 cfg->vpd.vpd_ros = malloc(alloc *
646 sizeof *cfg->vpd.vpd_ros, M_DEVBUF,
650 case 0x11: /* VPD-W */
653 cfg->vpd.vpd_w = malloc(alloc *
654 sizeof *cfg->vpd.vpd_w, M_DEVBUF,
658 default: /* Invalid data, abort */
664 case 1: /* Identifier String */
665 cfg->vpd.vpd_ident[i++] = byte;
668 cfg->vpd.vpd_ident[i] = '\0';
673 case 2: /* VPD-R Keyword Header */
/* Grow the keyword array geometrically as entries are found. */
675 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
676 (alloc *= 2) * sizeof *cfg->vpd.vpd_ros,
679 cfg->vpd.vpd_ros[off].keyword[0] = byte;
680 cfg->vpd.vpd_ros[off].keyword[1] = vpd_nextbyte(&vrs);
681 dflen = vpd_nextbyte(&vrs);
683 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
686 * if this happens, we can't trust the rest
689 printf("pci%d:%d:%d: bad keyword length: %d\n",
690 cfg->bus, cfg->slot, cfg->func, dflen);
694 } else if (dflen == 0) {
/* Zero-length keyword still gets a 1-byte value so free() is uniform. */
695 cfg->vpd.vpd_ros[off].value = malloc(1 *
696 sizeof *cfg->vpd.vpd_ros[off].value,
698 cfg->vpd.vpd_ros[off].value[0] = '\x00';
700 cfg->vpd.vpd_ros[off].value = malloc(
702 sizeof *cfg->vpd.vpd_ros[off].value,
706 /* keep in sync w/ state 3's transitions */
707 if (dflen == 0 && remain == 0)
715 case 3: /* VPD-R Keyword Value */
716 cfg->vpd.vpd_ros[off].value[i++] = byte;
/* The RV keyword holds the checksum covering all prior VPD-R bytes. */
717 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
718 "RV", 2) == 0 && cksumvalid == -1) {
723 "pci%d:%d:%d: bad VPD cksum, remain %hhu\n",
724 cfg->bus, cfg->slot, cfg->func,
733 /* keep in sync w/ state 2's transitions */
735 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
736 if (dflen == 0 && remain == 0) {
/* VPD-R exhausted: shrink the array to its final size. */
737 cfg->vpd.vpd_rocnt = off;
738 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
739 off * sizeof *cfg->vpd.vpd_ros,
742 } else if (dflen == 0)
752 case 5: /* VPD-W Keyword Header */
754 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
755 (alloc *= 2) * sizeof *cfg->vpd.vpd_w,
758 cfg->vpd.vpd_w[off].keyword[0] = byte;
759 cfg->vpd.vpd_w[off].keyword[1] = vpd_nextbyte(&vrs);
760 cfg->vpd.vpd_w[off].len = dflen = vpd_nextbyte(&vrs);
/* Record the word-aligned start so writes can target this keyword later. */
761 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
762 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
763 sizeof *cfg->vpd.vpd_w[off].value,
767 /* keep in sync w/ state 6's transitions */
768 if (dflen == 0 && remain == 0)
776 case 6: /* VPD-W Keyword Value */
777 cfg->vpd.vpd_w[off].value[i++] = byte;
780 /* keep in sync w/ state 5's transitions */
782 cfg->vpd.vpd_w[off++].value[i++] = '\0';
783 if (dflen == 0 && remain == 0) {
784 cfg->vpd.vpd_wcnt = off;
785 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
786 off * sizeof *cfg->vpd.vpd_w,
789 } else if (dflen == 0)
794 printf("pci%d:%d:%d: invalid state: %d\n",
795 cfg->bus, cfg->slot, cfg->func, state);
/* Checksum failed: discard the untrustworthy read-only data. */
801 if (cksumvalid == 0) {
802 /* read-only data bad, clean up */
804 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
806 free(cfg->vpd.vpd_ros, M_DEVBUF);
807 cfg->vpd.vpd_ros = NULL;
/*
 * Return the VPD Identifier String via *identptr; NULL means the device
 * exposed no VPD ident (error return path not visible in this view).
 */
813 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
815 struct pci_devinfo *dinfo = device_get_ivars(child);
816 pcicfgregs *cfg = &dinfo->cfg;
818 *identptr = cfg->vpd.vpd_ident;
820 if (*identptr == NULL)
/*
 * Look up a two-character read-only VPD keyword (e.g. "PN", "SN") and
 * return its cached value via *vptr.
 */
827 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
830 struct pci_devinfo *dinfo = device_get_ivars(child);
831 pcicfgregs *cfg = &dinfo->cfg;
834 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
835 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
836 sizeof cfg->vpd.vpd_ros[i].keyword) == 0) {
837 *vptr = cfg->vpd.vpd_ros[i].value;
840 if (i != cfg->vpd.vpd_rocnt)
848 * Return the offset in configuration space of the requested extended
849 * capability entry or 0 if the specified capability was not found.
/*
 * Re-walks the capability list from config space rather than using the
 * cached cfg data, so it finds capabilities this driver does not parse.
 */
852 pci_find_extcap_method(device_t dev, device_t child, int capability,
855 struct pci_devinfo *dinfo = device_get_ivars(child);
856 pcicfgregs *cfg = &dinfo->cfg;
861 * Check the CAP_LIST bit of the PCI status register first.
863 status = pci_read_config(child, PCIR_STATUS, 2);
864 if (!(status & PCIM_STATUS_CAPPRESENT))
868 * Determine the start pointer of the capabilities list.
870 switch (cfg->hdrtype & PCIM_HDRTYPE) {
876 ptr = PCIR_CAP_PTR_2;
880 return (ENXIO); /* no extended capabilities support */
882 ptr = pci_read_config(child, ptr, 1);
885 * Traverse the capabilities list.
888 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
893 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
900 * Support for MSI-X message interrupts.
/*
 * Program one 16-byte MSI-X table entry (address low/high, data) through
 * the mapped table BAR resource.
 */
903 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
905 struct pci_devinfo *dinfo = device_get_ivars(dev);
906 pcicfgregs *cfg = &dinfo->cfg;
909 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
910 offset = cfg->msix.msix_table_offset + index * 16;
911 bus_write_4(cfg->msix.msix_table_res, offset, address & 0xffffffff);
912 bus_write_4(cfg->msix.msix_table_res, offset + 4, address >> 32);
913 bus_write_4(cfg->msix.msix_table_res, offset + 8, data);
/* Set the per-vector mask bit (Vector Control dword at entry offset 12). */
917 pci_mask_msix(device_t dev, u_int index)
919 struct pci_devinfo *dinfo = device_get_ivars(dev);
920 pcicfgregs *cfg = &dinfo->cfg;
921 uint32_t offset, val;
/* NOTE(review): bounds-checks against msix_msgnum here but msix_alloc
 * in the sibling helpers — confirm the asymmetry is intentional. */
923 KASSERT(cfg->msix.msix_msgnum > index, ("bogus index"));
924 offset = cfg->msix.msix_table_offset + index * 16 + 12;
925 val = bus_read_4(cfg->msix.msix_table_res, offset);
926 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
927 val |= PCIM_MSIX_VCTRL_MASK;
928 bus_write_4(cfg->msix.msix_table_res, offset, val);
/* Clear the per-vector mask bit, re-enabling delivery for this vector. */
933 pci_unmask_msix(device_t dev, u_int index)
935 struct pci_devinfo *dinfo = device_get_ivars(dev);
936 pcicfgregs *cfg = &dinfo->cfg;
937 uint32_t offset, val;
939 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
940 offset = cfg->msix.msix_table_offset + index * 16 + 12;
941 val = bus_read_4(cfg->msix.msix_table_res, offset);
942 if (val & PCIM_MSIX_VCTRL_MASK) {
943 val &= ~PCIM_MSIX_VCTRL_MASK;
944 bus_write_4(cfg->msix.msix_table_res, offset, val);
/* Test this vector's bit in the Pending Bit Array; nonzero if pending. */
949 pci_pending_msix(device_t dev, u_int index)
951 struct pci_devinfo *dinfo = device_get_ivars(dev);
952 pcicfgregs *cfg = &dinfo->cfg;
953 uint32_t offset, bit;
955 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
956 offset = cfg->msix.msix_pba_offset + (index / 4) * 4;
957 bit = 1 << index % 32;
958 return (bus_read_4(cfg->msix.msix_pba_res, offset) & bit);
/*
 * Allocate up to *count MSI-X messages for child, exposing them as
 * SYS_RES_IRQ resources starting at rid 1.  Requires the table (and PBA,
 * if in a different BAR) resources to already be mapped and active.
 * NOTE(review): error returns and the final *count update are not visible
 * in this view.
 */
962 pci_alloc_msix(device_t dev, device_t child, int *count)
964 struct pci_devinfo *dinfo = device_get_ivars(child);
965 pcicfgregs *cfg = &dinfo->cfg;
966 struct resource_list_entry *rle;
967 int actual, error, i, irq, max;
969 /* MSI-X capability present? */
970 if (cfg->msix.msix_location == 0 || !pci_do_msix)
973 /* Make sure the appropriate BARs are mapped. */
974 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
975 cfg->msix.msix_table_bar);
976 if (rle == NULL || rle->res == NULL ||
977 !(rman_get_flags(rle->res) & RF_ACTIVE))
979 cfg->msix.msix_table_res = rle->res;
980 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
981 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
982 cfg->msix.msix_pba_bar);
983 if (rle == NULL || rle->res == NULL ||
984 !(rman_get_flags(rle->res) & RF_ACTIVE))
987 cfg->msix.msix_pba_res = rle->res;
989 /* Already have allocated messages? */
990 if (cfg->msix.msix_alloc != 0)
/* Ask the parent bridge for each message individually. */
993 max = min(*count, cfg->msix.msix_msgnum);
994 for (i = 0; i < max; i++) {
995 /* Allocate a message. */
996 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, i,
1000 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1005 /* Mask all vectors. */
1006 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1007 pci_mask_msix(child, i);
1009 /* Update control register to enable MSI-X. */
1010 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1011 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1012 cfg->msix.msix_ctrl, 2);
1014 /* Update counts of alloc'd messages. */
1015 cfg->msix.msix_alloc = actual;
/*
 * Release all MSI-X messages held by child: verify no IRQ resource is
 * still allocated by a driver, disable MSI-X in the control register,
 * then hand each message back to the parent bridge and delete its rid.
 */
1021 pci_release_msix(device_t dev, device_t child)
1023 struct pci_devinfo *dinfo = device_get_ivars(child);
1024 pcicfgregs *cfg = &dinfo->cfg;
1025 struct resource_list_entry *rle;
1028 /* Do we have any messages to release? */
1029 if (cfg->msix.msix_alloc == 0)
1032 /* Make sure none of the resources are allocated. */
1033 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1034 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1035 KASSERT(rle != NULL, ("missing MSI resource"));
1036 if (rle->res != NULL)
1040 /* Update control register with to disable MSI-X. */
1041 cfg->msix.msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1042 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1043 cfg->msix.msix_ctrl, 2);
1045 /* Release the messages. */
1046 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1047 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1048 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1050 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1053 /* Update alloc count. */
1054 cfg->msix.msix_alloc = 0;
1059 * Support for MSI message signalled interrupts.
/*
 * Program the MSI address/data registers (using the 64-bit layout when the
 * capability advertises it) and set the MSI enable bit.  The values are
 * cached in cfg->msi so pci_resume_msi() can restore them.
 */
1062 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1064 struct pci_devinfo *dinfo = device_get_ivars(dev);
1065 pcicfgregs *cfg = &dinfo->cfg;
1067 /* Write data and address values. */
1068 cfg->msi.msi_addr = address;
1069 cfg->msi.msi_data = data;
1070 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
1071 address & 0xffffffff, 4);
/* 64-bit capable devices place the data register after the high address. */
1072 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
1073 pci_write_config(dev, cfg->msi.msi_location +
1074 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1075 pci_write_config(dev, cfg->msi.msi_location +
1076 PCIR_MSI_DATA_64BIT, data, 2);
1078 pci_write_config(dev, cfg->msi.msi_location +
1079 PCIR_MSI_DATA, data, 2);
1081 /* Enable MSI in the control register. */
1082 cfg->msi.msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1083 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
1084 cfg->msi.msi_ctrl, 2);
1088 * Restore MSI registers during resume. If MSI is enabled then
1089 * restore the data and address registers in addition to the control
/* Re-plays the cached cfg->msi values into hardware after a suspend. */
1093 pci_resume_msi(device_t dev)
1095 struct pci_devinfo *dinfo = device_get_ivars(dev);
1096 pcicfgregs *cfg = &dinfo->cfg;
1100 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1101 address = cfg->msi.msi_addr;
1102 data = cfg->msi.msi_data;
1103 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
1104 address & 0xffffffff, 4);
1105 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
1106 pci_write_config(dev, cfg->msi.msi_location +
1107 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1108 pci_write_config(dev, cfg->msi.msi_location +
1109 PCIR_MSI_DATA_64BIT, data, 2);
1111 pci_write_config(dev, cfg->msi.msi_location +
1112 PCIR_MSI_DATA, data, 2);
/* The control register is restored unconditionally. */
1114 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
1115 cfg->msi.msi_ctrl, 2);
1119 * Attempt to allocate *count MSI messages. The actual number allocated is
1120 * returned in *count. After this function returns, each message will be
1121 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
1124 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1126 struct pci_devinfo *dinfo = device_get_ivars(child);
1127 pcicfgregs *cfg = &dinfo->cfg;
1128 struct resource_list_entry *rle;
1129 int actual, error, i, irqs[32];
1132 /* Don't let count == 0 get us into trouble. */
1136 /* If rid 0 is allocated, then fail. */
/* rid 0 is the legacy INTx resource; it must not coexist with MSI. */
1137 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1138 if (rle != NULL && rle->res != NULL)
1141 /* Try MSI-X first. */
/* Only fall through to plain MSI when MSI-X is entirely unavailable. */
1142 error = pci_alloc_msix(dev, child, count);
1143 if (error != ENODEV)
1146 /* MSI capability present? */
1147 if (cfg->msi.msi_location == 0 || !pci_do_msi)
1150 /* Already have allocated messages? */
1151 if (cfg->msi.msi_alloc != 0)
1154 /* Don't ask for more than the device supports. */
1155 actual = min(*count, cfg->msi.msi_msgnum);
1157 /* Don't ask for more than 32 messages. */
1158 actual = min(actual, 32);
1160 /* MSI requires power of 2 number of messages. */
1161 if (!powerof2(actual))
1165 /* Try to allocate N messages. */
1166 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
1167 cfg->msi.msi_msgnum, irqs);
1178 * We now have N actual messages mapped onto SYS_RES_IRQ
1179 * resources in the irqs[] array, so add new resources
1180 * starting at rid 1.
1182 for (i = 0; i < actual; i++)
1183 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1184 irqs[i], irqs[i], 1);
1186 /* Update control register with actual count and enable MSI. */
/* MME field is log2(count), stored at bits 6:4 of the control register. */
1187 ctrl = cfg->msi.msi_ctrl;
1188 ctrl &= ~PCIM_MSICTRL_MME_MASK;
1189 ctrl |= (ffs(actual) - 1) << 4;
1190 cfg->msi.msi_ctrl = ctrl;
1191 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1193 /* Update counts of alloc'd messages. */
1194 cfg->msi.msi_alloc = actual;
1199 /* Release the MSI messages associated with this device. */
/*
 * Mirror of pci_alloc_msi_method: tries the MSI-X release path first,
 * then verifies no MSI IRQ resource is still held, disables MSI, and
 * returns all messages to the parent bridge.
 */
1201 pci_release_msi_method(device_t dev, device_t child)
1203 struct pci_devinfo *dinfo = device_get_ivars(child);
1204 pcicfgregs *cfg = &dinfo->cfg;
1205 struct resource_list_entry *rle;
1206 int error, i, irqs[32];
1208 /* Try MSI-X first. */
1209 error = pci_release_msix(dev, child);
1210 if (error != ENODEV)
1213 /* Do we have any messages to release? */
1214 if (cfg->msi.msi_alloc == 0)
/* irqs[] below is sized for this invariant. */
1216 KASSERT(cfg->msi.msi_alloc <= 32, ("more than 32 alloc'd messages"));
1218 /* Make sure none of the resources are allocated. */
1219 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1220 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1221 KASSERT(rle != NULL, ("missing MSI resource"));
1222 if (rle->res != NULL)
1224 irqs[i] = rle->start;
1227 /* Update control register with 0 count and disable MSI. */
1228 cfg->msi.msi_ctrl &= ~(PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE);
1229 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL,
1230 cfg->msi.msi_ctrl, 2);
1232 /* Release the messages. */
1233 PCIB_RELEASE_MSI(device_get_parent(dev), child, cfg->msi.msi_alloc,
1235 for (i = 0; i < cfg->msi.msi_alloc; i++)
1236 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1238 /* Update alloc count. */
1239 cfg->msi.msi_alloc = 0;
1244 * Return the max supported MSI or MSI-X messages this device supports.
1245 * Basically, assuming the MD code can alloc messages, this function
1246 * should return the maximum value that pci_alloc_msi() can return. Thus,
1247 * it is subject to the tunables, etc.
1250 pci_msi_count_method(device_t dev, device_t child)
1252 struct pci_devinfo *dinfo = device_get_ivars(child);
1253 pcicfgregs *cfg = &dinfo->cfg;
/* MSI-X takes precedence, matching the order in pci_alloc_msi_method(). */
1255 if (pci_do_msix && cfg->msix.msix_location != 0)
1256 return (cfg->msix.msix_msgnum);
1257 if (pci_do_msi && cfg->msi.msi_location != 0)
1258 return (cfg->msi.msi_msgnum);
1262 /* free pcicfgregs structure and all depending data structures */
/*
 * Frees all VPD allocations (ident string, read-only and read/write
 * keyword arrays and their values), unlinks the devinfo from pci_devq,
 * and frees the devinfo itself.
 */
1265 pci_freecfg(struct pci_devinfo *dinfo)
1267 struct devlist *devlist_head;
1270 devlist_head = &pci_devq;
1272 if (dinfo->cfg.vpd.vpd_reg) {
1273 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
1274 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
1275 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
1276 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
1277 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
1278 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
1279 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
1281 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
1282 free(dinfo, M_DEVBUF);
1284 /* increment the generation count */
1287 /* we're losing one device */
1293 * PCI power management
/*
 * Transition child to the requested D-state via the PM capability's
 * status register, enforcing the spec-mandated settle delays and
 * rejecting D1/D2 when the capability does not advertise them.
 */
1296 pci_set_powerstate_method(device_t dev, device_t child, int state)
1298 struct pci_devinfo *dinfo = device_get_ivars(child);
1299 pcicfgregs *cfg = &dinfo->cfg;
1301 int result, oldstate, highest, delay;
1303 if (cfg->pp.pp_cap == 0)
1304 return (EOPNOTSUPP);
1307 * Optimize a no state change request away. While it would be OK to
1308 * write to the hardware in theory, some devices have shown odd
1309 * behavior when going from D3 -> D3.
1311 oldstate = pci_get_powerstate(child);
1312 if (oldstate == state)
1316 * The PCI power management specification states that after a state
1317 * transition between PCI power states, system software must
1318 * guarantee a minimal delay before the function accesses the device.
1319 * Compute the worst case delay that we need to guarantee before we
1320 * access the device. Many devices will be responsive much more
1321 * quickly than this delay, but there are some that don't respond
1322 * instantly to state changes. Transitions to/from D3 state require
1323 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
1324 * is done below with DELAY rather than a sleeper function because
1325 * this function can be called from contexts where we cannot sleep.
1327 highest = (oldstate > state) ? oldstate : state;
1328 if (highest == PCI_POWERSTATE_D3)
1330 else if (highest == PCI_POWERSTATE_D2)
/* Read-modify-write: preserve the non-state bits of the PM status reg. */
1334 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
1335 & ~PCIM_PSTAT_DMASK;
1338 case PCI_POWERSTATE_D0:
1339 status |= PCIM_PSTAT_D0;
1341 case PCI_POWERSTATE_D1:
1342 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
1343 return (EOPNOTSUPP);
1344 status |= PCIM_PSTAT_D1;
1346 case PCI_POWERSTATE_D2:
1347 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
1348 return (EOPNOTSUPP);
1349 status |= PCIM_PSTAT_D2;
1351 case PCI_POWERSTATE_D3:
1352 status |= PCIM_PSTAT_D3;
1360 "pci%d:%d:%d: Transition from D%d to D%d\n",
1361 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func,
1364 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the current D-state from the PM status register; devices without
 * a PM capability are reported as always being in D0.
 */
1371 pci_get_powerstate_method(device_t dev, device_t child)
1373 struct pci_devinfo *dinfo = device_get_ivars(child);
1374 pcicfgregs *cfg = &dinfo->cfg;
1378 if (cfg->pp.pp_cap != 0) {
1379 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
1380 switch (status & PCIM_PSTAT_DMASK) {
1382 result = PCI_POWERSTATE_D0;
1385 result = PCI_POWERSTATE_D1;
1388 result = PCI_POWERSTATE_D2;
1391 result = PCI_POWERSTATE_D3;
1394 result = PCI_POWERSTATE_UNKNOWN;
1398 /* No support, device is always at D0 */
1399 result = PCI_POWERSTATE_D0;
1405 * Some convenience functions for PCI device drivers.
1408 static __inline void
1409 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
1413 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1415 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
1418 static __inline void
1419 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
1423 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1425 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
1429 pci_enable_busmaster_method(device_t dev, device_t child)
1431 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
1436 pci_disable_busmaster_method(device_t dev, device_t child)
1438 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
1443 pci_enable_io_method(device_t dev, device_t child, int space)
1453 case SYS_RES_IOPORT:
1454 bit = PCIM_CMD_PORTEN;
1457 case SYS_RES_MEMORY:
1458 bit = PCIM_CMD_MEMEN;
1464 pci_set_command_bit(dev, child, bit);
1465 /* Some devices seem to need a brief stall here, what do to? */
1466 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1469 device_printf(child, "failed to enable %s mapping!\n", error);
1474 pci_disable_io_method(device_t dev, device_t child, int space)
1484 case SYS_RES_IOPORT:
1485 bit = PCIM_CMD_PORTEN;
1488 case SYS_RES_MEMORY:
1489 bit = PCIM_CMD_MEMEN;
1495 pci_clear_command_bit(dev, child, bit);
1496 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1497 if (command & bit) {
1498 device_printf(child, "failed to disable %s mapping!\n", error);
1505 * New style pci driver. Parent device is either a pci-host-bridge or a
1506 * pci-pci-bridge. Both kinds are represented by instances of pcib.
1510 pci_print_verbose(struct pci_devinfo *dinfo)
1515 pcicfgregs *cfg = &dinfo->cfg;
1517 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
1518 cfg->vendor, cfg->device, cfg->revid);
1519 printf("\tbus=%d, slot=%d, func=%d\n",
1520 cfg->bus, cfg->slot, cfg->func);
1521 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
1522 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
1524 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
1525 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
1526 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
1527 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
1528 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
1529 if (cfg->intpin > 0)
1530 printf("\tintpin=%c, irq=%d\n",
1531 cfg->intpin +'a' -1, cfg->intline);
1532 if (cfg->pp.pp_cap) {
1535 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
1536 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
1537 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
1538 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
1539 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
1540 status & PCIM_PSTAT_DMASK);
1542 if (cfg->vpd.vpd_reg) {
1543 printf("\tVPD Ident: %s\n", cfg->vpd.vpd_ident);
1544 for (i = 0; i < cfg->vpd.vpd_rocnt; i++) {
1545 struct vpd_readonly *vrop;
1546 vrop = &cfg->vpd.vpd_ros[i];
1547 if (strncmp("CP", vrop->keyword, 2) == 0)
1548 printf("\tCP: id %d, BAR%d, off %#x\n",
1549 vrop->value[0], vrop->value[1],
1551 *(uint16_t *)&vrop->value[2]));
1552 else if (strncmp("RV", vrop->keyword, 2) == 0)
1553 printf("\tRV: %#hhx\n", vrop->value[0]);
1555 printf("\t%.2s: %s\n", vrop->keyword,
1558 for (i = 0; i < cfg->vpd.vpd_wcnt; i++) {
1559 struct vpd_write *vwp;
1560 vwp = &cfg->vpd.vpd_w[i];
1561 if (strncmp("RW", vwp->keyword, 2) != 0)
1562 printf("\t%.2s(%#x-%#x): %s\n",
1563 vwp->keyword, vwp->start,
1564 vwp->start + vwp->len, vwp->value);
1567 if (cfg->msi.msi_location) {
1570 ctrl = cfg->msi.msi_ctrl;
1571 printf("\tMSI supports %d message%s%s%s\n",
1572 cfg->msi.msi_msgnum,
1573 (cfg->msi.msi_msgnum == 1) ? "" : "s",
1574 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
1575 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
1577 if (cfg->msix.msix_location) {
1578 printf("\tMSI-X supports %d message%s ",
1579 cfg->msix.msix_msgnum,
1580 (cfg->msix.msix_msgnum == 1) ? "" : "s");
1581 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
1582 printf("in map 0x%x\n",
1583 cfg->msix.msix_table_bar);
1585 printf("in maps 0x%x and 0x%x\n",
1586 cfg->msix.msix_table_bar,
1587 cfg->msix.msix_pba_bar);
1593 pci_porten(device_t pcib, int b, int s, int f)
1595 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1596 & PCIM_CMD_PORTEN) != 0;
1600 pci_memen(device_t pcib, int b, int s, int f)
1602 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1603 & PCIM_CMD_MEMEN) != 0;
1607 * Add a resource based on a pci map register. Return 1 if the map
1608 * register is a 32bit map register or 2 if it is a 64bit register.
1611 pci_add_map(device_t pcib, device_t bus, device_t dev,
1612 int b, int s, int f, int reg, struct resource_list *rl, int force,
1617 pci_addr_t start, end, count;
1624 struct resource *res;
1626 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1627 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
1628 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1629 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
1631 if (pci_maptype(map) & PCI_MAPMEM)
1632 type = SYS_RES_MEMORY;
1634 type = SYS_RES_IOPORT;
1635 ln2size = pci_mapsize(testval);
1636 ln2range = pci_maprange(testval);
1637 base = pci_mapbase(map);
1638 barlen = ln2range == 64 ? 2 : 1;
1641 * For I/O registers, if bottom bit is set, and the next bit up
1642 * isn't clear, we know we have a BAR that doesn't conform to the
1643 * spec, so ignore it. Also, sanity check the size of the data
1644 * areas to the type of memory involved. Memory must be at least
1645 * 16 bytes in size, while I/O ranges must be at least 4.
1647 if ((testval & 0x1) == 0x1 &&
1648 (testval & 0x2) != 0)
1650 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
1651 (type == SYS_RES_IOPORT && ln2size < 2))
1655 /* Read the other half of a 64bit map register */
1656 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
1658 printf("\tmap[%02x]: type %x, range %2d, base %#jx, size %2d",
1659 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
1660 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1661 printf(", port disabled\n");
1662 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1663 printf(", memory disabled\n");
1665 printf(", enabled\n");
1669 * If base is 0, then we have problems. It is best to ignore
1670 * such entries for the moment. These will be allocated later if
1671 * the driver specifically requests them. However, some
1672 * removable busses look better when all resources are allocated,
1673 * so allow '0' to be overriden.
1675 * Similarly treat maps whose values is the same as the test value
1676 * read back. These maps have had all f's written to them by the
1677 * BIOS in an attempt to disable the resources.
1679 if (!force && (base == 0 || map == testval))
1681 if ((u_long)base != base) {
1683 "pci%d:%d:%d bar %#x too many address bits", b, s, f, reg);
1688 * This code theoretically does the right thing, but has
1689 * undesirable side effects in some cases where peripherals
1690 * respond oddly to having these bits enabled. Let the user
1691 * be able to turn them off (since pci_enable_io_modes is 1 by
1694 if (pci_enable_io_modes) {
1695 /* Turn on resources that have been left off by a lazy BIOS */
1696 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
1697 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1698 cmd |= PCIM_CMD_PORTEN;
1699 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1701 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
1702 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1703 cmd |= PCIM_CMD_MEMEN;
1704 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1707 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1709 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1713 count = 1 << ln2size;
1714 if (base == 0 || base == pci_mapbase(testval)) {
1715 start = 0; /* Let the parent deside */
1719 end = base + (1 << ln2size) - 1;
1721 resource_list_add(rl, type, reg, start, end, count);
1724 * Not quite sure what to do on failure of allocating the resource
1725 * since I can postulate several right answers.
1727 res = resource_list_alloc(rl, bus, dev, type, ®, start, end, count,
1728 prefetch ? RF_PREFETCHABLE : 0);
1731 start = rman_get_start(res);
1732 if ((u_long)start != start) {
1733 /* Wait a minute! this platform can't do this address. */
1735 "pci%d.%d.%x bar %#x start %#jx, too many bits.",
1736 b, s, f, reg, (uintmax_t)start);
1737 resource_list_release(rl, bus, dev, type, reg, res);
1740 pci_write_config(dev, reg, start, 4);
1742 pci_write_config(dev, reg + 4, start >> 32, 4);
1747 * For ATA devices we need to decide early what addressing mode to use.
1748 * Legacy demands that the primary and secondary ATA ports sits on the
1749 * same addresses that old ISA hardware did. This dictates that we use
1750 * those addresses and ignore the BAR's if we cannot set PCI native
1754 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
1755 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
1757 int rid, type, progif;
1759 /* if this device supports PCI native addressing use it */
1760 progif = pci_read_config(dev, PCIR_PROGIF, 1);
1761 if ((progif & 0x8a) == 0x8a) {
1762 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
1763 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
1764 printf("Trying ATA native PCI addressing mode\n");
1765 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
1769 progif = pci_read_config(dev, PCIR_PROGIF, 1);
1770 type = SYS_RES_IOPORT;
1771 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
1772 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
1773 prefetchmask & (1 << 0));
1774 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
1775 prefetchmask & (1 << 1));
1778 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
1779 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
1782 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
1783 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
1786 if (progif & PCIP_STORAGE_IDE_MODESEC) {
1787 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
1788 prefetchmask & (1 << 2));
1789 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
1790 prefetchmask & (1 << 3));
1793 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
1794 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
1797 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
1798 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
1801 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
1802 prefetchmask & (1 << 4));
1803 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
1804 prefetchmask & (1 << 5));
1808 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
1810 struct pci_devinfo *dinfo = device_get_ivars(dev);
1811 pcicfgregs *cfg = &dinfo->cfg;
1812 char tunable_name[64];
1815 /* Has to have an intpin to have an interrupt. */
1816 if (cfg->intpin == 0)
1819 /* Let the user override the IRQ with a tunable. */
1820 irq = PCI_INVALID_IRQ;
1821 snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.INT%c.irq",
1822 cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
1823 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
1824 irq = PCI_INVALID_IRQ;
1827 * If we didn't get an IRQ via the tunable, then we either use the
1828 * IRQ value in the intline register or we ask the bus to route an
1829 * interrupt for us. If force_route is true, then we only use the
1830 * value in the intline register if the bus was unable to assign an
1833 if (!PCI_INTERRUPT_VALID(irq)) {
1834 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
1835 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
1836 if (!PCI_INTERRUPT_VALID(irq))
1840 /* If after all that we don't have an IRQ, just bail. */
1841 if (!PCI_INTERRUPT_VALID(irq))
1844 /* Update the config register if it changed. */
1845 if (irq != cfg->intline) {
1847 pci_write_config(dev, PCIR_INTLINE, irq, 1);
1850 /* Add this IRQ as rid 0 interrupt resource. */
1851 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
1855 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
1858 struct pci_devinfo *dinfo = device_get_ivars(dev);
1859 pcicfgregs *cfg = &dinfo->cfg;
1860 struct resource_list *rl = &dinfo->resources;
1861 struct pci_quirk *q;
1864 pcib = device_get_parent(bus);
1870 /* ATA devices needs special map treatment */
1871 if ((pci_get_class(dev) == PCIC_STORAGE) &&
1872 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
1873 (pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV))
1874 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
1876 for (i = 0; i < cfg->nummaps;)
1877 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
1878 rl, force, prefetchmask & (1 << i));
1881 * Add additional, quirked resources.
1883 for (q = &pci_quirks[0]; q->devid; q++) {
1884 if (q->devid == ((cfg->device << 16) | cfg->vendor)
1885 && q->type == PCI_QUIRK_MAP_REG)
1886 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
1890 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
1891 #ifdef __PCI_REROUTE_INTERRUPT
1893 * Try to re-route interrupts. Sometimes the BIOS or
1894 * firmware may leave bogus values in these registers.
1895 * If the re-route fails, then just stick with what we
1898 pci_assign_interrupt(bus, dev, 1);
1900 pci_assign_interrupt(bus, dev, 0);
1906 pci_add_children(device_t dev, int busno, size_t dinfo_size)
1908 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
1909 device_t pcib = device_get_parent(dev);
1910 struct pci_devinfo *dinfo;
1912 int s, f, pcifunchigh;
1915 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
1916 ("dinfo_size too small"));
1917 maxslots = PCIB_MAXSLOTS(pcib);
1918 for (s = 0; s <= maxslots; s++) {
1922 hdrtype = REG(PCIR_HDRTYPE, 1);
1923 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
1925 if (hdrtype & PCIM_MFDEV)
1926 pcifunchigh = PCI_FUNCMAX;
1927 for (f = 0; f <= pcifunchigh; f++) {
1928 dinfo = pci_read_device(pcib, busno, s, f, dinfo_size);
1929 if (dinfo != NULL) {
1930 pci_add_child(dev, dinfo);
1938 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
1940 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
1941 device_set_ivars(dinfo->cfg.dev, dinfo);
1942 resource_list_init(&dinfo->resources);
1943 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
1944 pci_cfg_restore(dinfo->cfg.dev, dinfo);
1945 pci_print_verbose(dinfo);
1946 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
1950 pci_probe(device_t dev)
1953 device_set_desc(dev, "PCI bus");
1955 /* Allow other subclasses to override this driver. */
1960 pci_attach(device_t dev)
1965 * Since there can be multiple independantly numbered PCI
1966 * busses on systems with multiple PCI domains, we can't use
1967 * the unit number to decide which bus we are probing. We ask
1968 * the parent pcib what our bus number is.
1970 busno = pcib_get_bus(dev);
1972 device_printf(dev, "physical bus=%d\n", busno);
1974 pci_add_children(dev, busno, sizeof(struct pci_devinfo));
1976 return (bus_generic_attach(dev));
1980 pci_suspend(device_t dev)
1982 int dstate, error, i, numdevs;
1983 device_t acpi_dev, child, *devlist;
1984 struct pci_devinfo *dinfo;
1987 * Save the PCI configuration space for each child and set the
1988 * device in the appropriate power state for this sleep state.
1991 if (pci_do_power_resume)
1992 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
1993 device_get_children(dev, &devlist, &numdevs);
1994 for (i = 0; i < numdevs; i++) {
1996 dinfo = (struct pci_devinfo *) device_get_ivars(child);
1997 pci_cfg_save(child, dinfo, 0);
2000 /* Suspend devices before potentially powering them down. */
2001 error = bus_generic_suspend(dev);
2003 free(devlist, M_TEMP);
2008 * Always set the device to D3. If ACPI suggests a different
2009 * power state, use it instead. If ACPI is not present, the
2010 * firmware is responsible for managing device power. Skip
2011 * children who aren't attached since they are powered down
2012 * separately. Only manage type 0 devices for now.
2014 for (i = 0; acpi_dev && i < numdevs; i++) {
2016 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2017 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2018 dstate = PCI_POWERSTATE_D3;
2019 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2020 pci_set_powerstate(child, dstate);
2023 free(devlist, M_TEMP);
2028 pci_resume(device_t dev)
2031 device_t acpi_dev, child, *devlist;
2032 struct pci_devinfo *dinfo;
2035 * Set each child to D0 and restore its PCI configuration space.
2038 if (pci_do_power_resume)
2039 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2040 device_get_children(dev, &devlist, &numdevs);
2041 for (i = 0; i < numdevs; i++) {
2043 * Notify ACPI we're going to D0 but ignore the result. If
2044 * ACPI is not present, the firmware is responsible for
2045 * managing device power. Only manage type 0 devices for now.
2048 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2049 if (acpi_dev && device_is_attached(child) &&
2050 dinfo->cfg.hdrtype == 0) {
2051 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2052 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2055 /* Now the device is powered up, restore its config space. */
2056 pci_cfg_restore(child, dinfo);
2058 free(devlist, M_TEMP);
2059 return (bus_generic_resume(dev));
2063 pci_load_vendor_data(void)
2065 caddr_t vendordata, info;
2067 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2068 info = preload_search_info(vendordata, MODINFO_ADDR);
2069 pci_vendordata = *(char **)info;
2070 info = preload_search_info(vendordata, MODINFO_SIZE);
2071 pci_vendordata_size = *(size_t *)info;
2072 /* terminate the database */
2073 pci_vendordata[pci_vendordata_size] = '\n';
2078 pci_driver_added(device_t dev, driver_t *driver)
2083 struct pci_devinfo *dinfo;
2087 device_printf(dev, "driver added\n");
2088 DEVICE_IDENTIFY(driver, dev);
2089 device_get_children(dev, &devlist, &numdevs);
2090 for (i = 0; i < numdevs; i++) {
2092 if (device_get_state(child) != DS_NOTPRESENT)
2094 dinfo = device_get_ivars(child);
2095 pci_print_verbose(dinfo);
2097 printf("pci%d:%d:%d: reprobing on driver added\n",
2098 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func);
2099 pci_cfg_restore(child, dinfo);
2100 if (device_probe_and_attach(child) != 0)
2101 pci_cfg_save(child, dinfo, 1);
2103 free(devlist, M_TEMP);
2107 pci_print_child(device_t dev, device_t child)
2109 struct pci_devinfo *dinfo;
2110 struct resource_list *rl;
2113 dinfo = device_get_ivars(child);
2114 rl = &dinfo->resources;
2116 retval += bus_print_child_header(dev, child);
2118 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
2119 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
2120 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
2121 if (device_get_flags(dev))
2122 retval += printf(" flags %#x", device_get_flags(dev));
2124 retval += printf(" at device %d.%d", pci_get_slot(child),
2125 pci_get_function(child));
2127 retval += bus_print_child_footer(dev, child);
2137 } pci_nomatch_tab[] = {
2138 {PCIC_OLD, -1, "old"},
2139 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
2140 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
2141 {PCIC_STORAGE, -1, "mass storage"},
2142 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
2143 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
2144 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
2145 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
2146 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
2147 {PCIC_NETWORK, -1, "network"},
2148 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
2149 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
2150 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
2151 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
2152 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
2153 {PCIC_DISPLAY, -1, "display"},
2154 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
2155 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
2156 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
2157 {PCIC_MULTIMEDIA, -1, "multimedia"},
2158 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
2159 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
2160 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
2161 {PCIC_MEMORY, -1, "memory"},
2162 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
2163 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
2164 {PCIC_BRIDGE, -1, "bridge"},
2165 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
2166 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
2167 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
2168 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
2169 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
2170 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
2171 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
2172 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
2173 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
2174 {PCIC_SIMPLECOMM, -1, "simple comms"},
2175 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
2176 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
2177 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
2178 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
2179 {PCIC_BASEPERIPH, -1, "base peripheral"},
2180 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
2181 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
2182 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
2183 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
2184 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
2185 {PCIC_INPUTDEV, -1, "input device"},
2186 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
2187 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
2188 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
2189 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
2190 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
2191 {PCIC_DOCKING, -1, "docking station"},
2192 {PCIC_PROCESSOR, -1, "processor"},
2193 {PCIC_SERIALBUS, -1, "serial bus"},
2194 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
2195 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
2196 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
2197 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
2198 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
2199 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
2200 {PCIC_WIRELESS, -1, "wireless controller"},
2201 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
2202 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
2203 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
2204 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
2205 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
2206 {PCIC_SATCOM, -1, "satellite communication"},
2207 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
2208 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
2209 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
2210 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
2211 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
2212 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
2213 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
2214 {PCIC_DASP, -1, "dasp"},
2215 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
2220 pci_probe_nomatch(device_t dev, device_t child)
2223 char *cp, *scp, *device;
2226 * Look for a listing for this device in a loaded device database.
2228 if ((device = pci_describe_device(child)) != NULL) {
2229 device_printf(dev, "<%s>", device);
2230 free(device, M_DEVBUF);
2233 * Scan the class/subclass descriptions for a general
2238 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
2239 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
2240 if (pci_nomatch_tab[i].subclass == -1) {
2241 cp = pci_nomatch_tab[i].desc;
2242 } else if (pci_nomatch_tab[i].subclass ==
2243 pci_get_subclass(child)) {
2244 scp = pci_nomatch_tab[i].desc;
2248 device_printf(dev, "<%s%s%s>",
2250 ((cp != NULL) && (scp != NULL)) ? ", " : "",
2253 printf(" at device %d.%d (no driver attached)\n",
2254 pci_get_slot(child), pci_get_function(child));
2255 if (pci_do_power_nodriver)
2257 (struct pci_devinfo *) device_get_ivars(child), 1);
2262 * Parse the PCI device database, if loaded, and return a pointer to a
2263 * description of the device.
2265 * The database is flat text formatted as follows:
2267 * Any line not in a valid format is ignored.
2268 * Lines are terminated with newline '\n' characters.
2270 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
2273 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
2274 * - devices cannot be listed without a corresponding VENDOR line.
2275 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
2276 * another TAB, then the device name.
2280 * Assuming (ptr) points to the beginning of a line in the database,
2281 * return the vendor or device and description of the next entry.
2282 * The value of (vendor) or (device) inappropriate for the entry type
2283 * is set to -1. Returns nonzero at the end of the database.
2285 * Note that this is slightly unrobust in the face of corrupt data;
2286 * we attempt to safeguard against this by spamming the end of the
2287 * database with a newline when we initialise.
2290 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
2299 left = pci_vendordata_size - (cp - pci_vendordata);
2307 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
2311 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
2314 /* skip to next line */
2315 while (*cp != '\n' && left > 0) {
2324 /* skip to next line */
2325 while (*cp != '\n' && left > 0) {
2329 if (*cp == '\n' && left > 0)
2336 pci_describe_device(device_t dev)
2339 char *desc, *vp, *dp, *line;
2341 desc = vp = dp = NULL;
2344 * If we have no vendor data, we can't do anything.
2346 if (pci_vendordata == NULL)
2350 * Scan the vendor data looking for this device
2352 line = pci_vendordata;
2353 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
2356 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
2358 if (vendor == pci_get_vendor(dev))
2361 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
2364 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
2372 if (device == pci_get_device(dev))
2376 snprintf(dp, 80, "0x%x", pci_get_device(dev));
2377 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
2379 sprintf(desc, "%s, %s", vp, dp);
2389 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
2391 struct pci_devinfo *dinfo;
2394 dinfo = device_get_ivars(child);
2398 case PCI_IVAR_ETHADDR:
2400 * The generic accessor doesn't deal with failure, so
2401 * we set the return value, then return an error.
2403 *((uint8_t **) result) = NULL;
2405 case PCI_IVAR_SUBVENDOR:
2406 *result = cfg->subvendor;
2408 case PCI_IVAR_SUBDEVICE:
2409 *result = cfg->subdevice;
2411 case PCI_IVAR_VENDOR:
2412 *result = cfg->vendor;
2414 case PCI_IVAR_DEVICE:
2415 *result = cfg->device;
2417 case PCI_IVAR_DEVID:
2418 *result = (cfg->device << 16) | cfg->vendor;
2420 case PCI_IVAR_CLASS:
2421 *result = cfg->baseclass;
2423 case PCI_IVAR_SUBCLASS:
2424 *result = cfg->subclass;
2426 case PCI_IVAR_PROGIF:
2427 *result = cfg->progif;
2429 case PCI_IVAR_REVID:
2430 *result = cfg->revid;
2432 case PCI_IVAR_INTPIN:
2433 *result = cfg->intpin;
2436 *result = cfg->intline;
2442 *result = cfg->slot;
2444 case PCI_IVAR_FUNCTION:
2445 *result = cfg->func;
2447 case PCI_IVAR_CMDREG:
2448 *result = cfg->cmdreg;
2450 case PCI_IVAR_CACHELNSZ:
2451 *result = cfg->cachelnsz;
2453 case PCI_IVAR_MINGNT:
2454 *result = cfg->mingnt;
2456 case PCI_IVAR_MAXLAT:
2457 *result = cfg->maxlat;
2459 case PCI_IVAR_LATTIMER:
2460 *result = cfg->lattimer;
2469 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
2471 struct pci_devinfo *dinfo;
2473 dinfo = device_get_ivars(child);
2476 case PCI_IVAR_INTPIN:
2477 dinfo->cfg.intpin = value;
2479 case PCI_IVAR_ETHADDR:
2480 case PCI_IVAR_SUBVENDOR:
2481 case PCI_IVAR_SUBDEVICE:
2482 case PCI_IVAR_VENDOR:
2483 case PCI_IVAR_DEVICE:
2484 case PCI_IVAR_DEVID:
2485 case PCI_IVAR_CLASS:
2486 case PCI_IVAR_SUBCLASS:
2487 case PCI_IVAR_PROGIF:
2488 case PCI_IVAR_REVID:
2492 case PCI_IVAR_FUNCTION:
2493 return (EINVAL); /* disallow for now */
2501 #include "opt_ddb.h"
2503 #include <ddb/ddb.h>
2504 #include <sys/cons.h>
2507 * List resources based on pci map registers, used for within ddb
2510 DB_SHOW_COMMAND(pciregs, db_pci_dump)
2512 struct pci_devinfo *dinfo;
2513 struct devlist *devlist_head;
2516 int i, error, none_count;
2519 /* get the head of the device queue */
2520 devlist_head = &pci_devq;
2523 * Go through the list of devices and print out devices
2525 for (error = 0, i = 0,
2526 dinfo = STAILQ_FIRST(devlist_head);
2527 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
2528 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
2530 /* Populate pd_name and pd_unit */
2533 name = device_get_name(dinfo->cfg.dev);
2536 db_printf("%s%d@pci%d:%d:%d:\tclass=0x%06x card=0x%08x "
2537 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
2538 (name && *name) ? name : "none",
2539 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
2541 p->pc_sel.pc_bus, p->pc_sel.pc_dev,
2542 p->pc_sel.pc_func, (p->pc_class << 16) |
2543 (p->pc_subclass << 8) | p->pc_progif,
2544 (p->pc_subdevice << 16) | p->pc_subvendor,
2545 (p->pc_device << 16) | p->pc_vendor,
2546 p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate the resource backing a PCI BAR: size the BAR by writing
 * all-ones and reading back, sanity-check the requested resource type
 * against what the BAR decodes, allocate the range from the parent bus,
 * record it in the child's resource list, and finally program the BAR
 * with the address that was allocated.
 */
2551 static struct resource *
2552 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
2553 u_long start, u_long end, u_long count, u_int flags)
2555 struct pci_devinfo *dinfo = device_get_ivars(child);
2556 struct resource_list *rl = &dinfo->resources;
2557 struct resource_list_entry *rle;
2558 struct resource *res;
2559 pci_addr_t map, testval;
2563 * Weed out the bogons, and figure out how large the BAR/map
2564 * is. Bars that read back 0 here are bogus and unimplemented.
2565 * Note: atapci in legacy mode are special and handled elsewhere
2566 * in the code. If you have an atapci device in legacy mode and
2567 * it fails here, that other code is broken.
/* Size probe: save the current BAR, write all-ones, read back the mask. */
2570 map = pci_read_config(child, *rid, 4);
2571 pci_write_config(child, *rid, 0xffffffff, 4);
2572 testval = pci_read_config(child, *rid, 4);
/* A 64-bit memory BAR carries the upper half in the next dword. */
2573 if (pci_maprange(testval) == 64)
2574 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
/* A base of zero after the all-ones write means the BAR is unimplemented. */
2575 if (pci_mapbase(testval) == 0)
/* Reject a request whose type disagrees with what the BAR decodes. */
2577 if (pci_maptype(testval) & PCI_MAPMEM) {
2578 if (type != SYS_RES_MEMORY) {
2581 "child %s requested type %d for rid %#x,"
2582 " but the BAR says it is an memio\n",
2583 device_get_nameunit(child), type, *rid);
2587 if (type != SYS_RES_IOPORT) {
2590 "child %s requested type %d for rid %#x,"
2591 " but the BAR says it is an ioport\n",
2592 device_get_nameunit(child), type, *rid);
2597 * For real BARs, we need to override the size that
2598 * the driver requests, because that's what the BAR
2599 * actually uses and we would otherwise have a
2600 * situation where we might allocate the excess to
2601 * another driver, which won't work.
/* NOTE(review): mapsize's declaration is in elided lines not visible here. */
2603 mapsize = pci_mapsize(testval);
2604 count = 1UL << mapsize;
/* BARs decode naturally-aligned ranges; force alignment up to the size. */
2605 if (RF_ALIGNMENT(flags) < mapsize)
2606 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
2609 * Allocate enough resource, and then write back the
2610 * appropriate bar for that resource.
2612 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
2613 start, end, count, flags);
2615 device_printf(child,
2616 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
2617 count, *rid, type, start, end);
/* Record the allocation in the child's resource list for later lookups. */
2620 resource_list_add(rl, type, *rid, start, end, count);
2621 rle = resource_list_find(rl, type, *rid);
2623 panic("pci_alloc_map: unexpectedly can't find resource.");
2625 rle->start = rman_get_start(res);
2626 rle->end = rman_get_end(res);
2629 device_printf(child,
2630 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
2631 count, *rid, type, rman_get_start(res));
/* Program the BAR (both halves for a 64-bit BAR) with the allocated base. */
2632 map = rman_get_start(res);
2634 pci_write_config(child, *rid, map, 4);
2635 if (pci_maprange(testval) == 64)
2636 pci_write_config(child, *rid + 4, map >> 32, 4);
/*
 * Bus method backing bus_alloc_resource() for children of the PCI bus.
 * For direct children: lazily route a legacy interrupt for rid 0 (unless
 * MSI/MSI-X messages are already allocated), and lazily size/allocate
 * BAR-backed memory and I/O port ranges via pci_alloc_map().  Everything
 * else falls through to the generic resource_list_alloc() path.
 * NOTE(review): the return-type line and parts of the switch framing are
 * elided in this view.
 */
2642 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
2643 u_long start, u_long end, u_long count, u_int flags)
2645 struct pci_devinfo *dinfo = device_get_ivars(child);
2646 struct resource_list *rl = &dinfo->resources;
2647 struct resource_list_entry *rle;
2648 pcicfgregs *cfg = &dinfo->cfg;
2651 * Perform lazy resource allocation
2653 if (device_get_parent(child) == dev) {
2657 * Can't alloc legacy interrupt once MSI messages
2658 * have been allocated.
2660 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
2661 cfg->msix.msix_alloc > 0))
2664 * If the child device doesn't have an
2665 * interrupt routed and is deserving of an
2666 * interrupt, try to assign it one.
2668 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
2670 pci_assign_interrupt(dev, child, 0);
2672 case SYS_RES_IOPORT:
2673 case SYS_RES_MEMORY:
/* Only rids within the device's BAR range get the lazy-BAR treatment. */
2674 if (*rid < PCIR_BAR(cfg->nummaps)) {
2676 * Enable the I/O mode. We should
2677 * also be assigning resources too
2678 * when none are present. The
2679 * resource_list_alloc kind of sorta does
2682 if (PCI_ENABLE_IO(dev, child, type))
/* No resource-list entry yet: size the BAR and allocate it now. */
2685 rle = resource_list_find(rl, type, *rid);
2687 return (pci_alloc_map(dev, child, type, rid,
2688 start, end, count, flags));
2692 * If we've already allocated the resource, then
2693 * return it now. But first we may need to activate
2694 * it, since we don't allocate the resource as active
2695 * above. Normally this would be done down in the
2696 * nexus, but since we short-circuit that path we have
2697 * to do its job here. Not sure if we should free the
2698 * resource if it fails to activate.
2700 rle = resource_list_find(rl, type, *rid);
2701 if (rle != NULL && rle->res != NULL) {
2703 device_printf(child,
2704 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
2705 rman_get_size(rle->res), *rid, type,
2706 rman_get_start(rle->res));
2707 if ((flags & RF_ACTIVE) &&
2708 bus_generic_activate_resource(dev, child, type,
2709 *rid, rle->res) != 0)
/* Fall back to the generic resource-list allocator. */
2714 return (resource_list_alloc(rl, dev, child, type, rid,
2715 start, end, count, flags));
/*
 * Delete a resource-list entry for a direct child, refusing when the
 * resource is still active or owned by the child, then zero the BAR so
 * it stops decoding and remove the entry from the parent as well.
 */
2719 pci_delete_resource(device_t dev, device_t child, int type, int rid)
2721 struct pci_devinfo *dinfo;
2722 struct resource_list *rl;
2723 struct resource_list_entry *rle;
/* Only resources of our own direct children are managed here. */
2725 if (device_get_parent(child) != dev)
2728 dinfo = device_get_ivars(child);
2729 rl = &dinfo->resources;
2730 rle = resource_list_find(rl, type, rid);
/* Refuse to delete a resource the child still owns or has active. */
2733 if (rman_get_device(rle->res) != dev ||
2734 rman_get_flags(rle->res) & RF_ACTIVE) {
2735 device_printf(dev, "delete_resource: "
2736 "Resource still owned by child, oops. "
2737 "(type=%d, rid=%d, addr=%lx)\n",
2738 rle->type, rle->rid,
2739 rman_get_start(rle->res));
2742 bus_release_resource(dev, type, rid, rle->res);
2744 resource_list_delete(rl, type, rid);
2747 * Why do we turn off the PCI configuration BAR when we delete a
/* Zeroing the BAR disables its decoding until it is reallocated. */
2750 pci_write_config(child, rid, 0, 4);
2751 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
/* Return the child's resource list (bus_get_resource_list method). */
2754 struct resource_list *
2755 pci_get_resource_list (device_t dev, device_t child)
2757 struct pci_devinfo *dinfo = device_get_ivars(child);
2759 return (&dinfo->resources);
/*
 * Read `width` bytes of the child's config space at offset `reg` by
 * delegating to the parent PCI bridge (PCIB_READ_CONFIG).
 */
2763 pci_read_config_method(device_t dev, device_t child, int reg, int width)
2765 struct pci_devinfo *dinfo = device_get_ivars(child);
2766 pcicfgregs *cfg = &dinfo->cfg;
2768 return (PCIB_READ_CONFIG(device_get_parent(dev),
2769 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Write `width` bytes of `val` to the child's config space at offset
 * `reg` by delegating to the parent PCI bridge (PCIB_WRITE_CONFIG).
 */
2773 pci_write_config_method(device_t dev, device_t child, int reg,
2774 uint32_t val, int width)
2776 struct pci_devinfo *dinfo = device_get_ivars(child);
2777 pcicfgregs *cfg = &dinfo->cfg;
2779 PCIB_WRITE_CONFIG(device_get_parent(dev),
2780 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Format the child's bus location ("slot=S function=F") into buf. */
2784 pci_child_location_str_method(device_t dev, device_t child, char *buf,
2788 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
2789 pci_get_function(child));
/*
 * Format the child's plug-and-play identification (vendor, device,
 * subvendor, subdevice, class) into buf.
 * NOTE(review): cfg's declaration/assignment (presumably &dinfo->cfg)
 * is in elided lines not visible here — confirm against the full source.
 */
2794 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
2797 struct pci_devinfo *dinfo;
2800 dinfo = device_get_ivars(child);
2802 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
2803 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
2804 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/* Route an interrupt for the child by asking the parent PCI bridge. */
2810 pci_assign_interrupt_method(device_t dev, device_t child)
2812 struct pci_devinfo *dinfo = device_get_ivars(child);
2813 pcicfgregs *cfg = &dinfo->cfg;
2815 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global PCI device queue,
 * create the /dev/pci control device, and load the vendor data file; on
 * unload, destroy the device node.  (Case labels are in elided lines.)
 */
2820 pci_modevent(module_t mod, int what, void *arg)
2822 static struct cdev *pci_cdev;
2826 STAILQ_INIT(&pci_devq);
2828 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
2830 pci_load_vendor_data();
2834 destroy_dev(pci_cdev);
/*
 * Restore the saved config-space registers of a type-0 device, typically
 * after a suspend/resume or power-state transition.  The device is first
 * returned to D0 (BARs and other registers reset when leaving D3), then
 * the cached BARs, command, interrupt, timing and id registers are
 * written back, and finally any MSI state is re-programmed.
 */
2842 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
2847 * Only do header type 0 devices. Type 1 devices are bridges,
2848 * which we know need special treatment. Type 2 devices are
2849 * cardbus bridges which also require special treatment.
2850 * Other types are unknown, and we err on the side of safety
2853 if (dinfo->cfg.hdrtype != 0)
2857 * Restore the device to full power mode. We must do this
2858 * before we restore the registers because moving from D3 to
2859 * D0 will cause the chip's BARs and some other registers to
2860 * be reset to some unknown power on reset values. Cut down
2861 * the noise on boot by doing nothing if we are already in
2864 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
2865 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
/* Write back every cached BAR, then the rest of the type-0 header. */
2867 for (i = 0; i < dinfo->cfg.nummaps; i++)
2868 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
2869 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
2870 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
2871 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
2872 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
2873 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
2874 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
2875 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
2876 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
2877 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
2878 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
2881 * Restore MSI configuration if it is present. If MSI is enabled,
2882 * then restore the data and addr registers.
2884 if (dinfo->cfg.msi.msi_location != 0)
2885 pci_resume_msi(dev);
/*
 * Save a type-0 device's config-space registers into its pci_devinfo
 * cache and, when `setstate` permits, place the device in D3.  The
 * pci_do_power_nodriver policy (0..3) controls which device classes may
 * be powered down when no driver is attached.
 * NOTE(review): the function's tail (after the final powerstate write)
 * is past the end of this view.
 */
2889 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
2896 * Only do header type 0 devices. Type 1 devices are bridges, which
2897 * we know need special treatment. Type 2 devices are cardbus bridges
2898 * which also require special treatment. Other types are unknown, and
2899 * we err on the side of safety by ignoring them. Powering down
2900 * bridges should not be undertaken lightly.
2902 if (dinfo->cfg.hdrtype != 0)
/* Snapshot every BAR plus the expansion-ROM register. */
2904 for (i = 0; i < dinfo->cfg.nummaps; i++)
2905 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
2906 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
2909 * Some drivers apparently write to these registers w/o updating our
2910 * cached copy. No harm happens if we update the copy, so do so here
2911 * so we can restore them. The COMMAND register is modified by the
2912 * bus w/o updating the cache. This should represent the normally
2913 * writable portion of the 'defined' part of type 0 headers. In
2914 * theory we also need to save/restore the PCI capability structures
2915 * we know about, but apart from power we don't know any that are
2918 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
2919 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
2920 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
2921 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
2922 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
2923 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
2924 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
2925 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
2926 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
2927 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2928 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2929 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
2930 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
2931 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
2932 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
2935 * don't set the state for display devices, base peripherals and
2936 * memory devices since bad things happen when they are powered down.
2937 * We should (a) have drivers that can easily detach and (b) use
2938 * generic drivers for these devices so that some device actually
2939 * attaches. We need to make sure that when we implement (a) we don't
2940 * power the device down on a reattach.
2942 cls = pci_get_class(dev);
/*
 * Power-down policy by class.  NOTE(review): the early-return
 * statements inside these cases are in elided lines; the visible
 * case bodies appear to fall through toward the powerdown below.
 */
2945 switch (pci_do_power_nodriver)
2947 case 0: /* NO powerdown at all */
2949 case 1: /* Conservative about what to power down */
2950 if (cls == PCIC_STORAGE)
2953 case 2: /* Aggressive about what to power down */
2954 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
2955 cls == PCIC_BASEPERIPH)
2958 case 3: /* Power down everything */
2962 * PCI spec says we can only go into D3 state from D0 state.
2963 * Transition from D[12] into D0 before going to D3 state.
2965 ps = pci_get_powerstate(dev);
2966 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
2967 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
2968 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
2969 pci_set_powerstate(dev, PCI_POWERSTATE_D3);