2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
55 #if defined(__i386__) || defined(__amd64__)
56 #include <machine/intr_machdep.h>
59 #include <sys/pciio.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
68 #include <contrib/dev/acpica/acpi.h>
71 #define ACPI_PWR_FOR_SLEEP(x, y, z)
74 static uint32_t pci_mapbase(unsigned mapreg);
75 static int pci_maptype(unsigned mapreg);
76 static int pci_mapsize(unsigned testval);
77 static int pci_maprange(unsigned mapreg);
78 static void pci_fixancient(pcicfgregs *cfg);
80 static int pci_porten(device_t pcib, int b, int s, int f);
81 static int pci_memen(device_t pcib, int b, int s, int f);
82 static void pci_assign_interrupt(device_t bus, device_t dev,
84 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
85 int b, int s, int f, int reg,
86 struct resource_list *rl, int force, int prefetch);
87 static int pci_probe(device_t dev);
88 static int pci_attach(device_t dev);
89 static void pci_load_vendor_data(void);
90 static int pci_describe_parse_line(char **ptr, int *vendor,
91 int *device, char **desc);
92 static char *pci_describe_device(device_t dev);
93 static int pci_modevent(module_t mod, int what, void *arg);
94 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
96 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
97 static uint32_t pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
100 static void pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
101 int reg, uint32_t data);
103 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
/*
 * Device and bus methods exported by the generic PCI bus driver.
 * NOTE(review): listing appears truncated -- the terminating { 0, 0 }
 * sentinel and closing "};" of the array were lost; verify against
 * repository history.
 */
105 static device_method_t pci_methods[] = {
106 /* Device interface */
107 DEVMETHOD(device_probe, pci_probe),
108 DEVMETHOD(device_attach, pci_attach),
109 DEVMETHOD(device_detach, bus_generic_detach),
110 DEVMETHOD(device_shutdown, bus_generic_shutdown),
111 DEVMETHOD(device_suspend, pci_suspend),
112 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
115 DEVMETHOD(bus_print_child, pci_print_child),
116 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
117 DEVMETHOD(bus_read_ivar, pci_read_ivar),
118 DEVMETHOD(bus_write_ivar, pci_write_ivar),
119 DEVMETHOD(bus_driver_added, pci_driver_added),
120 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
121 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
/* Resource management is delegated to the generic rl helpers except
 * where PCI needs BAR-aware behaviour (alloc/delete). */
123 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
124 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
125 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
126 DEVMETHOD(bus_delete_resource, pci_delete_resource),
127 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
128 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
129 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
130 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
131 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
132 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface */
135 DEVMETHOD(pci_read_config, pci_read_config_method),
136 DEVMETHOD(pci_write_config, pci_write_config_method),
137 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
138 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
139 DEVMETHOD(pci_enable_io, pci_enable_io_method),
140 DEVMETHOD(pci_disable_io, pci_disable_io_method),
141 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
142 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
143 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
144 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
145 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
146 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
147 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
148 DEVMETHOD(pci_release_msi, pci_release_msi_method),
149 DEVMETHOD(pci_msi_count, pci_msi_count_method),
/* Register the "pci" class and attach it below pcib (PCI bridges). */
154 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
156 static devclass_t pci_devclass;
157 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
158 MODULE_VERSION(pci, 1);
160 static char *pci_vendordata;
161 static size_t pci_vendordata_size;
/*
 * Quirk table for devices that need special handling.
 * NOTE(review): the opening "struct pci_quirk {" line and most members
 * besides 'devid' were lost from this listing; each table row is
 * { devid, type, arg1, arg2 }.
 */
165 uint32_t devid; /* Vendor/device of the card */
167 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
168 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
173 struct pci_quirk pci_quirks[] = {
174 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
175 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
176 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
177 /* As does the Serverworks OSB4 (the SMBus mapping register) */
178 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
181 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
182 * or the CMIC-SL (AKA ServerWorks GC_LE).
184 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
185 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
188 * MSI doesn't work on earlier Intel chipsets including
189 * E7500, E7501, E7505, E7210, and 855.
191 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
192 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
193 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
194 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
195 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
198 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
/* NOTE(review): the { 0 } terminator row and closing "};" were lost. */
201 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
/* BAR type flags returned by pci_maptype(). */
206 /* map register information */
207 #define PCI_MAPMEM 0x01 /* memory map */
208 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
209 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of all discovered PCI functions plus bookkeeping counters
 * consumed by the /dev/pci ioctl interface. */
211 struct devlist pci_devq;
212 uint32_t pci_generation;
213 uint32_t pci_numdevs = 0;
/* Loader tunables and sysctls controlling generic PCI bus behaviour. */
216 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
218 static int pci_enable_io_modes = 1;
219 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
220 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
221 &pci_enable_io_modes, 1,
222 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
223 enable these bits correctly. We'd like to do this all the time, but there\n\
224 are some peripherals that this causes problems with.");
226 static int pci_do_power_nodriver = 0;
227 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
228 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
229 &pci_do_power_nodriver, 0,
230 "Place a function into D3 state when no driver attaches to it. 0 means\n\
231 disable. 1 means conservatively place devices into D3 state. 2 means\n\
232 agressively place devices into D3 state. 3 means put absolutely everything\n\
235 static int pci_do_power_resume = 1;
236 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
237 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
238 &pci_do_power_resume, 1,
239 "Transition from D3 -> D0 on resume.");
241 static int pci_do_msi = 1;
242 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
243 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
244 "Enable support for MSI interrupts");
246 static int pci_do_msix = 1;
247 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
248 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
249 "Enable support for MSI-X interrupts");
251 static int pci_honor_msi_blacklist = 1;
252 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
253 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
254 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
256 /* Find a device_t by bus/slot/function */
259 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
261 struct pci_devinfo *dinfo;
263 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
264 if ((dinfo->cfg.bus == bus) &&
265 (dinfo->cfg.slot == slot) &&
266 (dinfo->cfg.func == func)) {
267 return (dinfo->cfg.dev);
274 /* Find a device_t by vendor/device ID */
277 pci_find_device(uint16_t vendor, uint16_t device)
279 struct pci_devinfo *dinfo;
281 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
282 if ((dinfo->cfg.vendor == vendor) &&
283 (dinfo->cfg.device == device)) {
284 return (dinfo->cfg.dev);
291 /* return base address of memory or port map */
/*
 * Return the base address encoded in a BAR value.  I/O BARs (bit 0
 * set) keep the low 2 bits for type information; memory BARs keep the
 * low 4 bits (type + prefetchable flag).
 */
static uint32_t
pci_mapbase(uint32_t mapreg)
{
        int mask = 0x03;        /* I/O map: strip 2 type bits */

        if ((mapreg & 0x01) == 0)
                mask = 0x0f;    /* memory map: strip 4 type bits */
        return (mapreg & ~mask);
}
302 /* return map type of memory or port map */
305 pci_maptype(unsigned mapreg)
307 static uint8_t maptype[0x10] = {
308 PCI_MAPMEM, PCI_MAPPORT,
310 PCI_MAPMEM, PCI_MAPPORT,
312 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
313 PCI_MAPMEM|PCI_MAPMEMP, 0,
314 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
318 return maptype[mapreg & 0x0f];
321 /* return log2 of map size decoded for memory or port map */
/*
 * Return log2 of the size decoded by a BAR after a write of all ones:
 * count the trailing zero bits of the masked base value.  Returns 0
 * for a BAR that decodes nothing.
 */
static int
pci_mapsize(uint32_t testval)
{
        int ln2size;

        testval = pci_mapbase(testval);
        ln2size = 0;
        if (testval != 0) {
                while ((testval & 1) == 0)
                {
                        ln2size++;
                        testval >>= 1;
                }
        }
        return (ln2size);
}
340 /* return log2 of address range supported by map register */
/*
 * Return log2 of the address range supported by a BAR: 32 for plain
 * and <1MB memory or I/O maps, 20 for the legacy below-1MB type, and
 * 64 for 64-bit memory BARs.  Unknown encodings yield 0.
 */
static int
pci_maprange(unsigned mapreg)
{
        int ln2range = 0;

        switch (mapreg & 0x07) {
        case 0x00:
        case 0x01:
        case 0x05:
                ln2range = 32;
                break;
        case 0x02:
                ln2range = 20;
                break;
        case 0x04:
                ln2range = 64;
                break;
        }
        return (ln2range);
}
362 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
365 pci_fixancient(pcicfgregs *cfg)
367 if (cfg->hdrtype != 0)
370 /* PCI to PCI bridges use header type 1 */
371 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
375 /* extract header type specific config data */
378 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
380 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
381 switch (cfg->hdrtype) {
383 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
384 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
385 cfg->nummaps = PCI_MAXMAPS_0;
388 cfg->subvendor = REG(PCIR_SUBVEND_1, 2);
389 cfg->subdevice = REG(PCIR_SUBDEV_1, 2);
390 cfg->nummaps = PCI_MAXMAPS_1;
393 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
394 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
395 cfg->nummaps = PCI_MAXMAPS_2;
/*
 * pci_read_device: probe the function at b/s/f; if it responds
 * (PCIR_DEVVENDOR != -1), allocate a 'size'-byte pci_devinfo, fill in
 * its pcicfgregs from config space, parse capabilities, link it onto
 * the global pci_devq list and mirror the data into the pci_conf used
 * by the /dev/pci ioctl interface.  Returns the new entry or NULL.
 * NOTE(review): this listing is truncated -- braces, the return type,
 * the cfg->bus/slot/func assignments and the pci_numdevs/generation
 * updates are missing; verify against repository history.
 */
401 /* read configuration header into pcicfgregs structure */
403 pci_read_device(device_t pcib, int b, int s, int f, size_t size)
405 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
406 pcicfgregs *cfg = NULL;
407 struct pci_devinfo *devlist_entry;
408 struct devlist *devlist_head;
410 devlist_head = &pci_devq;
412 devlist_entry = NULL;
/* A vendor/device read of all ones means no function decodes here. */
414 if (REG(PCIR_DEVVENDOR, 4) != -1) {
415 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
416 if (devlist_entry == NULL)
419 cfg = &devlist_entry->cfg;
/* Snapshot the standard configuration header registers. */
424 cfg->vendor = REG(PCIR_VENDOR, 2);
425 cfg->device = REG(PCIR_DEVICE, 2);
426 cfg->cmdreg = REG(PCIR_COMMAND, 2);
427 cfg->statreg = REG(PCIR_STATUS, 2);
428 cfg->baseclass = REG(PCIR_CLASS, 1);
429 cfg->subclass = REG(PCIR_SUBCLASS, 1);
430 cfg->progif = REG(PCIR_PROGIF, 1);
431 cfg->revid = REG(PCIR_REVID, 1);
432 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
433 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
434 cfg->lattimer = REG(PCIR_LATTIMER, 1);
435 cfg->intpin = REG(PCIR_INTPIN, 1);
436 cfg->intline = REG(PCIR_INTLINE, 1);
438 cfg->mingnt = REG(PCIR_MINGNT, 1);
439 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split the multi-function bit out of the raw header type. */
441 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
442 cfg->hdrtype &= ~PCIM_MFDEV;
445 pci_hdrtypedata(pcib, b, s, f, cfg);
/* Walk the capability list only when the device advertises one. */
447 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
448 pci_read_extcap(pcib, cfg);
450 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror the parsed data into the pc_conf ioctl view. */
452 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
453 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
454 devlist_entry->conf.pc_sel.pc_func = cfg->func;
455 devlist_entry->conf.pc_hdr = cfg->hdrtype;
457 devlist_entry->conf.pc_subvendor = cfg->subvendor;
458 devlist_entry->conf.pc_subdevice = cfg->subdevice;
459 devlist_entry->conf.pc_vendor = cfg->vendor;
460 devlist_entry->conf.pc_device = cfg->device;
462 devlist_entry->conf.pc_class = cfg->baseclass;
463 devlist_entry->conf.pc_subclass = cfg->subclass;
464 devlist_entry->conf.pc_progif = cfg->progif;
465 devlist_entry->conf.pc_revid = cfg->revid;
470 return (devlist_entry);
/*
 * pci_read_extcap: walk the device's capability list and record the
 * location/contents of the capabilities the generic code cares about
 * (power management, HyperTransport MSI mapping on x86, MSI, MSI-X,
 * VPD).  The REG/WREG macros defined here deliberately remain in
 * scope for the VPD helpers that follow.
 * NOTE(review): truncated listing -- braces, 'break's and several
 * case bodies are missing; verify against repository history.
 */
475 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
477 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
478 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
479 #if defined(__i386__) || defined(__amd64__)
483 int ptr, nextptr, ptrptr;
/* The capability pointer register location depends on header type. */
485 switch (cfg->hdrtype & PCIM_HDRTYPE) {
488 ptrptr = PCIR_CAP_PTR;
491 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
494 return; /* no extended capabilities support */
496 nextptr = REG(ptrptr, 1); /* sanity check? */
499 * Read capability entries.
501 while (nextptr != 0) {
/* Capabilities must live in the upper 192 bytes of config space. */
504 printf("illegal PCI extended capability offset %d\n",
508 /* Find the next entry */
510 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
512 /* Process this entry */
513 switch (REG(ptr + PCICAP_ID, 1)) {
514 case PCIY_PMG: /* PCI power management */
515 if (cfg->pp.pp_cap == 0) {
516 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
517 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
518 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
519 if ((nextptr - ptr) > PCIR_POWER_DATA)
520 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
523 #if defined(__i386__) || defined(__amd64__)
524 case PCIY_HT: /* HyperTransport */
525 /* Determine HT-specific capability type. */
526 val = REG(ptr + PCIR_HT_COMMAND, 2);
527 switch (val & PCIM_HTCMD_CAP_MASK) {
528 case PCIM_HTCAP_MSI_MAPPING:
529 /* Sanity check the mapping window. */
530 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
532 addr = REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
533 if (addr != MSI_INTEL_ADDR_BASE)
535 "HT Bridge at %d:%d:%d has non-default MSI window 0x%llx\n",
536 cfg->bus, cfg->slot, cfg->func,
539 /* Enable MSI -> HT mapping. */
540 val |= PCIM_HTCMD_MSI_ENABLE;
541 WREG(ptr + PCIR_HT_COMMAND, val, 2);
/* MSI: message count is encoded as a power of two in the MMC field. */
546 case PCIY_MSI: /* PCI MSI */
547 cfg->msi.msi_location = ptr;
548 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
549 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
550 PCIM_MSICTRL_MMC_MASK)>>1);
/* MSI-X: record table/PBA BAR indices and offsets for later mapping. */
552 case PCIY_MSIX: /* PCI MSI-X */
553 cfg->msix.msix_location = ptr;
554 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
555 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
556 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
557 val = REG(ptr + PCIR_MSIX_TABLE, 4);
558 cfg->msix.msix_table_bar = PCIR_BAR(val &
560 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
561 val = REG(ptr + PCIR_MSIX_PBA, 4);
562 cfg->msix.msix_pba_bar = PCIR_BAR(val &
564 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
566 case PCIY_VPD: /* PCI Vital Product Data */
567 cfg->vpd.vpd_reg = ptr;
568 pci_read_vpd(pcib, cfg);
574 /* REG and WREG use carry through to next functions */
578 * PCI Vital Product Data
581 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg)
584 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
586 WREG(cfg->vpd.vpd_reg + 2, reg, 2);
587 while ((REG(cfg->vpd.vpd_reg + 2, 2) & 0x8000) != 0x8000)
588 DELAY(1); /* limit looping */
590 return REG(cfg->vpd.vpd_reg + 4, 4);
595 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
597 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
599 WREG(cfg->vpd.vpd_reg + 4, data, 4);
600 WREG(cfg->vpd.vpd_reg + 2, reg | 0x8000, 2);
601 while ((REG(cfg->vpd.vpd_reg + 2, 2) & 0x8000) == 0x8000)
602 DELAY(1); /* limit looping */
608 struct vpd_readstate {
618 vpd_nextbyte(struct vpd_readstate *vrs)
622 if (vrs->bytesinval == 0) {
623 vrs->val = le32toh(pci_read_vpd_reg(vrs->pcib, vrs->cfg,
626 byte = vrs->val & 0xff;
629 vrs->val = vrs->val >> 8;
630 byte = vrs->val & 0xff;
/*
 * pci_read_vpd: parse the device's Vital Product Data into
 * cfg->vpd: the identifier string (vpd_ident), the read-only keyword
 * array (vpd_ros) and the read/write keyword array (vpd_w).  Runs a
 * byte-at-a-time state machine over vpd_nextbyte(); the "RV" keyword
 * carries the checksum that validates the read-only section.
 * NOTE(review): this listing is heavily truncated -- state
 * transitions, several case bodies and the closing braces are
 * missing; verify against repository history before relying on it.
 */
639 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
641 struct vpd_readstate vrs;
648 int alloc, off; /* alloc/off for RO/W arrays */
652 /* init vpd reader */
660 name = remain = i = 0; /* shut up stupid gcc */
661 alloc = off = 0; /* shut up stupid gcc */
662 dflen = 0; /* shut up stupid gcc */
666 byte = vpd_nextbyte(&vrs);
668 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
669 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
670 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
/* State 0: decode a resource item header (small or large format). */
673 case 0: /* item name */
675 remain = vpd_nextbyte(&vrs);
676 remain |= vpd_nextbyte(&vrs) << 8;
/* VPD is capped at 0x7f words; a larger length means corrupt data. */
677 if (remain > (0x7f*4 - vrs.off)) {
680 "pci%d:%d:%d: invalid vpd data, remain %#x\n",
681 cfg->bus, cfg->slot, cfg->func,
687 name = (byte >> 3) & 0xf;
690 case 0x2: /* String */
691 cfg->vpd.vpd_ident = malloc(remain + 1,
700 case 0x10: /* VPD-R */
703 cfg->vpd.vpd_ros = malloc(alloc *
704 sizeof *cfg->vpd.vpd_ros, M_DEVBUF,
708 case 0x11: /* VPD-W */
711 cfg->vpd.vpd_w = malloc(alloc *
712 sizeof *cfg->vpd.vpd_w, M_DEVBUF,
716 default: /* Invalid data, abort */
/* State 1: accumulate the identifier string. */
722 case 1: /* Identifier String */
723 cfg->vpd.vpd_ident[i++] = byte;
726 cfg->vpd.vpd_ident[i] = '\0';
/* State 2: read a read-only keyword header, growing vpd_ros on demand. */
731 case 2: /* VPD-R Keyword Header */
733 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
734 (alloc *= 2) * sizeof *cfg->vpd.vpd_ros,
737 cfg->vpd.vpd_ros[off].keyword[0] = byte;
738 cfg->vpd.vpd_ros[off].keyword[1] = vpd_nextbyte(&vrs);
739 dflen = vpd_nextbyte(&vrs);
741 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
744 * if this happens, we can't trust the rest
747 printf("pci%d:%d:%d: bad keyword length: %d\n",
748 cfg->bus, cfg->slot, cfg->func, dflen);
752 } else if (dflen == 0) {
753 cfg->vpd.vpd_ros[off].value = malloc(1 *
754 sizeof *cfg->vpd.vpd_ros[off].value,
756 cfg->vpd.vpd_ros[off].value[0] = '\x00';
758 cfg->vpd.vpd_ros[off].value = malloc(
760 sizeof *cfg->vpd.vpd_ros[off].value,
764 /* keep in sync w/ state 3's transitions */
765 if (dflen == 0 && remain == 0)
/* State 3: accumulate a read-only keyword value; "RV" validates the
 * running checksum of everything read so far. */
773 case 3: /* VPD-R Keyword Value */
774 cfg->vpd.vpd_ros[off].value[i++] = byte;
775 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
776 "RV", 2) == 0 && cksumvalid == -1) {
781 "pci%d:%d:%d: bad VPD cksum, remain %hhu\n",
782 cfg->bus, cfg->slot, cfg->func,
791 /* keep in sync w/ state 2's transitions */
793 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
794 if (dflen == 0 && remain == 0) {
795 cfg->vpd.vpd_rocnt = off;
796 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
797 off * sizeof *cfg->vpd.vpd_ros,
800 } else if (dflen == 0)
/* State 5: read a read/write keyword header, growing vpd_w on demand. */
810 case 5: /* VPD-W Keyword Header */
812 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
813 (alloc *= 2) * sizeof *cfg->vpd.vpd_w,
816 cfg->vpd.vpd_w[off].keyword[0] = byte;
817 cfg->vpd.vpd_w[off].keyword[1] = vpd_nextbyte(&vrs);
818 cfg->vpd.vpd_w[off].len = dflen = vpd_nextbyte(&vrs);
819 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
820 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
821 sizeof *cfg->vpd.vpd_w[off].value,
825 /* keep in sync w/ state 6's transitions */
826 if (dflen == 0 && remain == 0)
/* State 6: accumulate a read/write keyword value. */
834 case 6: /* VPD-W Keyword Value */
835 cfg->vpd.vpd_w[off].value[i++] = byte;
838 /* keep in sync w/ state 5's transitions */
840 cfg->vpd.vpd_w[off++].value[i++] = '\0';
841 if (dflen == 0 && remain == 0) {
842 cfg->vpd.vpd_wcnt = off;
843 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
844 off * sizeof *cfg->vpd.vpd_w,
847 } else if (dflen == 0)
852 printf("pci%d:%d:%d: invalid state: %d\n",
853 cfg->bus, cfg->slot, cfg->func, state);
/* Checksum failed: the read-only section cannot be trusted; free it. */
859 if (cksumvalid == 0) {
860 /* read-only data bad, clean up */
862 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
864 free(cfg->vpd.vpd_ros, M_DEVBUF);
865 cfg->vpd.vpd_ros = NULL;
872 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
874 struct pci_devinfo *dinfo = device_get_ivars(child);
875 pcicfgregs *cfg = &dinfo->cfg;
877 *identptr = cfg->vpd.vpd_ident;
879 if (*identptr == NULL)
886 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
889 struct pci_devinfo *dinfo = device_get_ivars(child);
890 pcicfgregs *cfg = &dinfo->cfg;
893 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
894 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
895 sizeof cfg->vpd.vpd_ros[i].keyword) == 0) {
896 *vptr = cfg->vpd.vpd_ros[i].value;
899 if (i != cfg->vpd.vpd_rocnt)
907 * Return the offset in configuration space of the requested extended
908 * capability entry or 0 if the specified capability was not found.
911 pci_find_extcap_method(device_t dev, device_t child, int capability,
914 struct pci_devinfo *dinfo = device_get_ivars(child);
915 pcicfgregs *cfg = &dinfo->cfg;
920 * Check the CAP_LIST bit of the PCI status register first.
922 status = pci_read_config(child, PCIR_STATUS, 2);
923 if (!(status & PCIM_STATUS_CAPPRESENT))
927 * Determine the start pointer of the capabilities list.
929 switch (cfg->hdrtype & PCIM_HDRTYPE) {
935 ptr = PCIR_CAP_PTR_2;
939 return (ENXIO); /* no extended capabilities support */
941 ptr = pci_read_config(child, ptr, 1);
944 * Traverse the capabilities list.
947 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
952 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
959 * Support for MSI-X message interrupts.
962 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
964 struct pci_devinfo *dinfo = device_get_ivars(dev);
965 pcicfgregs *cfg = &dinfo->cfg;
968 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
969 offset = cfg->msix.msix_table_offset + index * 16;
970 bus_write_4(cfg->msix.msix_table_res, offset, address & 0xffffffff);
971 bus_write_4(cfg->msix.msix_table_res, offset + 4, address >> 32);
972 bus_write_4(cfg->msix.msix_table_res, offset + 8, data);
976 pci_mask_msix(device_t dev, u_int index)
978 struct pci_devinfo *dinfo = device_get_ivars(dev);
979 pcicfgregs *cfg = &dinfo->cfg;
980 uint32_t offset, val;
982 KASSERT(cfg->msix.msix_msgnum > index, ("bogus index"));
983 offset = cfg->msix.msix_table_offset + index * 16 + 12;
984 val = bus_read_4(cfg->msix.msix_table_res, offset);
985 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
986 val |= PCIM_MSIX_VCTRL_MASK;
987 bus_write_4(cfg->msix.msix_table_res, offset, val);
992 pci_unmask_msix(device_t dev, u_int index)
994 struct pci_devinfo *dinfo = device_get_ivars(dev);
995 pcicfgregs *cfg = &dinfo->cfg;
996 uint32_t offset, val;
998 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
999 offset = cfg->msix.msix_table_offset + index * 16 + 12;
1000 val = bus_read_4(cfg->msix.msix_table_res, offset);
1001 if (val & PCIM_MSIX_VCTRL_MASK) {
1002 val &= ~PCIM_MSIX_VCTRL_MASK;
1003 bus_write_4(cfg->msix.msix_table_res, offset, val);
1008 pci_pending_msix(device_t dev, u_int index)
1010 struct pci_devinfo *dinfo = device_get_ivars(dev);
1011 pcicfgregs *cfg = &dinfo->cfg;
1012 uint32_t offset, bit;
1014 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
1015 offset = cfg->msix.msix_pba_offset + (index / 4) * 4;
1016 bit = 1 << index % 32;
1017 return (bus_read_4(cfg->msix.msix_pba_res, offset) & bit);
/*
 * pci_alloc_msix: try to allocate up to *count MSI-X vectors for
 * 'child'.  Requires the MSI-X capability, an active mapping of the
 * BAR(s) holding the vector table and PBA, and no prior allocation.
 * Allocated vectors are added as SYS_RES_IRQ resources at rids 1..n,
 * all vectors are masked, and the capability is enabled.
 * NOTE(review): listing is truncated -- error returns, loop bodies
 * and the bootverbose conditionals are missing; verify against
 * repository history.
 */
1021 pci_alloc_msix(device_t dev, device_t child, int *count)
1023 struct pci_devinfo *dinfo = device_get_ivars(child);
1024 pcicfgregs *cfg = &dinfo->cfg;
1025 struct resource_list_entry *rle;
1026 int actual, error, i, irq, max;
1028 /* MSI-X capability present? */
1029 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1032 /* Make sure the appropriate BARs are mapped. */
1033 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1034 cfg->msix.msix_table_bar);
1035 if (rle == NULL || rle->res == NULL ||
1036 !(rman_get_flags(rle->res) & RF_ACTIVE))
1038 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the vector table. */
1039 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1040 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1041 cfg->msix.msix_pba_bar);
1042 if (rle == NULL || rle->res == NULL ||
1043 !(rman_get_flags(rle->res) & RF_ACTIVE))
1046 cfg->msix.msix_pba_res = rle->res;
1048 /* Already have allocated messages? */
1049 if (cfg->msix.msix_alloc != 0)
1053 device_printf(child,
1054 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1055 *count, cfg->msix.msix_msgnum);
1056 max = min(*count, cfg->msix.msix_msgnum);
1057 for (i = 0; i < max; i++) {
1058 /* Allocate a message. */
1059 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, i,
1063 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Verbose boot: report the allocated IRQ(s). */
1069 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1071 device_printf(child, "using IRQ %lu for MSI-X\n",
1077 * Be fancy and try to print contiguous runs of
1078 * IRQ values as ranges. 'irq' is the previous IRQ.
1079 * 'run' is true if we are in a range.
1081 device_printf(child, "using IRQs %lu", rle->start);
1084 for (i = 1; i < actual; i++) {
1085 rle = resource_list_find(&dinfo->resources,
1086 SYS_RES_IRQ, i + 1);
1088 /* Still in a run? */
1089 if (rle->start == irq + 1) {
1095 /* Finish previous range. */
1101 /* Start new range. */
1102 printf(",%lu", rle->start);
1106 /* Unfinished range? */
1109 printf(" for MSI-X\n");
1113 /* Mask all vectors. */
1114 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1115 pci_mask_msix(child, i);
1117 /* Update control register to enable MSI-X. */
1118 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1119 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1120 cfg->msix.msix_ctrl, 2);
1122 /* Update counts of alloc'd messages. */
1123 cfg->msix.msix_alloc = actual;
1129 pci_release_msix(device_t dev, device_t child)
1131 struct pci_devinfo *dinfo = device_get_ivars(child);
1132 pcicfgregs *cfg = &dinfo->cfg;
1133 struct resource_list_entry *rle;
1136 /* Do we have any messages to release? */
1137 if (cfg->msix.msix_alloc == 0)
1140 /* Make sure none of the resources are allocated. */
1141 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1142 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1143 KASSERT(rle != NULL, ("missing MSI resource"));
1144 if (rle->res != NULL)
1148 /* Update control register with to disable MSI-X. */
1149 cfg->msix.msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1150 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1151 cfg->msix.msix_ctrl, 2);
1153 /* Release the messages. */
1154 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1155 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1156 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1158 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1161 /* Update alloc count. */
1162 cfg->msix.msix_alloc = 0;
1167 * Support for MSI message signalled interrupts.
1170 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1172 struct pci_devinfo *dinfo = device_get_ivars(dev);
1173 pcicfgregs *cfg = &dinfo->cfg;
1175 /* Write data and address values. */
1176 cfg->msi.msi_addr = address;
1177 cfg->msi.msi_data = data;
1178 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
1179 address & 0xffffffff, 4);
1180 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
1181 pci_write_config(dev, cfg->msi.msi_location +
1182 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1183 pci_write_config(dev, cfg->msi.msi_location +
1184 PCIR_MSI_DATA_64BIT, data, 2);
1186 pci_write_config(dev, cfg->msi.msi_location +
1187 PCIR_MSI_DATA, data, 2);
1189 /* Enable MSI in the control register. */
1190 cfg->msi.msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1191 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
1192 cfg->msi.msi_ctrl, 2);
1196 * Restore MSI registers during resume. If MSI is enabled then
1197 * restore the data and address registers in addition to the control
1201 pci_resume_msi(device_t dev)
1203 struct pci_devinfo *dinfo = device_get_ivars(dev);
1204 pcicfgregs *cfg = &dinfo->cfg;
1208 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1209 address = cfg->msi.msi_addr;
1210 data = cfg->msi.msi_data;
1211 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
1212 address & 0xffffffff, 4);
1213 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
1214 pci_write_config(dev, cfg->msi.msi_location +
1215 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1216 pci_write_config(dev, cfg->msi.msi_location +
1217 PCIR_MSI_DATA_64BIT, data, 2);
1219 pci_write_config(dev, cfg->msi.msi_location +
1220 PCIR_MSI_DATA, data, 2);
1222 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
1223 cfg->msi.msi_ctrl, 2);
1227 * Returns true if the specified device is blacklisted because MSI
1231 pci_msi_device_blacklisted(device_t dev)
1233 struct pci_quirk *q;
1235 if (!pci_honor_msi_blacklist)
1238 for (q = &pci_quirks[0]; q->devid; q++) {
1239 if (q->devid == pci_get_devid(dev) &&
1240 q->type == PCI_QUIRK_DISABLE_MSI)
1247 * Determine if MSI is blacklisted globally on this system. Currently,
1248 * we just check for blacklisted chipsets as represented by the
1249 * host-PCI bridge at device 0:0:0. In the future, it may become
1250 * necessary to check other system attributes, such as the kenv values
1251 * that give the motherboard manufacturer and model number.
1254 pci_msi_blacklisted(void)
1258 if (!pci_honor_msi_blacklist)
1261 dev = pci_find_bsf(0, 0, 0);
1263 return (pci_msi_device_blacklisted(dev));
1268 * Attempt to allocate *count MSI messages. The actual number allocated is
1269 * returned in *count. After this function returns, each message will be
1270 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * pci_alloc_msi_method: attempt to allocate *count message interrupts
 * for 'child'.  MSI-X is tried first; plain MSI is used only when the
 * device lacks MSI-X.  On success the vectors appear as SYS_RES_IRQ
 * resources at rids 1..n and the actual count is written back through
 * *count.  MSI message counts must be a power of two and at most 32.
 * NOTE(review): listing is truncated -- error returns, closing braces
 * and the bootverbose conditionals are missing; verify against
 * repository history.
 */
1273 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1275 struct pci_devinfo *dinfo = device_get_ivars(child);
1276 pcicfgregs *cfg = &dinfo->cfg;
1277 struct resource_list_entry *rle;
1278 int actual, error, i, irqs[32];
1281 /* Don't let count == 0 get us into trouble. */
/* Rid 0 is the legacy INTx resource; MSI cannot coexist with it. */
1285 /* If rid 0 is allocated, then fail. */
1286 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1287 if (rle != NULL && rle->res != NULL)
1290 /* If MSI is blacklisted for this system, fail. */
1291 if (pci_msi_blacklisted())
1294 /* Try MSI-X first. */
1295 error = pci_alloc_msix(dev, child, count);
1296 if (error != ENODEV)
1299 /* MSI capability present? */
1300 if (cfg->msi.msi_location == 0 || !pci_do_msi)
1303 /* Already have allocated messages? */
1304 if (cfg->msi.msi_alloc != 0)
1308 device_printf(child,
1309 "attempting to allocate %d MSI vectors (%d supported)\n",
1310 *count, cfg->msi.msi_msgnum);
1312 /* Don't ask for more than the device supports. */
1313 actual = min(*count, cfg->msi.msi_msgnum);
1315 /* Don't ask for more than 32 messages. */
1316 actual = min(actual, 32);
1318 /* MSI requires power of 2 number of messages. */
1319 if (!powerof2(actual))
1323 /* Try to allocate N messages. */
1324 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
1325 cfg->msi.msi_msgnum, irqs);
1336 * We now have N actual messages mapped onto SYS_RES_IRQ
1337 * resources in the irqs[] array, so add new resources
1338 * starting at rid 1.
1340 for (i = 0; i < actual; i++)
1341 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1342 irqs[i], irqs[i], 1)<;
/* Verbose boot: report the allocated IRQ(s). */
1346 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
1351 * Be fancy and try to print contiguous runs
1352 * of IRQ values as ranges. 'run' is true if
1353 * we are in a range.
1355 device_printf(child, "using IRQs %d", irqs[0]);
1357 for (i = 1; i < actual; i++) {
1359 /* Still in a run? */
1360 if (irqs[i] == irqs[i - 1] + 1) {
1365 /* Finish previous range. */
1367 printf("-%d", irqs[i - 1]);
1371 /* Start new range. */
1372 printf(",%d", irqs[i]);
1375 /* Unfinished range? */
1377 printf("%d", irqs[actual - 1]);
1378 printf(" for MSI\n");
/* The MME field (bits 6:4) encodes log2 of the enabled message count. */
1382 /* Update control register with actual count and enable MSI. */
1383 ctrl = cfg->msi.msi_ctrl;
1384 ctrl &= ~PCIM_MSICTRL_MME_MASK;
1385 ctrl |= (ffs(actual) - 1) << 4;
1386 cfg->msi.msi_ctrl = ctrl;
1387 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1389 /* Update counts of alloc'd messages. */
1390 cfg->msi.msi_alloc = actual;
1395 /* Release the MSI messages associated with this device. */
1397 pci_release_msi_method(device_t dev, device_t child)
1399 struct pci_devinfo *dinfo = device_get_ivars(child);
1400 pcicfgregs *cfg = &dinfo->cfg;
1401 struct resource_list_entry *rle;
1402 int error, i, irqs[32];
1404 /* Try MSI-X first. */
/* Anything other than "no MSI-X in use" means MSI-X handled the release. */
1405 error = pci_release_msix(dev, child);
1406 if (error != ENODEV)
1409 /* Do we have any messages to release? */
1410 if (cfg->msi.msi_alloc == 0)
1412 KASSERT(cfg->msi.msi_alloc <= 32, ("more than 32 alloc'd messages"));
1414 /* Make sure none of the resources are allocated. */
1415 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1416 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1417 KASSERT(rle != NULL, ("missing MSI resource"));
1418 if (rle->res != NULL)
/* Record the IRQ so it can be handed back to the parent bridge below. */
1420 irqs[i] = rle->start;
1423 /* Update control register with 0 count and disable MSI. */
1424 cfg->msi.msi_ctrl &= ~(PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE);
1425 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL,
1426 cfg->msi.msi_ctrl, 2);
1428 /* Release the messages. */
1429 PCIB_RELEASE_MSI(device_get_parent(dev), child, cfg->msi.msi_alloc,
1431 for (i = 0; i < cfg->msi.msi_alloc; i++)
1432 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1434 /* Update alloc count. */
1435 cfg->msi.msi_alloc = 0;
1440 * Return the max supported MSI or MSI-X messages this device supports.
1441 * Basically, assuming the MD code can alloc messages, this function
1442 * should return the maximum value that pci_alloc_msi() can return. Thus,
1443 * it is subject to the tunables, etc.
1446 pci_msi_count_method(device_t dev, device_t child)
1448 struct pci_devinfo *dinfo = device_get_ivars(child);
1449 pcicfgregs *cfg = &dinfo->cfg;
/* MSI-X takes precedence over MSI when both capabilities are present. */
1451 if (pci_do_msix && cfg->msix.msix_location != 0)
1452 return (cfg->msix.msix_msgnum);
1453 if (pci_do_msi && cfg->msi.msi_location != 0)
1454 return (cfg->msi.msi_msgnum);
1458 /* free pcicfgregs structure and all depending data structures */
1461 pci_freecfg(struct pci_devinfo *dinfo)
1463 struct devlist *devlist_head;
1466 devlist_head = &pci_devq;
/* vpd_reg != 0 is used as the marker that VPD strings were allocated. */
1468 if (dinfo->cfg.vpd.vpd_reg) {
1469 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
1470 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
1471 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
1472 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
1473 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
1474 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
1475 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
/* Unlink from the global device list before freeing the devinfo itself. */
1477 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
1478 free(dinfo, M_DEVBUF);
1480 /* increment the generation count */
1483 /* we're losing one device */
1489 * PCI power management
1492 pci_set_powerstate_method(device_t dev, device_t child, int state)
1494 struct pci_devinfo *dinfo = device_get_ivars(child);
1495 pcicfgregs *cfg = &dinfo->cfg;
1497 int result, oldstate, highest, delay;
/* No power-management capability: nothing we can do. */
1499 if (cfg->pp.pp_cap == 0)
1500 return (EOPNOTSUPP);
1503 * Optimize a no state change request away. While it would be OK to
1504 * write to the hardware in theory, some devices have shown odd
1505 * behavior when going from D3 -> D3.
1507 oldstate = pci_get_powerstate(child);
1508 if (oldstate == state)
1512 * The PCI power management specification states that after a state
1513 * transition between PCI power states, system software must
1514 * guarantee a minimal delay before the function accesses the device.
1515 * Compute the worst case delay that we need to guarantee before we
1516 * access the device. Many devices will be responsive much more
1517 * quickly than this delay, but there are some that don't respond
1518 * instantly to state changes. Transitions to/from D3 state require
1519 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
1520 * is done below with DELAY rather than a sleeper function because
1521 * this function can be called from contexts where we cannot sleep.
1523 highest = (oldstate > state) ? oldstate : state;
1524 if (highest == PCI_POWERSTATE_D3)
1526 else if (highest == PCI_POWERSTATE_D2)
/* Read-modify-write the power state field of the PM status register. */
1530 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
1531 & ~PCIM_PSTAT_DMASK;
1534 case PCI_POWERSTATE_D0:
1535 status |= PCIM_PSTAT_D0;
1537 case PCI_POWERSTATE_D1:
/* D1 and D2 are optional states; reject if the capability lacks them. */
1538 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
1539 return (EOPNOTSUPP);
1540 status |= PCIM_PSTAT_D1;
1542 case PCI_POWERSTATE_D2:
1543 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
1544 return (EOPNOTSUPP);
1545 status |= PCIM_PSTAT_D2;
1547 case PCI_POWERSTATE_D3:
1548 status |= PCIM_PSTAT_D3;
1556 "pci%d:%d:%d: Transition from D%d to D%d\n",
1557 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func,
1560 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
1567 pci_get_powerstate_method(device_t dev, device_t child)
1569 struct pci_devinfo *dinfo = device_get_ivars(child);
1570 pcicfgregs *cfg = &dinfo->cfg;
/* Map the hardware power-state field back to a PCI_POWERSTATE_* value. */
1574 if (cfg->pp.pp_cap != 0) {
1575 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
1576 switch (status & PCIM_PSTAT_DMASK) {
1578 result = PCI_POWERSTATE_D0;
1581 result = PCI_POWERSTATE_D1;
1584 result = PCI_POWERSTATE_D2;
1587 result = PCI_POWERSTATE_D3;
1590 result = PCI_POWERSTATE_UNKNOWN;
1594 /* No support, device is always at D0 */
1595 result = PCI_POWERSTATE_D0;
1601 * Some convenience functions for PCI device drivers.
/* Set the given bit(s) in the PCI command register via read-modify-write. */
1604 static __inline void
1605 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
1609 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1611 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Clear the given bit(s) in the PCI command register via read-modify-write. */
1614 static __inline void
1615 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
1619 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1621 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Enable PCI bus mastering for 'child' by setting BUSMASTEREN in its command register. */
1625 pci_enable_busmaster_method(device_t dev, device_t child)
1627 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Disable PCI bus mastering for 'child' by clearing BUSMASTEREN in its command register. */
1632 pci_disable_busmaster_method(device_t dev, device_t child)
1634 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Enable I/O port or memory decoding for 'child' and verify the bit stuck. */
1639 pci_enable_io_method(device_t dev, device_t child, int space)
1649 case SYS_RES_IOPORT:
1650 bit = PCIM_CMD_PORTEN;
1653 case SYS_RES_MEMORY:
1654 bit = PCIM_CMD_MEMEN;
1660 pci_set_command_bit(dev, child, bit);
1661 /* Some devices seem to need a brief stall here, what do to? */
/* Read back to confirm the device accepted the enable bit. */
1662 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
/* NOTE(review): 'error' is printed with %s, so it presumably holds the
 * space-name string set in the (elided) switch cases — confirm. */
1665 device_printf(child, "failed to enable %s mapping!\n", error);
/* Disable I/O port or memory decoding for 'child' and verify the bit cleared. */
1670 pci_disable_io_method(device_t dev, device_t child, int space)
1680 case SYS_RES_IOPORT:
1681 bit = PCIM_CMD_PORTEN;
1684 case SYS_RES_MEMORY:
1685 bit = PCIM_CMD_MEMEN;
1691 pci_clear_command_bit(dev, child, bit);
/* Read back to confirm the device accepted the disable. */
1692 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1693 if (command & bit) {
/* NOTE(review): 'error' is printed with %s, so it presumably holds the
 * space-name string set in the (elided) switch cases — confirm. */
1694 device_printf(child, "failed to disable %s mapping!\n", error);
1701 * New style pci driver. Parent device is either a pci-host-bridge or a
1702 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/* Dump the interesting parts of a device's parsed config space to the console. */
1706 pci_print_verbose(struct pci_devinfo *dinfo)
1711 pcicfgregs *cfg = &dinfo->cfg;
1713 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
1714 cfg->vendor, cfg->device, cfg->revid);
1715 printf("\tbus=%d, slot=%d, func=%d\n",
1716 cfg->bus, cfg->slot, cfg->func);
1717 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
1718 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
1720 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
1721 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
1722 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
1723 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
1724 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
1725 if (cfg->intpin > 0)
1726 printf("\tintpin=%c, irq=%d\n",
1727 cfg->intpin +'a' -1, cfg->intline);
/* Power-management capability: report supported and current D-states. */
1728 if (cfg->pp.pp_cap) {
1731 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
1732 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
1733 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
1734 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
1735 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
1736 status & PCIM_PSTAT_DMASK);
/* Vital Product Data, if it was parsed at probe time. */
1738 if (cfg->vpd.vpd_reg) {
1739 printf("\tVPD Ident: %s\n", cfg->vpd.vpd_ident);
1740 for (i = 0; i < cfg->vpd.vpd_rocnt; i++) {
1741 struct vpd_readonly *vrop;
1742 vrop = &cfg->vpd.vpd_ros[i];
1743 if (strncmp("CP", vrop->keyword, 2) == 0)
1744 printf("\tCP: id %d, BAR%d, off %#x\n",
1745 vrop->value[0], vrop->value[1],
1747 *(uint16_t *)&vrop->value[2]));
1748 else if (strncmp("RV", vrop->keyword, 2) == 0)
1749 printf("\tRV: %#hhx\n", vrop->value[0]);
1751 printf("\t%.2s: %s\n", vrop->keyword,
1754 for (i = 0; i < cfg->vpd.vpd_wcnt; i++) {
1755 struct vpd_write *vwp;
1756 vwp = &cfg->vpd.vpd_w[i];
1757 if (strncmp("RW", vwp->keyword, 2) != 0)
1758 printf("\t%.2s(%#x-%#x): %s\n",
1759 vwp->keyword, vwp->start,
1760 vwp->start + vwp->len, vwp->value);
/* MSI/MSI-X capability summaries. */
1763 if (cfg->msi.msi_location) {
1766 ctrl = cfg->msi.msi_ctrl;
1767 printf("\tMSI supports %d message%s%s%s\n",
1768 cfg->msi.msi_msgnum,
1769 (cfg->msi.msi_msgnum == 1) ? "" : "s",
1770 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
1771 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
1773 if (cfg->msix.msix_location) {
1774 printf("\tMSI-X supports %d message%s ",
1775 cfg->msix.msix_msgnum,
1776 (cfg->msix.msix_msgnum == 1) ? "" : "s");
1777 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
1778 printf("in map 0x%x\n",
1779 cfg->msix.msix_table_bar);
1781 printf("in maps 0x%x and 0x%x\n",
1782 cfg->msix.msix_table_bar,
1783 cfg->msix.msix_pba_bar);
/* Return nonzero if I/O port decoding is enabled in the function's command register. */
1789 pci_porten(device_t pcib, int b, int s, int f)
1791 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1792 & PCIM_CMD_PORTEN) != 0;
/* Return nonzero if memory decoding is enabled in the function's command register. */
1796 pci_memen(device_t pcib, int b, int s, int f)
1798 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1799 & PCIM_CMD_MEMEN) != 0;
1803 * Add a resource based on a pci map register. Return 1 if the map
1804 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * Probe the BAR at config offset 'reg' (standard size-probe: save, write
 * all 1s, read the size mask back, restore), add the resulting window to
 * the resource list 'rl' and pre-allocate it.  The BAR offset 'reg' also
 * serves as the resource rid.  Returns the register width in dwords so
 * the caller can step past 64-bit BARs.
 */
1807 pci_add_map(device_t pcib, device_t bus, device_t dev,
1808 int b, int s, int f, int reg, struct resource_list *rl, int force,
1813 pci_addr_t start, end, count;
1820 struct resource *res;
/* Size the BAR: save, write ~0, read the size mask, restore the original. */
1822 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1823 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
1824 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1825 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
1827 if (pci_maptype(map) & PCI_MAPMEM)
1828 type = SYS_RES_MEMORY;
1830 type = SYS_RES_IOPORT;
1831 ln2size = pci_mapsize(testval);
1832 ln2range = pci_maprange(testval);
1833 base = pci_mapbase(map);
1834 barlen = ln2range == 64 ? 2 : 1;
1837 * For I/O registers, if bottom bit is set, and the next bit up
1838 * isn't clear, we know we have a BAR that doesn't conform to the
1839 * spec, so ignore it. Also, sanity check the size of the data
1840 * areas to the type of memory involved. Memory must be at least
1841 * 16 bytes in size, while I/O ranges must be at least 4.
1843 if ((testval & 0x1) == 0x1 &&
1844 (testval & 0x2) != 0)
1846 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
1847 (type == SYS_RES_IOPORT && ln2size < 2))
1851 /* Read the other half of a 64bit map register */
1852 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
1854 printf("\tmap[%02x]: type %x, range %2d, base %#jx, size %2d",
1855 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
1856 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1857 printf(", port disabled\n");
1858 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1859 printf(", memory disabled\n");
1861 printf(", enabled\n");
1865 * If base is 0, then we have problems. It is best to ignore
1866 * such entries for the moment. These will be allocated later if
1867 * the driver specifically requests them. However, some
1868 * removable busses look better when all resources are allocated,
1869 * so allow '0' to be overridden.
1871 * Similarly treat maps whose value is the same as the test value
1872 * read back. These maps have had all f's written to them by the
1873 * BIOS in an attempt to disable the resources.
1875 if (!force && (base == 0 || map == testval))
/* Bail if the BAR's address doesn't fit in this platform's u_long. */
1877 if ((u_long)base != base) {
1879 "pci%d:%d:%d bar %#x too many address bits", b, s, f, reg);
1884 * This code theoretically does the right thing, but has
1885 * undesirable side effects in some cases where peripherals
1886 * respond oddly to having these bits enabled. Let the user
1887 * be able to turn them off (since pci_enable_io_modes is 1 by
1890 if (pci_enable_io_modes) {
1891 /* Turn on resources that have been left off by a lazy BIOS */
1892 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
1893 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1894 cmd |= PCIM_CMD_PORTEN;
1895 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1897 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
1898 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1899 cmd |= PCIM_CMD_MEMEN;
1900 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1903 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1905 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1909 count = 1 << ln2size;
1910 if (base == 0 || base == pci_mapbase(testval)) {
1911 start = 0; /* Let the parent decide */
1915 end = base + (1 << ln2size) - 1;
1917 resource_list_add(rl, type, reg, start, end, count);
1920 * Not quite sure what to do on failure of allocating the resource
1921 * since I can postulate several right answers.
/* FIX: restore '&reg' here — the source had the mojibake '®'.  The BAR
 * offset is used as the rid, as the resource_list_release() below shows. */
1923 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
1924 prefetch ? RF_PREFETCHABLE : 0);
1927 start = rman_get_start(res);
1928 if ((u_long)start != start) {
1929 /* Wait a minute! this platform can't do this address. */
1931 "pci%d.%d.%x bar %#x start %#jx, too many bits.",
1932 b, s, f, reg, (uintmax_t)start);
1933 resource_list_release(rl, bus, dev, type, reg, res);
/* Program the (possibly re-assigned) base address back into the BAR. */
1936 pci_write_config(dev, reg, start, 4);
1938 pci_write_config(dev, reg + 4, start >> 32, 4);
1943 * For ATA devices we need to decide early what addressing mode to use.
1944 * Legacy demands that the primary and secondary ATA ports sits on the
1945 * same addresses that old ISA hardware did. This dictates that we use
1946 * those addresses and ignore the BAR's if we cannot set PCI native
1950 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
1951 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
1953 int rid, type, progif;
1955 /* if this device supports PCI native addressing use it */
1956 progif = pci_read_config(dev, PCIR_PROGIF, 1);
1957 if ((progif & 0x8a) == 0x8a) {
1958 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
1959 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
1960 printf("Trying ATA native PCI addressing mode\n");
1961 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
/* Re-read progif: the write above may or may not have stuck. */
1965 progif = pci_read_config(dev, PCIR_PROGIF, 1);
1966 type = SYS_RES_IOPORT;
/* Primary channel: native mode uses BAR(0)/BAR(1), else legacy ISA 0x1f0/0x3f6. */
1967 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
1968 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
1969 prefetchmask & (1 << 0));
1970 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
1971 prefetchmask & (1 << 1));
1974 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
1975 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
1978 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
1979 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
/* Secondary channel: native mode uses BAR(2)/BAR(3), else legacy ISA 0x170/0x376. */
1982 if (progif & PCIP_STORAGE_IDE_MODESEC) {
1983 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
1984 prefetchmask & (1 << 2));
1985 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
1986 prefetchmask & (1 << 3));
1989 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
1990 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
1993 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
1994 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
/* BAR(4)/BAR(5) are always probed regardless of mode (presumably the
 * bus-master DMA registers — TODO confirm). */
1997 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
1998 prefetchmask & (1 << 4));
1999 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
2000 prefetchmask & (1 << 5));
/* Determine the device's IRQ (tunable override, intline, or bus routing)
 * and record it as the rid 0 SYS_RES_IRQ resource. */
2004 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2006 struct pci_devinfo *dinfo = device_get_ivars(dev);
2007 pcicfgregs *cfg = &dinfo->cfg;
2008 char tunable_name[64];
2011 /* Has to have an intpin to have an interrupt. */
2012 if (cfg->intpin == 0)
2015 /* Let the user override the IRQ with a tunable. */
2016 irq = PCI_INVALID_IRQ;
/* Tunable name is of the form "hw.pci<bus>.<slot>.INT<pin>.irq". */
2017 snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.INT%c.irq",
2018 cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Reject out-of-range tunable values (valid IRQs are 1..254 here). */
2019 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2020 irq = PCI_INVALID_IRQ;
2023 * If we didn't get an IRQ via the tunable, then we either use the
2024 * IRQ value in the intline register or we ask the bus to route an
2025 * interrupt for us. If force_route is true, then we only use the
2026 * value in the intline register if the bus was unable to assign an
2029 if (!PCI_INTERRUPT_VALID(irq)) {
2030 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2031 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2032 if (!PCI_INTERRUPT_VALID(irq))
2036 /* If after all that we don't have an IRQ, just bail. */
2037 if (!PCI_INTERRUPT_VALID(irq))
2040 /* Update the config register if it changed. */
2041 if (irq != cfg->intline) {
2043 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2046 /* Add this IRQ as rid 0 interrupt resource. */
2047 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
/* Walk the device's BARs (plus quirks and interrupt) and populate its resource list. */
2051 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
2054 struct pci_devinfo *dinfo = device_get_ivars(dev);
2055 pcicfgregs *cfg = &dinfo->cfg;
2056 struct resource_list *rl = &dinfo->resources;
2057 struct pci_quirk *q;
2060 pcib = device_get_parent(bus);
2066 /* ATA devices need special map treatment */
2067 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2068 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
2069 (pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV))
2070 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
/* pci_add_map() returns the BAR width in dwords, so 64-bit BARs skip a slot. */
2072 for (i = 0; i < cfg->nummaps;)
2073 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2074 rl, force, prefetchmask & (1 << i));
2077 * Add additional, quirked resources.
2079 for (q = &pci_quirks[0]; q->devid; q++) {
2080 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2081 && q->type == PCI_QUIRK_MAP_REG)
2082 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2086 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2087 #ifdef __PCI_REROUTE_INTERRUPT
2089 * Try to re-route interrupts. Sometimes the BIOS or
2090 * firmware may leave bogus values in these registers.
2091 * If the re-route fails, then just stick with what we
2094 pci_assign_interrupt(bus, dev, 1);
2096 pci_assign_interrupt(bus, dev, 0);
/* Scan every slot/function on bus 'busno' and add a child device for each
 * PCI function found. */
2102 pci_add_children(device_t dev, int busno, size_t dinfo_size)
2104 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2105 device_t pcib = device_get_parent(dev);
2106 struct pci_devinfo *dinfo;
2108 int s, f, pcifunchigh;
2111 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2112 ("dinfo_size too small"));
2113 maxslots = PCIB_MAXSLOTS(pcib);
2114 for (s = 0; s <= maxslots; s++) {
2118 hdrtype = REG(PCIR_HDRTYPE, 1);
/* Skip slots whose header type is not one we understand. */
2119 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Multi-function devices get all functions scanned, not just function 0. */
2121 if (hdrtype & PCIM_MFDEV)
2122 pcifunchigh = PCI_FUNCMAX;
2123 for (f = 0; f <= pcifunchigh; f++) {
2124 dinfo = pci_read_device(pcib, busno, s, f, dinfo_size);
2125 if (dinfo != NULL) {
2126 pci_add_child(dev, dinfo);
/* Create the newbus child for a probed PCI function: attach the devinfo as
 * ivars, snapshot and restore its config space, then enumerate resources. */
2134 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2136 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2137 device_set_ivars(dinfo->cfg.dev, dinfo);
2138 resource_list_init(&dinfo->resources);
2139 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2140 pci_cfg_restore(dinfo->cfg.dev, dinfo);
2141 pci_print_verbose(dinfo);
2142 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/* Generic probe for the PCI bus driver itself. */
2146 pci_probe(device_t dev)
2149 device_set_desc(dev, "PCI bus");
2151 /* Allow other subclasses to override this driver. */
2156 pci_attach(device_t dev)
2161 * Since there can be multiple independently numbered PCI
2162 * busses on systems with multiple PCI domains, we can't use
2163 * the unit number to decide which bus we are probing. We ask
2164 * the parent pcib what our bus number is.
2166 busno = pcib_get_bus(dev);
2168 device_printf(dev, "physical bus=%d\n", busno);
/* Enumerate all functions on this bus, then attach their drivers. */
2170 pci_add_children(dev, busno, sizeof(struct pci_devinfo));
2172 return (bus_generic_attach(dev));
2176 pci_suspend(device_t dev)
2178 int dstate, error, i, numdevs;
2179 device_t acpi_dev, child, *devlist;
2180 struct pci_devinfo *dinfo;
2183 * Save the PCI configuration space for each child and set the
2184 * device in the appropriate power state for this sleep state.
/* ACPI power-state guidance is only consulted when power-resume is enabled. */
2187 if (pci_do_power_resume)
2188 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2189 device_get_children(dev, &devlist, &numdevs);
2190 for (i = 0; i < numdevs; i++) {
2192 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2193 pci_cfg_save(child, dinfo, 0);
2196 /* Suspend devices before potentially powering them down. */
2197 error = bus_generic_suspend(dev);
2199 free(devlist, M_TEMP);
2204 * Always set the device to D3. If ACPI suggests a different
2205 * power state, use it instead. If ACPI is not present, the
2206 * firmware is responsible for managing device power. Skip
2207 * children who aren't attached since they are powered down
2208 * separately. Only manage type 0 devices for now.
2210 for (i = 0; acpi_dev && i < numdevs; i++) {
2212 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2213 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2214 dstate = PCI_POWERSTATE_D3;
/* ACPI may replace the default D3 with a sleep-state-specific D-state. */
2215 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2216 pci_set_powerstate(child, dstate);
2219 free(devlist, M_TEMP);
2224 pci_resume(device_t dev)
2227 device_t acpi_dev, child, *devlist;
2228 struct pci_devinfo *dinfo;
2231 * Set each child to D0 and restore its PCI configuration space.
/* ACPI involvement mirrors pci_suspend(): only when power-resume is enabled. */
2234 if (pci_do_power_resume)
2235 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2236 device_get_children(dev, &devlist, &numdevs);
2237 for (i = 0; i < numdevs; i++) {
2239 * Notify ACPI we're going to D0 but ignore the result. If
2240 * ACPI is not present, the firmware is responsible for
2241 * managing device power. Only manage type 0 devices for now.
2244 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2245 if (acpi_dev && device_is_attached(child) &&
2246 dinfo->cfg.hdrtype == 0) {
2247 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2248 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2251 /* Now the device is powered up, restore its config space. */
2252 pci_cfg_restore(child, dinfo);
2254 free(devlist, M_TEMP);
2255 return (bus_generic_resume(dev));
/* Locate a preloaded "pci_vendor_data" module and point the global vendor
 * database at it, terminating the buffer with a newline for the parser. */
2259 pci_load_vendor_data(void)
2261 caddr_t vendordata, info;
2263 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2264 info = preload_search_info(vendordata, MODINFO_ADDR);
2265 pci_vendordata = *(char **)info;
2266 info = preload_search_info(vendordata, MODINFO_SIZE);
2267 pci_vendordata_size = *(size_t *)info;
2268 /* terminate the database */
2269 pci_vendordata[pci_vendordata_size] = '\n';
/* Newbus hook called when a PCI driver is loaded: re-probe every child
 * that does not yet have a driver attached. */
2274 pci_driver_added(device_t dev, driver_t *driver)
2279 struct pci_devinfo *dinfo;
2283 device_printf(dev, "driver added\n");
2284 DEVICE_IDENTIFY(driver, dev);
2285 device_get_children(dev, &devlist, &numdevs);
2286 for (i = 0; i < numdevs; i++) {
/* Only re-probe children that currently have no driver. */
2288 if (device_get_state(child) != DS_NOTPRESENT)
2290 dinfo = device_get_ivars(child);
2291 pci_print_verbose(dinfo);
2293 printf("pci%d:%d:%d: reprobing on driver added\n",
2294 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func);
/* Restore config state in case the device was powered down earlier. */
2295 pci_cfg_restore(child, dinfo);
2296 if (device_probe_and_attach(child) != 0)
2297 pci_cfg_save(child, dinfo, 1);
2299 free(devlist, M_TEMP);
/* Print the standard one-line child description including its resources.
 * Returns the number of characters printed, per newbus convention. */
2303 pci_print_child(device_t dev, device_t child)
2305 struct pci_devinfo *dinfo;
2306 struct resource_list *rl;
2309 dinfo = device_get_ivars(child);
2310 rl = &dinfo->resources;
2312 retval += bus_print_child_header(dev, child);
2314 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
2315 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
2316 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
2317 if (device_get_flags(dev))
2318 retval += printf(" flags %#x", device_get_flags(dev));
2320 retval += printf(" at device %d.%d", pci_get_slot(child),
2321 pci_get_function(child));
2323 retval += bus_print_child_footer(dev, child);
2333 } pci_nomatch_tab[] = {
/*
 * Generic class/subclass descriptions, consulted by pci_probe_nomatch()
 * when no driver attaches and the vendor database has no entry.  A
 * subclass of -1 marks the fallback description for the whole class.
 */
2334 {PCIC_OLD, -1, "old"},
2335 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
2336 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
2337 {PCIC_STORAGE, -1, "mass storage"},
2338 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
2339 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
2340 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
2341 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
2342 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
2343 {PCIC_NETWORK, -1, "network"},
2344 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
2345 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
2346 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
2347 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
2348 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
2349 {PCIC_DISPLAY, -1, "display"},
2350 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
2351 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
2352 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
2353 {PCIC_MULTIMEDIA, -1, "multimedia"},
2354 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
2355 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
2356 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
2357 {PCIC_MEMORY, -1, "memory"},
2358 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
2359 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
2360 {PCIC_BRIDGE, -1, "bridge"},
2361 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
2362 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
2363 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
2364 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
2365 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
2366 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
2367 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
2368 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
2369 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
2370 {PCIC_SIMPLECOMM, -1, "simple comms"},
2371 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
2372 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
2373 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
2374 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
2375 {PCIC_BASEPERIPH, -1, "base peripheral"},
2376 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
2377 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
2378 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
2379 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
2380 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
2381 {PCIC_INPUTDEV, -1, "input device"},
2382 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
2383 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
2384 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
2385 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
2386 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
2387 {PCIC_DOCKING, -1, "docking station"},
2388 {PCIC_PROCESSOR, -1, "processor"},
2389 {PCIC_SERIALBUS, -1, "serial bus"},
2390 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
2391 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
2392 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
2393 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
2394 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
2395 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
2396 {PCIC_WIRELESS, -1, "wireless controller"},
2397 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
2398 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
2399 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
2400 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
2401 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
2402 {PCIC_SATCOM, -1, "satellite communication"},
2403 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
2404 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
2405 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
2406 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
2407 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
2408 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
2409 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
2410 {PCIC_DASP, -1, "dasp"},
2411 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/* Report a device with no attached driver, preferring the vendor database
 * description and falling back to the class/subclass table. */
2416 pci_probe_nomatch(device_t dev, device_t child)
2419 char *cp, *scp, *device;
2422 * Look for a listing for this device in a loaded device database.
2424 if ((device = pci_describe_device(child)) != NULL) {
2425 device_printf(dev, "<%s>", device);
2426 free(device, M_DEVBUF);
2429 * Scan the class/subclass descriptions for a general
2434 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
2435 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* subclass == -1 is the per-class fallback description. */
2436 if (pci_nomatch_tab[i].subclass == -1) {
2437 cp = pci_nomatch_tab[i].desc;
2438 } else if (pci_nomatch_tab[i].subclass ==
2439 pci_get_subclass(child)) {
2440 scp = pci_nomatch_tab[i].desc;
2444 device_printf(dev, "<%s%s%s>",
2446 ((cp != NULL) && (scp != NULL)) ? ", " : "",
2449 printf(" at device %d.%d (no driver attached)\n",
2450 pci_get_slot(child), pci_get_function(child));
/* Optionally power down driverless devices to save power. */
2451 if (pci_do_power_nodriver)
2453 (struct pci_devinfo *) device_get_ivars(child), 1);
2458 * Parse the PCI device database, if loaded, and return a pointer to a
2459 * description of the device.
2461 * The database is flat text formatted as follows:
2463 * Any line not in a valid format is ignored.
2464 * Lines are terminated with newline '\n' characters.
2466 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
2469 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
2470 * - devices cannot be listed without a corresponding VENDOR line.
2471 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
2472 * another TAB, then the device name.
2476 * Assuming (ptr) points to the beginning of a line in the database,
2477 * return the vendor or device and description of the next entry.
2478 * The value of (vendor) or (device) inappropriate for the entry type
2479 * is set to -1. Returns nonzero at the end of the database.
2481 * Note that this is not fully robust in the face of corrupt data;
2482 * we attempt to safeguard against this by spamming the end of the
2483 * database with a newline when we initialise.
2486 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* Bytes remaining in the vendor database from the current cursor. */
2495 left = pci_vendordata_size - (cp - pci_vendordata);
/* Vendor line: hex code, TAB, description (up to 80 chars). */
2503 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
/* Device line: leading TAB, hex code, TAB, description. */
2507 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
2510 /* skip to next line */
2511 while (*cp != '\n' && left > 0) {
2520 /* skip to next line */
2521 while (*cp != '\n' && left > 0) {
2525 if (*cp == '\n' && left > 0)
/* Build a malloc'd "vendor, device" description string for 'dev' from the
 * preloaded vendor database, or NULL if unavailable.  Caller frees. */
2532 pci_describe_device(device_t dev)
2535 char *desc, *vp, *dp, *line;
2537 desc = vp = dp = NULL;
2540 * If we have no vendor data, we can't do anything.
2542 if (pci_vendordata == NULL)
2546 * Scan the vendor data looking for this device
2548 line = pci_vendordata;
2549 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
2552 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
2554 if (vendor == pci_get_vendor(dev))
2557 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
2560 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
2568 if (device == pci_get_device(dev))
/* No device entry found: fall back to printing the raw device id. */
2572 snprintf(dp, 80, "0x%x", pci_get_device(dev));
2573 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
2575 sprintf(desc, "%s, %s", vp, dp);
2585 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
2587 struct pci_devinfo *dinfo;
2590 dinfo = device_get_ivars(child);
2594 case PCI_IVAR_ETHADDR:
2596 * The generic accessor doesn't deal with failure, so
2597 * we set the return value, then return an error.
2599 *((uint8_t **) result) = NULL;
2601 case PCI_IVAR_SUBVENDOR:
2602 *result = cfg->subvendor;
2604 case PCI_IVAR_SUBDEVICE:
2605 *result = cfg->subdevice;
2607 case PCI_IVAR_VENDOR:
2608 *result = cfg->vendor;
2610 case PCI_IVAR_DEVICE:
2611 *result = cfg->device;
2613 case PCI_IVAR_DEVID:
2614 *result = (cfg->device << 16) | cfg->vendor;
2616 case PCI_IVAR_CLASS:
2617 *result = cfg->baseclass;
2619 case PCI_IVAR_SUBCLASS:
2620 *result = cfg->subclass;
2622 case PCI_IVAR_PROGIF:
2623 *result = cfg->progif;
2625 case PCI_IVAR_REVID:
2626 *result = cfg->revid;
2628 case PCI_IVAR_INTPIN:
2629 *result = cfg->intpin;
2632 *result = cfg->intline;
2638 *result = cfg->slot;
2640 case PCI_IVAR_FUNCTION:
2641 *result = cfg->func;
2643 case PCI_IVAR_CMDREG:
2644 *result = cfg->cmdreg;
2646 case PCI_IVAR_CACHELNSZ:
2647 *result = cfg->cachelnsz;
2649 case PCI_IVAR_MINGNT:
2650 *result = cfg->mingnt;
2652 case PCI_IVAR_MAXLAT:
2653 *result = cfg->maxlat;
2655 case PCI_IVAR_LATTIMER:
2656 *result = cfg->lattimer;
2665 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
2667 struct pci_devinfo *dinfo;
2669 dinfo = device_get_ivars(child);
2672 case PCI_IVAR_INTPIN:
2673 dinfo->cfg.intpin = value;
2675 case PCI_IVAR_ETHADDR:
2676 case PCI_IVAR_SUBVENDOR:
2677 case PCI_IVAR_SUBDEVICE:
2678 case PCI_IVAR_VENDOR:
2679 case PCI_IVAR_DEVICE:
2680 case PCI_IVAR_DEVID:
2681 case PCI_IVAR_CLASS:
2682 case PCI_IVAR_SUBCLASS:
2683 case PCI_IVAR_PROGIF:
2684 case PCI_IVAR_REVID:
2688 case PCI_IVAR_FUNCTION:
2689 return (EINVAL); /* disallow for now */
2697 #include "opt_ddb.h"
2699 #include <ddb/ddb.h>
2700 #include <sys/cons.h>
2703 * List resources based on pci map registers, used from within ddb
2706 DB_SHOW_COMMAND(pciregs, db_pci_dump)
2708 struct pci_devinfo *dinfo;
2709 struct devlist *devlist_head;
2712 int i, error, none_count;
2715 /* get the head of the device queue */
2716 devlist_head = &pci_devq;
2719 * Go through the list of devices and print out devices
2721 for (error = 0, i = 0,
2722 dinfo = STAILQ_FIRST(devlist_head);
2723 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
2724 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
2726 /* Populate pd_name and pd_unit */
2729 name = device_get_name(dinfo->cfg.dev);
2732 db_printf("%s%d@pci%d:%d:%d:\tclass=0x%06x card=0x%08x "
2733 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
2734 (name && *name) ? name : "none",
2735 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
2737 p->pc_sel.pc_bus, p->pc_sel.pc_dev,
2738 p->pc_sel.pc_func, (p->pc_class << 16) |
2739 (p->pc_subclass << 8) | p->pc_progif,
2740 (p->pc_subdevice << 16) | p->pc_subvendor,
2741 (p->pc_device << 16) | p->pc_vendor,
2742 p->pc_revid, p->pc_hdr);
2747 static struct resource *
2748 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
2749 u_long start, u_long end, u_long count, u_int flags)
2751 struct pci_devinfo *dinfo = device_get_ivars(child);
2752 struct resource_list *rl = &dinfo->resources;
2753 struct resource_list_entry *rle;
2754 struct resource *res;
2755 pci_addr_t map, testval;
2759 * Weed out the bogons, and figure out how large the BAR/map
2760 * is. Bars that read back 0 here are bogus and unimplemented.
2761 * Note: atapci in legacy mode are special and handled elsewhere
2762 * in the code. If you have a atapci device in legacy mode and
2763 * it fails here, that other code is broken.
2766 map = pci_read_config(child, *rid, 4);
2767 pci_write_config(child, *rid, 0xffffffff, 4);
2768 testval = pci_read_config(child, *rid, 4);
2769 if (pci_maprange(testval) == 64)
2770 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
2771 if (pci_mapbase(testval) == 0)
2773 if (pci_maptype(testval) & PCI_MAPMEM) {
2774 if (type != SYS_RES_MEMORY) {
2777 "child %s requested type %d for rid %#x,"
2778 " but the BAR says it is an memio\n",
2779 device_get_nameunit(child), type, *rid);
2783 if (type != SYS_RES_IOPORT) {
2786 "child %s requested type %d for rid %#x,"
2787 " but the BAR says it is an ioport\n",
2788 device_get_nameunit(child), type, *rid);
2793 * For real BARs, we need to override the size that
2794 * the driver requests, because that's what the BAR
2795 * actually uses and we would otherwise have a
2796 * situation where we might allocate the excess to
2797 * another driver, which won't work.
2799 mapsize = pci_mapsize(testval);
2800 count = 1UL << mapsize;
2801 if (RF_ALIGNMENT(flags) < mapsize)
2802 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
2805 * Allocate enough resource, and then write back the
2806 * appropriate bar for that resource.
2808 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
2809 start, end, count, flags);
2811 device_printf(child,
2812 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
2813 count, *rid, type, start, end);
2816 resource_list_add(rl, type, *rid, start, end, count);
2817 rle = resource_list_find(rl, type, *rid);
2819 panic("pci_alloc_map: unexpectedly can't find resource.");
2821 rle->start = rman_get_start(res);
2822 rle->end = rman_get_end(res);
2825 device_printf(child,
2826 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
2827 count, *rid, type, rman_get_start(res));
2828 map = rman_get_start(res);
2830 pci_write_config(child, *rid, map, 4);
2831 if (pci_maprange(testval) == 64)
2832 pci_write_config(child, *rid + 4, map >> 32, 4);
2838 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
2839 u_long start, u_long end, u_long count, u_int flags)
2841 struct pci_devinfo *dinfo = device_get_ivars(child);
2842 struct resource_list *rl = &dinfo->resources;
2843 struct resource_list_entry *rle;
2844 pcicfgregs *cfg = &dinfo->cfg;
2847 * Perform lazy resource allocation
2849 if (device_get_parent(child) == dev) {
2853 * Can't alloc legacy interrupt once MSI messages
2854 * have been allocated.
2856 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
2857 cfg->msix.msix_alloc > 0))
2860 * If the child device doesn't have an
2861 * interrupt routed and is deserving of an
2862 * interrupt, try to assign it one.
2864 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
2866 pci_assign_interrupt(dev, child, 0);
2868 case SYS_RES_IOPORT:
2869 case SYS_RES_MEMORY:
2870 if (*rid < PCIR_BAR(cfg->nummaps)) {
2872 * Enable the I/O mode. We should
2873 * also be assigning resources too
2874 * when none are present. The
2875 * resource_list_alloc kind of sorta does
2878 if (PCI_ENABLE_IO(dev, child, type))
2881 rle = resource_list_find(rl, type, *rid);
2883 return (pci_alloc_map(dev, child, type, rid,
2884 start, end, count, flags));
2888 * If we've already allocated the resource, then
2889 * return it now. But first we may need to activate
2890 * it, since we don't allocate the resource as active
2891 * above. Normally this would be done down in the
2892 * nexus, but since we short-circuit that path we have
2893 * to do its job here. Not sure if we should free the
2894 * resource if it fails to activate.
2896 rle = resource_list_find(rl, type, *rid);
2897 if (rle != NULL && rle->res != NULL) {
2899 device_printf(child,
2900 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
2901 rman_get_size(rle->res), *rid, type,
2902 rman_get_start(rle->res));
2903 if ((flags & RF_ACTIVE) &&
2904 bus_generic_activate_resource(dev, child, type,
2905 *rid, rle->res) != 0)
2910 return (resource_list_alloc(rl, dev, child, type, rid,
2911 start, end, count, flags));
2915 pci_delete_resource(device_t dev, device_t child, int type, int rid)
2917 struct pci_devinfo *dinfo;
2918 struct resource_list *rl;
2919 struct resource_list_entry *rle;
2921 if (device_get_parent(child) != dev)
2924 dinfo = device_get_ivars(child);
2925 rl = &dinfo->resources;
2926 rle = resource_list_find(rl, type, rid);
2929 if (rman_get_device(rle->res) != dev ||
2930 rman_get_flags(rle->res) & RF_ACTIVE) {
2931 device_printf(dev, "delete_resource: "
2932 "Resource still owned by child, oops. "
2933 "(type=%d, rid=%d, addr=%lx)\n",
2934 rle->type, rle->rid,
2935 rman_get_start(rle->res));
2938 bus_release_resource(dev, type, rid, rle->res);
2940 resource_list_delete(rl, type, rid);
2943 * Why do we turn off the PCI configuration BAR when we delete a
2946 pci_write_config(child, rid, 0, 4);
2947 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
2950 struct resource_list *
2951 pci_get_resource_list (device_t dev, device_t child)
2953 struct pci_devinfo *dinfo = device_get_ivars(child);
2955 return (&dinfo->resources);
2959 pci_read_config_method(device_t dev, device_t child, int reg, int width)
2961 struct pci_devinfo *dinfo = device_get_ivars(child);
2962 pcicfgregs *cfg = &dinfo->cfg;
2964 return (PCIB_READ_CONFIG(device_get_parent(dev),
2965 cfg->bus, cfg->slot, cfg->func, reg, width));
2969 pci_write_config_method(device_t dev, device_t child, int reg,
2970 uint32_t val, int width)
2972 struct pci_devinfo *dinfo = device_get_ivars(child);
2973 pcicfgregs *cfg = &dinfo->cfg;
2975 PCIB_WRITE_CONFIG(device_get_parent(dev),
2976 cfg->bus, cfg->slot, cfg->func, reg, val, width);
2980 pci_child_location_str_method(device_t dev, device_t child, char *buf,
2984 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
2985 pci_get_function(child));
2990 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
2993 struct pci_devinfo *dinfo;
2996 dinfo = device_get_ivars(child);
2998 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
2999 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3000 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
3006 pci_assign_interrupt_method(device_t dev, device_t child)
3008 struct pci_devinfo *dinfo = device_get_ivars(child);
3009 pcicfgregs *cfg = &dinfo->cfg;
3011 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
3016 pci_modevent(module_t mod, int what, void *arg)
3018 static struct cdev *pci_cdev;
3022 STAILQ_INIT(&pci_devq);
3024 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
3026 pci_load_vendor_data();
3030 destroy_dev(pci_cdev);
3038 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3043 * Only do header type 0 devices. Type 1 devices are bridges,
3044 * which we know need special treatment. Type 2 devices are
3045 * cardbus bridges which also require special treatment.
3046 * Other types are unknown, and we err on the side of safety
3049 if (dinfo->cfg.hdrtype != 0)
3053 * Restore the device to full power mode. We must do this
3054 * before we restore the registers because moving from D3 to
3055 * D0 will cause the chip's BARs and some other registers to
3056 * be reset to some unknown power on reset values. Cut down
3057 * the noise on boot by doing nothing if we are already in
3060 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3061 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3063 for (i = 0; i < dinfo->cfg.nummaps; i++)
3064 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3065 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3066 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3067 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3068 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3069 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3070 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3071 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3072 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3073 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3074 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3077 * Restore MSI configuration if it is present. If MSI is enabled,
3078 * then restore the data and addr registers.
3080 if (dinfo->cfg.msi.msi_location != 0)
3081 pci_resume_msi(dev);
3085 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3092 * Only do header type 0 devices. Type 1 devices are bridges, which
3093 * we know need special treatment. Type 2 devices are cardbus bridges
3094 * which also require special treatment. Other types are unknown, and
3095 * we err on the side of safety by ignoring them. Powering down
3096 * bridges should not be undertaken lightly.
3098 if (dinfo->cfg.hdrtype != 0)
3100 for (i = 0; i < dinfo->cfg.nummaps; i++)
3101 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3102 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3105 * Some drivers apparently write to these registers w/o updating our
3106 * cached copy. No harm happens if we update the copy, so do so here
3107 * so we can restore them. The COMMAND register is modified by the
3108 * bus w/o updating the cache. This should represent the normally
3109 * writable portion of the 'defined' part of type 0 headers. In
3110 * theory we also need to save/restore the PCI capability structures
3111 * we know about, but apart from power we don't know any that are
3114 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3115 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3116 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3117 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3118 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3119 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3120 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3121 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3122 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3123 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3124 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3125 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3126 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3127 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3128 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
3131 * don't set the state for display devices, base peripherals and
3132 * memory devices since bad things happen when they are powered down.
3133 * We should (a) have drivers that can easily detach and (b) use
3134 * generic drivers for these devices so that some device actually
3135 * attaches. We need to make sure that when we implement (a) we don't
3136 * power the device down on a reattach.
3138 cls = pci_get_class(dev);
3141 switch (pci_do_power_nodriver)
3143 case 0: /* NO powerdown at all */
3145 case 1: /* Conservative about what to power down */
3146 if (cls == PCIC_STORAGE)
3149 case 2: /* Aggressive about what to power down */
3150 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
3151 cls == PCIC_BASEPERIPH)
3154 case 3: /* Power down everything */
3158 * PCI spec says we can only go into D3 state from D0 state.
3159 * Transition from D[12] into D0 before going to D3 state.
3161 ps = pci_get_powerstate(dev);
3162 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
3163 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3164 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
3165 pci_set_powerstate(dev, PCI_POWERSTATE_D3);