2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
55 #if defined(__i386__) || defined(__amd64__)
56 #include <machine/intr_machdep.h>
59 #include <sys/pciio.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
68 #include <contrib/dev/acpica/acpi.h>
71 #define ACPI_PWR_FOR_SLEEP(x, y, z)
74 static uint32_t pci_mapbase(unsigned mapreg);
75 static int pci_maptype(unsigned mapreg);
76 static int pci_mapsize(unsigned testval);
77 static int pci_maprange(unsigned mapreg);
78 static void pci_fixancient(pcicfgregs *cfg);
80 static int pci_porten(device_t pcib, int b, int s, int f);
81 static int pci_memen(device_t pcib, int b, int s, int f);
82 static void pci_assign_interrupt(device_t bus, device_t dev,
84 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
85 int b, int s, int f, int reg,
86 struct resource_list *rl, int force, int prefetch);
87 static int pci_probe(device_t dev);
88 static int pci_attach(device_t dev);
89 static void pci_load_vendor_data(void);
90 static int pci_describe_parse_line(char **ptr, int *vendor,
91 int *device, char **desc);
92 static char *pci_describe_device(device_t dev);
93 static int pci_modevent(module_t mod, int what, void *arg);
94 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
96 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
97 static uint32_t pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
100 static void pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
101 int reg, uint32_t data);
103 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
/*
 * Kernel object method table for the "pci" bus driver: maps the generic
 * device/bus/pci interfaces onto the implementations in this file (or the
 * bus_generic_* defaults where no PCI-specific behavior is needed).
 */
105 static device_method_t pci_methods[] = {
106 /* Device interface */
107 DEVMETHOD(device_probe, pci_probe),
108 DEVMETHOD(device_attach, pci_attach),
109 DEVMETHOD(device_detach, bus_generic_detach),
110 DEVMETHOD(device_shutdown, bus_generic_shutdown),
111 DEVMETHOD(device_suspend, pci_suspend),
112 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
115 DEVMETHOD(bus_print_child, pci_print_child),
116 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
117 DEVMETHOD(bus_read_ivar, pci_read_ivar),
118 DEVMETHOD(bus_write_ivar, pci_write_ivar),
119 DEVMETHOD(bus_driver_added, pci_driver_added),
120 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
121 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
123 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
124 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
125 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
126 DEVMETHOD(bus_delete_resource, pci_delete_resource),
127 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
128 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
129 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
130 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
131 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
132 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface */
135 DEVMETHOD(pci_read_config, pci_read_config_method),
136 DEVMETHOD(pci_write_config, pci_write_config_method),
137 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
138 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
139 DEVMETHOD(pci_enable_io, pci_enable_io_method),
140 DEVMETHOD(pci_disable_io, pci_disable_io_method),
141 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
142 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
143 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
144 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
145 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
146 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
147 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
148 DEVMETHOD(pci_release_msi, pci_release_msi_method),
149 DEVMETHOD(pci_msi_count, pci_msi_count_method),
154 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
156 static devclass_t pci_devclass;
157 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
158 MODULE_VERSION(pci, 1);
160 static char *pci_vendordata;
161 static size_t pci_vendordata_size;
165 uint32_t devid; /* Vendor/device of the card */
167 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
/*
 * Table of known-quirky devices, keyed by the combined vendor/device ID
 * (device in the high 16 bits, vendor in the low 16 bits).
 */
172 struct pci_quirk pci_quirks[] = {
173 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
174 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
175 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
176 /* As does the Serverworks OSB4 (the SMBus mapping register) */
177 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
182 /* map register information */
183 #define PCI_MAPMEM 0x01 /* memory map */
184 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
185 #define PCI_MAPPORT 0x04 /* port map */
187 struct devlist pci_devq;
188 uint32_t pci_generation;
189 uint32_t pci_numdevs = 0;
/*
 * hw.pci.* tunables and sysctl knobs controlling I/O decoding, power
 * management policy, and MSI/MSI-X support.  TUNABLE_INT makes each knob
 * settable from the loader; SYSCTL_INT exposes it at runtime.
 */
192 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
194 static int pci_enable_io_modes = 1;
195 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
196 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
197 &pci_enable_io_modes, 1,
198 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
199 enable these bits correctly. We'd like to do this all the time, but there\n\
200 are some peripherals that this causes problems with.");
/* Power-down policy for devices that no driver claims (0..3, least to most aggressive). */
202 static int pci_do_power_nodriver = 0;
203 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
204 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
205 &pci_do_power_nodriver, 0,
206 "Place a function into D3 state when no driver attaches to it. 0 means\n\
207 disable. 1 means conservatively place devices into D3 state. 2 means\n\
208 agressively place devices into D3 state. 3 means put absolutely everything\n\
211 static int pci_do_power_resume = 1;
212 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
213 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
214 &pci_do_power_resume, 1,
215 "Transition from D3 -> D0 on resume.");
/* Global enables for the MSI and MSI-X interrupt paths, consulted by the alloc routines below. */
217 static int pci_do_msi = 1;
218 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
219 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
220 "Enable support for MSI interrupts");
222 static int pci_do_msix = 1;
223 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
224 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
225 "Enable support for MSI-X interrupts");
227 /* Find a device_t by bus/slot/function */
/*
 * Look up a device_t by bus/slot/function.  Linear scan of the global
 * pci_devq list; returns the matching cfg.dev (NULL path not visible in
 * this view — presumably returned when no entry matches).
 */
230 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
232 struct pci_devinfo *dinfo;
234 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
235 if ((dinfo->cfg.bus == bus) &&
236 (dinfo->cfg.slot == slot) &&
237 (dinfo->cfg.func == func)) {
238 return (dinfo->cfg.dev);
245 /* Find a device_t by vendor/device ID */
/*
 * Linear scan of pci_devq; returns the first device whose config-space
 * vendor and device IDs match.  Only the first match is returned even if
 * several identical cards are present.
 */
248 pci_find_device(uint16_t vendor, uint16_t device)
250 struct pci_devinfo *dinfo;
252 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
253 if ((dinfo->cfg.vendor == vendor) &&
254 (dinfo->cfg.device == device)) {
255 return (dinfo->cfg.dev);
262 /* return base address of memory or port map */
/*
 * Strip the type/flag bits from a BAR value, leaving the base address.
 * Bit 0 distinguishes memory (0) from I/O port (1) BARs; the mask used
 * below is set in lines not visible in this view.
 */
265 pci_mapbase(uint32_t mapreg)
268 if ((mapreg & 0x01) == 0)
270 return (mapreg & ~mask);
273 /* return map type of memory or port map */
/*
 * Decode the low 4 bits of a BAR into PCI_MAPMEM / PCI_MAPMEMP /
 * PCI_MAPPORT flags via a 16-entry lookup table.  Odd low bits select
 * the I/O-port column; the prefetchable bit selects the MEMP rows.
 */
276 pci_maptype(unsigned mapreg)
278 static uint8_t maptype[0x10] = {
279 PCI_MAPMEM, PCI_MAPPORT,
281 PCI_MAPMEM, PCI_MAPPORT,
283 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
284 PCI_MAPMEM|PCI_MAPMEMP, 0,
285 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
289 return maptype[mapreg & 0x0f];
292 /* return log2 of map size decoded for memory or port map */
/*
 * After writing all-ones to a BAR, the read-back value encodes the decoded
 * size: strip the flag bits, then count trailing zero bits — the position
 * of the lowest set bit is log2 of the map size.
 */
295 pci_mapsize(uint32_t testval)
299 testval = pci_mapbase(testval);
302 while ((testval & 1) == 0)
311 /* return log2 of address range supported by map register */
/*
 * Dispatch on the BAR's low type bits (32-bit, below-1M, or 64-bit memory
 * decode); the case bodies are not visible in this view.
 */
314 pci_maprange(unsigned mapreg)
317 switch (mapreg & 0x07) {
333 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
/*
 * Pre-2.0 devices may report header type 0 even for bridges; if the class
 * codes identify a PCI-PCI bridge, force the type-1 header interpretation.
 * Devices already reporting a nonzero header type are left untouched.
 */
336 pci_fixancient(pcicfgregs *cfg)
338 if (cfg->hdrtype != 0)
341 /* PCI to PCI bridges use header type 1 */
342 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
346 /* extract header type specific config data */
/*
 * Fill in subvendor/subdevice and the BAR count (nummaps) according to the
 * config header type: 0 = normal device, 1 = PCI-PCI bridge, 2 = cardbus.
 * The subvendor/subdevice registers live at different offsets per type.
 */
349 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
351 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
352 switch (cfg->hdrtype) {
354 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
355 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
356 cfg->nummaps = PCI_MAXMAPS_0;
359 cfg->subvendor = REG(PCIR_SUBVEND_1, 2);
360 cfg->subdevice = REG(PCIR_SUBDEV_1, 2);
361 cfg->nummaps = PCI_MAXMAPS_1;
364 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
365 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
366 cfg->nummaps = PCI_MAXMAPS_2;
372 /* read configuration header into pcicfgregs structure */
/*
 * Probe one bus/slot/function: if a device responds (DEVVENDOR != -1),
 * allocate a pci_devinfo of the caller-specified size, fill its cfg from
 * config space, read extended capabilities if advertised, link the entry
 * onto the global pci_devq, and mirror the data into the pciio-visible
 * conf structure.  Returns the new entry (NULL paths not visible here).
 */
374 pci_read_device(device_t pcib, int b, int s, int f, size_t size)
376 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
377 pcicfgregs *cfg = NULL;
378 struct pci_devinfo *devlist_entry;
379 struct devlist *devlist_head;
381 devlist_head = &pci_devq;
383 devlist_entry = NULL;
/* An all-ones (-1) read of vendor/device means no device at this address. */
385 if (REG(PCIR_DEVVENDOR, 4) != -1) {
386 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
387 if (devlist_entry == NULL)
390 cfg = &devlist_entry->cfg;
/* Snapshot the standard config header fields. */
395 cfg->vendor = REG(PCIR_VENDOR, 2);
396 cfg->device = REG(PCIR_DEVICE, 2);
397 cfg->cmdreg = REG(PCIR_COMMAND, 2);
398 cfg->statreg = REG(PCIR_STATUS, 2);
399 cfg->baseclass = REG(PCIR_CLASS, 1);
400 cfg->subclass = REG(PCIR_SUBCLASS, 1);
401 cfg->progif = REG(PCIR_PROGIF, 1);
402 cfg->revid = REG(PCIR_REVID, 1);
403 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
404 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
405 cfg->lattimer = REG(PCIR_LATTIMER, 1);
406 cfg->intpin = REG(PCIR_INTPIN, 1);
407 cfg->intline = REG(PCIR_INTLINE, 1);
409 cfg->mingnt = REG(PCIR_MINGNT, 1);
410 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Separate the multi-function flag from the header type proper. */
412 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
413 cfg->hdrtype &= ~PCIM_MFDEV;
416 pci_hdrtypedata(pcib, b, s, f, cfg);
/* Walk the capability list only if the status register says one exists. */
418 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
419 pci_read_extcap(pcib, cfg);
421 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror cfg into the pciio(4)-style conf structure for userland queries. */
423 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
424 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
425 devlist_entry->conf.pc_sel.pc_func = cfg->func;
426 devlist_entry->conf.pc_hdr = cfg->hdrtype;
428 devlist_entry->conf.pc_subvendor = cfg->subvendor;
429 devlist_entry->conf.pc_subdevice = cfg->subdevice;
430 devlist_entry->conf.pc_vendor = cfg->vendor;
431 devlist_entry->conf.pc_device = cfg->device;
433 devlist_entry->conf.pc_class = cfg->baseclass;
434 devlist_entry->conf.pc_subclass = cfg->subclass;
435 devlist_entry->conf.pc_progif = cfg->progif;
436 devlist_entry->conf.pc_revid = cfg->revid;
441 return (devlist_entry);
/*
 * Walk the device's capability list and record the capabilities this
 * driver cares about: power management, HyperTransport MSI mapping
 * (x86 only), MSI, MSI-X, and VPD.  The REG/WREG macros defined here are
 * intentionally left defined for the VPD helpers that follow this
 * function in the file.
 */
446 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
448 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
449 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
450 #if defined(__i386__) || defined(__amd64__)
454 int ptr, nextptr, ptrptr;
/* The capability-pointer register location depends on the header type. */
456 switch (cfg->hdrtype & PCIM_HDRTYPE) {
459 ptrptr = PCIR_CAP_PTR;
462 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
465 return; /* no extended capabilities support */
467 nextptr = REG(ptrptr, 1); /* sanity check? */
470 * Read capability entries.
472 while (nextptr != 0) {
475 printf("illegal PCI extended capability offset %d\n",
479 /* Find the next entry */
481 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
483 /* Process this entry */
484 switch (REG(ptr + PCICAP_ID, 1)) {
485 case PCIY_PMG: /* PCI power management */
486 if (cfg->pp.pp_cap == 0) {
487 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
488 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
489 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
/* The optional DATA register only exists if the capability is long enough. */
490 if ((nextptr - ptr) > PCIR_POWER_DATA)
491 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
494 #if defined(__i386__) || defined(__amd64__)
495 case PCIY_HT: /* HyperTransport */
496 /* Determine HT-specific capability type. */
497 val = REG(ptr + PCIR_HT_COMMAND, 2);
498 switch (val & PCIM_HTCMD_CAP_MASK) {
499 case PCIM_HTCAP_MSI_MAPPING:
500 /* Sanity check the mapping window. */
501 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
503 addr = REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
504 if (addr != MSI_INTEL_ADDR_BASE)
506 "HT Bridge at %d:%d:%d has non-default MSI window 0x%llx\n",
507 cfg->bus, cfg->slot, cfg->func,
510 /* Enable MSI -> HT mapping. */
511 val |= PCIM_HTCMD_MSI_ENABLE;
512 WREG(ptr + PCIR_HT_COMMAND, val, 2);
517 case PCIY_MSI: /* PCI MSI */
518 cfg->msi.msi_location = ptr;
519 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* MMC field encodes log2 of the supported message count. */
520 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
521 PCIM_MSICTRL_MMC_MASK)>>1);
523 case PCIY_MSIX: /* PCI MSI-X */
524 cfg->msix.msix_location = ptr;
525 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
526 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
527 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA registers each encode a BAR index plus an offset. */
528 val = REG(ptr + PCIR_MSIX_TABLE, 4);
529 cfg->msix.msix_table_bar = PCIR_BAR(val &
531 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
532 val = REG(ptr + PCIR_MSIX_PBA, 4);
533 cfg->msix.msix_pba_bar = PCIR_BAR(val &
535 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
537 case PCIY_VPD: /* PCI Vital Product Data */
538 cfg->vpd.vpd_reg = ptr;
539 pci_read_vpd(pcib, cfg);
545 /* The REG and WREG macros defined above carry through to the next functions */
549 * PCI Vital Product Data
/*
 * Read one 32-bit word of VPD data: write the (4-byte-aligned) VPD address
 * with bit 15 clear, then poll the flag until the hardware sets it to
 * signal the data register is valid.  Note: the polling loop has no
 * timeout bound beyond the DELAY pacing.
 */
552 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg)
555 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
557 WREG(cfg->vpd.vpd_reg + 2, reg, 2);
558 while ((REG(cfg->vpd.vpd_reg + 2, 2) & 0x8000) != 0x8000)
559 DELAY(1); /* limit looping */
561 return REG(cfg->vpd.vpd_reg + 4, 4);
/*
 * Write one 32-bit word of VPD data: load the data register, then write
 * the address with bit 15 set and poll until the hardware clears it to
 * signal completion (the inverse handshake of pci_read_vpd_reg).
 */
566 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
568 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
570 WREG(cfg->vpd.vpd_reg + 4, data, 4);
571 WREG(cfg->vpd.vpd_reg + 2, reg | 0x8000, 2);
572 while ((REG(cfg->vpd.vpd_reg + 2, 2) & 0x8000) == 0x8000)
573 DELAY(1); /* limit looping */
579 struct vpd_readstate {
/*
 * Return the next byte of the VPD stream.  A 32-bit word is fetched
 * (little-endian) when the 4-byte buffer in *vrs is exhausted; otherwise
 * bytes are shifted out of the cached word one at a time.
 */
589 vpd_nextbyte(struct vpd_readstate *vrs)
593 if (vrs->bytesinval == 0) {
594 vrs->val = le32toh(pci_read_vpd_reg(vrs->pcib, vrs->cfg,
597 byte = vrs->val & 0xff;
600 vrs->val = vrs->val >> 8;
601 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd.  This is a byte-at-
 * a-time state machine over the VPD resource stream:
 *   state 0 - item header (tag + length); dispatches on resource type
 *   state 1 - Identifier String bytes -> vpd_ident
 *   state 2 - VPD-R keyword header (2-char keyword + length)
 *   state 3 - VPD-R keyword value bytes; "RV" carries the checksum
 *   state 5 - VPD-W keyword header
 *   state 6 - VPD-W keyword value bytes
 * The vpd_ros/vpd_w arrays grow by doubling (reallocf) and are trimmed to
 * their final size when each section ends.  A bad "RV" checksum discards
 * the read-only data in the cleanup at the bottom.
 */
610 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
612 struct vpd_readstate vrs;
619 int alloc, off; /* alloc/off for RO/W arrays */
623 /* init vpd reader */
631 name = remain = i = 0; /* shut up stupid gcc */
632 alloc = off = 0; /* shut up stupid gcc */
633 dflen = 0; /* shut up stupid gcc */
637 byte = vpd_nextbyte(&vrs);
639 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
640 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
641 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
644 case 0: /* item name */
/* Large-resource tag: 16-bit little-endian length follows. */
646 remain = vpd_nextbyte(&vrs);
647 remain |= vpd_nextbyte(&vrs) << 8;
648 if (remain > (0x7f*4 - vrs.off)) {
651 "pci%d:%d:%d: invalid vpd data, remain %#x\n",
652 cfg->bus, cfg->slot, cfg->func,
/* Small-resource tag: name in bits 6:3, length in bits 2:0. */
658 name = (byte >> 3) & 0xf;
661 case 0x2: /* String */
662 cfg->vpd.vpd_ident = malloc(remain + 1,
671 case 0x10: /* VPD-R */
674 cfg->vpd.vpd_ros = malloc(alloc *
675 sizeof *cfg->vpd.vpd_ros, M_DEVBUF,
679 case 0x11: /* VPD-W */
682 cfg->vpd.vpd_w = malloc(alloc *
683 sizeof *cfg->vpd.vpd_w, M_DEVBUF,
687 default: /* Invalid data, abort */
693 case 1: /* Identifier String */
694 cfg->vpd.vpd_ident[i++] = byte;
697 cfg->vpd.vpd_ident[i] = '\0';
702 case 2: /* VPD-R Keyword Header */
/* Grow the ros array by doubling when full. */
704 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
705 (alloc *= 2) * sizeof *cfg->vpd.vpd_ros,
708 cfg->vpd.vpd_ros[off].keyword[0] = byte;
709 cfg->vpd.vpd_ros[off].keyword[1] = vpd_nextbyte(&vrs);
710 dflen = vpd_nextbyte(&vrs);
712 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
715 * if this happens, we can't trust the rest
718 printf("pci%d:%d:%d: bad keyword length: %d\n",
719 cfg->bus, cfg->slot, cfg->func, dflen);
723 } else if (dflen == 0) {
724 cfg->vpd.vpd_ros[off].value = malloc(1 *
725 sizeof *cfg->vpd.vpd_ros[off].value,
726 cfg->vpd.vpd_ros[off].value[0] = '\x00';
729 cfg->vpd.vpd_ros[off].value = malloc(
731 sizeof *cfg->vpd.vpd_ros[off].value,
735 /* keep in sync w/ state 3's transitions */
736 if (dflen == 0 && remain == 0)
744 case 3: /* VPD-R Keyword Value */
745 cfg->vpd.vpd_ros[off].value[i++] = byte;
/* The "RV" keyword's first byte completes the section checksum. */
746 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
747 "RV", 2) == 0 && cksumvalid == -1) {
752 "pci%d:%d:%d: bad VPD cksum, remain %hhu\n",
753 cfg->bus, cfg->slot, cfg->func,
762 /* keep in sync w/ state 2's transitions */
764 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
765 if (dflen == 0 && remain == 0) {
766 cfg->vpd.vpd_rocnt = off;
/* Shrink the array to the exact element count now that it is known. */
767 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
768 off * sizeof *cfg->vpd.vpd_ros,
771 } else if (dflen == 0)
781 case 5: /* VPD-W Keyword Header */
783 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
784 (alloc *= 2) * sizeof *cfg->vpd.vpd_w,
787 cfg->vpd.vpd_w[off].keyword[0] = byte;
788 cfg->vpd.vpd_w[off].keyword[1] = vpd_nextbyte(&vrs);
789 cfg->vpd.vpd_w[off].len = dflen = vpd_nextbyte(&vrs);
790 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
791 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
792 sizeof *cfg->vpd.vpd_w[off].value,
796 /* keep in sync w/ state 6's transitions */
797 if (dflen == 0 && remain == 0)
805 case 6: /* VPD-W Keyword Value */
806 cfg->vpd.vpd_w[off].value[i++] = byte;
809 /* keep in sync w/ state 5's transitions */
811 cfg->vpd.vpd_w[off++].value[i++] = '\0';
812 if (dflen == 0 && remain == 0) {
813 cfg->vpd.vpd_wcnt = off;
814 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
815 off * sizeof *cfg->vpd.vpd_w,
818 } else if (dflen == 0)
823 printf("pci%d:%d:%d: invalid state: %d\n",
824 cfg->bus, cfg->slot, cfg->func, state);
/* On checksum failure, drop everything parsed from the read-only section. */
830 if (cksumvalid == 0) {
831 /* read-only data bad, clean up */
833 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
835 free(cfg->vpd.vpd_ros, M_DEVBUF);
836 cfg->vpd.vpd_ros = NULL;
/*
 * PCI interface method: hand back the cached VPD identifier string
 * (parsed earlier by pci_read_vpd).  *identptr may be set to NULL when
 * the device exposed no identifier; the error return for that case is
 * outside this view.
 */
843 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
845 struct pci_devinfo *dinfo = device_get_ivars(child);
846 pcicfgregs *cfg = &dinfo->cfg;
848 *identptr = cfg->vpd.vpd_ident;
850 if (*identptr == NULL)
/*
 * PCI interface method: look up a cached VPD read-only keyword (e.g. "PN",
 * "SN") by its two-character code and return its value string via *vptr.
 * Linear scan of the vpd_ros array built by pci_read_vpd.
 */
857 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
860 struct pci_devinfo *dinfo = device_get_ivars(child);
861 pcicfgregs *cfg = &dinfo->cfg;
864 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
865 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
866 sizeof cfg->vpd.vpd_ros[i].keyword) == 0) {
867 *vptr = cfg->vpd.vpd_ros[i].value;
870 if (i != cfg->vpd.vpd_rocnt)
878 * Return the offset in configuration space of the requested extended
879 * capability entry or 0 if the specified capability was not found.
/*
 * Walks the live capability list in config space (unlike pci_read_extcap,
 * which caches a subset at probe time), so any capability ID can be found.
 * Returns ENXIO when the device has no capability list.
 */
882 pci_find_extcap_method(device_t dev, device_t child, int capability,
885 struct pci_devinfo *dinfo = device_get_ivars(child);
886 pcicfgregs *cfg = &dinfo->cfg;
891 * Check the CAP_LIST bit of the PCI status register first.
893 status = pci_read_config(child, PCIR_STATUS, 2);
894 if (!(status & PCIM_STATUS_CAPPRESENT))
898 * Determine the start pointer of the capabilities list.
900 switch (cfg->hdrtype & PCIM_HDRTYPE) {
906 ptr = PCIR_CAP_PTR_2;
910 return (ENXIO); /* no extended capabilities support */
912 ptr = pci_read_config(child, ptr, 1);
915 * Traverse the capabilities list.
918 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
923 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
930 * Support for MSI-X message interrupts.
/*
 * Program one MSI-X table entry: each entry is 16 bytes (address low,
 * address high, data, vector control).  The vector-control word at
 * offset +12 is left untouched here; masking is handled separately by
 * pci_mask_msix/pci_unmask_msix.
 */
933 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
935 struct pci_devinfo *dinfo = device_get_ivars(dev);
936 pcicfgregs *cfg = &dinfo->cfg;
939 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
940 offset = cfg->msix.msix_table_offset + index * 16;
941 bus_write_4(cfg->msix.msix_table_res, offset, address & 0xffffffff);
942 bus_write_4(cfg->msix.msix_table_res, offset + 4, address >> 32);
943 bus_write_4(cfg->msix.msix_table_res, offset + 8, data);
/*
 * Set the mask bit in an MSI-X entry's vector-control word (table entry
 * offset +12).  Read-modify-write, skipping the bus write when the bit
 * is already set.
 */
947 pci_mask_msix(device_t dev, u_int index)
949 struct pci_devinfo *dinfo = device_get_ivars(dev);
950 pcicfgregs *cfg = &dinfo->cfg;
951 uint32_t offset, val;
953 KASSERT(cfg->msix.msix_msgnum > index, ("bogus index"));
954 offset = cfg->msix.msix_table_offset + index * 16 + 12;
955 val = bus_read_4(cfg->msix.msix_table_res, offset);
956 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
957 val |= PCIM_MSIX_VCTRL_MASK;
958 bus_write_4(cfg->msix.msix_table_res, offset, val);
/*
 * Clear the mask bit in an MSI-X entry's vector-control word — the inverse
 * of pci_mask_msix.  NOTE(review): this KASSERT bounds index against
 * msix_alloc while pci_mask_msix bounds against msix_msgnum; confirm the
 * asymmetry is intentional (mask-all at alloc time touches unallocated
 * vectors, unmask only allocated ones).
 */
963 pci_unmask_msix(device_t dev, u_int index)
965 struct pci_devinfo *dinfo = device_get_ivars(dev);
966 pcicfgregs *cfg = &dinfo->cfg;
967 uint32_t offset, val;
969 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
970 offset = cfg->msix.msix_table_offset + index * 16 + 12;
971 val = bus_read_4(cfg->msix.msix_table_res, offset);
972 if (val & PCIM_MSIX_VCTRL_MASK) {
973 val &= ~PCIM_MSIX_VCTRL_MASK;
974 bus_write_4(cfg->msix.msix_table_res, offset, val);
/*
 * Test the Pending Bit Array for vector 'index'.  NOTE(review): the word
 * offset is computed with (index / 4) * 4 but the bit with index % 32;
 * for a 32-bit-word PBA these divisors should agree — verify against the
 * upstream revision, as this view may be missing context.
 */
979 pci_pending_msix(device_t dev, u_int index)
981 struct pci_devinfo *dinfo = device_get_ivars(dev);
982 pcicfgregs *cfg = &dinfo->cfg;
983 uint32_t offset, bit;
985 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
986 offset = cfg->msix.msix_pba_offset + (index / 4) * 4;
987 bit = 1 << index % 32;
988 return (bus_read_4(cfg->msix.msix_pba_res, offset) & bit);
/*
 * Allocate up to *count MSI-X messages for 'child'.  Preconditions: the
 * MSI-X capability exists, pci_do_msix is enabled, the BARs holding the
 * vector table and PBA are already mapped and active, and no messages are
 * currently allocated.  Messages become SYS_RES_IRQ resources at rids
 * starting from 1.  All vectors are masked, then MSI-X is enabled in the
 * capability's control register.
 */
992 pci_alloc_msix(device_t dev, device_t child, int *count)
994 struct pci_devinfo *dinfo = device_get_ivars(child);
995 pcicfgregs *cfg = &dinfo->cfg;
996 struct resource_list_entry *rle;
997 int actual, error, i, irq, max;
999 /* MSI-X capability present? */
1000 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1003 /* Make sure the appropriate BARs are mapped. */
1004 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1005 cfg->msix.msix_table_bar);
1006 if (rle == NULL || rle->res == NULL ||
1007 !(rman_get_flags(rle->res) & RF_ACTIVE))
1009 cfg->msix.msix_table_res = rle->res;
/* The PBA may share the table's BAR or live in its own. */
1010 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1011 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1012 cfg->msix.msix_pba_bar);
1013 if (rle == NULL || rle->res == NULL ||
1014 !(rman_get_flags(rle->res) & RF_ACTIVE))
1017 cfg->msix.msix_pba_res = rle->res;
1019 /* Already have allocated messages? */
1020 if (cfg->msix.msix_alloc != 0)
1024 device_printf(child,
1025 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1026 *count, cfg->msix.msix_msgnum);
1027 max = min(*count, cfg->msix.msix_msgnum);
1028 for (i = 0; i < max; i++) {
1029 /* Allocate a message. */
1030 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, i,
1034 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
/* Report the allocated IRQs, collapsing consecutive values into ranges. */
1040 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1042 device_printf(child, "using IRQ %lu for MSI-X\n",
1048 * Be fancy and try to print contiguous runs of
1049 * IRQ values as ranges. 'irq' is the previous IRQ.
1050 * 'run' is true if we are in a range.
1052 device_printf(child, "using IRQs %lu", rle->start);
1055 for (i = 1; i < actual; i++) {
1056 rle = resource_list_find(&dinfo->resources,
1057 SYS_RES_IRQ, i + 1);
1059 /* Still in a run? */
1060 if (rle->start == irq + 1) {
1066 /* Finish previous range. */
1072 /* Start new range. */
1073 printf(",%lu", rle->start);
1077 /* Unfinished range? */
1080 printf(" for MSI-X\n");
1084 /* Mask all vectors. */
1085 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1086 pci_mask_msix(child, i);
1088 /* Update control register to enable MSI-X. */
1089 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1090 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1091 cfg->msix.msix_ctrl, 2);
1093 /* Update counts of alloc'd messages. */
1094 cfg->msix.msix_alloc = actual;
/*
 * Release all MSI-X messages held by 'child'.  Refuses (path not visible
 * here) if any of the IRQ resources are still allocated by a driver, then
 * disables MSI-X in the control register, hands each message back to the
 * parent bridge, deletes the rid-1..N resource entries, and zeroes the
 * allocation count.
 */
1100 pci_release_msix(device_t dev, device_t child)
1102 struct pci_devinfo *dinfo = device_get_ivars(child);
1103 pcicfgregs *cfg = &dinfo->cfg;
1104 struct resource_list_entry *rle;
1107 /* Do we have any messages to release? */
1108 if (cfg->msix.msix_alloc == 0)
1111 /* Make sure none of the resources are allocated. */
1112 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1113 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1114 KASSERT(rle != NULL, ("missing MSI resource"));
1115 if (rle->res != NULL)
1119 /* Update control register with to disable MSI-X. */
1120 cfg->msix.msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1121 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1122 cfg->msix.msix_ctrl, 2);
1124 /* Release the messages. */
1125 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1126 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1127 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1129 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1132 /* Update alloc count. */
1133 cfg->msix.msix_alloc = 0;
1138 * Support for MSI message signalled interrupts.
/*
 * Program the MSI capability with the given message address/data and set
 * the enable bit.  The values are cached in cfg->msi so pci_resume_msi
 * can re-program them after a suspend.  64-bit capable devices place the
 * data register after the high address word, hence the two layouts.
 */
1141 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1143 struct pci_devinfo *dinfo = device_get_ivars(dev);
1144 pcicfgregs *cfg = &dinfo->cfg;
1146 /* Write data and address values. */
1147 cfg->msi.msi_addr = address;
1148 cfg->msi.msi_data = data;
1149 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
1150 address & 0xffffffff, 4);
1151 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
1152 pci_write_config(dev, cfg->msi.msi_location +
1153 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1154 pci_write_config(dev, cfg->msi.msi_location +
1155 PCIR_MSI_DATA_64BIT, data, 2);
1157 pci_write_config(dev, cfg->msi.msi_location +
1158 PCIR_MSI_DATA, data, 2);
1160 /* Enable MSI in the control register. */
1161 cfg->msi.msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1162 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
1163 cfg->msi.msi_ctrl, 2);
1167 * Restore MSI registers during resume. If MSI is enabled then
1168 * restore the data and address registers in addition to the control
/*
 * Re-program the MSI registers from the values cached in cfg->msi.
 * Address/data are only rewritten when MSI was enabled before suspend;
 * the control register is restored unconditionally.
 */
1172 pci_resume_msi(device_t dev)
1174 struct pci_devinfo *dinfo = device_get_ivars(dev);
1175 pcicfgregs *cfg = &dinfo->cfg;
1179 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1180 address = cfg->msi.msi_addr;
1181 data = cfg->msi.msi_data;
1182 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
1183 address & 0xffffffff, 4);
1184 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
1185 pci_write_config(dev, cfg->msi.msi_location +
1186 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1187 pci_write_config(dev, cfg->msi.msi_location +
1188 PCIR_MSI_DATA_64BIT, data, 2);
1190 pci_write_config(dev, cfg->msi.msi_location +
1191 PCIR_MSI_DATA, data, 2);
1193 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
1194 cfg->msi.msi_ctrl, 2);
1198 * Attempt to allocate *count MSI messages. The actual number allocated is
1199 * returned in *count. After this function returns, each message will be
1200 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * PCI interface method behind pci_alloc_msi(9).  Tries MSI-X first (its
 * result, success or real failure, is returned unless it reports ENODEV),
 * then falls back to plain MSI: clamps the request to the device's
 * supported count and to 32, requires a power-of-2 count per the MSI spec,
 * asks the parent bridge for the vectors, registers them as SYS_RES_IRQ
 * rids 1..N, and enables MSI with the granted count in the MME field.
 */
1203 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1205 struct pci_devinfo *dinfo = device_get_ivars(child);
1206 pcicfgregs *cfg = &dinfo->cfg;
1207 struct resource_list_entry *rle;
1208 int actual, error, i, irqs[32];
1211 /* Don't let count == 0 get us into trouble. */
1215 /* If rid 0 is allocated, then fail. */
1216 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1217 if (rle != NULL && rle->res != NULL)
1220 /* Try MSI-X first. */
1221 error = pci_alloc_msix(dev, child, count);
1222 if (error != ENODEV)
1225 /* MSI capability present? */
1226 if (cfg->msi.msi_location == 0 || !pci_do_msi)
1229 /* Already have allocated messages? */
1230 if (cfg->msi.msi_alloc != 0)
1234 device_printf(child,
1235 "attempting to allocate %d MSI vectors (%d supported)\n",
1236 *count, cfg->msi.msi_msgnum);
1238 /* Don't ask for more than the device supports. */
1239 actual = min(*count, cfg->msi.msi_msgnum);
1241 /* Don't ask for more than 32 messages. */
1242 actual = min(actual, 32);
1244 /* MSI requires power of 2 number of messages. */
1245 if (!powerof2(actual))
1249 /* Try to allocate N messages. */
1250 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
1251 cfg->msi.msi_msgnum, irqs);
1262 * We now have N actual messages mapped onto SYS_RES_IRQ
1263 * resources in the irqs[] array, so add new resources
1264 * starting at rid 1.
1266 for (i = 0; i < actual; i++)
1267 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1268 irqs[i], irqs[i], 1);
1272 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
1277 * Be fancy and try to print contiguous runs
1278 * of IRQ values as ranges. 'run' is true if
1279 * we are in a range.
1281 device_printf(child, "using IRQs %d", irqs[0]);
1283 for (i = 1; i < actual; i++) {
1285 /* Still in a run? */
1286 if (irqs[i] == irqs[i - 1] + 1) {
1291 /* Finish previous range. */
1293 printf("-%d", irqs[i - 1]);
1297 /* Start new range. */
1298 printf(",%d", irqs[i]);
1301 /* Unfinished range? */
1303 printf("%d", irqs[actual - 1]);
1304 printf(" for MSI\n");
1308 /* Update control register with actual count and enable MSI. */
1309 ctrl = cfg->msi.msi_ctrl;
1310 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* MME holds log2 of the enabled message count (actual is a power of 2). */
1311 ctrl |= (ffs(actual) - 1) << 4;
1312 cfg->msi.msi_ctrl = ctrl;
1313 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1315 /* Update counts of alloc'd messages. */
1316 cfg->msi.msi_alloc = actual;
1321 /* Release the MSI messages associated with this device. */
/*
 * PCI interface method behind pci_release_msi(9).  Mirrors the alloc path:
 * delegates to pci_release_msix first (returning its result unless it is
 * ENODEV), verifies no MSI IRQ resource is still in use, disables MSI in
 * the control register, returns the vectors to the parent bridge, and
 * deletes the rid-1..N resource entries.
 */
1323 pci_release_msi_method(device_t dev, device_t child)
1325 struct pci_devinfo *dinfo = device_get_ivars(child);
1326 pcicfgregs *cfg = &dinfo->cfg;
1327 struct resource_list_entry *rle;
1328 int error, i, irqs[32];
1330 /* Try MSI-X first. */
1331 error = pci_release_msix(dev, child);
1332 if (error != ENODEV)
1335 /* Do we have any messages to release? */
1336 if (cfg->msi.msi_alloc == 0)
1338 KASSERT(cfg->msi.msi_alloc <= 32, ("more than 32 alloc'd messages"));
1340 /* Make sure none of the resources are allocated. */
1341 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1342 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1343 KASSERT(rle != NULL, ("missing MSI resource"));
1344 if (rle->res != NULL)
1346 irqs[i] = rle->start;
1349 /* Update control register with 0 count and disable MSI. */
1350 cfg->msi.msi_ctrl &= ~(PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE);
1351 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL,
1352 cfg->msi.msi_ctrl, 2);
1354 /* Release the messages. */
1355 PCIB_RELEASE_MSI(device_get_parent(dev), child, cfg->msi.msi_alloc,
1357 for (i = 0; i < cfg->msi.msi_alloc; i++)
1358 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1360 /* Update alloc count. */
1361 cfg->msi.msi_alloc = 0;
1366 * Return the max supported MSI or MSI-X messages this device supports.
1367 * Basically, assuming the MD code can alloc messages, this function
1368 * should return the maximum value that pci_alloc_msi() can return. Thus,
1369 * it is subject to the tunables, etc.
1372 pci_msi_count_method(device_t dev, device_t child)
1374 struct pci_devinfo *dinfo = device_get_ivars(child);
1375 pcicfgregs *cfg = &dinfo->cfg;
/* MSI-X is preferred over plain MSI: it is checked first. */
1377 if (pci_do_msix && cfg->msix.msix_location != 0)
1378 return (cfg->msix.msix_msgnum);
1379 if (pci_do_msi && cfg->msi.msi_location != 0)
1380 return (cfg->msi.msi_msgnum);
1384 /* free pcicfgregs structure and all depending data structures */
/*
 * Tear down a pci_devinfo: free any VPD identifier/read-only/write
 * entries, unlink the device from the global pci_devq list, and free
 * the devinfo itself.  Also bumps the bus generation count and drops
 * the global device count (increments/decrements not fully visible in
 * this excerpt).
 */
1387 pci_freecfg(struct pci_devinfo *dinfo)
1389 struct devlist *devlist_head;
1392 devlist_head = &pci_devq;
/* Only devices whose VPD capability was read have VPD data to free. */
1394 if (dinfo->cfg.vpd.vpd_reg) {
1395 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
1396 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
1397 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
1398 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
1399 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
1400 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
1401 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
1403 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
1404 free(dinfo, M_DEVBUF);
1406 /* increment the generation count */
1409 /* we're losing one device */
1415 * PCI power manangement
/*
 * Move 'child' to PCI power state 'state' (PCI_POWERSTATE_D0..D3) by
 * rewriting the power-management status register.  Returns EOPNOTSUPP
 * when the device lacks the PM capability or does not support the
 * requested state.  Uses DELAY() for the post-transition settle time
 * because this can be called from contexts that may not sleep.
 */
1418 pci_set_powerstate_method(device_t dev, device_t child, int state)
1420 struct pci_devinfo *dinfo = device_get_ivars(child);
1421 pcicfgregs *cfg = &dinfo->cfg;
1423 int result, oldstate, highest, delay;
/* No PM capability -> power states cannot be changed on this device. */
1425 if (cfg->pp.pp_cap == 0)
1426 return (EOPNOTSUPP);
1429 * Optimize a no state change request away. While it would be OK to
1430 * write to the hardware in theory, some devices have shown odd
1431 * behavior when going from D3 -> D3.
1433 oldstate = pci_get_powerstate(child);
1434 if (oldstate == state)
1438 * The PCI power management specification states that after a state
1439 * transition between PCI power states, system software must
1440 * guarantee a minimal delay before the function accesses the device.
1441 * Compute the worst case delay that we need to guarantee before we
1442 * access the device. Many devices will be responsive much more
1443 * quickly than this delay, but there are some that don't respond
1444 * instantly to state changes. Transitions to/from D3 state require
1445 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
1446 * is done below with DELAY rather than a sleeper function because
1447 * this function can be called from contexts where we cannot sleep.
1449 highest = (oldstate > state) ? oldstate : state;
1450 if (highest == PCI_POWERSTATE_D3)
1452 else if (highest == PCI_POWERSTATE_D2)
/* Preserve all non-power-state bits of the PM status register. */
1456 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
1457 & ~PCIM_PSTAT_DMASK;
1460 case PCI_POWERSTATE_D0:
1461 status |= PCIM_PSTAT_D0;
1463 case PCI_POWERSTATE_D1:
/* D1/D2 are optional states; honor the capability bits. */
1464 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
1465 return (EOPNOTSUPP);
1466 status |= PCIM_PSTAT_D1;
1468 case PCI_POWERSTATE_D2:
1469 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
1470 return (EOPNOTSUPP);
1471 status |= PCIM_PSTAT_D2;
1473 case PCI_POWERSTATE_D3:
1474 status |= PCIM_PSTAT_D3;
1482 "pci%d:%d:%d: Transition from D%d to D%d\n",
1483 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func,
1486 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Read the current PCI power state of 'child' from its PM status
 * register.  Devices without the PM capability are reported as D0;
 * an unrecognized encoding yields PCI_POWERSTATE_UNKNOWN.
 */
1493 pci_get_powerstate_method(device_t dev, device_t child)
1495 struct pci_devinfo *dinfo = device_get_ivars(child);
1496 pcicfgregs *cfg = &dinfo->cfg;
1500 if (cfg->pp.pp_cap != 0) {
1501 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
1502 switch (status & PCIM_PSTAT_DMASK) {
1504 result = PCI_POWERSTATE_D0;
1507 result = PCI_POWERSTATE_D1;
1510 result = PCI_POWERSTATE_D2;
1513 result = PCI_POWERSTATE_D3;
1516 result = PCI_POWERSTATE_UNKNOWN;
1520 /* No support, device is always at D0 */
1521 result = PCI_POWERSTATE_D0;
1527 * Some convenience functions for PCI device drivers.
/*
 * Read-modify-write helper that sets 'bit' in the child's PCI command
 * register.  NOTE(review): the line OR-ing 'bit' into 'command' is not
 * visible in this excerpt (extraction gap) — confirm against the full
 * source.
 */
1530 static __inline void
1531 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
1535 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1537 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/*
 * Read-modify-write helper that clears 'bit' in the child's PCI
 * command register.  NOTE(review): the line masking 'bit' out of
 * 'command' is not visible in this excerpt — confirm against the full
 * source.
 */
1540 static __inline void
1541 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
1545 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1547 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Bus method: turn on the bus-master enable bit for 'child'. */
1551 pci_enable_busmaster_method(device_t dev, device_t child)
1553 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Bus method: turn off the bus-master enable bit for 'child'. */
1558 pci_disable_busmaster_method(device_t dev, device_t child)
1560 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Bus method: enable I/O port or memory decoding for 'child'
 * according to 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY), then read
 * the command register back to verify the bit actually stuck.
 */
1565 pci_enable_io_method(device_t dev, device_t child, int space)
1575 case SYS_RES_IOPORT:
1576 bit = PCIM_CMD_PORTEN;
1579 case SYS_RES_MEMORY:
1580 bit = PCIM_CMD_MEMEN;
1586 pci_set_command_bit(dev, child, bit);
1587 /* Some devices seem to need a brief stall here, what do to? */
1588 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
/* 'error' here is a descriptive string for the space, set per-case above
 * (assignments not visible in this excerpt). */
1591 device_printf(child, "failed to enable %s mapping!\n", error);
/*
 * Bus method: disable I/O port or memory decoding for 'child',
 * mirroring pci_enable_io_method, and verify the bit cleared.
 */
1596 pci_disable_io_method(device_t dev, device_t child, int space)
1606 case SYS_RES_IOPORT:
1607 bit = PCIM_CMD_PORTEN;
1610 case SYS_RES_MEMORY:
1611 bit = PCIM_CMD_MEMEN;
1617 pci_clear_command_bit(dev, child, bit);
1618 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
/* If the bit is still set after clearing, report the failure. */
1619 if (command & bit) {
1620 device_printf(child, "failed to disable %s mapping!\n", error);
1627 * New style pci driver. Parent device is either a pci-host-bridge or a
1628 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Verbose boot-time dump of one device's config header: IDs, class,
 * command/status, latency/timing, interrupt routing, power-management
 * capability, VPD contents, and MSI/MSI-X capability summaries.
 */
1632 pci_print_verbose(struct pci_devinfo *dinfo)
1637 pcicfgregs *cfg = &dinfo->cfg;
1639 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
1640 cfg->vendor, cfg->device, cfg->revid);
1641 printf("\tbus=%d, slot=%d, func=%d\n",
1642 cfg->bus, cfg->slot, cfg->func);
1643 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
1644 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
1646 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
1647 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
/* lattimer is in units of 30 ns; mingnt/maxlat in units of 250 ns. */
1648 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
1649 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
1650 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
1651 if (cfg->intpin > 0)
1652 printf("\tintpin=%c, irq=%d\n",
1653 cfg->intpin +'a' -1, cfg->intline);
1654 if (cfg->pp.pp_cap) {
1657 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
1658 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
1659 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
1660 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
1661 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
1662 status & PCIM_PSTAT_DMASK);
1664 if (cfg->vpd.vpd_reg) {
1665 printf("\tVPD Ident: %s\n", cfg->vpd.vpd_ident);
1666 for (i = 0; i < cfg->vpd.vpd_rocnt; i++) {
1667 struct vpd_readonly *vrop;
1668 vrop = &cfg->vpd.vpd_ros[i];
/* "CP" = extended capability pointer; "RV" = checksum/remaining. */
1669 if (strncmp("CP", vrop->keyword, 2) == 0)
1670 printf("\tCP: id %d, BAR%d, off %#x\n",
1671 vrop->value[0], vrop->value[1],
1673 *(uint16_t *)&vrop->value[2]));
1674 else if (strncmp("RV", vrop->keyword, 2) == 0)
1675 printf("\tRV: %#hhx\n", vrop->value[0]);
1677 printf("\t%.2s: %s\n", vrop->keyword,
1680 for (i = 0; i < cfg->vpd.vpd_wcnt; i++) {
1681 struct vpd_write *vwp;
1682 vwp = &cfg->vpd.vpd_w[i];
1683 if (strncmp("RW", vwp->keyword, 2) != 0)
1684 printf("\t%.2s(%#x-%#x): %s\n",
1685 vwp->keyword, vwp->start,
1686 vwp->start + vwp->len, vwp->value);
1689 if (cfg->msi.msi_location) {
1692 ctrl = cfg->msi.msi_ctrl;
1693 printf("\tMSI supports %d message%s%s%s\n",
1694 cfg->msi.msi_msgnum,
1695 (cfg->msi.msi_msgnum == 1) ? "" : "s",
1696 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
1697 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
1699 if (cfg->msix.msix_location) {
1700 printf("\tMSI-X supports %d message%s ",
1701 cfg->msix.msix_msgnum,
1702 (cfg->msix.msix_msgnum == 1) ? "" : "s");
/* Table and PBA may share a BAR or live in two different BARs. */
1703 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
1704 printf("in map 0x%x\n",
1705 cfg->msix.msix_table_bar);
1707 printf("in maps 0x%x and 0x%x\n",
1708 cfg->msix.msix_table_bar,
1709 cfg->msix.msix_pba_bar);
/* Is I/O port decoding enabled in the command register of b:s:f? */
1715 pci_porten(device_t pcib, int b, int s, int f)
1717 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1718 & PCIM_CMD_PORTEN) != 0;
/* Is memory decoding enabled in the command register of b:s:f? */
1722 pci_memen(device_t pcib, int b, int s, int f)
1724 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1725 & PCIM_CMD_MEMEN) != 0;
1729 * Add a resource based on a pci map register. Return 1 if the map
1730 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * Size a BAR by the classic write-all-ones probe (save, write ~0,
 * read, restore), validate it, optionally re-enable decoding that a
 * lazy BIOS left off, then add and pre-allocate the resource on the
 * device's resource list.  'force' allows adding BARs that read as 0
 * or all-ones; 'prefetch' requests RF_PREFETCHABLE on allocation.
 */
1733 pci_add_map(device_t pcib, device_t bus, device_t dev,
1734 int b, int s, int f, int reg, struct resource_list *rl, int force,
1739 pci_addr_t start, end, count;
1746 struct resource *res;
/* Classic BAR sizing probe: save, write ~0, read back, restore. */
1748 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1749 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
1750 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1751 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
1753 if (pci_maptype(map) & PCI_MAPMEM)
1754 type = SYS_RES_MEMORY;
1756 type = SYS_RES_IOPORT;
1757 ln2size = pci_mapsize(testval);
1758 ln2range = pci_maprange(testval);
1759 base = pci_mapbase(map);
/* A 64-bit BAR consumes two consecutive 32-bit map registers. */
1760 barlen = ln2range == 64 ? 2 : 1;
1763 * For I/O registers, if bottom bit is set, and the next bit up
1764 * isn't clear, we know we have a BAR that doesn't conform to the
1765 * spec, so ignore it. Also, sanity check the size of the data
1766 * areas to the type of memory involved. Memory must be at least
1767 * 16 bytes in size, while I/O ranges must be at least 4.
1769 if ((testval & 0x1) == 0x1 &&
1770 (testval & 0x2) != 0)
1772 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
1773 (type == SYS_RES_IOPORT && ln2size < 2))
1777 /* Read the other half of a 64bit map register */
1778 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
1780 printf("\tmap[%02x]: type %x, range %2d, base %#jx, size %2d",
1781 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
1782 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1783 printf(", port disabled\n");
1784 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1785 printf(", memory disabled\n");
1787 printf(", enabled\n");
1791 * If base is 0, then we have problems. It is best to ignore
1792 * such entries for the moment. These will be allocated later if
1793 * the driver specifically requests them. However, some
1794 * removable busses look better when all resources are allocated,
1795 * so allow '0' to be overriden.
1797 * Similarly treat maps whose values is the same as the test value
1798 * read back. These maps have had all f's written to them by the
1799 * BIOS in an attempt to disable the resources.
1801 if (!force && (base == 0 || map == testval))
/* Reject addresses the platform's u_long cannot represent. */
1803 if ((u_long)base != base) {
1805 "pci%d:%d:%d bar %#x too many address bits", b, s, f, reg);
1810 * This code theoretically does the right thing, but has
1811 * undesirable side effects in some cases where peripherals
1812 * respond oddly to having these bits enabled. Let the user
1813 * be able to turn them off (since pci_enable_io_modes is 1 by
1816 if (pci_enable_io_modes) {
1817 /* Turn on resources that have been left off by a lazy BIOS */
1818 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
1819 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1820 cmd |= PCIM_CMD_PORTEN;
1821 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1823 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
1824 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1825 cmd |= PCIM_CMD_MEMEN;
1826 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1829 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1831 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1835 count = 1 << ln2size;
1836 if (base == 0 || base == pci_mapbase(testval)) {
1837 start = 0; /* Let the parent deside */
1841 end = base + (1 << ln2size) - 1;
1843 resource_list_add(rl, type, reg, start, end, count);
1846 * Not quite sure what to do on failure of allocating the resource
1847 * since I can postulate several right answers.
1849 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
1850 prefetch ? RF_PREFETCHABLE : 0);
1853 start = rman_get_start(res);
1854 if ((u_long)start != start) {
1855 /* Wait a minute! this platform can't do this address. */
1857 "pci%d.%d.%x bar %#x start %#jx, too many bits.",
1858 b, s, f, reg, (uintmax_t)start);
1859 resource_list_release(rl, bus, dev, type, reg, res);
/* Program the (possibly 64-bit) BAR with the allocated address. */
1862 pci_write_config(dev, reg, start, 4);
1864 pci_write_config(dev, reg + 4, start >> 32, 4);
1869 * For ATA devices we need to decide early what addressing mode to use.
1870 * Legacy demands that the primary and secondary ATA ports sits on the
1871 * same addresses that old ISA hardware did. This dictates that we use
1872 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Per-channel: if the channel is in native-PCI mode, size/add its two
 * BARs via pci_add_map(); otherwise register the fixed ISA-compat
 * ranges (0x1f0-0x1f7/0x3f6 primary, 0x170-0x177/0x376 secondary).
 * BAR(4)/BAR(5) (bus-master DMA etc.) are always added.
 */
1876 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
1877 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
1879 int rid, type, progif;
1881 /* if this device supports PCI native addressing use it */
1882 progif = pci_read_config(dev, PCIR_PROGIF, 1);
/* 0x8a: both channels claim their mode bits are programmable. */
1883 if ((progif & 0x8a) == 0x8a) {
1884 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
1885 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
1886 printf("Trying ATA native PCI addressing mode\n");
1887 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
/* Re-read progif: the native-mode write above may or may not stick. */
1891 progif = pci_read_config(dev, PCIR_PROGIF, 1);
1892 type = SYS_RES_IOPORT;
1893 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
1894 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
1895 prefetchmask & (1 << 0));
1896 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
1897 prefetchmask & (1 << 1));
1900 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
1901 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
1904 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
1905 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
1908 if (progif & PCIP_STORAGE_IDE_MODESEC) {
1909 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
1910 prefetchmask & (1 << 2));
1911 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
1912 prefetchmask & (1 << 3));
1915 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
1916 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
1919 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
1920 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
1923 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
1924 prefetchmask & (1 << 4));
1925 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
1926 prefetchmask & (1 << 5));
/*
 * Determine the legacy INTx IRQ for 'dev' and add it as rid-0
 * SYS_RES_IRQ.  Precedence: user tunable hw.pciB.S.INTP.irq, then the
 * bus routing method (or the intline register), depending on
 * 'force_route'.  Updates PCIR_INTLINE when the chosen IRQ differs.
 */
1930 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
1932 struct pci_devinfo *dinfo = device_get_ivars(dev);
1933 pcicfgregs *cfg = &dinfo->cfg;
1934 char tunable_name[64];
1937 /* Has to have an intpin to have an interrupt. */
1938 if (cfg->intpin == 0)
1941 /* Let the user override the IRQ with a tunable. */
1942 irq = PCI_INVALID_IRQ;
1943 snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.INT%c.irq",
1944 cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Tunable values outside 1..254 are rejected as invalid. */
1945 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
1946 irq = PCI_INVALID_IRQ;
1949 * If we didn't get an IRQ via the tunable, then we either use the
1950 * IRQ value in the intline register or we ask the bus to route an
1951 * interrupt for us. If force_route is true, then we only use the
1952 * value in the intline register if the bus was unable to assign an
1955 if (!PCI_INTERRUPT_VALID(irq)) {
1956 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
1957 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
1958 if (!PCI_INTERRUPT_VALID(irq))
1962 /* If after all that we don't have an IRQ, just bail. */
1963 if (!PCI_INTERRUPT_VALID(irq))
1966 /* Update the config register if it changed. */
1967 if (irq != cfg->intline) {
1969 pci_write_config(dev, PCIR_INTLINE, irq, 1);
1972 /* Add this IRQ as rid 0 interrupt resource. */
1973 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
/*
 * Populate 'dev''s resource list: walk every BAR (with ATA-specific
 * handling for legacy-mode IDE controllers), apply map-register
 * quirks, and assign the legacy interrupt if an intpin is present.
 */
1977 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
1980 struct pci_devinfo *dinfo = device_get_ivars(dev);
1981 pcicfgregs *cfg = &dinfo->cfg;
1982 struct resource_list *rl = &dinfo->resources;
1983 struct pci_quirk *q;
1986 pcib = device_get_parent(bus);
1992 /* ATA devices needs special map treatment */
1993 if ((pci_get_class(dev) == PCIC_STORAGE) &&
1994 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
1995 (pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV))
1996 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
/* pci_add_map() returns 1 or 2 (32- vs 64-bit BAR), advancing 'i'. */
1998 for (i = 0; i < cfg->nummaps;)
1999 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2000 rl, force, prefetchmask & (1 << i));
2003 * Add additional, quirked resources.
2005 for (q = &pci_quirks[0]; q->devid; q++) {
2006 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2007 && q->type == PCI_QUIRK_MAP_REG)
2008 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2012 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2013 #ifdef __PCI_REROUTE_INTERRUPT
2015 * Try to re-route interrupts. Sometimes the BIOS or
2016 * firmware may leave bogus values in these registers.
2017 * If the re-route fails, then just stick with what we
2020 pci_assign_interrupt(bus, dev, 1);
2022 pci_assign_interrupt(bus, dev, 0);
/*
 * Enumerate bus 'busno': scan every slot/function, read each present
 * device's config header into a pci_devinfo of 'dinfo_size' bytes,
 * and attach it as a child of 'dev'.  Multi-function scanning is
 * gated on the MFDEV bit of function 0's header type.
 */
2028 pci_add_children(device_t dev, int busno, size_t dinfo_size)
2030 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2031 device_t pcib = device_get_parent(dev);
2032 struct pci_devinfo *dinfo;
2034 int s, f, pcifunchigh;
2037 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2038 ("dinfo_size too small"));
2039 maxslots = PCIB_MAXSLOTS(pcib);
2040 for (s = 0; s <= maxslots; s++) {
2044 hdrtype = REG(PCIR_HDRTYPE, 1);
/* Skip slots reporting an unknown header type. */
2045 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
2047 if (hdrtype & PCIM_MFDEV)
2048 pcifunchigh = PCI_FUNCMAX;
2049 for (f = 0; f <= pcifunchigh; f++) {
2050 dinfo = pci_read_device(pcib, busno, s, f, dinfo_size);
2051 if (dinfo != NULL) {
2052 pci_add_child(dev, dinfo);
/*
 * Create the newbus child for one discovered PCI function: add the
 * device, hook up its ivars, initialize its resource list, save then
 * restore config state, optionally print it, and add its resources.
 */
2060 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2062 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2063 device_set_ivars(dinfo->cfg.dev, dinfo);
2064 resource_list_init(&dinfo->resources);
/* Save-then-restore normalizes the device's config/power state. */
2065 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2066 pci_cfg_restore(dinfo->cfg.dev, dinfo);
2067 pci_print_verbose(dinfo);
2068 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/* Generic probe: claim any pcib child, but at low priority. */
2072 pci_probe(device_t dev)
2075 device_set_desc(dev, "PCI bus");
2077 /* Allow other subclasses to override this driver. */
/*
 * Attach: query the parent bridge for our bus number, enumerate the
 * children, then let the generic bus code attach them.
 */
2082 pci_attach(device_t dev)
2087 * Since there can be multiple independantly numbered PCI
2088 * busses on systems with multiple PCI domains, we can't use
2089 * the unit number to decide which bus we are probing. We ask
2090 * the parent pcib what our bus number is.
2092 busno = pcib_get_bus(dev);
2094 device_printf(dev, "physical bus=%d\n", busno);
2096 pci_add_children(dev, busno, sizeof(struct pci_devinfo));
2098 return (bus_generic_attach(dev));
/*
 * Suspend: save each child's config space, suspend the children, then
 * (when ACPI is present and power-resume is enabled) place attached
 * type-0 devices into D3 or the ACPI-suggested state.
 */
2102 pci_suspend(device_t dev)
2104 int dstate, error, i, numdevs;
2105 device_t acpi_dev, child, *devlist;
2106 struct pci_devinfo *dinfo;
2109 * Save the PCI configuration space for each child and set the
2110 * device in the appropriate power state for this sleep state.
2113 if (pci_do_power_resume)
2114 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2115 device_get_children(dev, &devlist, &numdevs);
2116 for (i = 0; i < numdevs; i++) {
2118 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2119 pci_cfg_save(child, dinfo, 0);
2122 /* Suspend devices before potentially powering them down. */
2123 error = bus_generic_suspend(dev);
/* On suspend failure, free the child list before returning. */
2125 free(devlist, M_TEMP);
2130 * Always set the device to D3. If ACPI suggests a different
2131 * power state, use it instead. If ACPI is not present, the
2132 * firmware is responsible for managing device power. Skip
2133 * children who aren't attached since they are powered down
2134 * separately. Only manage type 0 devices for now.
2136 for (i = 0; acpi_dev && i < numdevs; i++) {
2138 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2139 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2140 dstate = PCI_POWERSTATE_D3;
2141 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2142 pci_set_powerstate(child, dstate);
2145 free(devlist, M_TEMP);
/*
 * Resume: power each attached type-0 child back to D0 (notifying
 * ACPI when present), restore its saved config space, then resume the
 * children generically.
 */
2150 pci_resume(device_t dev)
2153 device_t acpi_dev, child, *devlist;
2154 struct pci_devinfo *dinfo;
2157 * Set each child to D0 and restore its PCI configuration space.
2160 if (pci_do_power_resume)
2161 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2162 device_get_children(dev, &devlist, &numdevs);
2163 for (i = 0; i < numdevs; i++) {
2165 * Notify ACPI we're going to D0 but ignore the result. If
2166 * ACPI is not present, the firmware is responsible for
2167 * managing device power. Only manage type 0 devices for now.
2170 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2171 if (acpi_dev && device_is_attached(child) &&
2172 dinfo->cfg.hdrtype == 0) {
2173 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2174 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2177 /* Now the device is powered up, restore its config space. */
2178 pci_cfg_restore(child, dinfo);
2180 free(devlist, M_TEMP);
2181 return (bus_generic_resume(dev));
/*
 * Locate a preloaded "pci_vendor_data" module and record its address
 * and size in the globals consumed by pci_describe_device().  The
 * final byte is overwritten with '\n' so the line parser always
 * terminates.
 */
2185 pci_load_vendor_data(void)
2187 caddr_t vendordata, info;
2189 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2190 info = preload_search_info(vendordata, MODINFO_ADDR);
2191 pci_vendordata = *(char **)info;
2192 info = preload_search_info(vendordata, MODINFO_SIZE);
2193 pci_vendordata_size = *(size_t *)info;
2194 /* terminate the database */
2195 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Bus callback invoked when a new driver is registered: re-identify
 * and re-probe every child that currently has no driver, restoring
 * config state before the probe and re-saving it (powered down) if
 * no driver attaches.
 */
2200 pci_driver_added(device_t dev, driver_t *driver)
2205 struct pci_devinfo *dinfo;
2209 device_printf(dev, "driver added\n");
2210 DEVICE_IDENTIFY(driver, dev);
2211 device_get_children(dev, &devlist, &numdevs);
2212 for (i = 0; i < numdevs; i++) {
/* Only children without a driver are candidates for reprobing. */
2214 if (device_get_state(child) != DS_NOTPRESENT)
2216 dinfo = device_get_ivars(child);
2217 pci_print_verbose(dinfo);
2219 printf("pci%d:%d:%d: reprobing on driver added\n",
2220 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func);
2221 pci_cfg_restore(child, dinfo);
2222 if (device_probe_and_attach(child) != 0)
2223 pci_cfg_save(child, dinfo, 1);
2225 free(devlist, M_TEMP);
/*
 * bus_print_child implementation: emit the standard header plus the
 * child's port/memory/IRQ resources, flags, and slot.function address.
 * Returns the number of characters printed.
 */
2229 pci_print_child(device_t dev, device_t child)
2231 struct pci_devinfo *dinfo;
2232 struct resource_list *rl;
2235 dinfo = device_get_ivars(child);
2236 rl = &dinfo->resources;
2238 retval += bus_print_child_header(dev, child);
2240 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
2241 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
2242 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
2243 if (device_get_flags(dev))
2244 retval += printf(" flags %#x", device_get_flags(dev));
2246 retval += printf(" at device %d.%d", pci_get_slot(child),
2247 pci_get_function(child));
2249 retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass -> human-readable description table used by
 * pci_probe_nomatch().  A subclass of -1 marks the generic entry for
 * the class; subclass entries follow their class entry.  (The struct
 * declaration preceding this initializer is not visible in this
 * excerpt.)
 */
2259 } pci_nomatch_tab[] = {
2260 {PCIC_OLD, -1, "old"},
2261 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
2262 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
2263 {PCIC_STORAGE, -1, "mass storage"},
2264 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
2265 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
2266 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
2267 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
2268 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
2269 {PCIC_NETWORK, -1, "network"},
2270 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
2271 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
2272 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
2273 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
2274 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
2275 {PCIC_DISPLAY, -1, "display"},
2276 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
2277 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
2278 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
2279 {PCIC_MULTIMEDIA, -1, "multimedia"},
2280 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
2281 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
2282 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
2283 {PCIC_MEMORY, -1, "memory"},
2284 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
2285 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
2286 {PCIC_BRIDGE, -1, "bridge"},
2287 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
2288 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
2289 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
2290 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
2291 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
2292 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
2293 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
2294 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
2295 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
2296 {PCIC_SIMPLECOMM, -1, "simple comms"},
2297 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
2298 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
2299 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
2300 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
2301 {PCIC_BASEPERIPH, -1, "base peripheral"},
2302 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
2303 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
2304 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
2305 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
2306 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
2307 {PCIC_INPUTDEV, -1, "input device"},
2308 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
2309 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
2310 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
2311 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
2312 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
2313 {PCIC_DOCKING, -1, "docking station"},
2314 {PCIC_PROCESSOR, -1, "processor"},
2315 {PCIC_SERIALBUS, -1, "serial bus"},
2316 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
2317 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
2318 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
2319 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
2320 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
2321 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
2322 {PCIC_WIRELESS, -1, "wireless controller"},
2323 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
2324 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
2325 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
2326 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
2327 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
2328 {PCIC_SATCOM, -1, "satellite communication"},
2329 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
2330 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
2331 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
2332 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
2333 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
2334 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
2335 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
2336 {PCIC_DASP, -1, "dasp"},
2337 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Report a child no driver claimed: prefer the loaded vendor
 * database description, else fall back to the class/subclass strings
 * from pci_nomatch_tab.  Optionally powers the undriven device down
 * when pci_do_power_nodriver is set.
 */
2342 pci_probe_nomatch(device_t dev, device_t child)
2345 char *cp, *scp, *device;
2348 * Look for a listing for this device in a loaded device database.
2350 if ((device = pci_describe_device(child)) != NULL) {
2351 device_printf(dev, "<%s>", device);
/* pci_describe_device() returns an M_DEVBUF allocation we own. */
2352 free(device, M_DEVBUF);
2355 * Scan the class/subclass descriptions for a general
2360 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
2361 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
2362 if (pci_nomatch_tab[i].subclass == -1) {
2363 cp = pci_nomatch_tab[i].desc;
2364 } else if (pci_nomatch_tab[i].subclass ==
2365 pci_get_subclass(child)) {
2366 scp = pci_nomatch_tab[i].desc;
2370 device_printf(dev, "<%s%s%s>",
2372 ((cp != NULL) && (scp != NULL)) ? ", " : "",
2375 printf(" at device %d.%d (no driver attached)\n",
2376 pci_get_slot(child), pci_get_function(child));
2377 if (pci_do_power_nodriver)
2379 (struct pci_devinfo *) device_get_ivars(child), 1);
2384 * Parse the PCI device database, if loaded, and return a pointer to a
2385 * description of the device.
2387 * The database is flat text formatted as follows:
2389 * Any line not in a valid format is ignored.
2390 * Lines are terminated with newline '\n' characters.
2392 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
2395 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
2396 * - devices cannot be listed without a corresponding VENDOR line.
2397 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
2398 * another TAB, then the device name.
2402 * Assuming (ptr) points to the beginning of a line in the database,
2403 * return the vendor or device and description of the next entry.
2404 * The value of (vendor) or (device) inappropriate for the entry type
2405 * is set to -1. Returns nonzero at the end of the database.
2407 * Note that this is slightly unrobust in the face of corrupt data;
2408 * we attempt to safeguard against this by spamming the end of the
2409 * database with a newline when we initialise.
/*
 * Parse the next vendor/device entry at *ptr (see format comment
 * above).  Fills in *vendor or *device (the other is set to -1) and
 * *desc, advances *ptr to the following line, and returns nonzero at
 * end of database.
 */
2412 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
/* Bytes remaining guards every scan against running off the database. */
2421 left = pci_vendordata_size - (cp - pci_vendordata);
2429 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
2433 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
2436 /* skip to next line */
2437 while (*cp != '\n' && left > 0) {
2446 /* skip to next line */
2447 while (*cp != '\n' && left > 0) {
2451 if (*cp == '\n' && left > 0)
/*
 * Look 'dev' up in the loaded vendor database and return a malloc'd
 * "vendor, device" description string (caller frees with M_DEVBUF),
 * or NULL if no database is loaded or allocation fails.  If the
 * device ID itself is not listed, its hex ID is used as the name.
 */
2458 pci_describe_device(device_t dev)
2461 char *desc, *vp, *dp, *line;
2463 desc = vp = dp = NULL;
2466 * If we have no vendor data, we can't do anything.
2468 if (pci_vendordata == NULL)
2472 * Scan the vendor data looking for this device
2474 line = pci_vendordata;
/* 80 bytes matches the %80[^\n] parse width in parse_line. */
2475 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
2478 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
2480 if (vendor == pci_get_vendor(dev))
2483 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
2486 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
2494 if (device == pci_get_device(dev))
2498 snprintf(dp, 80, "0x%x", pci_get_device(dev));
2499 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
2501 sprintf(desc, "%s, %s", vp, dp);
/*
 * bus_read_ivar implementation: expose the cached config-header
 * fields of 'child' through the PCI_IVAR_* accessors.
 */
2511 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
2513 struct pci_devinfo *dinfo;
2516 dinfo = device_get_ivars(child);
2520 case PCI_IVAR_ETHADDR:
2522 * The generic accessor doesn't deal with failure, so
2523 * we set the return value, then return an error.
2525 *((uint8_t **) result) = NULL;
2527 case PCI_IVAR_SUBVENDOR:
2528 *result = cfg->subvendor;
2530 case PCI_IVAR_SUBDEVICE:
2531 *result = cfg->subdevice;
2533 case PCI_IVAR_VENDOR:
2534 *result = cfg->vendor;
2536 case PCI_IVAR_DEVICE:
2537 *result = cfg->device;
2539 case PCI_IVAR_DEVID:
/* devid packs device in the high 16 bits, vendor in the low 16. */
2540 *result = (cfg->device << 16) | cfg->vendor;
2542 case PCI_IVAR_CLASS:
2543 *result = cfg->baseclass;
2545 case PCI_IVAR_SUBCLASS:
2546 *result = cfg->subclass;
2548 case PCI_IVAR_PROGIF:
2549 *result = cfg->progif;
2551 case PCI_IVAR_REVID:
2552 *result = cfg->revid;
2554 case PCI_IVAR_INTPIN:
2555 *result = cfg->intpin;
2558 *result = cfg->intline;
2564 *result = cfg->slot;
2566 case PCI_IVAR_FUNCTION:
2567 *result = cfg->func;
2569 case PCI_IVAR_CMDREG:
2570 *result = cfg->cmdreg;
2572 case PCI_IVAR_CACHELNSZ:
2573 *result = cfg->cachelnsz;
2575 case PCI_IVAR_MINGNT:
2576 *result = cfg->mingnt;
2578 case PCI_IVAR_MAXLAT:
2579 *result = cfg->maxlat;
2581 case PCI_IVAR_LATTIMER:
2582 *result = cfg->lattimer;
/*
 * bus_write_ivar implementation: only PCI_IVAR_INTPIN is writable;
 * all the read-only identification ivars return EINVAL.
 */
2591 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
2593 struct pci_devinfo *dinfo;
2595 dinfo = device_get_ivars(child);
2598 case PCI_IVAR_INTPIN:
2599 dinfo->cfg.intpin = value;
2601 case PCI_IVAR_ETHADDR:
2602 case PCI_IVAR_SUBVENDOR:
2603 case PCI_IVAR_SUBDEVICE:
2604 case PCI_IVAR_VENDOR:
2605 case PCI_IVAR_DEVICE:
2606 case PCI_IVAR_DEVID:
2607 case PCI_IVAR_CLASS:
2608 case PCI_IVAR_SUBCLASS:
2609 case PCI_IVAR_PROGIF:
2610 case PCI_IVAR_REVID:
2614 case PCI_IVAR_FUNCTION:
2615 return (EINVAL); /* disallow for now */
2623 #include "opt_ddb.h"
2625 #include <ddb/ddb.h>
2626 #include <sys/cons.h>
2629 * List resources based on pci map registers, used within ddb
/*
 * ddb "show pciregs" command: walk the global PCI device list and
 * print one identification line (class/card/chip/rev/hdr) per device.
 * Stops early if the ddb pager is quit.
 */
2632 DB_SHOW_COMMAND(pciregs, db_pci_dump)
2634 struct pci_devinfo *dinfo;
2635 struct devlist *devlist_head;
2638 int i, error, none_count;
2641 /* get the head of the device queue */
2642 devlist_head = &pci_devq;
2645 * Go through the list of devices and print out devices
2647 for (error = 0, i = 0,
2648 dinfo = STAILQ_FIRST(devlist_head);
2649 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
2650 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
2652 /* Populate pd_name and pd_unit */
2655 name = device_get_name(dinfo->cfg.dev);
/* Devices with no attached driver print as "none". */
2658 db_printf("%s%d@pci%d:%d:%d:\tclass=0x%06x card=0x%08x "
2659 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
2660 (name && *name) ? name : "none",
2661 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
2663 p->pc_sel.pc_bus, p->pc_sel.pc_dev,
2664 p->pc_sel.pc_func, (p->pc_class << 16) |
2665 (p->pc_subclass << 8) | p->pc_progif,
2666 (p->pc_subdevice << 16) | p->pc_subvendor,
2667 (p->pc_device << 16) | p->pc_vendor,
2668 p->pc_revid, p->pc_hdr);
/*
 * Allocate a resource backing a PCI BAR for a child device, sizing
 * the allocation from the BAR itself rather than trusting the
 * driver's request, then program the BAR with the assigned address.
 */
2673 static struct resource *
2674 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
2675 u_long start, u_long end, u_long count, u_int flags)
2677 struct pci_devinfo *dinfo = device_get_ivars(child);
2678 struct resource_list *rl = &dinfo->resources;
2679 struct resource_list_entry *rle;
2680 struct resource *res;
2681 pci_addr_t map, testval;
2685 * Weed out the bogons, and figure out how large the BAR/map
2686 * is. Bars that read back 0 here are bogus and unimplemented.
2687 * Note: atapci in legacy mode are special and handled elsewhere
2688 * in the code. If you have an atapci device in legacy mode and
2689 * it fails here, that other code is broken.
/*
 * Standard BAR sizing probe: save the current value, write all-ones,
 * and read back; the writable bits reveal the BAR's size and type.
 */
2692 map = pci_read_config(child, *rid, 4);
2693 pci_write_config(child, *rid, 0xffffffff, 4);
2694 testval = pci_read_config(child, *rid, 4);
2695 if (pci_maprange(testval) == 64)
/* 64-bit BAR: the upper half lives in the next dword register. */
2696 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
2697 if (pci_mapbase(testval) == 0)
/* Sanity: requested resource type must match what the BAR decodes. */
2699 if (pci_maptype(testval) & PCI_MAPMEM) {
2700 if (type != SYS_RES_MEMORY) {
2703 "child %s requested type %d for rid %#x,"
2704 " but the BAR says it is an memio\n",
2705 device_get_nameunit(child), type, *rid);
2709 if (type != SYS_RES_IOPORT) {
2712 "child %s requested type %d for rid %#x,"
2713 " but the BAR says it is an ioport\n",
2714 device_get_nameunit(child), type, *rid);
2719 * For real BARs, we need to override the size that
2720 * the driver requests, because that's what the BAR
2721 * actually uses and we would otherwise have a
2722 * situation where we might allocate the excess to
2723 * another driver, which won't work.
/* mapsize is log2 of the BAR size; also raise alignment to match. */
2725 mapsize = pci_mapsize(testval);
2726 count = 1UL << mapsize;
2727 if (RF_ALIGNMENT(flags) < mapsize)
2728 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
2731 * Allocate enough resource, and then write back the
2732 * appropriate bar for that resource.
2734 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
2735 start, end, count, flags);
2737 device_printf(child,
2738 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
2739 count, *rid, type, start, end);
/* Record the allocation in the child's resource list. */
2742 resource_list_add(rl, type, *rid, start, end, count);
2743 rle = resource_list_find(rl, type, *rid);
2745 panic("pci_alloc_map: unexpectedly can't find resource.");
2747 rle->start = rman_get_start(res);
2748 rle->end = rman_get_end(res);
2751 device_printf(child,
2752 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
2753 count, *rid, type, rman_get_start(res));
/* Program the BAR with the address the rman actually assigned. */
2754 map = rman_get_start(res);
2756 pci_write_config(child, *rid, map, 4);
2757 if (pci_maprange(testval) == 64)
2758 pci_write_config(child, *rid + 4, map >> 32, 4);
/*
 * Bus method: allocate a resource for a child.  For direct children
 * this performs lazy interrupt routing and lazy BAR allocation
 * (via pci_alloc_map) before falling back to the generic
 * resource_list_alloc path.
 */
2764 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
2765 u_long start, u_long end, u_long count, u_int flags)
2767 struct pci_devinfo *dinfo = device_get_ivars(child);
2768 struct resource_list *rl = &dinfo->resources;
2769 struct resource_list_entry *rle;
2770 pcicfgregs *cfg = &dinfo->cfg;
2773 * Perform lazy resource allocation
2775 if (device_get_parent(child) == dev) {
2779 * Can't alloc legacy interrupt once MSI messages
2780 * have been allocated.
2782 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
2783 cfg->msix.msix_alloc > 0))
2786 * If the child device doesn't have an
2787 * interrupt routed and is deserving of an
2788 * interrupt, try to assign it one.
2790 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
2792 pci_assign_interrupt(dev, child, 0);
2794 case SYS_RES_IOPORT:
2795 case SYS_RES_MEMORY:
/* Only rids that correspond to actual BARs get the lazy-map path. */
2796 if (*rid < PCIR_BAR(cfg->nummaps)) {
2798 * Enable the I/O mode. We should
2799 * also be assigning resources too
2800 * when none are present. The
2801 * resource_list_alloc kind of sorta does
2804 if (PCI_ENABLE_IO(dev, child, type))
/* No entry in the resource list yet: size and allocate from the BAR. */
2807 rle = resource_list_find(rl, type, *rid);
2809 return (pci_alloc_map(dev, child, type, rid,
2810 start, end, count, flags));
2814 * If we've already allocated the resource, then
2815 * return it now. But first we may need to activate
2816 * it, since we don't allocate the resource as active
2817 * above. Normally this would be done down in the
2818 * nexus, but since we short-circuit that path we have
2819 * to do its job here. Not sure if we should free the
2820 * resource if it fails to activate.
2822 rle = resource_list_find(rl, type, *rid);
2823 if (rle != NULL && rle->res != NULL) {
2825 device_printf(child,
2826 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
2827 rman_get_size(rle->res), *rid, type,
2828 rman_get_start(rle->res));
2829 if ((flags & RF_ACTIVE) &&
2830 bus_generic_activate_resource(dev, child, type,
2831 *rid, rle->res) != 0)
/* Generic fallback for everything not handled above. */
2836 return (resource_list_alloc(rl, dev, child, type, rid,
2837 start, end, count, flags));
/*
 * Bus method: delete a resource from a direct child's resource list,
 * releasing it first if we still hold it, and clear the matching BAR.
 * Refuses (with a diagnostic) if the child still owns or has
 * activated the resource.
 */
2841 pci_delete_resource(device_t dev, device_t child, int type, int rid)
2843 struct pci_devinfo *dinfo;
2844 struct resource_list *rl;
2845 struct resource_list_entry *rle;
/* Only meaningful for our own children. */
2847 if (device_get_parent(child) != dev)
2850 dinfo = device_get_ivars(child);
2851 rl = &dinfo->resources;
2852 rle = resource_list_find(rl, type, rid);
2855 if (rman_get_device(rle->res) != dev ||
2856 rman_get_flags(rle->res) & RF_ACTIVE) {
2857 device_printf(dev, "delete_resource: "
2858 "Resource still owned by child, oops. "
2859 "(type=%d, rid=%d, addr=%lx)\n",
2860 rle->type, rle->rid,
2861 rman_get_start(rle->res));
2864 bus_release_resource(dev, type, rid, rle->res);
2866 resource_list_delete(rl, type, rid);
2869 * Why do we turn off the PCI configuration BAR when we delete a
/* Zeroing the BAR stops the device from decoding the freed range. */
2872 pci_write_config(child, rid, 0, 4);
2873 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
/* Bus method: return the child's per-device resource list. */
2876 struct resource_list *
2877 pci_get_resource_list (device_t dev, device_t child)
2879 struct pci_devinfo *dinfo = device_get_ivars(child);
2881 return (&dinfo->resources);
/*
 * Bus method: read `width` bytes of the child's config space at `reg`
 * by delegating to the parent bridge (PCIB) with our cached
 * bus/slot/function address.
 */
2885 pci_read_config_method(device_t dev, device_t child, int reg, int width)
2887 struct pci_devinfo *dinfo = device_get_ivars(child);
2888 pcicfgregs *cfg = &dinfo->cfg;
2890 return (PCIB_READ_CONFIG(device_get_parent(dev),
2891 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus method: write `width` bytes of `val` to the child's config
 * space at `reg`, delegating to the parent bridge (PCIB).
 */
2895 pci_write_config_method(device_t dev, device_t child, int reg,
2896 uint32_t val, int width)
2898 struct pci_devinfo *dinfo = device_get_ivars(child);
2899 pcicfgregs *cfg = &dinfo->cfg;
2901 PCIB_WRITE_CONFIG(device_get_parent(dev),
2902 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/* Bus method: format the child's location ("slot=N function=N") into buf. */
2906 pci_child_location_str_method(device_t dev, device_t child, char *buf,
2910 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
2911 pci_get_function(child));
/*
 * Bus method: format the child's plug-and-play identification string
 * (vendor/device/subvendor/subdevice/class) from the cached config
 * registers into buf.
 */
2916 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
2919 struct pci_devinfo *dinfo;
2922 dinfo = device_get_ivars(child);
2924 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
2925 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
2926 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus method: ask the parent bridge to route an interrupt for the
 * child, based on its interrupt pin.
 */
2932 pci_assign_interrupt_method(device_t dev, device_t child)
2934 struct pci_devinfo *dinfo = device_get_ivars(child);
2935 pcicfgregs *cfg = &dinfo->cfg;
2937 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev control node, and load the vendor description data;
 * on unload, destroy the control node.
 */
2942 pci_modevent(module_t mod, int what, void *arg)
2944 static struct cdev *pci_cdev;
2948 STAILQ_INIT(&pci_devq);
2950 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
2952 pci_load_vendor_data();
2956 destroy_dev(pci_cdev);
/*
 * Restore a device's config registers from the cached copy saved by
 * pci_cfg_save(), e.g. after a power-state transition or resume.
 */
2964 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
2969 * Only do header type 0 devices. Type 1 devices are bridges,
2970 * which we know need special treatment. Type 2 devices are
2971 * cardbus bridges which also require special treatment.
2972 * Other types are unknown, and we err on the side of safety
2975 if (dinfo->cfg.hdrtype != 0)
2979 * Restore the device to full power mode. We must do this
2980 * before we restore the registers because moving from D3 to
2981 * D0 will cause the chip's BARs and some other registers to
2982 * be reset to some unknown power on reset values. Cut down
2983 * the noise on boot by doing nothing if we are already in
2986 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
2987 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
/* Write back each saved BAR, then the rest of the type-0 header. */
2989 for (i = 0; i < dinfo->cfg.nummaps; i++)
2990 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
2991 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
2992 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
2993 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
2994 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
2995 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
2996 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
2997 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
2998 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
2999 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3000 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3003 * Restore MSI configuration if it is present. If MSI is enabled,
3004 * then restore the data and addr registers.
3006 if (dinfo->cfg.msi.msi_location != 0)
3007 pci_resume_msi(dev);
/*
 * Save a device's writable config registers into the cached copy so
 * pci_cfg_restore() can reinstate them, and (policy permitting)
 * power the device down to D3.
 */
3011 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3018 * Only do header type 0 devices. Type 1 devices are bridges, which
3019 * we know need special treatment. Type 2 devices are cardbus bridges
3020 * which also require special treatment. Other types are unknown, and
3021 * we err on the side of safety by ignoring them. Powering down
3022 * bridges should not be undertaken lightly.
3024 if (dinfo->cfg.hdrtype != 0)
/* Snapshot every BAR plus the expansion-ROM register. */
3026 for (i = 0; i < dinfo->cfg.nummaps; i++)
3027 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3028 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3031 * Some drivers apparently write to these registers w/o updating our
3032 * cached copy. No harm happens if we update the copy, so do so here
3033 * so we can restore them. The COMMAND register is modified by the
3034 * bus w/o updating the cache. This should represent the normally
3035 * writable portion of the 'defined' part of type 0 headers. In
3036 * theory we also need to save/restore the PCI capability structures
3037 * we know about, but apart from power we don't know any that are
3040 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3041 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3042 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3043 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3044 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3045 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3046 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3047 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3048 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3049 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3050 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3051 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3052 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3053 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3054 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
3057 * Don't set the state for display devices, base peripherals and
3058 * memory devices since bad things happen when they are powered down.
3059 * We should (a) have drivers that can easily detach and (b) use
3060 * generic drivers for these devices so that some device actually
3061 * attaches. We need to make sure that when we implement (a) we don't
3062 * power the device down on a reattach.
3064 cls = pci_get_class(dev);
/* pci_do_power_nodriver tunable selects how aggressively to power down. */
3067 switch (pci_do_power_nodriver)
3069 case 0: /* NO powerdown at all */
3071 case 1: /* Conservative about what to power down */
3072 if (cls == PCIC_STORAGE)
3075 case 2: /* Aggressive about what to power down */
3076 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
3077 cls == PCIC_BASEPERIPH)
3080 case 3: /* Power down everything */
3084 * PCI spec says we can only go into D3 state from D0 state.
3085 * Transition from D[12] into D0 before going to D3 state.
3087 ps = pci_get_powerstate(dev);
3088 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
3089 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3090 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
3091 pci_set_powerstate(dev, PCI_POWERSTATE_D3);