2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
55 #if defined(__i386__) || defined(__amd64__)
56 #include <machine/intr_machdep.h>
59 #include <sys/pciio.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
68 #include <contrib/dev/acpica/acpi.h>
/*
 * Forward declarations for the static helpers in this file, plus a stub
 * for ACPI_PWR_FOR_SLEEP when ACPI power support is compiled out.
 * NOTE(review): this extract is missing intermediate lines (the embedded
 * original line numbers are non-contiguous); declarations kept verbatim.
 */
71 #define ACPI_PWR_FOR_SLEEP(x, y, z)
74 static uint32_t pci_mapbase(unsigned mapreg);
75 static int pci_maptype(unsigned mapreg);
76 static int pci_mapsize(unsigned testval);
77 static int pci_maprange(unsigned mapreg);
78 static void pci_fixancient(pcicfgregs *cfg);
80 static int pci_porten(device_t pcib, int b, int s, int f);
81 static int pci_memen(device_t pcib, int b, int s, int f);
82 static void pci_assign_interrupt(device_t bus, device_t dev,
84 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
85 int b, int s, int f, int reg,
86 struct resource_list *rl, int force, int prefetch);
87 static int pci_probe(device_t dev);
88 static int pci_attach(device_t dev);
89 static void pci_load_vendor_data(void);
90 static int pci_describe_parse_line(char **ptr, int *vendor,
91 int *device, char **desc);
92 static char *pci_describe_device(device_t dev);
93 static int pci_modevent(module_t mod, int what, void *arg);
94 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
96 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
97 static uint32_t pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
100 static void pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
101 int reg, uint32_t data);
103 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
/*
 * Newbus method table for the "pci" bus driver: device, bus, and PCI
 * interface entry points.  NOTE(review): interior lines are missing
 * from this extract (e.g. the table terminator); entries kept verbatim.
 */
105 static device_method_t pci_methods[] = {
106 /* Device interface */
107 DEVMETHOD(device_probe, pci_probe),
108 DEVMETHOD(device_attach, pci_attach),
109 DEVMETHOD(device_detach, bus_generic_detach),
110 DEVMETHOD(device_shutdown, bus_generic_shutdown),
111 DEVMETHOD(device_suspend, pci_suspend),
112 DEVMETHOD(device_resume, pci_resume),
/* Bus interface: child printing, ivars, resource management. */
115 DEVMETHOD(bus_print_child, pci_print_child),
116 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
117 DEVMETHOD(bus_read_ivar, pci_read_ivar),
118 DEVMETHOD(bus_write_ivar, pci_write_ivar),
119 DEVMETHOD(bus_driver_added, pci_driver_added),
120 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
121 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
123 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
124 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
125 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
126 DEVMETHOD(bus_delete_resource, pci_delete_resource),
127 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
128 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
129 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
130 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
131 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
132 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface: config-space access, power, VPD, MSI/MSI-X. */
135 DEVMETHOD(pci_read_config, pci_read_config_method),
136 DEVMETHOD(pci_write_config, pci_write_config_method),
137 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
138 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
139 DEVMETHOD(pci_enable_io, pci_enable_io_method),
140 DEVMETHOD(pci_disable_io, pci_disable_io_method),
141 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
142 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
143 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
144 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
145 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
146 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
147 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
148 DEVMETHOD(pci_release_msi, pci_release_msi_method),
149 DEVMETHOD(pci_msi_count, pci_msi_count_method),
154 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
/*
 * Module registration glue plus the vendor-data buffer loaded from the
 * pciids database.  NOTE(review): the opening of struct pci_quirk is
 * missing from this extract; the devid field and quirk-type defines
 * below belong to it.
 */
156 static devclass_t pci_devclass;
157 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
158 MODULE_VERSION(pci, 1);
160 static char *pci_vendordata;
161 static size_t pci_vendordata_size;
165 uint32_t devid; /* Vendor/device of the card */
167 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
168 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
/*
 * Table of known-broken devices, matched by devid (vendor/device).
 * NOTE(review): the terminating sentinel entry and closing brace are
 * missing from this extract; entries kept verbatim.
 */
173 struct pci_quirk pci_quirks[] = {
174 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
175 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
176 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
177 /* As does the Serverworks OSB4 (the SMBus mapping register) */
178 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
181 * MSI doesn't work with the Intel E7501 chipset, at least on
182 * the Tyan 2721 motherboard.
184 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
187 * MSI doesn't work with the Intel E7505 chipset, at least on
188 * the Tyan S2665ANF motherboard.
190 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
/*
 * BAR type flags returned by pci_maptype(), and the global device list
 * (pci_devq) with its generation counter and device count.
 */
195 /* map register information */
196 #define PCI_MAPMEM 0x01 /* memory map */
197 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
198 #define PCI_MAPPORT 0x04 /* port map */
200 struct devlist pci_devq;
201 uint32_t pci_generation;
202 uint32_t pci_numdevs = 0;
/*
 * hw.pci sysctl tree and loader tunables controlling I/O enable bits,
 * D3 power handling for driverless devices, resume power-up, and
 * MSI/MSI-X enablement plus the chipset blacklist.
 */
205 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
207 static int pci_enable_io_modes = 1;
208 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
209 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
210 &pci_enable_io_modes, 1,
211 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
212 enable these bits correctly. We'd like to do this all the time, but there\n\
213 are some peripherals that this causes problems with.");
215 static int pci_do_power_nodriver = 0;
216 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
217 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
218 &pci_do_power_nodriver, 0,
219 "Place a function into D3 state when no driver attaches to it. 0 means\n\
220 disable. 1 means conservatively place devices into D3 state. 2 means\n\
221 agressively place devices into D3 state. 3 means put absolutely everything\n\
224 static int pci_do_power_resume = 1;
225 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
226 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
227 &pci_do_power_resume, 1,
228 "Transition from D3 -> D0 on resume.");
230 static int pci_do_msi = 1;
231 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
232 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
233 "Enable support for MSI interrupts");
235 static int pci_do_msix = 1;
236 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
237 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
238 "Enable support for MSI-X interrupts");
240 static int pci_honor_msi_blacklist = 1;
241 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
242 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
243 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
/*
 * Linear lookups over the global pci_devq device list.
 * pci_find_bsf() matches on bus/slot/function; pci_find_device()
 * matches on vendor/device ID and returns the first hit.
 * NOTE(review): return-NULL fallthrough and closing braces are missing
 * from this extract.
 */
245 /* Find a device_t by bus/slot/function */
248 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
250 struct pci_devinfo *dinfo;
252 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
253 if ((dinfo->cfg.bus == bus) &&
254 (dinfo->cfg.slot == slot) &&
255 (dinfo->cfg.func == func)) {
256 return (dinfo->cfg.dev);
263 /* Find a device_t by vendor/device ID */
266 pci_find_device(uint16_t vendor, uint16_t device)
268 struct pci_devinfo *dinfo;
270 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
271 if ((dinfo->cfg.vendor == vendor) &&
272 (dinfo->cfg.device == device)) {
273 return (dinfo->cfg.dev);
/*
 * BAR decoding helpers.  pci_mapbase() strips the type bits from a BAR
 * value; pci_maptype() classifies it via a 16-entry lookup on the low
 * nibble; pci_mapsize() computes log2 of the decoded size from the
 * test value written back by the sizing probe; pci_maprange() switches
 * on the low three bits to report the supported address-range width.
 * NOTE(review): mask setup, loop bodies, and switch cases are missing
 * from this extract.
 */
280 /* return base address of memory or port map */
283 pci_mapbase(uint32_t mapreg)
286 if ((mapreg & 0x01) == 0)
288 return (mapreg & ~mask);
291 /* return map type of memory or port map */
294 pci_maptype(unsigned mapreg)
296 static uint8_t maptype[0x10] = {
297 PCI_MAPMEM, PCI_MAPPORT,
299 PCI_MAPMEM, PCI_MAPPORT,
301 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
302 PCI_MAPMEM|PCI_MAPMEMP, 0,
303 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
307 return maptype[mapreg & 0x0f];
310 /* return log2 of map size decoded for memory or port map */
313 pci_mapsize(uint32_t testval)
317 testval = pci_mapbase(testval);
320 while ((testval & 1) == 0)
329 /* return log2 of address range supported by map register */
332 pci_maprange(unsigned mapreg)
335 switch (mapreg & 0x07) {
/*
 * pci_fixancient() normalizes PCI 1.0 config data: devices reporting
 * header type 0 that are actually PCI-PCI bridges get type 1.
 * pci_hdrtypedata() reads the header-type-specific fields (subvendor/
 * subdevice, number of BARs) for header types 0, 1, and 2 (cardbus).
 * NOTE(review): case labels and assignments between the visible lines
 * are missing from this extract.
 */
351 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
354 pci_fixancient(pcicfgregs *cfg)
356 if (cfg->hdrtype != 0)
359 /* PCI to PCI bridges use header type 1 */
360 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
364 /* extract header type specific config data */
367 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
369 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
370 switch (cfg->hdrtype) {
372 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
373 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
374 cfg->nummaps = PCI_MAXMAPS_0;
377 cfg->subvendor = REG(PCIR_SUBVEND_1, 2);
378 cfg->subdevice = REG(PCIR_SUBDEV_1, 2);
379 cfg->nummaps = PCI_MAXMAPS_1;
382 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
383 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
384 cfg->nummaps = PCI_MAXMAPS_2;
/*
 * Probe one bus/slot/function: if a device responds (DEVVENDOR != -1),
 * allocate a pci_devinfo of 'size' bytes, fill its pcicfgregs from
 * config space, pull header-type data and capabilities, link it onto
 * pci_devq, and mirror the config into the pciio 'conf' structure.
 * Returns the new devlist entry (NULL when no device present).
 */
390 /* read configuration header into pcicfgregs structure */
392 pci_read_device(device_t pcib, int b, int s, int f, size_t size)
394 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
395 pcicfgregs *cfg = NULL;
396 struct pci_devinfo *devlist_entry;
397 struct devlist *devlist_head;
399 devlist_head = &pci_devq;
401 devlist_entry = NULL;
403 if (REG(PCIR_DEVVENDOR, 4) != -1) {
/* NOTE(review): M_WAITOK cannot fail, so this NULL check looks redundant — confirm. */
404 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
405 if (devlist_entry == NULL)
408 cfg = &devlist_entry->cfg;
413 cfg->vendor = REG(PCIR_VENDOR, 2);
414 cfg->device = REG(PCIR_DEVICE, 2);
415 cfg->cmdreg = REG(PCIR_COMMAND, 2);
416 cfg->statreg = REG(PCIR_STATUS, 2);
417 cfg->baseclass = REG(PCIR_CLASS, 1);
418 cfg->subclass = REG(PCIR_SUBCLASS, 1);
419 cfg->progif = REG(PCIR_PROGIF, 1);
420 cfg->revid = REG(PCIR_REVID, 1);
421 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
422 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
423 cfg->lattimer = REG(PCIR_LATTIMER, 1);
424 cfg->intpin = REG(PCIR_INTPIN, 1);
425 cfg->intline = REG(PCIR_INTLINE, 1);
427 cfg->mingnt = REG(PCIR_MINGNT, 1);
428 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split the multifunction bit out of the header type. */
430 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
431 cfg->hdrtype &= ~PCIM_MFDEV;
434 pci_hdrtypedata(pcib, b, s, f, cfg);
436 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
437 pci_read_extcap(pcib, cfg);
439 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
441 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
442 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
443 devlist_entry->conf.pc_sel.pc_func = cfg->func;
444 devlist_entry->conf.pc_hdr = cfg->hdrtype;
446 devlist_entry->conf.pc_subvendor = cfg->subvendor;
447 devlist_entry->conf.pc_subdevice = cfg->subdevice;
448 devlist_entry->conf.pc_vendor = cfg->vendor;
449 devlist_entry->conf.pc_device = cfg->device;
451 devlist_entry->conf.pc_class = cfg->baseclass;
452 devlist_entry->conf.pc_subclass = cfg->subclass;
453 devlist_entry->conf.pc_progif = cfg->progif;
454 devlist_entry->conf.pc_revid = cfg->revid;
459 return (devlist_entry);
/*
 * Walk the PCI capability list (header-type dependent start pointer)
 * and record what we find in cfg: power management, HyperTransport
 * MSI mapping (x86 only — enables the MSI window when it is at the
 * Intel default address), MSI, MSI-X (table/PBA BARs and offsets),
 * and VPD (which triggers a full pci_read_vpd() parse).
 * NOTE(review): loop/switch framing lines are missing from this
 * extract; code kept verbatim.
 */
464 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
466 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
467 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
468 #if defined(__i386__) || defined(__amd64__)
472 int ptr, nextptr, ptrptr;
474 switch (cfg->hdrtype & PCIM_HDRTYPE) {
477 ptrptr = PCIR_CAP_PTR;
480 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
483 return; /* no extended capabilities support */
485 nextptr = REG(ptrptr, 1); /* sanity check? */
488 * Read capability entries.
490 while (nextptr != 0) {
493 printf("illegal PCI extended capability offset %d\n",
497 /* Find the next entry */
499 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
501 /* Process this entry */
502 switch (REG(ptr + PCICAP_ID, 1)) {
503 case PCIY_PMG: /* PCI power management */
504 if (cfg->pp.pp_cap == 0) {
505 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
506 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
507 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
508 if ((nextptr - ptr) > PCIR_POWER_DATA)
509 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
512 #if defined(__i386__) || defined(__amd64__)
513 case PCIY_HT: /* HyperTransport */
514 /* Determine HT-specific capability type. */
515 val = REG(ptr + PCIR_HT_COMMAND, 2);
516 switch (val & PCIM_HTCMD_CAP_MASK) {
517 case PCIM_HTCAP_MSI_MAPPING:
518 /* Sanity check the mapping window. */
519 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4);
521 addr = REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4);
522 if (addr != MSI_INTEL_ADDR_BASE)
524 "HT Bridge at %d:%d:%d has non-default MSI window 0x%llx\n",
525 cfg->bus, cfg->slot, cfg->func,
528 /* Enable MSI -> HT mapping. */
529 val |= PCIM_HTCMD_MSI_ENABLE;
530 WREG(ptr + PCIR_HT_COMMAND, val, 2);
535 case PCIY_MSI: /* PCI MSI */
536 cfg->msi.msi_location = ptr;
537 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* Message count is encoded as a power of two in the MMC field. */
538 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
539 PCIM_MSICTRL_MMC_MASK)>>1);
541 case PCIY_MSIX: /* PCI MSI-X */
542 cfg->msix.msix_location = ptr;
543 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
544 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
545 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Low bits of the table/PBA registers select the BAR; the rest is an offset. */
546 val = REG(ptr + PCIR_MSIX_TABLE, 4);
547 cfg->msix.msix_table_bar = PCIR_BAR(val &
549 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
550 val = REG(ptr + PCIR_MSIX_PBA, 4);
551 cfg->msix.msix_pba_bar = PCIR_BAR(val &
553 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
555 case PCIY_VPD: /* PCI Vital Product Data */
556 cfg->vpd.vpd_reg = ptr;
557 pci_read_vpd(pcib, cfg);
567 * PCI Vital Product Data
570 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg)
573 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
575 WREG(cfg->vpd.vpd_reg + 2, reg, 2);
576 while ((REG(cfg->vpd.vpd_reg + 2, 2) & 0x8000) != 0x8000)
577 DELAY(1); /* limit looping */
579 return REG(cfg->vpd.vpd_reg + 4, 4);
584 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
586 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
588 WREG(cfg->vpd.vpd_reg + 4, data, 4);
589 WREG(cfg->vpd.vpd_reg + 2, reg | 0x8000, 2);
590 while ((REG(cfg->vpd.vpd_reg + 2, 2) & 0x8000) == 0x8000)
591 DELAY(1); /* limit looping */
597 struct vpd_readstate {
607 vpd_nextbyte(struct vpd_readstate *vrs)
611 if (vrs->bytesinval == 0) {
612 vrs->val = le32toh(pci_read_vpd_reg(vrs->pcib, vrs->cfg,
615 byte = vrs->val & 0xff;
618 vrs->val = vrs->val >> 8;
619 byte = vrs->val & 0xff;
/*
 * Parse the device's VPD via a small state machine fed by
 * vpd_nextbyte():
 *   state 0 - resource item header (small/large tag, length, name)
 *   state 1 - Identifier String -> cfg->vpd.vpd_ident
 *   state 2/3 - VPD-R keyword header / value -> cfg->vpd.vpd_ros[]
 *              (the "RV" keyword carries the checksum; cksumvalid
 *              tracks whether it verified)
 *   state 5/6 - VPD-W keyword header / value -> cfg->vpd.vpd_w[]
 * On a bad checksum the read-only array is freed and cleared.
 * NOTE(review): many framing lines (state transitions, loop header,
 * error paths) are missing from this extract; code kept verbatim.
 */
628 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
630 struct vpd_readstate vrs;
637 int alloc, off; /* alloc/off for RO/W arrays */
641 /* init vpd reader */
649 name = remain = i = 0; /* shut up stupid gcc */
650 alloc = off = 0; /* shut up stupid gcc */
651 dflen = 0; /* shut up stupid gcc */
655 byte = vpd_nextbyte(&vrs);
657 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
658 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
659 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
662 case 0: /* item name */
664 remain = vpd_nextbyte(&vrs);
665 remain |= vpd_nextbyte(&vrs) << 8;
666 if (remain > (0x7f*4 - vrs.off)) {
669 "pci%d:%d:%d: invalid vpd data, remain %#x\n",
670 cfg->bus, cfg->slot, cfg->func,
676 name = (byte >> 3) & 0xf;
679 case 0x2: /* String */
680 cfg->vpd.vpd_ident = malloc(remain + 1,
689 case 0x10: /* VPD-R */
692 cfg->vpd.vpd_ros = malloc(alloc *
693 sizeof *cfg->vpd.vpd_ros, M_DEVBUF,
697 case 0x11: /* VPD-W */
700 cfg->vpd.vpd_w = malloc(alloc *
701 sizeof *cfg->vpd.vpd_w, M_DEVBUF,
705 default: /* Invalid data, abort */
711 case 1: /* Identifier String */
712 cfg->vpd.vpd_ident[i++] = byte;
715 cfg->vpd.vpd_ident[i] = '\0';
720 case 2: /* VPD-R Keyword Header */
722 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
723 (alloc *= 2) * sizeof *cfg->vpd.vpd_ros,
726 cfg->vpd.vpd_ros[off].keyword[0] = byte;
727 cfg->vpd.vpd_ros[off].keyword[1] = vpd_nextbyte(&vrs);
728 dflen = vpd_nextbyte(&vrs);
730 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
733 * if this happens, we can't trust the rest
736 printf("pci%d:%d:%d: bad keyword length: %d\n",
737 cfg->bus, cfg->slot, cfg->func, dflen);
741 } else if (dflen == 0) {
742 cfg->vpd.vpd_ros[off].value = malloc(1 *
743 sizeof *cfg->vpd.vpd_ros[off].value,
745 cfg->vpd.vpd_ros[off].value[0] = '\x00';
747 cfg->vpd.vpd_ros[off].value = malloc(
749 sizeof *cfg->vpd.vpd_ros[off].value,
753 /* keep in sync w/ state 3's transitions */
754 if (dflen == 0 && remain == 0)
762 case 3: /* VPD-R Keyword Value */
763 cfg->vpd.vpd_ros[off].value[i++] = byte;
764 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
765 "RV", 2) == 0 && cksumvalid == -1) {
770 "pci%d:%d:%d: bad VPD cksum, remain %hhu\n",
771 cfg->bus, cfg->slot, cfg->func,
780 /* keep in sync w/ state 2's transitions */
782 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
783 if (dflen == 0 && remain == 0) {
784 cfg->vpd.vpd_rocnt = off;
785 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
786 off * sizeof *cfg->vpd.vpd_ros,
789 } else if (dflen == 0)
799 case 5: /* VPD-W Keyword Header */
801 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
802 (alloc *= 2) * sizeof *cfg->vpd.vpd_w,
805 cfg->vpd.vpd_w[off].keyword[0] = byte;
806 cfg->vpd.vpd_w[off].keyword[1] = vpd_nextbyte(&vrs);
807 cfg->vpd.vpd_w[off].len = dflen = vpd_nextbyte(&vrs);
808 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
809 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
810 sizeof *cfg->vpd.vpd_w[off].value,
814 /* keep in sync w/ state 6's transitions */
815 if (dflen == 0 && remain == 0)
823 case 6: /* VPD-W Keyword Value */
824 cfg->vpd.vpd_w[off].value[i++] = byte;
827 /* keep in sync w/ state 5's transitions */
829 cfg->vpd.vpd_w[off++].value[i++] = '\0';
830 if (dflen == 0 && remain == 0) {
831 cfg->vpd.vpd_wcnt = off;
832 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
833 off * sizeof *cfg->vpd.vpd_w,
836 } else if (dflen == 0)
841 printf("pci%d:%d:%d: invalid state: %d\n",
842 cfg->bus, cfg->slot, cfg->func, state);
848 if (cksumvalid == 0) {
849 /* read-only data bad, clean up */
851 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
853 free(cfg->vpd.vpd_ros, M_DEVBUF);
854 cfg->vpd.vpd_ros = NULL;
/*
 * pci_get_vpd_ident_method(): hand back the cached VPD identifier
 * string (set by pci_read_vpd); the NULL check signals "no VPD ident".
 * pci_get_vpd_readonly_method(): linear search of the cached VPD-R
 * keyword array for 'kw'; on a match *vptr gets the stored value.
 * NOTE(review): return statements and closing braces are missing from
 * this extract.
 */
861 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
863 struct pci_devinfo *dinfo = device_get_ivars(child);
864 pcicfgregs *cfg = &dinfo->cfg;
866 *identptr = cfg->vpd.vpd_ident;
868 if (*identptr == NULL)
875 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
878 struct pci_devinfo *dinfo = device_get_ivars(child);
879 pcicfgregs *cfg = &dinfo->cfg;
882 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
883 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
884 sizeof cfg->vpd.vpd_ros[i].keyword) == 0) {
885 *vptr = cfg->vpd.vpd_ros[i].value;
888 if (i != cfg->vpd.vpd_rocnt)
/*
 * Walk the capability list in live config space (unlike
 * pci_read_extcap, which caches at probe time) looking for
 * 'capability'.  Bails out with ENXIO when the CAP_LIST status bit is
 * clear or the header type has no capability pointer.
 */
896 * Return the offset in configuration space of the requested extended
897 * capability entry or 0 if the specified capability was not found.
900 pci_find_extcap_method(device_t dev, device_t child, int capability,
903 struct pci_devinfo *dinfo = device_get_ivars(child);
904 pcicfgregs *cfg = &dinfo->cfg;
909 * Check the CAP_LIST bit of the PCI status register first.
911 status = pci_read_config(child, PCIR_STATUS, 2);
912 if (!(status & PCIM_STATUS_CAPPRESENT))
916 * Determine the start pointer of the capabilities list.
918 switch (cfg->hdrtype & PCIM_HDRTYPE) {
924 ptr = PCIR_CAP_PTR_2;
928 return (ENXIO); /* no extended capabilities support */
930 ptr = pci_read_config(child, ptr, 1);
933 * Traverse the capabilities list.
936 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
941 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
/*
 * MSI-X table/PBA helpers.  Each MSI-X table entry is 16 bytes:
 * address low/high, data, then vector control at offset 12.
 * pci_enable_msix() programs one entry's address/data;
 * pci_mask_msix()/pci_unmask_msix() toggle the vector-control mask bit
 * (read-modify-write, skipping the write when already in the desired
 * state); pci_pending_msix() tests the vector's bit in the PBA.
 * NOTE(review): the mask/pending helpers index with "index * 16" /
 * "(index / 4) * 4" and "1 << index % 32" — the /4 vs %32 pairing in
 * pci_pending_msix looks inconsistent, but missing lines prevent
 * confirming; flag for follow-up against upstream.
 */
948 * Support for MSI-X message interrupts.
951 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
953 struct pci_devinfo *dinfo = device_get_ivars(dev);
954 pcicfgregs *cfg = &dinfo->cfg;
957 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
958 offset = cfg->msix.msix_table_offset + index * 16;
959 bus_write_4(cfg->msix.msix_table_res, offset, address & 0xffffffff);
960 bus_write_4(cfg->msix.msix_table_res, offset + 4, address >> 32);
961 bus_write_4(cfg->msix.msix_table_res, offset + 8, data);
965 pci_mask_msix(device_t dev, u_int index)
967 struct pci_devinfo *dinfo = device_get_ivars(dev);
968 pcicfgregs *cfg = &dinfo->cfg;
969 uint32_t offset, val;
971 KASSERT(cfg->msix.msix_msgnum > index, ("bogus index"));
972 offset = cfg->msix.msix_table_offset + index * 16 + 12;
973 val = bus_read_4(cfg->msix.msix_table_res, offset);
974 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
975 val |= PCIM_MSIX_VCTRL_MASK;
976 bus_write_4(cfg->msix.msix_table_res, offset, val);
981 pci_unmask_msix(device_t dev, u_int index)
983 struct pci_devinfo *dinfo = device_get_ivars(dev);
984 pcicfgregs *cfg = &dinfo->cfg;
985 uint32_t offset, val;
987 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
988 offset = cfg->msix.msix_table_offset + index * 16 + 12;
989 val = bus_read_4(cfg->msix.msix_table_res, offset);
990 if (val & PCIM_MSIX_VCTRL_MASK) {
991 val &= ~PCIM_MSIX_VCTRL_MASK;
992 bus_write_4(cfg->msix.msix_table_res, offset, val);
997 pci_pending_msix(device_t dev, u_int index)
999 struct pci_devinfo *dinfo = device_get_ivars(dev);
1000 pcicfgregs *cfg = &dinfo->cfg;
1001 uint32_t offset, bit;
1003 KASSERT(cfg->msix.msix_alloc > index, ("bogus index"));
1004 offset = cfg->msix.msix_pba_offset + (index / 4) * 4;
1005 bit = 1 << index % 32;
1006 return (bus_read_4(cfg->msix.msix_pba_res, offset) & bit);
/*
 * Try to allocate *count MSI-X vectors for 'child'.  Requires the
 * MSI-X capability, the pci_do_msix tunable, and the table/PBA BARs
 * already mapped and active in the child's resource list.  Messages
 * are obtained one at a time from the parent bridge via
 * PCIB_ALLOC_MSIX and added as SYS_RES_IRQ rids starting at 1; all
 * vectors are then masked and the capability's enable bit is set.
 * NOTE(review): error-return lines and the bootverbose framing are
 * missing from this extract; code kept verbatim.
 */
1010 pci_alloc_msix(device_t dev, device_t child, int *count)
1012 struct pci_devinfo *dinfo = device_get_ivars(child);
1013 pcicfgregs *cfg = &dinfo->cfg;
1014 struct resource_list_entry *rle;
1015 int actual, error, i, irq, max;
1017 /* MSI-X capability present? */
1018 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1021 /* Make sure the appropriate BARs are mapped. */
1022 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1023 cfg->msix.msix_table_bar);
1024 if (rle == NULL || rle->res == NULL ||
1025 !(rman_get_flags(rle->res) & RF_ACTIVE))
1027 cfg->msix.msix_table_res = rle->res;
1028 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1029 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1030 cfg->msix.msix_pba_bar);
1031 if (rle == NULL || rle->res == NULL ||
1032 !(rman_get_flags(rle->res) & RF_ACTIVE))
1035 cfg->msix.msix_pba_res = rle->res;
1037 /* Already have allocated messages? */
1038 if (cfg->msix.msix_alloc != 0)
1042 device_printf(child,
1043 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1044 *count, cfg->msix.msix_msgnum);
1045 max = min(*count, cfg->msix.msix_msgnum);
1046 for (i = 0; i < max; i++) {
1047 /* Allocate a message. */
1048 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, i,
1052 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1058 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1060 device_printf(child, "using IRQ %lu for MSI-X\n",
1066 * Be fancy and try to print contiguous runs of
1067 * IRQ values as ranges. 'irq' is the previous IRQ.
1068 * 'run' is true if we are in a range.
1070 device_printf(child, "using IRQs %lu", rle->start);
1073 for (i = 1; i < actual; i++) {
1074 rle = resource_list_find(&dinfo->resources,
1075 SYS_RES_IRQ, i + 1);
1077 /* Still in a run? */
1078 if (rle->start == irq + 1) {
1084 /* Finish previous range. */
1090 /* Start new range. */
1091 printf(",%lu", rle->start);
1095 /* Unfinished range? */
1098 printf(" for MSI-X\n");
1102 /* Mask all vectors. */
1103 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1104 pci_mask_msix(child, i);
1106 /* Update control register to enable MSI-X. */
1107 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1108 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1109 cfg->msix.msix_ctrl, 2);
1111 /* Update counts of alloc'd messages. */
1112 cfg->msix.msix_alloc = actual;
/*
 * Release all MSI-X messages held by 'child': verify none of the IRQ
 * resources are still allocated, clear the MSI-X enable bit, then hand
 * each message back to the parent bridge and delete its resource-list
 * entry.  NOTE(review): early-return lines are missing from this
 * extract.
 */
1118 pci_release_msix(device_t dev, device_t child)
1120 struct pci_devinfo *dinfo = device_get_ivars(child);
1121 pcicfgregs *cfg = &dinfo->cfg;
1122 struct resource_list_entry *rle;
1125 /* Do we have any messages to release? */
1126 if (cfg->msix.msix_alloc == 0)
1129 /* Make sure none of the resources are allocated. */
1130 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1131 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1132 KASSERT(rle != NULL, ("missing MSI resource"));
1133 if (rle->res != NULL)
1137 /* Update control register to disable MSI-X. */
1138 cfg->msix.msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1139 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1140 cfg->msix.msix_ctrl, 2);
1142 /* Release the messages. */
1143 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1144 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1145 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1147 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1150 /* Update alloc count. */
1151 cfg->msix.msix_alloc = 0;
/*
 * pci_enable_msi(): cache the MSI address/data in the softc, program
 * the capability's address (and, for 64-bit capable devices, the high
 * address and 64-bit data register), then set the MSI enable bit.
 * pci_resume_msi(): replay the same programming from the cached values
 * on resume, but only when MSI was enabled at suspend time; the
 * control register is rewritten unconditionally.
 */
1156 * Support for MSI message signalled interrupts.
1159 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1161 struct pci_devinfo *dinfo = device_get_ivars(dev);
1162 pcicfgregs *cfg = &dinfo->cfg;
1164 /* Write data and address values. */
1165 cfg->msi.msi_addr = address;
1166 cfg->msi.msi_data = data;
1167 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
1168 address & 0xffffffff, 4);
1169 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
1170 pci_write_config(dev, cfg->msi.msi_location +
1171 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1172 pci_write_config(dev, cfg->msi.msi_location +
1173 PCIR_MSI_DATA_64BIT, data, 2);
1175 pci_write_config(dev, cfg->msi.msi_location +
1176 PCIR_MSI_DATA, data, 2);
1178 /* Enable MSI in the control register. */
1179 cfg->msi.msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1180 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
1181 cfg->msi.msi_ctrl, 2);
1185 * Restore MSI registers during resume. If MSI is enabled then
1186 * restore the data and address registers in addition to the control
1190 pci_resume_msi(device_t dev)
1192 struct pci_devinfo *dinfo = device_get_ivars(dev);
1193 pcicfgregs *cfg = &dinfo->cfg;
1197 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1198 address = cfg->msi.msi_addr;
1199 data = cfg->msi.msi_data;
1200 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_ADDR,
1201 address & 0xffffffff, 4);
1202 if (cfg->msi.msi_ctrl & PCIM_MSICTRL_64BIT) {
1203 pci_write_config(dev, cfg->msi.msi_location +
1204 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1205 pci_write_config(dev, cfg->msi.msi_location +
1206 PCIR_MSI_DATA_64BIT, data, 2);
1208 pci_write_config(dev, cfg->msi.msi_location +
1209 PCIR_MSI_DATA, data, 2);
1211 pci_write_config(dev, cfg->msi.msi_location + PCIR_MSI_CTRL,
1212 cfg->msi.msi_ctrl, 2);
/*
 * pci_msi_device_blacklisted(): scan pci_quirks for a
 * PCI_QUIRK_DISABLE_MSI entry matching this device's devid; honored
 * only when the hw.pci.honor_msi_blacklist tunable is set.
 * pci_msi_blacklisted(): system-wide check keyed off the host bridge
 * at 0:0:0.
 */
1216 * Returns true if the specified device is blacklisted because MSI
1220 pci_msi_device_blacklisted(device_t dev)
1222 struct pci_quirk *q;
1224 if (!pci_honor_msi_blacklist)
1227 for (q = &pci_quirks[0]; q->devid; q++) {
1228 if (q->devid == pci_get_devid(dev) &&
1229 q->type == PCI_QUIRK_DISABLE_MSI)
1236 * Determine if MSI is blacklisted globally on this system. Currently,
1237 * we just check for blacklisted chipsets as represented by the
1238 * host-PCI bridge at device 0:0:0. In the future, it may become
1239 * necessary to check other system attributes, such as the kenv values
1240 * that give the motherboard manufacturer and model number.
1243 pci_msi_blacklisted(void)
1247 if (!pci_honor_msi_blacklist)
1250 dev = pci_find_bsf(0, 0, 0);
1252 return (pci_msi_device_blacklisted(dev));
/*
 * Allocate up to *count MSI messages for 'child'.  MSI-X is tried
 * first (pci_alloc_msix); plain MSI is used only when that returns
 * ENODEV.  The request is clamped to the device's supported count and
 * to 32, and must be a power of two per the MSI spec.  On success the
 * IRQs are added as SYS_RES_IRQ rids starting at 1 and the control
 * register's Multiple Message Enable field is set to log2(actual).
 * NOTE(review): several error-return lines are missing from this
 * extract; code kept verbatim.
 */
1257 * Attempt to allocate *count MSI messages. The actual number allocated is
1258 * returned in *count. After this function returns, each message will be
1259 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
1262 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1264 struct pci_devinfo *dinfo = device_get_ivars(child);
1265 pcicfgregs *cfg = &dinfo->cfg;
1266 struct resource_list_entry *rle;
1267 int actual, error, i, irqs[32];
1270 /* Don't let count == 0 get us into trouble. */
1274 /* If rid 0 is allocated, then fail. */
1275 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1276 if (rle != NULL && rle->res != NULL)
1279 /* If MSI is blacklisted for this system, fail. */
1280 if (pci_msi_blacklisted())
1283 /* Try MSI-X first. */
1284 error = pci_alloc_msix(dev, child, count);
1285 if (error != ENODEV)
1288 /* MSI capability present? */
1289 if (cfg->msi.msi_location == 0 || !pci_do_msi)
1292 /* Already have allocated messages? */
1293 if (cfg->msi.msi_alloc != 0)
1297 device_printf(child,
1298 "attempting to allocate %d MSI vectors (%d supported)\n",
1299 *count, cfg->msi.msi_msgnum);
1301 /* Don't ask for more than the device supports. */
1302 actual = min(*count, cfg->msi.msi_msgnum);
1304 /* Don't ask for more than 32 messages. */
1305 actual = min(actual, 32);
1307 /* MSI requires power of 2 number of messages. */
1308 if (!powerof2(actual))
1312 /* Try to allocate N messages. */
1313 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
1314 cfg->msi.msi_msgnum, irqs);
1325 * We now have N actual messages mapped onto SYS_RES_IRQ
1326 * resources in the irqs[] array, so add new resources
1327 * starting at rid 1.
1329 for (i = 0; i < actual; i++)
1330 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1331 irqs[i], irqs[i], 1);
1335 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
1340 * Be fancy and try to print contiguous runs
1341 * of IRQ values as ranges. 'run' is true if
1342 * we are in a range.
1344 device_printf(child, "using IRQs %d", irqs[0]);
1346 for (i = 1; i < actual; i++) {
1348 /* Still in a run? */
1349 if (irqs[i] == irqs[i - 1] + 1) {
1354 /* Finish previous range. */
1356 printf("-%d", irqs[i - 1]);
1360 /* Start new range. */
1361 printf(",%d", irqs[i]);
1364 /* Unfinished range? */
1366 printf("%d", irqs[actual - 1]);
1367 printf(" for MSI\n");
1371 /* Update control register with actual count and enable MSI. */
1372 ctrl = cfg->msi.msi_ctrl;
1373 ctrl &= ~PCIM_MSICTRL_MME_MASK;
1374 ctrl |= (ffs(actual) - 1) << 4;
1375 cfg->msi.msi_ctrl = ctrl;
1376 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1378 /* Update counts of alloc'd messages. */
1379 cfg->msi.msi_alloc = actual;
1384 /* Release the MSI messages associated with this device. */
1386 pci_release_msi_method(device_t dev, device_t child)
1388 struct pci_devinfo *dinfo = device_get_ivars(child);
1389 pcicfgregs *cfg = &dinfo->cfg;
1390 struct resource_list_entry *rle;
1391 int error, i, irqs[32];
1393 /* Try MSI-X first; anything other than ENODEV means MSI-X handled it. */
1394 error = pci_release_msix(dev, child);
1395 if (error != ENODEV)
1398 /* Do we have any messages to release? */
1399 if (cfg->msi.msi_alloc == 0)
1401 KASSERT(cfg->msi.msi_alloc <= 32, ("more than 32 alloc'd messages"));
1403 /* Make sure none of the resources are allocated. */
1404 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1405 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1406 KASSERT(rle != NULL, ("missing MSI resource"));
1407 if (rle->res != NULL)
1409 irqs[i] = rle->start;
1412 /* Update control register with 0 count and disable MSI. */
1413 cfg->msi.msi_ctrl &= ~(PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE);
1414 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL,
1415 cfg->msi.msi_ctrl, 2);
1417 /* Release the messages back to the parent bridge. */
1418 PCIB_RELEASE_MSI(device_get_parent(dev), child, cfg->msi.msi_alloc,
1420 for (i = 0; i < cfg->msi.msi_alloc; i++)
1421 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1)
1423 /* Update alloc count. */
1424 cfg->msi.msi_alloc = 0;
1429 * Return the max supported MSI or MSI-X messages this device supports.
1430 * Basically, assuming the MD code can alloc messages, this function
1431 * should return the maximum value that pci_alloc_msi() can return. Thus,
1432 * it is subject to the tunables, etc.
1435 pci_msi_count_method(device_t dev, device_t child)
1437 struct pci_devinfo *dinfo = device_get_ivars(child);
1438 pcicfgregs *cfg = &dinfo->cfg;
1440 if (pci_do_msix && cfg->msix.msix_location != 0)
1441 return (cfg->msix.msix_msgnum);
1442 if (pci_do_msi && cfg->msi.msi_location != 0)
1443 return (cfg->msi.msi_msgnum);
1447 /* free pcicfgregs structure and all depending data structures */
1450 pci_freecfg(struct pci_devinfo *dinfo)
1452 struct devlist *devlist_head;
1455 devlist_head = &pci_devq;
1457 if (dinfo->cfg.vpd.vpd_reg) {
1458 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
1459 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
1460 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
1461 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
1462 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
1463 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
1464 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
1466 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
1467 free(dinfo, M_DEVBUF);
1469 /* increment the generation count */
1472 /* we're losing one device */
1478 * PCI power management
1481 pci_set_powerstate_method(device_t dev, device_t child, int state)
1483 struct pci_devinfo *dinfo = device_get_ivars(child);
1484 pcicfgregs *cfg = &dinfo->cfg;
1486 int result, oldstate, highest, delay;
1488 if (cfg->pp.pp_cap == 0)
1489 return (EOPNOTSUPP);
1492 * Optimize a no state change request away. While it would be OK to
1493 * write to the hardware in theory, some devices have shown odd
1494 * behavior when going from D3 -> D3.
1496 oldstate = pci_get_powerstate(child);
1497 if (oldstate == state)
1501 * The PCI power management specification states that after a state
1502 * transition between PCI power states, system software must
1503 * guarantee a minimal delay before the function accesses the device.
1504 * Compute the worst case delay that we need to guarantee before we
1505 * access the device. Many devices will be responsive much more
1506 * quickly than this delay, but there are some that don't respond
1507 * instantly to state changes. Transitions to/from D3 state require
1508 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
1509 * is done below with DELAY rather than a sleeper function because
1510 * this function can be called from contexts where we cannot sleep.
1512 highest = (oldstate > state) ? oldstate : state;
1513 if (highest == PCI_POWERSTATE_D3)
1515 else if (highest == PCI_POWERSTATE_D2)
1519 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
1520 & ~PCIM_PSTAT_DMASK;
1523 case PCI_POWERSTATE_D0:
1524 status |= PCIM_PSTAT_D0;
1526 case PCI_POWERSTATE_D1:
1527 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
1528 return (EOPNOTSUPP);
1529 status |= PCIM_PSTAT_D1;
1531 case PCI_POWERSTATE_D2:
1532 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
1533 return (EOPNOTSUPP);
1534 status |= PCIM_PSTAT_D2;
1536 case PCI_POWERSTATE_D3:
1537 status |= PCIM_PSTAT_D3;
1545 "pci%d:%d:%d: Transition from D%d to D%d\n",
1546 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func,
1549 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
1556 pci_get_powerstate_method(device_t dev, device_t child)
1558 struct pci_devinfo *dinfo = device_get_ivars(child);
1559 pcicfgregs *cfg = &dinfo->cfg;
1563 if (cfg->pp.pp_cap != 0) {
1564 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
1565 switch (status & PCIM_PSTAT_DMASK) {
1567 result = PCI_POWERSTATE_D0;
1570 result = PCI_POWERSTATE_D1;
1573 result = PCI_POWERSTATE_D2;
1576 result = PCI_POWERSTATE_D3;
1579 result = PCI_POWERSTATE_UNKNOWN;
1583 /* No support, device is always at D0 */
1584 result = PCI_POWERSTATE_D0;
1590 * Some convenience functions for PCI device drivers.
1593 static __inline void
1594 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
1598 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1600 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
1603 static __inline void
1604 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
1608 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1610 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
1614 pci_enable_busmaster_method(device_t dev, device_t child)
1616 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
1621 pci_disable_busmaster_method(device_t dev, device_t child)
1623 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
1628 pci_enable_io_method(device_t dev, device_t child, int space)
1638 case SYS_RES_IOPORT:
1639 bit = PCIM_CMD_PORTEN;
1642 case SYS_RES_MEMORY:
1643 bit = PCIM_CMD_MEMEN;
1649 pci_set_command_bit(dev, child, bit);
1650 /* Some devices seem to need a brief stall here, what to do? */
1651 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1654 device_printf(child, "failed to enable %s mapping!\n", error);
1659 pci_disable_io_method(device_t dev, device_t child, int space)
1669 case SYS_RES_IOPORT:
1670 bit = PCIM_CMD_PORTEN;
1673 case SYS_RES_MEMORY:
1674 bit = PCIM_CMD_MEMEN;
1680 pci_clear_command_bit(dev, child, bit);
1681 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1682 if (command & bit) {
1683 device_printf(child, "failed to disable %s mapping!\n", error);
1690 * New style pci driver. Parent device is either a pci-host-bridge or a
1691 * pci-pci-bridge. Both kinds are represented by instances of pcib.
1695 pci_print_verbose(struct pci_devinfo *dinfo)
1700 pcicfgregs *cfg = &dinfo->cfg;
1702 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
1703 cfg->vendor, cfg->device, cfg->revid);
1704 printf("\tbus=%d, slot=%d, func=%d\n",
1705 cfg->bus, cfg->slot, cfg->func);
1706 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
1707 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
1709 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
1710 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
1711 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
1712 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
1713 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
1714 if (cfg->intpin > 0)
1715 printf("\tintpin=%c, irq=%d\n",
1716 cfg->intpin +'a' -1, cfg->intline);
1717 if (cfg->pp.pp_cap) {
1720 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
1721 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
1722 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
1723 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
1724 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
1725 status & PCIM_PSTAT_DMASK);
1727 if (cfg->vpd.vpd_reg) {
1728 printf("\tVPD Ident: %s\n", cfg->vpd.vpd_ident);
1729 for (i = 0; i < cfg->vpd.vpd_rocnt; i++) {
1730 struct vpd_readonly *vrop;
1731 vrop = &cfg->vpd.vpd_ros[i];
1732 if (strncmp("CP", vrop->keyword, 2) == 0)
1733 printf("\tCP: id %d, BAR%d, off %#x\n",
1734 vrop->value[0], vrop->value[1],
1736 *(uint16_t *)&vrop->value[2]));
1737 else if (strncmp("RV", vrop->keyword, 2) == 0)
1738 printf("\tRV: %#hhx\n", vrop->value[0]);
1740 printf("\t%.2s: %s\n", vrop->keyword,
1743 for (i = 0; i < cfg->vpd.vpd_wcnt; i++) {
1744 struct vpd_write *vwp;
1745 vwp = &cfg->vpd.vpd_w[i];
1746 if (strncmp("RW", vwp->keyword, 2) != 0)
1747 printf("\t%.2s(%#x-%#x): %s\n",
1748 vwp->keyword, vwp->start,
1749 vwp->start + vwp->len, vwp->value);
1752 if (cfg->msi.msi_location) {
1755 ctrl = cfg->msi.msi_ctrl;
1756 printf("\tMSI supports %d message%s%s%s\n",
1757 cfg->msi.msi_msgnum,
1758 (cfg->msi.msi_msgnum == 1) ? "" : "s",
1759 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
1760 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
1762 if (cfg->msix.msix_location) {
1763 printf("\tMSI-X supports %d message%s ",
1764 cfg->msix.msix_msgnum,
1765 (cfg->msix.msix_msgnum == 1) ? "" : "s");
1766 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
1767 printf("in map 0x%x\n",
1768 cfg->msix.msix_table_bar);
1770 printf("in maps 0x%x and 0x%x\n",
1771 cfg->msix.msix_table_bar,
1772 cfg->msix.msix_pba_bar);
1778 pci_porten(device_t pcib, int b, int s, int f)
1780 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1781 & PCIM_CMD_PORTEN) != 0;
1785 pci_memen(device_t pcib, int b, int s, int f)
1787 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1788 & PCIM_CMD_MEMEN) != 0;
1792 * Add a resource based on a pci map register. Return 1 if the map
1793 * register is a 32bit map register or 2 if it is a 64bit register.
1796 pci_add_map(device_t pcib, device_t bus, device_t dev,
1797 int b, int s, int f, int reg, struct resource_list *rl, int force,
1802 pci_addr_t start, end, count;
1809 struct resource *res;
1811 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1812 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
1813 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1814 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
1816 if (pci_maptype(map) & PCI_MAPMEM)
1817 type = SYS_RES_MEMORY;
1819 type = SYS_RES_IOPORT;
1820 ln2size = pci_mapsize(testval);
1821 ln2range = pci_maprange(testval);
1822 base = pci_mapbase(map);
1823 barlen = ln2range == 64 ? 2 : 1;
1826 * For I/O registers, if bottom bit is set, and the next bit up
1827 * isn't clear, we know we have a BAR that doesn't conform to the
1828 * spec, so ignore it. Also, sanity check the size of the data
1829 * areas to the type of memory involved. Memory must be at least
1830 * 16 bytes in size, while I/O ranges must be at least 4.
1832 if ((testval & 0x1) == 0x1 &&
1833 (testval & 0x2) != 0)
1835 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
1836 (type == SYS_RES_IOPORT && ln2size < 2))
1840 /* Read the other half of a 64bit map register */
1841 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
1843 printf("\tmap[%02x]: type %x, range %2d, base %#jx, size %2d",
1844 reg, pci_maptype(map), ln2range, (uintmax_t)base, ln2size);
1845 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1846 printf(", port disabled\n");
1847 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1848 printf(", memory disabled\n");
1850 printf(", enabled\n");
1854 * If base is 0, then we have problems. It is best to ignore
1855 * such entries for the moment. These will be allocated later if
1856 * the driver specifically requests them. However, some
1857 * removable busses look better when all resources are allocated,
1858 * so allow '0' to be overridden.
1860 * Similarly treat maps whose value is the same as the test value
1861 * read back. These maps have had all f's written to them by the
1862 * BIOS in an attempt to disable the resources.
1864 if (!force && (base == 0 || map == testval))
1866 if ((u_long)base != base) {
1868 "pci%d:%d:%d bar %#x too many address bits", b, s, f, reg);
1873 * This code theoretically does the right thing, but has
1874 * undesirable side effects in some cases where peripherals
1875 * respond oddly to having these bits enabled. Let the user
1876 * be able to turn them off (since pci_enable_io_modes is 1 by
1879 if (pci_enable_io_modes) {
1880 /* Turn on resources that have been left off by a lazy BIOS */
1881 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
1882 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1883 cmd |= PCIM_CMD_PORTEN;
1884 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1886 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
1887 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1888 cmd |= PCIM_CMD_MEMEN;
1889 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1892 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1894 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1898 count = 1 << ln2size;
1899 if (base == 0 || base == pci_mapbase(testval)) {
1900 start = 0; /* Let the parent decide */
1904 end = base + (1 << ln2size) - 1;
1906 resource_list_add(rl, type, reg, start, end, count);
1909 * Not quite sure what to do on failure of allocating the resource
1910 * since I can postulate several right answers.
1912 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
1913 prefetch ? RF_PREFETCHABLE : 0);
1916 start = rman_get_start(res);
1917 if ((u_long)start != start) {
1918 /* Wait a minute! this platform can't do this address. */
1920 "pci%d.%d.%x bar %#x start %#jx, too many bits.",
1921 b, s, f, reg, (uintmax_t)start);
1922 resource_list_release(rl, bus, dev, type, reg, res);
1925 pci_write_config(dev, reg, start, 4);
1927 pci_write_config(dev, reg + 4, start >> 32, 4);
1932 * For ATA devices we need to decide early what addressing mode to use.
1933 * Legacy demands that the primary and secondary ATA ports sits on the
1934 * same addresses that old ISA hardware did. This dictates that we use
1935 * those addresses and ignore the BAR's if we cannot set PCI native
1939 pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
1940 int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
1942 int rid, type, progif;
1944 /* if this device supports PCI native addressing use it */
1945 progif = pci_read_config(dev, PCIR_PROGIF, 1);
1946 if ((progif & 0x8a) == 0x8a) {
1947 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
1948 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
1949 printf("Trying ATA native PCI addressing mode\n");
1950 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
1954 progif = pci_read_config(dev, PCIR_PROGIF, 1);
1955 type = SYS_RES_IOPORT;
1956 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
1957 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
1958 prefetchmask & (1 << 0));
1959 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
1960 prefetchmask & (1 << 1));
1963 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
1964 resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
1967 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
1968 resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
1971 if (progif & PCIP_STORAGE_IDE_MODESEC) {
1972 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
1973 prefetchmask & (1 << 2));
1974 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
1975 prefetchmask & (1 << 3));
1978 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
1979 resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
1982 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
1983 resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
1986 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
1987 prefetchmask & (1 << 4));
1988 pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
1989 prefetchmask & (1 << 5));
1993 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
1995 struct pci_devinfo *dinfo = device_get_ivars(dev);
1996 pcicfgregs *cfg = &dinfo->cfg;
1997 char tunable_name[64];
2000 /* Has to have an intpin to have an interrupt. */
2001 if (cfg->intpin == 0)
2004 /* Let the user override the IRQ with a tunable. */
2005 irq = PCI_INVALID_IRQ;
2006 snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.INT%c.irq",
2007 cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
2008 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2009 irq = PCI_INVALID_IRQ;
2012 * If we didn't get an IRQ via the tunable, then we either use the
2013 * IRQ value in the intline register or we ask the bus to route an
2014 * interrupt for us. If force_route is true, then we only use the
2015 * value in the intline register if the bus was unable to assign an
2018 if (!PCI_INTERRUPT_VALID(irq)) {
2019 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2020 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2021 if (!PCI_INTERRUPT_VALID(irq))
2025 /* If after all that we don't have an IRQ, just bail. */
2026 if (!PCI_INTERRUPT_VALID(irq))
2029 /* Update the config register if it changed. */
2030 if (irq != cfg->intline) {
2032 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2035 /* Add this IRQ as rid 0 interrupt resource. */
2036 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
2040 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
2043 struct pci_devinfo *dinfo = device_get_ivars(dev);
2044 pcicfgregs *cfg = &dinfo->cfg;
2045 struct resource_list *rl = &dinfo->resources;
2046 struct pci_quirk *q;
2049 pcib = device_get_parent(bus);
2055 /* ATA devices need special map treatment */
2056 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2057 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
2058 (pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV))
2059 pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
2061 for (i = 0; i < cfg->nummaps;)
2062 i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
2063 rl, force, prefetchmask & (1 << i));
2066 * Add additional, quirked resources.
2068 for (q = &pci_quirks[0]; q->devid; q++) {
2069 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2070 && q->type == PCI_QUIRK_MAP_REG)
2071 pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
2075 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2076 #ifdef __PCI_REROUTE_INTERRUPT
2078 * Try to re-route interrupts. Sometimes the BIOS or
2079 * firmware may leave bogus values in these registers.
2080 * If the re-route fails, then just stick with what we
2083 pci_assign_interrupt(bus, dev, 1);
2085 pci_assign_interrupt(bus, dev, 0);
2091 pci_add_children(device_t dev, int busno, size_t dinfo_size)
2093 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2094 device_t pcib = device_get_parent(dev);
2095 struct pci_devinfo *dinfo;
2097 int s, f, pcifunchigh;
2100 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2101 ("dinfo_size too small"));
2102 maxslots = PCIB_MAXSLOTS(pcib);
2103 for (s = 0; s <= maxslots; s++) {
2107 hdrtype = REG(PCIR_HDRTYPE, 1);
2108 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
2110 if (hdrtype & PCIM_MFDEV)
2111 pcifunchigh = PCI_FUNCMAX;
2112 for (f = 0; f <= pcifunchigh; f++) {
2113 dinfo = pci_read_device(pcib, busno, s, f, dinfo_size);
2114 if (dinfo != NULL) {
2115 pci_add_child(dev, dinfo);
2123 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2125 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2126 device_set_ivars(dinfo->cfg.dev, dinfo);
2127 resource_list_init(&dinfo->resources);
2128 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2129 pci_cfg_restore(dinfo->cfg.dev, dinfo);
2130 pci_print_verbose(dinfo);
2131 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
2135 pci_probe(device_t dev)
2138 device_set_desc(dev, "PCI bus");
2140 /* Allow other subclasses to override this driver. */
2145 pci_attach(device_t dev)
2150 * Since there can be multiple independently numbered PCI
2151 * busses on systems with multiple PCI domains, we can't use
2152 * the unit number to decide which bus we are probing. We ask
2153 * the parent pcib what our bus number is.
2155 busno = pcib_get_bus(dev);
2157 device_printf(dev, "physical bus=%d\n", busno);
2159 pci_add_children(dev, busno, sizeof(struct pci_devinfo));
2161 return (bus_generic_attach(dev));
2165 pci_suspend(device_t dev)
2167 int dstate, error, i, numdevs;
2168 device_t acpi_dev, child, *devlist;
2169 struct pci_devinfo *dinfo;
2172 * Save the PCI configuration space for each child and set the
2173 * device in the appropriate power state for this sleep state.
2176 if (pci_do_power_resume)
2177 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2178 device_get_children(dev, &devlist, &numdevs);
2179 for (i = 0; i < numdevs; i++) {
2181 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2182 pci_cfg_save(child, dinfo, 0);
2185 /* Suspend devices before potentially powering them down. */
2186 error = bus_generic_suspend(dev);
2188 free(devlist, M_TEMP);
2193 * Always set the device to D3. If ACPI suggests a different
2194 * power state, use it instead. If ACPI is not present, the
2195 * firmware is responsible for managing device power. Skip
2196 * children who aren't attached since they are powered down
2197 * separately. Only manage type 0 devices for now.
2199 for (i = 0; acpi_dev && i < numdevs; i++) {
2201 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2202 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2203 dstate = PCI_POWERSTATE_D3;
2204 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2205 pci_set_powerstate(child, dstate);
2208 free(devlist, M_TEMP);
2213 pci_resume(device_t dev)
2216 device_t acpi_dev, child, *devlist;
2217 struct pci_devinfo *dinfo;
2220 * Set each child to D0 and restore its PCI configuration space.
2223 if (pci_do_power_resume)
2224 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2225 device_get_children(dev, &devlist, &numdevs);
2226 for (i = 0; i < numdevs; i++) {
2228 * Notify ACPI we're going to D0 but ignore the result. If
2229 * ACPI is not present, the firmware is responsible for
2230 * managing device power. Only manage type 0 devices for now.
2233 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2234 if (acpi_dev && device_is_attached(child) &&
2235 dinfo->cfg.hdrtype == 0) {
2236 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2237 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2240 /* Now the device is powered up, restore its config space. */
2241 pci_cfg_restore(child, dinfo);
2243 free(devlist, M_TEMP);
2244 return (bus_generic_resume(dev));
2248 pci_load_vendor_data(void)
2250 caddr_t vendordata, info;
2252 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2253 info = preload_search_info(vendordata, MODINFO_ADDR);
2254 pci_vendordata = *(char **)info;
2255 info = preload_search_info(vendordata, MODINFO_SIZE);
2256 pci_vendordata_size = *(size_t *)info;
2257 /* terminate the database */
2258 pci_vendordata[pci_vendordata_size] = '\n';
2263 pci_driver_added(device_t dev, driver_t *driver)
2268 struct pci_devinfo *dinfo;
2272 device_printf(dev, "driver added\n");
2273 DEVICE_IDENTIFY(driver, dev);
2274 device_get_children(dev, &devlist, &numdevs);
2275 for (i = 0; i < numdevs; i++) {
2277 if (device_get_state(child) != DS_NOTPRESENT)
2279 dinfo = device_get_ivars(child);
2280 pci_print_verbose(dinfo);
2282 printf("pci%d:%d:%d: reprobing on driver added\n",
2283 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func);
2284 pci_cfg_restore(child, dinfo);
2285 if (device_probe_and_attach(child) != 0)
2286 pci_cfg_save(child, dinfo, 1);
2288 free(devlist, M_TEMP);
2292 pci_print_child(device_t dev, device_t child)
2294 struct pci_devinfo *dinfo;
2295 struct resource_list *rl;
2298 dinfo = device_get_ivars(child);
2299 rl = &dinfo->resources;
2301 retval += bus_print_child_header(dev, child);
2303 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
2304 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
2305 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
2306 if (device_get_flags(dev))
2307 retval += printf(" flags %#x", device_get_flags(dev));
2309 retval += printf(" at device %d.%d", pci_get_slot(child),
2310 pci_get_function(child));
2312 retval += bus_print_child_footer(dev, child);
2322 } pci_nomatch_tab[] = {
2323 {PCIC_OLD, -1, "old"},
2324 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
2325 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
2326 {PCIC_STORAGE, -1, "mass storage"},
2327 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
2328 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
2329 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
2330 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
2331 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
2332 {PCIC_NETWORK, -1, "network"},
2333 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
2334 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
2335 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
2336 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
2337 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
2338 {PCIC_DISPLAY, -1, "display"},
2339 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
2340 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
2341 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
2342 {PCIC_MULTIMEDIA, -1, "multimedia"},
2343 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
2344 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
2345 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
2346 {PCIC_MEMORY, -1, "memory"},
2347 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
2348 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
2349 {PCIC_BRIDGE, -1, "bridge"},
2350 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
2351 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
2352 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
2353 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
2354 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
2355 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
2356 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
2357 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
2358 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
2359 {PCIC_SIMPLECOMM, -1, "simple comms"},
2360 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
2361 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
2362 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
2363 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
2364 {PCIC_BASEPERIPH, -1, "base peripheral"},
2365 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
2366 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
2367 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
2368 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
2369 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
2370 {PCIC_INPUTDEV, -1, "input device"},
2371 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
2372 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
2373 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
2374 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
2375 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
2376 {PCIC_DOCKING, -1, "docking station"},
2377 {PCIC_PROCESSOR, -1, "processor"},
2378 {PCIC_SERIALBUS, -1, "serial bus"},
2379 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
2380 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
2381 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
2382 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
2383 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
2384 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
2385 {PCIC_WIRELESS, -1, "wireless controller"},
2386 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
2387 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
2388 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
2389 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
2390 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
2391 {PCIC_SATCOM, -1, "satellite communication"},
2392 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
2393 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
2394 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
2395 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
2396 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
2397 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
2398 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
2399 {PCIC_DASP, -1, "dasp"},
2400 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
2405 pci_probe_nomatch(device_t dev, device_t child)
2408 char *cp, *scp, *device;
2411 * Look for a listing for this device in a loaded device database.
2413 if ((device = pci_describe_device(child)) != NULL) {
2414 device_printf(dev, "<%s>", device);
2415 free(device, M_DEVBUF);
2418 * Scan the class/subclass descriptions for a general
2423 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
2424 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
2425 if (pci_nomatch_tab[i].subclass == -1) {
2426 cp = pci_nomatch_tab[i].desc;
2427 } else if (pci_nomatch_tab[i].subclass ==
2428 pci_get_subclass(child)) {
2429 scp = pci_nomatch_tab[i].desc;
2433 device_printf(dev, "<%s%s%s>",
2435 ((cp != NULL) && (scp != NULL)) ? ", " : "",
2438 printf(" at device %d.%d (no driver attached)\n",
2439 pci_get_slot(child), pci_get_function(child));
2440 if (pci_do_power_nodriver)
2442 (struct pci_devinfo *) device_get_ivars(child), 1);
2447 * Parse the PCI device database, if loaded, and return a pointer to a
2448 * description of the device.
2450 * The database is flat text formatted as follows:
2452 * Any line not in a valid format is ignored.
2453 * Lines are terminated with newline '\n' characters.
2455 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
2458 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
2459 * - devices cannot be listed without a corresponding VENDOR line.
2460 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
2461 * another TAB, then the device name.
2465 * Assuming (ptr) points to the beginning of a line in the database,
2466 * return the vendor or device and description of the next entry.
2467 * The value of (vendor) or (device) inappropriate for the entry type
2468 * is set to -1. Returns nonzero at the end of the database.
2470 * Note that this is slightly unrobust in the face of corrupt data;
2471 * we attempt to safeguard against this by spamming the end of the
2472 * database with a newline when we initialise.
2475 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
2484 left = pci_vendordata_size - (cp - pci_vendordata);
2492 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
2496 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
2499 /* skip to next line */
2500 while (*cp != '\n' && left > 0) {
2509 /* skip to next line */
2510 while (*cp != '\n' && left > 0) {
2514 if (*cp == '\n' && left > 0)
2521 pci_describe_device(device_t dev)
2524 char *desc, *vp, *dp, *line;
2526 desc = vp = dp = NULL;
2529 * If we have no vendor data, we can't do anything.
2531 if (pci_vendordata == NULL)
2535 * Scan the vendor data looking for this device
2537 line = pci_vendordata;
2538 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
2541 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
2543 if (vendor == pci_get_vendor(dev))
2546 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
2549 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
2557 if (device == pci_get_device(dev))
2561 snprintf(dp, 80, "0x%x", pci_get_device(dev));
2562 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
2564 sprintf(desc, "%s, %s", vp, dp);
2574 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
2576 struct pci_devinfo *dinfo;
2579 dinfo = device_get_ivars(child);
2583 case PCI_IVAR_ETHADDR:
2585 * The generic accessor doesn't deal with failure, so
2586 * we set the return value, then return an error.
2588 *((uint8_t **) result) = NULL;
2590 case PCI_IVAR_SUBVENDOR:
2591 *result = cfg->subvendor;
2593 case PCI_IVAR_SUBDEVICE:
2594 *result = cfg->subdevice;
2596 case PCI_IVAR_VENDOR:
2597 *result = cfg->vendor;
2599 case PCI_IVAR_DEVICE:
2600 *result = cfg->device;
2602 case PCI_IVAR_DEVID:
2603 *result = (cfg->device << 16) | cfg->vendor;
2605 case PCI_IVAR_CLASS:
2606 *result = cfg->baseclass;
2608 case PCI_IVAR_SUBCLASS:
2609 *result = cfg->subclass;
2611 case PCI_IVAR_PROGIF:
2612 *result = cfg->progif;
2614 case PCI_IVAR_REVID:
2615 *result = cfg->revid;
2617 case PCI_IVAR_INTPIN:
2618 *result = cfg->intpin;
2621 *result = cfg->intline;
2627 *result = cfg->slot;
2629 case PCI_IVAR_FUNCTION:
2630 *result = cfg->func;
2632 case PCI_IVAR_CMDREG:
2633 *result = cfg->cmdreg;
2635 case PCI_IVAR_CACHELNSZ:
2636 *result = cfg->cachelnsz;
2638 case PCI_IVAR_MINGNT:
2639 *result = cfg->mingnt;
2641 case PCI_IVAR_MAXLAT:
2642 *result = cfg->maxlat;
2644 case PCI_IVAR_LATTIMER:
2645 *result = cfg->lattimer;
2654 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
2656 struct pci_devinfo *dinfo;
2658 dinfo = device_get_ivars(child);
2661 case PCI_IVAR_INTPIN:
2662 dinfo->cfg.intpin = value;
2664 case PCI_IVAR_ETHADDR:
2665 case PCI_IVAR_SUBVENDOR:
2666 case PCI_IVAR_SUBDEVICE:
2667 case PCI_IVAR_VENDOR:
2668 case PCI_IVAR_DEVICE:
2669 case PCI_IVAR_DEVID:
2670 case PCI_IVAR_CLASS:
2671 case PCI_IVAR_SUBCLASS:
2672 case PCI_IVAR_PROGIF:
2673 case PCI_IVAR_REVID:
2677 case PCI_IVAR_FUNCTION:
2678 return (EINVAL); /* disallow for now */
2686 #include "opt_ddb.h"
2688 #include <ddb/ddb.h>
2689 #include <sys/cons.h>
2692 * List resources based on pci map registers, used for within ddb
2695 DB_SHOW_COMMAND(pciregs, db_pci_dump)
2697 struct pci_devinfo *dinfo;
2698 struct devlist *devlist_head;
2701 int i, error, none_count;
2704 /* get the head of the device queue */
2705 devlist_head = &pci_devq;
2708 * Go through the list of devices and print out devices
2710 for (error = 0, i = 0,
2711 dinfo = STAILQ_FIRST(devlist_head);
2712 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
2713 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
2715 /* Populate pd_name and pd_unit */
2718 name = device_get_name(dinfo->cfg.dev);
2721 db_printf("%s%d@pci%d:%d:%d:\tclass=0x%06x card=0x%08x "
2722 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
2723 (name && *name) ? name : "none",
2724 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
2726 p->pc_sel.pc_bus, p->pc_sel.pc_dev,
2727 p->pc_sel.pc_func, (p->pc_class << 16) |
2728 (p->pc_subclass << 8) | p->pc_progif,
2729 (p->pc_subdevice << 16) | p->pc_subvendor,
2730 (p->pc_device << 16) | p->pc_vendor,
2731 p->pc_revid, p->pc_hdr);
2736 static struct resource *
2737 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
2738 u_long start, u_long end, u_long count, u_int flags)
2740 struct pci_devinfo *dinfo = device_get_ivars(child);
2741 struct resource_list *rl = &dinfo->resources;
2742 struct resource_list_entry *rle;
2743 struct resource *res;
2744 pci_addr_t map, testval;
2748 * Weed out the bogons, and figure out how large the BAR/map
2749 * is. Bars that read back 0 here are bogus and unimplemented.
2750 * Note: atapci in legacy mode are special and handled elsewhere
2751 * in the code. If you have a atapci device in legacy mode and
2752 * it fails here, that other code is broken.
2755 map = pci_read_config(child, *rid, 4);
2756 pci_write_config(child, *rid, 0xffffffff, 4);
2757 testval = pci_read_config(child, *rid, 4);
2758 if (pci_maprange(testval) == 64)
2759 map |= (pci_addr_t)pci_read_config(child, *rid + 4, 4) << 32;
2760 if (pci_mapbase(testval) == 0)
2762 if (pci_maptype(testval) & PCI_MAPMEM) {
2763 if (type != SYS_RES_MEMORY) {
2766 "child %s requested type %d for rid %#x,"
2767 " but the BAR says it is an memio\n",
2768 device_get_nameunit(child), type, *rid);
2772 if (type != SYS_RES_IOPORT) {
2775 "child %s requested type %d for rid %#x,"
2776 " but the BAR says it is an ioport\n",
2777 device_get_nameunit(child), type, *rid);
2782 * For real BARs, we need to override the size that
2783 * the driver requests, because that's what the BAR
2784 * actually uses and we would otherwise have a
2785 * situation where we might allocate the excess to
2786 * another driver, which won't work.
2788 mapsize = pci_mapsize(testval);
2789 count = 1UL << mapsize;
2790 if (RF_ALIGNMENT(flags) < mapsize)
2791 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
2794 * Allocate enough resource, and then write back the
2795 * appropriate bar for that resource.
2797 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
2798 start, end, count, flags);
2800 device_printf(child,
2801 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
2802 count, *rid, type, start, end);
2805 resource_list_add(rl, type, *rid, start, end, count);
2806 rle = resource_list_find(rl, type, *rid);
2808 panic("pci_alloc_map: unexpectedly can't find resource.");
2810 rle->start = rman_get_start(res);
2811 rle->end = rman_get_end(res);
2814 device_printf(child,
2815 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
2816 count, *rid, type, rman_get_start(res));
2817 map = rman_get_start(res);
2819 pci_write_config(child, *rid, map, 4);
2820 if (pci_maprange(testval) == 64)
2821 pci_write_config(child, *rid + 4, map >> 32, 4);
2827 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
2828 u_long start, u_long end, u_long count, u_int flags)
2830 struct pci_devinfo *dinfo = device_get_ivars(child);
2831 struct resource_list *rl = &dinfo->resources;
2832 struct resource_list_entry *rle;
2833 pcicfgregs *cfg = &dinfo->cfg;
2836 * Perform lazy resource allocation
2838 if (device_get_parent(child) == dev) {
2842 * Can't alloc legacy interrupt once MSI messages
2843 * have been allocated.
2845 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
2846 cfg->msix.msix_alloc > 0))
2849 * If the child device doesn't have an
2850 * interrupt routed and is deserving of an
2851 * interrupt, try to assign it one.
2853 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
2855 pci_assign_interrupt(dev, child, 0);
2857 case SYS_RES_IOPORT:
2858 case SYS_RES_MEMORY:
2859 if (*rid < PCIR_BAR(cfg->nummaps)) {
2861 * Enable the I/O mode. We should
2862 * also be assigning resources too
2863 * when none are present. The
2864 * resource_list_alloc kind of sorta does
2867 if (PCI_ENABLE_IO(dev, child, type))
2870 rle = resource_list_find(rl, type, *rid);
2872 return (pci_alloc_map(dev, child, type, rid,
2873 start, end, count, flags));
2877 * If we've already allocated the resource, then
2878 * return it now. But first we may need to activate
2879 * it, since we don't allocate the resource as active
2880 * above. Normally this would be done down in the
2881 * nexus, but since we short-circuit that path we have
2882 * to do its job here. Not sure if we should free the
2883 * resource if it fails to activate.
2885 rle = resource_list_find(rl, type, *rid);
2886 if (rle != NULL && rle->res != NULL) {
2888 device_printf(child,
2889 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
2890 rman_get_size(rle->res), *rid, type,
2891 rman_get_start(rle->res));
2892 if ((flags & RF_ACTIVE) &&
2893 bus_generic_activate_resource(dev, child, type,
2894 *rid, rle->res) != 0)
2899 return (resource_list_alloc(rl, dev, child, type, rid,
2900 start, end, count, flags));
2904 pci_delete_resource(device_t dev, device_t child, int type, int rid)
2906 struct pci_devinfo *dinfo;
2907 struct resource_list *rl;
2908 struct resource_list_entry *rle;
2910 if (device_get_parent(child) != dev)
2913 dinfo = device_get_ivars(child);
2914 rl = &dinfo->resources;
2915 rle = resource_list_find(rl, type, rid);
2918 if (rman_get_device(rle->res) != dev ||
2919 rman_get_flags(rle->res) & RF_ACTIVE) {
2920 device_printf(dev, "delete_resource: "
2921 "Resource still owned by child, oops. "
2922 "(type=%d, rid=%d, addr=%lx)\n",
2923 rle->type, rle->rid,
2924 rman_get_start(rle->res));
2927 bus_release_resource(dev, type, rid, rle->res);
2929 resource_list_delete(rl, type, rid);
2932 * Why do we turn off the PCI configuration BAR when we delete a
2935 pci_write_config(child, rid, 0, 4);
2936 BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
2939 struct resource_list *
2940 pci_get_resource_list (device_t dev, device_t child)
2942 struct pci_devinfo *dinfo = device_get_ivars(child);
2944 return (&dinfo->resources);
2948 pci_read_config_method(device_t dev, device_t child, int reg, int width)
2950 struct pci_devinfo *dinfo = device_get_ivars(child);
2951 pcicfgregs *cfg = &dinfo->cfg;
2953 return (PCIB_READ_CONFIG(device_get_parent(dev),
2954 cfg->bus, cfg->slot, cfg->func, reg, width));
2958 pci_write_config_method(device_t dev, device_t child, int reg,
2959 uint32_t val, int width)
2961 struct pci_devinfo *dinfo = device_get_ivars(child);
2962 pcicfgregs *cfg = &dinfo->cfg;
2964 PCIB_WRITE_CONFIG(device_get_parent(dev),
2965 cfg->bus, cfg->slot, cfg->func, reg, val, width);
2969 pci_child_location_str_method(device_t dev, device_t child, char *buf,
2973 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
2974 pci_get_function(child));
2979 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
2982 struct pci_devinfo *dinfo;
2985 dinfo = device_get_ivars(child);
2987 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
2988 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
2989 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
2995 pci_assign_interrupt_method(device_t dev, device_t child)
2997 struct pci_devinfo *dinfo = device_get_ivars(child);
2998 pcicfgregs *cfg = &dinfo->cfg;
3000 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
3005 pci_modevent(module_t mod, int what, void *arg)
3007 static struct cdev *pci_cdev;
3011 STAILQ_INIT(&pci_devq);
3013 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
3015 pci_load_vendor_data();
3019 destroy_dev(pci_cdev);
3027 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3032 * Only do header type 0 devices. Type 1 devices are bridges,
3033 * which we know need special treatment. Type 2 devices are
3034 * cardbus bridges which also require special treatment.
3035 * Other types are unknown, and we err on the side of safety
3038 if (dinfo->cfg.hdrtype != 0)
3042 * Restore the device to full power mode. We must do this
3043 * before we restore the registers because moving from D3 to
3044 * D0 will cause the chip's BARs and some other registers to
3045 * be reset to some unknown power on reset values. Cut down
3046 * the noise on boot by doing nothing if we are already in
3049 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3050 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3052 for (i = 0; i < dinfo->cfg.nummaps; i++)
3053 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3054 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3055 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3056 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3057 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3058 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3059 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3060 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3061 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3062 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3063 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3066 * Restore MSI configuration if it is present. If MSI is enabled,
3067 * then restore the data and addr registers.
3069 if (dinfo->cfg.msi.msi_location != 0)
3070 pci_resume_msi(dev);
3074 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3081 * Only do header type 0 devices. Type 1 devices are bridges, which
3082 * we know need special treatment. Type 2 devices are cardbus bridges
3083 * which also require special treatment. Other types are unknown, and
3084 * we err on the side of safety by ignoring them. Powering down
3085 * bridges should not be undertaken lightly.
3087 if (dinfo->cfg.hdrtype != 0)
3089 for (i = 0; i < dinfo->cfg.nummaps; i++)
3090 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3091 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3094 * Some drivers apparently write to these registers w/o updating our
3095 * cached copy. No harm happens if we update the copy, so do so here
3096 * so we can restore them. The COMMAND register is modified by the
3097 * bus w/o updating the cache. This should represent the normally
3098 * writable portion of the 'defined' part of type 0 headers. In
3099 * theory we also need to save/restore the PCI capability structures
3100 * we know about, but apart from power we don't know any that are
3103 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3104 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3105 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3106 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3107 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3108 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3109 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3110 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3111 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3112 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3113 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3114 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3115 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3116 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3117 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
3120 * don't set the state for display devices, base peripherals and
3121 * memory devices since bad things happen when they are powered down.
3122 * We should (a) have drivers that can easily detach and (b) use
3123 * generic drivers for these devices so that some device actually
3124 * attaches. We need to make sure that when we implement (a) we don't
3125 * power the device down on a reattach.
3127 cls = pci_get_class(dev);
3130 switch (pci_do_power_nodriver)
3132 case 0: /* NO powerdown at all */
3134 case 1: /* Conservative about what to power down */
3135 if (cls == PCIC_STORAGE)
3138 case 2: /* Agressive about what to power down */
3139 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
3140 cls == PCIC_BASEPERIPH)
3143 case 3: /* Power down everything */
3147 * PCI spec says we can only go into D3 state from D0 state.
3148 * Transition from D[12] into D0 before going to D3 state.
3150 ps = pci_get_powerstate(dev);
3151 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
3152 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3153 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
3154 pci_set_powerstate(dev, PCI_POWERSTATE_D3);