2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/ehcireg.h>
66 #include <dev/usb/controller/ohcireg.h>
67 #include <dev/usb/controller/uhcireg.h>
73 #include <contrib/dev/acpica/include/acpi.h>
/* ACPI power hook is compiled out in this configuration; expands to nothing. */
76 #define ACPI_PWR_FOR_SLEEP(x, y, z)
/*
 * Forward declarations for the static helpers implemented later in this
 * file (BAR decoding, device enumeration, VPD parsing, MSI/MSI-X support).
 */
79 static pci_addr_t pci_mapbase(uint64_t mapreg);
80 static const char *pci_maptype(uint64_t mapreg);
81 static int pci_mapsize(uint64_t testval);
82 static int pci_maprange(uint64_t mapreg);
83 static void pci_fixancient(pcicfgregs *cfg);
84 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
86 static int pci_porten(device_t dev);
87 static int pci_memen(device_t dev);
88 static void pci_assign_interrupt(device_t bus, device_t dev,
90 static int pci_add_map(device_t bus, device_t dev, int reg,
91 struct resource_list *rl, int force, int prefetch);
92 static int pci_probe(device_t dev);
93 static int pci_attach(device_t dev);
94 static void pci_load_vendor_data(void);
95 static int pci_describe_parse_line(char **ptr, int *vendor,
96 int *device, char **desc);
97 static char *pci_describe_device(device_t dev);
98 static int pci_modevent(module_t mod, int what, void *arg);
99 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
101 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
102 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
103 int reg, uint32_t *data);
105 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
106 int reg, uint32_t data);
108 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
109 static void pci_disable_msi(device_t dev);
110 static void pci_enable_msi(device_t dev, uint64_t address,
112 static void pci_enable_msix(device_t dev, u_int index,
113 uint64_t address, uint32_t data);
114 static void pci_mask_msix(device_t dev, u_int index);
115 static void pci_unmask_msix(device_t dev, u_int index);
116 static int pci_msi_blacklisted(void);
117 static void pci_resume_msi(device_t dev);
118 static void pci_resume_msix(device_t dev);
/*
 * Method table for the PCI bus driver: device_* lifecycle methods,
 * bus_* resource/interrupt methods, and the pci_* interface methods
 * that child drivers reach via the PCI accessor functions.
 */
120 static device_method_t pci_methods[] = {
121 /* Device interface */
122 DEVMETHOD(device_probe, pci_probe),
123 DEVMETHOD(device_attach, pci_attach),
124 DEVMETHOD(device_detach, bus_generic_detach),
125 DEVMETHOD(device_shutdown, bus_generic_shutdown),
126 DEVMETHOD(device_suspend, pci_suspend),
127 DEVMETHOD(device_resume, pci_resume),
/* Bus interface */
130 DEVMETHOD(bus_print_child, pci_print_child),
131 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
132 DEVMETHOD(bus_read_ivar, pci_read_ivar),
133 DEVMETHOD(bus_write_ivar, pci_write_ivar),
134 DEVMETHOD(bus_driver_added, pci_driver_added),
135 DEVMETHOD(bus_setup_intr, pci_setup_intr),
136 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
138 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
139 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
140 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
141 DEVMETHOD(bus_delete_resource, pci_delete_resource),
142 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
143 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
144 DEVMETHOD(bus_activate_resource, pci_activate_resource),
145 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
146 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
147 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
/* PCI interface */
150 DEVMETHOD(pci_read_config, pci_read_config_method),
151 DEVMETHOD(pci_write_config, pci_write_config_method),
152 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
153 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
154 DEVMETHOD(pci_enable_io, pci_enable_io_method),
155 DEVMETHOD(pci_disable_io, pci_disable_io_method),
156 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
157 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
158 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
159 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
160 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
161 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
162 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
163 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
164 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
165 DEVMETHOD(pci_release_msi, pci_release_msi_method),
166 DEVMETHOD(pci_msi_count, pci_msi_count_method),
167 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Register the "pci" driver class and attach it to the pcib (bridge) bus. */
172 DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);
174 static devclass_t pci_devclass;
175 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
176 MODULE_VERSION(pci, 1);
/* In-memory copy of the pci_vendor_data file used for device descriptions. */
178 static char *pci_vendordata;
179 static size_t pci_vendordata_size;
/* Member of struct pci_quirk (declaration lines not shown in this listing). */
183 uint32_t devid; /* Vendor/device of the card */
185 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
186 #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */
/*
 * Table of known-broken devices, keyed by (device id << 16 | vendor id).
 * Entries either relocate a BAR (PCI_QUIRK_MAP_REG) or blacklist MSI.
 */
191 struct pci_quirk pci_quirks[] = {
192 /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
193 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
194 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
195 /* As does the Serverworks OSB4 (the SMBus mapping register) */
196 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
199 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
200 * or the CMIC-SL (AKA ServerWorks GC_LE).
202 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
203 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
206 * MSI doesn't work on earlier Intel chipsets including
207 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
209 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
210 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
211 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
212 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
213 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
214 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
215 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
218 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
221 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
226 /* map register information */
227 #define PCI_MAPMEM 0x01 /* memory map */
228 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
229 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of all enumerated PCI devices, plus generation/count. */
231 struct devlist pci_devq;
232 uint32_t pci_generation;
233 uint32_t pci_numdevs = 0;
/* Set when a PCI-express / PCI-X capable bridge is seen during extcap scan. */
234 static int pcie_chipset, pcix_chipset;
/* hw.pci sysctl tree and the loader tunables that back each knob. */
237 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
239 static int pci_enable_io_modes = 1;
240 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
241 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
242 &pci_enable_io_modes, 1,
243 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
244 enable these bits correctly. We'd like to do this all the time, but there\n\
245 are some peripherals that this causes problems with.");
247 static int pci_do_power_nodriver = 0;
248 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
249 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
250 &pci_do_power_nodriver, 0,
251 "Place a function into D3 state when no driver attaches to it. 0 means\n\
252 disable. 1 means conservatively place devices into D3 state. 2 means\n\
253 agressively place devices into D3 state. 3 means put absolutely everything\n\
256 static int pci_do_power_resume = 1;
257 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
258 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
259 &pci_do_power_resume, 1,
260 "Transition from D3 -> D0 on resume.");
262 static int pci_do_msi = 1;
263 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
264 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
265 "Enable support for MSI interrupts");
267 static int pci_do_msix = 1;
268 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
269 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
270 "Enable support for MSI-X interrupts");
272 static int pci_honor_msi_blacklist = 1;
273 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
274 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
275 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
277 static int pci_usb_takeover = 1;
278 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
279 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RD | CTLFLAG_TUN,
280 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
281 Disable this if you depend on BIOS emulation of USB devices, that is\n\
282 you use USB devices (like keyboard or mouse) but do not load USB drivers");
284 /* Find a device_t by bus/slot/function in domain 0 */
287 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
/* Convenience wrapper: delegates to pci_find_dbsf() with domain 0. */
290 return (pci_find_dbsf(0, bus, slot, func));
293 /* Find a device_t by domain/bus/slot/function */
296 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
298 struct pci_devinfo *dinfo;
/* Linear scan of the global device list for an exact D/B/S/F match. */
300 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
301 if ((dinfo->cfg.domain == domain) &&
302 (dinfo->cfg.bus == bus) &&
303 (dinfo->cfg.slot == slot) &&
304 (dinfo->cfg.func == func)) {
305 return (dinfo->cfg.dev);
312 /* Find a device_t by vendor/device ID */
315 pci_find_device(uint16_t vendor, uint16_t device)
317 struct pci_devinfo *dinfo;
/* Returns the first matching device on the list; NULL if none (not shown). */
319 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
320 if ((dinfo->cfg.vendor == vendor) &&
321 (dinfo->cfg.device == device)) {
322 return (dinfo->cfg.dev);
/* printf() prefixed with the device's pciD:B:S:F location; returns chars printed. */
330 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
335 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
338 retval += vprintf(fmt, ap);
343 /* return base address of memory or port map */
346 pci_mapbase(uint64_t mapreg)
/* Mask off the BAR's type/flag bits, keeping only the address bits. */
349 if (PCI_BAR_MEM(mapreg))
350 return (mapreg & PCIM_BAR_MEM_BASE)&#59;
352 return (mapreg & PCIM_BAR_IO_BASE);
355 /* return map type of memory or port map */
358 pci_maptype(uint64_t mapreg)
/* Human-readable BAR class for boot-time verbose printing. */
361 if (PCI_BAR_IO(mapreg))
363 if (mapreg & PCIM_BAR_MEM_PREFETCH)
364 return ("Prefetchable Memory");
368 /* return log2 of map size decoded for memory or port map */
371 pci_mapsize(uint64_t testval)
375 testval = pci_mapbase(testval);
/* Count trailing zero bits of the sizing read-back to get log2(size). */
378 while ((testval & 1) == 0)
387 /* return log2 of address range supported by map register */
390 pci_maprange(uint64_t mapreg)
394 if (PCI_BAR_IO(mapreg))
/* Memory BARs encode 32-bit, below-1MB, or 64-bit decode in the type field. */
397 switch (mapreg & PCIM_BAR_MEM_TYPE) {
398 case PCIM_BAR_MEM_32:
401 case PCIM_BAR_MEM_1MB:
404 case PCIM_BAR_MEM_64:
411 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
414 pci_fixancient(pcicfgregs *cfg)
/* Only header type 0 devices need fixing up; others return early. */
416 if (cfg->hdrtype != 0)
419 /* PCI to PCI bridges use header type 1 */
420 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
424 /* extract header type specific config data */
427 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
429 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
/*
 * Subvendor/subdevice IDs and the BAR count live at different offsets
 * for type 0 (device), type 1 (bridge), and type 2 (cardbus) headers.
 */
430 switch (cfg->hdrtype) {
432 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
433 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
434 cfg->nummaps = PCI_MAXMAPS_0;
437 cfg->nummaps = PCI_MAXMAPS_1;
440 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
441 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
442 cfg->nummaps = PCI_MAXMAPS_2;
448 /* read configuration header into pcicfgregs structure */
450 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
452 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
453 pcicfgregs *cfg = NULL;
454 struct pci_devinfo *devlist_entry;
455 struct devlist *devlist_head;
457 devlist_head = &pci_devq;
459 devlist_entry = NULL;
/* An all-ones vendor/device read means no device at this B/S/F. */
461 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
462 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
463 if (devlist_entry == NULL)
466 cfg = &devlist_entry->cfg;
/* Snapshot the standard configuration header fields. */
472 cfg->vendor = REG(PCIR_VENDOR, 2);
473 cfg->device = REG(PCIR_DEVICE, 2);
474 cfg->cmdreg = REG(PCIR_COMMAND, 2);
475 cfg->statreg = REG(PCIR_STATUS, 2);
476 cfg->baseclass = REG(PCIR_CLASS, 1);
477 cfg->subclass = REG(PCIR_SUBCLASS, 1);
478 cfg->progif = REG(PCIR_PROGIF, 1);
479 cfg->revid = REG(PCIR_REVID, 1);
480 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
481 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
482 cfg->lattimer = REG(PCIR_LATTIMER, 1);
483 cfg->intpin = REG(PCIR_INTPIN, 1);
484 cfg->intline = REG(PCIR_INTLINE, 1);
486 cfg->mingnt = REG(PCIR_MINGNT, 1);
487 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split out the multi-function bit before using hdrtype as a plain type. */
489 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
490 cfg->hdrtype &= ~PCIM_MFDEV;
493 pci_hdrtypedata(pcib, b, s, f, cfg);
/* Walk the capability list only if the status register advertises one. */
495 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
496 pci_read_extcap(pcib, cfg);
498 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror the config snapshot into the pciconf(8)-visible structure. */
500 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
501 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
502 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
503 devlist_entry->conf.pc_sel.pc_func = cfg->func;
504 devlist_entry->conf.pc_hdr = cfg->hdrtype;
506 devlist_entry->conf.pc_subvendor = cfg->subvendor;
507 devlist_entry->conf.pc_subdevice = cfg->subdevice;
508 devlist_entry->conf.pc_vendor = cfg->vendor;
509 devlist_entry->conf.pc_device = cfg->device;
511 devlist_entry->conf.pc_class = cfg->baseclass;
512 devlist_entry->conf.pc_subclass = cfg->subclass;
513 devlist_entry->conf.pc_progif = cfg->progif;
514 devlist_entry->conf.pc_revid = cfg->revid;
519 return (devlist_entry);
/*
 * Walk the PCI capability list and record the capabilities this driver
 * cares about (power management, HT, MSI, MSI-X, VPD, PCI-X, PCIe).
 */
524 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
526 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
527 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
528 #if defined(__i386__) || defined(__amd64__)
532 int ptr, nextptr, ptrptr;
/* The capability pointer register location depends on the header type. */
534 switch (cfg->hdrtype & PCIM_HDRTYPE) {
537 ptrptr = PCIR_CAP_PTR;
540 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
543 return; /* no extended capabilities support */
545 nextptr = REG(ptrptr, 1); /* sanity check? */
548 * Read capability entries.
550 while (nextptr != 0) {
553 printf("illegal PCI extended capability offset %d\n",
557 /* Find the next entry */
559 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
561 /* Process this entry */
562 switch (REG(ptr + PCICAP_ID, 1)) {
563 case PCIY_PMG: /* PCI power management */
564 if (cfg->pp.pp_cap == 0) {
565 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
566 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
567 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
568 if ((nextptr - ptr) > PCIR_POWER_DATA)
569 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
572 #if defined(__i386__) || defined(__amd64__)
573 case PCIY_HT: /* HyperTransport */
574 /* Determine HT-specific capability type. */
575 val = REG(ptr + PCIR_HT_COMMAND, 2);
576 switch (val & PCIM_HTCMD_CAP_MASK) {
577 case PCIM_HTCAP_MSI_MAPPING:
578 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
579 /* Sanity check the mapping window. */
580 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
583 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
585 if (addr != MSI_INTEL_ADDR_BASE)
587 "HT Bridge at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
588 cfg->domain, cfg->bus,
589 cfg->slot, cfg->func,
592 addr = MSI_INTEL_ADDR_BASE;
594 cfg->ht.ht_msimap = ptr;
595 cfg->ht.ht_msictrl = val;
596 cfg->ht.ht_msiaddr = addr;
601 case PCIY_MSI: /* PCI MSI */
602 cfg->msi.msi_location = ptr;
603 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* MMC field encodes log2 of the supported message count. */
604 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
605 PCIM_MSICTRL_MMC_MASK)>>1);
607 case PCIY_MSIX: /* PCI MSI-X */
608 cfg->msix.msix_location = ptr;
609 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
610 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
611 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA locations are given as a BAR index plus offset. */
612 val = REG(ptr + PCIR_MSIX_TABLE, 4);
613 cfg->msix.msix_table_bar = PCIR_BAR(val &
615 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
616 val = REG(ptr + PCIR_MSIX_PBA, 4);
617 cfg->msix.msix_pba_bar = PCIR_BAR(val &
619 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
621 case PCIY_VPD: /* PCI Vital Product Data */
622 cfg->vpd.vpd_reg = ptr;
625 /* Should always be true. */
626 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
627 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
628 cfg->subvendor = val & 0xffff;
629 cfg->subdevice = val >> 16;
632 case PCIY_PCIX: /* PCI-X */
634 * Assume we have a PCI-X chipset if we have
635 * at least one PCI-PCI bridge with a PCI-X
636 * capability. Note that some systems with
637 * PCI-express or HT chipsets might match on
638 * this check as well.
640 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
643 case PCIY_EXPRESS: /* PCI-express */
645 * Assume we have a PCI-express chipset if we have
646 * at least one PCI-express device.
654 /* REG and WREG use carry through to next functions */
658 * PCI Vital Product Data
/* Poll budget (in DELAY(1) microsecond ticks) for a VPD register access. */
661 #define PCI_VPD_TIMEOUT 1000000
/*
 * Read one 32-bit VPD word at 'reg': write the address, then poll the
 * flag bit (0x8000) until the hardware signals the data is ready.
 * Returns nonzero on timeout (exact path not shown in this listing).
 */
664 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
666 int count = PCI_VPD_TIMEOUT;
668 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
670 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
672 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
675 DELAY(1); /* limit looping */
677 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
/*
 * Write one 32-bit VPD word at 'reg': store the data, set the address
 * with the write flag (0x8000), then poll until the flag clears.
 */
684 pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
686 int count = PCI_VPD_TIMEOUT;
688 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
690 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
691 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
692 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
695 DELAY(1); /* limit looping */
702 #undef PCI_VPD_TIMEOUT
/* Cursor state for streaming VPD bytes out of 32-bit register reads. */
704 struct vpd_readstate {
/*
 * Return the next VPD byte via *data, refilling the 32-bit buffer from
 * pci_read_vpd_reg() whenever the cached bytes are exhausted.
 */
714 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
719 if (vrs->bytesinval == 0) {
720 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
/* VPD words are little-endian on the wire. */
722 vrs->val = le32toh(reg);
724 byte = vrs->val & 0xff;
727 vrs->val = vrs->val >> 8;
728 byte = vrs->val & 0xff;
/*
 * Parse the device's Vital Product Data into cfg->vpd: the identifier
 * string, the read-only (VPD-R) keyword array, and the writable (VPD-W)
 * keyword array. Implemented as a byte-at-a-time state machine; state 0
 * reads resource headers, states 1-3 handle ident/VPD-R, states 5-6
 * handle VPD-W. On checksum or I/O errors the partial results are freed.
 */
738 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
740 struct vpd_readstate vrs;
745 int alloc, off; /* alloc/off for RO/W arrays */
751 /* init vpd reader */
759 name = remain = i = 0; /* shut up stupid gcc */
760 alloc = off = 0; /* shut up stupid gcc */
761 dflen = 0; /* shut up stupid gcc */
764 if (vpd_nextbyte(&vrs, &byte)) {
769 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
770 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
771 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
774 case 0: /* item name */
776 if (vpd_nextbyte(&vrs, &byte2)) {
781 if (vpd_nextbyte(&vrs, &byte2)) {
785 remain |= byte2 << 8;
/* Reject lengths that run past the 255-word VPD address space. */
786 if (remain > (0x7f*4 - vrs.off)) {
789 "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
790 cfg->domain, cfg->bus, cfg->slot,
796 name = (byte >> 3) & 0xf;
799 case 0x2: /* String */
800 cfg->vpd.vpd_ident = malloc(remain + 1,
808 case 0x10: /* VPD-R */
811 cfg->vpd.vpd_ros = malloc(alloc *
812 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
816 case 0x11: /* VPD-W */
819 cfg->vpd.vpd_w = malloc(alloc *
820 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
824 default: /* Invalid data, abort */
830 case 1: /* Identifier String */
831 cfg->vpd.vpd_ident[i++] = byte;
834 cfg->vpd.vpd_ident[i] = '\0';
839 case 2: /* VPD-R Keyword Header */
/* Grow the array geometrically as keywords are discovered. */
841 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
842 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
843 M_DEVBUF, M_WAITOK | M_ZERO);
845 cfg->vpd.vpd_ros[off].keyword[0] = byte;
846 if (vpd_nextbyte(&vrs, &byte2)) {
850 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
851 if (vpd_nextbyte(&vrs, &byte2)) {
857 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
860 * if this happens, we can't trust the rest
864 "pci%d:%d:%d:%d: bad keyword length: %d\n",
865 cfg->domain, cfg->bus, cfg->slot,
870 } else if (dflen == 0) {
/* Zero-length keyword still gets a 1-byte value holding only a NUL. */
871 cfg->vpd.vpd_ros[off].value = malloc(1 *
872 sizeof(*cfg->vpd.vpd_ros[off].value),
874 cfg->vpd.vpd_ros[off].value[0] = '\x00';
876 cfg->vpd.vpd_ros[off].value = malloc(
878 sizeof(*cfg->vpd.vpd_ros[off].value),
882 /* keep in sync w/ state 3's transitions */
883 if (dflen == 0 && remain == 0)
891 case 3: /* VPD-R Keyword Value */
892 cfg->vpd.vpd_ros[off].value[i++] = byte;
/* The RV keyword's first byte completes the section checksum. */
893 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
894 "RV", 2) == 0 && cksumvalid == -1) {
900 "pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
901 cfg->domain, cfg->bus,
902 cfg->slot, cfg->func,
911 /* keep in sync w/ state 2's transitions */
913 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
914 if (dflen == 0 && remain == 0) {
/* Done with VPD-R: shrink the array to its final size. */
915 cfg->vpd.vpd_rocnt = off;
916 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
917 off * sizeof(*cfg->vpd.vpd_ros),
918 M_DEVBUF, M_WAITOK | M_ZERO);
920 } else if (dflen == 0)
930 case 5: /* VPD-W Keyword Header */
932 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
933 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
934 M_DEVBUF, M_WAITOK | M_ZERO);
936 cfg->vpd.vpd_w[off].keyword[0] = byte;
937 if (vpd_nextbyte(&vrs, &byte2)) {
941 cfg->vpd.vpd_w[off].keyword[1] = byte2;
942 if (vpd_nextbyte(&vrs, &byte2)) {
946 cfg->vpd.vpd_w[off].len = dflen = byte2;
/* Record where this writable field starts so callers can update it. */
947 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
948 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
949 sizeof(*cfg->vpd.vpd_w[off].value),
953 /* keep in sync w/ state 6's transitions */
954 if (dflen == 0 && remain == 0)
962 case 6: /* VPD-W Keyword Value */
963 cfg->vpd.vpd_w[off].value[i++] = byte;
966 /* keep in sync w/ state 5's transitions */
968 cfg->vpd.vpd_w[off++].value[i++] = '\0';
969 if (dflen == 0 && remain == 0) {
970 cfg->vpd.vpd_wcnt = off;
971 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
972 off * sizeof(*cfg->vpd.vpd_w),
973 M_DEVBUF, M_WAITOK | M_ZERO);
975 } else if (dflen == 0)
980 printf("pci%d:%d:%d:%d: invalid state: %d\n",
981 cfg->domain, cfg->bus, cfg->slot, cfg->func,
/* Negative state means a parse/read error occurred above. */
988 if (cksumvalid == 0 || state < -1) {
989 /* read-only data bad, clean up */
990 if (cfg->vpd.vpd_ros != NULL) {
991 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
992 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
993 free(cfg->vpd.vpd_ros, M_DEVBUF);
994 cfg->vpd.vpd_ros = NULL;
998 /* I/O error, clean up */
999 printf("pci%d:%d:%d:%d: failed to read VPD data.\n",
1000 cfg->domain, cfg->bus, cfg->slot, cfg->func);
1001 if (cfg->vpd.vpd_ident != NULL) {
1002 free(cfg->vpd.vpd_ident, M_DEVBUF);
1003 cfg->vpd.vpd_ident = NULL;
1005 if (cfg->vpd.vpd_w != NULL) {
1006 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1007 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1008 free(cfg->vpd.vpd_w, M_DEVBUF);
1009 cfg->vpd.vpd_w = NULL;
/* Mark the (possibly empty) results cached so we don't re-read. */
1012 cfg->vpd.vpd_cached = 1;
/*
 * pci_get_vpd_ident() method: return the cached VPD identifier string,
 * lazily parsing the VPD on first use if the device has a VPD capability.
 */
1018 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1020 struct pci_devinfo *dinfo = device_get_ivars(child);
1021 pcicfgregs *cfg = &dinfo->cfg;
1023 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1024 pci_read_vpd(device_get_parent(dev), cfg);
1026 *identptr = cfg->vpd.vpd_ident;
1028 if (*identptr == NULL)
/*
 * pci_get_vpd_readonly() method: look up the value of a two-character
 * VPD-R keyword 'kw', lazily parsing the VPD on first use.
 */
1035 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1038 struct pci_devinfo *dinfo = device_get_ivars(child);
1039 pcicfgregs *cfg = &dinfo->cfg;
1042 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1043 pci_read_vpd(device_get_parent(dev), cfg);
1045 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1046 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1047 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1048 *vptr = cfg->vpd.vpd_ros[i].value;
/* i < rocnt here means the loop broke on a match. */
1051 if (i != cfg->vpd.vpd_rocnt)
1059 * Find the requested extended capability and return the offset in
1060 * configuration space via the pointer provided. The function returns
1061 * 0 on success and error code otherwise.
1064 pci_find_extcap_method(device_t dev, device_t child, int capability,
1067 struct pci_devinfo *dinfo = device_get_ivars(child);
1068 pcicfgregs *cfg = &dinfo->cfg;
1073 * Check the CAP_LIST bit of the PCI status register first.
1075 status = pci_read_config(child, PCIR_STATUS, 2);
1076 if (!(status & PCIM_STATUS_CAPPRESENT))
1080 * Determine the start pointer of the capabilities list.
1082 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1088 ptr = PCIR_CAP_PTR_2;
1092 return (ENXIO); /* no extended capabilities support */
1094 ptr = pci_read_config(child, ptr, 1);
1097 * Traverse the capabilities list.
1100 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1105 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1112 * Support for MSI-X message interrupts.
/*
 * Program one MSI-X table entry (16 bytes per entry: 64-bit address,
 * 32-bit data) with the given message address/data pair.
 */
1115 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1117 struct pci_devinfo *dinfo = device_get_ivars(dev);
1118 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1121 KASSERT(msix->msix_table_len > index, ("bogus index"));
1122 offset = msix->msix_table_offset + index * 16;
1123 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1124 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1125 bus_write_4(msix->msix_table_res, offset + 8, data);
1127 /* Enable MSI -> HT mapping. */
1128 pci_ht_map_msi(dev, address);
/* Set the per-vector mask bit in the MSI-X table entry's control word. */
1132 pci_mask_msix(device_t dev, u_int index)
1134 struct pci_devinfo *dinfo = device_get_ivars(dev);
1135 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1136 uint32_t offset, val;
1138 KASSERT(msix->msix_msgnum > index, ("bogus index"));
/* Vector control word is the last dword (+12) of the 16-byte entry. */
1139 offset = msix->msix_table_offset + index * 16 + 12;
1140 val = bus_read_4(msix->msix_table_res, offset);
1141 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1142 val |= PCIM_MSIX_VCTRL_MASK;
1143 bus_write_4(msix->msix_table_res, offset, val);
/* Clear the per-vector mask bit in the MSI-X table entry's control word. */
1148 pci_unmask_msix(device_t dev, u_int index)
1150 struct pci_devinfo *dinfo = device_get_ivars(dev);
1151 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1152 uint32_t offset, val;
1154 KASSERT(msix->msix_table_len > index, ("bogus index"));
1155 offset = msix->msix_table_offset + index * 16 + 12;
1156 val = bus_read_4(msix->msix_table_res, offset);
1157 if (val & PCIM_MSIX_VCTRL_MASK) {
1158 val &= ~PCIM_MSIX_VCTRL_MASK;
1159 bus_write_4(msix->msix_table_res, offset, val);
/* Return nonzero if the given vector's bit is set in the pending-bit array. */
1164 pci_pending_msix(device_t dev, u_int index)
1166 struct pci_devinfo *dinfo = device_get_ivars(dev);
1167 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1168 uint32_t offset, bit;
1170 KASSERT(msix->msix_table_len > index, ("bogus index"));
/* PBA packs one pending bit per vector, 32 vectors per dword. */
1171 offset = msix->msix_pba_offset + (index / 32) * 4;
1172 bit = 1 << index % 32;
1173 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1177 * Restore MSI-X registers and table during resume. If MSI-X is
1178 * enabled then walk the virtual table to restore the actual MSI-X
1182 pci_resume_msix(device_t dev)
1184 struct pci_devinfo *dinfo = device_get_ivars(dev);
1185 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1186 struct msix_table_entry *mte;
1187 struct msix_vector *mv;
1190 if (msix->msix_alloc > 0) {
1191 /* First, mask all vectors. */
1192 for (i = 0; i < msix->msix_msgnum; i++)
1193 pci_mask_msix(dev, i);
1195 /* Second, program any messages with at least one handler. */
1196 for (i = 0; i < msix->msix_table_len; i++) {
1197 mte = &msix->msix_table[i];
1198 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1200 mv = &msix->msix_vectors[mte->mte_vector - 1];
1201 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1202 pci_unmask_msix(dev, i);
/* Finally, restore the saved MSI-X control register value. */
1205 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1206 msix->msix_ctrl, 2);
1210 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1211 * returned in *count. After this function returns, each message will be
1212 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1215 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1217 struct pci_devinfo *dinfo = device_get_ivars(child);
1218 pcicfgregs *cfg = &dinfo->cfg;
1219 struct resource_list_entry *rle;
1220 int actual, error, i, irq, max;
1222 /* Don't let count == 0 get us into trouble. */
1226 /* If rid 0 is allocated, then fail. */
1227 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1228 if (rle != NULL && rle->res != NULL)
1231 /* Already have allocated messages? */
1232 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1235 /* If MSI is blacklisted for this system, fail. */
1236 if (pci_msi_blacklisted())
1239 /* MSI-X capability present? */
1240 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1243 /* Make sure the appropriate BARs are mapped. */
1244 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1245 cfg->msix.msix_table_bar);
1246 if (rle == NULL || rle->res == NULL ||
1247 !(rman_get_flags(rle->res) & RF_ACTIVE))
1249 cfg->msix.msix_table_res = rle->res;
/* The PBA may live in a different BAR than the table. */
1250 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1251 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1252 cfg->msix.msix_pba_bar);
1253 if (rle == NULL || rle->res == NULL ||
1254 !(rman_get_flags(rle->res) & RF_ACTIVE))
1257 cfg->msix.msix_pba_res = rle->res;
1260 device_printf(child,
1261 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1262 *count, cfg->msix.msix_msgnum);
/* Never ask the parent bridge for more than the hardware supports. */
1263 max = min(*count, cfg->msix.msix_msgnum);
1264 for (i = 0; i < max; i++) {
1265 /* Allocate a message. */
1266 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1269 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1275 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1277 device_printf(child, "using IRQ %lu for MSI-X\n",
1283 * Be fancy and try to print contiguous runs of
1284 * IRQ values as ranges. 'irq' is the previous IRQ.
1285 * 'run' is true if we are in a range.
1287 device_printf(child, "using IRQs %lu", rle->start);
1290 for (i = 1; i < actual; i++) {
1291 rle = resource_list_find(&dinfo->resources,
1292 SYS_RES_IRQ, i + 1);
1294 /* Still in a run? */
1295 if (rle->start == irq + 1) {
1301 /* Finish previous range. */
1307 /* Start new range. */
1308 printf(",%lu", rle->start);
1312 /* Unfinished range? */
1315 printf(" for MSI-X\n");
1319 /* Mask all vectors. */
1320 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1321 pci_mask_msix(child, i);
1323 /* Allocate and initialize vector data and virtual table. */
1324 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1325 M_DEVBUF, M_WAITOK | M_ZERO);
1326 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1327 M_DEVBUF, M_WAITOK | M_ZERO);
/* Default layout: message i uses vector i+1 (a 1:1 identity mapping). */
1328 for (i = 0; i < actual; i++) {
1329 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1330 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1331 cfg->msix.msix_table[i].mte_vector = i + 1;
1334 /* Update control register to enable MSI-X. */
1335 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1336 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1337 cfg->msix.msix_ctrl, 2);
1339 /* Update counts of alloc'd messages. */
1340 cfg->msix.msix_alloc = actual;
1341 cfg->msix.msix_table_len = actual;
1347 * By default, pci_alloc_msix() will assign the allocated IRQ
1348 * resources consecutively to the first N messages in the MSI-X table.
1349 * However, device drivers may want to use different layouts if they
1350 * either receive fewer messages than they asked for, or they wish to
1351 * populate the MSI-X table sparsely. This method allows the driver
1352 * to specify what layout it wants. It must be called after a
1353 * successful pci_alloc_msix() but before any of the associated
1354 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1356 * The 'vectors' array contains 'count' message vectors. The array
1357 * maps directly to the MSI-X table in that index 0 in the array
1358 * specifies the vector for the first message in the MSI-X table, etc.
1359 * The vector value in each array index can either be 0 to indicate
1360 * that no vector should be assigned to a message slot, or it can be a
1361 * number from 1 to N (where N is the count returned from a
1362 * successful call to pci_alloc_msix()) to indicate which message
1363 * vector (IRQ) to be used for the corresponding message.
1365 * On successful return, each message with a non-zero vector will have
1366 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1367 * 1. Additionally, if any of the IRQs allocated via the previous
1368 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1369 * will be freed back to the system automatically.
1371 * For example, suppose a driver has a MSI-X table with 6 messages and
1372 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1373 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1374 * C. After the call to pci_alloc_msix(), the device will be setup to
1375 * have an MSI-X table of ABC--- (where - means no vector assigned).
1376 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1377 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1378 * be freed back to the system. This device will also have valid
1379 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1381 * In any case, the SYS_RES_IRQ rid X will always map to the message
1382 * at MSI-X table index X - 1 and will only be valid if a vector is
1383 * assigned to that table entry.
/*
 * Implements BUS_REMAP_MSIX(): apply a driver-supplied MSI-X table layout.
 * Entries in 'vectors[]' are 1-based indices into the allocated vector
 * array (0 = leave that table slot unused); see the contract described in
 * the block comment above.  Returns 0 on success or EINVAL on a bad layout.
 *
 * Fix: the original indexed msix_vectors[] with the raw 1-based vectors[i]
 * value when mapping IRQs onto rids (and in the bootverbose printout),
 * reading one element past the intended entry.  The hole-detection loop
 * above already uses vectors[i] - 1; do the same here.
 */
1386 pci_remap_msix_method(device_t dev, device_t child, int count,
1387     const u_int *vectors)
1389 struct pci_devinfo *dinfo = device_get_ivars(child);
1390 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1391 struct resource_list_entry *rle;
1392 int i, irq, j, *used;
1395 * Have to have at least one message in the table but the
1396 * table can't be bigger than the actual MSI-X table in the
1399 if (count == 0 || count > msix->msix_msgnum)
1402 /* Sanity check the vectors. */
1403 for (i = 0; i < count; i++)
1404 if (vectors[i] > msix->msix_alloc)
1408 * Make sure there aren't any holes in the vectors to be used.
1409 * It's a big pain to support it, and it doesn't really make
1410 * sense anyway. Also, at least one vector must be used.
1412 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1414 for (i = 0; i < count; i++)
1415 if (vectors[i] != 0)
1416 used[vectors[i] - 1] = 1;
1417 for (i = 0; i < msix->msix_alloc - 1; i++)
1418 if (used[i] == 0 && used[i + 1] == 1) {
1419 free(used, M_DEVBUF);
1423 free(used, M_DEVBUF);
1427 /* Make sure none of the resources are allocated. */
1428 for (i = 0; i < msix->msix_table_len; i++) {
1429 if (msix->msix_table[i].mte_vector == 0)
1431 if (msix->msix_table[i].mte_handlers > 0)
1433 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1434 KASSERT(rle != NULL, ("missing resource"));
1435 if (rle->res != NULL)
1439 /* Free the existing resource list entries. */
1440 for (i = 0; i < msix->msix_table_len; i++) {
1441 if (msix->msix_table[i].mte_vector == 0)
1443 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1447 * Build the new virtual table keeping track of which vectors are
1450 free(msix->msix_table, M_DEVBUF);
1451 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1452 M_DEVBUF, M_WAITOK | M_ZERO);
1453 for (i = 0; i < count; i++)
1454 msix->msix_table[i].mte_vector = vectors[i];
1455 msix->msix_table_len = count;
1457 /* Free any unused IRQs and resize the vectors array if necessary. */
1458 j = msix->msix_alloc - 1;
1460 struct msix_vector *vec;
1462 while (used[j] == 0) {
1463 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1464 msix->msix_vectors[j].mv_irq);
1467 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1469 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1471 free(msix->msix_vectors, M_DEVBUF);
1472 msix->msix_vectors = vec;
1473 msix->msix_alloc = j + 1;
1475 free(used, M_DEVBUF);
1477 /* Map the IRQs onto the rids. */
1478 for (i = 0; i < count; i++) {
1479 if (vectors[i] == 0)
/* vectors[] entries are 1-based; convert to a 0-based array index. */
1481 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
1482 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1487 device_printf(child, "Remapped MSI-X IRQs as: ");
1488 for (i = 0; i < count; i++) {
1491 if (vectors[i] == 0)
1495 msix->msix_vectors[vectors[i] - 1].mv_irq);
/*
 * Tear down a device's MSI-X allocation: verify no vectors are still in
 * use, disable MSI-X in the capability's control register, delete the
 * SYS_RES_IRQ resource-list entries, and hand the IRQs back to the parent
 * bridge.  Returns an error if any message is still wired up.
 */
1504 pci_release_msix(device_t dev, device_t child)
1506 struct pci_devinfo *dinfo = device_get_ivars(child);
1507 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1508 struct resource_list_entry *rle;
1511 /* Do we have any messages to release? */
1512 if (msix->msix_alloc == 0)
1515 /* Make sure none of the resources are allocated. */
1516 for (i = 0; i < msix->msix_table_len; i++) {
1517 if (msix->msix_table[i].mte_vector == 0)
1519 if (msix->msix_table[i].mte_handlers > 0)
1521 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1522 KASSERT(rle != NULL, ("missing resource"));
1523 if (rle->res != NULL)
1527 /* Update control register to disable MSI-X. */
1528 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1529 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1530 msix->msix_ctrl, 2);
1532 /* Free the resource list entries. */
1533 for (i = 0; i < msix->msix_table_len; i++) {
1534 if (msix->msix_table[i].mte_vector == 0)
1536 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1538 free(msix->msix_table, M_DEVBUF);
1539 msix->msix_table_len = 0;
1541 /* Release the IRQs. */
1542 for (i = 0; i < msix->msix_alloc; i++)
1543 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1544 msix->msix_vectors[i].mv_irq);
1545 free(msix->msix_vectors, M_DEVBUF);
1546 msix->msix_alloc = 0;
1551 * Return the max supported MSI-X messages this device supports.
1552 * Basically, assuming the MD code can alloc messages, this function
1553 * should return the maximum value that pci_alloc_msix() can return.
1554 * Thus, it is subject to the tunables, etc.
/*
 * Report how many MSI-X messages the child can allocate: the capability's
 * message count when MSI-X is present and enabled by the pci_do_msix
 * tunable, otherwise zero (returned by elided fall-through code).
 */
1557 pci_msix_count_method(device_t dev, device_t child)
1559 struct pci_devinfo *dinfo = device_get_ivars(child);
1560 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1562 if (pci_do_msix && msix->msix_location != 0)
1563 return (msix->msix_msgnum);
1568 * HyperTransport MSI mapping control
/*
 * Toggle the HyperTransport MSI mapping window for this device.
 * A non-zero 'addr' whose upper bits match the mapping window enables
 * MSI -> HT address translation; addr == 0 disables it.
 */
1571 pci_ht_map_msi(device_t dev, uint64_t addr)
1573 struct pci_devinfo *dinfo = device_get_ivars(dev);
1574 struct pcicfg_ht *ht = &dinfo->cfg.ht;
/* Compare only the window-granularity (1 MB) upper address bits. */
1579 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1580 ht->ht_msiaddr >> 20 == addr >> 20) {
1581 /* Enable MSI -> HT mapping. */
1582 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1583 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1587 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1588 /* Disable MSI -> HT mapping. */
1589 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1590 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1596 * Support for MSI message signalled interrupts.
/*
 * Program the MSI capability with the given message address/data pair and
 * set the enable bit.  Uses the 64-bit address/data register layout when
 * the capability advertises PCIM_MSICTRL_64BIT.
 */
1599 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1601 struct pci_devinfo *dinfo = device_get_ivars(dev);
1602 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1604 /* Write data and address values. */
1605 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1606 address & 0xffffffff, 4);
1607 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1608 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1610 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1613 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1616 /* Enable MSI in the control register. */
1617 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1618 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1621 /* Enable MSI -> HT mapping. */
1622 pci_ht_map_msi(dev, address);
/*
 * Disable MSI for the device: tear down any HT mapping first (addr 0
 * means disable), then clear the MSI enable bit in the control register.
 */
1626 pci_disable_msi(device_t dev)
1628 struct pci_devinfo *dinfo = device_get_ivars(dev);
1629 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1631 /* Disable MSI -> HT mapping. */
1632 pci_ht_map_msi(dev, 0);
1634 /* Disable MSI in the control register. */
1635 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1636 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1641 * Restore MSI registers during resume. If MSI is enabled then
1642 * restore the data and address registers in addition to the control
/*
 * Restore MSI configuration registers after a suspend/resume cycle.
 * If MSI was enabled at suspend time, rewrite the cached address/data
 * values before re-writing the control register.
 */
1646 pci_resume_msi(device_t dev)
1648 struct pci_devinfo *dinfo = device_get_ivars(dev);
1649 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1653 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1654 address = msi->msi_addr;
1655 data = msi->msi_data;
1656 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1657 address & 0xffffffff, 4);
1658 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1659 pci_write_config(dev, msi->msi_location +
1660 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1661 pci_write_config(dev, msi->msi_location +
1662 PCIR_MSI_DATA_64BIT, data, 2);
1664 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1667 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
/*
 * Re-fetch the address/data pair for 'irq' from the parent bridge (via
 * PCIB_MAP_MSI) and reprogram the device, after the MD interrupt code has
 * moved the IRQ (e.g. to a different CPU).  Handles both the single MSI
 * capability and every MSI-X table slot bound to this IRQ.
 */
1672 pci_remap_msi_irq(device_t dev, u_int irq)
1674 struct pci_devinfo *dinfo = device_get_ivars(dev);
1675 pcicfgregs *cfg = &dinfo->cfg;
1676 struct resource_list_entry *rle;
1677 struct msix_table_entry *mte;
1678 struct msix_vector *mv;
1684 bus = device_get_parent(dev);
1687 * Handle MSI first. We try to find this IRQ among our list
1688 * of MSI IRQs. If we find it, we request updated address and
1689 * data registers and apply the results.
1691 if (cfg->msi.msi_alloc > 0) {
1693 /* If we don't have any active handlers, nothing to do. */
1694 if (cfg->msi.msi_handlers == 0)
1696 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1697 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1699 if (rle->start == irq) {
1700 error = PCIB_MAP_MSI(device_get_parent(bus),
1701 dev, irq, &addr, &data);
/* Disable/re-enable around the update so the device never sees a torn address/data pair. */
1704 pci_disable_msi(dev);
1705 dinfo->cfg.msi.msi_addr = addr;
1706 dinfo->cfg.msi.msi_data = data;
1707 pci_enable_msi(dev, addr, data);
1715 * For MSI-X, we check to see if we have this IRQ. If we do,
1716 * we request the updated mapping info. If that works, we go
1717 * through all the slots that use this IRQ and update them.
1719 if (cfg->msix.msix_alloc > 0) {
1720 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1721 mv = &cfg->msix.msix_vectors[i];
1722 if (mv->mv_irq == irq) {
1723 error = PCIB_MAP_MSI(device_get_parent(bus),
1724 dev, irq, &addr, &data);
1727 mv->mv_address = addr;
/* mte_vector is a 1-based index into msix_vectors[]. */
1729 for (j = 0; j < cfg->msix.msix_table_len; j++) {
1730 mte = &cfg->msix.msix_table[j];
1731 if (mte->mte_vector != i + 1)
1733 if (mte->mte_handlers == 0)
/* Mask while rewriting the table entry, then unmask. */
1735 pci_mask_msix(dev, j);
1736 pci_enable_msix(dev, j, addr, data);
1737 pci_unmask_msix(dev, j);
1748 * Returns true if the specified device is blacklisted because MSI
/*
 * Check the quirk table for a PCI_QUIRK_DISABLE_MSI entry matching this
 * device's vendor/device ID.  Honors the pci_honor_msi_blacklist tunable.
 */
1752 pci_msi_device_blacklisted(device_t dev)
1754 struct pci_quirk *q;
1756 if (!pci_honor_msi_blacklist)
/* Quirk table is terminated by an entry with devid == 0. */
1759 for (q = &pci_quirks[0]; q->devid; q++) {
1760 if (q->devid == pci_get_devid(dev) &&
1761 q->type == PCI_QUIRK_DISABLE_MSI)
1768 * Determine if MSI is blacklisted globally on this system. Currently,
1769 * we just check for blacklisted chipsets as represented by the
1770 * host-PCI bridge at device 0:0:0. In the future, it may become
1771 * necessary to check other system attributes, such as the kenv values
1772 * that give the motherboard manufacturer and model number.
/*
 * Global MSI blacklist check: non-PCIe/non-PCI-X chipsets are always
 * blacklisted, otherwise defer to the per-device quirk check on the
 * host-PCI bridge at 0:0:0.
 */
1775 pci_msi_blacklisted(void)
1779 if (!pci_honor_msi_blacklist)
1782 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1783 if (!(pcie_chipset || pcix_chipset))
1786 dev = pci_find_bsf(0, 0, 0);
1788 return (pci_msi_device_blacklisted(dev));
1793 * Attempt to allocate *count MSI messages. The actual number allocated is
1794 * returned in *count. After this function returns, each message will be
1795 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
/*
 * Implements BUS_ALLOC_MSI(): allocate up to *count MSI messages for the
 * child, add them as SYS_RES_IRQ resources starting at rid 1, program the
 * Multiple Message Enable field, and return the actual count in *count.
 * Fails if rid 0 is in use, messages are already allocated, MSI is
 * blacklisted, or the capability is absent/disabled.
 */
1798 pci_alloc_msi_method(device_t dev, device_t child, int *count)
1800 struct pci_devinfo *dinfo = device_get_ivars(child);
1801 pcicfgregs *cfg = &dinfo->cfg;
1802 struct resource_list_entry *rle;
1803 int actual, error, i, irqs[32];
1806 /* Don't let count == 0 get us into trouble. */
1810 /* If rid 0 is allocated, then fail. */
1811 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1812 if (rle != NULL && rle->res != NULL)
1815 /* Already have allocated messages? */
1816 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1819 /* If MSI is blacklisted for this system, fail. */
1820 if (pci_msi_blacklisted())
1823 /* MSI capability present? */
1824 if (cfg->msi.msi_location == 0 || !pci_do_msi)
1828 device_printf(child,
1829 "attempting to allocate %d MSI vectors (%d supported)\n",
1830 *count, cfg->msi.msi_msgnum);
1832 /* Don't ask for more than the device supports. */
1833 actual = min(*count, cfg->msi.msi_msgnum);
1835 /* Don't ask for more than 32 messages. */
1836 actual = min(actual, 32);
1838 /* MSI requires power of 2 number of messages. */
1839 if (!powerof2(actual))
1843 /* Try to allocate N messages. */
1844 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
1845 cfg->msi.msi_msgnum, irqs);
1856 * We now have N actual messages mapped onto SYS_RES_IRQ
1857 * resources in the irqs[] array, so add new resources
1858 * starting at rid 1.
1860 for (i = 0; i < actual; i++)
1861 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
1862 irqs[i], irqs[i], 1);
1866 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
1871 * Be fancy and try to print contiguous runs
1872 * of IRQ values as ranges. 'run' is true if
1873 * we are in a range.
1875 device_printf(child, "using IRQs %d", irqs[0]);
1877 for (i = 1; i < actual; i++) {
1879 /* Still in a run? */
1880 if (irqs[i] == irqs[i - 1] + 1) {
1885 /* Finish previous range. */
1887 printf("-%d", irqs[i - 1]);
1891 /* Start new range. */
1892 printf(",%d", irqs[i]);
1895 /* Unfinished range? */
1897 printf("-%d", irqs[actual - 1]);
1898 printf(" for MSI\n");
1902 /* Update control register with actual count. */
1903 ctrl = cfg->msi.msi_ctrl;
1904 ctrl &= ~PCIM_MSICTRL_MME_MASK;
/* MME field is log2(actual), stored at bit 4 of the control register. */
1905 ctrl |= (ffs(actual) - 1) << 4;
1906 cfg->msi.msi_ctrl = ctrl;
1907 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
1909 /* Update counts of alloc'd messages. */
1910 cfg->msi.msi_alloc = actual;
1911 cfg->msi.msi_handlers = 0;
1916 /* Release the MSI messages associated with this device. */
/*
 * Implements BUS_RELEASE_MSI(): release the child's MSI (or MSI-X)
 * messages.  Tries MSI-X first; ENODEV from pci_release_msix() means "no
 * MSI-X allocated", so fall through to plain MSI.  Fails if any message
 * still has a handler or an allocated resource.
 */
1918 pci_release_msi_method(device_t dev, device_t child)
1920 struct pci_devinfo *dinfo = device_get_ivars(child);
1921 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1922 struct resource_list_entry *rle;
1923 int error, i, irqs[32];
1925 /* Try MSI-X first. */
1926 error = pci_release_msix(dev, child);
1927 if (error != ENODEV)
1930 /* Do we have any messages to release? */
1931 if (msi->msi_alloc == 0)
1933 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
1935 /* Make sure none of the resources are allocated. */
1936 if (msi->msi_handlers > 0)
1938 for (i = 0; i < msi->msi_alloc; i++) {
1939 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1940 KASSERT(rle != NULL, ("missing MSI resource"));
1941 if (rle->res != NULL)
1943 irqs[i] = rle->start;
1946 /* Update control register with 0 count. */
1947 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
1948 ("%s: MSI still enabled", __func__));
1949 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
1950 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
1953 /* Release the messages. */
1954 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
1955 for (i = 0; i < msi->msi_alloc; i++)
1956 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1958 /* Update alloc count. */
1966 * Return the max supported MSI messages this device supports.
1967 * Basically, assuming the MD code can alloc messages, this function
1968 * should return the maximum value that pci_alloc_msi() can return.
1969 * Thus, it is subject to the tunables, etc.
/*
 * Report how many MSI messages the child can allocate: the capability's
 * message count when MSI is present and enabled by the pci_do_msi
 * tunable, otherwise zero (returned by elided fall-through code).
 */
1972 pci_msi_count_method(device_t dev, device_t child)
1974 struct pci_devinfo *dinfo = device_get_ivars(child);
1975 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1977 if (pci_do_msi && msi->msi_location != 0)
1978 return (msi->msi_msgnum);
1982 /* free pcicfgregs structure and all depending data structures */
/*
 * Free a pci_devinfo and everything hanging off it: VPD identifier and
 * read-only/writable keyword values (only present when a VPD capability
 * was read), then unlink the devinfo from the global device list.
 */
1985 pci_freecfg(struct pci_devinfo *dinfo)
1987 struct devlist *devlist_head;
1990 devlist_head = &pci_devq;
1992 if (dinfo->cfg.vpd.vpd_reg) {
1993 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
1994 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
1995 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
1996 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
1997 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
1998 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
1999 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2001 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2002 free(dinfo, M_DEVBUF);
2004 /* increment the generation count */
2007 /* we're losing one device */
2013 * PCI power management
/*
 * Implements PCI_SET_POWERSTATE(): move the child to the requested D-state
 * via its power-management capability.  Returns EOPNOTSUPP when the
 * capability is absent or the target state (D1/D2) is not supported.
 * No-ops when already in the requested state.
 */
2016 pci_set_powerstate_method(device_t dev, device_t child, int state)
2018 struct pci_devinfo *dinfo = device_get_ivars(child);
2019 pcicfgregs *cfg = &dinfo->cfg;
2021 int result, oldstate, highest, delay;
2023 if (cfg->pp.pp_cap == 0)
2024 return (EOPNOTSUPP);
2027 * Optimize a no state change request away. While it would be OK to
2028 * write to the hardware in theory, some devices have shown odd
2029 * behavior when going from D3 -> D3.
2031 oldstate = pci_get_powerstate(child);
2032 if (oldstate == state)
2036 * The PCI power management specification states that after a state
2037 * transition between PCI power states, system software must
2038 * guarantee a minimal delay before the function accesses the device.
2039 * Compute the worst case delay that we need to guarantee before we
2040 * access the device. Many devices will be responsive much more
2041 * quickly than this delay, but there are some that don't respond
2042 * instantly to state changes. Transitions to/from D3 state require
2043 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2044 * is done below with DELAY rather than a sleeper function because
2045 * this function can be called from contexts where we cannot sleep.
2047 highest = (oldstate > state) ? oldstate : state;
2048 if (highest == PCI_POWERSTATE_D3)
2050 else if (highest == PCI_POWERSTATE_D2)
/* Read-modify-write: preserve everything but the power-state field. */
2054 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2055 & ~PCIM_PSTAT_DMASK;
2058 case PCI_POWERSTATE_D0:
2059 status |= PCIM_PSTAT_D0;
2061 case PCI_POWERSTATE_D1:
2062 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2063 return (EOPNOTSUPP);
2064 status |= PCIM_PSTAT_D1;
2066 case PCI_POWERSTATE_D2:
2067 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2068 return (EOPNOTSUPP);
2069 status |= PCIM_PSTAT_D2;
2071 case PCI_POWERSTATE_D3:
2072 status |= PCIM_PSTAT_D3;
2079 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2082 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
/*
 * Implements PCI_GET_POWERSTATE(): read the child's current D-state from
 * its power-management status register.  Devices without the capability
 * are always reported as D0.
 */
2089 pci_get_powerstate_method(device_t dev, device_t child)
2091 struct pci_devinfo *dinfo = device_get_ivars(child);
2092 pcicfgregs *cfg = &dinfo->cfg;
2096 if (cfg->pp.pp_cap != 0) {
2097 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2098 switch (status & PCIM_PSTAT_DMASK) {
2100 result = PCI_POWERSTATE_D0;
2103 result = PCI_POWERSTATE_D1;
2106 result = PCI_POWERSTATE_D2;
2109 result = PCI_POWERSTATE_D3;
2112 result = PCI_POWERSTATE_UNKNOWN;
2116 /* No support, device is always at D0 */
2117 result = PCI_POWERSTATE_D0;
2123 * Some convenience functions for PCI device drivers.
/* Read-modify-write helper: set 'bit' in the child's PCI command register. */
2126 static __inline void
2127 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2131 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2133 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Read-modify-write helper: clear 'bit' in the child's PCI command register. */
2136 static __inline void
2137 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2141 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2143 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
/* Implements PCI_ENABLE_BUSMASTER(): set the bus-master enable bit. */
2147 pci_enable_busmaster_method(device_t dev, device_t child)
2149 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/* Implements PCI_DISABLE_BUSMASTER(): clear the bus-master enable bit. */
2154 pci_disable_busmaster_method(device_t dev, device_t child)
2156 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
/*
 * Implements PCI_ENABLE_IO(): enable I/O-port or memory decoding for the
 * child, selected by 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2161 pci_enable_io_method(device_t dev, device_t child, int space)
2166 case SYS_RES_IOPORT:
2167 bit = PCIM_CMD_PORTEN;
2169 case SYS_RES_MEMORY:
2170 bit = PCIM_CMD_MEMEN;
2175 pci_set_command_bit(dev, child, bit);
/*
 * Implements PCI_DISABLE_IO(): disable I/O-port or memory decoding for the
 * child, selected by 'space' (SYS_RES_IOPORT or SYS_RES_MEMORY).
 */
2180 pci_disable_io_method(device_t dev, device_t child, int space)
2185 case SYS_RES_IOPORT:
2186 bit = PCIM_CMD_PORTEN;
2188 case SYS_RES_MEMORY:
2189 bit = PCIM_CMD_MEMEN;
2194 pci_clear_command_bit(dev, child, bit);
2199 * New style pci driver. Parent device is either a pci-host-bridge or a
2200 * pci-pci-bridge. Both kinds are represented by instances of pcib.
/*
 * Dump a human-readable summary of a discovered device's config header:
 * IDs, location, class, command/status, timers, interrupt routing, and the
 * power-management, MSI, and MSI-X capabilities when present.
 */
2204 pci_print_verbose(struct pci_devinfo *dinfo)
2208 pcicfgregs *cfg = &dinfo->cfg;
2210 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2211 cfg->vendor, cfg->device, cfg->revid);
2212 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2213 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2214 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2215 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2217 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2218 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2219 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2220 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2221 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2222 if (cfg->intpin > 0)
/* intpin 1..4 maps to INTA..INTD ('a' + intpin - 1). */
2223 printf("\tintpin=%c, irq=%d\n",
2224 cfg->intpin +'a' -1, cfg->intline);
2225 if (cfg->pp.pp_cap) {
2228 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2229 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2230 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2231 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2232 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2233 status & PCIM_PSTAT_DMASK);
2235 if (cfg->msi.msi_location) {
2238 ctrl = cfg->msi.msi_ctrl;
2239 printf("\tMSI supports %d message%s%s%s\n",
2240 cfg->msi.msi_msgnum,
2241 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2242 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2243 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2245 if (cfg->msix.msix_location) {
2246 printf("\tMSI-X supports %d message%s ",
2247 cfg->msix.msix_msgnum,
2248 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2249 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2250 printf("in map 0x%x\n",
2251 cfg->msix.msix_table_bar);
2253 printf("in maps 0x%x and 0x%x\n",
2254 cfg->msix.msix_table_bar,
2255 cfg->msix.msix_pba_bar);
/* Is I/O-port decoding enabled in the device's command register? */
2261 pci_porten(device_t dev)
2263 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
/* Is memory decoding enabled in the device's command register? */
2267 pci_memen(device_t dev)
2269 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Read a BAR's current value into *mapp and probe its size by the
 * standard write-all-ones technique, returning the sizing read-back in
 * *testvalp.  Handles 64-bit BARs (two consecutive dwords) and restores
 * both the BAR and the command register afterwards.
 */
2273 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2275 pci_addr_t map, testval;
2279 map = pci_read_config(dev, reg, 4);
2280 ln2range = pci_maprange(map);
/* 64-bit BAR: the high half lives in the next dword register. */
2282 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2285 * Disable decoding via the command register before
2286 * determining the BAR's length since we will be placing it in
2289 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2290 pci_write_config(dev, PCIR_COMMAND,
2291 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2294 * Determine the BAR's length by writing all 1's. The bottom
2295 * log_2(size) bits of the BAR will stick as 0 when we read
2298 pci_write_config(dev, reg, 0xffffffff, 4);
2299 testval = pci_read_config(dev, reg, 4);
2300 if (ln2range == 64) {
2301 pci_write_config(dev, reg + 4, 0xffffffff, 4);
2302 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2306 * Restore the original value of the BAR. We may have reprogrammed
2307 * the BAR of the low-level console device and when booting verbose,
2308 * we need the console device addressable.
2310 pci_write_config(dev, reg, map, 4);
2312 pci_write_config(dev, reg + 4, map >> 32, 4);
2313 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2316 *testvalp = testval;
/*
 * Program a BAR with 'base', writing the high dword as well when the BAR's
 * type field indicates a 64-bit range.
 */
2320 pci_write_bar(device_t dev, int reg, pci_addr_t base)
2325 map = pci_read_config(dev, reg, 4);
2326 ln2range = pci_maprange(map);
2327 pci_write_config(dev, reg, base, 4);
2329 pci_write_config(dev, reg + 4, base >> 32, 4);
2333 * Add a resource based on a pci map register. Return 1 if the map
2334 * register is a 32bit map register or 2 if it is a 64bit register.
/*
 * Add a resource based on a PCI BAR: size the BAR, validate it, add a
 * resource-list entry, and pre-reserve the range from the parent so the
 * driver later inherits it via pci_alloc_resource().  Returns the number
 * of dword registers the BAR occupies (1 for 32-bit, 2 for 64-bit).
 *
 * Fix: the resource_list_alloc() rid argument had been corrupted to the
 * mojibake character U+00AE ("registered" sign) — an HTML-entity-decoded
 * "&reg" — which does not compile; restore "&reg".  Also corrected the
 * "overriden" spelling in the comment below.
 */
2337 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2338 int force, int prefetch)
2340 pci_addr_t base, map, testval;
2341 pci_addr_t start, end, count;
2342 int barlen, basezero, maprange, mapsize, type;
2344 struct resource *res;
2346 pci_read_bar(dev, reg, &map, &testval);
2347 if (PCI_BAR_MEM(map)) {
2348 type = SYS_RES_MEMORY;
2349 if (map & PCIM_BAR_MEM_PREFETCH)
2352 type = SYS_RES_IOPORT;
2353 mapsize = pci_mapsize(testval);
2354 base = pci_mapbase(map);
2355 #ifdef __PCI_BAR_ZERO_VALID
2358 basezero = base == 0;
2360 maprange = pci_maprange(map);
2361 barlen = maprange == 64 ? 2 : 1;
2364 * For I/O registers, if bottom bit is set, and the next bit up
2365 * isn't clear, we know we have a BAR that doesn't conform to the
2366 * spec, so ignore it. Also, sanity check the size of the data
2367 * areas to the type of memory involved. Memory must be at least
2368 * 16 bytes in size, while I/O ranges must be at least 4.
2370 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2372 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2373 (type == SYS_RES_IOPORT && mapsize < 2))
2377 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2378 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2379 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2380 printf(", port disabled\n");
2381 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2382 printf(", memory disabled\n");
2384 printf(", enabled\n");
2388 * If base is 0, then we have problems if this architecture does
2389 * not allow that. It is best to ignore such entries for the
2390 * moment. These will be allocated later if the driver specifically
2391 * requests them. However, some removable busses look better when
2392 * all resources are allocated, so allow '0' to be overridden.
2394 * Similarly treat maps whose values is the same as the test value
2395 * read back. These maps have had all f's written to them by the
2396 * BIOS in an attempt to disable the resources.
2398 if (!force && (basezero || map == testval))
2400 if ((u_long)base != base) {
2402 "pci%d:%d:%d:%d bar %#x too many address bits",
2403 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2404 pci_get_function(dev), reg);
2409 * This code theoretically does the right thing, but has
2410 * undesirable side effects in some cases where peripherals
2411 * respond oddly to having these bits enabled. Let the user
2412 * be able to turn them off (since pci_enable_io_modes is 1 by
2415 if (pci_enable_io_modes) {
2416 /* Turn on resources that have been left off by a lazy BIOS */
2417 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2418 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2419 cmd |= PCIM_CMD_PORTEN;
2420 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2422 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2423 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2424 cmd |= PCIM_CMD_MEMEN;
2425 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2428 if (type == SYS_RES_IOPORT && !pci_porten(dev))
2430 if (type == SYS_RES_MEMORY && !pci_memen(dev))
2434 count = 1 << mapsize;
2435 if (basezero || base == pci_mapbase(testval)) {
2436 start = 0; /* Let the parent decide. */
2440 end = base + (1 << mapsize) - 1;
2442 resource_list_add(rl, type, reg, start, end, count);
2445 * Try to allocate the resource for this BAR from our parent
2446 * so that this resource range is already reserved. The
2447 * driver for this device will later inherit this resource in
2448 * pci_alloc_resource().
2450 res = resource_list_alloc(rl, bus, dev, type, &reg, start, end, count,
2451 prefetch ? RF_PREFETCHABLE : 0);
2454 * If the allocation fails, clear the BAR and delete
2455 * the resource list entry to force
2456 * pci_alloc_resource() to allocate resources from the
2459 resource_list_delete(rl, type, reg);
2462 start = rman_get_start(res);
2463 rman_set_device(res, bus);
2465 pci_write_bar(dev, reg, start);
2470 * For ATA devices we need to decide early what addressing mode to use.
2471 * Legacy demands that the primary and secondary ATA ports sit on the
2472 * same addresses that old ISA hardware did. This dictates that we use
2473 * those addresses and ignore the BAR's if we cannot set PCI native
/*
 * Set up resources for an ATA controller.  If the device supports PCI
 * native mode (progif bits), try to switch to it; otherwise map each
 * legacy channel at the fixed ISA-compatible I/O addresses (0x1f0/0x3f6
 * primary, 0x170/0x376 secondary).  BAR(4)/BAR(5) (bus-master DMA, etc.)
 * are always probed as normal BARs.
 */
2477 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2478 uint32_t prefetchmask)
2481 int rid, type, progif;
2483 /* if this device supports PCI native addressing use it */
2484 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2485 if ((progif & 0x8a) == 0x8a) {
2486 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2487 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2488 printf("Trying ATA native PCI addressing mode\n");
/* Setting bits 0 and 2 of progif switches both channels to native mode. */
2489 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2493 progif = pci_read_config(dev, PCIR_PROGIF, 1);
2494 type = SYS_RES_IOPORT;
2495 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2496 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2497 prefetchmask & (1 << 0));
2498 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2499 prefetchmask & (1 << 1));
2502 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2503 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7,
2505 rman_set_device(r, bus);
2507 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2508 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6,
2510 rman_set_device(r, bus);
2512 if (progif & PCIP_STORAGE_IDE_MODESEC) {
2513 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2514 prefetchmask & (1 << 2));
2515 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2516 prefetchmask & (1 << 3));
2519 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2520 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177,
2522 rman_set_device(r, bus);
2524 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2525 r = resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376,
2527 rman_set_device(r, bus);
2529 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
2530 prefetchmask & (1 << 4));
2531 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
2532 prefetchmask & (1 << 5));
/*
 * Work out the legacy INTx IRQ for a device and register it as the rid 0
 * SYS_RES_IRQ resource.  Priority order: a user "hw.pci%d.%d.%d.INTx.irq"
 * tunable, then the intline register or a bus-routed interrupt depending
 * on 'force_route'.  Writes the final IRQ back to intline if it changed.
 */
2536 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2538 struct pci_devinfo *dinfo = device_get_ivars(dev);
2539 pcicfgregs *cfg = &dinfo->cfg;
2540 char tunable_name[64];
2543 /* Has to have an intpin to have an interrupt. */
2544 if (cfg->intpin == 0)
2547 /* Let the user override the IRQ with a tunable. */
2548 irq = PCI_INVALID_IRQ;
2549 snprintf(tunable_name, sizeof(tunable_name),
2550 "hw.pci%d.%d.%d.INT%c.irq",
2551 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
/* Only accept tunable values in the valid 1..254 range. */
2552 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2553 irq = PCI_INVALID_IRQ;
2556 * If we didn't get an IRQ via the tunable, then we either use the
2557 * IRQ value in the intline register or we ask the bus to route an
2558 * interrupt for us. If force_route is true, then we only use the
2559 * value in the intline register if the bus was unable to assign an
2562 if (!PCI_INTERRUPT_VALID(irq)) {
2563 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2564 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2565 if (!PCI_INTERRUPT_VALID(irq))
2569 /* If after all that we don't have an IRQ, just bail. */
2570 if (!PCI_INTERRUPT_VALID(irq))
2573 /* Update the config register if it changed. */
2574 if (irq != cfg->intline) {
2576 pci_write_config(dev, PCIR_INTLINE, irq, 1);
2579 /* Add this IRQ as rid 0 interrupt resource. */
2580 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
2583 /* Perform early OHCI takeover from SMM. */
/*
 * Request ownership of the OHCI controller from SMM firmware by setting
 * the ownership-change-request bit, poll for the interrupt-routing bit
 * to clear, and reset the controller if SMM never releases it.  Finally
 * mask all controller interrupts.
 */
2585 ohci_early_takeover(device_t self)
2587 struct resource *res;
2593 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
2597 ctl = bus_read_4(res, OHCI_CONTROL);
/* OHCI_IR set means SMM currently owns the controller. */
2598 if (ctl & OHCI_IR) {
2600 printf("ohci early: "
2601 "SMM active, request owner change\n");
2602 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
/* Poll up to 100 iterations for SMM to hand over ownership. */
2603 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
2605 ctl = bus_read_4(res, OHCI_CONTROL);
2607 if (ctl & OHCI_IR) {
2609 printf("ohci early: "
2610 "SMM does not respond, resetting\n");
2611 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
2613 /* Disable interrupts */
2614 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
2617 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
2620 /* Perform early UHCI takeover from SMM. */
/*
 * Disable UHCI legacy (SMM/BIOS) support by writing the LEGSUP config
 * register, then mask the controller's interrupts via its I/O BAR.
 */
2622 uhci_early_takeover(device_t self)
2624 struct resource *res;
2628 * Set the PIRQD enable bit and switch off all the others. We don't
2629 * want legacy support to interfere with us XXX Does this also mean
2630 * that the BIOS won't touch the keyboard anymore if it is connected
2631 * to the ports of the root hub?
2633 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
2635 /* Disable interrupts */
2636 rid = PCI_UHCI_BASE_REG;
2637 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
2639 bus_write_2(res, UHCI_INTR, 0);
2640 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
2644 /* Perform early EHCI takeover from SMM. */
/*
 * Walk the EHCI extended-capability list in config space looking for the
 * legacy-support capability.  If the BIOS semaphore is held, set the OS
 * semaphore and poll for the BIOS to release; then mask the controller's
 * interrupts via the operational registers.
 */
2646 ehci_early_takeover(device_t self)
2648 struct resource *res;
2658 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
2662 cparams = bus_read_4(res, EHCI_HCCPARAMS);
2664 /* Synchronise with the BIOS if it owns the controller. */
2665 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
2666 eecp = EHCI_EECP_NEXT(eec)) {
2667 eec = pci_read_config(self, eecp, 4);
/* Only the legacy-support capability is of interest here. */
2668 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
2671 bios_sem = pci_read_config(self, eecp +
2672 EHCI_LEGSUP_BIOS_SEM, 1);
/* BIOS semaphore clear: BIOS does not own the controller. */
2673 if (bios_sem == 0) {
2677 printf("ehci early: "
2678 "SMM active, request owner change\n");
2680 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
/* Poll up to 100 iterations for the BIOS to drop its semaphore. */
2682 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
2684 bios_sem = pci_read_config(self, eecp +
2685 EHCI_LEGSUP_BIOS_SEM, 1);
2688 if (bios_sem != 0) {
2690 printf("ehci early: "
2691 "SMM does not respond\n");
2693 /* Disable interrupts */
/* Operational registers start CAPLENGTH bytes into the BAR. */
2694 offs = bus_read_1(res, EHCI_CAPLENGTH);
2695 bus_write_4(res, offs + EHCI_USBINTR, 0);
2697 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/*
 * Populate the resource list for child 'dev' from its BARs, apply
 * per-device quirks, route its interrupt if one is wired, and perform
 * early USB controller takeover from SMM where enabled.
 */
2701 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
2703 struct pci_devinfo *dinfo = device_get_ivars(dev);
2704 pcicfgregs *cfg = &dinfo->cfg;
2705 struct resource_list *rl = &dinfo->resources;
2706 struct pci_quirk *q;
2709 /* ATA devices needs special map treatment */
/*
 * Legacy-mode IDE (master-device progif bit, or both primary BARs
 * reading zero) uses fixed compatibility addresses handled by
 * pci_ata_maps() rather than the normal BAR scan.
 */
2710 if ((pci_get_class(dev) == PCIC_STORAGE) &&
2711 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
2712 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
2713 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
2714 !pci_read_config(dev, PCIR_BAR(2), 4))) )
2715 pci_ata_maps(bus, dev, rl, force, prefetchmask);
/* pci_add_map() returns how many registers the BAR consumed (1 or 2). */
2717 for (i = 0; i < cfg->nummaps;)
2718 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
2719 prefetchmask & (1 << i));
2722 * Add additional, quirked resources.
2724 for (q = &pci_quirks[0]; q->devid; q++) {
2725 if (q->devid == ((cfg->device << 16) | cfg->vendor)
2726 && q->type == PCI_QUIRK_MAP_REG)
2727 pci_add_map(bus, dev, q->arg1, rl, force, 0);
2730 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
2731 #ifdef __PCI_REROUTE_INTERRUPT
2733 * Try to re-route interrupts. Sometimes the BIOS or
2734 * firmware may leave bogus values in these registers.
2735 * If the re-route fails, then just stick with what we
2738 pci_assign_interrupt(bus, dev, 1);
2740 pci_assign_interrupt(bus, dev, 0);
/* Early takeover of USB controllers from SMM, when enabled by tunable. */
2744 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
2745 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
2746 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
2747 ehci_early_takeover(dev);
2748 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
2749 ohci_early_takeover(dev);
2750 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
2751 uhci_early_takeover(dev);
/*
 * Enumerate every slot/function on bus 'busno' in domain 'domain' and
 * add a child device for each function that responds.  dinfo_size lets
 * subclasses embed pci_devinfo at the head of a larger structure.
 */
2756 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
2758 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
2759 device_t pcib = device_get_parent(dev);
2760 struct pci_devinfo *dinfo;
2762 int s, f, pcifunchigh;
2765 KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
2766 ("dinfo_size too small"));
2767 maxslots = PCIB_MAXSLOTS(pcib);
2768 for (s = 0; s <= maxslots; s++) {
2772 hdrtype = REG(PCIR_HDRTYPE, 1);
/* Skip slots with an unknown header type. */
2773 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
/* Multi-function bit: scan all functions, not just function 0. */
2775 if (hdrtype & PCIM_MFDEV)
2776 pcifunchigh = PCI_FUNCMAX;
2777 for (f = 0; f <= pcifunchigh; f++) {
2778 dinfo = pci_read_device(pcib, domain, busno, s, f,
2780 if (dinfo != NULL) {
2781 pci_add_child(dev, dinfo);
/*
 * Create the newbus child for a probed PCI function: attach the devinfo
 * as ivars, initialize its resource list, snapshot and restore its
 * config space, and add its BAR/IRQ resources.
 */
2789 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
2791 dinfo->cfg.dev = device_add_child(bus, NULL, -1);
2792 device_set_ivars(dinfo->cfg.dev, dinfo);
2793 resource_list_init(&dinfo->resources);
/* Save then restore so a powered-down device comes up with sane state. */
2794 pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
2795 pci_cfg_restore(dinfo->cfg.dev, dinfo);
2796 pci_print_verbose(dinfo);
2797 pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/* Generic PCI bus probe: always matches, at low priority. */
2801 pci_probe(device_t dev)
2804 device_set_desc(dev, "PCI bus");
2806 /* Allow other subclasses to override this driver. */
2807 return (BUS_PROBE_GENERIC);
/*
 * Attach the PCI bus: learn our domain and bus number from the parent
 * bridge, enumerate children, and attach their drivers.
 */
2811 pci_attach(device_t dev)
2816 * Since there can be multiple independently numbered PCI
2817 * busses on systems with multiple PCI domains, we can't use
2818 * the unit number to decide which bus we are probing. We ask
2819 * the parent pcib what our domain and bus numbers are.
2821 domain = pcib_get_domain(dev);
2822 busno = pcib_get_bus(dev);
2824 device_printf(dev, "domain=%d, physical bus=%d\n",
2826 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
2827 return (bus_generic_attach(dev));
/*
 * Suspend the bus: save each child's config space, suspend the children,
 * then (when ACPI is present and power management is enabled) place each
 * attached type 0 child in the sleep power state ACPI suggests.
 */
2831 pci_suspend(device_t dev)
2833 int dstate, error, i, numdevs;
2834 device_t acpi_dev, child, *devlist;
2835 struct pci_devinfo *dinfo;
2838 * Save the PCI configuration space for each child and set the
2839 * device in the appropriate power state for this sleep state.
/* NOTE(review): acpi_dev is presumably initialized to NULL on an elided line. */
2842 if (pci_do_power_resume)
2843 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2844 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
2846 for (i = 0; i < numdevs; i++) {
2848 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2849 pci_cfg_save(child, dinfo, 0);
2852 /* Suspend devices before potentially powering them down. */
2853 error = bus_generic_suspend(dev);
2855 free(devlist, M_TEMP);
2860 * Always set the device to D3. If ACPI suggests a different
2861 * power state, use it instead. If ACPI is not present, the
2862 * firmware is responsible for managing device power. Skip
2863 * children who aren't attached since they are powered down
2864 * separately. Only manage type 0 devices for now.
2866 for (i = 0; acpi_dev && i < numdevs; i++) {
2868 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2869 if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
2870 dstate = PCI_POWERSTATE_D3;
2871 ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
2872 pci_set_powerstate(child, dstate);
2875 free(devlist, M_TEMP);
/*
 * Resume the bus: power each attached type 0 child back to D0 (via ACPI
 * when present), restore its saved config space, and resume the children.
 */
2880 pci_resume(device_t dev)
2882 int i, numdevs, error;
2883 device_t acpi_dev, child, *devlist;
2884 struct pci_devinfo *dinfo;
2887 * Set each child to D0 and restore its PCI configuration space.
/* NOTE(review): acpi_dev is presumably initialized to NULL on an elided line. */
2890 if (pci_do_power_resume)
2891 acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
2892 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
2894 for (i = 0; i < numdevs; i++) {
2896 * Notify ACPI we're going to D0 but ignore the result. If
2897 * ACPI is not present, the firmware is responsible for
2898 * managing device power. Only manage type 0 devices for now.
2901 dinfo = (struct pci_devinfo *) device_get_ivars(child);
2902 if (acpi_dev && device_is_attached(child) &&
2903 dinfo->cfg.hdrtype == 0) {
2904 ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
2905 pci_set_powerstate(child, PCI_POWERSTATE_D0);
2908 /* Now the device is powered up, restore its config space. */
2909 pci_cfg_restore(child, dinfo);
2911 free(devlist, M_TEMP);
2912 return (bus_generic_resume(dev));
/*
 * Locate a preloaded PCI vendor/device description database and record
 * its address and size in the module-level pci_vendordata variables.
 * A trailing newline is stamped on the data so the parser always finds
 * a line terminator even if the file is truncated.
 */
2916 pci_load_vendor_data(void)
2918 caddr_t vendordata, info;
2920 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2921 info = preload_search_info(vendordata, MODINFO_ADDR);
2922 pci_vendordata = *(char **)info;
2923 info = preload_search_info(vendordata, MODINFO_SIZE);
2924 pci_vendordata_size = *(size_t *)info;
2925 /* terminate the database */
2926 pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Called when a new PCI driver is loaded: give the driver a chance to
 * identify devices, then reprobe every child that has no driver yet,
 * restoring its config state first and saving it again if no driver
 * attaches.
 */
2931 pci_driver_added(device_t dev, driver_t *driver)
2936 struct pci_devinfo *dinfo;
2940 device_printf(dev, "driver added\n");
2941 DEVICE_IDENTIFY(driver, dev);
2942 if (device_get_children(dev, &devlist, &numdevs) != 0)
2944 for (i = 0; i < numdevs; i++) {
/* Only reprobe children that currently have no driver attached. */
2946 if (device_get_state(child) != DS_NOTPRESENT)
2948 dinfo = device_get_ivars(child);
2949 pci_print_verbose(dinfo);
2951 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
2952 pci_cfg_restore(child, dinfo);
/* No driver took the device: power it back down / save state. */
2953 if (device_probe_and_attach(child) != 0)
2954 pci_cfg_save(child, dinfo, 1);
2956 free(devlist, M_TEMP);
/*
 * Set up an interrupt handler for a child.  After the generic setup,
 * direct children get MSI/MSI-X programming: the parent bridge maps the
 * vector to an address/data pair, the message is enabled on first
 * handler, and INTx is disabled while MSI/MSI-X is in use.  On mapping
 * failure the just-installed handler is torn down again.
 */
2960 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
2961 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
2963 struct pci_devinfo *dinfo;
2964 struct msix_table_entry *mte;
2965 struct msix_vector *mv;
2971 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
2976 /* If this is not a direct child, just bail out. */
2977 if (device_get_parent(child) != dev) {
2982 rid = rman_get_rid(irq);
/* rid 0 is the legacy INTx interrupt; MSI/MSI-X use rid >= 1. */
2984 /* Make sure that INTx is enabled */
2985 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
2988 * Check to see if the interrupt is MSI or MSI-X.
2989 * Ask our parent to map the MSI and give
2990 * us the address and data register values.
2991 * If we fail for some reason, teardown the
2992 * interrupt handler.
2994 dinfo = device_get_ivars(child);
2995 if (dinfo->cfg.msi.msi_alloc > 0) {
/* Lazily map the MSI message the first time a handler is added. */
2996 if (dinfo->cfg.msi.msi_addr == 0) {
2997 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
2998 ("MSI has handlers, but vectors not mapped"));
2999 error = PCIB_MAP_MSI(device_get_parent(dev),
3000 child, rman_get_start(irq), &addr, &data);
3003 dinfo->cfg.msi.msi_addr = addr;
3004 dinfo->cfg.msi.msi_data = data;
3006 if (dinfo->cfg.msi.msi_handlers == 0)
3007 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3008 dinfo->cfg.msi.msi_data);
3009 dinfo->cfg.msi.msi_handlers++;
3011 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3012 ("No MSI or MSI-X interrupts allocated"));
3013 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3014 ("MSI-X index too high"));
/* MSI-X rids are 1-based; table entries are 0-based. */
3015 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3016 KASSERT(mte->mte_vector != 0, ("no message vector"));
3017 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3018 KASSERT(mv->mv_irq == rman_get_start(irq),
3020 if (mv->mv_address == 0) {
3021 KASSERT(mte->mte_handlers == 0,
3022 ("MSI-X table entry has handlers, but vector not mapped"));
3023 error = PCIB_MAP_MSI(device_get_parent(dev),
3024 child, rman_get_start(irq), &addr, &data);
3027 mv->mv_address = addr;
3030 if (mte->mte_handlers == 0) {
3031 pci_enable_msix(child, rid - 1, mv->mv_address,
3033 pci_unmask_msix(child, rid - 1);
3035 mte->mte_handlers++;
3038 /* Make sure that INTx is disabled if we are using MSI/MSIX */
3039 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
/* Error path: undo the generic setup done above. */
3042 (void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Tear down an interrupt handler.  For direct children using MSI/MSI-X,
 * decrement the handler count and disable/mask the message when the last
 * handler goes away, then perform the generic teardown.
 */
3052 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3055 struct msix_table_entry *mte;
3056 struct resource_list_entry *rle;
3057 struct pci_devinfo *dinfo;
3060 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3063 /* If this isn't a direct child, just bail out */
3064 if (device_get_parent(child) != dev)
3065 return(bus_generic_teardown_intr(dev, child, irq, cookie));
3067 rid = rman_get_rid(irq);
/* rid 0 (INTx): mask it by setting the command-register disable bit. */
3070 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3073 * Check to see if the interrupt is MSI or MSI-X. If so,
3074 * decrement the appropriate handlers count and mask the
3075 * MSI-X message, or disable MSI messages if the count
3078 dinfo = device_get_ivars(child);
3079 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
/* The resource being torn down must be the one we track for this rid. */
3080 if (rle->res != irq)
3082 if (dinfo->cfg.msi.msi_alloc > 0) {
3083 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3084 ("MSI-X index too high"));
3085 if (dinfo->cfg.msi.msi_handlers == 0)
3087 dinfo->cfg.msi.msi_handlers--;
3088 if (dinfo->cfg.msi.msi_handlers == 0)
3089 pci_disable_msi(child);
3091 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3092 ("No MSI or MSI-X interrupts allocated"));
3093 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3094 ("MSI-X index too high"));
3095 mte = &dinfo->cfg.msix.msix_table[rid - 1];
3096 if (mte->mte_handlers == 0)
3098 mte->mte_handlers--;
3099 if (mte->mte_handlers == 0)
3100 pci_mask_msix(child, rid - 1);
3103 error = bus_generic_teardown_intr(dev, child, irq, cookie);
3106 ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Print a one-line description of a child device: its resources (ports,
 * memory, IRQs), flags, and slot.function location.  Returns the number
 * of characters printed, per the bus_print_child convention.
 */
3111 pci_print_child(device_t dev, device_t child)
3113 struct pci_devinfo *dinfo;
3114 struct resource_list *rl;
3117 dinfo = device_get_ivars(child);
3118 rl = &dinfo->resources;
3120 retval += bus_print_child_header(dev, child);
3122 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3123 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3124 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3125 if (device_get_flags(dev))
3126 retval += printf(" flags %#x", device_get_flags(dev));
3128 retval += printf(" at device %d.%d", pci_get_slot(child),
3129 pci_get_function(child));
3131 retval += bus_print_child_footer(dev, child);
3141 } pci_nomatch_tab[] = {
/*
 * Human-readable names for PCI class/subclass codes, used by
 * pci_probe_nomatch() to describe devices with no driver.  A subclass
 * of -1 names the class as a whole; specific subclasses follow their
 * class entry.
 */
3142 {PCIC_OLD, -1, "old"},
3143 {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
3144 {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
3145 {PCIC_STORAGE, -1, "mass storage"},
3146 {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
3147 {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
3148 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
3149 {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
3150 {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
3151 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
3152 {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
3153 {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
3154 {PCIC_NETWORK, -1, "network"},
3155 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
3156 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
3157 {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
3158 {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
3159 {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
3160 {PCIC_DISPLAY, -1, "display"},
3161 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
3162 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
3163 {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
3164 {PCIC_MULTIMEDIA, -1, "multimedia"},
3165 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
3166 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
3167 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
3168 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
3169 {PCIC_MEMORY, -1, "memory"},
3170 {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
3171 {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
3172 {PCIC_BRIDGE, -1, "bridge"},
3173 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
3174 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
3175 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
3176 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
3177 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
3178 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
3179 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
3180 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
3181 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
3182 {PCIC_SIMPLECOMM, -1, "simple comms"},
3183 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
3184 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
3185 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
3186 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
3187 {PCIC_BASEPERIPH, -1, "base peripheral"},
3188 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
3189 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
3190 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
3191 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
3192 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
3193 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
3194 {PCIC_INPUTDEV, -1, "input device"},
3195 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
3196 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3197 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
3198 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
3199 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
3200 {PCIC_DOCKING, -1, "docking station"},
3201 {PCIC_PROCESSOR, -1, "processor"},
3202 {PCIC_SERIALBUS, -1, "serial bus"},
3203 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
3204 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
3205 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
3206 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
3207 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
3208 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
3209 {PCIC_WIRELESS, -1, "wireless controller"},
3210 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
3211 {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
3212 {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
3213 {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
3214 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
3215 {PCIC_SATCOM, -1, "satellite communication"},
3216 {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
3217 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
3218 {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
3219 {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
3220 {PCIC_CRYPTO, -1, "encrypt/decrypt"},
3221 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
3222 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
3223 {PCIC_DASP, -1, "dasp"},
3224 {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
/*
 * Report a device that no driver claimed: use the loaded vendor database
 * description if available, otherwise a generic class/subclass name from
 * pci_nomatch_tab, then save config state and power the device down.
 */
3229 pci_probe_nomatch(device_t dev, device_t child)
3232 char *cp, *scp, *device;
3235 * Look for a listing for this device in a loaded device database.
3237 if ((device = pci_describe_device(child)) != NULL) {
3238 device_printf(dev, "<%s>", device);
3239 free(device, M_DEVBUF);
3242 * Scan the class/subclass descriptions for a general
3247 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3248 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
/* subclass == -1 entries name the class; exact matches name the subclass. */
3249 if (pci_nomatch_tab[i].subclass == -1) {
3250 cp = pci_nomatch_tab[i].desc;
3251 } else if (pci_nomatch_tab[i].subclass ==
3252 pci_get_subclass(child)) {
3253 scp = pci_nomatch_tab[i].desc;
3257 device_printf(dev, "<%s%s%s>",
3259 ((cp != NULL) && (scp != NULL)) ? ", " : "",
3262 printf(" at device %d.%d (no driver attached)\n",
3263 pci_get_slot(child), pci_get_function(child));
3264 pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1);
3269 * Parse the PCI device database, if loaded, and return a pointer to a
3270 * description of the device.
3272 * The database is flat text formatted as follows:
3274 * Any line not in a valid format is ignored.
3275 * Lines are terminated with newline '\n' characters.
3277 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
3280 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
3281 * - devices cannot be listed without a corresponding VENDOR line.
3282 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
3283 * another TAB, then the device name.
3287 * Assuming (ptr) points to the beginning of a line in the database,
3288 * return the vendor or device and description of the next entry.
3289 * The value of (vendor) or (device) inappropriate for the entry type
3290 * is set to -1. Returns nonzero at the end of the database.
3292 * Note that this is somewhat fragile in the face of corrupt data;
3293 * we attempt to safeguard against this by spamming the end of the
3294 * database with a newline when we initialise.
/*
 * Parse the next entry from the vendor database at *ptr, returning the
 * vendor or device code (the one not applicable is set to -1 by elided
 * code) and a description in *desc.  Advances *ptr past the entry;
 * returns nonzero at end of database.  Descriptions are at most 80
 * characters (see the %80[^\n] scan limits).
 */
3297 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
3306 left = pci_vendordata_size - (cp - pci_vendordata);
/* A line starting at a hex code is a vendor entry; TAB-indented is a device. */
3314 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
3318 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
3321 /* skip to next line */
3322 while (*cp != '\n' && left > 0) {
3331 /* skip to next line */
3332 while (*cp != '\n' && left > 0) {
3336 if (*cp == '\n' && left > 0)
/*
 * Return a malloc'd (M_DEVBUF) "vendor, device" description string for
 * dev from the loaded vendor database, or NULL if no database is loaded
 * or allocation fails.  Caller frees the result.  If the vendor matches
 * but the device does not, the device part falls back to its hex ID.
 */
3343 pci_describe_device(device_t dev)
3346 char *desc, *vp, *dp, *line;
3348 desc = vp = dp = NULL;
3351 * If we have no vendor data, we can't do anything.
3353 if (pci_vendordata == NULL)
3357 * Scan the vendor data looking for this device
3359 line = pci_vendordata;
/* 80-byte buffers match the parser's %80[^\n] field width. */
3360 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3363 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
3365 if (vendor == pci_get_vendor(dev))
3368 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
3371 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
3379 if (device == pci_get_device(dev))
/* Unknown device under a known vendor: describe it by hex ID. */
3383 snprintf(dp, 80, "0x%x", pci_get_device(dev));
3384 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
3386 sprintf(desc, "%s, %s", vp, dp);
/*
 * Instance-variable read accessor: return the requested field from the
 * child's cached config registers via *result.
 */
3396 pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
3398 struct pci_devinfo *dinfo;
3401 dinfo = device_get_ivars(child);
3405 case PCI_IVAR_ETHADDR:
3407 * The generic accessor doesn't deal with failure, so
3408 * we set the return value, then return an error.
3410 *((uint8_t **) result) = NULL;
3412 case PCI_IVAR_SUBVENDOR:
3413 *result = cfg->subvendor;
3415 case PCI_IVAR_SUBDEVICE:
3416 *result = cfg->subdevice;
3418 case PCI_IVAR_VENDOR:
3419 *result = cfg->vendor;
3421 case PCI_IVAR_DEVICE:
3422 *result = cfg->device;
3424 case PCI_IVAR_DEVID:
/* Combined ID: device in the high 16 bits, vendor in the low 16. */
3425 *result = (cfg->device << 16) | cfg->vendor;
3427 case PCI_IVAR_CLASS:
3428 *result = cfg->baseclass;
3430 case PCI_IVAR_SUBCLASS:
3431 *result = cfg->subclass;
3433 case PCI_IVAR_PROGIF:
3434 *result = cfg->progif;
3436 case PCI_IVAR_REVID:
3437 *result = cfg->revid;
3439 case PCI_IVAR_INTPIN:
3440 *result = cfg->intpin;
3443 *result = cfg->intline;
3445 case PCI_IVAR_DOMAIN:
3446 *result = cfg->domain;
3452 *result = cfg->slot;
3454 case PCI_IVAR_FUNCTION:
3455 *result = cfg->func;
3457 case PCI_IVAR_CMDREG:
3458 *result = cfg->cmdreg;
3460 case PCI_IVAR_CACHELNSZ:
3461 *result = cfg->cachelnsz;
3463 case PCI_IVAR_MINGNT:
3464 *result = cfg->mingnt;
3466 case PCI_IVAR_MAXLAT:
3467 *result = cfg->maxlat;
3469 case PCI_IVAR_LATTIMER:
3470 *result = cfg->lattimer;
/*
 * Instance-variable write accessor: only the interrupt pin is writable;
 * all identification fields are read-only and return EINVAL.
 */
3479 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
3481 struct pci_devinfo *dinfo;
3483 dinfo = device_get_ivars(child);
3486 case PCI_IVAR_INTPIN:
3487 dinfo->cfg.intpin = value;
3489 case PCI_IVAR_ETHADDR:
3490 case PCI_IVAR_SUBVENDOR:
3491 case PCI_IVAR_SUBDEVICE:
3492 case PCI_IVAR_VENDOR:
3493 case PCI_IVAR_DEVICE:
3494 case PCI_IVAR_DEVID:
3495 case PCI_IVAR_CLASS:
3496 case PCI_IVAR_SUBCLASS:
3497 case PCI_IVAR_PROGIF:
3498 case PCI_IVAR_REVID:
3500 case PCI_IVAR_DOMAIN:
3503 case PCI_IVAR_FUNCTION:
3504 return (EINVAL); /* disallow for now */
3512 #include "opt_ddb.h"
3514 #include <ddb/ddb.h>
3515 #include <sys/cons.h>
3518 * List resources based on pci map registers, used for within ddb
/*
 * DDB "show pciregs" command: walk the global pci_devq device list and
 * print one summary line per device (location, class, subsystem, chip
 * IDs, revision, header type), stopping early if the pager is quit.
 */
3521 DB_SHOW_COMMAND(pciregs, db_pci_dump)
3523 struct pci_devinfo *dinfo;
3524 struct devlist *devlist_head;
3527 int i, error, none_count;
3530 /* get the head of the device queue */
3531 devlist_head = &pci_devq;
3534 * Go through the list of devices and print out devices
3536 for (error = 0, i = 0,
3537 dinfo = STAILQ_FIRST(devlist_head);
3538 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
3539 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
3541 /* Populate pd_name and pd_unit */
3544 name = device_get_name(dinfo->cfg.dev);
3547 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
3548 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
3549 (name && *name) ? name : "none",
3550 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
3552 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
3553 p->pc_sel.pc_func, (p->pc_class << 16) |
3554 (p->pc_subclass << 8) | p->pc_progif,
3555 (p->pc_subdevice << 16) | p->pc_subvendor,
3556 (p->pc_device << 16) | p->pc_vendor,
3557 p->pc_revid, p->pc_hdr);
/*
 * Lazily allocate a resource for a BAR that has no entry in the child's
 * resource list yet.  Probes the BAR to learn its real size and type,
 * overrides the caller's requested size/alignment with the BAR's actual
 * values, allocates from the parent, records the result in the resource
 * list, and programs the BAR with the assigned base address.
 */
3562 static struct resource *
3563 pci_alloc_map(device_t dev, device_t child, int type, int *rid,
3564 u_long start, u_long end, u_long count, u_int flags)
3566 struct pci_devinfo *dinfo = device_get_ivars(child);
3567 struct resource_list *rl = &dinfo->resources;
3568 struct resource_list_entry *rle;
3569 struct resource *res;
3570 pci_addr_t map, testval;
3574 * Weed out the bogons, and figure out how large the BAR/map
3575 * is. Bars that read back 0 here are bogus and unimplemented.
3576 * Note: atapci in legacy mode are special and handled elsewhere
3577 * in the code. If you have a atapci device in legacy mode and
3578 * it fails here, that other code is broken.
3581 pci_read_bar(child, *rid, &map, &testval);
3583 /* Ignore a BAR with a base of 0. */
3584 if (pci_mapbase(testval) == 0)
/* Reject requests whose type contradicts what the BAR decodes. */
3587 if (PCI_BAR_MEM(testval)) {
3588 if (type != SYS_RES_MEMORY) {
3591 "child %s requested type %d for rid %#x,"
3592 " but the BAR says it is an memio\n",
3593 device_get_nameunit(child), type, *rid);
3597 if (type != SYS_RES_IOPORT) {
3600 "child %s requested type %d for rid %#x,"
3601 " but the BAR says it is an ioport\n",
3602 device_get_nameunit(child), type, *rid);
3608 * For real BARs, we need to override the size that
3609 * the driver requests, because that's what the BAR
3610 * actually uses and we would otherwise have a
3611 * situation where we might allocate the excess to
3612 * another driver, which won't work.
3614 mapsize = pci_mapsize(testval);
3615 count = 1UL << mapsize;
/* BAR bases must be naturally aligned to the BAR size. */
3616 if (RF_ALIGNMENT(flags) < mapsize)
3617 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
3618 if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
3619 flags |= RF_PREFETCHABLE;
3622 * Allocate enough resource, and then write back the
3623 * appropriate bar for that resource.
3625 res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
3626 start, end, count, flags & ~RF_ACTIVE)
3628 device_printf(child,
3629 "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
3630 count, *rid, type, start, end);
/* The bus owns the reservation; ownership passes to the child later. */
3633 rman_set_device(res, dev);
3634 resource_list_add(rl, type, *rid, start, end, count);
3635 rle = resource_list_find(rl, type, *rid);
3637 panic("pci_alloc_map: unexpectedly can't find resource.");
3639 rle->start = rman_get_start(res);
3640 rle->end = rman_get_end(res);
3643 device_printf(child,
3644 "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
3645 count, *rid, type, rman_get_start(res));
3646 map = rman_get_start(res);
3647 pci_write_bar(child, *rid, map);
/*
 * Bus resource allocation for children: performs lazy IRQ routing and
 * lazy BAR allocation (via pci_alloc_map), hands bus-owned reservations
 * over to the requesting child, and otherwise defers to the child's
 * resource list.
 */
3654 pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
3655 u_long start, u_long end, u_long count, u_int flags)
3657 struct pci_devinfo *dinfo = device_get_ivars(child);
3658 struct resource_list *rl = &dinfo->resources;
3659 struct resource_list_entry *rle;
3660 struct resource *res;
3661 pcicfgregs *cfg = &dinfo->cfg;
/* Requests from grandchildren pass straight through to our parent. */
3663 if (device_get_parent(child) != dev)
3664 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
3665 type, rid, start, end, count, flags));
3668 * Perform lazy resource allocation
3673 * Can't alloc legacy interrupt once MSI messages have
3676 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
3677 cfg->msix.msix_alloc > 0))
3681 * If the child device doesn't have an interrupt
3682 * routed and is deserving of an interrupt, try to
3685 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
3687 pci_assign_interrupt(dev, child, 0);
3689 case SYS_RES_IOPORT:
3690 case SYS_RES_MEMORY:
3691 /* Allocate resources for this BAR if needed. */
3692 rle = resource_list_find(rl, type, *rid);
3694 res = pci_alloc_map(dev, child, type, rid, start, end,
3698 rle = resource_list_find(rl, type, *rid);
3702 * If the resource belongs to the bus, then give it to
3703 * the child. We need to activate it if requested
3704 * since the bus always allocates inactive resources.
3706 if (rle != NULL && rle->res != NULL &&
3707 rman_get_device(rle->res) == dev) {
3709 device_printf(child,
3710 "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
3711 rman_get_size(rle->res), *rid, type,
3712 rman_get_start(rle->res));
3713 rman_set_device(rle->res, child);
3714 if ((flags & RF_ACTIVE) &&
3715 bus_activate_resource(child, type, *rid,
3721 return (resource_list_alloc(rl, dev, child, type, rid,
3722 start, end, count, flags));
/*
 * Release a child's resource.  BAR resources are not actually released:
 * they are deactivated if needed and ownership is handed back to the
 * bus so the reservation survives for the next allocator.
 */
3726 pci_release_resource(device_t dev, device_t child, int type, int rid,
3731 if (device_get_parent(child) != dev)
3732 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
3736 * For BARs we don't actually want to release the resource.
3737 * Instead, we deactivate the resource if needed and then give
3738 * ownership of the BAR back to the bus.
3741 case SYS_RES_IOPORT:
3742 case SYS_RES_MEMORY:
3743 if (rman_get_device(r) != child)
3745 if (rman_get_flags(r) & RF_ACTIVE) {
3746 error = bus_deactivate_resource(child, type, rid, r);
3750 rman_set_device(r, dev);
3753 return (bus_generic_rl_release_resource(dev, child, type, rid, r));
/*
 * Activate a child's resource, and for direct children enable I/O or
 * memory decoding in the command register when a BAR is activated.
 */
3757 pci_activate_resource(device_t dev, device_t child, int type, int rid,
3762 error = bus_generic_activate_resource(dev, child, type, rid, r);
3766 /* Enable decoding in the command register when activating BARs. */
3767 if (device_get_parent(child) == dev) {
3769 case SYS_RES_IOPORT:
3770 case SYS_RES_MEMORY:
3771 error = PCI_ENABLE_IO(dev, child, type);
/*
 * Delete a resource from a direct child's resource list.  Refuses if the
 * resource is still owned or active in the child; for BARs, clears the
 * BAR register first so the device stops decoding the range.
 */
3779 pci_delete_resource(device_t dev, device_t child, int type, int rid)
3781 struct pci_devinfo *dinfo;
3782 struct resource_list *rl;
3783 struct resource_list_entry *rle;
3785 if (device_get_parent(child) != dev)
3788 dinfo = device_get_ivars(child);
3789 rl = &dinfo->resources;
3790 rle = resource_list_find(rl, type, rid);
3795 if (rman_get_device(rle->res) != dev ||
3796 rman_get_flags(rle->res) & RF_ACTIVE) {
3797 device_printf(dev, "delete_resource: "
3798 "Resource still owned by child, oops. "
3799 "(type=%d, rid=%d, addr=%lx)\n",
3800 rle->type, rle->rid,
3801 rman_get_start(rle->res));
3805 #ifndef __PCI_BAR_ZERO_VALID
3807 * If this is a BAR, clear the BAR so it stops
3808 * decoding before releasing the resource.
3811 case SYS_RES_IOPORT:
3812 case SYS_RES_MEMORY:
3813 pci_write_bar(child, rid, 0);
3817 bus_release_resource(dev, type, rid, rle->res);
3819 resource_list_delete(rl, type, rid);
/* Return a pointer to the child's resource list (stored in its ivars). */
3822 struct resource_list *
3823 pci_get_resource_list (device_t dev, device_t child)
3825 struct pci_devinfo *dinfo = device_get_ivars(child);
3827 return (&dinfo->resources);
/* Read 'width' bytes of the child's config space at 'reg' via the parent bridge. */
3831 pci_read_config_method(device_t dev, device_t child, int reg, int width)
3833 struct pci_devinfo *dinfo = device_get_ivars(child);
3834 pcicfgregs *cfg = &dinfo->cfg;
3836 return (PCIB_READ_CONFIG(device_get_parent(dev),
3837 cfg->bus, cfg->slot, cfg->func, reg, width));
/*
 * Bus method: write 'width' bytes of 'val' to config space at offset
 * 'reg' for the given child, forwarding to the parent bridge with the
 * child's cached bus/slot/function address.
 */
3841 pci_write_config_method(device_t dev, device_t child, int reg,
3842 uint32_t val, int width)
3844 struct pci_devinfo *dinfo = device_get_ivars(child);
3845 pcicfgregs *cfg = &dinfo->cfg;
3847 PCIB_WRITE_CONFIG(device_get_parent(dev),
3848 cfg->bus, cfg->slot, cfg->func, reg, val, width);
/*
 * Bus method: format the child's bus location ("slot=%d function=%d")
 * into 'buf' for devctl/devinfo consumers.  snprintf bounds the write
 * to 'buflen'.
 */
3852 pci_child_location_str_method(device_t dev, device_t child, char *buf,
3856 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
3857 pci_get_function(child));
/*
 * Bus method: format the child's plug-and-play identification string
 * (vendor/device/subvendor/subdevice IDs plus the 24-bit class code)
 * into 'buf', bounded by 'buflen'.
 * NOTE(review): the assignment of 'cfg' (presumably &dinfo->cfg) and
 * the final snprintf argument are elided in this chunk — verify
 * against the full file.
 */
3862 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
3865 struct pci_devinfo *dinfo;
3868 dinfo = device_get_ivars(child);
3870 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
3871 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
3872 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
/*
 * Bus method: ask the parent bridge to route the child's interrupt
 * (PCIB_ROUTE_INTERRUPT) and return the assigned IRQ.  The final
 * argument (the interrupt pin from 'cfg') is elided in this chunk.
 */
3878 pci_assign_interrupt_method(device_t dev, device_t child)
3880 struct pci_devinfo *dinfo = device_get_ivars(child);
3881 pcicfgregs *cfg = &dinfo->cfg;
3883 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler for the pci driver.  On load: initialize the
 * global device queue, create the /dev control node (root:wheel 0644)
 * used by pciconf(8)-style ioctls, and load vendor data.  On unload:
 * destroy the control node.  The switch on 'what' and the MOD_LOAD /
 * MOD_UNLOAD case labels are elided in this chunk.
 */
3888 pci_modevent(module_t mod, int what, void *arg)
/* Persists across events so MOD_UNLOAD can destroy what MOD_LOAD made. */
3890 static struct cdev *pci_cdev;
3894 STAILQ_INIT(&pci_devq);
3896 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
3898 pci_load_vendor_data();
3902 destroy_dev(pci_cdev);
/*
 * Restore a type-0 device's saved configuration registers (typically
 * after a suspend/resume or power-state transition).  Ordering matters:
 * the device is first forced to D0, because the D3->D0 transition can
 * reset BARs and other registers; only then are the cached BARs,
 * command register, interrupt routing, timing registers and MSI/MSI-X
 * state written back.
 * NOTE(review): braces and comment delimiters are elided in this
 * chunk — confirm structure against the full file.
 */
3910 pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
3915 * Only do header type 0 devices. Type 1 devices are bridges,
3916 * which we know need special treatment. Type 2 devices are
3917 * cardbus bridges which also require special treatment.
3918 * Other types are unknown, and we err on the side of safety
3921 if (dinfo->cfg.hdrtype != 0)
3925 * Restore the device to full power mode. We must do this
3926 * before we restore the registers because moving from D3 to
3927 * D0 will cause the chip's BARs and some other registers to
3928 * be reset to some unknown power on reset values. Cut down
3929 * the noise on boot by doing nothing if we are already in
3932 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
3933 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
/* Write back each saved BAR, then the rest of the type-0 header. */
3935 for (i = 0; i < dinfo->cfg.nummaps; i++)
3936 pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
3937 pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
3938 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
3939 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
3940 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
3941 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
3942 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
3943 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
3944 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
3945 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
3946 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
3948 /* Restore MSI and MSI-X configurations if they are present. */
3949 if (dinfo->cfg.msi.msi_location != 0)
3950 pci_resume_msi(dev);
3951 if (dinfo->cfg.msix.msix_location != 0)
3952 pci_resume_msix(dev);
3956 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3963 * Only do header type 0 devices. Type 1 devices are bridges, which
3964 * we know need special treatment. Type 2 devices are cardbus bridges
3965 * which also require special treatment. Other types are unknown, and
3966 * we err on the side of safety by ignoring them. Powering down
3967 * bridges should not be undertaken lightly.
3969 if (dinfo->cfg.hdrtype != 0)
3971 for (i = 0; i < dinfo->cfg.nummaps; i++)
3972 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3973 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3976 * Some drivers apparently write to these registers w/o updating our
3977 * cached copy. No harm happens if we update the copy, so do so here
3978 * so we can restore them. The COMMAND register is modified by the
3979 * bus w/o updating the cache. This should represent the normally
3980 * writable portion of the 'defined' part of type 0 headers. In
3981 * theory we also need to save/restore the PCI capability structures
3982 * we know about, but apart from power we don't know any that are
3985 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3986 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3987 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3988 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3989 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3990 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3991 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3992 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3993 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3994 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3995 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3996 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3997 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3998 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3999 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
4002 * don't set the state for display devices, base peripherals and
4003 * memory devices since bad things happen when they are powered down.
4004 * We should (a) have drivers that can easily detach and (b) use
4005 * generic drivers for these devices so that some device actually
4006 * attaches. We need to make sure that when we implement (a) we don't
4007 * power the device down on a reattach.
4009 cls = pci_get_class(dev);
4012 switch (pci_do_power_nodriver)
4014 case 0: /* NO powerdown at all */
4016 case 1: /* Conservative about what to power down */
4017 if (cls == PCIC_STORAGE)
4020 case 2: /* Agressive about what to power down */
4021 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
4022 cls == PCIC_BASEPERIPH)
4025 case 3: /* Power down everything */
4029 * PCI spec says we can only go into D3 state from D0 state.
4030 * Transition from D[12] into D0 before going to D3 state.
4032 ps = pci_get_powerstate(dev);
4033 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
4034 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
4035 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
4036 pci_set_powerstate(dev, PCI_POWERSTATE_D3);