2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <vm/vm_extern.h>
51 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
/*
 * True iff config register 'reg' is the expansion-ROM (BIOS) BAR for this
 * header type: type-0 devices use PCIR_BIOS, type-1 bridges use PCIR_BIOS_1.
 */
73 #define PCIR_IS_BIOS(cfg, reg) \
74 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
75 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
77 static int pci_has_quirk(uint32_t devid, int quirk);
78 static pci_addr_t pci_mapbase(uint64_t mapreg);
79 static const char *pci_maptype(uint64_t mapreg);
80 static int pci_mapsize(uint64_t testval);
81 static int pci_maprange(uint64_t mapreg);
82 static pci_addr_t pci_rombase(uint64_t mapreg);
83 static int pci_romsize(uint64_t testval);
84 static void pci_fixancient(pcicfgregs *cfg);
85 static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
87 static int pci_porten(device_t dev);
88 static int pci_memen(device_t dev);
89 static void pci_assign_interrupt(device_t bus, device_t dev,
91 static int pci_add_map(device_t bus, device_t dev, int reg,
92 struct resource_list *rl, int force, int prefetch);
93 static int pci_probe(device_t dev);
94 static int pci_attach(device_t dev);
95 static void pci_load_vendor_data(void);
96 static int pci_describe_parse_line(char **ptr, int *vendor,
97 int *device, char **desc);
98 static char *pci_describe_device(device_t dev);
99 static int pci_modevent(module_t mod, int what, void *arg);
100 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
102 static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
103 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
104 int reg, uint32_t *data);
106 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
107 int reg, uint32_t data);
109 static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
110 static void pci_disable_msi(device_t dev);
111 static void pci_enable_msi(device_t dev, uint64_t address,
113 static void pci_enable_msix(device_t dev, u_int index,
114 uint64_t address, uint32_t data);
115 static void pci_mask_msix(device_t dev, u_int index);
116 static void pci_unmask_msix(device_t dev, u_int index);
117 static int pci_msi_blacklisted(void);
118 static int pci_msix_blacklisted(void);
119 static void pci_resume_msi(device_t dev);
120 static void pci_resume_msix(device_t dev);
121 static int pci_remap_intr_method(device_t bus, device_t dev,
/*
 * newbus method table for the PCI bus driver: device lifecycle hooks,
 * bus-interface methods (resources, interrupts, ivars), and the PCI-specific
 * kobj interface (config access, power, MSI/MSI-X, VPD, capabilities).
 * NOTE(review): this listing appears to be missing lines relative to
 * upstream (e.g. the DEVMETHOD_END terminator and closing brace) - confirm
 * against the canonical sys/dev/pci/pci.c.
 */
124 static device_method_t pci_methods[] = {
126 /* Device interface */
127 DEVMETHOD(device_probe, pci_probe),
128 DEVMETHOD(device_attach, pci_attach),
129 DEVMETHOD(device_detach, bus_generic_detach),
130 DEVMETHOD(device_shutdown, bus_generic_shutdown),
131 DEVMETHOD(device_suspend, pci_suspend),
132 DEVMETHOD(device_resume, pci_resume),
135 DEVMETHOD(bus_print_child, pci_print_child),
136 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
137 DEVMETHOD(bus_read_ivar, pci_read_ivar),
138 DEVMETHOD(bus_write_ivar, pci_write_ivar),
139 DEVMETHOD(bus_driver_added, pci_driver_added),
140 DEVMETHOD(bus_setup_intr, pci_setup_intr),
141 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
143 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
144 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
145 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
146 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
147 DEVMETHOD(bus_delete_resource, pci_delete_resource),
148 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
149 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
150 DEVMETHOD(bus_release_resource, pci_release_resource),
151 DEVMETHOD(bus_activate_resource, pci_activate_resource),
152 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
153 DEVMETHOD(bus_child_detached, pci_child_detached),
154 DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
155 DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
156 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
159 DEVMETHOD(pci_read_config, pci_read_config_method),
160 DEVMETHOD(pci_write_config, pci_write_config_method),
161 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
162 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
163 DEVMETHOD(pci_enable_io, pci_enable_io_method),
164 DEVMETHOD(pci_disable_io, pci_disable_io_method),
165 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
166 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
167 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
168 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
169 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
170 DEVMETHOD(pci_find_cap, pci_find_cap_method),
171 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
172 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
173 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
174 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
175 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
176 DEVMETHOD(pci_release_msi, pci_release_msi_method),
177 DEVMETHOD(pci_msi_count, pci_msi_count_method),
178 DEVMETHOD(pci_msix_count, pci_msix_count_method),
/* Register the "pci" driver class and hook it under pcib (PCI bridges). */
183 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
185 static devclass_t pci_devclass;
186 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
187 MODULE_VERSION(pci, 1);
/* Loaded copy of the pci_vendor_data firmware image (device name database). */
188 static char *pci_vendordata;
189 static size_t pci_vendordata_size;
/*
 * Per-device quirk table entry; devid is the 32-bit vendor/device value as
 * read from PCIR_DEVVENDOR.  NOTE(review): the struct's opening lines are
 * missing from this listing - confirm field layout against upstream.
 */
192 uint32_t devid; /* Vendor/device of the card */
194 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
195 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
196 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
197 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
198 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
/* Table of known-broken (or known-good-in-VM) devices, terminated by devid 0. */
203 static const struct pci_quirk pci_quirks[] = {
204 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
205 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
206 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
207 /* As does the Serverworks OSB4 (the SMBus mapping register) */
208 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
211 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
212 * or the CMIC-SL (AKA ServerWorks GC_LE).
214 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
215 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
218 * MSI doesn't work on earlier Intel chipsets including
219 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
221 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
222 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
223 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
224 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
225 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
226 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
227 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
230 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
233 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
236 * MSI-X allocation doesn't work properly for devices passed through
237 * by VMware up to at least ESXi 5.1.
239 { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
240 { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */
243 * Some virtualization environments emulate an older chipset
244 * but support MSI just fine. QEMU uses the Intel 82440.
246 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
249 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
250 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
251 * It prevents us from attaching hpet(4) when the bit is unset.
252 * Note this quirk only affects SB600 revision A13 and earlier.
253 * For SB600 A21 and later, firmware must set the bit to hide it.
254 * For SB700 and later, it is unused and hardcoded to zero.
256 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
261 /* map register information */
262 #define PCI_MAPMEM 0x01 /* memory map */
263 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
264 #define PCI_MAPPORT 0x04 /* port map */
/* Global list of all discovered PCI devices plus a generation counter. */
266 struct devlist pci_devq;
267 uint32_t pci_generation;
268 uint32_t pci_numdevs = 0;
/* Set non-zero once at least one PCIe / PCI-X capable device is found. */
269 static int pcie_chipset, pcix_chipset;
/* hw.pci sysctl tree: loader tunables and runtime knobs for the PCI code. */
272 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
274 static int pci_enable_io_modes = 1;
275 TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
276 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
277 &pci_enable_io_modes, 1,
278 "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
279 enable these bits correctly. We'd like to do this all the time, but there\n\
280 are some peripherals that this causes problems with.");
282 static int pci_do_realloc_bars = 0;
283 TUNABLE_INT("hw.pci.realloc_bars", &pci_do_realloc_bars);
284 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RW,
285 &pci_do_realloc_bars, 0,
286 "Attempt to allocate a new range for any BARs whose original firmware-assigned ranges fail to allocate during the initial device scan.");
288 static int pci_do_power_nodriver = 0;
289 TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
290 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
291 &pci_do_power_nodriver, 0,
292 "Place a function into D3 state when no driver attaches to it. 0 means\n\
293 disable. 1 means conservatively place devices into D3 state. 2 means\n\
294 agressively place devices into D3 state. 3 means put absolutely everything\n\
/* Power-state transitions on suspend/resume; both default to enabled. */
297 int pci_do_power_resume = 1;
298 TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
299 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
300 &pci_do_power_resume, 1,
301 "Transition from D3 -> D0 on resume.");
303 int pci_do_power_suspend = 1;
304 TUNABLE_INT("hw.pci.do_power_suspend", &pci_do_power_suspend);
305 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RW,
306 &pci_do_power_suspend, 1,
307 "Transition from D0 -> D3 on suspend.");
309 static int pci_do_msi = 1;
310 TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
311 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
312 "Enable support for MSI interrupts");
314 static int pci_do_msix = 1;
315 TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
316 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
317 "Enable support for MSI-X interrupts");
319 static int pci_honor_msi_blacklist = 1;
320 TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
321 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
322 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
/* USB early takeover defaults on for x86 (BIOS legacy emulation), off elsewhere. */
324 #if defined(__i386__) || defined(__amd64__)
325 static int pci_usb_takeover = 1;
327 static int pci_usb_takeover = 0;
329 TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover);
330 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
331 &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
332 Disable this if you depend on BIOS emulation of USB devices, that is\n\
333 you use USB devices (like keyboard or mouse) but do not load USB drivers");
336 pci_has_quirk(uint32_t devid, int quirk)
338 const struct pci_quirk *q;
340 for (q = &pci_quirks[0]; q->devid; q++) {
341 if (q->devid == devid && q->type == quirk)
347 /* Find a device_t by bus/slot/function in domain 0 */
350 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
353 return (pci_find_dbsf(0, bus, slot, func));
356 /* Find a device_t by domain/bus/slot/function */
359 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
361 struct pci_devinfo *dinfo;
363 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
364 if ((dinfo->cfg.domain == domain) &&
365 (dinfo->cfg.bus == bus) &&
366 (dinfo->cfg.slot == slot) &&
367 (dinfo->cfg.func == func)) {
368 return (dinfo->cfg.dev);
375 /* Find a device_t by vendor/device ID */
378 pci_find_device(uint16_t vendor, uint16_t device)
380 struct pci_devinfo *dinfo;
382 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
383 if ((dinfo->cfg.vendor == vendor) &&
384 (dinfo->cfg.device == device)) {
385 return (dinfo->cfg.dev);
393 pci_find_class(uint8_t class, uint8_t subclass)
395 struct pci_devinfo *dinfo;
397 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
398 if (dinfo->cfg.baseclass == class &&
399 dinfo->cfg.subclass == subclass) {
400 return (dinfo->cfg.dev);
408 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
413 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
416 retval += vprintf(fmt, ap);
421 /* return base address of memory or port map */
424 pci_mapbase(uint64_t mapreg)
427 if (PCI_BAR_MEM(mapreg))
428 return (mapreg & PCIM_BAR_MEM_BASE);
430 return (mapreg & PCIM_BAR_IO_BASE);
433 /* return map type of memory or port map */
436 pci_maptype(uint64_t mapreg)
439 if (PCI_BAR_IO(mapreg))
441 if (mapreg & PCIM_BAR_MEM_PREFETCH)
442 return ("Prefetchable Memory");
446 /* return log2 of map size decoded for memory or port map */
449 pci_mapsize(uint64_t testval)
453 testval = pci_mapbase(testval);
456 while ((testval & 1) == 0)
465 /* return base address of device ROM */
468 pci_rombase(uint64_t mapreg)
471 return (mapreg & PCIM_BIOS_ADDR_MASK);
474 /* return log2 of map size decided for device ROM */
477 pci_romsize(uint64_t testval)
481 testval = pci_rombase(testval);
484 while ((testval & 1) == 0)
493 /* return log2 of address range supported by map register */
496 pci_maprange(uint64_t mapreg)
500 if (PCI_BAR_IO(mapreg))
503 switch (mapreg & PCIM_BAR_MEM_TYPE) {
504 case PCIM_BAR_MEM_32:
507 case PCIM_BAR_MEM_1MB:
510 case PCIM_BAR_MEM_64:
517 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
520 pci_fixancient(pcicfgregs *cfg)
522 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
525 /* PCI to PCI bridges use header type 1 */
526 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
527 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
530 /* extract header type specific config data */
533 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
535 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
536 switch (cfg->hdrtype & PCIM_HDRTYPE) {
537 case PCIM_HDRTYPE_NORMAL:
538 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
539 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
540 cfg->nummaps = PCI_MAXMAPS_0;
542 case PCIM_HDRTYPE_BRIDGE:
543 cfg->nummaps = PCI_MAXMAPS_1;
545 case PCIM_HDRTYPE_CARDBUS:
546 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
547 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
548 cfg->nummaps = PCI_MAXMAPS_2;
554 /* read configuration header into pcicfgregs structure */
/*
 * Allocate a pci_devinfo of 'size' bytes, fill its cfg from the device's
 * config header, link it onto the global device list, and mirror selected
 * fields into the pciio conf structure.  Returns NULL if no device responds
 * at this b/s/f (all-ones vendor/device read).
 * NOTE(review): several lines are missing from this listing relative to
 * upstream (return type, braces, domain/bus/slot/func assignment, the
 * pci_fixancient() call) - do not treat this excerpt as complete.
 */
556 pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
558 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
559 pcicfgregs *cfg = NULL;
560 struct pci_devinfo *devlist_entry;
561 struct devlist *devlist_head;
563 devlist_head = &pci_devq;
565 devlist_entry = NULL;
/* All-ones means no device decodes this address. */
567 if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
568 devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
569 if (devlist_entry == NULL)
572 cfg = &devlist_entry->cfg;
/* Snapshot the standard config header fields. */
578 cfg->vendor = REG(PCIR_VENDOR, 2);
579 cfg->device = REG(PCIR_DEVICE, 2);
580 cfg->cmdreg = REG(PCIR_COMMAND, 2);
581 cfg->statreg = REG(PCIR_STATUS, 2);
582 cfg->baseclass = REG(PCIR_CLASS, 1);
583 cfg->subclass = REG(PCIR_SUBCLASS, 1);
584 cfg->progif = REG(PCIR_PROGIF, 1);
585 cfg->revid = REG(PCIR_REVID, 1);
586 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
587 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
588 cfg->lattimer = REG(PCIR_LATTIMER, 1);
589 cfg->intpin = REG(PCIR_INTPIN, 1);
590 cfg->intline = REG(PCIR_INTLINE, 1);
592 cfg->mingnt = REG(PCIR_MINGNT, 1);
593 cfg->maxlat = REG(PCIR_MAXLAT, 1);
/* Split the multifunction flag out of the header type byte. */
595 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
596 cfg->hdrtype &= ~PCIM_MFDEV;
597 STAILQ_INIT(&cfg->maps);
600 pci_hdrtypedata(pcib, b, s, f, cfg);
602 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
603 pci_read_cap(pcib, cfg);
605 STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);
/* Mirror into the pciio(4) 'conf' view used by userland ioctls. */
607 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
608 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
609 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
610 devlist_entry->conf.pc_sel.pc_func = cfg->func;
611 devlist_entry->conf.pc_hdr = cfg->hdrtype;
613 devlist_entry->conf.pc_subvendor = cfg->subvendor;
614 devlist_entry->conf.pc_subdevice = cfg->subdevice;
615 devlist_entry->conf.pc_vendor = cfg->vendor;
616 devlist_entry->conf.pc_device = cfg->device;
618 devlist_entry->conf.pc_class = cfg->baseclass;
619 devlist_entry->conf.pc_subclass = cfg->subclass;
620 devlist_entry->conf.pc_progif = cfg->progif;
621 devlist_entry->conf.pc_revid = cfg->revid;
626 return (devlist_entry);
/*
 * Walk the device's capability list and record the location (and a few
 * cached registers) of each capability we care about: power management,
 * HyperTransport, MSI, MSI-X, VPD, subvendor, PCI-X and PCI-express.
 * NOTE(review): lines are missing from this listing relative to upstream
 * (function header, braces, several case bodies) - verify before editing.
 */
631 pci_read_cap(device_t pcib, pcicfgregs *cfg)
633 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
634 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
635 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
639 int ptr, nextptr, ptrptr;
/* The capability pointer register depends on the header type. */
641 switch (cfg->hdrtype & PCIM_HDRTYPE) {
642 case PCIM_HDRTYPE_NORMAL:
643 case PCIM_HDRTYPE_BRIDGE:
644 ptrptr = PCIR_CAP_PTR;
646 case PCIM_HDRTYPE_CARDBUS:
647 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
650 return; /* no extended capabilities support */
652 nextptr = REG(ptrptr, 1); /* sanity check? */
655 * Read capability entries.
657 while (nextptr != 0) {
660 printf("illegal PCI extended capability offset %d\n",
664 /* Find the next entry */
666 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
668 /* Process this entry */
669 switch (REG(ptr + PCICAP_ID, 1)) {
670 case PCIY_PMG: /* PCI power management */
671 if (cfg->pp.pp_cap == 0) {
672 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
673 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
674 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
675 if ((nextptr - ptr) > PCIR_POWER_DATA)
676 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
679 case PCIY_HT: /* HyperTransport */
680 /* Determine HT-specific capability type. */
681 val = REG(ptr + PCIR_HT_COMMAND, 2);
683 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
684 cfg->ht.ht_slave = ptr;
686 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
687 switch (val & PCIM_HTCMD_CAP_MASK) {
688 case PCIM_HTCAP_MSI_MAPPING:
689 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
690 /* Sanity check the mapping window. */
691 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
694 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
696 if (addr != MSI_INTEL_ADDR_BASE)
698 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
699 cfg->domain, cfg->bus,
700 cfg->slot, cfg->func,
703 addr = MSI_INTEL_ADDR_BASE;
705 cfg->ht.ht_msimap = ptr;
706 cfg->ht.ht_msictrl = val;
707 cfg->ht.ht_msiaddr = addr;
712 case PCIY_MSI: /* PCI MSI */
713 cfg->msi.msi_location = ptr;
714 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
/* Multiple Message Capable field encodes log2 of message count. */
715 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
716 PCIM_MSICTRL_MMC_MASK)>>1);
718 case PCIY_MSIX: /* PCI MSI-X */
719 cfg->msix.msix_location = ptr;
720 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
721 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
722 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
/* Table and PBA registers each hold a BAR index plus an offset. */
723 val = REG(ptr + PCIR_MSIX_TABLE, 4);
724 cfg->msix.msix_table_bar = PCIR_BAR(val &
726 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
727 val = REG(ptr + PCIR_MSIX_PBA, 4);
728 cfg->msix.msix_pba_bar = PCIR_BAR(val &
730 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
732 case PCIY_VPD: /* PCI Vital Product Data */
733 cfg->vpd.vpd_reg = ptr;
736 /* Should always be true. */
737 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
738 PCIM_HDRTYPE_BRIDGE) {
739 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
740 cfg->subvendor = val & 0xffff;
741 cfg->subdevice = val >> 16;
744 case PCIY_PCIX: /* PCI-X */
746 * Assume we have a PCI-X chipset if we have
747 * at least one PCI-PCI bridge with a PCI-X
748 * capability. Note that some systems with
749 * PCI-express or HT chipsets might match on
750 * this check as well.
752 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
755 cfg->pcix.pcix_location = ptr;
757 case PCIY_EXPRESS: /* PCI-express */
759 * Assume we have a PCI-express chipset if we have
760 * at least one PCI-express device.
763 cfg->pcie.pcie_location = ptr;
764 val = REG(ptr + PCIER_FLAGS, 2);
765 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
772 #if defined(__powerpc__)
774 * Enable the MSI mapping window for all HyperTransport
775 * slaves. PCI-PCI bridges have their windows enabled via
778 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
779 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
781 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
782 cfg->domain, cfg->bus, cfg->slot, cfg->func);
783 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
784 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
788 /* REG and WREG use carry through to next functions */
792 * PCI Vital Product Data
795 #define PCI_VPD_TIMEOUT 1000000
798 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
800 int count = PCI_VPD_TIMEOUT;
802 KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
804 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
806 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
809 DELAY(1); /* limit looping */
811 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
#if 0
/*
 * Write one 32-bit VPD dword at 'reg' (must be 4-byte aligned).  The write
 * is initiated by setting bit 15 of the address register; hardware clears
 * it when the write completes.  Returns 0 on success, ENXIO on timeout.
 */
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif

#undef PCI_VPD_TIMEOUT
838 struct vpd_readstate {
848 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
853 if (vrs->bytesinval == 0) {
854 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®))
856 vrs->val = le32toh(reg);
858 byte = vrs->val & 0xff;
861 vrs->val = vrs->val >> 8;
862 byte = vrs->val & 0xff;
/*
 * Parse the device's VPD area into cfg->vpd: the identifier string, the
 * read-only (VPD-R) keyword array, and the writable (VPD-W) keyword array.
 * Implemented as a byte-at-a-time state machine over vpd_nextbyte(); the
 * "RV" keyword carries the checksum that validates the read-only section.
 * NOTE(review): this listing is missing many lines relative to upstream
 * (function header, braces, state-machine scaffolding, several state
 * bodies and error paths) - treat as an excerpt, not complete code.
 */
872 pci_read_vpd(device_t pcib, pcicfgregs *cfg)
874 struct vpd_readstate vrs;
879 int alloc, off; /* alloc/off for RO/W arrays */
885 /* init vpd reader */
/* Initialize locals; gcc can't see they're set before use in all states. */
893 name = remain = i = 0; /* shut up stupid gcc */
894 alloc = off = 0; /* shut up stupid gcc */
895 dflen = 0; /* shut up stupid gcc */
898 if (vpd_nextbyte(&vrs, &byte)) {
903 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
904 "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
905 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
/* State 0: decode the next resource tag (small or large format). */
908 case 0: /* item name */
910 if (vpd_nextbyte(&vrs, &byte2)) {
915 if (vpd_nextbyte(&vrs, &byte2)) {
919 remain |= byte2 << 8;
/* VPD addressing is 15-bit; a larger length cannot be valid. */
920 if (remain > (0x7f*4 - vrs.off)) {
923 "invalid VPD data, remain %#x\n",
929 name = (byte >> 3) & 0xf;
932 case 0x2: /* String */
933 cfg->vpd.vpd_ident = malloc(remain + 1,
941 case 0x10: /* VPD-R */
944 cfg->vpd.vpd_ros = malloc(alloc *
945 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
949 case 0x11: /* VPD-W */
952 cfg->vpd.vpd_w = malloc(alloc *
953 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
957 default: /* Invalid data, abort */
/* State 1: copy the identifier string, NUL-terminate when done. */
963 case 1: /* Identifier String */
964 cfg->vpd.vpd_ident[i++] = byte;
967 cfg->vpd.vpd_ident[i] = '\0';
/* State 2: read a VPD-R keyword header (2-char keyword + length). */
972 case 2: /* VPD-R Keyword Header */
974 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
975 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
976 M_DEVBUF, M_WAITOK | M_ZERO);
978 cfg->vpd.vpd_ros[off].keyword[0] = byte;
979 if (vpd_nextbyte(&vrs, &byte2)) {
983 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
984 if (vpd_nextbyte(&vrs, &byte2)) {
990 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
993 * if this happens, we can't trust the rest
996 pci_printf(cfg, "bad keyword length: %d\n",
1001 } else if (dflen == 0) {
/* Zero-length value: store an empty string so the array is uniform. */
1002 cfg->vpd.vpd_ros[off].value = malloc(1 *
1003 sizeof(*cfg->vpd.vpd_ros[off].value),
1004 M_DEVBUF, M_WAITOK);
1005 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1007 cfg->vpd.vpd_ros[off].value = malloc(
1009 sizeof(*cfg->vpd.vpd_ros[off].value),
1010 M_DEVBUF, M_WAITOK);
1013 /* keep in sync w/ state 3's transistions */
1014 if (dflen == 0 && remain == 0)
1016 else if (dflen == 0)
/* State 3: copy a VPD-R keyword value; "RV" validates the checksum. */
1022 case 3: /* VPD-R Keyword Value */
1023 cfg->vpd.vpd_ros[off].value[i++] = byte;
1024 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1025 "RV", 2) == 0 && cksumvalid == -1) {
1031 "bad VPD cksum, remain %hhu\n",
1040 /* keep in sync w/ state 2's transistions */
1042 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1043 if (dflen == 0 && remain == 0) {
1044 cfg->vpd.vpd_rocnt = off;
/* Shrink the array to its final element count. */
1045 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1046 off * sizeof(*cfg->vpd.vpd_ros),
1047 M_DEVBUF, M_WAITOK | M_ZERO);
1049 } else if (dflen == 0)
/* States 5/6: same header/value handling for the writable (VPD-W) section. */
1059 case 5: /* VPD-W Keyword Header */
1061 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1062 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1063 M_DEVBUF, M_WAITOK | M_ZERO);
1065 cfg->vpd.vpd_w[off].keyword[0] = byte;
1066 if (vpd_nextbyte(&vrs, &byte2)) {
1070 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1071 if (vpd_nextbyte(&vrs, &byte2)) {
1075 cfg->vpd.vpd_w[off].len = dflen = byte2;
1076 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1077 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1078 sizeof(*cfg->vpd.vpd_w[off].value),
1079 M_DEVBUF, M_WAITOK);
1082 /* keep in sync w/ state 6's transistions */
1083 if (dflen == 0 && remain == 0)
1085 else if (dflen == 0)
1091 case 6: /* VPD-W Keyword Value */
1092 cfg->vpd.vpd_w[off].value[i++] = byte;
1095 /* keep in sync w/ state 5's transistions */
1097 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1098 if (dflen == 0 && remain == 0) {
1099 cfg->vpd.vpd_wcnt = off;
1100 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1101 off * sizeof(*cfg->vpd.vpd_w),
1102 M_DEVBUF, M_WAITOK | M_ZERO);
1104 } else if (dflen == 0)
1109 pci_printf(cfg, "invalid state: %d\n", state);
/* Checksum failure or parse abort: discard the read-only data. */
1115 if (cksumvalid == 0 || state < -1) {
1116 /* read-only data bad, clean up */
1117 if (cfg->vpd.vpd_ros != NULL) {
1118 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1119 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1120 free(cfg->vpd.vpd_ros, M_DEVBUF);
1121 cfg->vpd.vpd_ros = NULL;
1125 /* I/O error, clean up */
1126 pci_printf(cfg, "failed to read VPD data.\n");
1127 if (cfg->vpd.vpd_ident != NULL) {
1128 free(cfg->vpd.vpd_ident, M_DEVBUF);
1129 cfg->vpd.vpd_ident = NULL;
1131 if (cfg->vpd.vpd_w != NULL) {
1132 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1133 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1134 free(cfg->vpd.vpd_w, M_DEVBUF);
1135 cfg->vpd.vpd_w = NULL;
/* Mark VPD parsed so later queries don't re-read the hardware. */
1138 cfg->vpd.vpd_cached = 1;
1144 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1146 struct pci_devinfo *dinfo = device_get_ivars(child);
1147 pcicfgregs *cfg = &dinfo->cfg;
1149 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1150 pci_read_vpd(device_get_parent(dev), cfg);
1152 *identptr = cfg->vpd.vpd_ident;
1154 if (*identptr == NULL)
1161 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1164 struct pci_devinfo *dinfo = device_get_ivars(child);
1165 pcicfgregs *cfg = &dinfo->cfg;
1168 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1169 pci_read_vpd(device_get_parent(dev), cfg);
1171 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1172 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1173 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1174 *vptr = cfg->vpd.vpd_ros[i].value;
1183 * Find the requested HyperTransport capability and return the offset
1184 * in configuration space via the pointer provided. The function
1185 * returns 0 on success and an error code otherwise.
1188 pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1193 error = pci_find_cap(child, PCIY_HT, &ptr);
1198 * Traverse the capabilities list checking each HT capability
1199 * to see if it matches the requested HT capability.
1202 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1203 if (capability == PCIM_HTCAP_SLAVE ||
1204 capability == PCIM_HTCAP_HOST)
1207 val &= PCIM_HTCMD_CAP_MASK;
1208 if (val == capability) {
1214 /* Skip to the next HT capability. */
1216 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1217 if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
1226 * Find the requested capability and return the offset in
1227 * configuration space via the pointer provided. The function returns
1228 * 0 on success and an error code otherwise.
1231 pci_find_cap_method(device_t dev, device_t child, int capability,
1234 struct pci_devinfo *dinfo = device_get_ivars(child);
1235 pcicfgregs *cfg = &dinfo->cfg;
1240 * Check the CAP_LIST bit of the PCI status register first.
1242 status = pci_read_config(child, PCIR_STATUS, 2);
1243 if (!(status & PCIM_STATUS_CAPPRESENT))
1247 * Determine the start pointer of the capabilities list.
1249 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1250 case PCIM_HDRTYPE_NORMAL:
1251 case PCIM_HDRTYPE_BRIDGE:
1254 case PCIM_HDRTYPE_CARDBUS:
1255 ptr = PCIR_CAP_PTR_2;
1259 return (ENXIO); /* no extended capabilities support */
1261 ptr = pci_read_config(child, ptr, 1);
1264 * Traverse the capabilities list.
1267 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1272 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1279 * Find the requested extended capability and return the offset in
1280 * configuration space via the pointer provided. The function returns
1281 * 0 on success and an error code otherwise.
1284 pci_find_extcap_method(device_t dev, device_t child, int capability,
1287 struct pci_devinfo *dinfo = device_get_ivars(child);
1288 pcicfgregs *cfg = &dinfo->cfg;
1292 /* Only supported for PCI-express devices. */
1293 if (cfg->pcie.pcie_location == 0)
1297 ecap = pci_read_config(child, ptr, 4);
1298 if (ecap == 0xffffffff || ecap == 0)
1301 if (PCI_EXTCAP_ID(ecap) == capability) {
1306 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1309 ecap = pci_read_config(child, ptr, 4);
1316 * Support for MSI-X message interrupts.
1319 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
1321 struct pci_devinfo *dinfo = device_get_ivars(dev);
1322 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1325 KASSERT(msix->msix_table_len > index, ("bogus index"));
1326 offset = msix->msix_table_offset + index * 16;
1327 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1328 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1329 bus_write_4(msix->msix_table_res, offset + 8, data);
1331 /* Enable MSI -> HT mapping. */
1332 pci_ht_map_msi(dev, address);
1336 pci_mask_msix(device_t dev, u_int index)
1338 struct pci_devinfo *dinfo = device_get_ivars(dev);
1339 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1340 uint32_t offset, val;
1342 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1343 offset = msix->msix_table_offset + index * 16 + 12;
1344 val = bus_read_4(msix->msix_table_res, offset);
1345 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1346 val |= PCIM_MSIX_VCTRL_MASK;
1347 bus_write_4(msix->msix_table_res, offset, val);
1352 pci_unmask_msix(device_t dev, u_int index)
1354 struct pci_devinfo *dinfo = device_get_ivars(dev);
1355 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1356 uint32_t offset, val;
1358 KASSERT(msix->msix_table_len > index, ("bogus index"));
1359 offset = msix->msix_table_offset + index * 16 + 12;
1360 val = bus_read_4(msix->msix_table_res, offset);
1361 if (val & PCIM_MSIX_VCTRL_MASK) {
1362 val &= ~PCIM_MSIX_VCTRL_MASK;
1363 bus_write_4(msix->msix_table_res, offset, val);
1368 pci_pending_msix(device_t dev, u_int index)
1370 struct pci_devinfo *dinfo = device_get_ivars(dev);
1371 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1372 uint32_t offset, bit;
1374 KASSERT(msix->msix_table_len > index, ("bogus index"));
1375 offset = msix->msix_pba_offset + (index / 32) * 4;
1376 bit = 1 << index % 32;
1377 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1381 * Restore MSI-X registers and table during resume. If MSI-X is
1382 * enabled then walk the virtual table to restore the actual MSI-X
1386 pci_resume_msix(device_t dev)
1388 struct pci_devinfo *dinfo = device_get_ivars(dev);
1389 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1390 struct msix_table_entry *mte;
1391 struct msix_vector *mv;
1394 if (msix->msix_alloc > 0) {
1395 /* First, mask all vectors. */
1396 for (i = 0; i < msix->msix_msgnum; i++)
1397 pci_mask_msix(dev, i);
1399 /* Second, program any messages with at least one handler. */
1400 for (i = 0; i < msix->msix_table_len; i++) {
1401 mte = &msix->msix_table[i];
1402 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1404 mv = &msix->msix_vectors[mte->mte_vector - 1];
1405 pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
1406 pci_unmask_msix(dev, i);
1409 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1410 msix->msix_ctrl, 2);
1414 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1415 * returned in *count. After this function returns, each message will be
1416 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1419 pci_alloc_msix_method(device_t dev, device_t child, int *count)
1421 struct pci_devinfo *dinfo = device_get_ivars(child);
1422 pcicfgregs *cfg = &dinfo->cfg;
1423 struct resource_list_entry *rle;
1424 int actual, error, i, irq, max;
1426 /* Don't let count == 0 get us into trouble. */
1430 /* If rid 0 is allocated, then fail. */
1431 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1432 if (rle != NULL && rle->res != NULL)
1435 /* Already have allocated messages? */
1436 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1439 /* If MSI-X is blacklisted for this system, fail. */
1440 if (pci_msix_blacklisted())
1443 /* MSI-X capability present? */
1444 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1447 /* Make sure the appropriate BARs are mapped. */
1448 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1449 cfg->msix.msix_table_bar);
1450 if (rle == NULL || rle->res == NULL ||
1451 !(rman_get_flags(rle->res) & RF_ACTIVE))
1453 cfg->msix.msix_table_res = rle->res;
1454 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1455 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1456 cfg->msix.msix_pba_bar);
1457 if (rle == NULL || rle->res == NULL ||
1458 !(rman_get_flags(rle->res) & RF_ACTIVE))
1461 cfg->msix.msix_pba_res = rle->res;
1464 device_printf(child,
1465 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1466 *count, cfg->msix.msix_msgnum);
1467 max = min(*count, cfg->msix.msix_msgnum);
1468 for (i = 0; i < max; i++) {
1469 /* Allocate a message. */
1470 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1476 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1482 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1484 device_printf(child, "using IRQ %lu for MSI-X\n",
1490 * Be fancy and try to print contiguous runs of
1491 * IRQ values as ranges. 'irq' is the previous IRQ.
1492 * 'run' is true if we are in a range.
1494 device_printf(child, "using IRQs %lu", rle->start);
1497 for (i = 1; i < actual; i++) {
1498 rle = resource_list_find(&dinfo->resources,
1499 SYS_RES_IRQ, i + 1);
1501 /* Still in a run? */
1502 if (rle->start == irq + 1) {
1508 /* Finish previous range. */
1514 /* Start new range. */
1515 printf(",%lu", rle->start);
1519 /* Unfinished range? */
1522 printf(" for MSI-X\n");
1526 /* Mask all vectors. */
1527 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1528 pci_mask_msix(child, i);
1530 /* Allocate and initialize vector data and virtual table. */
1531 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1532 M_DEVBUF, M_WAITOK | M_ZERO);
1533 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1534 M_DEVBUF, M_WAITOK | M_ZERO);
1535 for (i = 0; i < actual; i++) {
1536 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1537 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1538 cfg->msix.msix_table[i].mte_vector = i + 1;
1541 /* Update control register to enable MSI-X. */
1542 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1543 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1544 cfg->msix.msix_ctrl, 2);
1546 /* Update counts of alloc'd messages. */
1547 cfg->msix.msix_alloc = actual;
1548 cfg->msix.msix_table_len = actual;
1554 * By default, pci_alloc_msix() will assign the allocated IRQ
1555 * resources consecutively to the first N messages in the MSI-X table.
1556 * However, device drivers may want to use different layouts if they
1557 * either receive fewer messages than they asked for, or they wish to
1558 * populate the MSI-X table sparsely. This method allows the driver
1559 * to specify what layout it wants. It must be called after a
1560 * successful pci_alloc_msix() but before any of the associated
1561 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1563 * The 'vectors' array contains 'count' message vectors. The array
1564 * maps directly to the MSI-X table in that index 0 in the array
1565 * specifies the vector for the first message in the MSI-X table, etc.
1566 * The vector value in each array index can either be 0 to indicate
1567 * that no vector should be assigned to a message slot, or it can be a
1568 * number from 1 to N (where N is the count returned from a
1569 * successful call to pci_alloc_msix()) to indicate which message
1570 * vector (IRQ) to be used for the corresponding message.
1572 * On successful return, each message with a non-zero vector will have
1573 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1574 * 1. Additionally, if any of the IRQs allocated via the previous
1575 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1576 * will be freed back to the system automatically.
1578 * For example, suppose a driver has a MSI-X table with 6 messages and
1579 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1580 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1581 * C. After the call to pci_alloc_msix(), the device will be setup to
1582 * have an MSI-X table of ABC--- (where - means no vector assigned).
1583 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1584 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1585 * be freed back to the system. This device will also have valid
1586 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1588 * In any case, the SYS_RES_IRQ rid X will always map to the message
1589 * at MSI-X table index X - 1 and will only be valid if a vector is
1590 * assigned to that table entry.
1593 pci_remap_msix_method(device_t dev, device_t child, int count,
1594 const u_int *vectors)
1596 struct pci_devinfo *dinfo = device_get_ivars(child);
1597 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1598 struct resource_list_entry *rle;
1599 int i, irq, j, *used;
1602 * Have to have at least one message in the table but the
1603 * table can't be bigger than the actual MSI-X table in the
1606 if (count == 0 || count > msix->msix_msgnum)
1609 /* Sanity check the vectors. */
1610 for (i = 0; i < count; i++)
1611 if (vectors[i] > msix->msix_alloc)
1615 * Make sure there aren't any holes in the vectors to be used.
1616 * It's a big pain to support it, and it doesn't really make
1617 * sense anyway. Also, at least one vector must be used.
1619 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1621 for (i = 0; i < count; i++)
1622 if (vectors[i] != 0)
1623 used[vectors[i] - 1] = 1;
1624 for (i = 0; i < msix->msix_alloc - 1; i++)
1625 if (used[i] == 0 && used[i + 1] == 1) {
1626 free(used, M_DEVBUF);
1630 free(used, M_DEVBUF);
1634 /* Make sure none of the resources are allocated. */
1635 for (i = 0; i < msix->msix_table_len; i++) {
1636 if (msix->msix_table[i].mte_vector == 0)
1638 if (msix->msix_table[i].mte_handlers > 0)
1640 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1641 KASSERT(rle != NULL, ("missing resource"));
1642 if (rle->res != NULL)
1646 /* Free the existing resource list entries. */
1647 for (i = 0; i < msix->msix_table_len; i++) {
1648 if (msix->msix_table[i].mte_vector == 0)
1650 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1654 * Build the new virtual table keeping track of which vectors are
1657 free(msix->msix_table, M_DEVBUF);
1658 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1659 M_DEVBUF, M_WAITOK | M_ZERO);
1660 for (i = 0; i < count; i++)
1661 msix->msix_table[i].mte_vector = vectors[i];
1662 msix->msix_table_len = count;
1664 /* Free any unused IRQs and resize the vectors array if necessary. */
1665 j = msix->msix_alloc - 1;
1667 struct msix_vector *vec;
1669 while (used[j] == 0) {
1670 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1671 msix->msix_vectors[j].mv_irq);
1674 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1676 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1678 free(msix->msix_vectors, M_DEVBUF);
1679 msix->msix_vectors = vec;
1680 msix->msix_alloc = j + 1;
1682 free(used, M_DEVBUF);
1684 /* Map the IRQs onto the rids. */
1685 for (i = 0; i < count; i++) {
1686 if (vectors[i] == 0)
1688 irq = msix->msix_vectors[vectors[i]].mv_irq;
1689 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1694 device_printf(child, "Remapped MSI-X IRQs as: ");
1695 for (i = 0; i < count; i++) {
1698 if (vectors[i] == 0)
1702 msix->msix_vectors[vectors[i]].mv_irq);
1711 pci_release_msix(device_t dev, device_t child)
1713 struct pci_devinfo *dinfo = device_get_ivars(child);
1714 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1715 struct resource_list_entry *rle;
1718 /* Do we have any messages to release? */
1719 if (msix->msix_alloc == 0)
1722 /* Make sure none of the resources are allocated. */
1723 for (i = 0; i < msix->msix_table_len; i++) {
1724 if (msix->msix_table[i].mte_vector == 0)
1726 if (msix->msix_table[i].mte_handlers > 0)
1728 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1729 KASSERT(rle != NULL, ("missing resource"));
1730 if (rle->res != NULL)
1734 /* Update control register to disable MSI-X. */
1735 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
1736 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
1737 msix->msix_ctrl, 2);
1739 /* Free the resource list entries. */
1740 for (i = 0; i < msix->msix_table_len; i++) {
1741 if (msix->msix_table[i].mte_vector == 0)
1743 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1745 free(msix->msix_table, M_DEVBUF);
1746 msix->msix_table_len = 0;
1748 /* Release the IRQs. */
1749 for (i = 0; i < msix->msix_alloc; i++)
1750 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1751 msix->msix_vectors[i].mv_irq);
1752 free(msix->msix_vectors, M_DEVBUF);
1753 msix->msix_alloc = 0;
1758 * Return the max supported MSI-X messages this device supports.
1759 * Basically, assuming the MD code can alloc messages, this function
1760 * should return the maximum value that pci_alloc_msix() can return.
1761 * Thus, it is subject to the tunables, etc.
1764 pci_msix_count_method(device_t dev, device_t child)
1766 struct pci_devinfo *dinfo = device_get_ivars(child);
1767 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1769 if (pci_do_msix && msix->msix_location != 0)
1770 return (msix->msix_msgnum);
1775 * HyperTransport MSI mapping control
1778 pci_ht_map_msi(device_t dev, uint64_t addr)
1780 struct pci_devinfo *dinfo = device_get_ivars(dev);
1781 struct pcicfg_ht *ht = &dinfo->cfg.ht;
1786 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1787 ht->ht_msiaddr >> 20 == addr >> 20) {
1788 /* Enable MSI -> HT mapping. */
1789 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1790 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1794 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1795 /* Disable MSI -> HT mapping. */
1796 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1797 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1803 pci_get_max_read_req(device_t dev)
1805 struct pci_devinfo *dinfo = device_get_ivars(dev);
1809 cap = dinfo->cfg.pcie.pcie_location;
1812 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1813 val &= PCIEM_CTL_MAX_READ_REQUEST;
1815 return (1 << (val + 7));
1819 pci_set_max_read_req(device_t dev, int size)
1821 struct pci_devinfo *dinfo = device_get_ivars(dev);
1825 cap = dinfo->cfg.pcie.pcie_location;
1832 size = (1 << (fls(size) - 1));
1833 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1834 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1835 val |= (fls(size) - 8) << 12;
1836 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1841 * Support for MSI message signalled interrupts.
1844 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
1846 struct pci_devinfo *dinfo = device_get_ivars(dev);
1847 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1849 /* Write data and address values. */
1850 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1851 address & 0xffffffff, 4);
1852 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1853 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
1855 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
1858 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
1861 /* Enable MSI in the control register. */
1862 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
1863 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1866 /* Enable MSI -> HT mapping. */
1867 pci_ht_map_msi(dev, address);
1871 pci_disable_msi(device_t dev)
1873 struct pci_devinfo *dinfo = device_get_ivars(dev);
1874 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1876 /* Disable MSI -> HT mapping. */
1877 pci_ht_map_msi(dev, 0);
1879 /* Disable MSI in the control register. */
1880 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1881 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1886 * Restore MSI registers during resume. If MSI is enabled then
1887 * restore the data and address registers in addition to the control
1891 pci_resume_msi(device_t dev)
1893 struct pci_devinfo *dinfo = device_get_ivars(dev);
1894 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1898 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
1899 address = msi->msi_addr;
1900 data = msi->msi_data;
1901 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
1902 address & 0xffffffff, 4);
1903 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
1904 pci_write_config(dev, msi->msi_location +
1905 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
1906 pci_write_config(dev, msi->msi_location +
1907 PCIR_MSI_DATA_64BIT, data, 2);
1909 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
1912 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
1917 pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
1919 struct pci_devinfo *dinfo = device_get_ivars(dev);
1920 pcicfgregs *cfg = &dinfo->cfg;
1921 struct resource_list_entry *rle;
1922 struct msix_table_entry *mte;
1923 struct msix_vector *mv;
1929 * Handle MSI first. We try to find this IRQ among our list
1930 * of MSI IRQs. If we find it, we request updated address and
1931 * data registers and apply the results.
1933 if (cfg->msi.msi_alloc > 0) {
1935 /* If we don't have any active handlers, nothing to do. */
1936 if (cfg->msi.msi_handlers == 0)
1938 for (i = 0; i < cfg->msi.msi_alloc; i++) {
1939 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
1941 if (rle->start == irq) {
1942 error = PCIB_MAP_MSI(device_get_parent(bus),
1943 dev, irq, &addr, &data);
1946 pci_disable_msi(dev);
1947 dinfo->cfg.msi.msi_addr = addr;
1948 dinfo->cfg.msi.msi_data = data;
1949 pci_enable_msi(dev, addr, data);
1957 * For MSI-X, we check to see if we have this IRQ. If we do,
1958 * we request the updated mapping info. If that works, we go
1959 * through all the slots that use this IRQ and update them.
1961 if (cfg->msix.msix_alloc > 0) {
1962 for (i = 0; i < cfg->msix.msix_alloc; i++) {
1963 mv = &cfg->msix.msix_vectors[i];
1964 if (mv->mv_irq == irq) {
1965 error = PCIB_MAP_MSI(device_get_parent(bus),
1966 dev, irq, &addr, &data);
1969 mv->mv_address = addr;
1971 for (j = 0; j < cfg->msix.msix_table_len; j++) {
1972 mte = &cfg->msix.msix_table[j];
1973 if (mte->mte_vector != i + 1)
1975 if (mte->mte_handlers == 0)
1977 pci_mask_msix(dev, j);
1978 pci_enable_msix(dev, j, addr, data);
1979 pci_unmask_msix(dev, j);
1990 * Returns true if the specified device is blacklisted because MSI
1994 pci_msi_device_blacklisted(device_t dev)
1997 if (!pci_honor_msi_blacklist)
2000 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2004 * Determine if MSI is blacklisted globally on this system. Currently,
2005 * we just check for blacklisted chipsets as represented by the
2006 * host-PCI bridge at device 0:0:0. In the future, it may become
2007 * necessary to check other system attributes, such as the kenv values
2008 * that give the motherboard manufacturer and model number.
2011 pci_msi_blacklisted(void)
2015 if (!pci_honor_msi_blacklist)
2018 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2019 if (!(pcie_chipset || pcix_chipset)) {
2020 if (vm_guest != VM_GUEST_NO) {
2022 * Whitelist older chipsets in virtual
2023 * machines known to support MSI.
2025 dev = pci_find_bsf(0, 0, 0);
2027 return (!pci_has_quirk(pci_get_devid(dev),
2028 PCI_QUIRK_ENABLE_MSI_VM));
2033 dev = pci_find_bsf(0, 0, 0);
2035 return (pci_msi_device_blacklisted(dev));
2040 * Returns true if the specified device is blacklisted because MSI-X
2041 * doesn't work. Note that this assumes that if MSI doesn't work,
2042 * MSI-X doesn't either.
2045 pci_msix_device_blacklisted(device_t dev)
2048 if (!pci_honor_msi_blacklist)
2051 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2054 return (pci_msi_device_blacklisted(dev));
2058 * Determine if MSI-X is blacklisted globally on this system. If MSI
2059 * is blacklisted, assume that MSI-X is as well. Check for additional
2060 * chipsets where MSI works but MSI-X does not.
2063 pci_msix_blacklisted(void)
2067 if (!pci_honor_msi_blacklist)
2070 dev = pci_find_bsf(0, 0, 0);
2071 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2072 PCI_QUIRK_DISABLE_MSIX))
2075 return (pci_msi_blacklisted());
2079 * Attempt to allocate *count MSI messages. The actual number allocated is
2080 * returned in *count. After this function returns, each message will be
2081 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2084 pci_alloc_msi_method(device_t dev, device_t child, int *count)
2086 struct pci_devinfo *dinfo = device_get_ivars(child);
2087 pcicfgregs *cfg = &dinfo->cfg;
2088 struct resource_list_entry *rle;
2089 int actual, error, i, irqs[32];
2092 /* Don't let count == 0 get us into trouble. */
2096 /* If rid 0 is allocated, then fail. */
2097 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2098 if (rle != NULL && rle->res != NULL)
2101 /* Already have allocated messages? */
2102 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2105 /* If MSI is blacklisted for this system, fail. */
2106 if (pci_msi_blacklisted())
2109 /* MSI capability present? */
2110 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2114 device_printf(child,
2115 "attempting to allocate %d MSI vectors (%d supported)\n",
2116 *count, cfg->msi.msi_msgnum);
2118 /* Don't ask for more than the device supports. */
2119 actual = min(*count, cfg->msi.msi_msgnum);
2121 /* Don't ask for more than 32 messages. */
2122 actual = min(actual, 32);
2124 /* MSI requires power of 2 number of messages. */
2125 if (!powerof2(actual))
2129 /* Try to allocate N messages. */
2130 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2142 * We now have N actual messages mapped onto SYS_RES_IRQ
2143 * resources in the irqs[] array, so add new resources
2144 * starting at rid 1.
2146 for (i = 0; i < actual; i++)
2147 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2148 irqs[i], irqs[i], 1);
2152 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2157 * Be fancy and try to print contiguous runs
2158 * of IRQ values as ranges. 'run' is true if
2159 * we are in a range.
2161 device_printf(child, "using IRQs %d", irqs[0]);
2163 for (i = 1; i < actual; i++) {
2165 /* Still in a run? */
2166 if (irqs[i] == irqs[i - 1] + 1) {
2171 /* Finish previous range. */
2173 printf("-%d", irqs[i - 1]);
2177 /* Start new range. */
2178 printf(",%d", irqs[i]);
2181 /* Unfinished range? */
2183 printf("-%d", irqs[actual - 1]);
2184 printf(" for MSI\n");
2188 /* Update control register with actual count. */
2189 ctrl = cfg->msi.msi_ctrl;
2190 ctrl &= ~PCIM_MSICTRL_MME_MASK;
2191 ctrl |= (ffs(actual) - 1) << 4;
2192 cfg->msi.msi_ctrl = ctrl;
2193 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2195 /* Update counts of alloc'd messages. */
2196 cfg->msi.msi_alloc = actual;
2197 cfg->msi.msi_handlers = 0;
2202 /* Release the MSI messages associated with this device. */
2204 pci_release_msi_method(device_t dev, device_t child)
2206 struct pci_devinfo *dinfo = device_get_ivars(child);
2207 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2208 struct resource_list_entry *rle;
2209 int error, i, irqs[32];
2211 /* Try MSI-X first. */
2212 error = pci_release_msix(dev, child);
2213 if (error != ENODEV)
2216 /* Do we have any messages to release? */
2217 if (msi->msi_alloc == 0)
2219 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2221 /* Make sure none of the resources are allocated. */
2222 if (msi->msi_handlers > 0)
2224 for (i = 0; i < msi->msi_alloc; i++) {
2225 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2226 KASSERT(rle != NULL, ("missing MSI resource"));
2227 if (rle->res != NULL)
2229 irqs[i] = rle->start;
2232 /* Update control register with 0 count. */
2233 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2234 ("%s: MSI still enabled", __func__));
2235 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2236 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2239 /* Release the messages. */
2240 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2241 for (i = 0; i < msi->msi_alloc; i++)
2242 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2244 /* Update alloc count. */
2252 * Return the max supported MSI messages this device supports.
2253 * Basically, assuming the MD code can alloc messages, this function
2254 * should return the maximum value that pci_alloc_msi() can return.
2255 * Thus, it is subject to the tunables, etc.
2258 pci_msi_count_method(device_t dev, device_t child)
2260 struct pci_devinfo *dinfo = device_get_ivars(child);
2261 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2263 if (pci_do_msi && msi->msi_location != 0)
2264 return (msi->msi_msgnum);
2268 /* free pcicfgregs structure and all depending data structures */
2271 pci_freecfg(struct pci_devinfo *dinfo)
2273 struct devlist *devlist_head;
2274 struct pci_map *pm, *next;
2277 devlist_head = &pci_devq;
2279 if (dinfo->cfg.vpd.vpd_reg) {
2280 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2281 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2282 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2283 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2284 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2285 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2286 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2288 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2291 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2292 free(dinfo, M_DEVBUF);
2294 /* increment the generation count */
2297 /* we're losing one device */
2303 * PCI power manangement
2306 pci_set_powerstate_method(device_t dev, device_t child, int state)
2308 struct pci_devinfo *dinfo = device_get_ivars(child);
2309 pcicfgregs *cfg = &dinfo->cfg;
2311 int result, oldstate, highest, delay;
2313 if (cfg->pp.pp_cap == 0)
2314 return (EOPNOTSUPP);
2317 * Optimize a no state change request away. While it would be OK to
2318 * write to the hardware in theory, some devices have shown odd
2319 * behavior when going from D3 -> D3.
2321 oldstate = pci_get_powerstate(child);
2322 if (oldstate == state)
2326 * The PCI power management specification states that after a state
2327 * transition between PCI power states, system software must
2328 * guarantee a minimal delay before the function accesses the device.
2329 * Compute the worst case delay that we need to guarantee before we
2330 * access the device. Many devices will be responsive much more
2331 * quickly than this delay, but there are some that don't respond
2332 * instantly to state changes. Transitions to/from D3 state require
2333 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2334 * is done below with DELAY rather than a sleeper function because
2335 * this function can be called from contexts where we cannot sleep.
2337 highest = (oldstate > state) ? oldstate : state;
2338 if (highest == PCI_POWERSTATE_D3)
2340 else if (highest == PCI_POWERSTATE_D2)
2344 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2345 & ~PCIM_PSTAT_DMASK;
2348 case PCI_POWERSTATE_D0:
2349 status |= PCIM_PSTAT_D0;
2351 case PCI_POWERSTATE_D1:
2352 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2353 return (EOPNOTSUPP);
2354 status |= PCIM_PSTAT_D1;
2356 case PCI_POWERSTATE_D2:
2357 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2358 return (EOPNOTSUPP);
2359 status |= PCIM_PSTAT_D2;
2361 case PCI_POWERSTATE_D3:
2362 status |= PCIM_PSTAT_D3;
2369 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2372 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
2379 pci_get_powerstate_method(device_t dev, device_t child)
2381 struct pci_devinfo *dinfo = device_get_ivars(child);
2382 pcicfgregs *cfg = &dinfo->cfg;
2386 if (cfg->pp.pp_cap != 0) {
2387 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2388 switch (status & PCIM_PSTAT_DMASK) {
2390 result = PCI_POWERSTATE_D0;
2393 result = PCI_POWERSTATE_D1;
2396 result = PCI_POWERSTATE_D2;
2399 result = PCI_POWERSTATE_D3;
2402 result = PCI_POWERSTATE_UNKNOWN;
2406 /* No support, device is always at D0 */
2407 result = PCI_POWERSTATE_D0;
2413 * Some convenience functions for PCI device drivers.
2416 static __inline void
2417 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2421 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2423 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2426 static __inline void
2427 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2431 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2433 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2437 pci_enable_busmaster_method(device_t dev, device_t child)
2439 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2444 pci_disable_busmaster_method(device_t dev, device_t child)
2446 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2451 pci_enable_io_method(device_t dev, device_t child, int space)
2456 case SYS_RES_IOPORT:
2457 bit = PCIM_CMD_PORTEN;
2459 case SYS_RES_MEMORY:
2460 bit = PCIM_CMD_MEMEN;
2465 pci_set_command_bit(dev, child, bit);
2470 pci_disable_io_method(device_t dev, device_t child, int space)
2475 case SYS_RES_IOPORT:
2476 bit = PCIM_CMD_PORTEN;
2478 case SYS_RES_MEMORY:
2479 bit = PCIM_CMD_MEMEN;
2484 pci_clear_command_bit(dev, child, bit);
2489 * New style pci driver. Parent device is either a pci-host-bridge or a
2490 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2494 pci_print_verbose(struct pci_devinfo *dinfo)
2498 pcicfgregs *cfg = &dinfo->cfg;
2500 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
2501 cfg->vendor, cfg->device, cfg->revid);
2502 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
2503 cfg->domain, cfg->bus, cfg->slot, cfg->func);
2504 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
2505 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
2507 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
2508 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
2509 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
2510 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
2511 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
2512 if (cfg->intpin > 0)
2513 printf("\tintpin=%c, irq=%d\n",
2514 cfg->intpin +'a' -1, cfg->intline);
2515 if (cfg->pp.pp_cap) {
2518 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
2519 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
2520 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
2521 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
2522 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
2523 status & PCIM_PSTAT_DMASK);
2525 if (cfg->msi.msi_location) {
2528 ctrl = cfg->msi.msi_ctrl;
2529 printf("\tMSI supports %d message%s%s%s\n",
2530 cfg->msi.msi_msgnum,
2531 (cfg->msi.msi_msgnum == 1) ? "" : "s",
2532 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
2533 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
2535 if (cfg->msix.msix_location) {
2536 printf("\tMSI-X supports %d message%s ",
2537 cfg->msix.msix_msgnum,
2538 (cfg->msix.msix_msgnum == 1) ? "" : "s");
2539 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
2540 printf("in map 0x%x\n",
2541 cfg->msix.msix_table_bar);
2543 printf("in maps 0x%x and 0x%x\n",
2544 cfg->msix.msix_table_bar,
2545 cfg->msix.msix_pba_bar);
2551 pci_porten(device_t dev)
2553 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
2557 pci_memen(device_t dev)
2559 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
/*
 * Read a BAR's current value (*mapp) and probe its size by the standard
 * write-all-ones protocol (*testvalp).  The caller decodes the size from
 * the test value.  Decoding is disabled around the probe so transient BAR
 * contents are never decoded on the bus, and the original value is
 * restored before returning.
 * NOTE(review): several lines (declarations of cmd/ln2range, some braces
 * and closing statements) are missing from this listing.
 */
2563 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
2565 	struct pci_devinfo *dinfo;
2566 	pci_addr_t map, testval;
2571 	 * The device ROM BAR is special.  It is always a 32-bit
2572 	 * memory BAR.  Bit 0 is special and should not be set when
2575 	dinfo = device_get_ivars(dev);
2576 	if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
	/* ROM BAR: size with 0xfffffffe so the enable bit (bit 0) stays clear. */
2577 		map = pci_read_config(dev, reg, 4);
2578 		pci_write_config(dev, reg, 0xfffffffe, 4);
2579 		testval = pci_read_config(dev, reg, 4);
2580 		pci_write_config(dev, reg, map, 4);
2582 		*testvalp = testval;
	/* Normal BAR: read low dword; 64-bit BARs also use the next register. */
2586 	map = pci_read_config(dev, reg, 4);
2587 	ln2range = pci_maprange(map);
2589 		map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2592 	 * Disable decoding via the command register before
2593 	 * determining the BAR's length since we will be placing it in
2596 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2597 	pci_write_config(dev, PCIR_COMMAND,
2598 	    cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
2601 	 * Determine the BAR's length by writing all 1's.  The bottom
2602 	 * log_2(size) bits of the BAR will stick as 0 when we read
2605 	pci_write_config(dev, reg, 0xffffffff, 4);
2606 	testval = pci_read_config(dev, reg, 4);
2607 	if (ln2range == 64) {
2608 		pci_write_config(dev, reg + 4, 0xffffffff, 4);
2609 		testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
2613 	 * Restore the original value of the BAR.  We may have reprogrammed
2614 	 * the BAR of the low-level console device and when booting verbose,
2615 	 * we need the console device addressable.
2617 	pci_write_config(dev, reg, map, 4);
2619 		pci_write_config(dev, reg + 4, map >> 32, 4);
2620 	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2623 	*testvalp = testval;
/*
 * Program a BAR with a new base address and re-read the hardware value
 * into pm->pm_value (the device may have hard-wired some bits).
 * The ROM BAR is always 32-bit; 64-bit BARs write the upper dword too.
 * NOTE(review): truncated listing — the ln2range declaration, an early
 * return for the ROM case, and some conditionals are missing lines.
 */
2627 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
2629 	struct pci_devinfo *dinfo;
2632 	/* The device ROM BAR is always a 32-bit memory BAR. */
2633 	dinfo = device_get_ivars(dev);
2634 	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2637 	ln2range = pci_maprange(pm->pm_value);
2638 	pci_write_config(dev, pm->pm_reg, base, 4);
2640 		pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
2641 	pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
2643 		pm->pm_value |= (pci_addr_t)pci_read_config(dev,
2644 		    pm->pm_reg + 4, 4) << 32;
/*
 * Look up the struct pci_map tracking BAR register 'reg' on this device;
 * walks the per-device sorted BAR list.  NOTE(review): the return
 * statements and braces are missing from this truncated listing.
 */
2648 pci_find_bar(device_t dev, int reg)
2650 	struct pci_devinfo *dinfo;
2653 	dinfo = device_get_ivars(dev);
2654 	STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2655 		if (pm->pm_reg == reg)
/*
 * Return non-zero if the given BAR is currently decoded: the ROM BAR
 * additionally requires its own enable bit (PCIM_BIOS_ENABLE), then
 * memory BARs check PCIM_CMD_MEMEN and I/O BARs PCIM_CMD_PORTEN.
 * NOTE(review): truncated listing — the 'cmd' declaration and early
 * return for the disabled-ROM case are missing lines.
 */
2662 pci_bar_enabled(device_t dev, struct pci_map *pm)
2664 	struct pci_devinfo *dinfo;
2667 	dinfo = device_get_ivars(dev);
2668 	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2669 	    !(pm->pm_value & PCIM_BIOS_ENABLE))
2671 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2672 	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2673 		return ((cmd & PCIM_CMD_MEMEN) != 0);
2675 		return ((cmd & PCIM_CMD_PORTEN) != 0);
/*
 * Allocate and record a struct pci_map for BAR 'reg', keeping the
 * per-device list sorted by register offset.  M_WAITOK: may sleep.
 * Asserts the register is not already tracked.
 * NOTE(review): truncated listing — assignments of pm_reg/pm_size, loop
 * break, and the empty-list branch are missing lines.
 */
2678 static struct pci_map *
2679 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
2681 	struct pci_devinfo *dinfo;
2682 	struct pci_map *pm, *prev;
2684 	dinfo = device_get_ivars(dev);
2685 	pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
2687 	pm->pm_value = value;
2689 	STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
2690 		KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
	/* Insert before the first entry with a larger register offset. */
2692 		if (STAILQ_NEXT(prev, pm_link) == NULL ||
2693 		    STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
2697 		STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
2699 		STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
/*
 * Rewrite every tracked BAR with its saved value — used after a device
 * loses its configuration (e.g. power-state transition).  The ROM BAR
 * is skipped; 64-bit BARs restore the upper dword as well.
 * NOTE(review): 'pm' and 'ln2range' declarations and some braces are
 * missing from this truncated listing.
 */
2704 pci_restore_bars(device_t dev)
2706 	struct pci_devinfo *dinfo;
2710 	dinfo = device_get_ivars(dev);
2711 	STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2712 		if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2715 		ln2range = pci_maprange(pm->pm_value);
2716 		pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2718 			pci_write_config(dev, pm->pm_reg + 4,
2719 			    pm->pm_value >> 32, 4);
/*
 * Add a resource based on a pci map register.  Return 1 if the map
 * register is a 32bit map register or 2 if it is a 64bit register
 * (so the caller can advance past the consumed BAR slots).
 *
 * Probes the BAR, validates it against the spec, records it, optionally
 * enables decoding, adds it to the resource list and tries to reserve
 * the range from the parent bus.  'force' adds even unassigned BARs;
 * 'prefetch' requests RF_PREFETCHABLE on the reservation.
 * NOTE(review): this listing is truncated — many braces, else arms, and
 * early returns are missing; treat control flow here as indicative only.
 */
2728 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
2729     int force, int prefetch)
2732 	pci_addr_t base, map, testval;
2733 	pci_addr_t start, end, count;
2734 	int barlen, basezero, maprange, mapsize, type;
2736 	struct resource *res;
2739 	 * The BAR may already exist if the device is a CardBus card
2740 	 * whose CIS is stored in this BAR.
2742 	pm = pci_find_bar(dev, reg);
2744 		maprange = pci_maprange(pm->pm_value);
2745 		barlen = maprange == 64 ? 2 : 1;
2749 	pci_read_bar(dev, reg, &map, &testval);
2750 	if (PCI_BAR_MEM(map)) {
2751 		type = SYS_RES_MEMORY;
2752 		if (map & PCIM_BAR_MEM_PREFETCH)
2755 		type = SYS_RES_IOPORT;
2756 	mapsize = pci_mapsize(testval);
2757 	base = pci_mapbase(map);
2758 #ifdef __PCI_BAR_ZERO_VALID
	/* On some platforms a base of 0 is a valid assignment. */
2761 	basezero = base == 0;
2763 	maprange = pci_maprange(map);
2764 	barlen = maprange == 64 ? 2 : 1;
2767 	 * For I/O registers, if bottom bit is set, and the next bit up
2768 	 * isn't clear, we know we have a BAR that doesn't conform to the
2769 	 * spec, so ignore it.  Also, sanity check the size of the data
2770 	 * areas to the type of memory involved.  Memory must be at least
2771 	 * 16 bytes in size, while I/O ranges must be at least 4.
2773 	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
2775 	if ((type == SYS_RES_MEMORY && mapsize < 4) ||
2776 	    (type == SYS_RES_IOPORT && mapsize < 2))
2779 	/* Save a record of this BAR. */
2780 	pm = pci_add_bar(dev, reg, map, mapsize);
2782 		printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
2783 		    reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
2784 		if (type == SYS_RES_IOPORT && !pci_porten(dev))
2785 			printf(", port disabled\n");
2786 		else if (type == SYS_RES_MEMORY && !pci_memen(dev))
2787 			printf(", memory disabled\n");
2789 			printf(", enabled\n");
2793 	 * If base is 0, then we have problems if this architecture does
2794 	 * not allow that.  It is best to ignore such entries for the
2795 	 * moment.  These will be allocated later if the driver specifically
2796 	 * requests them.  However, some removable busses look better when
2797 	 * all resources are allocated, so allow '0' to be overriden.
2799 	 * Similarly treat maps whose values is the same as the test value
2800 	 * read back.  These maps have had all f's written to them by the
2801 	 * BIOS in an attempt to disable the resources.
2803 	if (!force && (basezero || map == testval))
	/* Bail out if the BAR's address doesn't fit in a u_long on this arch. */
2805 	if ((u_long)base != base) {
2807 		    "pci%d:%d:%d:%d bar %#x too many address bits",
2808 		    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
2809 		    pci_get_function(dev), reg);
2814 	 * This code theoretically does the right thing, but has
2815 	 * undesirable side effects in some cases where peripherals
2816 	 * respond oddly to having these bits enabled.  Let the user
2817 	 * be able to turn them off (since pci_enable_io_modes is 1 by
2820 	if (pci_enable_io_modes) {
2821 		/* Turn on resources that have been left off by a lazy BIOS */
2822 		if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
2823 			cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2824 			cmd |= PCIM_CMD_PORTEN;
2825 			pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2827 		if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
2828 			cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2829 			cmd |= PCIM_CMD_MEMEN;
2830 			pci_write_config(dev, PCIR_COMMAND, cmd, 2);
2833 		if (type == SYS_RES_IOPORT && !pci_porten(dev))
2835 		if (type == SYS_RES_MEMORY && !pci_memen(dev))
2839 	count = (pci_addr_t)1 << mapsize;
2840 	if (basezero || base == pci_mapbase(testval)) {
2841 		start = 0;	/* Let the parent decide. */
2845 		end = base + count - 1;
2847 	resource_list_add(rl, type, reg, start, end, count);
2850 	 * Try to allocate the resource for this BAR from our parent
2851 	 * so that this resource range is already reserved.  The
2852 	 * driver for this device will later inherit this resource in
2853 	 * pci_alloc_resource().
2855 	res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
2856 	    prefetch ? RF_PREFETCHABLE : 0);
2857 	if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
2859 		 * If the allocation fails, try to allocate a resource for
2860 		 * this BAR using any available range.  The firmware felt
2861 		 * it was important enough to assign a resource, so don't
2862 		 * disable decoding if we can help it.
2864 		resource_list_delete(rl, type, reg);
2865 		resource_list_add(rl, type, reg, 0, ~0ul, count);
2866 		res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0ul,
2867 		    count, prefetch ? RF_PREFETCHABLE : 0);
2871 		 * If the allocation fails, delete the resource list entry
2872 		 * and disable decoding for this device.
2874 		 * If the driver requests this resource in the future,
2875 		 * pci_reserve_map() will try to allocate a fresh
2878 		resource_list_delete(rl, type, reg);
2879 		pci_disable_io(dev, type);
2882 			    "pci%d:%d:%d:%d bar %#x failed to allocate\n",
2883 			    pci_get_domain(dev), pci_get_bus(dev),
2884 			    pci_get_slot(dev), pci_get_function(dev), reg);
2886 		start = rman_get_start(res);
2887 		pci_write_bar(dev, pm, start);
/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sits on the
 * same addresses that old ISA hardware did.  This dictates that we use
 * those addresses and ignore the BAR's if we cannot set PCI native
 * addressing mode.
 *
 * In native mode (progif 0x8a with usable BAR 0/2) the normal BARs are
 * used; otherwise the legacy fixed ISA ranges (0x1f0/0x3f6, 0x170/0x376)
 * are added and reserved directly.  BARs 4/5 (bus-master DMA) are added
 * in either mode.
 * NOTE(review): truncated listing — 'r' declaration, rid assignments,
 * and some braces are missing lines.
 */
2900 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2901     uint32_t prefetchmask)
2904 	int rid, type, progif;
2906 	/* if this device supports PCI native addressing use it */
2907 	progif = pci_read_config(dev, PCIR_PROGIF, 1);
2908 	if ((progif & 0x8a) == 0x8a) {
2909 		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2910 		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2911 			printf("Trying ATA native PCI addressing mode\n");
2912 			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2916 	progif = pci_read_config(dev, PCIR_PROGIF, 1);
2917 	type = SYS_RES_IOPORT;
2918 	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2919 		pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2920 		    prefetchmask & (1 << 0));
2921 		pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2922 		    prefetchmask & (1 << 1));
	/* Legacy primary channel: command block 0x1f0-0x1f7, control 0x3f6. */
2925 		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2926 		r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
2929 		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2930 		r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
2933 	if (progif & PCIP_STORAGE_IDE_MODESEC) {
2934 		pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2935 		    prefetchmask & (1 << 2));
2936 		pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2937 		    prefetchmask & (1 << 3));
	/* Legacy secondary channel: command block 0x170-0x177, control 0x376. */
2940 		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2941 		r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
2944 		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2945 		r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
2948 	pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
2949 	    prefetchmask & (1 << 4));
2950 	pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
2951 	    prefetchmask & (1 << 5));
/*
 * Determine the legacy INTx IRQ for a device and register it as rid-0
 * SYS_RES_IRQ.  Priority: user tunable hw.pci<dom>.<bus>.<slot>.INT<pin>.irq,
 * then the intline register, then a bus-routed interrupt (routing is
 * forced first when force_route is set).  Updates PCIR_INTLINE if the
 * chosen IRQ differs.
 * NOTE(review): truncated listing — the 'irq' declaration, early returns,
 * and cfg->intline update are missing lines.
 */
2955 pci_assign_interrupt(device_t bus, device_t dev, int force_route)
2957 	struct pci_devinfo *dinfo = device_get_ivars(dev);
2958 	pcicfgregs *cfg = &dinfo->cfg;
2959 	char tunable_name[64];
2962 	/* Has to have an intpin to have an interrupt. */
2963 	if (cfg->intpin == 0)
2966 	/* Let the user override the IRQ with a tunable. */
2967 	irq = PCI_INVALID_IRQ;
2968 	snprintf(tunable_name, sizeof(tunable_name),
2969 	    "hw.pci%d.%d.%d.INT%c.irq",
2970 	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
2971 	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
2972 		irq = PCI_INVALID_IRQ;
2975 	 * If we didn't get an IRQ via the tunable, then we either use the
2976 	 * IRQ value in the intline register or we ask the bus to route an
2977 	 * interrupt for us.  If force_route is true, then we only use the
2978 	 * value in the intline register if the bus was unable to assign an
2981 	if (!PCI_INTERRUPT_VALID(irq)) {
2982 		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
2983 			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
2984 		if (!PCI_INTERRUPT_VALID(irq))
2988 	/* If after all that we don't have an IRQ, just bail. */
2989 	if (!PCI_INTERRUPT_VALID(irq))
2992 	/* Update the config register if it changed. */
2993 	if (irq != cfg->intline) {
2995 		pci_write_config(dev, PCIR_INTLINE, irq, 1);
2998 	/* Add this IRQ as rid 0 interrupt resource. */
2999 	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
/* Perform early OHCI takeover from SMM. */
/*
 * Request ownership of an OHCI controller from the BIOS/SMM: set the
 * ownership-change request bit, poll for the interrupt-routing bit to
 * clear, hard-reset the host controller if SMM never responds, and
 * disable all OHCI interrupts before releasing the BAR mapping.
 * NOTE(review): truncated listing — rid initialization, NULL check,
 * poll delay, and loop braces are missing lines.
 */
3004 ohci_early_takeover(device_t self)
3006 	struct resource *res;
3012 	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3016 	ctl = bus_read_4(res, OHCI_CONTROL);
3017 	if (ctl & OHCI_IR) {
3019 			printf("ohci early: "
3020 			    "SMM active, request owner change\n");
3021 		bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3022 		for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3024 			ctl = bus_read_4(res, OHCI_CONTROL);
3026 		if (ctl & OHCI_IR) {
3028 				printf("ohci early: "
3029 				    "SMM does not respond, resetting\n");
3030 			bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3032 	/* Disable interrupts */
3033 	bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3036 	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/* Perform early UHCI takeover from SMM. */
/*
 * Take a UHCI controller away from legacy/SMM emulation by writing the
 * PIRQD-enable-only value to the legacy-support config register, then
 * masking all interrupts through the I/O BAR.
 * NOTE(review): truncated listing — the NULL check on the resource and
 * closing braces are missing lines.
 */
3041 uhci_early_takeover(device_t self)
3043 	struct resource *res;
3047 	 * Set the PIRQD enable bit and switch off all the others.  We don't
3048 	 * want legacy support to interfere with us XXX Does this also mean
3049 	 * that the BIOS won't touch the keyboard anymore if it is connected
3050 	 * to the ports of the root hub?
3052 	pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3054 	/* Disable interrupts */
3055 	rid = PCI_UHCI_BASE_REG;
3056 	res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3058 		bus_write_2(res, UHCI_INTR, 0);
3059 		bus_release_resource(self, SYS_RES_IOPORT, rid, res);
/* Perform early EHCI takeover from SMM. */
/*
 * Walk the EHCI extended-capability list in config space looking for the
 * legacy-support capability; if the BIOS semaphore is held, set the OS
 * semaphore and poll for the BIOS to release.  Interrupts are then
 * disabled via USBINTR at the operational register offset.
 * NOTE(review): truncated listing — rid setup, NULL check, continue on
 * non-LEGSUP capability, poll delay, and several braces are missing.
 */
3065 ehci_early_takeover(device_t self)
3067 	struct resource *res;
3077 	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3081 	cparams = bus_read_4(res, EHCI_HCCPARAMS);
3083 	/* Synchronise with the BIOS if it owns the controller. */
3084 	for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3085 	    eecp = EHCI_EECP_NEXT(eec)) {
3086 		eec = pci_read_config(self, eecp, 4);
3087 		if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3090 		bios_sem = pci_read_config(self, eecp +
3091 		    EHCI_LEGSUP_BIOS_SEM, 1);
3092 		if (bios_sem == 0) {
3096 			printf("ehci early: "
3097 			    "SMM active, request owner change\n");
3099 		pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3101 		for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3103 			bios_sem = pci_read_config(self, eecp +
3104 			    EHCI_LEGSUP_BIOS_SEM, 1);
3107 		if (bios_sem != 0) {
3109 				printf("ehci early: "
3110 				    "SMM does not respond\n");
3112 		/* Disable interrupts */
3113 		offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3114 		bus_write_4(res, offs + EHCI_USBINTR, 0);
3116 	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/* Perform early XHCI takeover from SMM. */
/*
 * Same idea as the EHCI takeover, but the XHCI extended-capability list
 * lives in MMIO space (offsets are in dwords, hence the << 2).  After
 * claiming the BIOS semaphore (up to ~5s), interrupts are disabled by
 * clearing USBCMD and reading USBSTS to flush the write.
 * NOTE(review): truncated listing — rid setup, NULL check, the initial
 * bios_sem test, poll delay, and several braces are missing lines.
 */
3121 xhci_early_takeover(device_t self)
3123 	struct resource *res;
3133 	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3137 	cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3141 	/* Synchronise with the BIOS if it owns the controller. */
3142 	for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3143 	    eecp += XHCI_XECP_NEXT(eec) << 2) {
3144 		eec = bus_read_4(res, eecp);
3146 		if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3149 		bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3154 			printf("xhci early: "
3155 			    "SMM active, request owner change\n");
3157 		bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3159 		/* wait a maximum of 5 second */
3161 		for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3163 			bios_sem = bus_read_1(res, eecp +
3164 			    XHCI_XECP_BIOS_SEM);
3167 		if (bios_sem != 0) {
3169 				printf("xhci early: "
3170 				    "SMM does not respond\n");
3173 		/* Disable interrupts */
3174 		offs = bus_read_1(res, XHCI_CAPLENGTH);
3175 		bus_write_4(res, offs + XHCI_USBCMD, 0);
3176 		bus_read_4(res, offs + XHCI_USBSTS);
3178 	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
/*
 * Populate a child device's resource list: BARs (with quirk handling
 * and special-case ATA legacy mapping), the legacy INTx interrupt, and
 * early USB controller takeover from SMM where enabled.
 * NOTE(review): truncated listing — 'cfg' and loop-variable declarations,
 * the quirk-match break/continue, and some braces are missing lines.
 */
3182 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
3184 	struct pci_devinfo *dinfo;
3186 	struct resource_list *rl;
3187 	const struct pci_quirk *q;
3191 	dinfo = device_get_ivars(dev);
3193 	rl = &dinfo->resources;
3194 	devid = (cfg->device << 16) | cfg->vendor;
3196 	/* ATA devices needs special map treatment */
3197 	if ((pci_get_class(dev) == PCIC_STORAGE) &&
3198 	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
3199 	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
3200 	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
3201 	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
3202 		pci_ata_maps(bus, dev, rl, force, prefetchmask);
	/* i is advanced by pci_add_map's return (1 for 32-bit, 2 for 64-bit). */
3204 	for (i = 0; i < cfg->nummaps;) {
3206 		 * Skip quirked resources.
3208 		for (q = &pci_quirks[0]; q->devid != 0; q++)
3209 			if (q->devid == devid &&
3210 			    q->type == PCI_QUIRK_UNMAP_REG &&
3211 			    q->arg1 == PCIR_BAR(i))
3213 		if (q->devid != 0) {
3217 		i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
3218 		    prefetchmask & (1 << i));
3222 	 * Add additional, quirked resources.
3224 	for (q = &pci_quirks[0]; q->devid != 0; q++)
3225 		if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
3226 			pci_add_map(bus, dev, q->arg1, rl, force, 0);
3228 	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
3229 #ifdef __PCI_REROUTE_INTERRUPT
3231 		 * Try to re-route interrupts.  Sometimes the BIOS or
3232 		 * firmware may leave bogus values in these registers.
3233 		 * If the re-route fails, then just stick with what we
3236 		pci_assign_interrupt(bus, dev, 1);
3238 		pci_assign_interrupt(bus, dev, 0);
3242 	if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
3243 	    pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
3244 		if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
3245 			xhci_early_takeover(dev);
3246 		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
3247 			ehci_early_takeover(dev);
3248 		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
3249 			ohci_early_takeover(dev);
3250 		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
3251 			uhci_early_takeover(dev);
/*
 * Enumerate all slots/functions on one PCI bus, reading config space
 * through the parent bridge, and add a child device for every function
 * that responds.  Multi-function devices scan up to PCI_FUNCMAX;
 * otherwise only function 0 is probed.  dinfo_size lets subclasses
 * allocate a larger per-device structure.
 * NOTE(review): truncated listing — 'maxslots'/'hdrtype' declarations,
 * the pcifunchigh = 0 reset, and some braces are missing lines.
 */
3256 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
3258 #define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
3259 	device_t pcib = device_get_parent(dev);
3260 	struct pci_devinfo *dinfo;
3262 	int s, f, pcifunchigh;
3265 	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
3266 	    ("dinfo_size too small"));
3267 	maxslots = PCIB_MAXSLOTS(pcib);
3268 	for (s = 0; s <= maxslots; s++) {
3272 		hdrtype = REG(PCIR_HDRTYPE, 1);
3273 		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
3275 		if (hdrtype & PCIM_MFDEV)
3276 			pcifunchigh = PCI_FUNCMAX;
3277 		for (f = 0; f <= pcifunchigh; f++) {
3278 			dinfo = pci_read_device(pcib, domain, busno, s, f,
3280 			if (dinfo != NULL) {
3281 				pci_add_child(dev, dinfo);
/*
 * Attach one discovered PCI function to the device tree: create the
 * child device, bind the devinfo as its ivars, snapshot its config
 * space, restore it (powers the device to D0 if needed), print verbose
 * info, and add its resources.
 */
3289 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3291 	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3292 	device_set_ivars(dinfo->cfg.dev, dinfo);
3293 	resource_list_init(&dinfo->resources);
3294 	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3295 	pci_cfg_restore(dinfo->cfg.dev, dinfo);
3296 	pci_print_verbose(dinfo);
3297 	pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
/*
 * Generic probe for the PCI bus driver.  Returns BUS_PROBE_GENERIC so
 * more specific bus subclasses can outbid this driver.
 */
3301 pci_probe(device_t dev)
3304 	device_set_desc(dev, "PCI bus");
3306 	/* Allow other subclasses to override this driver. */
3307 	return (BUS_PROBE_GENERIC);
/*
 * Common attach work shared by PCI bus drivers: report domain/bus in
 * the boot messages and set up the softc's DMA tag.  When
 * PCI_DMA_BOUNDARY is defined, a boundary-constrained DMA tag is
 * created for top-level PCI buses (those not under another "pci"
 * devclass); nested buses inherit the parent's tag.
 * NOTE(review): truncated listing — busno/domain declarations, the
 * bootverbose guard, error handling, and the success return are missing.
 */
3311 pci_attach_common(device_t dev)
3313 	struct pci_softc *sc;
3315 #ifdef PCI_DMA_BOUNDARY
3316 	int error, tag_valid;
3319 	sc = device_get_softc(dev);
3320 	domain = pcib_get_domain(dev);
3321 	busno = pcib_get_bus(dev);
3323 		device_printf(dev, "domain=%d, physical bus=%d\n",
3325 #ifdef PCI_DMA_BOUNDARY
3327 	if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
3328 	    devclass_find("pci")) {
3329 		error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
3330 		    PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3331 		    NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
3332 		    BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
3334 			device_printf(dev, "Failed to create DMA tag: %d\n",
3341 		sc->sc_dma_tag = bus_get_dma_tag(dev);
/*
 * Attach method for the generic PCI bus: run the common attach setup,
 * enumerate children on our (domain, bus) as reported by the parent
 * bridge, then attach them via bus_generic_attach().
 * NOTE(review): the early-return on pci_attach_common() failure is a
 * missing line in this truncated listing.
 */
3346 pci_attach(device_t dev)
3348 	int busno, domain, error;
3350 	error = pci_attach_common(dev);
3355 	 * Since there can be multiple independantly numbered PCI
3356 	 * busses on systems with multiple PCI domains, we can't use
3357 	 * the unit number to decide which bus we are probing.  We ask
3358 	 * the parent pcib what our domain and bus numbers are.
3360 	domain = pcib_get_domain(dev);
3361 	busno = pcib_get_bus(dev);
3362 	pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
3363 	return (bus_generic_attach(dev));
/*
 * Put each attached child in 'devlist' into a power state, letting the
 * platform firmware (via PCIB_POWER_FOR_SLEEP) override the requested
 * state.  Detached children are skipped; they are handled elsewhere.
 * NOTE(review): truncated listing — the 'dstate' parameter/declaration,
 * loop index declaration, and child assignment are missing lines.
 */
3367 pci_set_power_children(device_t dev, device_t *devlist, int numdevs,
3370 	device_t child, pcib;
3371 	struct pci_devinfo *dinfo;
3375 	 * Set the device to the given state.  If the firmware suggests
3376 	 * a different power state, use it instead.  If power management
3377 	 * is not present, the firmware is responsible for managing
3378 	 * device power.  Skip children who aren't attached since they
3379 	 * are handled separately.
3381 	pcib = device_get_parent(dev);
3382 	for (i = 0; i < numdevs; i++) {
3384 		dinfo = device_get_ivars(child);
3386 		if (device_is_attached(child) &&
3387 		    PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
3388 			pci_set_powerstate(child, dstate);
/*
 * Suspend the PCI bus: save each child's config space, suspend the
 * children generically, then (if pci_do_power_suspend) drop them into
 * the appropriate sleep power state.  Config is saved before suspend so
 * it can be restored on resume.
 * NOTE(review): truncated listing — the child assignment in the loop,
 * error-path free/return, and the final return are missing lines.
 */
3393 pci_suspend(device_t dev)
3395 	device_t child, *devlist;
3396 	struct pci_devinfo *dinfo;
3397 	int error, i, numdevs;
3400 	 * Save the PCI configuration space for each child and set the
3401 	 * device in the appropriate power state for this sleep state.
3403 	if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3405 	for (i = 0; i < numdevs; i++) {
3407 		dinfo = device_get_ivars(child);
3408 		pci_cfg_save(child, dinfo, 0);
3411 	/* Suspend devices before potentially powering them down. */
3412 	error = bus_generic_suspend(dev);
3414 		free(devlist, M_TEMP);
3417 	if (pci_do_power_suspend)
3418 		pci_set_power_children(dev, devlist, numdevs,
3420 	free(devlist, M_TEMP);
/*
 * Resume the PCI bus: optionally power children back to D0, restore
 * each child's saved config space (re-saving for detached children so
 * their state stays coherent), then resume drivers — critical classes
 * (e.g. base peripherals) first, everything else in a second pass.
 * NOTE(review): truncated listing — child assignments, the D0 argument
 * to pci_set_power_children, switch case labels, and returns are
 * missing lines.
 */
3425 pci_resume(device_t dev)
3427 	device_t child, *devlist;
3428 	struct pci_devinfo *dinfo;
3429 	int error, i, numdevs;
3432 	 * Set each child to D0 and restore its PCI configuration space.
3434 	if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3436 	if (pci_do_power_resume)
3437 		pci_set_power_children(dev, devlist, numdevs,
3440 	/* Now the device is powered up, restore its config space. */
3441 	for (i = 0; i < numdevs; i++) {
3443 		dinfo = device_get_ivars(child);
3445 		pci_cfg_restore(child, dinfo);
3446 		if (!device_is_attached(child))
3447 			pci_cfg_save(child, dinfo, 1);
3451 	 * Resume critical devices first, then everything else later.
3453 	for (i = 0; i < numdevs; i++) {
3455 		switch (pci_get_class(child)) {
3459 		case PCIC_BASEPERIPH:
3460 			DEVICE_RESUME(child);
3464 	for (i = 0; i < numdevs; i++) {
3466 		switch (pci_get_class(child)) {
3470 		case PCIC_BASEPERIPH:
3473 			DEVICE_RESUME(child);
3476 	free(devlist, M_TEMP);
/*
 * Locate a preloaded "pci_vendor_data" module (the PCI vendor/device
 * description database) and publish its address and size in the
 * pci_vendordata globals.  The final byte is overwritten with '\n' to
 * guarantee the flat-text database is line-terminated.
 * NOTE(review): declarations of data/ptr/sz and the surrounding braces
 * are missing lines in this truncated listing.
 */
3481 pci_load_vendor_data(void)
3487 	data = preload_search_by_type("pci_vendor_data");
3489 		ptr = preload_fetch_addr(data);
3490 		sz = preload_fetch_size(data);
3491 		if (ptr != NULL && sz != 0) {
3492 			pci_vendordata = ptr;
3493 			pci_vendordata_size = sz;
3494 			/* terminate the database */
3495 			pci_vendordata[pci_vendordata_size] = '\n';
/*
 * Bus callback invoked when a new driver is added: give unclaimed
 * (DS_NOTPRESENT) children another chance to probe/attach.  Config
 * space is restored first in case a prior detach powered the device
 * down; on probe failure the child is treated as detached again.
 * NOTE(review): truncated listing — devlist/numdevs/child declarations,
 * the bootverbose guard, and loop continue are missing lines.
 */
3501 pci_driver_added(device_t dev, driver_t *driver)
3506 	struct pci_devinfo *dinfo;
3510 		device_printf(dev, "driver added\n");
3511 	DEVICE_IDENTIFY(driver, dev);
3512 	if (device_get_children(dev, &devlist, &numdevs) != 0)
3514 	for (i = 0; i < numdevs; i++) {
3516 		if (device_get_state(child) != DS_NOTPRESENT)
3518 		dinfo = device_get_ivars(child);
3519 		pci_print_verbose(dinfo);
3521 			pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3522 		pci_cfg_restore(child, dinfo);
3523 		if (device_probe_and_attach(child) != 0)
3524 			pci_child_detached(dev, child);
3526 	free(devlist, M_TEMP);
/*
 * Bus interrupt-setup method.  After the generic setup succeeds, for a
 * direct child: rid 0 means legacy INTx (ensure PCIM_CMD_INTxDIS is
 * clear); any other rid is an MSI or MSI-X vector — ask the parent
 * bridge to map it on first use, program address/data into the device,
 * bump the per-vector handler count, and set PCIM_CMD_INTxDIS since
 * INTx must be off while MSI/MSI-X is in use.  On mapping failure the
 * generic handler is torn down again.
 * NOTE(review): truncated listing — rid/addr/data declarations, the
 * rid==0 INTx branch, error checks after PCIB_MAP_MSI, and closing
 * braces are missing lines.
 */
3530 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
3531     driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
3533 	struct pci_devinfo *dinfo;
3534 	struct msix_table_entry *mte;
3535 	struct msix_vector *mv;
3541 	error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
3546 	/* If this is not a direct child, just bail out. */
3547 	if (device_get_parent(child) != dev) {
3552 	rid = rman_get_rid(irq);
3554 		/* Make sure that INTx is enabled */
3555 		pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
3558 		 * Check to see if the interrupt is MSI or MSI-X.
3559 		 * Ask our parent to map the MSI and give
3560 		 * us the address and data register values.
3561 		 * If we fail for some reason, teardown the
3562 		 * interrupt handler.
3564 		dinfo = device_get_ivars(child);
3565 		if (dinfo->cfg.msi.msi_alloc > 0) {
3566 			if (dinfo->cfg.msi.msi_addr == 0) {
3567 				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
3568 			    ("MSI has handlers, but vectors not mapped"));
3569 				error = PCIB_MAP_MSI(device_get_parent(dev),
3570 				    child, rman_get_start(irq), &addr, &data);
3573 				dinfo->cfg.msi.msi_addr = addr;
3574 				dinfo->cfg.msi.msi_data = data;
3576 			if (dinfo->cfg.msi.msi_handlers == 0)
3577 				pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
3578 				    dinfo->cfg.msi.msi_data);
3579 			dinfo->cfg.msi.msi_handlers++;
3581 			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3582 			    ("No MSI or MSI-X interrupts allocated"));
3583 			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3584 			    ("MSI-X index too high"));
	/* rid is 1-based: table entry rid-1 maps to an allocated vector. */
3585 			mte = &dinfo->cfg.msix.msix_table[rid - 1];
3586 			KASSERT(mte->mte_vector != 0, ("no message vector"));
3587 			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
3588 			KASSERT(mv->mv_irq == rman_get_start(irq),
3590 			if (mv->mv_address == 0) {
3591 				KASSERT(mte->mte_handlers == 0,
3592 		    ("MSI-X table entry has handlers, but vector not mapped"));
3593 				error = PCIB_MAP_MSI(device_get_parent(dev),
3594 				    child, rman_get_start(irq), &addr, &data);
3597 				mv->mv_address = addr;
3600 			if (mte->mte_handlers == 0) {
3601 				pci_enable_msix(child, rid - 1, mv->mv_address,
3603 				pci_unmask_msix(child, rid - 1);
3605 			mte->mte_handlers++;
3608 		/* Make sure that INTx is disabled if we are using MSI/MSIX */
3609 		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3612 		(void)bus_generic_teardown_intr(dev, child, irq,
/*
 * Bus interrupt-teardown method — mirror of pci_setup_intr.  For a
 * direct child with an MSI/MSI-X rid, decrement the handler count and
 * disable MSI / mask the MSI-X entry when it drops to zero; rid 0
 * (INTx) gets PCIM_CMD_INTxDIS set while not in use.  Finishes with the
 * generic teardown and asserts it cannot fail for MSI/MSI-X.
 * NOTE(review): truncated listing — the rid/error declarations, the
 * rid==0 branch, several early error returns, and braces are missing.
 */
3622 pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
3625 	struct msix_table_entry *mte;
3626 	struct resource_list_entry *rle;
3627 	struct pci_devinfo *dinfo;
3630 	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
3633 	/* If this isn't a direct child, just bail out */
3634 	if (device_get_parent(child) != dev)
3635 		return(bus_generic_teardown_intr(dev, child, irq, cookie));
3637 	rid = rman_get_rid(irq);
3640 		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
3643 		 * Check to see if the interrupt is MSI or MSI-X.  If so,
3644 		 * decrement the appropriate handlers count and mask the
3645 		 * MSI-X message, or disable MSI messages if the count
3648 		dinfo = device_get_ivars(child);
3649 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
3650 		if (rle->res != irq)
3652 		if (dinfo->cfg.msi.msi_alloc > 0) {
3653 			KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
3654 			    ("MSI-X index too high"));
3655 			if (dinfo->cfg.msi.msi_handlers == 0)
3657 			dinfo->cfg.msi.msi_handlers--;
3658 			if (dinfo->cfg.msi.msi_handlers == 0)
3659 				pci_disable_msi(child);
3661 			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
3662 			    ("No MSI or MSI-X interrupts allocated"));
3663 			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
3664 			    ("MSI-X index too high"));
3665 			mte = &dinfo->cfg.msix.msix_table[rid - 1];
3666 			if (mte->mte_handlers == 0)
3668 			mte->mte_handlers--;
3669 			if (mte->mte_handlers == 0)
3670 				pci_mask_msix(child, rid - 1);
3673 	error = bus_generic_teardown_intr(dev, child, irq, cookie);
3676 	    ("%s: generic teardown failed for MSI/MSI-X", __func__));
/*
 * Print a one-line description of a child device at attach time:
 * standard header, its port/memory/IRQ resource ranges, any bus flags,
 * and its slot.function location.  Returns the character count printed,
 * per the bus print_child convention.
 * NOTE(review): the 'retval = 0' initialization is a missing line in
 * this truncated listing.
 */
3681 pci_print_child(device_t dev, device_t child)
3683 	struct pci_devinfo *dinfo;
3684 	struct resource_list *rl;
3687 	dinfo = device_get_ivars(child);
3688 	rl = &dinfo->resources;
3690 	retval += bus_print_child_header(dev, child);
3692 	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3693 	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3694 	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3695 	if (device_get_flags(dev))
3696 		retval += printf(" flags %#x", device_get_flags(dev));
3698 	retval += printf(" at device %d.%d", pci_get_slot(child),
3699 	    pci_get_function(child));
3701 	retval += bus_print_child_footer(dev, child);
/*
 * Class/subclass description table used by pci_probe_nomatch() to print
 * a human-readable name for devices with no attached driver.  A
 * subclass of -1 is the generic entry for the whole class; specific
 * subclasses follow it.  Terminated by a sentinel entry.
 * NOTE(review): the struct member declarations opening this table are
 * missing lines in this truncated listing.
 */
3711 } pci_nomatch_tab[] = {
3712 	{PCIC_OLD,		-1,			"old"},
3713 	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
3714 	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
3715 	{PCIC_STORAGE,		-1,			"mass storage"},
3716 	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
3717 	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
3718 	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
3719 	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
3720 	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
3721 	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	"ATA (ADMA)"},
3722 	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	"SATA"},
3723 	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	"SAS"},
3724 	{PCIC_STORAGE,		PCIS_STORAGE_NVM,	"NVM"},
3725 	{PCIC_NETWORK,		-1,			"network"},
3726 	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
3727 	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
3728 	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
3729 	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
3730 	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
3731 	{PCIC_DISPLAY,		-1,			"display"},
3732 	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
3733 	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
3734 	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
3735 	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
3736 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
3737 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
3738 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
3739 	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	"HDA"},
3740 	{PCIC_MEMORY,		-1,			"memory"},
3741 	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
3742 	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
3743 	{PCIC_BRIDGE,		-1,			"bridge"},
3744 	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
3745 	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
3746 	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
3747 	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
3748 	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
3749 	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
3750 	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
3751 	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
3752 	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
3753 	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
3754 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
3755 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
3756 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
3757 	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
3758 	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
3759 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
3760 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
3761 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
3762 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
3763 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
3764 	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	"SD host controller"},
3765 	{PCIC_INPUTDEV,		-1,			"input device"},
3766 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
3767 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
3768 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
3769 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
3770 	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
3771 	{PCIC_DOCKING,		-1,			"docking station"},
3772 	{PCIC_PROCESSOR,	-1,			"processor"},
3773 	{PCIC_SERIALBUS,	-1,			"serial bus"},
3774 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
3775 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
3776 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
3777 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
3778 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
3779 	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
3780 	{PCIC_WIRELESS,		-1,			"wireless controller"},
3781 	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
3782 	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
3783 	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
3784 	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
3785 	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
3786 	{PCIC_SATCOM,		-1,			"satellite communication"},
3787 	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
3788 	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
3789 	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
3790 	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
3791 	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
3792 	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	"network/computer crypto"},
3793 	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	"entertainment crypto"},
3794 	{PCIC_DASP,		-1,			"dasp"},
3795 	{PCIC_DASP,		PCIS_DASP_DPIO,		"DPIO module"},
/*
 * Report a PCI device that no driver claimed: prefer a description from
 * the loaded vendor database; otherwise fall back to the generic
 * class/subclass names in pci_nomatch_tab.  Finally save the device's
 * config space (and allow power-down) since nothing will manage it.
 * NOTE(review): truncated listing — the 'device'/'i' declarations,
 * cp/scp NULL initialization, loop break, and the else arm printing the
 * class/subclass description are missing lines.
 */
3800 pci_probe_nomatch(device_t dev, device_t child)
3803 	const char *cp, *scp;
3807 	 * Look for a listing for this device in a loaded device database.
3809 	if ((device = pci_describe_device(child)) != NULL) {
3810 		device_printf(dev, "<%s>", device);
3811 		free(device, M_DEVBUF);
3814 		 * Scan the class/subclass descriptions for a general
3819 		for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
3820 			if (pci_nomatch_tab[i].class == pci_get_class(child)) {
3821 				if (pci_nomatch_tab[i].subclass == -1) {
3822 					cp = pci_nomatch_tab[i].desc;
3823 				} else if (pci_nomatch_tab[i].subclass ==
3824 				    pci_get_subclass(child)) {
3825 					scp = pci_nomatch_tab[i].desc;
3829 		device_printf(dev, "<%s%s%s>",
3831 		    ((cp != NULL) && (scp != NULL)) ? ", " : "",
3834 	printf(" at device %d.%d (no driver attached)\n",
3835 	    pci_get_slot(child), pci_get_function(child));
3836 	pci_cfg_save(child, device_get_ivars(child), 1);
/*
 * Bus callback invoked after a child device detaches: reclaim any
 * resources the driver leaked (warning about each kind) and save the
 * device's config state.
 */
pci_child_detached(device_t dev, device_t child)
	struct pci_devinfo *dinfo;
	struct resource_list *rl;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	/*
	 * Have to deallocate IRQs before releasing any MSI messages and
	 * have to release MSI messages before deallocating any memory
	 * resources.
	 */
	if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
		pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
	if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
		pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
		/* Best effort: reclaim any MSI/MSI-X vectors still held. */
		(void)pci_release_msi(child);
	if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
		pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
	if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
		pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");

	/* Snapshot config registers (setstate argument = 1). */
	pci_cfg_save(child, dinfo, 1);
/*
 * Parse the PCI device database, if loaded, and return a pointer to a
 * description of the device.
 *
 * The database is flat text formatted as follows:
 *
 * Any line not in a valid format is ignored.
 * Lines are terminated with newline '\n' characters.
 *
 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
 * the vendor name.
 *
 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
 * - devices cannot be listed without a corresponding VENDOR line.
 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
 * another TAB, then the device name.
 */

/*
 * Assuming (ptr) points to the beginning of a line in the database,
 * return the vendor or device and description of the next entry.
 * The value of (vendor) or (device) inappropriate for the entry type
 * is set to -1. Returns nonzero at the end of the database.
 *
 * Note that this is not entirely robust in the face of corrupt data;
 * we attempt to safeguard against this by spamming the end of the
 * database with a newline when we initialise.
 */
pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
	/* Bytes remaining in the database buffer from the current position. */
	left = pci_vendordata_size - (cp - pci_vendordata);
	/* VENDOR entry: hex code at start of line, TAB, then name. */
	    sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
	/* DEVICE entry: leading TAB, hex code, TAB, then name. */
	    sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
	/* skip to next line */
	while (*cp != '\n' && left > 0) {
	/* skip to next line */
	while (*cp != '\n' && left > 0) {
	/* Step past the newline terminating this entry, if any. */
	if (*cp == '\n' && left > 0)
/*
 * Build a "vendor, device" description string for 'dev' from the loaded
 * PCI vendor database.  Returns a malloc'd string (M_DEVBUF) the caller
 * must free, or NULL if no database is loaded or allocation fails.
 */
pci_describe_device(device_t dev)
	char *desc, *vp, *dp, *line;

	desc = vp = dp = NULL;

	/*
	 * If we have no vendor data, we can't do anything.
	 */
	if (pci_vendordata == NULL)

	/*
	 * Scan the vendor data looking for this device
	 */
	line = pci_vendordata;
	/* 80-byte buffer matches the %80[^\n] scan in the line parser. */
	if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		/* Walk entries until this device's VENDOR line is found. */
		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
		if (vendor == pci_get_vendor(dev))
	if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		/* Walk the DEVICE lines belonging to the matched vendor. */
		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
		if (device == pci_get_device(dev))
		/* No DEVICE entry matched; fall back to the numeric ID. */
		snprintf(dp, 80, "0x%x", pci_get_device(dev));
	/* Combine as "vendor, device" (+2 for ", ", +1 for NUL). */
	if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
		sprintf(desc, "%s, %s", vp, dp);
/*
 * bus_read_ivar implementation: report cached PCI config header values
 * (IDs, class codes, interrupt routing, timing registers, location)
 * for a child device.
 */
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(child);
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
	case PCI_IVAR_DEVID:
		/* Combined ID: device in the high 16 bits, vendor low. */
		*result = (cfg->device << 16) | cfg->vendor;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		*result = cfg->intline;
	case PCI_IVAR_DOMAIN:
		*result = cfg->domain;
		*result = cfg->slot;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
	case PCI_IVAR_MINGNT:
		*result = cfg->mingnt;
	case PCI_IVAR_MAXLAT:
		*result = cfg->maxlat;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
/*
 * bus_write_ivar implementation.  Only the cached interrupt pin may be
 * modified; the identification/location ivars are read-only.
 */
pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(child);
	case PCI_IVAR_INTPIN:
		/* Update the cached interrupt pin value. */
		dinfo->cfg.intpin = value;
	case PCI_IVAR_ETHADDR:
	case PCI_IVAR_SUBVENDOR:
	case PCI_IVAR_SUBDEVICE:
	case PCI_IVAR_VENDOR:
	case PCI_IVAR_DEVICE:
	case PCI_IVAR_DEVID:
	case PCI_IVAR_CLASS:
	case PCI_IVAR_SUBCLASS:
	case PCI_IVAR_PROGIF:
	case PCI_IVAR_REVID:
	case PCI_IVAR_DOMAIN:
	case PCI_IVAR_FUNCTION:
		/* Identification and location ivars are immutable. */
		return (EINVAL);	/* disallow for now */
4110 #include "opt_ddb.h"
4112 #include <ddb/ddb.h>
4113 #include <sys/cons.h>
/*
 * List resources based on pci map registers, used for within ddb
 */
DB_SHOW_COMMAND(pciregs, db_pci_dump)
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	int i, error, none_count;

	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	for (error = 0, i = 0,
	    dinfo = STAILQ_FIRST(devlist_head);
	    (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
	    dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = device_get_name(dinfo->cfg.dev);
		/* One line per device: name@selector, class, IDs, revision. */
		db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
		    (name && *name) ? name : "none",
		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
		    p->pc_sel.pc_func, (p->pc_class << 16) |
		    (p->pc_subclass << 8) | p->pc_progif,
		    (p->pc_subdevice << 16) | p->pc_subvendor,
		    (p->pc_device << 16) | p->pc_vendor,
		    p->pc_revid, p->pc_hdr);
/*
 * Reserve the backing resource for a BAR that has not been allocated
 * yet: size the BAR from hardware (or the cached pm_size for a BAR we
 * failed to allocate earlier), validate the requested type against the
 * BAR kind, allocate from the parent, record the reservation in the
 * child's resource list, and program the BAR with the assigned address.
 */
static struct resource *
pci_reserve_map(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	struct resource *res;
	pci_addr_t map, testval;

	pm = pci_find_bar(child, *rid);
		/* This is a BAR that we failed to allocate earlier. */
		mapsize = pm->pm_size;
		/*
		 * Weed out the bogons, and figure out how large the
		 * BAR/map is.  BARs that read back 0 here are bogus
		 * and unimplemented.  Note: atapci in legacy mode are
		 * special and handled elsewhere in the code.  If you
		 * have a atapci device in legacy mode and it fails
		 * here, that other code is broken.
		 */
		pci_read_bar(child, *rid, &map, &testval);

		/*
		 * Determine the size of the BAR and ignore BARs with a size
		 * of 0.  Device ROM BARs use a different mask value.
		 */
		if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
			mapsize = pci_romsize(testval);
			mapsize = pci_mapsize(testval);
		/* Record the newly-discovered BAR. */
		pm = pci_add_bar(child, *rid, map, mapsize);
	/* Sanity-check the requested resource type against the BAR kind. */
	if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
		if (type != SYS_RES_MEMORY) {
			"child %s requested type %d for rid %#x,"
			" but the BAR says it is an memio\n",
			device_get_nameunit(child), type, *rid);
		if (type != SYS_RES_IOPORT) {
			"child %s requested type %d for rid %#x,"
			" but the BAR says it is an ioport\n",
			device_get_nameunit(child), type, *rid);

	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	count = (pci_addr_t)1 << mapsize;
	/* BARs must be naturally aligned to their size. */
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate BAR for that resource.
	 */
	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
	    start, end, count, flags & ~RF_ACTIVE);
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
	/* Track the reservation in the child's resource list. */
	resource_list_add(rl, type, *rid, start, end, count);
	rle = resource_list_find(rl, type, *rid);
		panic("pci_reserve_map: unexpectedly can't find resource.");
	rle->start = rman_get_start(res);
	rle->end = rman_get_end(res);
	rle->flags = RLE_RESERVED;
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	/* Program the BAR with the address we were granted. */
	map = rman_get_start(res);
	pci_write_bar(child, pm, map);
/*
 * bus_alloc_resource implementation: lazily reserve BAR-backed memory
 * and I/O resources, route an interrupt on first IRQ allocation, and
 * pass non-BAR (bridge window) requests up the tree.
 */
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;
	struct resource *res;

	/* Requests from grandchildren bypass the lazy-BAR logic. */
	if (device_get_parent(child) != dev)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));

	/*
	 * Perform lazy resource allocation
	 */
	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	/*
	 * Can't alloc legacy interrupt once MSI messages have
	 * been allocated.
	 */
	if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
	    cfg->msix.msix_alloc > 0))
	/*
	 * If the child device doesn't have an interrupt
	 * routed and is deserving of an interrupt, try to
	 * assign it one.
	 */
	if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
		pci_assign_interrupt(dev, child, 0);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		/*
		 * PCI-PCI bridge I/O window resources are not BARs.
		 * For those allocations just pass the request up the
		 * tree.
		 */
		if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
			case PCIR_IOBASEL_1:
			case PCIR_MEMBASE_1:
			case PCIR_PMBASEL_1:
				/*
				 * XXX: Should we bother creating a resource
				 * list entry?
				 */
				return (bus_generic_alloc_resource(dev, child,
				    type, rid, start, end, count, flags));
		/* Reserve resources for this BAR if needed. */
		rle = resource_list_find(rl, type, *rid);
			res = pci_reserve_map(dev, child, type, rid, start, end,
	/* Hand out (a sub-allocation of) the reserved resource. */
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
/*
 * bus_release_resource implementation: forward requests from
 * grandchildren and bridge-window resources up the tree; release
 * BAR-backed resources from the child's resource list.
 */
pci_release_resource(device_t dev, device_t child, int type, int rid,
	struct pci_devinfo *dinfo;
	struct resource_list *rl;

	/* Requests from grandchildren go straight to our parent. */
	if (device_get_parent(child) != dev)
		return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
	dinfo = device_get_ivars(child);
	/*
	 * PCI-PCI bridge I/O window resources are not BARs.  For
	 * those allocations just pass the request up the tree.
	 */
	if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
	    (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
		case PCIR_IOBASEL_1:
		case PCIR_MEMBASE_1:
		case PCIR_PMBASEL_1:
			return (bus_generic_release_resource(dev, child, type,
	rl = &dinfo->resources;
	return (resource_list_release(rl, dev, child, type, rid, r));
/*
 * bus_activate_resource implementation: activate via the generic
 * method, then enable the matching decoding (memory/port, and ROM
 * enable bit for BIOS BARs) in the child's command register.
 */
pci_activate_resource(device_t dev, device_t child, int type, int rid,
	struct pci_devinfo *dinfo;

	error = bus_generic_activate_resource(dev, child, type, rid, r);

	/* Enable decoding in the command register when activating BARs. */
	if (device_get_parent(child) == dev) {
		/* Device ROMs need their decoding explicitly enabled. */
		dinfo = device_get_ivars(child);
		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
			pci_write_bar(child, pci_find_bar(child, rid),
			    rman_get_start(r) | PCIM_BIOS_ENABLE);
		case SYS_RES_IOPORT:
		case SYS_RES_MEMORY:
			/* Turn on memory/port decoding for the child. */
			error = PCI_ENABLE_IO(dev, child, type);
/*
 * bus_deactivate_resource implementation: deactivate via the generic
 * method, then clear the ROM enable bit for device ROM BARs.
 */
pci_deactivate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
	struct pci_devinfo *dinfo;

	error = bus_generic_deactivate_resource(dev, child, type, rid, r);

	/* Disable decoding for device ROMs. */
	if (device_get_parent(child) == dev) {
		dinfo = device_get_ivars(child);
		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
			pci_write_bar(child, pci_find_bar(child, rid),
/*
 * Detach and destroy a child PCI device, disabling its decoding and
 * releasing every resource recorded in its resource list first.
 */
pci_delete_child(device_t dev, device_t child)
	struct resource_list_entry *rle;
	struct resource_list *rl;
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	if (device_is_attached(child))
		device_detach(child);

	/* Turn off access to resources we're about to free */
	pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
	    PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);

	/* Free all allocated resources */
	STAILQ_FOREACH(rle, rl, link) {
		/* Complain if a driver left a resource active/owned. */
		if (rman_get_flags(rle->res) & RF_ACTIVE ||
		    resource_list_busy(rl, rle->type, rle->rid)) {
			pci_printf(&dinfo->cfg,
			    "Resource still owned, oops. "
			    "(type=%d, rid=%d, addr=%lx)\n",
			    rle->type, rle->rid,
			    rman_get_start(rle->res));
			bus_release_resource(child, rle->type, rle->rid,
		resource_list_unreserve(rl, dev, child, rle->type,
	resource_list_free(rl);

	device_delete_child(dev, child);
/*
 * bus_delete_resource implementation: drop a resource list entry for a
 * direct child, unreserving it first (and complaining if the child
 * still owns or has activated it).
 */
pci_delete_resource(device_t dev, device_t child, int type, int rid)
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	/* Only direct children are managed here. */
	if (device_get_parent(child) != dev)
	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
		/* Refuse to delete a resource the child still owns. */
		if (rman_get_flags(rle->res) & RF_ACTIVE ||
		    resource_list_busy(rl, type, rid)) {
			device_printf(dev, "delete_resource: "
			    "Resource still owned by child, oops. "
			    "(type=%d, rid=%d, addr=%lx)\n",
			    type, rid, rman_get_start(rle->res));
		resource_list_unreserve(rl, dev, child, type, rid);
	resource_list_delete(rl, type, rid);
4494 struct resource_list *
4495 pci_get_resource_list (device_t dev, device_t child)
4497 struct pci_devinfo *dinfo = device_get_ivars(child);
4499 return (&dinfo->resources);
4503 pci_get_dma_tag(device_t bus, device_t dev)
4505 struct pci_softc *sc = device_get_softc(bus);
4507 return (sc->sc_dma_tag);
4511 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4513 struct pci_devinfo *dinfo = device_get_ivars(child);
4514 pcicfgregs *cfg = &dinfo->cfg;
4516 return (PCIB_READ_CONFIG(device_get_parent(dev),
4517 cfg->bus, cfg->slot, cfg->func, reg, width));
4521 pci_write_config_method(device_t dev, device_t child, int reg,
4522 uint32_t val, int width)
4524 struct pci_devinfo *dinfo = device_get_ivars(child);
4525 pcicfgregs *cfg = &dinfo->cfg;
4527 PCIB_WRITE_CONFIG(device_get_parent(dev),
4528 cfg->bus, cfg->slot, cfg->func, reg, val, width);
4532 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4536 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4537 pci_get_function(child));
4542 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4545 struct pci_devinfo *dinfo;
4548 dinfo = device_get_ivars(child);
4550 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4551 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4552 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
4558 pci_assign_interrupt_method(device_t dev, device_t child)
4560 struct pci_devinfo *dinfo = device_get_ivars(child);
4561 pcicfgregs *cfg = &dinfo->cfg;
4563 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
/*
 * Module event handler for the PCI bus driver: on load, initialize the
 * global device list, create the /dev control node and load the vendor
 * database; on unload, destroy the control node.
 */
pci_modevent(module_t mod, int what, void *arg)
	static struct cdev *pci_cdev;

	STAILQ_INIT(&pci_devq);
	pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
	pci_load_vendor_data();
	destroy_dev(pci_cdev);
/*
 * Re-write the saved PCI Express capability control registers of a
 * device.  Registers that only exist for certain port types (or only
 * in capability version 2+) are written conditionally.
 */
pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
#define WREG(n, v)	pci_write_config(dev, pos + (n), (v), 2)
	struct pcicfg_pcie *cfg;

	cfg = &dinfo->cfg.pcie;
	pos = cfg->pcie_location;

	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
	WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);
	/* Link control: v2+, or ports/endpoints that have a link. */
	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
		WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);
	/* Slot control: v2+, or ports that implement a slot. */
	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
	    (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
		WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);
	/* Root control: v2+, root ports and root complex event collectors. */
	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
		WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);
	/* Restore the "2" control registers saved by pci_cfg_save_pcie(). */
	WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
	WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
	WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
4626 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
4628 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
4629 dinfo->cfg.pcix.pcix_command, 2);
/*
 * Restore the saved configuration of a header type 0 device: power it
 * up to D0 if needed, rewrite the BARs and writable header registers,
 * then the PCIe/PCI-X capability registers and MSI/MSI-X state.
 */
pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges,
	 * which we know need special treatment.  Type 2 devices are
	 * cardbus bridges which also require special treatment.
	 * Other types are unknown, and we err on the side of safety
	 * by ignoring them.
	 */
	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)

	/*
	 * Restore the device to full power mode.  We must do this
	 * before we restore the registers because moving from D3 to
	 * D0 will cause the chip's BARs and some other registers to
	 * be reset to some unknown power on reset values.  Cut down
	 * the noise on boot by doing nothing if we are already in
	 * state D0.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_restore_bars(dev);
	/* Rewrite the normally-writable type 0 header registers. */
	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);

	/*
	 * Restore extended capabilities for PCI-Express and PCI-X
	 */
	if (dinfo->cfg.pcie.pcie_location != 0)
		pci_cfg_restore_pcie(dev, dinfo);
	if (dinfo->cfg.pcix.pcix_location != 0)
		pci_cfg_restore_pcix(dev, dinfo);

	/* Restore MSI and MSI-X configurations if they are present. */
	if (dinfo->cfg.msi.msi_location != 0)
		pci_resume_msi(dev);
	if (dinfo->cfg.msix.msix_location != 0)
		pci_resume_msix(dev);
/*
 * Save the PCI Express capability control registers of a device so
 * pci_cfg_restore_pcie() can re-write them later.  Registers that only
 * exist for certain port types (or capability version 2+) are read
 * conditionally.
 */
pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
#define RREG(n)	pci_read_config(dev, pos + (n), 2)
	struct pcicfg_pcie *cfg;

	cfg = &dinfo->cfg.pcie;
	pos = cfg->pcie_location;

	cfg->pcie_flags = RREG(PCIER_FLAGS);

	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;

	cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
	/* Link control: v2+, or ports/endpoints that have a link. */
	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
		cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
	/* Slot control: v2+, or ports that implement a slot. */
	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
	    (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
		cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
	/* Root control: v2+, root ports and root complex event collectors. */
	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
		cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
	/* Save the "2" control registers. */
	cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
	cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
	cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
4721 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
4723 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
4724 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
/*
 * Save the writable configuration registers of a header type 0 device
 * (including PCIe/PCI-X capability state) and, when 'setstate' is set,
 * optionally power the device down to D3 according to the
 * pci_do_power_nodriver policy.
 */
pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges, which
	 * we know need special treatment.  Type 2 devices are cardbus bridges
	 * which also require special treatment.  Other types are unknown, and
	 * we err on the side of safety by ignoring them.  Powering down
	 * bridges should not be undertaken lightly.
	 */
	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)

	/*
	 * Some drivers apparently write to these registers w/o updating our
	 * cached copy.  No harm happens if we update the copy, so do so here
	 * so we can restore them.  The COMMAND register is modified by the
	 * bus w/o updating the cache.  This should represent the normally
	 * writable portion of the 'defined' part of type 0 headers.  In
	 * theory we also need to save/restore the PCI capability structures
	 * we know about, but apart from power we don't know any that are
	 * writable.
	 */
	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);

	/* Save capability registers that restore needs. */
	if (dinfo->cfg.pcie.pcie_location != 0)
		pci_cfg_save_pcie(dev, dinfo);

	if (dinfo->cfg.pcix.pcix_location != 0)
		pci_cfg_save_pcix(dev, dinfo);

	/*
	 * don't set the state for display devices, base peripherals and
	 * memory devices since bad things happen when they are powered down.
	 * We should (a) have drivers that can easily detach and (b) use
	 * generic drivers for these devices so that some device actually
	 * attaches.  We need to make sure that when we implement (a) we don't
	 * power the device down on a reattach.
	 */
	cls = pci_get_class(dev);
	/* Policy knob: 0=never, 1=conservative, 2=aggressive, 3=always. */
	switch (pci_do_power_nodriver)
	case 0:		/* NO powerdown at all */
	case 1:		/* Conservative about what to power down */
		if (cls == PCIC_STORAGE)
	case 2:		/* Aggressive about what to power down */
		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
		    cls == PCIC_BASEPERIPH)
	case 3:		/* Power down everything */
	/*
	 * PCI spec says we can only go into D3 state from D0 state.
	 * Transition from D[12] into D0 before going to D3 state.
	 */
	ps = pci_get_powerstate(dev);
	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
4813 /* Wrapper APIs suitable for device driver use. */
4815 pci_save_state(device_t dev)
4817 struct pci_devinfo *dinfo;
4819 dinfo = device_get_ivars(dev);
4820 pci_cfg_save(dev, dinfo, 0);
4824 pci_restore_state(device_t dev)
4826 struct pci_devinfo *dinfo;
4828 dinfo = device_get_ivars(dev);
4829 pci_cfg_restore(dev, dinfo);