/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUX_PCI_H_
#define	_LINUX_PCI_H_

#define	CONFIG_PCI_MSI

#include <linux/types.h>

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>

#include <machine/resource.h>

#include <linux/list.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <asm/atomic.h>
#include <linux/device.h>
/*
 * Linux-compatible PCI device-match entry.  Drivers terminate their
 * id_table arrays with a zeroed sentinel (vendor == 0).
 */
struct pci_device_id {
	uint32_t	vendor;		/* vendor ID or PCI_ANY_ID */
	uint32_t	device;		/* device ID or PCI_ANY_ID */
	uint32_t	subvendor;	/* subsystem vendor ID or PCI_ANY_ID */
	uint32_t	subdevice;	/* subsystem device ID or PCI_ANY_ID */
	uint32_t	class;		/* class, subclass, prog-if triplet */
	uint32_t	class_mask;	/* bits of class to compare */
	uintptr_t	driver_data;	/* opaque per-entry driver cookie */
};
64 #define MODULE_DEVICE_TABLE(bus, table)
65 #define PCI_ANY_ID (-1)
66 #define PCI_VENDOR_ID_MELLANOX 0x15b3
67 #define PCI_VENDOR_ID_TOPSPIN 0x1867
68 #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
69 #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46
70 #define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
71 #define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
72 #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
73 #define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
75 #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
76 #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
77 #define PCI_FUNC(devfn) ((devfn) & 0x07)
79 #define PCI_VDEVICE(_vendor, _device) \
80 .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device), \
81 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
82 #define PCI_DEVICE(_vendor, _device) \
83 .vendor = (_vendor), .device = (_device), \
84 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
86 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
88 #define PCI_VENDOR_ID PCIR_DEVVENDOR
89 #define PCI_COMMAND PCIR_COMMAND
90 #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL /* Device Control */
91 #define PCI_EXP_LNKCTL PCIER_LINK_CTL /* Link Control */
92 #define PCI_EXP_FLAGS_TYPE PCIEM_FLAGS_TYPE /* Device/Port type */
93 #define PCI_EXP_DEVCAP PCIER_DEVICE_CAP /* Device capabilities */
94 #define PCI_EXP_DEVSTA PCIER_DEVICE_STA /* Device Status */
95 #define PCI_EXP_LNKCAP PCIER_LINK_CAP /* Link Capabilities */
96 #define PCI_EXP_LNKSTA PCIER_LINK_STA /* Link Status */
97 #define PCI_EXP_SLTCAP PCIER_SLOT_CAP /* Slot Capabilities */
98 #define PCI_EXP_SLTCTL PCIER_SLOT_CTL /* Slot Control */
99 #define PCI_EXP_SLTSTA PCIER_SLOT_STA /* Slot Status */
100 #define PCI_EXP_RTCTL PCIER_ROOT_CTL /* Root Control */
101 #define PCI_EXP_RTCAP PCIER_ROOT_CAP /* Root Capabilities */
102 #define PCI_EXP_RTSTA PCIER_ROOT_STA /* Root Status */
103 #define PCI_EXP_DEVCAP2 PCIER_DEVICE_CAP2 /* Device Capabilities 2 */
104 #define PCI_EXP_DEVCTL2 PCIER_DEVICE_CTL2 /* Device Control 2 */
105 #define PCI_EXP_LNKCAP2 PCIER_LINK_CAP2 /* Link Capabilities 2 */
106 #define PCI_EXP_LNKCTL2 PCIER_LINK_CTL2 /* Link Control 2 */
107 #define PCI_EXP_LNKSTA2 PCIER_LINK_STA2 /* Link Status 2 */
108 #define PCI_EXP_FLAGS PCIER_FLAGS /* Capabilities register */
109 #define PCI_EXP_FLAGS_VERS PCIEM_FLAGS_VERSION /* Capability version */
110 #define PCI_EXP_TYPE_ROOT_PORT PCIEM_TYPE_ROOT_PORT /* Root Port */
111 #define PCI_EXP_TYPE_ENDPOINT PCIEM_TYPE_ENDPOINT /* Express Endpoint */
112 #define PCI_EXP_TYPE_LEG_END PCIEM_TYPE_LEGACY_ENDPOINT /* Legacy Endpoint */
113 #define PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT /* Downstream Port */
114 #define PCI_EXP_FLAGS_SLOT PCIEM_FLAGS_SLOT /* Slot implemented */
115 #define PCI_EXP_TYPE_RC_EC PCIEM_TYPE_ROOT_EC /* Root Complex Event Collector */
118 #define IORESOURCE_MEM (1 << SYS_RES_MEMORY)
119 #define IORESOURCE_IO (1 << SYS_RES_IOPORT)
120 #define IORESOURCE_IRQ (1 << SYS_RES_IRQ)
126 struct list_head links;
128 const struct pci_device_id *id_table;
129 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
130 void (*remove)(struct pci_dev *dev);
131 int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
132 int (*resume) (struct pci_dev *dev); /* Device woken up */
135 const struct pci_error_handlers *err_handler;
138 extern struct list_head pci_drivers;
139 extern struct list_head pci_devices;
140 extern spinlock_t pci_lock;
142 #define __devexit_p(x) x
146 struct list_head links;
147 struct pci_driver *pdrv;
156 static inline struct resource_list_entry *
157 _pci_get_rle(struct pci_dev *pdev, int type, int rid)
159 struct pci_devinfo *dinfo;
160 struct resource_list *rl;
162 dinfo = device_get_ivars(pdev->dev.bsddev);
163 rl = &dinfo->resources;
164 return resource_list_find(rl, type, rid);
167 static inline struct resource_list_entry *
168 _pci_get_bar(struct pci_dev *pdev, int bar)
170 struct resource_list_entry *rle;
173 if ((rle = _pci_get_rle(pdev, SYS_RES_MEMORY, bar)) == NULL)
174 rle = _pci_get_rle(pdev, SYS_RES_IOPORT, bar);
178 static inline struct device *
179 _pci_find_irq_dev(unsigned int irq)
181 struct pci_dev *pdev;
183 spin_lock(&pci_lock);
184 list_for_each_entry(pdev, &pci_devices, links) {
185 if (irq == pdev->dev.irq)
187 if (irq >= pdev->dev.msix && irq < pdev->dev.msix_max)
190 spin_unlock(&pci_lock);
196 static inline unsigned long
197 pci_resource_start(struct pci_dev *pdev, int bar)
199 struct resource_list_entry *rle;
201 if ((rle = _pci_get_bar(pdev, bar)) == NULL)
206 static inline unsigned long
207 pci_resource_len(struct pci_dev *pdev, int bar)
209 struct resource_list_entry *rle;
211 if ((rle = _pci_get_bar(pdev, bar)) == NULL)
217 pci_resource_type(struct pci_dev *pdev, int bar)
219 struct resource_list_entry *rle;
221 if ((rle = _pci_get_bar(pdev, bar)) == NULL)
227 * All drivers just seem to want to inspect the type not flags.
230 pci_resource_flags(struct pci_dev *pdev, int bar)
234 type = pci_resource_type(pdev, bar);
240 static inline const char *
241 pci_name(struct pci_dev *d)
244 return device_get_desc(d->dev.bsddev);
248 pci_get_drvdata(struct pci_dev *pdev)
251 return dev_get_drvdata(&pdev->dev);
255 pci_set_drvdata(struct pci_dev *pdev, void *data)
258 dev_set_drvdata(&pdev->dev, data);
262 pci_enable_device(struct pci_dev *pdev)
265 pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT);
266 pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY);
271 pci_disable_device(struct pci_dev *pdev)
276 pci_set_master(struct pci_dev *pdev)
279 pci_enable_busmaster(pdev->dev.bsddev);
284 pci_clear_master(struct pci_dev *pdev)
287 pci_disable_busmaster(pdev->dev.bsddev);
292 pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
297 type = pci_resource_type(pdev, bar);
301 if (bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
308 pci_release_region(struct pci_dev *pdev, int bar)
310 struct resource_list_entry *rle;
312 if ((rle = _pci_get_bar(pdev, bar)) == NULL)
314 bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
318 pci_release_regions(struct pci_dev *pdev)
322 for (i = 0; i <= PCIR_MAX_BAR_0; i++)
323 pci_release_region(pdev, i);
327 pci_request_regions(struct pci_dev *pdev, const char *res_name)
332 for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
333 error = pci_request_region(pdev, i, res_name);
334 if (error && error != -ENODEV) {
335 pci_release_regions(pdev);
343 pci_disable_msix(struct pci_dev *pdev)
346 pci_release_msi(pdev->dev.bsddev);
349 #define PCI_CAP_ID_EXP PCIY_EXPRESS
350 #define PCI_CAP_ID_PCIX PCIY_PCIX
354 pci_find_capability(struct pci_dev *pdev, int capid)
358 if (pci_find_cap(pdev->dev.bsddev, capid, ®))
367 * pci_pcie_cap - get the saved PCIe capability offset
370 * PCIe capability offset is calculated at PCI device initialization
371 * time and saved in the data structure. This function returns saved
372 * PCIe capability offset. Using this instead of pci_find_capability()
373 * reduces unnecessary search in the PCI configuration space. If you
374 * need to calculate PCIe capability offset from raw device for some
375 * reasons, please use pci_find_capability() instead.
377 static inline int pci_pcie_cap(struct pci_dev *dev)
379 return pci_find_capability(dev, PCI_CAP_ID_EXP);
384 pci_read_config_byte(struct pci_dev *pdev, int where, u8 *val)
387 *val = (u8)pci_read_config(pdev->dev.bsddev, where, 1);
392 pci_read_config_word(struct pci_dev *pdev, int where, u16 *val)
395 *val = (u16)pci_read_config(pdev->dev.bsddev, where, 2);
400 pci_read_config_dword(struct pci_dev *pdev, int where, u32 *val)
403 *val = (u32)pci_read_config(pdev->dev.bsddev, where, 4);
408 pci_write_config_byte(struct pci_dev *pdev, int where, u8 val)
411 pci_write_config(pdev->dev.bsddev, where, val, 1);
416 pci_write_config_word(struct pci_dev *pdev, int where, u16 val)
419 pci_write_config(pdev->dev.bsddev, where, val, 2);
424 pci_write_config_dword(struct pci_dev *pdev, int where, u32 val)
427 pci_write_config(pdev->dev.bsddev, where, val, 4);
431 static struct pci_driver *
432 linux_pci_find(device_t dev, const struct pci_device_id **idp)
434 const struct pci_device_id *id;
435 struct pci_driver *pdrv;
439 vendor = pci_get_vendor(dev);
440 device = pci_get_device(dev);
442 spin_lock(&pci_lock);
443 list_for_each_entry(pdrv, &pci_drivers, links) {
444 for (id = pdrv->id_table; id->vendor != 0; id++) {
445 if (vendor == id->vendor && device == id->device) {
447 spin_unlock(&pci_lock);
452 spin_unlock(&pci_lock);
457 linux_pci_probe(device_t dev)
459 const struct pci_device_id *id;
460 struct pci_driver *pdrv;
462 if ((pdrv = linux_pci_find(dev, &id)) == NULL)
464 if (device_get_driver(dev) != &pdrv->driver)
466 device_set_desc(dev, pdrv->name);
471 linux_pci_attach(device_t dev)
473 struct resource_list_entry *rle;
474 struct pci_dev *pdev;
475 struct pci_driver *pdrv;
476 const struct pci_device_id *id;
479 pdrv = linux_pci_find(dev, &id);
480 pdev = device_get_softc(dev);
481 pdev->dev.parent = &linux_rootdev;
482 pdev->dev.bsddev = dev;
483 INIT_LIST_HEAD(&pdev->dev.irqents);
484 pdev->device = id->device;
485 pdev->vendor = id->vendor;
486 pdev->dev.dma_mask = &pdev->dma_mask;
488 kobject_init(&pdev->dev.kobj, &dev_ktype);
489 kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
490 kobject_add(&pdev->dev.kobj, &linux_rootdev.kobj,
491 kobject_name(&pdev->dev.kobj));
492 rle = _pci_get_rle(pdev, SYS_RES_IRQ, 0);
494 pdev->dev.irq = rle->start;
497 pdev->irq = pdev->dev.irq;
499 spin_lock(&pci_lock);
500 list_add(&pdev->links, &pci_devices);
501 spin_unlock(&pci_lock);
502 error = pdrv->probe(pdev, id);
505 spin_lock(&pci_lock);
506 list_del(&pdev->links);
507 spin_unlock(&pci_lock);
508 put_device(&pdev->dev);
515 linux_pci_detach(device_t dev)
517 struct pci_dev *pdev;
519 pdev = device_get_softc(dev);
521 pdev->pdrv->remove(pdev);
523 spin_lock(&pci_lock);
524 list_del(&pdev->links);
525 spin_unlock(&pci_lock);
526 put_device(&pdev->dev);
531 static device_method_t pci_methods[] = {
532 DEVMETHOD(device_probe, linux_pci_probe),
533 DEVMETHOD(device_attach, linux_pci_attach),
534 DEVMETHOD(device_detach, linux_pci_detach),
539 pci_register_driver(struct pci_driver *pdrv)
544 spin_lock(&pci_lock);
545 list_add(&pdrv->links, &pci_drivers);
546 spin_unlock(&pci_lock);
547 bus = devclass_find("pci");
548 pdrv->driver.name = pdrv->name;
549 pdrv->driver.methods = pci_methods;
550 pdrv->driver.size = sizeof(struct pci_dev);
552 error = devclass_add_driver(bus, &pdrv->driver, BUS_PASS_DEFAULT,
561 pci_unregister_driver(struct pci_driver *pdrv)
565 list_del(&pdrv->links);
566 bus = devclass_find("pci");
568 devclass_delete_driver(bus, &pdrv->driver);
578 * Enable msix, positive errors indicate actual number of available
579 * vectors. Negative errors are failures.
582 pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq)
584 struct resource_list_entry *rle;
589 avail = pci_msix_count(pdev->dev.bsddev);
596 if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
599 * Handle case where "pci_alloc_msix()" may allocate less
600 * interrupts than available and return with no error:
603 pci_release_msi(pdev->dev.bsddev);
606 rle = _pci_get_rle(pdev, SYS_RES_IRQ, 1);
607 pdev->dev.msix = rle->start;
608 pdev->dev.msix_max = rle->start + avail;
609 for (i = 0; i < nreq; i++)
610 entries[i].vector = pdev->dev.msix + i;
614 #define pci_enable_msix_range linux_pci_enable_msix_range
616 pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
617 int minvec, int maxvec)
626 rc = pci_enable_msix(dev, entries, nvec);
/* Error channels are never reported offline on FreeBSD. */
static inline int pci_channel_offline(struct pci_dev *pdev)
{
	return false;
}

/* SR-IOV is not supported by this compatibility layer. */
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	return -ENODEV;
}

static inline void pci_disable_sriov(struct pci_dev *dev)
{
}
/**
 * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
 * @_table: device table name
 *
 * This macro is used to create a struct pci_device_id array (a device table)
 * in a generic manner.
 */
#define	DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitdata
/* XXX This should not be necessary. */
#define	pcix_set_mmrbc(d, v)	0
#define	pcix_get_max_mmrbc(d)	0
#define	pcie_set_readrq(d, v)	0

/* Linux DMA direction constants (match enum dma_data_direction order). */
#define	PCI_DMA_BIDIRECTIONAL	0
#define	PCI_DMA_TODEVICE	1
#define	PCI_DMA_FROMDEVICE	2
#define	PCI_DMA_NONE		3

/* The legacy pci_* DMA API is implemented on top of the dma_* API. */
#define	pci_pool		dma_pool
#define	pci_pool_destroy	dma_pool_destroy
#define	pci_pool_alloc		dma_pool_alloc
#define	pci_pool_free		dma_pool_free
#define	pci_pool_create(_name, _pdev, _size, _align, _alloc)		\
	dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc)
#define	pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle)		\
	dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
	    _size, _vaddr, _dma_handle)
#define	pci_map_sg(_hwdev, _sg, _nents, _dir)				\
	dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev),		\
	    _sg, _nents, (enum dma_data_direction)_dir)
#define	pci_map_single(_hwdev, _ptr, _size, _dir)			\
	dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev),	\
	    (_ptr), (_size), (enum dma_data_direction)_dir)
#define	pci_unmap_single(_hwdev, _addr, _size, _dir)			\
	dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
	    _addr, _size, (enum dma_data_direction)_dir)
#define	pci_unmap_sg(_hwdev, _sg, _nents, _dir)				\
	dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev,		\
	    _sg, _nents, (enum dma_data_direction)_dir)
#define	pci_map_page(_hwdev, _page, _offset, _size, _dir)		\
	dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,	\
	    _offset, _size, (enum dma_data_direction)_dir)
#define	pci_unmap_page(_hwdev, _dma_address, _size, _dir)		\
	dma_unmap_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
	    _dma_address, _size, (enum dma_data_direction)_dir)
#define	pci_set_dma_mask(_pdev, mask)	dma_set_mask(&(_pdev)->dev, (mask))
#define	pci_dma_mapping_error(_pdev, _dma_addr)				\
	dma_mapping_error(&(_pdev)->dev, _dma_addr)
#define	pci_set_consistent_dma_mask(_pdev, _mask)			\
	dma_set_coherent_mask(&(_pdev)->dev, (_mask))
#define	DECLARE_PCI_UNMAP_ADDR(x)	DEFINE_DMA_UNMAP_ADDR(x);
#define	DECLARE_PCI_UNMAP_LEN(x)	DEFINE_DMA_UNMAP_LEN(x);
#define	pci_unmap_addr		dma_unmap_addr
#define	pci_unmap_addr_set	dma_unmap_addr_set
#define	pci_unmap_len		dma_unmap_len
#define	pci_unmap_len_set	dma_unmap_len_set
711 typedef unsigned int __bitwise pci_channel_state_t;
712 typedef unsigned int __bitwise pci_ers_result_t;
714 enum pci_channel_state {
715 /* I/O channel is in normal state */
716 pci_channel_io_normal = (__force pci_channel_state_t) 1,
718 /* I/O to channel is blocked */
719 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
721 /* PCI card is dead */
722 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
725 enum pci_ers_result {
726 /* no result/none/not supported in device driver */
727 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
729 /* Device driver can recover without slot reset */
730 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
732 /* Device driver wants slot to be reset. */
733 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
735 /* Device has completely failed, is unrecoverable */
736 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
738 /* Device driver is fully recovered and operational */
739 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
743 /* PCI bus error event callbacks */
744 struct pci_error_handlers {
745 /* PCI bus error detected on this device */
746 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
747 enum pci_channel_state error);
749 /* MMIO has been re-enabled, but not DMA */
750 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
752 /* PCI Express link has been reset */
753 pci_ers_result_t (*link_reset)(struct pci_dev *dev);
755 /* PCI slot has been reset */
756 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
758 /* Device driver may resume normal operations */
759 void (*resume)(struct pci_dev *dev);
/* FreeBSD does not support SR-IOV - yet */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
	/* Without virtual functions, every device is its own physical fn. */
	return dev;
}
768 static inline bool pci_is_pcie(struct pci_dev *dev)
770 return !!pci_pcie_cap(dev);
773 static inline u16 pcie_flags_reg(struct pci_dev *dev)
778 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
782 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16);
788 static inline int pci_pcie_type(struct pci_dev *dev)
790 return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
793 static inline int pcie_cap_version(struct pci_dev *dev)
795 return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS;
798 static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev)
800 int type = pci_pcie_type(dev);
802 return pcie_cap_version(dev) > 1 ||
803 type == PCI_EXP_TYPE_ROOT_PORT ||
804 type == PCI_EXP_TYPE_ENDPOINT ||
805 type == PCI_EXP_TYPE_LEG_END;
808 static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
813 static inline bool pcie_cap_has_sltctl(struct pci_dev *dev)
815 int type = pci_pcie_type(dev);
817 return pcie_cap_version(dev) > 1 ||
818 type == PCI_EXP_TYPE_ROOT_PORT ||
819 (type == PCI_EXP_TYPE_DOWNSTREAM &&
820 pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT);
823 static inline bool pcie_cap_has_rtctl(struct pci_dev *dev)
825 int type = pci_pcie_type(dev);
827 return pcie_cap_version(dev) > 1 ||
828 type == PCI_EXP_TYPE_ROOT_PORT ||
829 type == PCI_EXP_TYPE_RC_EC;
832 static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
834 if (!pci_is_pcie(dev))
838 case PCI_EXP_FLAGS_TYPE:
843 return pcie_cap_has_devctl(dev);
847 return pcie_cap_has_lnkctl(dev);
851 return pcie_cap_has_sltctl(dev);
855 return pcie_cap_has_rtctl(dev);
856 case PCI_EXP_DEVCAP2:
857 case PCI_EXP_DEVCTL2:
858 case PCI_EXP_LNKCAP2:
859 case PCI_EXP_LNKCTL2:
860 case PCI_EXP_LNKSTA2:
861 return pcie_cap_version(dev) > 1;
868 static inline int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
873 if (!pcie_capability_reg_implemented(dev, pos))
876 return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
880 #endif /* _LINUX_PCI_H_ */