From d753f28614ff35172330b2e587355bc34d093424 Mon Sep 17 00:00:00 2001 From: bz Date: Tue, 30 Sep 2014 16:55:19 +0000 Subject: [PATCH] MFC 271745,271834,271899,271900,271913,272022,272023: Revert changes to shared code of the ixl and ixlv drivers to allow for easier long-term maintainability. Restrict the drivers to building on amd64 for now as it is only tested on that 64bit architecture. Just depend on PCI and neither INET nor INET6; also make sure we can build individual drivers and they do not depend on each other anymore. Reviewed by: gnn, eric.joyner intel.com PR: 193824 Approved by: re (gjb) git-svn-id: svn://svn.freebsd.org/base/stable/10@272313 ccf9f872-aa2e-dd11-9fc8-001c23d0bc1f --- sys/amd64/conf/GENERIC | 2 ++ sys/amd64/conf/NOTES | 4 ++++ sys/conf/NOTES | 2 -- sys/conf/files.amd64 | 20 +++++++++++++++++ sys/dev/ixl/i40e_alloc.h | 11 +++++----- sys/dev/ixl/i40e_common.c | 8 +++---- sys/dev/ixl/i40e_osdep.c | 46 +++++++++++++++++++-------------------- sys/dev/ixl/i40e_osdep.h | 5 ----- sys/dev/ixl/if_ixl.c | 18 ++++++++------- sys/dev/ixl/if_ixlv.c | 18 ++++++++------- sys/dev/ixl/ixl_txrx.c | 13 ++++++++++- 11 files changed, 91 insertions(+), 56 deletions(-) diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC index 8a7763d8b..03a0f9fc0 100644 --- a/sys/amd64/conf/GENERIC +++ b/sys/amd64/conf/GENERIC @@ -208,6 +208,8 @@ device de # DEC/Intel DC21x4x (``Tulip'') device em # Intel PRO/1000 Gigabit Ethernet Family device igb # Intel PRO/1000 PCIE Server Gigabit Family device ixgbe # Intel PRO/10GbE PCIE Ethernet Family +device ixl # Intel XL710 40Gbe PCIE Ethernet +device ixlv # Intel XL710 40Gbe VF PCIE Ethernet device le # AMD Am7900 LANCE and Am79C9xx PCnet device ti # Alteon Networks Tigon I/II gigabit Ethernet device txp # 3Com 3cR990 (``Typhoon'') diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES index 298669897..6db6e43aa 100644 --- a/sys/amd64/conf/NOTES +++ b/sys/amd64/conf/NOTES @@ -305,6 +305,8 @@ options DRM_DEBUG # Include debug printfs (slow) # Requires the iwi firmware module # iwn: Intel Wireless WiFi Link 4965/1000/5000/6000 802.11 network adapters # Requires the iwn firmware module +# ixl: Intel XL710 40Gbe PCIE Ethernet +# ixlv: Intel XL710 40Gbe VF PCIE Ethernet # mlx4ib: Mellanox ConnectX HCA InfiniBand # mlxen: Mellanox ConnectX HCA Ethernet # mthca: Mellanox HCA InfiniBand @@ -323,6 +325,8 @@ options ED_SIC device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. device iwn # Intel 4965/1000/5000/6000 wireless NICs. 
+device ixl # Intel XL710 40Gbe PCIE Ethernet +device ixlv # Intel XL710 40Gbe VF PCIE Ethernet device mlx4ib # Mellanox ConnectX HCA InfiniBand device mlxen # Mellanox ConnectX HCA Ethernet device mthca # Mellanox HCA InfiniBand diff --git a/sys/conf/NOTES b/sys/conf/NOTES index 66f257b40..3e5f80687 100644 --- a/sys/conf/NOTES +++ b/sys/conf/NOTES @@ -2090,8 +2090,6 @@ device em # Intel Pro/1000 Gigabit Ethernet device igb # Intel Pro/1000 PCIE Gigabit Ethernet device ixgb # Intel Pro/10Gbe PCI-X Ethernet device ixgbe # Intel Pro/10Gbe PCIE Ethernet -device ixl # Intel XL710 40Gbe PCIE Ethernet -device ixlv # Intel XL710 40Gbe VF PCIE Ethernet device le # AMD Am7900 LANCE and Am79C9xx PCnet device mxge # Myricom Myri-10G 10GbE NIC device nxge # Neterion Xframe 10GbE Server/Storage Adapter diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64 index 28f9619a1..e936175df 100644 --- a/sys/conf/files.amd64 +++ b/sys/conf/files.amd64 @@ -203,6 +203,26 @@ dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_linux.c optional ipmi compat_linux32 +dev/ixl/if_ixl.c optional ixl pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/if_ixlv.c optional ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/ixlvc.c optional ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/ixl_txrx.c optional ixl pci | ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_osdep.c optional ixl pci | ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_lan_hmc.c optional ixl pci | ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_hmc.c optional ixl pci | ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_common.c optional ixl pci | ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_nvm.c optional ixl pci | ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_adminq.c optional ixl pci | ixlv pci \ + compile-with "${NORMAL_C} -I$S/dev/ixl" dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa diff --git a/sys/dev/ixl/i40e_alloc.h b/sys/dev/ixl/i40e_alloc.h index 94673572b..dc6fadd18 100755 --- a/sys/dev/ixl/i40e_alloc.h +++ b/sys/dev/ixl/i40e_alloc.h @@ -51,15 +51,16 @@ enum i40e_memory_type { }; /* prototype for functions used for dynamic memory allocation */ -enum i40e_status_code i40e_allocate_dma(struct i40e_hw *hw, +enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, - bus_size_t size, u32 alignment); -enum i40e_status_code i40e_free_dma(struct i40e_hw *hw, + enum i40e_memory_type type, + u64 size, u32 alignment); +enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem); -enum i40e_status_code i40e_allocate_virt(struct i40e_hw *hw, +enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size); -enum i40e_status_code i40e_free_virt(struct i40e_hw *hw, +enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem); #endif /* _I40E_ALLOC_H_ */ diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c index 143eeb757..ad1f9457c 100755 --- a/sys/dev/ixl/i40e_common.c +++ b/sys/dev/ixl/i40e_common.c @@ -4375,8 +4375,8 @@ enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw, cmd_resp->address = CPU_TO_LE32(addr); cmd_resp->length = CPU_TO_LE32(dw_count); - cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)(uintptr_t)buffer)); - 
cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)(uintptr_t)buffer)); + cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)buffer)); + cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer)); status = i40e_asq_send_command(hw, &desc, buffer, I40E_LO_DWORD(4*dw_count), NULL); @@ -4458,8 +4458,8 @@ enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw, cmd_resp->address = CPU_TO_LE32(addr); cmd_resp->length = CPU_TO_LE32(dw_count); - cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)(uintptr_t)buffer)); - cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)(uintptr_t)buffer)); + cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buffer)); + cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer)); status = i40e_asq_send_command(hw, &desc, buffer, I40E_LO_DWORD(4*dw_count), NULL); diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c index 30e2e57fc..214dbfc92 100755 --- a/sys/dev/ixl/i40e_osdep.c +++ b/sys/dev/ixl/i40e_osdep.c @@ -49,22 +49,22 @@ i40e_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) } i40e_status -i40e_allocate_virt(struct i40e_hw *hw, struct i40e_virt_mem *m, u32 size) +i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size) { - m->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); - return(m->va == NULL); + mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); + return(mem->va == NULL); } i40e_status -i40e_free_virt(struct i40e_hw *hw, struct i40e_virt_mem *m) +i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem) { - free(m->va, M_DEVBUF); + free(mem->va, M_DEVBUF); return(0); } i40e_status -i40e_allocate_dma(struct i40e_hw *hw, struct i40e_dma_mem *dma, - bus_size_t size, u32 alignment) +i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, + enum i40e_memory_type type __unused, u64 size, u32 alignment) { device_t dev = ((struct i40e_osdep *)hw->back)->dev; int err; @@ -81,25 +81,25 @@ i40e_allocate_dma(struct i40e_hw *hw, struct i40e_dma_mem *dma, BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ - &dma->tag); + &mem->tag); if (err != 0) { device_printf(dev, "i40e_allocate_dma: bus_dma_tag_create failed, " "error %u\n", err); goto fail_0; } - err = bus_dmamem_alloc(dma->tag, (void **)&dma->va, - BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dma->map); + err = bus_dmamem_alloc(mem->tag, (void **)&mem->va, + BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map); if (err != 0) { device_printf(dev, "i40e_allocate_dma: bus_dmamem_alloc failed, " "error %u\n", err); goto fail_1; } - err = bus_dmamap_load(dma->tag, dma->map, dma->va, + err = bus_dmamap_load(mem->tag, mem->map, mem->va, size, i40e_dmamap_cb, - &dma->pa, + &mem->pa, BUS_DMA_NOWAIT); if (err != 0) { device_printf(dev, @@ -107,28 +107,28 @@ i40e_allocate_dma(struct i40e_hw *hw, struct i40e_dma_mem *dma, "error %u\n", err); goto fail_2; } - dma->size = size; - bus_dmamap_sync(dma->tag, dma->map, + mem->size = size; + bus_dmamap_sync(mem->tag, mem->map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); return (0); fail_2: - bus_dmamem_free(dma->tag, dma->va, dma->map); + bus_dmamem_free(mem->tag, mem->va, mem->map); fail_1: - bus_dma_tag_destroy(dma->tag); + bus_dma_tag_destroy(mem->tag); fail_0: - dma->map = NULL; - dma->tag = NULL; + mem->map = NULL; + mem->tag = NULL; return (err); } i40e_status -i40e_free_dma(struct i40e_hw *hw, struct i40e_dma_mem *dma) +i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem) { - bus_dmamap_sync(dma->tag, dma->map, + bus_dmamap_sync(mem->tag, mem->map, 
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(dma->tag, dma->map); - bus_dmamem_free(dma->tag, dma->va, dma->map); - bus_dma_tag_destroy(dma->tag); + bus_dmamap_unload(mem->tag, mem->map); + bus_dmamem_free(mem->tag, mem->va, mem->map); + bus_dma_tag_destroy(mem->tag); return (0); } diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h index 3bae1672d..97908baca 100755 --- a/sys/dev/ixl/i40e_osdep.h +++ b/sys/dev/ixl/i40e_osdep.h @@ -170,9 +170,6 @@ struct i40e_hw; /* forward decl */ u16 i40e_read_pci_cfg(struct i40e_hw *, u32); void i40e_write_pci_cfg(struct i40e_hw *, u32, u16); -#define i40e_allocate_dma_mem(h, m, unused, s, a) i40e_allocate_dma(h, m, s, a) -#define i40e_free_dma_mem(h, m) i40e_free_dma(h, m) - #define i40e_debug(h, m, s, ...) i40e_debug_d(h, m, s, ##__VA_ARGS__) extern void i40e_debug_d(void *hw, u32 mask, char *fmt_str, ...); @@ -180,8 +177,6 @@ struct i40e_virt_mem { void *va; u32 size; }; -#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt(h, m, s) -#define i40e_free_virt_mem(h, m) i40e_free_virt(h, m) /* ** This hardware supports either 16 or 32 byte rx descriptors diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c index e381f4e4f..abae7a5b4 100755 --- a/sys/dev/ixl/if_ixl.c +++ b/sys/dev/ixl/if_ixl.c @@ -921,8 +921,10 @@ ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ixl_init(pf); +#ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); +#endif } else error = ether_ioctl(ifp, command, data); break; @@ -2591,7 +2593,7 @@ ixl_free_vsi(struct ixl_vsi *vsi) IXL_TX_LOCK(txr); ixl_free_que_tx(que); if (txr->base) - i40e_free_dma(&pf->hw, &txr->dma); + i40e_free_dma_mem(&pf->hw, &txr->dma); IXL_TX_UNLOCK(txr); IXL_TX_LOCK_DESTROY(txr); @@ -2600,7 +2602,7 @@ ixl_free_vsi(struct ixl_vsi *vsi) IXL_RX_LOCK(rxr); ixl_free_que_rx(que); if (rxr->base) - i40e_free_dma(&pf->hw, &rxr->dma); + i40e_free_dma_mem(&pf->hw, &rxr->dma); IXL_RX_UNLOCK(rxr); IXL_RX_LOCK_DESTROY(rxr); @@ -2668,8 +2670,8 @@ ixl_setup_stations(struct ixl_pf *pf) tsize = roundup2((que->num_desc * sizeof(struct i40e_tx_desc)) + sizeof(u32), DBA_ALIGN); - if (i40e_allocate_dma(&pf->hw, - &txr->dma, tsize, DBA_ALIGN)) { + if (i40e_allocate_dma_mem(&pf->hw, + &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; @@ -2708,8 +2710,8 @@ ixl_setup_stations(struct ixl_pf *pf) device_get_nameunit(dev), que->me); mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); - if (i40e_allocate_dma(&pf->hw, - &rxr->dma, rsize, 4096)) { + if (i40e_allocate_dma_mem(&pf->hw, + &rxr->dma, i40e_mem_reserved, rsize, 4096)) { device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; @@ -2735,9 +2737,9 @@ ixl_setup_stations(struct ixl_pf *pf) rxr = &que->rxr; txr = &que->txr; if (rxr->base) - i40e_free_dma(&pf->hw, &rxr->dma); + i40e_free_dma_mem(&pf->hw, &rxr->dma); if (txr->base) - i40e_free_dma(&pf->hw, &txr->dma); + i40e_free_dma_mem(&pf->hw, &txr->dma); } early: diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c index 2a63387c8..a29d669e9 100644 --- a/sys/dev/ixl/if_ixlv.c +++ b/sys/dev/ixl/if_ixlv.c @@ -755,8 +755,10 @@ ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data) ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ixlv_init(sc); +#ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); +#endif } else error = ether_ioctl(ifp, command, data); 
break; @@ -1457,8 +1459,8 @@ ixlv_setup_queues(struct ixlv_sc *sc) tsize = roundup2((que->num_desc * sizeof(struct i40e_tx_desc)) + sizeof(u32), DBA_ALIGN); - if (i40e_allocate_dma(&sc->hw, - &txr->dma, tsize, DBA_ALIGN)) { + if (i40e_allocate_dma_mem(&sc->hw, + &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; @@ -1497,8 +1499,8 @@ ixlv_setup_queues(struct ixlv_sc *sc) device_get_nameunit(dev), que->me); mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); - if (i40e_allocate_dma(&sc->hw, - &rxr->dma, rsize, 4096)) { //JFV - should this be DBA? + if (i40e_allocate_dma_mem(&sc->hw, + &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA? device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; @@ -1525,9 +1527,9 @@ ixlv_setup_queues(struct ixlv_sc *sc) rxr = &que->rxr; txr = &que->txr; if (rxr->base) - i40e_free_dma(&sc->hw, &rxr->dma); + i40e_free_dma_mem(&sc->hw, &rxr->dma); if (txr->base) - i40e_free_dma(&sc->hw, &txr->dma); + i40e_free_dma_mem(&sc->hw, &txr->dma); } early: @@ -2346,7 +2348,7 @@ ixlv_free_queues(struct ixl_vsi *vsi) IXL_TX_LOCK(txr); ixl_free_que_tx(que); if (txr->base) - i40e_free_dma(&sc->hw, &txr->dma); + i40e_free_dma_mem(&sc->hw, &txr->dma); IXL_TX_UNLOCK(txr); IXL_TX_LOCK_DESTROY(txr); @@ -2355,7 +2357,7 @@ ixlv_free_queues(struct ixl_vsi *vsi) IXL_RX_LOCK(rxr); ixl_free_que_rx(que); if (rxr->base) - i40e_free_dma(&sc->hw, &rxr->dma); + i40e_free_dma_mem(&sc->hw, &rxr->dma); IXL_RX_UNLOCK(rxr); IXL_RX_LOCK_DESTROY(rxr); diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c index 12e09f776..306d4ecf0 100755 --- a/sys/dev/ixl/ixl_txrx.c +++ b/sys/dev/ixl/ixl_txrx.c @@ -1085,10 +1085,12 @@ ixl_allocate_rx_data(struct ixl_queue *que) int ixl_init_rx_ring(struct ixl_queue *que) { + struct rx_ring *rxr = &que->rxr; +#if defined(INET6) || defined(INET) struct ixl_vsi *vsi = que->vsi; struct ifnet *ifp = vsi->ifp; - struct rx_ring *rxr = &que->rxr; struct lro_ctrl *lro = &rxr->lro; +#endif struct ixl_rx_buf *buf; bus_dma_segment_t pseg[1], hseg[1]; int rsize, nsegs, error = 0; @@ -1187,6 +1189,7 @@ ixl_init_rx_ring(struct ixl_queue *que) rxr->bytes = 0; rxr->discard = FALSE; +#if defined(INET6) || defined(INET) /* ** Now set up the LRO interface: */ @@ -1200,6 +1203,7 @@ ixl_init_rx_ring(struct ixl_queue *que) rxr->lro_enabled = TRUE; lro->ifp = vsi->ifp; } +#endif bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); @@ -1274,6 +1278,8 @@ ixl_free_que_rx(struct ixl_queue *que) static __inline void ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) { + +#if defined(INET6) || defined(INET) /* * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet * should be computed by hardware. 
Also it should not have VLAN tag in @@ -1293,6 +1299,7 @@ ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) if (tcp_lro_rx(&rxr->lro, m, 0) == 0) return; } +#endif IXL_RX_UNLOCK(rxr); (*ifp->if_input)(ifp, m); IXL_RX_LOCK(rxr); @@ -1350,8 +1357,10 @@ ixl_rxeof(struct ixl_queue *que, int count) struct ixl_vsi *vsi = que->vsi; struct rx_ring *rxr = &que->rxr; struct ifnet *ifp = vsi->ifp; +#if defined(INET6) || defined(INET) struct lro_ctrl *lro = &rxr->lro; struct lro_entry *queued; +#endif int i, nextp, processed = 0; union i40e_rx_desc *cur; struct ixl_rx_buf *rbuf, *nbuf; @@ -1559,6 +1568,7 @@ ixl_rxeof(struct ixl_queue *que, int count) rxr->next_check = i; +#if defined(INET6) || defined(INET) /* * Flush any outstanding LRO work */ @@ -1566,6 +1576,7 @@ ixl_rxeof(struct ixl_queue *que, int count) SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } +#endif IXL_RX_UNLOCK(rxr); return (FALSE); -- 2.45.0
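
Editor's note, not part of the patch: the revert above folds the FreeBSD-specific i40e_allocate_dma()/i40e_free_dma() wrappers back into the stock shared-code entry points i40e_allocate_dma_mem()/i40e_free_dma_mem(), including the i40e_memory_type argument that the FreeBSD osdep implementation marks __unused. As a rough orientation only, the following sketch shows how a caller such as ixl_setup_stations() would use the restored API to back a TX descriptor ring. The "example_" names, the EXAMPLE_DBA_ALIGN stand-in for the driver's DBA_ALIGN constant, and the exact include set are assumptions made for illustration; the real callers live in if_ixl.c and if_ixlv.c.

/*
 * Illustrative sketch only -- not part of the patch.  Names prefixed with
 * "example_", the EXAMPLE_DBA_ALIGN value and the include list are assumed
 * for the sake of a self-contained fragment.
 */
#include <sys/param.h>			/* roundup2() */
#include <sys/errno.h>			/* ENOMEM */

#include "i40e_type.h"			/* struct i40e_hw, struct i40e_tx_desc (assumed header name) */
#include "i40e_alloc.h"			/* i40e_allocate_dma_mem(), i40e_free_dma_mem() */

#define	EXAMPLE_DBA_ALIGN	128	/* stand-in for the driver's DBA_ALIGN */

static int
example_setup_tx_ring(struct i40e_hw *hw, struct i40e_dma_mem *dma,
    u32 num_desc)
{
	u64 tsize;

	/*
	 * Descriptor area plus a trailing u32, rounded up to the ring
	 * alignment, mirroring the tsize computation in ixl_setup_stations().
	 */
	tsize = roundup2(((u64)num_desc * sizeof(struct i40e_tx_desc)) +
	    sizeof(u32), EXAMPLE_DBA_ALIGN);

	/*
	 * The i40e_memory_type argument exists to match the shared-code
	 * prototype; the FreeBSD osdep layer ignores it.  A nonzero return
	 * indicates the bus_dma setup failed.
	 */
	if (i40e_allocate_dma_mem(hw, dma, i40e_mem_reserved, tsize,
	    EXAMPLE_DBA_ALIGN))
		return (ENOMEM);
	return (0);
}

static void
example_free_tx_ring(struct i40e_hw *hw, struct i40e_dma_mem *dma)
{
	/* Syncs, unloads and frees the bus_dma resources allocated above. */
	(void)i40e_free_dma_mem(hw, dma);
}

Within the tree the driver headers are found through the compile-with "-I$S/dev/ixl" entries this patch adds to sys/conf/files.amd64.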