1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
40 #include "ixl_iw_int.h"
44 #include "ixl_pf_iov.h"
47 /*********************************************************************
49 *********************************************************************/
/*
 * Driver version: the three numeric components below are stringified with
 * __XSTRING and concatenated, producing "2.0.0-k".  This string is handed
 * to iflib via isc_driver_version in ixl_sctx_init.
 */
50 #define IXL_DRIVER_VERSION_MAJOR 2
51 #define IXL_DRIVER_VERSION_MINOR 0
52 #define IXL_DRIVER_VERSION_BUILD 0
54 #define IXL_DRIVER_VERSION_STRING \
55 __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \
56 __XSTRING(IXL_DRIVER_VERSION_MINOR) "." \
57 __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
59 /*********************************************************************
62 * Used by probe to select devices to load on
64 * ( Vendor ID, Device ID, Branding String )
65 *********************************************************************/
/*
 * PCI device ID table consumed by iflib_device_probe() (and exported to the
 * PNP loader via IFLIB_PNP_INFO below).  One PVIDV() entry per supported
 * X710/XL710/XXV710/X722 variant: (vendor, device, branding string).
 * NOTE(review): the terminating PVID_END sentinel is not visible in this
 * listing — confirm it is present after the "required last entry" comment.
 */
67 static pci_vendor_info_t ixl_vendor_info_array[] =
69 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
70 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
71 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
72 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
73 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
74 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
75 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
76 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
77 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
78 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
79 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
80 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
81 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
82 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
83 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
84 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
85 /* required last entry */
89 /*********************************************************************
91 *********************************************************************/
/*
 * Forward declarations.  The ifdi_* handlers below are wired into
 * ixl_if_methods; the last three are internal helpers.  The commented-out
 * ixl_if_link_intr_enable prototype matches the disabled
 * ifdi_link_intr_enable DEVMETHOD in the method table — kept for when that
 * iflib hook is re-enabled.
 */
92 /*** IFLIB interface ***/
93 static void *ixl_register(device_t dev);
94 static int ixl_if_attach_pre(if_ctx_t ctx);
95 static int ixl_if_attach_post(if_ctx_t ctx);
96 static int ixl_if_detach(if_ctx_t ctx);
97 static int ixl_if_shutdown(if_ctx_t ctx);
98 static int ixl_if_suspend(if_ctx_t ctx);
99 static int ixl_if_resume(if_ctx_t ctx);
100 static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
101 static void ixl_if_enable_intr(if_ctx_t ctx);
102 static void ixl_if_disable_intr(if_ctx_t ctx);
103 static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
104 static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
105 static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
106 static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
107 static void ixl_if_queues_free(if_ctx_t ctx);
108 static void ixl_if_update_admin_status(if_ctx_t ctx);
109 static void ixl_if_multi_set(if_ctx_t ctx);
110 static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
111 static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
112 static int ixl_if_media_change(if_ctx_t ctx);
113 static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
114 static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
115 static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
116 static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
117 static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
118 static void ixl_if_vflr_handle(if_ctx_t ctx);
119 // static void ixl_if_link_intr_enable(if_ctx_t ctx);
120 static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
121 static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
/* Internal helper functions, not part of the iflib interface */
124 static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int);
125 static void ixl_save_pf_tunables(struct ixl_pf *);
126 static int ixl_allocate_pci_resources(struct ixl_pf *);
128 /*********************************************************************
129 * FreeBSD Device Interface Entry Points
130 *********************************************************************/
/*
 * newbus device method table: probe/attach/detach/shutdown are delegated
 * straight to iflib; only device_register and the SR-IOV (pci_iov_*)
 * methods are driver-specific.  The driver softc is the full struct ixl_pf.
 * NOTE(review): the PCI_IOV #ifdef guards and DEVMETHOD_END/closing braces
 * are elided in this listing.
 */
132 static device_method_t ixl_methods[] = {
133 /* Device interface */
134 DEVMETHOD(device_register, ixl_register),
135 DEVMETHOD(device_probe, iflib_device_probe),
136 DEVMETHOD(device_attach, iflib_device_attach),
137 DEVMETHOD(device_detach, iflib_device_detach),
138 DEVMETHOD(device_shutdown, iflib_device_shutdown),
140 DEVMETHOD(pci_iov_init, ixl_iov_init),
141 DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
142 DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
147 static driver_t ixl_driver = {
148 "ixl", ixl_methods, sizeof(struct ixl_pf),
/* Module registration: attach to the pci bus, export PNP data, and declare
 * hard dependencies on pci, ether and iflib. */
151 devclass_t ixl_devclass;
152 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
153 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
154 MODULE_VERSION(ixl, 3);
156 MODULE_DEPEND(ixl, pci, 1, 1, 1);
157 MODULE_DEPEND(ixl, ether, 1, 1, 1);
158 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
/*
 * iflib device-interface method table: maps each ifdi_* hook to its
 * ixl_if_* implementation above.  ifdi_link_intr_enable is intentionally
 * disabled (see the matching commented-out prototype).
 */
160 static device_method_t ixl_if_methods[] = {
161 DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
162 DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
163 DEVMETHOD(ifdi_detach, ixl_if_detach),
164 DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
165 DEVMETHOD(ifdi_suspend, ixl_if_suspend),
166 DEVMETHOD(ifdi_resume, ixl_if_resume),
167 DEVMETHOD(ifdi_init, ixl_if_init),
168 DEVMETHOD(ifdi_stop, ixl_if_stop),
169 DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
170 DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
171 DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
172 //DEVMETHOD(ifdi_link_intr_enable, ixl_if_link_intr_enable),
173 DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
174 DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
175 DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
176 DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
177 DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
178 DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
179 DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
180 DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
181 DEVMETHOD(ifdi_media_status, ixl_if_media_status),
182 DEVMETHOD(ifdi_media_change, ixl_if_media_change),
183 DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
184 DEVMETHOD(ifdi_timer, ixl_if_timer),
185 DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
186 DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
187 DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
188 DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
189 DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
190 DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
/* driver_t handed to iflib through isc_driver in ixl_sctx_init */
196 static driver_t ixl_if_driver = {
197 "ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
201 ** TUNEABLE PARAMETERS:
/* All tunables live under the hw.ixl sysctl node and are loader-settable
 * (CTLFLAG_RDTUN).  Values are snapshotted per-PF by ixl_save_pf_tunables()
 * during attach. */
204 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
205 "IXL driver parameters");
208 * Leave this on unless you need to send flow control
209 * frames (or other control frames) from software
211 static int ixl_enable_tx_fc_filter = 1;
212 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
213 &ixl_enable_tx_fc_filter);
214 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
215 &ixl_enable_tx_fc_filter, 0,
216 "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
/* 0 selects the default I2C access method; see IXL_SYSCTL_HELP_I2C_METHOD
 * for the meaning of each value. */
218 static int ixl_i2c_access_method = 0;
219 TUNABLE_INT("hw.ixl.i2c_access_method",
220 &ixl_i2c_access_method);
221 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
222 &ixl_i2c_access_method, 0,
223 IXL_SYSCTL_HELP_I2C_METHOD);
226 * Different method for processing TX descriptor
/* Selects between the head-writeback (ixl_txrx_hwb) and descriptor
 * write-back (ixl_txrx_dwb) TX completion paths chosen in attach_pre. */
229 static int ixl_enable_head_writeback = 1;
230 TUNABLE_INT("hw.ixl.enable_head_writeback",
231 &ixl_enable_head_writeback);
232 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
233 &ixl_enable_head_writeback, 0,
234 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
236 static int ixl_core_debug_mask = 0;
237 TUNABLE_INT("hw.ixl.core_debug_mask",
238 &ixl_core_debug_mask);
239 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
240 &ixl_core_debug_mask, 0,
241 "Display debug statements that are printed in non-shared code");
243 static int ixl_shared_debug_mask = 0;
244 TUNABLE_INT("hw.ixl.shared_debug_mask",
245 &ixl_shared_debug_mask);
246 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
247 &ixl_shared_debug_mask, 0,
248 "Display debug statements that are printed in shared code");
252 ** Controls for Interrupt Throttling
253 ** - true/false for dynamic adjustment
254 ** - default values for static ITR
256 static int ixl_dynamic_rx_itr = 0;
257 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
258 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
259 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
261 static int ixl_dynamic_tx_itr = 0;
262 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
263 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
264 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
/* Static ITR defaults: 8K interrupts/sec on RX, 4K on TX */
267 static int ixl_rx_itr = IXL_ITR_8K;
268 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
269 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
270 &ixl_rx_itr, 0, "RX Interrupt Rate");
272 static int ixl_tx_itr = IXL_ITR_4K;
273 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
274 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
275 &ixl_tx_itr, 0, "TX Interrupt Rate");
/* iWARP tunables — non-static: shared with the ixl_iw code */
278 int ixl_enable_iwarp = 0;
279 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
280 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
281 &ixl_enable_iwarp, 0, "iWARP enabled");
/* Older FreeBSD lacks the MSI-X infrastructure needed for more than one
 * iWARP vector; cap accordingly (the #else/#endif lines are elided here). */
283 #if __FreeBSD_version < 1100000
284 int ixl_limit_iwarp_msix = 1;
286 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
288 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
289 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
290 &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP");
/* The two TX/RX method tables (head-writeback vs. descriptor write-back),
 * defined in the txrx source file; attach_pre picks one based on the
 * enable_head_writeback tunable. */
293 extern struct if_txrx ixl_txrx_hwb;
294 extern struct if_txrx ixl_txrx_dwb;
/*
 * Static iflib shared-context template: DMA alignment/segmentation limits,
 * descriptor-ring min/max/default sizes, the device ID table, and driver
 * identity.  Fields not fixed at compile time (queue counts, ring byte
 * sizes, isc_txrx, capabilities) are filled in by ixl_if_attach_pre().
 */
296 static struct if_shared_ctx ixl_sctx_init = {
297 .isc_magic = IFLIB_MAGIC,
298 .isc_q_align = PAGE_SIZE,
299 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
300 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
301 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
302 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
303 .isc_rx_maxsize = 16384,
304 .isc_rx_nsegments = IXL_MAX_RX_SEGS,
305 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
310 .isc_admin_intrcnt = 1,
311 .isc_vendor_info = ixl_vendor_info_array,
312 .isc_driver_version = IXL_DRIVER_VERSION_STRING,
313 .isc_driver = &ixl_if_driver,
314 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_ADMIN_ALWAYS_RUN,
316 .isc_nrxd_min = {IXL_MIN_RING},
317 .isc_ntxd_min = {IXL_MIN_RING},
318 .isc_nrxd_max = {IXL_MAX_RING},
319 .isc_ntxd_max = {IXL_MAX_RING},
320 .isc_nrxd_default = {IXL_DEFAULT_RING},
321 .isc_ntxd_default = {IXL_DEFAULT_RING},
/* Pointer handed to iflib by ixl_register() */
324 if_shared_ctx_t ixl_sctx = &ixl_sctx_init;
328 ixl_register(device_t dev)
/*
 * ixl_allocate_pci_resources - map BAR0 and record PCI identity.
 *
 * Allocates the device's memory BAR, copies the PCI vendor/device/revision
 * and subsystem IDs plus bus slot/function into the shared-code i40e_hw,
 * and saves the bus-space tag/handle/size in pf->osdep so register access
 * macros can reach the mapped BAR.  Returns non-zero on failure (the error
 * return path after the bus_alloc_resource_any() failure is elided in this
 * listing).
 */
334 ixl_allocate_pci_resources(struct ixl_pf *pf)
337 struct i40e_hw *hw = &pf->hw;
338 device_t dev = iflib_get_dev(pf->vsi.ctx);
342 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
345 if (!(pf->pci_mem)) {
346 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
350 /* Save off the PCI information */
351 hw->vendor_id = pci_get_vendor(dev);
352 hw->device_id = pci_get_device(dev);
353 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
354 hw->subsystem_vendor_id =
355 pci_read_config(dev, PCIR_SUBVEND_0, 2);
356 hw->subsystem_device_id =
357 pci_read_config(dev, PCIR_SUBDEV_0, 2);
359 hw->bus.device = pci_get_slot(dev);
360 hw->bus.func = pci_get_function(dev);
362 /* Save off register access information */
363 pf->osdep.mem_bus_space_tag =
364 rman_get_bustag(pf->pci_mem);
365 pf->osdep.mem_bus_space_handle =
366 rman_get_bushandle(pf->pci_mem);
367 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
368 pf->osdep.flush_reg = I40E_GLGEN_STAT;
/* Shared code dereferences hw_addr/back through the osdep wrapper rather
 * than as a flat pointer. */
371 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
372 pf->hw.back = &pf->osdep;
/*
 * ixl_if_attach_pre - first-stage iflib attach.
 *
 * Runs before queue memory is allocated: maps BAR0, resets the PF,
 * initializes the i40e shared code and admin queue, validates the firmware
 * API version, sets up the LAN HMC, disables firmware LLDP on old NVMs,
 * reads MAC addresses, programs the filter control, and finally fills in
 * the iflib softc-ctx fields (queue counts, ring sizes, TX/RX method table,
 * offload capabilities).  Returns 0 on success or an errno; the unwind
 * labels at the bottom tear down HMC/adminq/PCI in reverse order.
 * NOTE(review): this listing is elided — most error-path gotos and several
 * assignments are not visible here.
 */
378 ixl_if_attach_pre(if_ctx_t ctx)
384 if_softc_ctx_t scctx;
385 struct i40e_filter_control_settings filter;
386 enum i40e_status_code status;
389 INIT_DEBUGOUT("ixl_if_attach_pre: begin");
391 /* Allocate, clear, and link in our primary soft structure */
392 dev = iflib_get_dev(ctx);
393 pf = iflib_get_softc(ctx);
400 ** Note this assumes we have a single embedded VSI,
401 ** this could be enhanced later to allocate multiple
403 //vsi->dev = pf->dev;
408 vsi->media = iflib_get_media(ctx);
409 vsi->shared = scctx = iflib_get_softc_ctx(ctx);
411 /* Save tunable values */
412 ixl_save_pf_tunables(pf);
414 /* Do PCI setup - map BAR0, etc */
415 if (ixl_allocate_pci_resources(pf)) {
416 device_printf(dev, "Allocation of PCI resources failed\n");
421 /* Establish a clean starting point */
423 status = i40e_pf_reset(hw);
425 device_printf(dev, "PF reset failure %s\n",
426 i40e_stat_str(hw, status));
431 /* Initialize the shared code */
432 status = i40e_init_shared_code(hw);
434 device_printf(dev, "Unable to initialize shared code, error %s\n",
435 i40e_stat_str(hw, status));
440 /* Set up the admin queue */
441 hw->aq.num_arq_entries = IXL_AQ_LEN;
442 hw->aq.num_asq_entries = IXL_AQ_LEN;
443 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
444 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
/* An API-version mismatch is reported separately below, so it is not
 * treated as a hard init failure here. */
446 status = i40e_init_adminq(hw);
447 if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
448 device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
449 i40e_stat_str(hw, status));
453 ixl_print_nvm_version(pf);
455 if (status == I40E_ERR_FIRMWARE_API_VERSION) {
456 device_printf(dev, "The driver for the device stopped "
457 "because the NVM image is newer than expected.\n");
458 device_printf(dev, "You must install the most recent version of "
459 "the network driver.\n");
/* Non-fatal API skew: warn but continue attaching */
464 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
465 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
466 device_printf(dev, "The driver for the device detected "
467 "a newer version of the NVM image than expected.\n");
468 device_printf(dev, "Please install the most recent version "
469 "of the network driver.\n");
470 } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
471 device_printf(dev, "The driver for the device detected "
472 "an older version of the NVM image than expected.\n");
473 device_printf(dev, "Please update the NVM image.\n");
477 i40e_clear_pxe_mode(hw);
479 /* Get capabilities from the device */
480 error = ixl_get_hw_capabilities(pf);
482 device_printf(dev, "get_hw_capabilities failed: %d\n",
487 /* Set up host memory cache */
488 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
489 hw->func_caps.num_rx_qp, 0, 0);
491 device_printf(dev, "init_lan_hmc failed: %s\n",
492 i40e_stat_str(hw, status));
495 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
497 device_printf(dev, "configure_lan_hmc failed: %s\n",
498 i40e_stat_str(hw, status));
502 /* Disable LLDP from the firmware for certain NVM versions */
503 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
504 (pf->hw.aq.fw_maj_ver < 4)) {
505 i40e_aq_stop_lldp(hw, TRUE, NULL);
506 pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
509 /* Get MAC addresses from hardware */
510 i40e_get_mac_addr(hw, hw->mac.addr);
511 error = i40e_validate_mac_addr(hw->mac.addr);
513 device_printf(dev, "validate_mac_addr failed: %d\n", error);
516 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
517 iflib_set_mac(ctx, hw->mac.addr);
518 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
520 /* Set up the device filtering */
521 bzero(&filter, sizeof(filter));
522 filter.enable_ethtype = TRUE;
523 filter.enable_macvlan = TRUE;
524 filter.enable_fdir = FALSE;
525 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
/* A filter-control failure is logged but deliberately not fatal */
526 if (i40e_set_filter_control(hw, &filter))
527 device_printf(dev, "i40e_set_filter_control() failed\n");
529 /* Query device FW LLDP status */
530 ixl_get_fw_lldp_status(pf);
531 /* Tell FW to apply DCB config on link up */
532 i40e_aq_set_dcb_parameters(hw, true, NULL);
534 /* Fill out iflib parameters */
535 if (hw->mac.type == I40E_MAC_X722)
536 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
538 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
/* Head-writeback rings carry an extra u32 (the HW-written head index);
 * both ring byte sizes are rounded up to DBA_ALIGN. */
539 if (vsi->enable_head_writeback) {
540 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
541 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
542 scctx->isc_txrx = &ixl_txrx_hwb;
544 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
545 * sizeof(struct i40e_tx_desc), DBA_ALIGN);
546 scctx->isc_txrx = &ixl_txrx_dwb;
548 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
549 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
550 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
551 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
552 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
553 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
554 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
555 scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
556 scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
557 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
559 INIT_DEBUGOUT("ixl_if_attach_pre: end");
/* Error unwind: tear down in reverse order of setup */
563 i40e_shutdown_lan_hmc(hw);
565 i40e_shutdown_adminq(hw);
567 ixl_free_pci_resources(pf);
/*
 * ixl_if_attach_post - second-stage iflib attach.
 *
 * Runs after iflib has created the ifnet and queues: sets up the OS
 * interface, reads link status, configures the VSI switch, initializes the
 * filter list and queue manager, reserves a contiguous queue range for the
 * PF VSI, masks PHY interrupts down to link/autoneg/module events, enables
 * the admin-queue (vector 0) interrupt, registers sysctls/stats, and
 * optionally brings up SR-IOV and the iWARP client.  On error iflib will
 * invoke ixl_if_detach() for cleanup (see comment at the end).
 * NOTE(review): listing is elided — error gotos and #ifdef PCI_IOV/IXL_IW
 * guards are not fully visible.
 */
573 ixl_if_attach_post(if_ctx_t ctx)
580 enum i40e_status_code status;
582 INIT_DEBUGOUT("ixl_if_attach_post: begin");
584 dev = iflib_get_dev(ctx);
585 pf = iflib_get_softc(ctx);
587 vsi->ifp = iflib_get_ifp(ctx);
590 /* Setup OS network interface / ifnet */
591 if (ixl_setup_interface(dev, pf)) {
592 device_printf(dev, "interface setup failed!\n");
597 /* Determine link state */
598 if (ixl_attach_get_link_status(pf)) {
603 error = ixl_switch_config(pf);
605 device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
610 /* Add protocol filters to list */
611 ixl_init_filters(vsi);
613 /* Init queue allocation manager */
614 error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
616 device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
620 /* reserve a contiguous allocation for the PF's VSI */
621 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
622 max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
624 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
628 device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
629 pf->qtag.num_allocated, pf->qtag.num_active);
631 /* Limit PHY interrupts to link, autoneg, and modules failure */
632 status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
635 device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
636 " aq_err %s\n", i40e_stat_str(hw, status),
637 i40e_aq_str(hw, hw->aq.asq_last_status));
641 /* Get the bus configuration and set the shared code */
642 ixl_get_bus_info(pf);
644 /* Keep admin queue interrupts active while driver is loaded */
645 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
646 ixl_configure_intr0_msix(pf);
647 ixl_enable_intr0(hw);
650 /* Set initial advertised speed sysctl value */
651 ixl_set_initial_advertised_speeds(pf);
653 /* Initialize statistics & add sysctls */
654 ixl_add_device_sysctls(pf);
655 ixl_pf_reset_stats(pf);
656 ixl_update_stats_counters(pf);
657 ixl_add_hw_stats(pf);
/* Force a fresh link query and push the result to iflib */
659 hw->phy.get_link_info = true;
660 i40e_get_link_status(hw, &pf->link_up);
661 ixl_update_link_status(pf);
664 ixl_initialize_sriov(pf);
/* iWARP client attach: only when the NVM advertises iwarp capability, the
 * tunable enables it, and MSI-X vectors were actually reserved for it. */
668 if (hw->func_caps.iwarp && ixl_enable_iwarp) {
669 pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
670 if (pf->iw_enabled) {
671 error = ixl_iw_pf_attach(pf);
674 "interfacing to iwarp driver failed: %d\n",
678 device_printf(dev, "iWARP ready\n");
681 "iwarp disabled on this device (no msix vectors)\n");
683 pf->iw_enabled = false;
684 device_printf(dev, "The device is not iWARP enabled\n");
688 INIT_DBG_DEV(dev, "end");
692 INIT_DEBUGOUT("end: error %d", error);
693 /* ixl_if_detach() is called on error from this */
/*
 * ixl_if_detach - iflib detach handler.
 *
 * Refuses to detach while the iWARP client or SR-IOV VFs are still in use
 * (returning busy in those cases — the returns are elided in this listing),
 * then tears down in reverse attach order: media list, LAN HMC, admin queue
 * and its interrupt, queue manager, PCI resources, and MAC filter list.
 */
698 ixl_if_detach(if_ctx_t ctx)
700 struct ixl_pf *pf = iflib_get_softc(ctx);
701 struct ixl_vsi *vsi = &pf->vsi;
702 struct i40e_hw *hw = &pf->hw;
703 device_t dev = pf->dev;
704 enum i40e_status_code status;
705 #if defined(PCI_IOV) || defined(IXL_IW)
709 INIT_DBG_DEV(dev, "begin");
712 if (ixl_enable_iwarp && pf->iw_enabled) {
713 error = ixl_iw_pf_detach(pf);
714 if (error == EBUSY) {
715 device_printf(dev, "iwarp in use; stop it first.\n");
721 error = pci_iov_detach(dev);
723 device_printf(dev, "SR-IOV in use; detach first.\n");
727 /* Remove all previously allocated media types */
728 ifmedia_removeall(vsi->media);
/* Only shut down the HMC if attach got far enough to create its objects */
730 /* Shutdown LAN HMC */
731 if (hw->hmc.hmc_obj) {
732 status = i40e_shutdown_lan_hmc(hw);
735 "i40e_shutdown_lan_hmc() failed with status %s\n",
736 i40e_stat_str(hw, status));
739 /* Shutdown admin queue */
740 ixl_disable_intr0(hw);
741 status = i40e_shutdown_adminq(hw);
744 "i40e_shutdown_adminq() failed with status %s\n",
745 i40e_stat_str(hw, status));
747 ixl_pf_qmgr_destroy(&pf->qmgr);
748 ixl_free_pci_resources(pf);
749 ixl_free_mac_filters(vsi);
750 INIT_DBG_DEV(dev, "end");
/*
 * ixl_if_shutdown - iflib shutdown handler.
 * Currently a stub: logs entry and returns; the TODOs below document the
 * intended stop + low-power sequence that has not been implemented.
 */
754 /* TODO: Do shutdown-specific stuff here */
756 ixl_if_shutdown(if_ctx_t ctx)
760 INIT_DEBUGOUT("ixl_if_shutdown: begin");
762 /* TODO: Call ixl_if_stop()? */
764 /* TODO: Then setup low power mode */
/*
 * ixl_if_suspend - iflib suspend handler.
 * Stub mirroring ixl_if_shutdown: logs entry; stop/low-power work is TODO.
 */
770 ixl_if_suspend(if_ctx_t ctx)
774 INIT_DEBUGOUT("ixl_if_suspend: begin");
776 /* TODO: Call ixl_if_stop()? */
778 /* TODO: Then setup low power mode */
/*
 * ixl_if_resume - iflib resume handler.
 * After a D3->D0 transition, re-initializes the interface if it was up
 * (the re-init call itself is elided in this listing).
 */
784 ixl_if_resume(if_ctx_t ctx)
786 struct ifnet *ifp = iflib_get_ifp(ctx);
788 INIT_DEBUGOUT("ixl_if_resume: begin");
790 /* Read & clear wake-up registers */
792 /* Required after D3->D0 transition */
793 if (ifp->if_flags & IFF_UP)
/*
 * ixl_init_tx_rsqs - reset report-status bookkeeping for every TX queue.
 * Used on the descriptor-writeback path: zeroes the RS producer/consumer
 * indices and marks every tx_rsq slot invalid so no stale completion
 * entries survive a reinit.
 */
799 /* Set Report Status queue fields to 0 */
801 ixl_init_tx_rsqs(struct ixl_vsi *vsi)
803 if_softc_ctx_t scctx = vsi->shared;
804 struct ixl_tx_queue *tx_que;
807 for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
808 struct tx_ring *txr = &tx_que->txr;
810 txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
812 for (j = 0; j < scctx->isc_ntxd[0]; j++)
813 txr->tx_rsq[j] = QIDX_INVALID;
/*
 * ixl_init_tx_cidx - reset the processed-descriptor index on every TX
 * queue.  Head-writeback counterpart of ixl_init_tx_rsqs(); no tx_rsq
 * array exists on this path.
 */
818 ixl_init_tx_cidx(struct ixl_vsi *vsi)
820 struct ixl_tx_queue *tx_que;
823 for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
824 struct tx_ring *txr = &tx_que->txr;
826 txr->tx_cidx_processed = 0;
/*
 * ixl_if_init - iflib init handler (interface bring-up).
 *
 * Rebuilds HW structures if the admin queue has died (e.g. an external PF
 * reset), refreshes the MAC filter if the user set a locally-administered
 * address, programs the VSI (rings, HMC contexts), applies multicast
 * filters, configures MSI-X or legacy interrupt routing plus ITR, resets
 * per-queue TX completion state for the active completion mode, enables
 * the rings, and re-initializes the iWARP client when enabled.
 * NOTE(review): listing is elided — early returns on failure paths are not
 * visible here.
 */
831 ixl_if_init(if_ctx_t ctx)
833 struct ixl_pf *pf = iflib_get_softc(ctx);
834 struct ixl_vsi *vsi = &pf->vsi;
835 struct i40e_hw *hw = &pf->hw;
836 device_t dev = iflib_get_dev(ctx);
837 u8 tmpaddr[ETHER_ADDR_LEN];
841 * If the aq is dead here, it probably means something outside of the driver
842 * did something to the adapter, like a PF reset.
843 * So rebuild the driver's state here if that occurs.
845 if (!i40e_check_asq_alive(&pf->hw)) {
846 device_printf(dev, "Admin Queue is down; resetting...\n");
847 ixl_teardown_hw_structs(pf);
851 /* Get the latest mac address... User might use a LAA */
852 bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
/* If the ifnet's LLA differs from what HW has and is valid, swap the
 * old unicast filter for the new address and write it to the port. */
853 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
854 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
855 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
856 bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
857 ret = i40e_aq_mac_address_write(hw,
858 I40E_AQC_WRITE_TYPE_LAA_ONLY,
861 device_printf(dev, "LLA address change failed!!\n");
864 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
867 iflib_set_mac(ctx, hw->mac.addr);
869 /* Prepare the VSI: rings, hmc contexts, etc... */
870 if (ixl_initialize_vsi(vsi)) {
871 device_printf(dev, "initialize vsi failed!!\n");
875 // TODO: Call iflib setup multicast filters here?
876 // It's called in ixgbe in D5213
877 ixl_if_multi_set(ctx);
882 /* Set up MSI/X routing and the ITR settings */
883 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
884 ixl_configure_queue_intr_msix(pf);
885 ixl_configure_itr(pf);
887 ixl_configure_legacy(pf);
/* Reset the TX completion tracking matching the active write-back mode */
889 if (vsi->enable_head_writeback)
890 ixl_init_tx_cidx(vsi);
892 ixl_init_tx_rsqs(vsi);
894 ixl_enable_rings(vsi);
896 i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
898 ixl_reconfigure_filters(vsi);
901 if (ixl_enable_iwarp && pf->iw_enabled) {
902 ret = ixl_iw_pf_init(pf);
905 "initialize iwarp failed, code %d\n", ret);
/*
 * ixl_if_stop - iflib stop handler.
 * Stops the iWARP client (when enabled) and disables the VSI's queue
 * interrupts and rings.  The reverse of ixl_if_init.
 */
911 ixl_if_stop(if_ctx_t ctx)
913 struct ixl_pf *pf = iflib_get_softc(ctx);
914 struct ixl_vsi *vsi = &pf->vsi;
916 INIT_DEBUGOUT("ixl_if_stop: begin");
918 // TODO: This may need to be reworked
920 /* Stop iWARP device */
921 if (ixl_enable_iwarp && pf->iw_enabled)
925 ixl_disable_rings_intr(vsi);
926 ixl_disable_rings(vsi);
/*
 * ixl_if_msix_intr_assign - allocate and route MSI-X vectors.
 *
 * Vector 0 is reserved for the admin queue.  Vectors 1..n are allocated
 * one per RX queue; each TX queue is then attached as a softirq to the
 * RX-queue interrupt chosen by (txq index % num_rx_queues), so TX never
 * consumes its own hardware vector.  On a mid-loop RX allocation failure
 * num_rx_queues is clamped to the vectors actually obtained and the "fail"
 * path frees everything allocated so far.
 */
930 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
932 struct ixl_pf *pf = iflib_get_softc(ctx);
933 struct ixl_vsi *vsi = &pf->vsi;
934 struct ixl_rx_queue *rx_que = vsi->rx_queues;
935 struct ixl_tx_queue *tx_que = vsi->tx_queues;
936 int err, i, rid, vector = 0;
939 /* Admin Que must use vector 0*/
941 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
942 ixl_msix_adminq, pf, 0, "aq");
944 iflib_irq_free(ctx, &vsi->irq);
945 device_printf(iflib_get_dev(ctx),
946 "Failed to register Admin que handler");
949 // TODO: Re-enable this at some point
950 // iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov");
952 /* Now set up the stations */
953 for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++, rx_que++) {
956 snprintf(buf, sizeof(buf), "rxq%d", i);
957 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
958 IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
959 /* XXX: Does the driver work as expected if there are fewer num_rx_queues than
960 * what's expected in the iflib context? */
962 device_printf(iflib_get_dev(ctx),
963 "Failed to allocate q int %d err: %d", i, err);
964 vsi->num_rx_queues = i + 1;
967 rx_que->msix = vector;
970 bzero(buf, sizeof(buf));
972 for (i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
973 snprintf(buf, sizeof(buf), "txq%d", i);
974 iflib_softirq_alloc_generic(ctx,
975 &vsi->rx_queues[i % vsi->num_rx_queues].que_irq,
976 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
978 /* TODO: Maybe call a strategy function for this to figure out which
979 * interrupts to map Tx queues to. I don't know if there's an immediately
980 * better way than this other than a user-supplied map, though. */
981 tx_que->msix = (i % vsi->num_rx_queues) + 1;
/* fail: release the admin vector and every RX queue vector */
986 iflib_irq_free(ctx, &vsi->irq);
987 rx_que = vsi->rx_queues;
988 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
989 iflib_irq_free(ctx, &rx_que->que_irq);
/*
 * ixl_if_enable_intr - enable the admin-queue interrupt and every RX
 * queue interrupt.  Called by iflib (from iflib_init_locked, after
 * ixl_if_init).
 */
994 * Enable all interrupts
997 * iflib_init_locked, after ixl_if_init()
1000 ixl_if_enable_intr(if_ctx_t ctx)
1002 struct ixl_pf *pf = iflib_get_softc(ctx);
1003 struct ixl_vsi *vsi = &pf->vsi;
1004 struct i40e_hw *hw = vsi->hw;
1005 struct ixl_rx_queue *que = vsi->rx_queues;
1007 ixl_enable_intr0(hw);
1008 /* Enable queue interrupts */
1009 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
/* NOTE(review): this passes the queue index (rxr.me) where the other
 * enable/disable paths in this file pass (msix - 1) — the original TODO
 * below flags the same suspicion; confirm which index the register
 * expects. */
1010 /* TODO: Queue index parameter is probably wrong */
1011 ixl_enable_queue(hw, que->rxr.me);
/*
 * ixl_if_disable_intr - disable per-queue interrupts only.
 * The admin-queue (vector 0) interrupt stays active so link/admin events
 * still arrive.  Under MSI-X each RX vector is masked individually; under
 * legacy/MSI the queue interrupt linked list is emptied instead.
 */
1015 * Disable queue interrupts
1017 * Other interrupt causes need to remain active.
1020 ixl_if_disable_intr(if_ctx_t ctx)
1022 struct ixl_pf *pf = iflib_get_softc(ctx);
1023 struct ixl_vsi *vsi = &pf->vsi;
1024 struct i40e_hw *hw = vsi->hw;
1025 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1027 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1028 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1029 ixl_disable_queue(hw, rx_que->msix - 1);
1031 // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1032 // stops queues from triggering interrupts
1033 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
/*
 * ixl_if_rx_queue_intr_enable - re-arm the MSI-X vector for one RX queue.
 * Called by iflib after processing a queue's completions; the vector
 * number is converted to a zero-based queue-interrupt index (msix - 1,
 * since vector 0 belongs to the admin queue).
 */
1038 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1040 struct ixl_pf *pf = iflib_get_softc(ctx);
1041 struct ixl_vsi *vsi = &pf->vsi;
1042 struct i40e_hw *hw = vsi->hw;
1043 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
1045 ixl_enable_queue(hw, rx_que->msix - 1);
/*
 * ixl_if_tx_queue_intr_enable - re-arm the interrupt serving one TX queue.
 * TX queues share RX vectors (see ixl_if_msix_intr_assign), so tx_que->msix
 * holds the shared vector; (msix - 1) is the queue-interrupt index.
 */
1050 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1052 struct ixl_pf *pf = iflib_get_softc(ctx);
1053 struct ixl_vsi *vsi = &pf->vsi;
1054 struct i40e_hw *hw = vsi->hw;
1055 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1057 ixl_enable_queue(hw, tx_que->msix - 1);
/*
 * ixl_if_tx_queues_alloc - iflib TX queue setup callback.
 *
 * Allocates the driver's per-queue ixl_tx_queue array and, on the
 * descriptor-writeback path only, each queue's report-status array
 * (initialized to QIDX_INVALID).  Records the tail register offset and the
 * ring virtual/physical addresses iflib DMA'd for us (vaddrs/paddrs are
 * strided by ntxqs).  On failure, frees everything via
 * ixl_if_queues_free().
 */
1063 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1065 struct ixl_pf *pf = iflib_get_softc(ctx);
1066 struct ixl_vsi *vsi = &pf->vsi;
1067 if_softc_ctx_t scctx = vsi->shared;
1068 struct ixl_tx_queue *que;
1070 int i, j, error = 0;
1072 MPASS(vsi->num_tx_queues > 0);
1074 MPASS(vsi->num_tx_queues == ntxqsets);
1076 /* Allocate queue structure memory */
1077 if (!(vsi->tx_queues =
1078 (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1079 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1083 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1084 struct tx_ring *txr = &que->txr;
/* tx_rsq is only needed when completions are detected by scanning
 * descriptors (head-writeback mode has no report-status tracking). */
1089 if (!vsi->enable_head_writeback) {
1090 /* Allocate report status array */
1091 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1092 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1096 /* Init report status array */
1097 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1098 txr->tx_rsq[j] = QIDX_INVALID;
1100 /* get the virtual and physical address of the hardware queues */
1101 txr->tail = I40E_QTX_TAIL(txr->me);
1102 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1103 txr->tx_paddr = paddrs[i * ntxqs];
/* error unwind */
1109 ixl_if_queues_free(ctx);
/*
 * ixl_if_rx_queues_alloc - iflib RX queue setup callback.
 * RX counterpart of ixl_if_tx_queues_alloc: allocates the per-queue
 * ixl_rx_queue array and records each ring's tail register offset and the
 * iflib-provided ring addresses (strided by nrxqs).  Frees everything via
 * ixl_if_queues_free() on failure.
 */
1114 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1116 struct ixl_pf *pf = iflib_get_softc(ctx);
1117 struct ixl_vsi *vsi = &pf->vsi;
1118 struct ixl_rx_queue *que;
1121 MPASS(vsi->num_rx_queues > 0);
1123 MPASS(vsi->num_rx_queues == nrxqsets);
1125 /* Allocate queue structure memory */
1126 if (!(vsi->rx_queues =
1127 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1128 nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1129 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1134 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1135 struct rx_ring *rxr = &que->rxr;
1140 /* get the virtual and physical address of the hardware queues */
1141 rxr->tail = I40E_QRX_TAIL(rxr->me);
1142 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1143 rxr->rx_paddr = paddrs[i * nrxqs];
/* error unwind */
1149 ixl_if_queues_free(ctx);
/*
 * iflib callback: free all driver-side queue state.  Frees each per-queue
 * tx_rsq report-status array (only allocated when head writeback is
 * disabled -- the guard here is `enable_head_writeback`, which looks
 * inverted relative to the allocation condition in ixl_if_tx_queues_alloc;
 * NOTE(review): verify against the full upstream source, lines are elided
 * here), then the tx_queues and rx_queues arrays themselves.  Pointers are
 * NULLed after free so the function is safe to call on partial allocations.
 */
1154 ixl_if_queues_free(if_ctx_t ctx)
1156 struct ixl_pf *pf = iflib_get_softc(ctx);
1157 struct ixl_vsi *vsi = &pf->vsi;
1159 if (vsi->enable_head_writeback) {
1160 struct ixl_tx_queue *que;
1163 for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1164 struct tx_ring *txr = &que->txr;
1165 if (txr->tx_rsq != NULL) {
1166 free(txr->tx_rsq, M_IXL);
1172 if (vsi->tx_queues != NULL) {
1173 free(vsi->tx_queues, M_IXL);
1174 vsi->tx_queues = NULL;
1176 if (vsi->rx_queues != NULL) {
1177 free(vsi->rx_queues, M_IXL);
1178 vsi->rx_queues = NULL;
/*
 * Propagate the adapter's current link state to the network stack.
 * On a link-up edge (link_active was FALSE): mark active, translate the AQ
 * link speed to a baudrate, notify iflib (LINK_STATE_UP), log the link-up
 * message, and broadcast the state to VFs.  On a link-down edge: mark
 * inactive, notify iflib (LINK_STATE_DOWN, baudrate 0), and broadcast.
 * NOTE(review): the surrounding condition that selects the up/down branch
 * is elided in this excerpt.
 */
1183 ixl_update_link_status(struct ixl_pf *pf)
1185 struct ixl_vsi *vsi = &pf->vsi;
1186 struct i40e_hw *hw = &pf->hw;
1190 if (vsi->link_active == FALSE) {
1191 vsi->link_active = TRUE;
1192 baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1193 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1194 ixl_link_up_msg(pf);
1196 ixl_broadcast_link_state(pf);
1200 } else { /* Link down */
1201 if (vsi->link_active == TRUE) {
1202 vsi->link_active = FALSE;
1203 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1205 ixl_broadcast_link_state(pf);
/*
 * Drain the Admin Queue: repeatedly clean ARQ elements (up to IXL_ADM_LIMIT
 * iterations or until none pending) and dispatch by opcode:
 *   - get_link_status   -> ixl_link_event()
 *   - send_msg_to_pf    -> ixl_handle_vf_msg() (SR-IOV VF mailbox)
 *   - event_lan_overflow-> log only (no-drop queues not configured)
 * The event buffer is a temporary M_NOWAIT allocation, freed before return.
 * On exit the ADMINQ cause bit in PFINT_ICR0_ENA is re-set so the admin
 * interrupt can fire again.  *pending is updated by i40e_clean_arq_element.
 * NOTE(review): switch/do-while scaffolding, break statements, and the
 * error return for the failed allocation are elided in this excerpt.
 */
1212 ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
1214 enum i40e_status_code status = I40E_SUCCESS;
1215 struct i40e_arq_event_info event;
1216 struct i40e_hw *hw = &pf->hw;
1217 device_t dev = pf->dev;
1221 event.buf_len = IXL_AQ_BUF_SZ;
1222 event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
1223 if (!event.msg_buf) {
1224 device_printf(dev, "%s: Unable to allocate memory for Admin"
1225 " Queue event!\n", __func__);
1229 /* clean and process any events */
1231 status = i40e_clean_arq_element(hw, &event, pending);
1234 opcode = LE16_TO_CPU(event.desc.opcode);
1235 ixl_dbg(pf, IXL_DBG_AQ,
1236 "Admin Queue event: %#06x\n", opcode);
1238 case i40e_aqc_opc_get_link_status:
1239 ixl_link_event(pf, &event);
1241 case i40e_aqc_opc_send_msg_to_pf:
1243 ixl_handle_vf_msg(pf, &event);
1247 * This should only occur on no-drop queues, which
1248 * aren't currently configured.
1250 case i40e_aqc_opc_event_lan_overflow:
1251 device_printf(dev, "LAN overflow event\n");
1256 } while (*pending && (loop++ < IXL_ADM_LIMIT));
1258 free(event.msg_buf, M_IXL);
1260 /* Re-enable admin queue interrupt cause */
1261 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1262 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1263 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * iflib admin task: handle deferred administrative work.
 * In order: finish an EMP reset if one is in progress, handle a pending
 * MDD (malicious driver detection) event, kick the deferred IOV interrupt
 * when a VF reset was requested, then drain the admin queue and refresh
 * link state.  If messages remain pending the task re-arms itself via
 * iflib_admin_intr_deferred(); otherwise interrupt cause 0 is re-enabled.
 * NOTE(review): the `pending` declaration and the if/else around the
 * reschedule are elided in this excerpt.
 */
1269 ixl_if_update_admin_status(if_ctx_t ctx)
1271 struct ixl_pf *pf = iflib_get_softc(ctx);
1272 struct i40e_hw *hw = &pf->hw;
1275 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
1276 ixl_handle_empr_reset(pf);
1278 if (pf->state & IXL_PF_STATE_MDD_PENDING)
1279 ixl_handle_mdd_event(pf);
1282 if (pf->state & IXL_PF_STATE_VF_RESET_REQ)
1283 iflib_iov_intr_deferred(ctx);
1286 ixl_process_adminq(pf, &pending);
1287 ixl_update_link_status(pf);
1290 * If there are still messages to process, reschedule ourselves.
1291 * Otherwise, re-enable our interrupt and go to sleep.
1294 iflib_admin_intr_deferred(ctx);
1296 ixl_enable_intr0(hw);
/*
 * iflib callback: sync the hardware multicast filter list with the stack.
 * If the interface has MAX_MULTICAST_ADDR (or more) groups joined, fall
 * back to multicast-promiscuous mode instead of exact filters; otherwise
 * re-apply an exact MC filter per address via if_multi_apply() and push
 * them to hardware with ixl_add_hw_filters().
 * NOTE(review): the early return taken on the promiscuous path and the
 * existing-filter deletion call are elided in this excerpt.
 */
1300 ixl_if_multi_set(if_ctx_t ctx)
1302 struct ixl_pf *pf = iflib_get_softc(ctx);
1303 struct ixl_vsi *vsi = &pf->vsi;
1304 struct i40e_hw *hw = vsi->hw;
1305 int mcnt = 0, flags;
1307 IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1309 mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
1310 /* delete existing MC filters */
1313 if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1314 i40e_aq_set_vsi_multicast_promiscuous(hw,
1315 vsi->seid, TRUE, NULL);
1318 /* (re-)install filters for all mcast addresses */
1319 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
1322 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1323 ixl_add_hw_filters(vsi, flags, mcnt);
1326 IOCTL_DEBUGOUT("ixl_if_multi_set: end");
/*
 * iflib callback: validate and apply a new MTU.  Rejects any MTU whose
 * resulting frame (MTU + Ethernet header + CRC + VLAN tag) would exceed
 * IXL_MAX_FRAME; otherwise stores the new max frame size in the shared
 * softc context.  (The error returns are elided in this excerpt.)
 */
1330 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1332 struct ixl_pf *pf = iflib_get_softc(ctx);
1333 struct ixl_vsi *vsi = &pf->vsi;
1335 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1336 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1337 ETHER_VLAN_ENCAP_LEN)
1340 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1341 ETHER_VLAN_ENCAP_LEN;
/*
 * iflib callback: report current media status (SIOCGIFMEDIA backend).
 * Marks the media valid/active, then maps the firmware-reported PHY type
 * (hw->phy.link_info.phy_type) to the closest ifmedia subtype.  The link
 * is always reported full-duplex.  Flow-control (TX/RX pause) status from
 * autonegotiation is appended at the end.
 * NOTE(review): the link-down early return and the `break` after each case
 * are elided in this excerpt; each case is a single-subtype mapping in the
 * full source.
 */
1347 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1349 struct ixl_pf *pf = iflib_get_softc(ctx);
1350 struct i40e_hw *hw = &pf->hw;
1352 INIT_DEBUGOUT("ixl_media_status: begin");
1354 ifmr->ifm_status = IFM_AVALID;
1355 ifmr->ifm_active = IFM_ETHER;
1361 ifmr->ifm_status |= IFM_ACTIVE;
1362 /* Hardware is always full-duplex */
1363 ifmr->ifm_active |= IFM_FDX;
1365 switch (hw->phy.link_info.phy_type) {
1367 case I40E_PHY_TYPE_100BASE_TX:
1368 ifmr->ifm_active |= IFM_100_TX;
1371 case I40E_PHY_TYPE_1000BASE_T:
1372 ifmr->ifm_active |= IFM_1000_T;
1374 case I40E_PHY_TYPE_1000BASE_SX:
1375 ifmr->ifm_active |= IFM_1000_SX;
1377 case I40E_PHY_TYPE_1000BASE_LX:
1378 ifmr->ifm_active |= IFM_1000_LX;
1380 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1381 ifmr->ifm_active |= IFM_1000_T;
1384 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1385 ifmr->ifm_active |= IFM_10G_TWINAX;
1387 case I40E_PHY_TYPE_10GBASE_SR:
1388 ifmr->ifm_active |= IFM_10G_SR;
1390 case I40E_PHY_TYPE_10GBASE_LR:
1391 ifmr->ifm_active |= IFM_10G_LR;
1393 case I40E_PHY_TYPE_10GBASE_T:
1394 ifmr->ifm_active |= IFM_10G_T;
1396 case I40E_PHY_TYPE_XAUI:
1397 case I40E_PHY_TYPE_XFI:
1398 ifmr->ifm_active |= IFM_10G_TWINAX;
1400 case I40E_PHY_TYPE_10GBASE_AOC:
1401 ifmr->ifm_active |= IFM_10G_AOC;
1404 case I40E_PHY_TYPE_25GBASE_KR:
1405 ifmr->ifm_active |= IFM_25G_KR;
1407 case I40E_PHY_TYPE_25GBASE_CR:
1408 ifmr->ifm_active |= IFM_25G_CR;
1410 case I40E_PHY_TYPE_25GBASE_SR:
1411 ifmr->ifm_active |= IFM_25G_SR;
1413 case I40E_PHY_TYPE_25GBASE_LR:
1414 ifmr->ifm_active |= IFM_25G_LR;
1416 case I40E_PHY_TYPE_25GBASE_AOC:
1417 ifmr->ifm_active |= IFM_25G_AOC;
1419 case I40E_PHY_TYPE_25GBASE_ACC:
1420 ifmr->ifm_active |= IFM_25G_ACC;
1423 case I40E_PHY_TYPE_40GBASE_CR4:
1424 case I40E_PHY_TYPE_40GBASE_CR4_CU:
1425 ifmr->ifm_active |= IFM_40G_CR4;
1427 case I40E_PHY_TYPE_40GBASE_SR4:
1428 ifmr->ifm_active |= IFM_40G_SR4;
1430 case I40E_PHY_TYPE_40GBASE_LR4:
1431 ifmr->ifm_active |= IFM_40G_LR4;
1433 case I40E_PHY_TYPE_XLAUI:
1434 ifmr->ifm_active |= IFM_OTHER;
1436 case I40E_PHY_TYPE_1000BASE_KX:
1437 ifmr->ifm_active |= IFM_1000_KX;
1439 case I40E_PHY_TYPE_SGMII:
1440 ifmr->ifm_active |= IFM_1000_SGMII;
1442 /* ERJ: What's the difference between these? */
1443 case I40E_PHY_TYPE_10GBASE_CR1_CU:
1444 case I40E_PHY_TYPE_10GBASE_CR1:
1445 ifmr->ifm_active |= IFM_10G_CR1;
1447 case I40E_PHY_TYPE_10GBASE_KX4:
1448 ifmr->ifm_active |= IFM_10G_KX4;
1450 case I40E_PHY_TYPE_10GBASE_KR:
1451 ifmr->ifm_active |= IFM_10G_KR;
1453 case I40E_PHY_TYPE_SFI:
1454 ifmr->ifm_active |= IFM_10G_SFI;
1456 /* Our single 20G media type */
1457 case I40E_PHY_TYPE_20GBASE_KR2:
1458 ifmr->ifm_active |= IFM_20G_KR2;
1460 case I40E_PHY_TYPE_40GBASE_KR4:
1461 ifmr->ifm_active |= IFM_40G_KR4;
1463 case I40E_PHY_TYPE_XLPPI:
1464 case I40E_PHY_TYPE_40GBASE_AOC:
1465 ifmr->ifm_active |= IFM_40G_XLPPI;
1467 /* Unknown to driver */
1469 ifmr->ifm_active |= IFM_UNKNOWN;
1472 /* Report flow control status as well */
1473 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1474 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1475 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1476 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
/*
 * iflib callback for media change requests (SIOCSIFMEDIA).  Validates that
 * the request is for Ethernet media, then reports that manual media changes
 * are unsupported -- the hardware/firmware chooses the media itself.
 * (The error returns are elided in this excerpt.)
 */
1480 ixl_if_media_change(if_ctx_t ctx)
1482 struct ifmedia *ifm = iflib_get_media(ctx);
1484 INIT_DEBUGOUT("ixl_media_change: begin");
1486 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1489 if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
/*
 * iflib callback: program unicast/multicast promiscuous mode from interface
 * flags.  IFF_PROMISC enables both; IFF_ALLMULTI -- or having the maximum
 * number of multicast addresses joined -- enables multicast promiscuous
 * only.  Both settings are pushed to firmware via the AQ; the unicast call
 * passes `true` for rx-only-on-VLAN-match ("set to the default VSI"
 * semantics per the i40e AQ API -- TODO confirm parameter meaning).
 * NOTE(review): the uni/multi assignment lines and the error-check return
 * are elided in this excerpt.
 */
1494 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1496 struct ixl_pf *pf = iflib_get_softc(ctx);
1497 struct ixl_vsi *vsi = &pf->vsi;
1498 struct ifnet *ifp = iflib_get_ifp(ctx);
1499 struct i40e_hw *hw = vsi->hw;
1501 bool uni = FALSE, multi = FALSE;
1503 if (flags & IFF_PROMISC)
1505 else if (flags & IFF_ALLMULTI ||
1506 if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
1509 err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1510 vsi->seid, uni, NULL, true);
1513 err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1514 vsi->seid, multi, NULL);
/*
 * iflib per-queue timer callback.  The software-interrupt trigger for TX
 * queues is present but commented-out/questioned (iflib TX queues do not
 * use HW interrupts).  On queue 0 it defers the admin task and refreshes
 * the driver's statistics counters.
 * NOTE(review): the `mask` declaration, the qid-0 guard, and surrounding
 * commented-out code are partially elided in this excerpt; the wr32() line
 * appears to belong to the disabled sw-irq path.
 */
1519 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1521 struct ixl_pf *pf = iflib_get_softc(ctx);
1522 //struct i40e_hw *hw = &pf->hw;
1523 //struct ixl_tx_queue *que = &vsi->tx_queues[qid];
1528 ** Check status of the queues
1530 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1531 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1533 /* If queue param has outstanding work, trigger sw irq */
1534 // TODO: TX queues in iflib don't use HW interrupts; does this do anything?
1536 wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask);
1542 /* Fire off the adminq task */
1543 iflib_admin_intr_deferred(ctx);
1546 ixl_update_stats_counters(pf);
/*
 * iflib callback: a VLAN `vtag` was registered on the interface.  Rejects
 * tag 0 and tags above 4095 (valid 802.1Q VID range is 1-4094/4095), then
 * installs a MAC+VLAN filter for the port MAC address with that tag.
 * NOTE(review): the early return and the num_vlans bookkeeping are elided
 * in this excerpt.
 */
1550 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1552 struct ixl_pf *pf = iflib_get_softc(ctx);
1553 struct ixl_vsi *vsi = &pf->vsi;
1554 struct i40e_hw *hw = vsi->hw;
1556 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1560 ixl_add_filter(vsi, hw->mac.addr, vtag);
/*
 * iflib callback: a VLAN `vtag` was unregistered.  Mirror of
 * ixl_if_vlan_register(): validates the tag range and removes the
 * corresponding MAC+VLAN filter for the port MAC address.
 * NOTE(review): the early return and num_vlans bookkeeping are elided
 * in this excerpt.
 */
1564 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1566 struct ixl_pf *pf = iflib_get_softc(ctx);
1567 struct ixl_vsi *vsi = &pf->vsi;
1568 struct i40e_hw *hw = vsi->hw;
1570 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1574 ixl_del_filter(vsi, hw->mac.addr, vtag);
/*
 * iflib callback: return an interface statistic (if_get_counter backend).
 * Maps each ift_counter enum to the corresponding VSI soft counter;
 * collisions are reported as 0 implicitly (impossible on 10G/40G Ethernet,
 * full-duplex only); anything unhandled falls through to the stack default
 * via if_get_counter_default().
 * NOTE(review): the switch keyword, per-case return for COLLISIONS, and the
 * default label are elided in this excerpt.
 */
1578 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1580 struct ixl_pf *pf = iflib_get_softc(ctx);
1581 struct ixl_vsi *vsi = &pf->vsi;
1582 if_t ifp = iflib_get_ifp(ctx);
1585 case IFCOUNTER_IPACKETS:
1586 return (vsi->ipackets);
1587 case IFCOUNTER_IERRORS:
1588 return (vsi->ierrors);
1589 case IFCOUNTER_OPACKETS:
1590 return (vsi->opackets);
1591 case IFCOUNTER_OERRORS:
1592 return (vsi->oerrors);
1593 case IFCOUNTER_COLLISIONS:
1594 /* Collisions are by standard impossible in 40G/10G Ethernet */
1596 case IFCOUNTER_IBYTES:
1597 return (vsi->ibytes);
1598 case IFCOUNTER_OBYTES:
1599 return (vsi->obytes);
1600 case IFCOUNTER_IMCASTS:
1601 return (vsi->imcasts);
1602 case IFCOUNTER_OMCASTS:
1603 return (vsi->omcasts);
1604 case IFCOUNTER_IQDROPS:
1605 return (vsi->iqdrops);
1606 case IFCOUNTER_OQDROPS:
1607 return (vsi->oqdrops);
1608 case IFCOUNTER_NOPROTO:
1609 return (vsi->noproto);
1611 return (if_get_counter_default(ifp, cnt));
/*
 * iflib callback for VF Function-Level-Reset events.  Currently a stub:
 * logs via IXL_DEV_ERR and does nothing else (the TODO below notes that
 * ixl_handle_vflr() should eventually be called here).
 */
1616 ixl_if_vflr_handle(if_ctx_t ctx)
1618 IXL_DEV_ERR(iflib_get_dev(ctx), "");
1620 // TODO: call ixl_handle_vflr()
/*
 * iflib callback: service an SFP/QSFP module I2C read request (ifi2creq).
 * Fails when no read_i2c_byte method is configured (selected from the
 * i2c_access_method tunable -- presumably set during attach; confirm).
 * Reads req->len bytes one at a time into req->data starting at
 * req->offset on device req->dev_addr.
 * NOTE(review): the error returns (no-method case and per-byte failure)
 * are elided in this excerpt.
 */
1624 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1626 struct ixl_pf *pf = iflib_get_softc(ctx);
1628 if (pf->read_i2c_byte == NULL)
1631 for (int i = 0; i < req->len; i++)
1632 if (pf->read_i2c_byte(pf, req->offset + i,
1633 req->dev_addr, &req->data[i]))
/*
 * iflib callback: handle driver-private ioctls (SIOCGDRVSPEC et al.).
 * Only the I40E_NVM_ACCESS command is recognized, delegating NVM update
 * requests to ixl_handle_nvmupd_cmd(); other commands presumably return
 * an error (the else branch and return are elided in this excerpt).
 */
1639 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1641 struct ixl_pf *pf = iflib_get_softc(ctx);
1642 struct ifdrv *ifd = (struct ifdrv *)data;
1645 /* NVM update command */
1646 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
1647 error = ixl_handle_nvmupd_cmd(pf, ifd);
/*
 * if_multi_apply() iterator callback: add one multicast address to the
 * VSI's software filter list.  Skips non-link-layer (non-AF_LINK) entries;
 * returns the per-entry count expected by if_multi_apply (return lines
 * elided in this excerpt).  `arg` is the struct ixl_vsi.
 */
1655 ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
1657 struct ixl_vsi *vsi = arg;
1659 if (ifma->ifma_addr->sa_family != AF_LINK)
1661 ixl_add_mc_filter(vsi,
1662 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1667 * Sanity check and save off tunable values.
1670 ixl_save_pf_tunables(struct ixl_pf *pf)
1672 device_t dev = pf->dev;
1674 /* Save tunable information */
1675 pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1676 pf->dbg_mask = ixl_core_debug_mask;
1677 pf->hw.debug_mask = ixl_shared_debug_mask;
1678 pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1680 pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1681 pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1684 if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1685 pf->i2c_access_method = 0;
1687 pf->i2c_access_method = ixl_i2c_access_method;
1689 if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1690 device_printf(dev, "Invalid tx_itr value of %d set!\n",
1692 device_printf(dev, "tx_itr must be between %d and %d, "
1695 device_printf(dev, "Using default value of %d instead\n",
1697 pf->tx_itr = IXL_ITR_4K;
1699 pf->tx_itr = ixl_tx_itr;
1701 if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1702 device_printf(dev, "Invalid rx_itr value of %d set!\n",
1704 device_printf(dev, "rx_itr must be between %d and %d, "
1707 device_printf(dev, "Using default value of %d instead\n",
1709 pf->rx_itr = IXL_ITR_8K;
1711 pf->rx_itr = ixl_rx_itr;