/******************************************************************************

  Copyright (c) 2013-2019, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

static int      ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int);
static u64      ixl_max_aq_speed_to_value(u8);
static u8       ixl_convert_sysctl_aq_link_speed(u8, bool);
static void     ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static enum i40e_status_code ixl_set_lla(struct ixl_vsi *);
static const char * ixl_link_speed_string(u8 link_speed);


/* Sysctls */
static int      ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);


#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
        "None",
        "Rx",
        "Tx",
        "Full",
        "Priority",
        "Default"
};

static char *ixl_fec_string[3] = {
       "CL108 RS-FEC",
       "CL74 FC-FEC/BASE-R",
       "None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
        va_list args;

        if (!(mask & pf->dbg_mask))
                return;

        /* Re-implement device_printf() */
        device_print_prettyname(pf->dev);
        va_start(args, fmt);
        vprintf(fmt, args);
        va_end(args);
}
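
/*
 * Illustrative usage sketch (IXL_DBG_INFO is assumed to be one of the
 * enum ixl_dbg_mask flags from ixl_pf.h):
 *
 *     ixl_debug_core(pf, IXL_DBG_INFO, "num_queues: %d\n", vsi->num_queues);
 *
 * The message is printed only when the flag is also set in pf->dbg_mask.
 */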

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
        u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
        u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
        u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

        sbuf_printf(buf,
            "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
            hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
            hw->aq.api_maj_ver, hw->aq.api_min_ver,
            (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
            IXL_NVM_VERSION_HI_SHIFT,
            (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
            IXL_NVM_VERSION_LO_SHIFT,
            hw->nvm.eetrack,
            oem_ver, oem_build, oem_patch);
}
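
/*
 * Illustrative only: with made-up values, the string built above looks like
 * "fw 6.0.48442 api 1.7 nvm 6.01 etid 80002f68 oem 1.262.0".
 */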

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;
        struct sbuf *sbuf;

        sbuf = sbuf_new_auto();
        ixl_nvm_version_str(hw, sbuf);
        sbuf_finish(sbuf);
        device_printf(dev, "%s\n", sbuf_data(sbuf));
        sbuf_delete(sbuf);
}

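/*
 * The firmware signals recovery mode through the FWS1B field of GL_FWSTS;
 * any nonzero value there means the device is limited to recovery operations.
 */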
bool
ixl_fw_recovery_mode(struct ixl_pf *pf)
{
        return (rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK);
}

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;

        vsi->tx_itr_setting = pf->tx_itr;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct tx_ring  *txr = &que->txr;

                wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
                    vsi->tx_itr_setting);
                txr->itr = vsi->tx_itr_setting;
                txr->latency = IXL_AVE_LATENCY;
        }
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;

        vsi->rx_itr_setting = pf->rx_itr;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct rx_ring  *rxr = &que->rxr;

                wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
                    vsi->rx_itr_setting);
                rxr->itr = vsi->rx_itr_setting;
                rxr->latency = IXL_AVE_LATENCY;
        }
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
        ixl_configure_tx_itr(pf);
        ixl_configure_rx_itr(pf);
}
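
/*
 * Illustrative usage (sysctl names are assumptions based on the
 * ixl_sysctl_pf_tx_itr/ixl_sysctl_pf_rx_itr handlers declared above):
 *
 *     sysctl dev.ixl.0.tx_itr=122
 *     sysctl dev.ixl.0.rx_itr=62
 *
 * The next ixl_configure_itr() call (e.g. on interface init) propagates
 * the new pf->tx_itr/pf->rx_itr values to the queue ITR registers.
 */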

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: by the stack, as the init entry
 *  point in the network interface structure, and by the driver, as a
 *  hw/sw initialization routine to bring the device back to a
 *  consistent state.
 *
 *  Returns nothing; failures are reported via device_printf().
 **********************************************************************/
void
ixl_init_locked(struct ixl_pf *pf)
{
        struct i40e_hw  *hw = &pf->hw;
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ifnet    *ifp = vsi->ifp;
        device_t        dev = pf->dev;
        struct i40e_filter_control_settings     filter;

        INIT_DEBUGOUT("ixl_init_locked: begin");
        IXL_PF_LOCK_ASSERT(pf);

        if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
                device_printf(dev, "Running in recovery mode, only firmware update available\n");
                return;
        }

        ixl_stop_locked(pf);

        /*
         * If the aq is dead here, it probably means something outside of the driver
         * did something to the adapter, like a PF reset.
         * So rebuild the driver's state here if that occurs.
         */
        if (!i40e_check_asq_alive(&pf->hw)) {
                device_printf(dev, "Admin Queue is down; resetting...\n");
                ixl_teardown_hw_structs(pf);
                ixl_reset(pf);
        }

        /* Get the latest mac address... User might use a LAA */
        if (ixl_set_lla(vsi)) {
                device_printf(dev, "LLA address change failed!\n");
                return;
        }

        /* Set the various hardware offload abilities */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

        /* Set up the device filtering */
        bzero(&filter, sizeof(filter));
        filter.enable_ethtype = TRUE;
        filter.enable_macvlan = TRUE;
        filter.enable_fdir = FALSE;
        filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
        if (i40e_set_filter_control(hw, &filter))
                device_printf(dev, "i40e_set_filter_control() failed\n");

        /* Prepare the VSI: rings, hmc contexts, etc... */
        if (ixl_initialize_vsi(vsi)) {
                device_printf(dev, "initialize vsi failed!!\n");
                return;
        }

        /* Set up RSS */
        ixl_config_rss(pf);

        /* Set up MSI/X routing and the ITR settings */
        if (pf->msix > 1) {
                ixl_configure_queue_intr_msix(pf);
                ixl_configure_itr(pf);
        } else
                ixl_configure_legacy(pf);

        ixl_enable_rings(vsi);

        i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

        ixl_reconfigure_filters(vsi);

        /* Check if PROMISC or ALLMULTI flags have been set
         * by user before bringing interface up */
        ixl_set_promisc(vsi);

        /* And now turn on interrupts */
        ixl_enable_intr(vsi);

        /* Get link info */
        hw->phy.get_link_info = TRUE;
        i40e_get_link_status(hw, &pf->link_up);
        ixl_update_link_status(pf);

        /* Now inform the stack we're ready */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;

#ifdef IXL_IW
        if (ixl_enable_iwarp && pf->iw_enabled) {
                int ret = ixl_iw_pf_init(pf);
                if (ret)
                        device_printf(dev,
                            "initialize iwarp failed, code %d\n", ret);
        }
#endif
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
        struct i40e_aqc_list_capabilities_element_resp *buf;
        struct i40e_hw  *hw = &pf->hw;
        device_t        dev = pf->dev;
        int             error, len, i2c_intfc_num;
        u16             needed;
        bool            again = TRUE;

        if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
                hw->func_caps.iwarp = 0;
                return 0;
        }

        len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
        if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
            malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
                device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
        }

        /* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
            &needed, i40e_aqc_opc_list_func_capabilities, NULL);
        free(buf, M_DEVBUF);
        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
            (again == TRUE)) {
                /* retry once with a larger buffer */
                again = FALSE;
                len = needed;
                goto retry;
        } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                device_printf(dev, "capability discovery failed: %d\n",
                    pf->hw.aq.asq_last_status);
                return (ENODEV);
        }

#ifdef IXL_DEBUG
        device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
            "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
            hw->pf_id, hw->func_caps.num_vfs,
            hw->func_caps.num_msix_vectors,
            hw->func_caps.num_msix_vectors_vf,
            hw->func_caps.fd_filters_guaranteed,
            hw->func_caps.fd_filters_best_effort,
            hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp,
            hw->func_caps.base_queue);
#endif
        /*
         * Some devices have both MDIO and I2C; since this isn't reported
         * by the FW, check registers to see if an I2C interface exists.
         */
        i2c_intfc_num = ixl_find_i2c_interface(pf);
        if (i2c_intfc_num != -1)
                pf->has_i2c = true;

        /* Determine functions to use for driver I2C accesses */
        switch (pf->i2c_access_method) {
        case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
                if (hw->mac.type == I40E_MAC_XL710 &&
                    hw->aq.api_maj_ver == 1 &&
                    hw->aq.api_min_ver >= 7) {
                        pf->read_i2c_byte = ixl_read_i2c_byte_aq;
                        pf->write_i2c_byte = ixl_write_i2c_byte_aq;
                } else {
                        pf->read_i2c_byte = ixl_read_i2c_byte_reg;
                        pf->write_i2c_byte = ixl_write_i2c_byte_reg;
                }
                break;
        }
        case IXL_I2C_ACCESS_METHOD_AQ:
                pf->read_i2c_byte = ixl_read_i2c_byte_aq;
                pf->write_i2c_byte = ixl_write_i2c_byte_aq;
                break;
        case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
                pf->read_i2c_byte = ixl_read_i2c_byte_reg;
                pf->write_i2c_byte = ixl_write_i2c_byte_reg;
                break;
        case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
                pf->read_i2c_byte = ixl_read_i2c_byte_bb;
                pf->write_i2c_byte = ixl_write_i2c_byte_bb;
                break;
        default:
                /* Should not happen */
                device_printf(dev, "Error setting I2C access functions\n");
                break;
        }

        /* Print a subset of the capability information. */
        device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
            hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
            hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
            (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
            (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
            (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
            "MDIO shared");

        return (error);
}

void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        device_t        dev = vsi->dev;

        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                }
                else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        device_printf(dev,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        device_printf(dev,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        device_printf(dev,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        device_printf(dev,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}
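
/*
 * Illustrative only (interface name is an example): because of the
 * dependencies enforced above, toggling checksum offload from userland
 * also toggles TSO:
 *
 *     ifconfig ixl0 -txcsum   # disables TXCSUM and TSO4 together
 *     ifconfig ixl0 tso4      # re-enables TXCSUM as a prerequisite
 */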

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int err;

        /* Make sure to initialize the device to the complete list of
         * supported speeds on driver load, to ensure unloading and
         * reloading the driver will restore this value.
         */
        err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
        if (err) {
                /* Non-fatal error */
                device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
                    __func__, err);
                return;
        }

        pf->advertised_speed =
            ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
        enum i40e_status_code status = 0;
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;

        /* Shutdown LAN HMC */
        if (hw->hmc.hmc_obj) {
                status = i40e_shutdown_lan_hmc(hw);
                if (status) {
                        device_printf(dev,
                            "init: LAN HMC shutdown failure; status %d\n", status);
                        goto err_out;
                }
        }

        /* Shutdown admin queue */
        ixl_disable_intr0(hw);
        status = i40e_shutdown_adminq(hw);
        if (status)
                device_printf(dev,
                    "init: Admin Queue shutdown failure; status %d\n", status);

err_out:
        return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;
        u8 set_fc_err_mask;
        int error = 0;

        // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
        i40e_clear_hw(hw);
        error = i40e_pf_reset(hw);
        if (error) {
                device_printf(dev, "init: PF reset failure\n");
                error = EIO;
                goto err_out;
        }

        error = i40e_init_adminq(hw);
        if (error) {
                device_printf(dev, "init: Admin queue init failure;"
                    " status code %d\n", error);
                error = EIO;
                goto err_out;
        }

        i40e_clear_pxe_mode(hw);

        error = ixl_get_hw_capabilities(pf);
        if (error) {
                device_printf(dev, "init: Error retrieving HW capabilities;"
                    " status code %d\n", error);
                goto err_out;
        }

        error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp, 0, 0);
        if (error) {
                device_printf(dev, "init: LAN HMC init failed; status code %d\n",
                    error);
                error = EIO;
                goto err_out;
        }

        error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (error) {
                device_printf(dev, "init: LAN HMC config failed; status code %d\n",
                    error);
                error = EIO;
                goto err_out;
        }

        // XXX: possible fix for panic, but our failure recovery is still broken
        error = ixl_switch_config(pf);
        if (error) {
                device_printf(dev, "init: ixl_switch_config() failed: %d\n",
                     error);
                goto err_out;
        }

        error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
            NULL);
        if (error) {
                device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
                    " aq_err %d\n", error, hw->aq.asq_last_status);
                error = EIO;
                goto err_out;
        }

        error = i40e_set_fc(hw, &set_fc_err_mask, true);
        if (error) {
                device_printf(dev, "init: setting link flow control failed; retcode %d,"
                    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
                goto err_out;
        }

        // XXX: (Rebuild VSIs?)

        /* Firmware delay workaround */
        if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
            (hw->aq.fw_maj_ver < 4)) {
                i40e_msec_delay(75);
                error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
                if (error) {
                        device_printf(dev, "init: link restart failed, aq_err %d\n",
                            hw->aq.asq_last_status);
                        goto err_out;
                }
        }


        /* Re-enable admin queue interrupt */
        if (pf->msix > 1) {
                ixl_configure_intr0_msix(pf);
                ixl_enable_intr0(hw);
        }

err_out:
        return (error);
}

/*
** MSIX Interrupt Handlers and Tasklets
*/
void
ixl_handle_que(void *context, int pending)
{
        struct ixl_queue *que = context;
        struct ixl_vsi *vsi = que->vsi;
        struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        struct ifnet    *ifp = vsi->ifp;
        bool            more;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                more = ixl_rxeof(que, IXL_RX_LIMIT);
                IXL_TX_LOCK(txr);
                ixl_txeof(que);
                if (!drbr_empty(ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                IXL_TX_UNLOCK(txr);
                if (more) {
                        taskqueue_enqueue(que->tq, &que->task);
                        return;
                }
        }

        /* Re-enable queue interrupt */
        if (pf->msix > 1)
                ixl_enable_queue(hw, que->me);
        else
                ixl_enable_intr0(hw);
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct i40e_hw          *hw =  &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        struct tx_ring          *txr = &que->txr;
        u32                     icr0;
        bool                    more;

        ixl_disable_intr0(hw);

        pf->admin_irq++;

        /* Clear PBA at start of ISR if using legacy interrupts */
        if (pf->msix == 0)
                wr32(hw, I40E_PFINT_DYN_CTL0,
                    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));

        icr0 = rd32(hw, I40E_PFINT_ICR0);


#ifdef PCI_IOV
        if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
                taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
                taskqueue_enqueue(pf->tq, &pf->adminq);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                ++que->irqs;

                more = ixl_rxeof(que, IXL_RX_LIMIT);

                IXL_TX_LOCK(txr);
                ixl_txeof(que);
                if (!drbr_empty(vsi->ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                IXL_TX_UNLOCK(txr);

                if (more)
                        taskqueue_enqueue(que->tq, &que->task);
        }

        ixl_enable_intr0(hw);
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
        struct ixl_queue *que = arg;
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        bool            more_tx, more_rx;

        /* Protect against spurious interrupts */
        if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        /* There are drivers which disable auto-masking of interrupts,
         * which is a global setting for all ports. We have to make sure
         * to mask it to not lose IRQs */
        ixl_disable_queue(hw, que->me);

        ++que->irqs;

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        /*
        ** Make certain that if the stack
        ** has anything queued the task gets
        ** scheduled to handle it.
        */
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        ixl_set_queue_rx_itr(que);
        ixl_set_queue_tx_itr(que);

        if (more_tx || more_rx)
                taskqueue_enqueue(que->tq, &que->task);
        else
                ixl_enable_queue(hw, que->me);

        return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_adminq(void *arg)
{
        struct ixl_pf   *pf = arg;
        struct i40e_hw  *hw = &pf->hw;
        device_t        dev = pf->dev;
        u32             reg, mask, rstat_reg;
        bool            do_task = FALSE;

        ++pf->admin_irq;

        reg = rd32(hw, I40E_PFINT_ICR0);
        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /* Check on the cause */
        if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
                mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
                do_task = TRUE;
        }

        if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
                ixl_handle_mdd_event(pf);
                mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
        }

        if (reg & I40E_PFINT_ICR0_GRST_MASK) {
                device_printf(dev, "Reset Requested!\n");
                rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
                rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
                    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
                device_printf(dev, "Reset type: ");
                switch (rstat_reg) {
                /* These others might be handled similarly to an EMPR reset */
                case I40E_RESET_CORER:
                        printf("CORER\n");
                        break;
                case I40E_RESET_GLOBR:
                        printf("GLOBR\n");
                        break;
                case I40E_RESET_EMPR:
                        printf("EMPR\n");
                        atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
                        break;
                default:
                        printf("POR\n");
                        break;
                }
                /* overload admin queue task to check reset progress */
                do_task = TRUE;
        }

        if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
                device_printf(dev, "ECC Error detected!\n");
        }

        if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
                reg = rd32(hw, I40E_PFHMC_ERRORINFO);
                if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
                        device_printf(dev, "HMC Error detected!\n");
                        device_printf(dev, "INFO 0x%08x\n", reg);
                        reg = rd32(hw, I40E_PFHMC_ERRORDATA);
                        device_printf(dev, "DATA 0x%08x\n", reg);
                        wr32(hw, I40E_PFHMC_ERRORINFO, 0);
                }
        }

        if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
                device_printf(dev, "PCI Exception detected!\n");
        }

#ifdef PCI_IOV
        if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
                mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
                taskqueue_enqueue(pf->tq, &pf->vflr_task);
        }
#endif

        if (do_task)
                taskqueue_enqueue(pf->tq, &pf->adminq);
        else
                ixl_enable_intr0(hw);
}

void
ixl_set_promisc(struct ixl_vsi *vsi)
{
        struct ifnet    *ifp = vsi->ifp;
        struct i40e_hw  *hw = vsi->hw;
        int             err, mcnt = 0;
        bool            uni = FALSE, multi = FALSE;

        if (ifp->if_flags & IFF_PROMISC)
                uni = multi = TRUE;
        else if (ifp->if_flags & IFF_ALLMULTI)
                multi = TRUE;
        else { /* Need to count the multicast addresses */
                struct  ifmultiaddr *ifma;
                if_maddr_rlock(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        if (mcnt == MAX_MULTICAST_ADDR) {
                                multi = TRUE;
                                break;
                        }
                        mcnt++;
                }
                if_maddr_runlock(ifp);
        }

        err = i40e_aq_set_vsi_unicast_promiscuous(hw,
            vsi->seid, uni, NULL, TRUE);
        err = i40e_aq_set_vsi_multicast_promiscuous(hw,
            vsi->seid, multi, NULL);
        return;
}
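
/*
 * Illustrative only: setting IFF_PROMISC on the interface (e.g.
 * "ifconfig ixl0 promisc"; interface name is an example) makes the
 * routine above enable both unicast and multicast promiscuous mode
 * in the hardware on the next init.
 */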

/*********************************************************************
 *      Filter Routines
 *
 *      Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
        struct ifmultiaddr      *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct i40e_hw          *hw = vsi->hw;
        int                     mcnt = 0, flags;

        IOCTL_DEBUGOUT("ixl_add_multi: begin");

        if_maddr_rlock(ifp);
        /*
        ** First just get a count, to decide if
        ** we simply use multicast promiscuous.
        */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mcnt++;
        }
        if_maddr_runlock(ifp);

        if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
                /* delete existing MC filters */
                ixl_del_hw_filters(vsi, mcnt);
                i40e_aq_set_vsi_multicast_promiscuous(hw,
                    vsi->seid, TRUE, NULL);
                return;
        }

        mcnt = 0;
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                ixl_add_mc_filter(vsi,
                    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
                mcnt++;
        }
        if_maddr_runlock(ifp);
        if (mcnt > 0) {
                flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
                ixl_add_hw_filters(vsi, flags, mcnt);
        }

        IOCTL_DEBUGOUT("ixl_add_multi: end");
        return;
}

void
ixl_del_multi(struct ixl_vsi *vsi)
{
        struct ifnet            *ifp = vsi->ifp;
        struct ifmultiaddr      *ifma;
        struct ixl_mac_filter   *f;
        int                     mcnt = 0;
        bool            match = FALSE;

        IOCTL_DEBUGOUT("ixl_del_multi: begin");

        /* Search for removed multicast addresses */
        if_maddr_rlock(ifp);
        SLIST_FOREACH(f, &vsi->ftl, next) {
                if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
                        match = FALSE;
                        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                                if (ifma->ifma_addr->sa_family != AF_LINK)
                                        continue;
                                u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
                                if (cmp_etheraddr(f->macaddr, mc_addr)) {
                                        match = TRUE;
                                        break;
                                }
                        }
                        if (match == FALSE) {
                                f->flags |= IXL_FILTER_DEL;
                                mcnt++;
                        }
                }
        }
        if_maddr_runlock(ifp);

        if (mcnt > 0)
                ixl_del_hw_filters(vsi, mcnt);
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

void
ixl_local_timer(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct ifnet *ifp = pf->vsi.ifp;

        if (ixl_fw_recovery_mode(pf)) {
                if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                            ixl_stop_locked(pf);
                        atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE | IXL_PF_STATE_EMPR_RESETTING);
                        device_printf(pf->dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
                }
        }

        IXL_PF_LOCK_ASSERT(pf);

        /* Fire off the adminq task */
        taskqueue_enqueue(pf->tq, &pf->adminq);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                /* Update stats */
                ixl_update_stats_counters(pf);
        }

        if (ixl_queue_hang_check(&pf->vsi)) {
                /* Increment stat when a queue shows hung */
                pf->watchdog_events++;
        }

        callout_reset(&pf->timer, hz, ixl_local_timer, pf);
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        struct ifnet *ifp = pf->vsi.ifp;
        char *req_fec_string, *neg_fec_string;
        u8 fec_abilities;

        fec_abilities = hw->phy.link_info.req_fec_info;
        /* If both RS and KR are requested, only show RS */
        if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
                req_fec_string = ixl_fec_string[0];
        else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
                req_fec_string = ixl_fec_string[1];
        else
                req_fec_string = ixl_fec_string[2];

        if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
                neg_fec_string = ixl_fec_string[0];
        else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
                neg_fec_string = ixl_fec_string[1];
        else
                neg_fec_string = ixl_fec_string[2];

        log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
            ifp->if_xname,
            ixl_link_speed_string(hw->phy.link_info.link_speed),
            req_fec_string, neg_fec_string,
            (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
            (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
                hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
                ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
                ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
                ixl_fc_string[1] : ixl_fc_string[0]);
}
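
/*
 * Illustrative only (made-up values): the notice above renders like
 *
 *   ixl0: Link is up, 25 Gbps Full Duplex, Requested FEC: CL108 RS-FEC,
 *   Negotiated FEC: CL108 RS-FEC, Autoneg: True, Flow Control: None
 *
 * with the speed text supplied by ixl_link_speed_string().
 */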

/*
** Note: this routine updates the OS on the link state;
**      the real check of the hardware only happens with
**      a link interrupt.
*/
void
ixl_update_link_status(struct ixl_pf *pf)
{
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ifnet            *ifp = vsi->ifp;
        device_t                dev = pf->dev;

        if (pf->link_up) {
                if (vsi->link_active == FALSE) {
                        vsi->link_active = TRUE;
#if __FreeBSD_version >= 1100000
                        ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
#else
                        if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed));
#endif
                        if_link_state_change(ifp, LINK_STATE_UP);
                        ixl_link_up_msg(pf);
#ifdef PCI_IOV
                        ixl_broadcast_link_state(pf);
#endif
                }
        } else { /* Link down */
                if (vsi->link_active == TRUE) {
                        if (bootverbose)
                                device_printf(dev, "Link is Down\n");
                        if_link_state_change(ifp, LINK_STATE_DOWN);
                        vsi->link_active = FALSE;
#ifdef PCI_IOV
                        ixl_broadcast_link_state(pf);
#endif
                }
        }
}

/*********************************************************************
 *
 *  This routine stops all traffic on the adapter: it marks the
 *  interface as no longer active, stops the iWARP client if one is
 *  running, and disables the VSI's rings and their interrupts.
 *
 **********************************************************************/

void
ixl_stop_locked(struct ixl_pf *pf)
{
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ifnet    *ifp = vsi->ifp;

        INIT_DEBUGOUT("ixl_stop: begin\n");

        IXL_PF_LOCK_ASSERT(pf);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);

#ifdef IXL_IW
        /* Stop iWARP device */
        if (ixl_enable_iwarp && pf->iw_enabled)
                ixl_iw_pf_stop(pf);
#endif

        ixl_disable_rings_intr(vsi);
        ixl_disable_rings(vsi);
}

void
ixl_stop(struct ixl_pf *pf)
{
        IXL_PF_LOCK(pf);
        ixl_stop_locked(pf);
        IXL_PF_UNLOCK(pf);
}

/*********************************************************************
 *
 *  Setup legacy/MSI interrupt resources and handler for the PF
 *
 **********************************************************************/
int
ixl_setup_legacy(struct ixl_pf *pf)
{
        device_t        dev = pf->dev;
        int             error, rid = 0;

        if (pf->msix == 1)
                rid = 1;
        pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (pf->res == NULL) {
                device_printf(dev, "bus_alloc_resource_any() for"
                    " legacy/msi interrupt failed\n");
                return (ENXIO);
        }

        /* Set the handler function */
        error = bus_setup_intr(dev, pf->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixl_intr, pf, &pf->tag);
        if (error) {
                pf->res = NULL;
                device_printf(dev, "bus_setup_intr() for legacy/msi"
                    " interrupt handler failed, error %d\n", error);
                return (ENXIO);
        }
        error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
        if (error) {
                /* non-fatal */
                device_printf(dev, "bus_describe_intr() for legacy/msi"
                    " interrupt name failed, error %d\n", error);
        }

        return (0);
}

int
ixl_setup_adminq_tq(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int error = 0;

        /* Tasklet for Admin Queue interrupts */
        TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
#ifdef PCI_IOV
        /* VFLR Tasklet */
        TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif
        /* Create and start Admin Queue taskqueue */
        pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
            taskqueue_thread_enqueue, &pf->tq);
        if (!pf->tq) {
                device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
                return (ENOMEM);
        }
        error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
            device_get_nameunit(dev));
        if (error) {
                device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
                    error);
                taskqueue_free(pf->tq);
                return (error);
        }
        return (0);
}

int
ixl_setup_queue_tqs(struct ixl_vsi *vsi)
{
        struct ixl_queue *que = vsi->queues;
        device_t dev = vsi->dev;
#ifdef  RSS
        int             cpu_id = 0;
        cpuset_t        cpu_mask;
#endif

        /* Create queue tasks and start queue taskqueues */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
                TASK_INIT(&que->task, 0, ixl_handle_que, que);
                que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
                CPU_SETOF(cpu_id, &cpu_mask);
                taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
                    &cpu_mask, "%s (bucket %d)",
                    device_get_nameunit(dev), cpu_id);
#else
                taskqueue_start_threads(&que->tq, 1, PI_NET,
                    "%s (que %d)", device_get_nameunit(dev), que->me);
#endif
        }

        return (0);
}

void
ixl_free_adminq_tq(struct ixl_pf *pf)
{
        if (pf->tq) {
                taskqueue_free(pf->tq);
                pf->tq = NULL;
        }
}

void
ixl_free_queue_tqs(struct ixl_vsi *vsi)
{
        struct ixl_queue *que = vsi->queues;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                if (que->tq) {
                        taskqueue_free(que->tq);
                        que->tq = NULL;
                }
        }
}

int
ixl_setup_adminq_msix(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int rid, error = 0;

        /* Admin IRQ rid is 1, vector is 0 */
        rid = 1;
        /* Get interrupt resource from bus */
        pf->res = bus_alloc_resource_any(dev,
            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
        if (!pf->res) {
                device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
                    " interrupt failed [rid=%d]\n", rid);
                return (ENXIO);
        }
        /* Then associate interrupt with handler */
        error = bus_setup_intr(dev, pf->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixl_msix_adminq, pf, &pf->tag);
        if (error) {
                pf->res = NULL;
                device_printf(dev, "bus_setup_intr() for Admin Queue"
                    " interrupt handler failed, error %d\n", error);
                return (ENXIO);
        }
        error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
        if (error) {
                /* non-fatal */
                device_printf(dev, "bus_describe_intr() for Admin Queue"
                    " interrupt name failed, error %d\n", error);
        }
        pf->admvec = 0;

        return (0);
}

/*
 * Allocate interrupt resources from bus and associate an interrupt handler
 * to those for the VSI's queues.
 */
int
ixl_setup_queue_msix(struct ixl_vsi *vsi)
{
        device_t        dev = vsi->dev;
        struct          ixl_queue *que = vsi->queues;
        struct          tx_ring  *txr;
        int             error, rid, vector = 1;

        /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
        for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
                int cpu_id = i;
                rid = vector + 1;
                txr = &que->txr;
                que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (!que->res) {
                        device_printf(dev, "bus_alloc_resource_any() for"
                            " Queue %d interrupt failed [rid=%d]\n",
                            que->me, rid);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, que->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixl_msix_que, que, &que->tag);
                if (error) {
                        device_printf(dev, "bus_setup_intr() for Queue %d"
                            " interrupt handler failed, error %d\n",
                            que->me, error);
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
                        return (error);
                }
                error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
                if (error) {
                        device_printf(dev, "bus_describe_intr() for Queue %d"
                            " interrupt name failed, error %d\n",
                            que->me, error);
                }
                /* Bind the vector to a CPU */
#ifdef RSS
                cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
                error = bus_bind_intr(dev, que->res, cpu_id);
                if (error) {
                        device_printf(dev, "bus_bind_intr() for Queue %d"
                            " to CPU %d failed, error %d\n",
                            que->me, cpu_id, error);
                }
                que->msix = vector;
        }

        return (0);
}

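/*
 * Illustrative only: the MSI-X sizing below can be influenced from
 * loader.conf through driver tunables, e.g.
 *
 *     hw.ixl.enable_msix=1
 *     hw.ixl.max_queues=4
 *
 * (tunable names are assumptions based on pf->enable_msix/pf->max_queues;
 * see ixl(4) for the authoritative list).
 */
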
1394 /*
1395  * Allocate MSI/X vectors from the OS.
1396  * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
1397  */
1398 int
1399 ixl_init_msix(struct ixl_pf *pf)
1400 {
1401         device_t dev = pf->dev;
1402         struct i40e_hw *hw = &pf->hw;
1403 #ifdef IXL_IW
1404 #if __FreeBSD_version >= 1100000
1405         cpuset_t cpu_set;
1406 #endif
1407 #endif
1408         int auto_max_queues;
1409         int rid, want, vectors, queues, available;
1410 #ifdef IXL_IW
1411         int iw_want=0, iw_vectors;
1412
1413         pf->iw_msix = 0;
1414 #endif
1415
1416         /* Override by tuneable */
1417         if (!pf->enable_msix)
1418                 goto no_msix;
1419
1420         /* First try MSI/X */
1421         rid = PCIR_BAR(IXL_MSIX_BAR);
1422         pf->msix_mem = bus_alloc_resource_any(dev,
1423             SYS_RES_MEMORY, &rid, RF_ACTIVE);
1424         if (!pf->msix_mem) {
1425                 /* May not be enabled */
1426                 device_printf(pf->dev,
1427                     "Unable to map MSIX table\n");
1428                 goto no_msix;
1429         }
1430
1431         available = pci_msix_count(dev); 
1432         if (available < 2) {
1433                 /* system has msix disabled (0), or only one vector (1) */
1434                 device_printf(pf->dev, "Less than two MSI-X vectors available\n");
1435                 bus_release_resource(dev, SYS_RES_MEMORY,
1436                     rid, pf->msix_mem);
1437                 pf->msix_mem = NULL;
1438                 goto no_msix;
1439         }
1440
1441         /* Clamp max number of queues based on:
1442          * - # of MSI-X vectors available
1443          * - # of cpus available
1444          * - # of queues that can be assigned to the LAN VSI
1445          */
1446         auto_max_queues = min(mp_ncpus, available - 1);
1447         if (hw->mac.type == I40E_MAC_X722)
1448                 auto_max_queues = min(auto_max_queues, 128);
1449         else
1450                 auto_max_queues = min(auto_max_queues, 64);
1451
1452         /* Override with tunable value if tunable is less than autoconfig count */
1453         if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
1454                 queues = pf->max_queues;
1455         /* Use autoconfig amount if that's lower */
1456         else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
1457                 device_printf(dev, "ixl_max_queues (%d) is too large, using "
1458                     "autoconfig amount (%d)...\n",
1459                     pf->max_queues, auto_max_queues);
1460                 queues = auto_max_queues;
1461         }
1462         /* Limit maximum auto-configured queues to 8 if no user value is set */
1463         else
1464                 queues = min(auto_max_queues, 8);
1465
1466 #ifdef  RSS
1467         /* If we're doing RSS, clamp at the number of RSS buckets */
1468         if (queues > rss_getnumbuckets())
1469                 queues = rss_getnumbuckets();
1470 #endif
1471
1472         /*
1473         ** Want one vector (RX/TX pair) per queue
1474         ** plus an additional for the admin queue.
1475         */
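        /*
         * E.g. queues = 8 requests 9 vectors: vector 0 services the
         * admin queue; vectors 1..8 service the RX/TX queue pairs.
         */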
1476         want = queues + 1;
1477         if (want <= available)  /* Have enough */
1478                 vectors = want;
1479         else {
1480                 device_printf(pf->dev,
1481                     "MSIX Configuration Problem, "
1482                     "%d vectors available but %d wanted!\n",
1483                     available, want);
1484                 bus_release_resource(dev, SYS_RES_MEMORY, rid, pf->msix_mem);
1485                 pf->msix_mem = NULL;
1486                 goto no_msix; /* Will fall back to MSI/legacy setup */
1486         }
1487
1488 #ifdef IXL_IW
1489         if (ixl_enable_iwarp && hw->func_caps.iwarp) {
1490 #if __FreeBSD_version >= 1100000
1491                 if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0)
1492                         iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX);
1493 #endif
1494                 if (iw_want == 0)
1495                         iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX);
1496                 if (ixl_limit_iwarp_msix > 0)
1497                         iw_want = min(iw_want, ixl_limit_iwarp_msix);
1498                 else
1499                         iw_want = min(iw_want, 1);
1502
1503                 available -= vectors;
1504                 if (available > 0) {
1505                         iw_vectors = (available >= iw_want) ?
1506                                 iw_want : available;
1507                         vectors += iw_vectors;
1508                 } else
1509                         iw_vectors = 0;
1510         }
1511 #endif
1512
1513         ixl_set_msix_enable(dev);
1514         if (pci_alloc_msix(dev, &vectors) == 0) {
1515                 device_printf(pf->dev,
1516                     "Using MSIX interrupts with %d vectors\n", vectors);
1517                 pf->msix = vectors;
1518 #ifdef IXL_IW
1519                 if (ixl_enable_iwarp && hw->func_caps.iwarp) {
1521                         pf->iw_msix = iw_vectors;
1522                         device_printf(pf->dev,
1523                                         "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n",
1524                                         iw_vectors);
1525                 }
1526 #endif
1527
1528                 pf->vsi.num_queues = queues;
1529 #ifdef RSS
1530                 /*
1531                  * If we're doing RSS, the number of queues needs to
1532                  * match the number of RSS buckets that are configured.
1533                  *
1534                  * + If there's more queues than RSS buckets, we'll end
1535                  *   up with queues that get no traffic.
1536                  *
1537                  * + If there's more RSS buckets than queues, we'll end
1538                  *   up having multiple RSS buckets map to the same queue,
1539                  *   so there'll be some contention.
1540                  */
1541                 if (queues != rss_getnumbuckets()) {
1542                         device_printf(dev,
1543                             "%s: queues (%d) != RSS buckets (%d)"
1544                             "; performance will be impacted.\n",
1545                             __func__, queues, rss_getnumbuckets());
1546                 }
1547 #endif
1548                 return (vectors);
1549         }
1550 no_msix:
1551         vectors = pci_msi_count(dev);
1552         pf->vsi.num_queues = 1;
1553         pf->max_queues = 1;
1554         if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
1555                 device_printf(pf->dev, "Using an MSI interrupt\n");
1556         else {
1557                 vectors = 0;
1558                 device_printf(pf->dev, "Using a Legacy interrupt\n");
1559         }
1560         return (vectors);
1561 }
1562
1563 /*
1564  * Configure admin queue/misc interrupt cause registers in hardware.
1565  */
1566 void
1567 ixl_configure_intr0_msix(struct ixl_pf *pf)
1568 {
1569         struct i40e_hw *hw = &pf->hw;
1570         u32 reg;
1571
1572         /* First set up the adminq - vector 0 */
1573         wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
1574         rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
1575
1576         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
1577             I40E_PFINT_ICR0_ENA_GRST_MASK |
1578             I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
1579             I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
1580             I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
1581             I40E_PFINT_ICR0_ENA_VFLR_MASK |
1582             I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
1583             I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
1584         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1585
1586         /*
1587          * 0x7FF is the end of the queue list.
1588          * This means we won't use MSI-X vector 0 for a queue interrupt
1589          * in MSIX mode.
1590          */
1591         wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1592         /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
1593         wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
1594
1595         wr32(hw, I40E_PFINT_DYN_CTL0,
1596             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
1597             I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
1598
1599         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
1600 }
1601
1602 /*
1603  * Configure queue interrupt cause registers in hardware.
1604  */
1605 void
1606 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
1607 {
1608         struct i40e_hw  *hw = &pf->hw;
1609         struct ixl_vsi *vsi = &pf->vsi;
1610         u32             reg;
1611         u16             vector = 1;
1612
1613         for (int i = 0; i < vsi->num_queues; i++, vector++) {
1614                 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
1615                 /* First queue type is RX / 0 */
1616                 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
1617
1618                 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
1619                 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1620                 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1621                 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1622                 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1623                 wr32(hw, I40E_QINT_RQCTL(i), reg);
1624
1625                 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
1626                 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1627                 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1628                 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1629                 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
1630                 wr32(hw, I40E_QINT_TQCTL(i), reg);
1631         }
1632 }
1633
1634 /*
1635  * Configure for single-vector (MSI or legacy INTx) operation.
1636  */
1637 void
1638 ixl_configure_legacy(struct ixl_pf *pf)
1639 {
1640         struct i40e_hw  *hw = &pf->hw;
1641         struct ixl_vsi  *vsi = &pf->vsi;
1642         struct ixl_queue *que = vsi->queues;
1643         struct rx_ring  *rxr = &que->rxr;
1644         struct tx_ring  *txr = &que->txr;
1645         u32 reg;
1646
1647         /* Configure ITR */
1648         vsi->tx_itr_setting = pf->tx_itr;
1649         wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
1650             vsi->tx_itr_setting);
1651         txr->itr = vsi->tx_itr_setting;
1652
1653         vsi->rx_itr_setting = pf->rx_itr;
1654         wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
1655             vsi->rx_itr_setting);
1656         rxr->itr = vsi->rx_itr_setting;
1657
1658         /* Setup "other" causes */
1659         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
1660             | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
1661             | I40E_PFINT_ICR0_ENA_GRST_MASK
1662             | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
1663             | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
1664             | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
1665             | I40E_PFINT_ICR0_ENA_VFLR_MASK
1666             | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
1667             ;
1668         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1669
1670         /* No ITR for non-queue interrupts */
1671         wr32(hw, I40E_PFINT_STAT_CTL0,
1672             IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1673
1674         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
1675         wr32(hw, I40E_PFINT_LNKLST0, 0);
1676
1677         /* Associate the queue pair to the vector and enable the q int */
1678         reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
1679             | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
1680             | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1681         wr32(hw, I40E_QINT_RQCTL(0), reg);
1682
1683         reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
1684             | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
1685             | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
1686         wr32(hw, I40E_QINT_TQCTL(0), reg);
1687 }
1688
1689 int
1690 ixl_allocate_pci_resources(struct ixl_pf *pf)
1691 {
1692         int             rid;
1693         struct i40e_hw *hw = &pf->hw;
1694         device_t        dev = pf->dev;
1695
1696         /* Map BAR0 */
1697         rid = PCIR_BAR(0);
1698         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1699             &rid, RF_ACTIVE);
1700
1701         if (!(pf->pci_mem)) {
1702                 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
1703                 return (ENXIO);
1704         }
1705         /* Ensure proper PCI device operation */
1706         ixl_set_busmaster(dev);
1707
1708         /* Save off the PCI information */
1709         hw->vendor_id = pci_get_vendor(dev);
1710         hw->device_id = pci_get_device(dev);
1711         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1712         hw->subsystem_vendor_id =
1713             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1714         hw->subsystem_device_id =
1715             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1716
1717         hw->bus.device = pci_get_slot(dev);
1718         hw->bus.func = pci_get_function(dev);
1719
1720         /* Save off register access information */
1721         pf->osdep.mem_bus_space_tag =
1722                 rman_get_bustag(pf->pci_mem);
1723         pf->osdep.mem_bus_space_handle =
1724                 rman_get_bushandle(pf->pci_mem);
1725         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
1726         pf->osdep.flush_reg = I40E_GLGEN_STAT;
1727         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
1728
1729         pf->hw.back = &pf->osdep;
1730
1731         return (0);
1732 }
1733
1734 /*
1735  * Teardown and release the admin queue/misc vector
1736  * interrupt.
1737  */
1738 int
1739 ixl_teardown_adminq_msix(struct ixl_pf *pf)
1740 {
1741         device_t                dev = pf->dev;
1742         int                     rid, error = 0;
1743
1744         if (pf->admvec) /* we are doing MSI-X */
1745                 rid = pf->admvec + 1;
1746         else
1747                 rid = (pf->msix != 0) ? 1 : 0;
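        /* SYS_RES_IRQ rids are 1-based for MSI/MSI-X; legacy INTx uses rid 0. */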
1748
1749         if (pf->tag != NULL) {
1750                 error = bus_teardown_intr(dev, pf->res, pf->tag);
1751                 if (error) {
1752                         device_printf(dev, "bus_teardown_intr() for"
1753                             " interrupt 0 failed\n");
1754                         // return (ENXIO);
1755                 }
1756                 pf->tag = NULL;
1757         }
1758         if (pf->res != NULL) {
1759                 error = bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
1760                 if (error) {
1761                         device_printf(dev, "bus_release_resource() for"
1762                             " interrupt 0 failed [rid=%d]\n", rid);
1763                         // return (ENXIO);
1764                 }
1765                 pf->res = NULL;
1766         }
1767
1768         return (0);
1769 }
1770
1771 int
1772 ixl_teardown_queue_msix(struct ixl_vsi *vsi)
1773 {
1774         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
1775         struct ixl_queue        *que = vsi->queues;
1776         device_t                dev = vsi->dev;
1777         int                     rid, error = 0;
1778
1779         /* We may get here before stations are setup */
1780         if ((pf->msix < 2) || (que == NULL))
1781                 return (0);
1782
1783         /* Release all MSIX queue resources */
1784         for (int i = 0; i < vsi->num_queues; i++, que++) {
1785                 rid = que->msix + 1;
1786                 if (que->tag != NULL) {
1787                         error = bus_teardown_intr(dev, que->res, que->tag);
1788                         if (error) {
1789                                 device_printf(dev, "bus_teardown_intr() for"
1790                                     " Queue %d interrupt failed\n",
1791                                     que->me);
1792                                 // return (ENXIO);
1793                         }
1794                         que->tag = NULL;
1795                 }
1796                 if (que->res != NULL) {
1797                         error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1798                         if (error) {
1799                                 device_printf(dev, "bus_release_resource() for"
1800                                     " Queue %d interrupt failed [rid=%d]\n",
1801                                     que->me, rid);
1802                                 // return (ENXIO);
1803                         }
1804                         que->res = NULL;
1805                 }
1806         }
1807
1808         return (0);
1809 }
1810
1811 void
1812 ixl_free_pci_resources(struct ixl_pf *pf)
1813 {
1814         device_t                dev = pf->dev;
1815         int                     memrid;
1816
1817         ixl_teardown_queue_msix(&pf->vsi);
1818         ixl_teardown_adminq_msix(pf);
1819
1820         if (pf->msix > 0)
1821                 pci_release_msi(dev);
1822         
1823         memrid = PCIR_BAR(IXL_MSIX_BAR);
1824
1825         if (pf->msix_mem != NULL)
1826                 bus_release_resource(dev, SYS_RES_MEMORY,
1827                     memrid, pf->msix_mem);
1828
1829         if (pf->pci_mem != NULL)
1830                 bus_release_resource(dev, SYS_RES_MEMORY,
1831                     PCIR_BAR(0), pf->pci_mem);
1832
1833         return;
1834 }
1835
1836 void
1837 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
1838 {
1839         /* Display supported media types */
1840         if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
1841                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1842
1843         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
1844                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1845         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
1846                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1847         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
1848                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1849
1850         if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
1851                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_2500_T, 0, NULL);
1852
1853         if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
1854                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_5000_T, 0, NULL);
1855
1856         if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
1857             phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
1858             phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
1859                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
1860
1861         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
1862                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1863         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
1864                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1865         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
1866                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1867
1868         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
1869             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
1870             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
1871             phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
1872             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1873                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
1874         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
1875                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1876         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
1877                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
1878
1879         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
1880                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1881
1882         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
1883             || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
1884                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
1885         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
1886                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
1887         if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
1888                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1889         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
1890                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1891         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
1892                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1893
1894         if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
1895                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
1896
1897         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1898                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
1899         if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
1900                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
1901
1902         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
1903                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
1904         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
1905                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
1906         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
1907                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
1908         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
1909                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
1910         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
1911                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
1912         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
1913                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
1914 }
1915
1916 /*********************************************************************
1917  *
1918  *  Setup networking device structure and register an interface.
1919  *
1920  **********************************************************************/
1921 int
1922 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
1923 {
1924         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
1925         struct ifnet            *ifp;
1926         struct i40e_hw          *hw = vsi->hw;
1927         struct ixl_queue        *que = vsi->queues;
1928         struct i40e_aq_get_phy_abilities_resp abilities;
1929         enum i40e_status_code aq_error = 0;
1930
1931         INIT_DEBUGOUT("ixl_setup_interface: begin");
1932
1933         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1934         if (ifp == NULL) {
1935                 device_printf(dev, "can not allocate ifnet structure\n");
1936                 return (ENOMEM);
1937         }
1938         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1939         ifp->if_mtu = ETHERMTU;
1940         ifp->if_init = ixl_init;
1941         ifp->if_softc = vsi;
1942         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1943         ifp->if_ioctl = ixl_ioctl;
1944
1945 #if __FreeBSD_version >= 1100036
1946         if_setgetcounterfn(ifp, ixl_get_counter);
1947 #endif
1948
1949         ifp->if_transmit = ixl_mq_start;
1950
1951         ifp->if_qflush = ixl_qflush;
1952
1953         ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
1954
1955         vsi->max_frame_size =
1956             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1957             + ETHER_VLAN_ENCAP_LEN;
1958
1959         /* Set TSO limits */
1960         ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1961         ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1962         ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
1963
1964         /*
1965          * Tell the upper layer(s) we support long frames.
1966          */
1967         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1968
1969         ifp->if_capabilities |= IFCAP_HWCSUM;
1970         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1971         ifp->if_capabilities |= IFCAP_TSO;
1972         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1973         ifp->if_capabilities |= IFCAP_LRO;
1974
1975         /* VLAN capabilities */
1976         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1977                              |  IFCAP_VLAN_HWTSO
1978                              |  IFCAP_VLAN_MTU
1979                              |  IFCAP_VLAN_HWCSUM;
1980         ifp->if_capenable = ifp->if_capabilities;
1981
1982         /*
1983         ** Don't enable IFCAP_VLAN_HWFILTER by default: if VLANs are
1984         ** created on another pseudo device (e.g. lagg), VLAN events
1985         ** are not passed through and filtering breaks, whereas with
1986         ** HW FILTER off it works. If VLANs are used directly on the
1987         ** ixl interface, this can be enabled to get full hardware
1988         ** tag filtering.
1989         */
1990         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
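        /*
         * For example, hardware VLAN filtering can then be toggled from
         * userland with the standard ifconfig capability flags:
         *
         *   ifconfig ixl0 vlanhwfilter
         *   ifconfig ixl0 -vlanhwfilter
         */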
1991
1992         /*
1993          * Specify the media types supported by this adapter and register
1994          * callbacks to update media and link information
1995          */
1996         ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
1997                      ixl_media_status);
1998
1999         if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) == 0) {
2000                 aq_error = i40e_aq_get_phy_capabilities(hw,
2001                     FALSE, TRUE, &abilities, NULL);
2002                 /* May need delay to detect fiber correctly */
2003                 if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2004                         i40e_msec_delay(200);
2005                         aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2006                             TRUE, &abilities, NULL);
2007                 }
2008                 if (aq_error) {
2009                         if (aq_error == I40E_ERR_UNKNOWN_PHY)
2010                                 device_printf(dev, "Unknown PHY type detected!\n");
2011                         else
2012                                 device_printf(dev,
2013                                     "Error getting supported media types, err %d,"
2014                                     " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2015                 } else {
2016                         pf->supported_speeds = abilities.link_speed;
2017 #if __FreeBSD_version >= 1100000
2018                         ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
2019 #else
2020                         if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
2021 #endif
2022
2023                         ixl_add_ifmedia(vsi, hw->phy.phy_types);
2024                 }
2025         }
2026
2027         /* Use autoselect media by default */
2028         ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2029         ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2030
2031         ether_ifattach(ifp, hw->mac.addr);
2032
2033         return (0);
2034 }
2035
2036 /*
2037 ** Run when the Admin Queue gets a link state change interrupt.
2038 */
2039 void
2040 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2041 {
2042         struct i40e_hw  *hw = &pf->hw; 
2043         device_t dev = pf->dev;
2044         struct i40e_aqc_get_link_status *status =
2045             (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2046
2047         /* Request link status from adapter */
2048         hw->phy.get_link_info = TRUE;
2049         i40e_get_link_status(hw, &pf->link_up);
2050
2051         /* Print out message if an unqualified module is found */
2052         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2053             (pf->advertised_speed) &&
2054             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2055             (!(status->link_info & I40E_AQ_LINK_UP)))
2056                 device_printf(dev, "Link failed because "
2057                     "an unqualified module was detected!\n");
2058
2059         /* Update OS link info */
2060         ixl_update_link_status(pf);
2061 }
2062
2063 /*********************************************************************
2064  *
2065  *  Get Firmware Switch configuration
2066  *      - this will need to be more robust when more complex
2067  *        switch configurations are enabled.
2068  *
2069  **********************************************************************/
2070 int
2071 ixl_switch_config(struct ixl_pf *pf)
2072 {
2073         struct i40e_hw  *hw = &pf->hw; 
2074         struct ixl_vsi  *vsi = &pf->vsi;
2075         device_t        dev = vsi->dev;
2076         struct i40e_aqc_get_switch_config_resp *sw_config;
2077         u8      aq_buf[I40E_AQ_LARGE_BUF];
2078         int     ret;
2079         u16     next = 0;
2080
2081         memset(&aq_buf, 0, sizeof(aq_buf));
2082         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2083         ret = i40e_aq_get_switch_config(hw, sw_config,
2084             sizeof(aq_buf), &next, NULL);
2085         if (ret) {
2086                 device_printf(dev, "aq_get_switch_config() failed, error %d,"
2087                     " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
2088                 return (ret);
2089         }
2090         if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
2091                 device_printf(dev,
2092                     "Switch config: header reported: %d in structure, %d total\n",
2093                     sw_config->header.num_reported, sw_config->header.num_total);
2094                 for (int i = 0; i < sw_config->header.num_reported; i++) {
2095                         device_printf(dev,
2096                             "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2097                             sw_config->element[i].element_type,
2098                             sw_config->element[i].seid,
2099                             sw_config->element[i].uplink_seid,
2100                             sw_config->element[i].downlink_seid);
2101                 }
2102         }
2103         /* Simplified due to a single VSI */
2104         vsi->uplink_seid = sw_config->element[0].uplink_seid;
2105         vsi->downlink_seid = sw_config->element[0].downlink_seid;
2106         vsi->seid = sw_config->element[0].seid;
2107         return (ret);
2108 }
2109
2110 /*********************************************************************
2111  *
2112  *  Initialize the VSI:  this handles contexts, which means things
2113  *                       like the number of descriptors and buffer size;
2114  *                       the rings are also initialized through this function.
2115  *
2116  **********************************************************************/
2117 int
2118 ixl_initialize_vsi(struct ixl_vsi *vsi)
2119 {
2120         struct ixl_pf           *pf = vsi->back;
2121         struct ixl_queue        *que = vsi->queues;
2122         device_t                dev = vsi->dev;
2123         struct i40e_hw          *hw = vsi->hw;
2124         struct i40e_vsi_context ctxt;
2125         int                     tc_queues;
2126         int                     err = 0;
2127
2128         memset(&ctxt, 0, sizeof(ctxt));
2129         ctxt.seid = vsi->seid;
2130         if (pf->veb_seid != 0)
2131                 ctxt.uplink_seid = pf->veb_seid;
2132         ctxt.pf_num = hw->pf_id;
2133         err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2134         if (err) {
2135                 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
2136                     " aq_error %d\n", err, hw->aq.asq_last_status);
2137                 return (err);
2138         }
2139         ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
2140             "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2141             "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2142             "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2143             ctxt.uplink_seid, ctxt.vsi_number,
2144             ctxt.vsis_allocated, ctxt.vsis_unallocated,
2145             ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2146             ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2147         /*
2148         ** Set the queue and traffic class bits
2149         **  - when multiple traffic classes are supported
2150         **    this will need to be more robust.
2151         */
2152         ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2153         ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2154         /* In contig mode, que_mapping[0] is first queue index used by this VSI */
2155         ctxt.info.queue_mapping[0] = 0;
2156         /*
2157          * This VSI will only use traffic class 0; start traffic class 0's
2158          * queue allocation at queue 0, and assign it 2^tc_queues queues (though
2159          * the driver may not use all of them).
2160          */
2161         tc_queues = bsrl(pf->qtag.num_allocated);
2162         ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
2163             & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
2164             ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
2165             & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
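        /*
         * Example: if pf->qtag.num_allocated is 64, bsrl() returns the
         * index of the highest set bit, so tc_queues = 6 and TC0 is
         * sized for 2^6 = 64 queues.
         */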
2166
2167         /* Set VLAN receive stripping mode */
2168         ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2169         ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2170         if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2171                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2172         else
2173                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2174
2175 #ifdef IXL_IW
2176         /* Set TCP Enable for iWARP capable VSI */
2177         if (ixl_enable_iwarp && pf->iw_enabled) {
2178                 ctxt.info.valid_sections |=
2179                     htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
2180                 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
2181         }
2182 #endif
2183         /* Save VSI number and info for use later */
2184         vsi->vsi_num = ctxt.vsi_number;
2185         bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2186
2187         /* Reset VSI statistics */
2188         ixl_vsi_reset_stats(vsi);
2189
2190         ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2191
2192         err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2193         if (err) {
2194                 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
2195                     " aq_error %d\n", err, hw->aq.asq_last_status);
2196                 return (err);
2197         }
2198
2199         for (int i = 0; i < vsi->num_queues; i++, que++) {
2200                 struct tx_ring          *txr = &que->txr;
2201                 struct rx_ring          *rxr = &que->rxr;
2202                 struct i40e_hmc_obj_txq tctx;
2203                 struct i40e_hmc_obj_rxq rctx;
2204                 u32                     txctl;
2205                 u16                     size;
2206
2207                 /* Setup the HMC TX Context  */
2208                 size = que->num_tx_desc * sizeof(struct i40e_tx_desc);
2209                 bzero(&tctx, sizeof(tctx));
2210                 tctx.new_context = 1;
2211                 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2212                 tctx.qlen = que->num_tx_desc;
2213                 tctx.fc_ena = 0;        /* Disable FCoE */
2214                 /*
2215                  * This value needs to be pulled from the VSI that this queue
2216                  * is assigned to. The index into the array is the traffic class.
2217                  */
2218                 tctx.rdylist = vsi->info.qs_handle[0];
2219                 /*
2220                  * Set these to enable Head Writeback
2221                  * - Address is last entry in TX ring (reserved for HWB index)
2222                  * Leave these as 0 for Descriptor Writeback
2223                  */
2224                 if (vsi->enable_head_writeback) {
2225                         tctx.head_wb_ena = 1;
2226                         tctx.head_wb_addr = txr->dma.pa +
2227                             (que->num_tx_desc * sizeof(struct i40e_tx_desc));
2228                 }
2229                 tctx.rdylist_act = 0;
2230                 err = i40e_clear_lan_tx_queue_context(hw, i);
2231                 if (err) {
2232                         device_printf(dev, "Unable to clear TX context\n");
2233                         break;
2234                 }
2235                 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2236                 if (err) {
2237                         device_printf(dev, "Unable to set TX context\n");
2238                         break;
2239                 }
2240                 /* Associate the ring with this PF */
2241                 txctl = I40E_QTX_CTL_PF_QUEUE;
2242                 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2243                     I40E_QTX_CTL_PF_INDX_MASK);
2244                 wr32(hw, I40E_QTX_CTL(i), txctl);
2245                 ixl_flush(hw);
2246
2247                 /* Do ring (re)init */
2248                 ixl_init_tx_ring(que);
2249
2250                 /* Next setup the HMC RX Context  */
2251                 if (vsi->max_frame_size <= MCLBYTES)
2252                         rxr->mbuf_sz = MCLBYTES;
2253                 else
2254                         rxr->mbuf_sz = MJUMPAGESIZE;
2255
2256                 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2257
2258                 /* Set up an RX context for the HMC */
2259                 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2260                 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2261                 /* ignore header split for now */
2262                 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2263                 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2264                     vsi->max_frame_size : max_rxmax;
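                /*
                 * Example (values assumed): with 2KB clusters and a
                 * rx_buf_chain_len of 5, max_rxmax = 10240 bytes; a
                 * jumbo-MTU max_frame_size of ~9KB is below that, so
                 * rxmax = max_frame_size.
                 */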
2265                 rctx.dtype = 0;
2266                 rctx.dsize = 1;         /* do 32byte descriptors */
2267                 rctx.hsplit_0 = 0;      /* no header split */
2268                 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2269                 rctx.qlen = que->num_rx_desc;
2270                 rctx.tphrdesc_ena = 1;
2271                 rctx.tphwdesc_ena = 1;
2272                 rctx.tphdata_ena = 0;   /* Header Split related */
2273                 rctx.tphhead_ena = 0;   /* Header Split related */
2274                 rctx.lrxqthresh = 2;    /* Interrupt at <128 desc avail */
2275                 rctx.crcstrip = 1;
2276                 rctx.l2tsel = 1;
2277                 rctx.showiv = 1;        /* Strip inner VLAN header */
2278                 rctx.fc_ena = 0;        /* Disable FCoE */
2279                 rctx.prefena = 1;       /* Prefetch descriptors */
2280
2281                 err = i40e_clear_lan_rx_queue_context(hw, i);
2282                 if (err) {
2283                         device_printf(dev,
2284                             "Unable to clear RX context %d\n", i);
2285                         break;
2286                 }
2287                 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2288                 if (err) {
2289                         device_printf(dev, "Unable to set RX context %d\n", i);
2290                         break;
2291                 }
2292                 err = ixl_init_rx_ring(que);
2293                 if (err) {
2294                         device_printf(dev, "Fail in init_rx_ring %d\n", i);
2295                         break;
2296                 }
2297 #ifdef DEV_NETMAP
2298                 /* preserve queue */
2299                 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2300                         struct netmap_adapter *na = NA(vsi->ifp);
2301                         struct netmap_kring *kring = na->rx_rings[i];
2302                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2303                         wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2304                 } else
2305 #endif /* DEV_NETMAP */
2306                 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1);
2307         }
2308         return (err);
2309 }
2310
2311 void
2312 ixl_vsi_free_queues(struct ixl_vsi *vsi)
2313 {
2314         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
2315         struct ixl_queue        *que = vsi->queues;
2316
2317         if (vsi->queues == NULL)
2318                 return;
2319
2320         for (int i = 0; i < vsi->num_queues; i++, que++) {
2321                 struct tx_ring *txr = &que->txr;
2322                 struct rx_ring *rxr = &que->rxr;
2323         
2324                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2325                         continue;
2326                 IXL_TX_LOCK(txr);
2327                 if (txr->br)
2328                         buf_ring_free(txr->br, M_DEVBUF);
2329                 ixl_free_que_tx(que);
2330                 if (txr->base)
2331                         i40e_free_dma_mem(&pf->hw, &txr->dma);
2332                 IXL_TX_UNLOCK(txr);
2333                 IXL_TX_LOCK_DESTROY(txr);
2334
2335                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2336                         continue;
2337                 IXL_RX_LOCK(rxr);
2338                 ixl_free_que_rx(que);
2339                 if (rxr->base)
2340                         i40e_free_dma_mem(&pf->hw, &rxr->dma);
2341                 IXL_RX_UNLOCK(rxr);
2342                 IXL_RX_LOCK_DESTROY(rxr);
2343         }
2344
2345         sysctl_ctx_free(&vsi->sysctl_ctx);
2346 }
2347
2348
2349 /*********************************************************************
2350  *
2351  *  Free all VSI structs.
2352  *
2353  **********************************************************************/
2354 void
2355 ixl_free_vsi(struct ixl_vsi *vsi)
2356 {
2357         /* Free station queues */
2358         ixl_vsi_free_queues(vsi);
2359         if (vsi->queues)
2360                 free(vsi->queues, M_DEVBUF);
2361
2362         /* Free VSI filter list */
2363         ixl_free_mac_filters(vsi);
2364 }
2365
2366 void
2367 ixl_free_mac_filters(struct ixl_vsi *vsi)
2368 {
2369         struct ixl_mac_filter *f;
2370
2371         while (!SLIST_EMPTY(&vsi->ftl)) {
2372                 f = SLIST_FIRST(&vsi->ftl);
2373                 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2374                 free(f, M_DEVBUF);
2375         }
2376
2377         vsi->num_hw_filters = 0;
2378 }
2379
2380 /*
2381  * Fill out fields in the queue struct and set up TX/RX memory and structs
2382  */
2383 static int
2384 ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index)
2385 {
2386         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2387         device_t dev = pf->dev;
2388         struct i40e_hw *hw = &pf->hw;
2389         struct tx_ring *txr = &que->txr;
2390         struct rx_ring *rxr = &que->rxr;
2391         int error = 0;
2392         int rsize, tsize;
2393
2394         que->num_tx_desc = vsi->num_tx_desc;
2395         que->num_rx_desc = vsi->num_rx_desc;
2396         que->me = index;
2397         que->vsi = vsi;
2398
2399         txr->que = que;
2400         txr->tail = I40E_QTX_TAIL(que->me);
2401
2402         /* Initialize the TX lock */
2403         snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2404             device_get_nameunit(dev), que->me);
2405         mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2406         /*
2407          * Create the TX descriptor ring
2408          *
2409          * In Head Writeback mode, the descriptor ring is one bigger
2410          * than the number of descriptors for space for the HW to
2411          * write back index of last completed descriptor.
2412          */
2413         if (vsi->enable_head_writeback) {
2414                 tsize = roundup2((que->num_tx_desc *
2415                     sizeof(struct i40e_tx_desc)) +
2416                     sizeof(u32), DBA_ALIGN);
2417         } else {
2418                 tsize = roundup2((que->num_tx_desc *
2419                     sizeof(struct i40e_tx_desc)), DBA_ALIGN);
2420         }
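        /*
         * E.g. with 1024 TX descriptors (16 bytes each) and head
         * writeback enabled, this is roundup2(16384 + 4, DBA_ALIGN);
         * assuming DBA_ALIGN is 128, tsize = 16512.
         */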
2421         if (i40e_allocate_dma_mem(hw,
2422             &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2423                 device_printf(dev,
2424                     "Unable to allocate TX Descriptor memory\n");
2425                 error = ENOMEM;
2426                 goto err_destroy_tx_mtx;
2427         }
2428         txr->base = (struct i40e_tx_desc *)txr->dma.va;
2429         bzero((void *)txr->base, tsize);
2430         /* Now allocate transmit soft structs for the ring */
2431         if (ixl_allocate_tx_data(que)) {
2432                 device_printf(dev,
2433                     "Critical Failure setting up TX structures\n");
2434                 error = ENOMEM;
2435                 goto err_free_tx_dma;
2436         }
2437         /* Allocate a buf ring */
2438         txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2439             M_NOWAIT, &txr->mtx);
2440         if (txr->br == NULL) {
2441                 device_printf(dev,
2442                     "Critical Failure setting up TX buf ring\n");
2443                 error = ENOMEM;
2444                 goto err_free_tx_data;
2445         }
2446
2447         rsize = roundup2(que->num_rx_desc *
2448             sizeof(union i40e_rx_desc), DBA_ALIGN);
2449         rxr->que = que;
2450         rxr->tail = I40E_QRX_TAIL(que->me);
2451
2452         /* Initialize the RX side lock */
2453         snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2454             device_get_nameunit(dev), que->me);
2455         mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2456
2457         if (i40e_allocate_dma_mem(hw,
2458             &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2459                 device_printf(dev,
2460                     "Unable to allocate RX Descriptor memory\n");
2461                 error = ENOMEM;
2462                 goto err_destroy_rx_mtx;
2463         }
2464         rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2465         bzero((void *)rxr->base, rsize);
2466         /* Allocate receive soft structs for the ring*/
2467         if (ixl_allocate_rx_data(que)) {
2468                 device_printf(dev,
2469                     "Critical Failure setting up receive structs\n");
2470                 error = ENOMEM;
2471                 goto err_free_rx_dma;
2472         }
2473
2474         return (0);
2475
2476 err_free_rx_dma:
2477         i40e_free_dma_mem(&pf->hw, &rxr->dma);
2478 err_destroy_rx_mtx:
2479         mtx_destroy(&rxr->mtx);
2480         /* err_free_tx_buf_ring */
2481         buf_ring_free(txr->br, M_DEVBUF);
2482 err_free_tx_data:
2483         ixl_free_que_tx(que);
2484 err_free_tx_dma:
2485         i40e_free_dma_mem(&pf->hw, &txr->dma);
2486 err_destroy_tx_mtx:
2487         mtx_destroy(&txr->mtx);
2488
2489         return (error);
2490 }
2491
2492 int
2493 ixl_vsi_setup_queues(struct ixl_vsi *vsi)
2494 {
2495         struct ixl_queue        *que;
2496         int                     error = 0;
2497
2498         for (int i = 0; i < vsi->num_queues; i++) {
2499                 que = &vsi->queues[i];
2500                 error = ixl_vsi_setup_queue(vsi, que, i);
2501                 if (error)
2502                         break;
2503         }
2504         if (error == 0)
2505                 sysctl_ctx_init(&vsi->sysctl_ctx);
2506
2507         return (error);
2508 }
2509
2510
2511 /*********************************************************************
2512  *
2513  *  Allocate memory for the VSI (virtual station interface) and their
2514  *  associated queues, rings and the descriptors associated with each,
2515  *  called only once at attach.
2516  *
2517  **********************************************************************/
2518 int
2519 ixl_setup_stations(struct ixl_pf *pf)
2520 {
2521         device_t                dev = pf->dev;
2522         struct ixl_vsi          *vsi;
2523         int                     error = 0;
2524
2525         vsi = &pf->vsi;
2526         vsi->back = (void *)pf;
2527         vsi->hw = &pf->hw;
2528         vsi->id = 0;
2529         vsi->num_vlans = 0;
2530         vsi->back = pf;
2531
2532         if (pf->msix > 1)
2533                 vsi->flags |= IXL_FLAGS_USES_MSIX;
2534
2535         /* Get memory for the station queues */
2536         if (!(vsi->queues =
2537             (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2538             vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2539                 device_printf(dev, "Unable to allocate queue memory\n");
2540                 error = ENOMEM;
2541                 goto ixl_setup_stations_err;
2542         }
2543
2544         /* Then setup each queue */
2545         error = ixl_vsi_setup_queues(vsi);
2546 ixl_setup_stations_err:
2547         return (error);
2548 }
2549
2550 /*
2551 ** Provide an update to the queue RX
2552 ** interrupt moderation value.
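** The heuristic below uses rxr->bytes / rxr->itr as a rough
** bytes-per-interval load signal to choose a latency bucket, then
** smooths the ITR toward that bucket's target value.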
2553 */
2554 void
2555 ixl_set_queue_rx_itr(struct ixl_queue *que)
2556 {
2557         struct ixl_vsi  *vsi = que->vsi;
2558         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2559         struct i40e_hw  *hw = vsi->hw;
2560         struct rx_ring  *rxr = &que->rxr;
2561         u16             rx_itr;
2562         u16             rx_latency = 0;
2563         int             rx_bytes;
2564
2565         /* Idle, do nothing */
2566         if (rxr->bytes == 0)
2567                 return;
2568
2569         if (pf->dynamic_rx_itr) {
2570                 rx_bytes = rxr->bytes/rxr->itr;
2571                 rx_itr = rxr->itr;
2572
2573                 /* Adjust latency range */
2574                 switch (rxr->latency) {
2575                 case IXL_LOW_LATENCY:
2576                         if (rx_bytes > 10) {
2577                                 rx_latency = IXL_AVE_LATENCY;
2578                                 rx_itr = IXL_ITR_20K;
2579                         }
2580                         break;
2581                 case IXL_AVE_LATENCY:
2582                         if (rx_bytes > 20) {
2583                                 rx_latency = IXL_BULK_LATENCY;
2584                                 rx_itr = IXL_ITR_8K;
2585                         } else if (rx_bytes <= 10) {
2586                                 rx_latency = IXL_LOW_LATENCY;
2587                                 rx_itr = IXL_ITR_100K;
2588                         }
2589                         break;
2590                 case IXL_BULK_LATENCY:
2591                         if (rx_bytes <= 20) {
2592                                 rx_latency = IXL_AVE_LATENCY;
2593                                 rx_itr = IXL_ITR_20K;
2594                         }
2595                         break;
2596                  }
2597
2598                 rxr->latency = rx_latency;
2599
2600                 if (rx_itr != rxr->itr) {
2601                         /* do an exponential smoothing */
2602                         rx_itr = (10 * rx_itr * rxr->itr) /
2603                             ((9 * rx_itr) + rxr->itr);
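                        /*
                         * E.g. with rxr->itr = 100 and a target rx_itr of 50:
                         * (10 * 50 * 100) / (9 * 50 + 100) = 50000 / 550 ~= 90,
                         * so the ITR moves gradually toward the target rather
                         * than jumping straight to it.
                         */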
2604                         rxr->itr = min(rx_itr, IXL_MAX_ITR);
2605                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2606                             que->me), rxr->itr);
2607                 }
2608         } else { /* We may have toggled to non-dynamic */
2609                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2610                         vsi->rx_itr_setting = pf->rx_itr;
2611                 /* Update the hardware if needed */
2612                 if (rxr->itr != vsi->rx_itr_setting) {
2613                         rxr->itr = vsi->rx_itr_setting;
2614                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2615                             que->me), rxr->itr);
2616                 }
2617         }
2618         rxr->bytes = 0;
2619         rxr->packets = 0;
2620         return;
2621 }
2622
2623
2624 /*
2625 ** Provide an update to the queue TX
2626 ** interrupt moderation value.
2627 */
2628 void
2629 ixl_set_queue_tx_itr(struct ixl_queue *que)
2630 {
2631         struct ixl_vsi  *vsi = que->vsi;
2632         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2633         struct i40e_hw  *hw = vsi->hw;
2634         struct tx_ring  *txr = &que->txr;
2635         u16             tx_itr;
2636         u16             tx_latency = 0;
2637         int             tx_bytes;
2638
2639
2640         /* Idle, do nothing */
2641         if (txr->bytes == 0)
2642                 return;
2643
2644         if (pf->dynamic_tx_itr) {
2645                 tx_bytes = txr->bytes/txr->itr;
2646                 tx_itr = txr->itr;
2647
2648                 switch (txr->latency) {
2649                 case IXL_LOW_LATENCY:
2650                         if (tx_bytes > 10) {
2651                                 tx_latency = IXL_AVE_LATENCY;
2652                                 tx_itr = IXL_ITR_20K;
2653                         }
2654                         break;
2655                 case IXL_AVE_LATENCY:
2656                         if (tx_bytes > 20) {
2657                                 tx_latency = IXL_BULK_LATENCY;
2658                                 tx_itr = IXL_ITR_8K;
2659                         } else if (tx_bytes <= 10) {
2660                                 tx_latency = IXL_LOW_LATENCY;
2661                                 tx_itr = IXL_ITR_100K;
2662                         }
2663                         break;
2664                 case IXL_BULK_LATENCY:
2665                         if (tx_bytes <= 20) {
2666                                 tx_latency = IXL_AVE_LATENCY;
2667                                 tx_itr = IXL_ITR_20K;
2668                         }
2669                         break;
2670                 }
2671
2672                 txr->latency = tx_latency;
2673
2674                 if (tx_itr != txr->itr) {
2675                         /* Do an exponential smoothing */
2676                         tx_itr = (10 * tx_itr * txr->itr) /
2677                             ((9 * tx_itr) + txr->itr);
2678                         txr->itr = min(tx_itr, IXL_MAX_ITR);
2679                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2680                             que->me), txr->itr);
2681                 }
2682
2683         } else { /* We may have toggled to non-dynamic */
2684                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2685                         vsi->tx_itr_setting = pf->tx_itr;
2686                 /* Update the hardware if needed */
2687                 if (txr->itr != vsi->tx_itr_setting) {
2688                         txr->itr = vsi->tx_itr_setting;
2689                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2690                             que->me), txr->itr);
2691                 }
2692         }
2693         txr->bytes = 0;
2694         txr->packets = 0;
2695         return;
2696 }
2697
2698 void
2699 ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
2700 {
2701         struct sysctl_oid *tree;
2702         struct sysctl_oid_list *child;
2703         struct sysctl_oid_list *vsi_list;
2704
2705         tree = device_get_sysctl_tree(vsi->dev);
2706         child = SYSCTL_CHILDREN(tree);
2707         vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
2708                         CTLFLAG_RD, NULL, "VSI Number");
2709
2710         vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2711         ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
2712
2713         if (queues_sysctls)
2714                 ixl_vsi_add_queues_stats(vsi);
2715 }
2716
2717 /*
2718  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
2719  * Writes to the ITR registers immediately.
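 * For example, from userland (assuming the OID is attached under the
 * device's sysctl tree):
 *
 *   # sysctl dev.ixl.0.tx_itr=122
 *
 * The analogous rx_itr handler below behaves the same way.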
2720  */
2721 static int
2722 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
2723 {
2724         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2725         device_t dev = pf->dev;
2726         int error = 0;
2727         int requested_tx_itr;
2728
2729         requested_tx_itr = pf->tx_itr;
2730         error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2731         if ((error) || (req->newptr == NULL))
2732                 return (error);
2733         if (pf->dynamic_tx_itr) {
2734                 device_printf(dev,
2735                     "Cannot set TX itr value while dynamic TX itr is enabled\n");
2736                 return (EINVAL);
2737         }
2738         if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2739                 device_printf(dev,
2740                     "Invalid TX itr value; value must be between 0 and %d\n",
2741                         IXL_MAX_ITR);
2742                 return (EINVAL);
2743         }
2744
2745         pf->tx_itr = requested_tx_itr;
2746         ixl_configure_tx_itr(pf);
2747
2748         return (error);
2749 }
2750
2751 /*
2752  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
2753  * Writes to the ITR registers immediately.
2754  */
2755 static int
2756 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
2757 {
2758         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2759         device_t dev = pf->dev;
2760         int error = 0;
2761         int requested_rx_itr;
2762
2763         requested_rx_itr = pf->rx_itr;
2764         error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2765         if ((error) || (req->newptr == NULL))
2766                 return (error);
2767         if (pf->dynamic_rx_itr) {
2768                 device_printf(dev,
2769                     "Cannot set RX itr value while dynamic RX itr is enabled\n");
2770                 return (EINVAL);
2771         }
2772         if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2773                 device_printf(dev,
2774                     "Invalid RX itr value; value must be between 0 and %d\n",
2775                         IXL_MAX_ITR);
2776                 return (EINVAL);
2777         }
2778
2779         pf->rx_itr = requested_rx_itr;
2780         ixl_configure_rx_itr(pf);
2781
2782         return (error);
2783 }
2784
2785 void
2786 ixl_add_hw_stats(struct ixl_pf *pf)
2787 {
2788         device_t dev = pf->dev;
2789         struct i40e_hw_port_stats *pf_stats = &pf->stats;
2790
2791         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2792         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2793         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2794
2795         /* Driver statistics */
2796         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
2797                         CTLFLAG_RD, &pf->watchdog_events,
2798                         "Watchdog timeouts");
2799         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
2800                         CTLFLAG_RD, &pf->admin_irq,
2801                         "Admin Queue IRQ Handled");
2802
2803         ixl_vsi_add_sysctls(&pf->vsi, "pf", true);
2804         /* MAC stats */
2805         ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2806 }
2807
2808 void
2809 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2810         struct sysctl_oid_list *child,
2811         struct i40e_hw_port_stats *stats)
2812 {
2813         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2814                                     CTLFLAG_RD, NULL, "MAC Statistics");
2815         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
2816
2817         struct i40e_eth_stats *eth_stats = &stats->eth;
2818         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
2819
2820         struct ixl_sysctl_info ctls[] = {
2822                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
2823                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
2824                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
2825                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
2826                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
2827                 /* Packet Reception Stats */
2828                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
2829                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
2830                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
2831                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
2832                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
2833                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
2834                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
2835                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
2836                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
2837                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
2838                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
2839                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
2840                 /* Packet Transmission Stats */
2841                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
2842                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
2843                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
2844                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
2845                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
2846                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
2847                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
2848                 /* Flow control */
2849                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
2850                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
2851                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
2852                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
2853                 /* End */
2854                 {0,0,0}
2855         };
2856
2857         struct ixl_sysctl_info *entry = ctls;
2858         while (entry->stat != 0) {
2860                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
2861                                 CTLFLAG_RD, entry->stat,
2862                                 entry->description);
2863                 entry++;
2864         }
2865 }
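
/*
 * Minimal standalone sketch (not driver code) of the sentinel-
 * terminated table walk used above: iterate {pointer, name,
 * description} entries until the stat pointer is NULL.  All names
 * below are hypothetical.
 */
#if 0
#include <stdio.h>

struct stat_entry {
	unsigned long long	*stat;
	const char		*name;
	const char		*description;
};

static void
walk_stats(const struct stat_entry *entry)
{
	for (; entry->stat != NULL; entry++)
		printf("%s (%s) = %llu\n", entry->name,
		    entry->description, *entry->stat);
}
#endif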
2866
2867 void
2868 ixl_set_rss_key(struct ixl_pf *pf)
2869 {
2870         struct i40e_hw *hw = &pf->hw;
2871         struct ixl_vsi *vsi = &pf->vsi;
2872         device_t        dev = pf->dev;
2873         u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
2874         enum i40e_status_code status;
2875
2876 #ifdef RSS
2877         /* Fetch the configured RSS key */
2878         rss_getkey((uint8_t *) &rss_seed);
2879 #else
2880         ixl_get_default_rss_key(rss_seed);
2881 #endif
2882         /* Fill out hash function seed */
2883         if (hw->mac.type == I40E_MAC_X722) {
2884                 struct i40e_aqc_get_set_rss_key_data key_data;
2885                 bcopy(rss_seed, &key_data, 52);
2886                 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
2887                 if (status)
2888                         device_printf(dev,
2889                             "i40e_aq_set_rss_key status %s, error %s\n",
2890                             i40e_stat_str(hw, status),
2891                             i40e_aq_str(hw, hw->aq.asq_last_status));
2892         } else {
2893                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2894                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
2895         }
2896 }
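
/*
 * Sketch of the size relationship behind the bcopy(..., 52) above:
 * 52 bytes equals IXL_RSS_KEY_SIZE_REG (13) 32-bit HKEY registers,
 * and matches an AQ key buffer holding a 40-byte standard key plus a
 * 12-byte extended key.  The struct below is an illustrative stand-in,
 * not the canonical i40e definition.
 */
#if 0
#include <stdint.h>

struct rss_key_sketch {
	uint8_t	standard_rss_key[40];	/* standard hash key */
	uint8_t	extended_hash_key[12];	/* X722 extension */
};

_Static_assert(sizeof(struct rss_key_sketch) == 13 * sizeof(uint32_t),
    "52 bytes == 13 HKEY registers");
#endif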
2897
2898 /*
2899  * Configure enabled PCTYPES for RSS.
2900  */
2901 void
2902 ixl_set_rss_pctypes(struct ixl_pf *pf)
2903 {
2904         struct i40e_hw *hw = &pf->hw;
2905         u64             set_hena = 0, hena;
2906
2907 #ifdef RSS
2908         u32             rss_hash_config;
2909
2910         rss_hash_config = rss_gethashconfig();
2911         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2912                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2913         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2914                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2915         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2916                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2917         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2918                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2919         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2920                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2921         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2922                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2923         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2924                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2925 #else
2926         if (hw->mac.type == I40E_MAC_X722)
2927                 set_hena = IXL_DEFAULT_RSS_HENA_X722;
2928         else
2929                 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2930 #endif
2931         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
2932             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
2933         hena |= set_hena;
2934         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
2935         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
2936
2937 }
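
/*
 * Standalone sketch (illustrative only) of the 64-bit HENA handling
 * above: the enabled-PCTYPE mask is read and written as two 32-bit
 * register halves.  The regs[] array stands in for the hardware
 * I40E_PFQF_HENA(0..1) registers.
 */
#if 0
#include <stdint.h>

static uint32_t regs[2];		/* stand-in for HENA(0..1) */

static void
hena_or(uint64_t set_hena)
{
	uint64_t hena;

	hena = (uint64_t)regs[0] | ((uint64_t)regs[1] << 32);
	hena |= set_hena;
	regs[0] = (uint32_t)hena;
	regs[1] = (uint32_t)(hena >> 32);
}
#endif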
2938
2939 void
2940 ixl_set_rss_hlut(struct ixl_pf *pf)
2941 {
2942         struct i40e_hw  *hw = &pf->hw;
2943         device_t        dev = pf->dev;
2944         struct ixl_vsi *vsi = &pf->vsi;
2945         int             i, que_id;
2946         int             lut_entry_width;
2947         u32             lut = 0;
2948         enum i40e_status_code status;
2949
2950         lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
2951
2952         /* Populate the LUT with queue ids in round-robin fashion */
2953         u8 hlut_buf[512];
2954         for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
2955 #ifdef RSS
2956                 /*
2957                  * Fetch the RSS bucket id for the given indirection entry.
2958                  * Cap it at the number of configured buckets (which is
2959                  * num_queues.)
2960                  */
2961                 que_id = rss_get_indirection_to_bucket(i);
2962                 que_id = que_id % vsi->num_queues;
2963 #else
2964                 que_id = i % vsi->num_queues;
2965 #endif
2966                 lut = (que_id & ((0x1 << lut_entry_width) - 1));
2967                 hlut_buf[i] = lut;
2968         }
2969
2970         if (hw->mac.type == I40E_MAC_X722) {
2971                 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
2972                 if (status)
2973                         device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
2974                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
2975         } else {
2976                 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
2977                         wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
2978                 ixl_flush(hw);
2979         }
2980 }
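
/*
 * Standalone sketch (illustrative only) of the round-robin LUT fill
 * above: each indirection entry maps to a queue id modulo the queue
 * count, masked to the LUT entry width.
 */
#if 0
#include <stdint.h>

static void
fill_hlut(uint8_t *hlut, int table_size, int num_queues, int entry_width)
{
	for (int i = 0; i < table_size; i++)
		hlut[i] = (i % num_queues) & ((1 << entry_width) - 1);
}
#endif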
2981
2982 /*
2983 ** Setup the PF's RSS parameters.
2984 */
2985 void
2986 ixl_config_rss(struct ixl_pf *pf)
2987 {
2988         ixl_set_rss_key(pf);
2989         ixl_set_rss_pctypes(pf);
2990         ixl_set_rss_hlut(pf);
2991 }
2992
2993 /*
2994 ** This routine is run via a vlan config EVENT; it
2995 ** lets us use the HW filter table since we can get
2996 ** the vlan id. This just creates the entry in the
2997 ** soft version of the VFTA; init will repopulate
2998 ** the real table.
2999 */
3000 void
3001 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3002 {
3003         struct ixl_vsi  *vsi = ifp->if_softc;
3004         struct i40e_hw  *hw = vsi->hw;
3005         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3006
3007         if (ifp->if_softc != arg)    /* Not our event */
3008                 return;
3009
3010         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3011                 return;
3012
3013         IXL_PF_LOCK(pf);
3014         ++vsi->num_vlans;
3015         ixl_add_filter(vsi, hw->mac.addr, vtag);
3016         IXL_PF_UNLOCK(pf);
3017 }
3018
3019 /*
3020 ** This routine is run via a vlan
3021 ** unconfig EVENT; it removes our entry
3022 ** from the soft VFTA.
3023 */
3024 void
3025 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3026 {
3027         struct ixl_vsi  *vsi = ifp->if_softc;
3028         struct i40e_hw  *hw = vsi->hw;
3029         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3030
3031         if (ifp->if_softc != arg)
3032                 return;
3033
3034         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3035                 return;
3036
3037         IXL_PF_LOCK(pf);
3038         --vsi->num_vlans;
3039         ixl_del_filter(vsi, hw->mac.addr, vtag);
3040         IXL_PF_UNLOCK(pf);
3041 }
3042
3043 /*
3044  * In some firmware versions there is a default MAC/VLAN filter
3045  * configured which interferes with the filters managed by the driver.
3046  * Make sure it's removed.
3047  */
3048 void
3049 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
3050 {
3051         struct i40e_aqc_remove_macvlan_element_data e;
3052
3053         bzero(&e, sizeof(e));
3054         bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
3055         e.vlan_tag = 0;
3056         e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3057         i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
3058
3059         bzero(&e, sizeof(e));
3060         bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
3061         e.vlan_tag = 0;
3062         e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
3063                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3064         i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
3065 }
3066
3067 static enum i40e_status_code
3068 ixl_set_lla(struct ixl_vsi *vsi)
3069 {
3070         struct i40e_hw  *hw = vsi->hw;
3071         u8              tmpaddr[ETHER_ADDR_LEN];
3072         enum i40e_status_code status;
3073
3074         status = I40E_SUCCESS;
3075
3076         bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETHER_ADDR_LEN);
3077         if (memcmp(hw->mac.addr, tmpaddr, ETHER_ADDR_LEN) == 0)
3078                 goto set_lla_exit;
3079
3080         status = i40e_validate_mac_addr(tmpaddr);
3081         if (status != I40E_SUCCESS)
3082                 goto set_lla_exit;
3083
3084         ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
3085         bcopy(tmpaddr, hw->mac.addr, ETHER_ADDR_LEN);
3086         status = i40e_aq_mac_address_write(hw,
3087                         I40E_AQC_WRITE_TYPE_LAA_ONLY,
3088                         hw->mac.addr, NULL);
3089         if (status != I40E_SUCCESS)
3090                 goto set_lla_exit;
3091
3092         ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
3093 set_lla_exit:
3094         return (status);
3095 }
3096
3097 /*
3098 ** Initialize filter list and add filters that the hardware
3099 ** needs to know about.
3100 **
3101 ** Requires VSI's seid to be set before calling.
3102 */
3103 void
3104 ixl_init_filters(struct ixl_vsi *vsi)
3105 {
3106         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3107
3108         /* Initialize mac filter list for VSI */
3109         SLIST_INIT(&vsi->ftl);
3110         vsi->num_hw_filters = 0;
3111
3112         /* Add broadcast address */
3113         ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3114
3115         if (IXL_VSI_IS_VF(vsi))
3116                 return;
3117
3118         ixl_del_default_hw_filters(vsi);
3119
3120         ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
3121
3122         /*
3123          * Prevent Tx flow control frames from being sent out by
3124          * non-firmware transmitters.
3125          * This affects every VSI in the PF.
3126          */
3127         if (pf->enable_tx_fc_filter)
3128                 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3129 }
3130
3131 /*
3132 ** This routine adds multicast filters
3133 */
3134 void
3135 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3136 {
3137         struct ixl_mac_filter *f;
3138
3139         /* Does one already exist? */
3140         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3141         if (f != NULL)
3142                 return;
3143
3144         f = ixl_get_filter(vsi);
3145         if (f == NULL) {
3146                 printf("WARNING: no filter available!!\n");
3147                 return;
3148         }
3149         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3150         f->vlan = IXL_VLAN_ANY;
3151         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3152             | IXL_FILTER_MC);
3153
3154         return;
3155 }
3156
3157 void
3158 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3159 {
3160         ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_hw_filters);
3161 }
3162
3163 /*
3164 ** This routine adds macvlan filters
3165 */
3166 void
3167 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3168 {
3169         struct ixl_mac_filter   *f, *tmp;
3170         struct ixl_pf           *pf;
3171         device_t                dev;
3172
3173         DEBUGOUT("ixl_add_filter: begin");
3174
3175         pf = vsi->back;
3176         dev = pf->dev;
3177
3178         /* Does one already exist? */
3179         f = ixl_find_filter(vsi, macaddr, vlan);
3180         if (f != NULL)
3181                 return;
3182         /*
3183         ** Is this the first vlan being registered? If so, we
3184         ** need to remove the ANY filter that indicates we are
3185         ** not in a vlan, and replace it with a vlan 0 filter.
3186         */
3187         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3188                 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3189                 if (tmp != NULL) {
3190                         ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3191                         ixl_add_filter(vsi, macaddr, 0);
3192                 }
3193         }
3194
3195         f = ixl_get_filter(vsi);
3196         if (f == NULL) {
3197                 device_printf(dev, "WARNING: no filter available!!\n");
3198                 return;
3199         }
3200         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3201         f->vlan = vlan;
3202         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3203         if (f->vlan != IXL_VLAN_ANY)
3204                 f->flags |= IXL_FILTER_VLAN;
3205         else
3206                 vsi->num_macs++;
3207
3208         ixl_add_hw_filters(vsi, f->flags, 1);
3209         return;
3210 }
3211
3212 void
3213 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3214 {
3215         struct ixl_mac_filter *f;
3216
3217         f = ixl_find_filter(vsi, macaddr, vlan);
3218         if (f == NULL)
3219                 return;
3220
3221         f->flags |= IXL_FILTER_DEL;
3222         ixl_del_hw_filters(vsi, 1);
3223         vsi->num_macs--;
3224
3225         /* Check if this is the last vlan removal */
3226         if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3227                 /* Switch back to a non-vlan filter */
3228                 ixl_del_filter(vsi, macaddr, 0);
3229                 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3230         }
3231         return;
3232 }
3233
3234 /*
3235 ** Find the filter matching both the mac address and vlan id
3236 */
3237 struct ixl_mac_filter *
3238 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3239 {
3240         struct ixl_mac_filter   *f;
3241         bool                    match = FALSE;
3242
3243         SLIST_FOREACH(f, &vsi->ftl, next) {
3244                 if (!cmp_etheraddr(f->macaddr, macaddr))
3245                         continue;
3246                 if (f->vlan == vlan) {
3247                         match = TRUE;
3248                         break;
3249                 }
3250         }       
3251
3252         if (!match)
3253                 f = NULL;
3254         return (f);
3255 }
3256
3257 /*
3258 ** This routine takes additions to the vsi filter
3259 ** table and creates an Admin Queue call to create
3260 ** the filters in the hardware.
3261 */
3262 void
3263 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3264 {
3265         struct i40e_aqc_add_macvlan_element_data *a, *b;
3266         struct ixl_mac_filter   *f;
3267         struct ixl_pf           *pf;
3268         struct i40e_hw          *hw;
3269         device_t                dev;
3270         int                     err, j = 0;
3271
3272         pf = vsi->back;
3273         dev = pf->dev;
3274         hw = &pf->hw;
3275         IXL_PF_LOCK_ASSERT(pf);
3276
3277         a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3278             M_DEVBUF, M_NOWAIT | M_ZERO);
3279         if (a == NULL) {
3280                 device_printf(dev, "add_hw_filters failed to get memory\n");
3281                 return;
3282         }
3283
3284         /*
3285         ** Scan the filter list; each time we find a match
3286         ** we add it to the admin queue array and clear
3287         ** the add bit.
3288         */
3289         SLIST_FOREACH(f, &vsi->ftl, next) {
3290                 if ((f->flags & flags) == flags) {
3291                         b = &a[j]; // a pox on fvl long names :)
3292                         bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3293                         if (f->vlan == IXL_VLAN_ANY) {
3294                                 b->vlan_tag = 0;
3295                                 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3296                         } else {
3297                                 b->vlan_tag = f->vlan;
3298                                 b->flags = 0;
3299                         }
3300                         b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3301                         f->flags &= ~IXL_FILTER_ADD;
3302                         j++;
3303                 }
3304                 if (j == cnt)
3305                         break;
3306         }
3307         if (j > 0) {
3308                 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3309                 if (err) 
3310                         device_printf(dev, "aq_add_macvlan err %d, "
3311                             "aq_error %d\n", err, hw->aq.asq_last_status);
3312                 else
3313                         vsi->num_hw_filters += j;
3314         }
3315         free(a, M_DEVBUF);
3316         return;
3317 }
3318
3319 /*
3320 ** This routine takes removals from the vsi filter
3321 ** table and creates an Admin Queue call to delete
3322 ** the filters in the hardware.
3323 */
3324 void
3325 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3326 {
3327         struct i40e_aqc_remove_macvlan_element_data *d, *e;
3328         struct ixl_pf           *pf;
3329         struct i40e_hw          *hw;
3330         device_t                dev;
3331         struct ixl_mac_filter   *f, *f_temp;
3332         int                     err, j = 0;
3333
3334         DEBUGOUT("ixl_del_hw_filters: begin\n");
3335
3336         pf = vsi->back;
3337         hw = &pf->hw;
3338         dev = pf->dev;
3339
3340         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3341             M_DEVBUF, M_NOWAIT | M_ZERO);
3342         if (d == NULL) {
3343                 device_printf(dev, "del_hw_filters failed to get memory\n");
3344                 return;
3345         }
3346
3347         SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3348                 if (f->flags & IXL_FILTER_DEL) {
3349                         e = &d[j]; // a pox on fvl long names :)
3350                         bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3351                         e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3352                         if (f->vlan == IXL_VLAN_ANY) {
3353                                 e->vlan_tag = 0;
3354                                 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3355                         } else {
3356                                 e->vlan_tag = f->vlan;
3357                         }
3358                         /* delete entry from vsi list */
3359                         SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3360                         free(f, M_DEVBUF);
3361                         j++;
3362                 }
3363                 if (j == cnt)
3364                         break;
3365         }
3366         if (j > 0) {
3367                 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3368                 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3369                         int sc = 0;
3370                         for (int i = 0; i < j; i++)
3371                                 sc += (!d[i].error_code);
3372                         vsi->num_hw_filters -= sc;
3373                         device_printf(dev,
3374                             "Failed to remove %d/%d filters, aq error %d\n",
3375                             j - sc, j, hw->aq.asq_last_status);
3376                 } else
3377                         vsi->num_hw_filters -= j;
3378         }
3379         free(d, M_DEVBUF);
3380
3381         DEBUGOUT("ixl_del_hw_filters: end\n");
3382         return;
3383 }
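
/*
 * Sketch (illustrative only) of the per-element error accounting in
 * ixl_del_hw_filters(): on a batched AQ failure, elements whose
 * error_code is zero still count as removed.  The element type is a
 * hypothetical stand-in.
 */
#if 0
#include <stdint.h>

struct del_elem {
	uint8_t	error_code;	/* 0 == removed successfully */
};

static int
count_removed(const struct del_elem *d, int j)
{
	int sc = 0;

	for (int i = 0; i < j; i++)
		sc += (d[i].error_code == 0);
	return (sc);	/* caller subtracts this from its filter count */
}
#endif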
3384
3385 int
3386 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3387 {
3388         struct i40e_hw  *hw = &pf->hw;
3389         int             error = 0;
3390         u32             reg;
3391         u16             pf_qidx;
3392
3393         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3394
3395         ixl_dbg(pf, IXL_DBG_EN_DIS,
3396             "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
3397             pf_qidx, vsi_qidx);
3398
3399         i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3400
3401         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3402         reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3403             I40E_QTX_ENA_QENA_STAT_MASK;
3404         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3405         /* Verify the enable took */
3406         for (int j = 0; j < 10; j++) {
3407                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3408                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3409                         break;
3410                 i40e_usec_delay(10);
3411         }
3412         if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3413                 device_printf(pf->dev, "TX queue %d still disabled!\n",
3414                     pf_qidx);
3415                 error = ETIMEDOUT;
3416         }
3417
3418         return (error);
3419 }
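
/*
 * Standalone sketch (illustrative only) of the bounded poll used in
 * the ring enable/disable paths: re-read a status register up to ten
 * times with a short delay, then report ETIMEDOUT if the expected bit
 * never appears.  read_status() and short_delay() are hypothetical
 * stand-ins for rd32() and i40e_usec_delay().
 */
#if 0
#include <errno.h>
#include <stdint.h>

extern uint32_t read_status(void);
extern void short_delay(void);

static int
wait_for_bit(uint32_t mask)
{
	for (int j = 0; j < 10; j++) {
		if (read_status() & mask)
			return (0);
		short_delay();
	}
	return (ETIMEDOUT);
}
#endif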
3420
3421 int
3422 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3423 {
3424         struct i40e_hw  *hw = &pf->hw;
3425         int             error = 0;
3426         u32             reg;
3427         u16             pf_qidx;
3428
3429         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3430
3431         ixl_dbg(pf, IXL_DBG_EN_DIS,
3432             "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3433             pf_qidx, vsi_qidx);
3434
3435         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3436         reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3437             I40E_QRX_ENA_QENA_STAT_MASK;
3438         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3439         /* Verify the enable took */
3440         for (int j = 0; j < 10; j++) {
3441                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3442                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3443                         break;
3444                 i40e_usec_delay(10);
3445         }
3446         if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3447                 device_printf(pf->dev, "RX queue %d still disabled!\n",
3448                     pf_qidx);
3449                 error = ETIMEDOUT;
3450         }
3451
3452         return (error);
3453 }
3454
3455 int
3456 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3457 {
3458         int error = 0;
3459
3460         error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3461         /* Called function already prints error message */
3462         if (error)
3463                 return (error);
3464         error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3465         return (error);
3466 }
3467
3468 /* For PF VSI only */
3469 int
3470 ixl_enable_rings(struct ixl_vsi *vsi)
3471 {
3472         struct ixl_pf   *pf = vsi->back;
3473         int             error = 0;
3474
3475         for (int i = 0; i < vsi->num_queues; i++) {
3476                 error = ixl_enable_ring(pf, &pf->qtag, i);
3477                 if (error)
3478                         return (error);
3479         }
3480
3481         return (error);
3482 }
3483
3484 /*
3485  * Returns error on first ring that is detected hung.
3486  */
3487 int
3488 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3489 {
3490         struct i40e_hw  *hw = &pf->hw;
3491         int             error = 0;
3492         u32             reg;
3493         u16             pf_qidx;
3494
3495         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3496
3497         i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
3498         i40e_usec_delay(500);
3499
3500         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3501         reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3502         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3503         /* Verify the disable took */
3504         for (int j = 0; j < 10; j++) {
3505                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3506                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3507                         break;
3508                 i40e_msec_delay(10);
3509         }
3510         if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3511                 device_printf(pf->dev, "TX queue %d still enabled!\n",
3512                     pf_qidx);
3513                 error = ETIMEDOUT;
3514         }
3515
3516         return (error);
3517 }
3518
3519 /*
3520  * Returns error on first ring that is detected hung.
3521  */
3522 int
3523 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3524 {
3525         struct i40e_hw  *hw = &pf->hw;
3526         int             error = 0;
3527         u32             reg;
3528         u16             pf_qidx;
3529
3530         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3531
3532         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3533         reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3534         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3535         /* Verify the disable took */
3536         for (int j = 0; j < 10; j++) {
3537                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3538                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3539                         break;
3540                 i40e_msec_delay(10);
3541         }
3542         if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3543                 device_printf(pf->dev, "RX queue %d still enabled!\n",
3544                     pf_qidx);
3545                 error = ETIMEDOUT;
3546         }
3547
3548         return (error);
3549 }
3550
3551 int
3552 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3553 {
3554         int error = 0;
3555
3556         error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3557         /* Called function already prints error message */
3558         if (error)
3559                 return (error);
3560         error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3561         return (error);
3562 }
3563
3564 /* For PF VSI only */
3565 int
3566 ixl_disable_rings(struct ixl_vsi *vsi)
3567 {
3568         struct ixl_pf   *pf = vsi->back;
3569         int             error = 0;
3570
3571         for (int i = 0; i < vsi->num_queues; i++) {
3572                 error = ixl_disable_ring(pf, &pf->qtag, i);
3573                 if (error)
3574                         return (error);
3575         }
3576
3577         return (error);
3578 }
3579
3580 /**
3581  * ixl_handle_mdd_event
3582  *
3583  * Called from the interrupt handler to identify possibly malicious VFs
3584  * (it also detects events from the PF itself).
3585  **/
3586 void
3587 ixl_handle_mdd_event(struct ixl_pf *pf)
3588 {
3589         struct i40e_hw *hw = &pf->hw;
3590         device_t dev = pf->dev;
3591         bool mdd_detected = false;
3592         bool pf_mdd_detected = false;
3593         u32 reg;
3594
3595         /* find what triggered the MDD event */
3596         reg = rd32(hw, I40E_GL_MDET_TX);
3597         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3598                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3599                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3600                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3601                                 I40E_GL_MDET_TX_EVENT_SHIFT;
3602                 u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3603                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
3604                 device_printf(dev,
3605                     "Malicious Driver Detection event %d"
3606                     " on TX queue %d, pf number %d\n",
3607                     event, queue, pf_num);
3608                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3609                 mdd_detected = true;
3610         }
3611         reg = rd32(hw, I40E_GL_MDET_RX);
3612         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3613                 u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3614                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3615                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3616                                 I40E_GL_MDET_RX_EVENT_SHIFT;
3617                 u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3618                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
3619                 device_printf(dev,
3620                     "Malicious Driver Detection event %d"
3621                     " on RX queue %d, pf number %d\n",
3622                     event, queue, pf_num);
3623                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3624                 mdd_detected = true;
3625         }
3626
3627         if (mdd_detected) {
3628                 reg = rd32(hw, I40E_PF_MDET_TX);
3629                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3630                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3631                         device_printf(dev,
3632                             "MDD TX event is for this function!\n");
3633                         pf_mdd_detected = true;
3634                 }
3635                 reg = rd32(hw, I40E_PF_MDET_RX);
3636                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3637                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3638                         device_printf(dev,
3639                             "MDD RX event is for this function!\n");
3640                         pf_mdd_detected = true;
3641                 }
3642         }
3643
3644         /* re-enable mdd interrupt cause */
3645         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3646         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3647         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3648         ixl_flush(hw);
3649 }
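
/*
 * Sketch (illustrative only) of the mask-and-shift decoding used above
 * on the MDD cause registers.  The mask and shift values below are
 * examples, not the hardware definitions.
 */
#if 0
#include <stdint.h>

#define EVENT_MASK	0x00ff0000u
#define EVENT_SHIFT	16

static inline uint8_t
extract_event(uint32_t reg)
{
	return ((reg & EVENT_MASK) >> EVENT_SHIFT);
}
#endif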
3650
3651 void
3652 ixl_enable_intr(struct ixl_vsi *vsi)
3653 {
3654         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
3655         struct i40e_hw          *hw = vsi->hw;
3656         struct ixl_queue        *que = vsi->queues;
3657
3658         if (pf->msix > 1) {
3659                 for (int i = 0; i < vsi->num_queues; i++, que++)
3660                         ixl_enable_queue(hw, que->me);
3661         } else
3662                 ixl_enable_intr0(hw);
3663 }
3664
3665 void
3666 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3667 {
3668         struct i40e_hw          *hw = vsi->hw;
3669         struct ixl_queue        *que = vsi->queues;
3670
3671         for (int i = 0; i < vsi->num_queues; i++, que++)
3672                 ixl_disable_queue(hw, que->me);
3673 }
3674
3675 void
3676 ixl_enable_intr0(struct i40e_hw *hw)
3677 {
3678         u32             reg;
3679
3680         /* Use IXL_ITR_NONE so ITR isn't updated here */
3681         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3682             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3683             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3684         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3685 }
3686
3687 void
3688 ixl_disable_intr0(struct i40e_hw *hw)
3689 {
3690         u32             reg;
3691
3692         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3693         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3694         ixl_flush(hw);
3695 }
3696
3697 void
3698 ixl_enable_queue(struct i40e_hw *hw, int id)
3699 {
3700         u32             reg;
3701
3702         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3703             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3704             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3705         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3706 }
3707
3708 void
3709 ixl_disable_queue(struct i40e_hw *hw, int id)
3710 {
3711         u32             reg;
3712
3713         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3714         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3715 }
3716
3717 void
3718 ixl_update_stats_counters(struct ixl_pf *pf)
3719 {
3720         struct i40e_hw  *hw = &pf->hw;
3721         struct ixl_vsi  *vsi = &pf->vsi;
3722         struct ixl_vf   *vf;
3723
3724         struct i40e_hw_port_stats *nsd = &pf->stats;
3725         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3726
3727         /* Update hw stats */
3728         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3729                            pf->stat_offsets_loaded,
3730                            &osd->crc_errors, &nsd->crc_errors);
3731         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3732                            pf->stat_offsets_loaded,
3733                            &osd->illegal_bytes, &nsd->illegal_bytes);
3734         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3735                            I40E_GLPRT_GORCL(hw->port),
3736                            pf->stat_offsets_loaded,
3737                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3738         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3739                            I40E_GLPRT_GOTCL(hw->port),
3740                            pf->stat_offsets_loaded,
3741                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3742         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3743                            pf->stat_offsets_loaded,
3744                            &osd->eth.rx_discards,
3745                            &nsd->eth.rx_discards);
3746         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3747                            I40E_GLPRT_UPRCL(hw->port),
3748                            pf->stat_offsets_loaded,
3749                            &osd->eth.rx_unicast,
3750                            &nsd->eth.rx_unicast);
3751         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3752                            I40E_GLPRT_UPTCL(hw->port),
3753                            pf->stat_offsets_loaded,
3754                            &osd->eth.tx_unicast,
3755                            &nsd->eth.tx_unicast);
3756         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3757                            I40E_GLPRT_MPRCL(hw->port),
3758                            pf->stat_offsets_loaded,
3759                            &osd->eth.rx_multicast,
3760                            &nsd->eth.rx_multicast);
3761         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3762                            I40E_GLPRT_MPTCL(hw->port),
3763                            pf->stat_offsets_loaded,
3764                            &osd->eth.tx_multicast,
3765                            &nsd->eth.tx_multicast);
3766         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3767                            I40E_GLPRT_BPRCL(hw->port),
3768                            pf->stat_offsets_loaded,
3769                            &osd->eth.rx_broadcast,
3770                            &nsd->eth.rx_broadcast);
3771         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3772                            I40E_GLPRT_BPTCL(hw->port),
3773                            pf->stat_offsets_loaded,
3774                            &osd->eth.tx_broadcast,
3775                            &nsd->eth.tx_broadcast);
3776
3777         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3778                            pf->stat_offsets_loaded,
3779                            &osd->tx_dropped_link_down,
3780                            &nsd->tx_dropped_link_down);
3781         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3782                            pf->stat_offsets_loaded,
3783                            &osd->mac_local_faults,
3784                            &nsd->mac_local_faults);
3785         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3786                            pf->stat_offsets_loaded,
3787                            &osd->mac_remote_faults,
3788                            &nsd->mac_remote_faults);
3789         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3790                            pf->stat_offsets_loaded,
3791                            &osd->rx_length_errors,
3792                            &nsd->rx_length_errors);
3793
3794         /* Flow control (LFC) stats */
3795         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3796                            pf->stat_offsets_loaded,
3797                            &osd->link_xon_rx, &nsd->link_xon_rx);
3798         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3799                            pf->stat_offsets_loaded,
3800                            &osd->link_xon_tx, &nsd->link_xon_tx);
3801         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3802                            pf->stat_offsets_loaded,
3803                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
3804         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3805                            pf->stat_offsets_loaded,
3806                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
3807
3808         /* Packet size stats rx */
3809         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3810                            I40E_GLPRT_PRC64L(hw->port),
3811                            pf->stat_offsets_loaded,
3812                            &osd->rx_size_64, &nsd->rx_size_64);
3813         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3814                            I40E_GLPRT_PRC127L(hw->port),
3815                            pf->stat_offsets_loaded,
3816                            &osd->rx_size_127, &nsd->rx_size_127);
3817         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3818                            I40E_GLPRT_PRC255L(hw->port),
3819                            pf->stat_offsets_loaded,
3820                            &osd->rx_size_255, &nsd->rx_size_255);
3821         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3822                            I40E_GLPRT_PRC511L(hw->port),
3823                            pf->stat_offsets_loaded,
3824                            &osd->rx_size_511, &nsd->rx_size_511);
3825         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3826                            I40E_GLPRT_PRC1023L(hw->port),
3827                            pf->stat_offsets_loaded,
3828                            &osd->rx_size_1023, &nsd->rx_size_1023);
3829         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3830                            I40E_GLPRT_PRC1522L(hw->port),
3831                            pf->stat_offsets_loaded,
3832                            &osd->rx_size_1522, &nsd->rx_size_1522);
3833         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3834                            I40E_GLPRT_PRC9522L(hw->port),
3835                            pf->stat_offsets_loaded,
3836                            &osd->rx_size_big, &nsd->rx_size_big);
3837
3838         /* Packet size stats tx */
3839         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3840                            I40E_GLPRT_PTC64L(hw->port),
3841                            pf->stat_offsets_loaded,
3842                            &osd->tx_size_64, &nsd->tx_size_64);
3843         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3844                            I40E_GLPRT_PTC127L(hw->port),
3845                            pf->stat_offsets_loaded,
3846                            &osd->tx_size_127, &nsd->tx_size_127);
3847         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3848                            I40E_GLPRT_PTC255L(hw->port),
3849                            pf->stat_offsets_loaded,
3850                            &osd->tx_size_255, &nsd->tx_size_255);
3851         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3852                            I40E_GLPRT_PTC511L(hw->port),
3853                            pf->stat_offsets_loaded,
3854                            &osd->tx_size_511, &nsd->tx_size_511);
3855         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3856                            I40E_GLPRT_PTC1023L(hw->port),
3857                            pf->stat_offsets_loaded,
3858                            &osd->tx_size_1023, &nsd->tx_size_1023);
3859         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3860                            I40E_GLPRT_PTC1522L(hw->port),
3861                            pf->stat_offsets_loaded,
3862                            &osd->tx_size_1522, &nsd->tx_size_1522);
3863         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3864                            I40E_GLPRT_PTC9522L(hw->port),
3865                            pf->stat_offsets_loaded,
3866                            &osd->tx_size_big, &nsd->tx_size_big);
3867
3868         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3869                            pf->stat_offsets_loaded,
3870                            &osd->rx_undersize, &nsd->rx_undersize);
3871         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3872                            pf->stat_offsets_loaded,
3873                            &osd->rx_fragments, &nsd->rx_fragments);
3874         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3875                            pf->stat_offsets_loaded,
3876                            &osd->rx_oversize, &nsd->rx_oversize);
3877         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3878                            pf->stat_offsets_loaded,
3879                            &osd->rx_jabber, &nsd->rx_jabber);
3880         /* EEE */
3881         i40e_get_phy_lpi_status(hw, nsd);
3882
3883         ixl_stat_update32(hw, I40E_PRTPM_TLPIC,
3884                           pf->stat_offsets_loaded,
3885                           &osd->tx_lpi_count, &nsd->tx_lpi_count);
3886         ixl_stat_update32(hw, I40E_PRTPM_RLPIC,
3887                           pf->stat_offsets_loaded,
3888                           &osd->rx_lpi_count, &nsd->rx_lpi_count);
3889
3890         pf->stat_offsets_loaded = true;
3891         /* End hw stats */
3892
3893         /* Update vsi stats */
3894         ixl_update_vsi_stats(vsi);
3895
3896         for (int i = 0; i < pf->num_vfs; i++) {
3897                 vf = &pf->vfs[i];
3898                 if (vf->vf_flags & VF_FLAG_ENABLED)
3899                         ixl_update_eth_stats(&pf->vfs[i].vsi);
3900         }
3901 }
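
/*
 * Standalone sketch of the offset-based counter scheme behind
 * ixl_stat_update48(): the hardware counters are never cleared, so the
 * first read is latched as an offset, later reads subtract it, and the
 * subtraction wraps at 2^48.  This mirrors the usual i40e pattern but
 * is an illustration, not the driver's implementation.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static void
stat_update48(uint64_t new_data, bool offset_loaded,
    uint64_t *offset, uint64_t *stat)
{
	if (!offset_loaded)
		*offset = new_data;
	if (new_data > *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((uint64_t)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;	/* counters are 48 bits wide */
}
#endif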
3902
3903 int
3904 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
3905 {
3906         struct i40e_hw *hw = &pf->hw;
3907         struct ixl_vsi *vsi = &pf->vsi;
3908         device_t dev = pf->dev;
3909         int error = 0;
3910
3911         /* Teardown */
3912         if (is_up)
3913                 ixl_stop(pf);
3914
3915         ixl_teardown_queue_msix(vsi);
3916
3917         if (hw->hmc.hmc_obj) {
3918                 error = i40e_shutdown_lan_hmc(hw);
3919                 if (error)
3920                         device_printf(dev,
3921                             "Shutdown LAN HMC failed with code %d\n", error);
3922         }
3923
3924         callout_drain(&pf->timer);
3925
3926         ixl_disable_intr0(hw);
3927         ixl_teardown_adminq_msix(pf);
3928
3929         error = i40e_shutdown_adminq(hw);
3930         if (error)
3931                 device_printf(dev,
3932                     "Shutdown Admin queue failed with code %d\n", error);
3933
3934         /* Free ring buffers, locks and filters */
3935         ixl_vsi_free_queues(vsi);
3936
3937         ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
3938
3939         return (error);
3940 }
3941
3942 int
3943 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
3944 {
3945         struct i40e_hw *hw = &pf->hw;
3946         struct ixl_vsi *vsi = &pf->vsi;
3947         device_t dev = pf->dev;
3948         enum i40e_get_fw_lldp_status_resp lldp_status;
3949         int error = 0;
3950
3951         device_printf(dev, "Rebuilding driver state...\n");
3952
3953         if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) {
3954                 if (ixl_fw_recovery_mode(pf)) {
3955                         atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
3956                         pf->link_up = FALSE;
3957                         ixl_update_link_status(pf);
3958                 }
3959         }
3960
3962         /* Setup */
3963         error = i40e_init_adminq(hw);
3964         if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
3965                 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
3966                     error);
3967                 goto ixl_rebuild_hw_structs_after_reset_err;
3968         }
3969
3970         i40e_clear_pxe_mode(hw);
3971
3972         error = ixl_get_hw_capabilities(pf);
3973         if (error) {
3974                 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
3975                 goto ixl_rebuild_hw_structs_after_reset_err;
3976         }
3977         ixl_configure_intr0_msix(pf);
3978         ixl_enable_intr0(hw);
3979
3980         /* Do not init LAN HMC and bring interface up in recovery mode */
3981         if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) == 0) {
3982                 error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
3983                     hw->func_caps.num_rx_qp, 0, 0);
3984                 if (error) {
3985                         device_printf(dev, "init_lan_hmc failed: %d\n", error);
3986                         goto ixl_rebuild_hw_structs_after_reset_err;
3987                 }
3988
3989                 error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
3990                 if (error) {
3991                         device_printf(dev, "configure_lan_hmc failed: %d\n", error);
3992                         goto ixl_rebuild_hw_structs_after_reset_err;
3993                 }
3994
3995                 if (!pf->qmgr.qinfo) {
3996                         /* Init queue allocation manager */
3997                         error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_rx_qp);
3998                         if (error) {
3999                                 device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
4000                                     error);
4001                                 goto ixl_rebuild_hw_structs_after_reset_err;
4002                         }
4003                 }
4004                 /* reserve a contiguous allocation for the PF's VSI */
4005                 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
4006                 if (error) {
4007                         device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
4008                             error);
4009                         /* TODO: error handling */
4010                 } else
4011                         device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
4012                             pf->qtag.num_allocated, pf->qtag.num_active);
4013
4014                 error = ixl_switch_config(pf);
4015                 if (error) {
4016                         device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
4017                              error);
4018                         goto ixl_rebuild_hw_structs_after_reset_err;
4019                 }
4020         } /* not in recovery mode */
4021
4022         /* Remove default filters reinstalled by FW on reset */
4023         ixl_del_default_hw_filters(vsi);
4024
4025         if (ixl_vsi_setup_queues(vsi)) {
4026                 device_printf(dev, "setup queues failed!\n");
4027                 error = ENOMEM;
4028                 goto ixl_rebuild_hw_structs_after_reset_err;
4029         }
4030
4031         ixl_vsi_add_sysctls(vsi, "pf", true);
4032
4033         if (pf->msix > 1) {
4034                 error = ixl_setup_adminq_msix(pf);
4035                 if (error) {
4036                         device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
4037                             error);
4038                         goto ixl_rebuild_hw_structs_after_reset_err;
4039                 }
4040
4041                 ixl_configure_intr0_msix(pf);
4042                 ixl_enable_intr0(hw);
4043
4044                 error = ixl_setup_queue_msix(vsi);
4045                 if (error) {
4046                         device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
4047                             error);
4048                         goto ixl_rebuild_hw_structs_after_reset_err;
4049                 }
4050                 error = ixl_setup_queue_tqs(vsi);
4051                 if (error) {
4052                         device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
4053                             error);
4054                         goto ixl_rebuild_hw_structs_after_reset_err;
4055                 }
4056         } else {
4057                 error = ixl_setup_legacy(pf);
4058                 if (error) {
4059                         device_printf(dev, "ixl_setup_legacy() error: %d\n",
4060                             error);
4061                         goto ixl_rebuild_hw_structs_after_reset_err;
4062                 }
4063         }
4064
4065         /* Do not bring interface up in recovery mode */
4066         if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0)
4067                 return (error);
4068
4069         /* Determine link state */
4070         if (ixl_attach_get_link_status(pf)) {
4071                 error = EINVAL;
4072                 /* TODO: error handling */
4073         }
4074
4075         i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
4076
4077         /* Query device FW LLDP status */
4078         if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
4079                 if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
4080                         atomic_set_int(&pf->state,
4081                             IXL_PF_STATE_FW_LLDP_DISABLED);
4082                 } else {
4083                         atomic_clear_int(&pf->state,
4084                             IXL_PF_STATE_FW_LLDP_DISABLED);
4085                 }
4086         }
4087
4088         if (is_up)
4089                 ixl_init(pf);
4090
4091         device_printf(dev, "Rebuilding driver state done.\n");
4092         IXL_PF_LOCK(pf);
4093         callout_reset(&pf->timer, hz, ixl_local_timer, pf);
4094         IXL_PF_UNLOCK(pf);
4095         return (0);
4096
4097 ixl_rebuild_hw_structs_after_reset_err:
4098         device_printf(dev, "Reload the driver to recover\n");
4099         return (error);
4100 }
4101
4102 void
4103 ixl_handle_empr_reset(struct ixl_pf *pf)
4104 {
4105         struct ixl_vsi  *vsi = &pf->vsi;
4106         struct i40e_hw  *hw = &pf->hw;
4107         bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
4108         int error = 0;
4109
4110         ixl_prepare_for_reset(pf, is_up);
4111         /*
4112          * i40e_pf_reset checks the type of reset and acts
4113          * accordingly. If EMP or Core reset was performed
4114          * doing PF reset is not necessary and it sometimes
4115          * fails.
4116          */
4117         error = i40e_pf_reset(hw);
4118         if (error) {
4119                 device_printf(pf->dev, "PF reset failure %s\n",
4120                                 i40e_stat_str(hw, error));
4121         }
4122
4123         ixl_rebuild_hw_structs_after_reset(pf, is_up);
4124
4125         atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
4126 }
4127
4128 /*
4129 ** Tasklet handler for MSIX Adminq interrupts
4130 **  - run outside the interrupt context since it might sleep
4131 */
4132 void
4133 ixl_do_adminq(void *context, int pending)
4134 {
4135         struct ixl_pf                   *pf = context;
4136         struct i40e_hw                  *hw = &pf->hw;
4137         struct i40e_arq_event_info      event;
4138         i40e_status                     ret;
4139         device_t                        dev = pf->dev;
4140         u32                             loop = 0;
4141         u16                             opcode, arq_pending;
4142
4143         if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4144                 /* Flag cleared at end of this function */
4145                 ixl_handle_empr_reset(pf);
4146                 return;
4147         }
4148
4149         /* Admin Queue handling */
4150         event.buf_len = IXL_AQ_BUF_SZ;
4151         event.msg_buf = malloc(event.buf_len,
4152             M_DEVBUF, M_NOWAIT | M_ZERO);
4153         if (!event.msg_buf) {
4154                 device_printf(dev, "%s: Unable to allocate memory for Admin"
4155                     " Queue event!\n", __func__);
4156                 return;
4157         }
4158
4159         IXL_PF_LOCK(pf);
4160         /* clean and process any events */
4161         do {
4162                 ret = i40e_clean_arq_element(hw, &event, &arq_pending);
4163                 if (ret)
4164                         break;
4165                 opcode = LE16_TO_CPU(event.desc.opcode);
4166                 ixl_dbg(pf, IXL_DBG_AQ,
4167                     "Admin Queue event: %#06x\n", opcode);
4168                 switch (opcode) {
4169                 case i40e_aqc_opc_get_link_status:
4170                         ixl_link_event(pf, &event);
4171                         break;
4172                 case i40e_aqc_opc_send_msg_to_pf:
4173 #ifdef PCI_IOV
4174                         ixl_handle_vf_msg(pf, &event);
4175 #endif
4176                         break;
4177                 case i40e_aqc_opc_event_lan_overflow:
4178                 default:
4179                         break;
4180                 }
4181
4182         } while (arq_pending && (loop++ < IXL_ADM_LIMIT));
4183
4184         free(event.msg_buf, M_DEVBUF);
4185
4186         /* If there are still messages to process, reschedule. */
4187         if (arq_pending > 0)
4188                 taskqueue_enqueue(pf->tq, &pf->adminq);
4189         else
4190                 ixl_enable_intr0(hw);
4191
4192         IXL_PF_UNLOCK(pf);
4193 }
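
/*
 * Sketch (illustrative only) of the bounded drain loop above: process
 * queued events until the queue is empty or a loop limit is reached,
 * then reschedule if work remains.  dequeue_event(), reschedule(), and
 * LOOP_LIMIT are hypothetical stand-ins for i40e_clean_arq_element(),
 * taskqueue_enqueue(), and IXL_ADM_LIMIT.
 */
#if 0
#include <stdbool.h>

#define LOOP_LIMIT	16

extern bool dequeue_event(void);	/* true while more work pending */
extern void reschedule(void);

static void
drain_queue(void)
{
	bool pending;
	int loop = 0;

	do {
		pending = dequeue_event();
	} while (pending && ++loop < LOOP_LIMIT);

	if (pending)
		reschedule();
}
#endif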
4194
4195 /**
4196  * Update VSI-specific ethernet statistics counters.
4197  **/
4198 void
4199 ixl_update_eth_stats(struct ixl_vsi *vsi)
4200 {
4201         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4202         struct i40e_hw *hw = &pf->hw;
4203         struct i40e_eth_stats *es;
4204         struct i40e_eth_stats *oes;
4205         u16 stat_idx = vsi->info.stat_counter_idx;
4206
4207         es = &vsi->eth_stats;
4208         oes = &vsi->eth_stats_offsets;
4209
4210         /* Gather up the stats that the hw collects */
4211         ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4212                            vsi->stat_offsets_loaded,
4213                            &oes->tx_errors, &es->tx_errors);
4214         ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4215                            vsi->stat_offsets_loaded,
4216                            &oes->rx_discards, &es->rx_discards);
4217
4218         ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4219                            I40E_GLV_GORCL(stat_idx),
4220                            vsi->stat_offsets_loaded,
4221                            &oes->rx_bytes, &es->rx_bytes);
4222         ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4223                            I40E_GLV_UPRCL(stat_idx),
4224                            vsi->stat_offsets_loaded,
4225                            &oes->rx_unicast, &es->rx_unicast);
4226         ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4227                            I40E_GLV_MPRCL(stat_idx),
4228                            vsi->stat_offsets_loaded,
4229                            &oes->rx_multicast, &es->rx_multicast);
4230         ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4231                            I40E_GLV_BPRCL(stat_idx),
4232                            vsi->stat_offsets_loaded,
4233                            &oes->rx_broadcast, &es->rx_broadcast);
4234
4235         ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4236                            I40E_GLV_GOTCL(stat_idx),
4237                            vsi->stat_offsets_loaded,
4238                            &oes->tx_bytes, &es->tx_bytes);
4239         ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4240                            I40E_GLV_UPTCL(stat_idx),
4241                            vsi->stat_offsets_loaded,
4242                            &oes->tx_unicast, &es->tx_unicast);
4243         ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4244                            I40E_GLV_MPTCL(stat_idx),
4245                            vsi->stat_offsets_loaded,
4246                            &oes->tx_multicast, &es->tx_multicast);
4247         ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4248                            I40E_GLV_BPTCL(stat_idx),
4249                            vsi->stat_offsets_loaded,
4250                            &oes->tx_broadcast, &es->tx_broadcast);
4251         vsi->stat_offsets_loaded = true;
4252 }
4253
4254 void
4255 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4256 {
4257         struct ixl_pf           *pf;
4258         struct ifnet            *ifp;
4259         struct i40e_eth_stats   *es;
4260         u64                     tx_discards;
4261
4262         struct i40e_hw_port_stats *nsd;
4263
4264         pf = vsi->back;
4265         ifp = vsi->ifp;
4266         es = &vsi->eth_stats;
4267         nsd = &pf->stats;
4268
4269         ixl_update_eth_stats(vsi);
4270
4271         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4272         for (int i = 0; i < vsi->num_queues; i++)
4273                 tx_discards += vsi->queues[i].txr.br->br_drops;
4274
4275         /* Update ifnet stats */
4276         IXL_SET_IPACKETS(vsi, es->rx_unicast +
4277                            es->rx_multicast +
4278                            es->rx_broadcast);
4279         IXL_SET_OPACKETS(vsi, es->tx_unicast +
4280                            es->tx_multicast +
4281                            es->tx_broadcast);
4282         IXL_SET_IBYTES(vsi, es->rx_bytes);
4283         IXL_SET_OBYTES(vsi, es->tx_bytes);
4284         IXL_SET_IMCASTS(vsi, es->rx_multicast);
4285         IXL_SET_OMCASTS(vsi, es->tx_multicast);
4286
4287         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4288             nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4289             nsd->rx_jabber);
4290         IXL_SET_OERRORS(vsi, es->tx_errors);
4291         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4292         IXL_SET_OQDROPS(vsi, tx_discards);
4293         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4294         IXL_SET_COLLISIONS(vsi, 0);
4295 }
4296
4297 /**
4298  * Reset all of the stats for the given pf
4299  **/
4300 void
4301 ixl_pf_reset_stats(struct ixl_pf *pf)
4302 {
4303         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4304         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4305         pf->stat_offsets_loaded = false;
4306 }
4307
4308 /**
4309  * Resets all stats of the given vsi
4310  **/
4311 void
4312 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4313 {
4314         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4315         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4316         vsi->stat_offsets_loaded = false;
4317 }
4318
4319 /**
4320  * Read and update a 48 bit stat from the hw
4321  *
4322  * Since the device stats are not reset at PFReset, they likely will not
4323  * be zeroed when the driver starts.  We'll save the first values read
4324  * and use them as offsets to be subtracted from the raw values in order
4325  * to report stats that count from zero.
4326  **/
4327 void
4328 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4329         bool offset_loaded, u64 *offset, u64 *stat)
4330 {
4331         u64 new_data;
4332
4333 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4334         new_data = rd64(hw, loreg);
4335 #else
4336         /*
4337          * Use two rd32's instead of one rd64; FreeBSD versions before
4338          * 10 don't support 64-bit bus reads/writes.
4339          */
4340         new_data = rd32(hw, loreg);
4341         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4342 #endif
4343
4344         if (!offset_loaded)
4345                 *offset = new_data;
4346         if (new_data >= *offset)
4347                 *stat = new_data - *offset;
4348         else
4349                 *stat = (new_data + ((u64)1 << 48)) - *offset;
4350         *stat &= 0xFFFFFFFFFFFFULL;
4351 }
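/*
 * Editorial note: a minimal standalone sketch (illustrative only, not
 * part of the driver) of the offset/wraparound arithmetic used above.
 * The register is 48 bits wide, so when the raw value wraps below the
 * saved offset, adding 2^48 before subtracting recovers the true delta.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
stat48_delta(uint64_t offset, uint64_t raw)
{
        uint64_t stat;

        if (raw >= offset)
                stat = raw - offset;
        else
                stat = (raw + ((uint64_t)1 << 48)) - offset;
        return (stat & 0xFFFFFFFFFFFFULL);
}

int
main(void)
{
        /* No wrap: 1000 events counted since the offset was saved. */
        assert(stat48_delta(500, 1500) == 1000);
        /* Wrap: the counter rolled past 2^48 - 1 back around to 100. */
        assert(stat48_delta(0xFFFFFFFFFFF0ULL, 100) == 0x10 + 100);
        return (0);
}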
4352
4353 /**
4354  * Read and update a 32 bit stat from the hw
4355  **/
4356 void
4357 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4358         bool offset_loaded, u64 *offset, u64 *stat)
4359 {
4360         u32 new_data;
4361
4362         new_data = rd32(hw, reg);
4363         if (!offset_loaded)
4364                 *offset = new_data;
4365         if (new_data >= *offset)
4366                 *stat = (u32)(new_data - *offset);
4367         else
4368                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4369 }
4370
4371 void
4372 ixl_add_device_sysctls(struct ixl_pf *pf)
4373 {
4374         device_t dev = pf->dev;
4375         struct i40e_hw *hw = &pf->hw;
4376
4377         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4378         struct sysctl_oid_list *ctx_list =
4379             SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4380
4381         struct sysctl_oid *debug_node;
4382         struct sysctl_oid_list *debug_list;
4383
4384         struct sysctl_oid *fec_node;
4385         struct sysctl_oid_list *fec_list;
4386
4387         struct sysctl_oid *eee_node;
4388         struct sysctl_oid_list *eee_list;
4389
4390         /* Set up sysctls */
4391         SYSCTL_ADD_PROC(ctx, ctx_list,
4392             OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4393             pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4394
4395         SYSCTL_ADD_PROC(ctx, ctx_list,
4396             OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4397             pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4398
4399         SYSCTL_ADD_PROC(ctx, ctx_list,
4400             OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
4401             pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
4402
4403         SYSCTL_ADD_PROC(ctx, ctx_list,
4404             OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4405             pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
4406
4407         SYSCTL_ADD_PROC(ctx, ctx_list,
4408             OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4409             pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4410
4411         SYSCTL_ADD_PROC(ctx, ctx_list,
4412             OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4413             pf, 0, ixl_sysctl_unallocated_queues, "I",
4414             "Queues not allocated to a PF or VF");
4415
4416         SYSCTL_ADD_PROC(ctx, ctx_list,
4417             OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4418             pf, 0, ixl_sysctl_pf_tx_itr, "I",
4419             "Immediately set TX ITR value for all queues");
4420
4421         SYSCTL_ADD_PROC(ctx, ctx_list,
4422             OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4423             pf, 0, ixl_sysctl_pf_rx_itr, "I",
4424             "Immediately set RX ITR value for all queues");
4425
4426         SYSCTL_ADD_INT(ctx, ctx_list,
4427             OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4428             &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4429
4430         SYSCTL_ADD_INT(ctx, ctx_list,
4431             OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4432             &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
4433
4434         SYSCTL_ADD_INT(ctx, ctx_list,
4435             OID_AUTO, "tx_ring_size", CTLFLAG_RD,
4436             &pf->vsi.num_tx_desc, 0, "TX ring size");
4437
4438         SYSCTL_ADD_INT(ctx, ctx_list,
4439             OID_AUTO, "rx_ring_size", CTLFLAG_RD,
4440             &pf->vsi.num_rx_desc, 0, "RX ring size");
4441
4442         /* Add FEC sysctls for 25G adapters */
4443         if (i40e_is_25G_device(hw->device_id)) {
4444                 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4445                     OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
4446                 fec_list = SYSCTL_CHILDREN(fec_node);
4447
4448                 SYSCTL_ADD_PROC(ctx, fec_list,
4449                     OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
4450                     pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
4451
4452                 SYSCTL_ADD_PROC(ctx, fec_list,
4453                     OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
4454                     pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
4455
4456                 SYSCTL_ADD_PROC(ctx, fec_list,
4457                     OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
4458                     pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
4459
4460                 SYSCTL_ADD_PROC(ctx, fec_list,
4461                     OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
4462                     pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
4463
4464                 SYSCTL_ADD_PROC(ctx, fec_list,
4465                     OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
4466                     pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
4467         }
4468
4469         SYSCTL_ADD_PROC(ctx, ctx_list,
4470             OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
4471             pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
4472
4473         eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4474             OID_AUTO, "eee", CTLFLAG_RD, NULL,
4475             "Energy Efficient Ethernet (EEE) Sysctls");
4476         eee_list = SYSCTL_CHILDREN(eee_node);
4477
4478         SYSCTL_ADD_PROC(ctx, eee_list,
4479             OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW,
4480             pf, 0, ixl_sysctl_eee_enable, "I",
4481             "Enable Energy Efficient Ethernet (EEE)");
4482
4483         SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
4484             CTLFLAG_RD, &pf->stats.tx_lpi_status, 0,
4485             "TX LPI status");
4486
4487         SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
4488             CTLFLAG_RD, &pf->stats.rx_lpi_status, 0,
4489             "RX LPI status");
4490
4491         SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
4492             CTLFLAG_RD, &pf->stats.tx_lpi_count,
4493             "TX LPI count");
4494
4495         SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
4496             CTLFLAG_RD, &pf->stats.rx_lpi_count,
4497             "RX LPI count");
4498         /* Add sysctls meant to print debug information, but don't list them
4499          * in "sysctl -a" output. */
4500         debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4501             OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4502         debug_list = SYSCTL_CHILDREN(debug_node);
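/*
 * Editorial note: CTLFLAG_SKIP only hides these OIDs from tree walks
 * such as "sysctl -a"; they can still be read by exact name, e.g.
 * "sysctl dev.ixl.0.debug.link_status" (unit 0 is illustrative).
 */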
4503
4504         SYSCTL_ADD_UINT(ctx, debug_list,
4505             OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4506             &pf->hw.debug_mask, 0, "Shared code debug message level");
4507
4508         SYSCTL_ADD_UINT(ctx, debug_list,
4509             OID_AUTO, "core_debug_mask", CTLFLAG_RW,
4510             &pf->dbg_mask, 0, "Non-shared code debug message level");
4511
4512         SYSCTL_ADD_PROC(ctx, debug_list,
4513             OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4514             pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4515
4516         SYSCTL_ADD_PROC(ctx, debug_list,
4517             OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4518             pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4519
4520         SYSCTL_ADD_PROC(ctx, debug_list,
4521             OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4522             pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4523
4524         SYSCTL_ADD_PROC(ctx, debug_list,
4525             OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4526             pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4527
4528         SYSCTL_ADD_PROC(ctx, debug_list,
4529             OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4530             pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4531
4532         SYSCTL_ADD_PROC(ctx, debug_list,
4533             OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4534             pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4535
4536         SYSCTL_ADD_PROC(ctx, debug_list,
4537             OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4538             pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
4539
4540         SYSCTL_ADD_PROC(ctx, debug_list,
4541             OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
4542             pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
4543
4544         SYSCTL_ADD_PROC(ctx, debug_list,
4545             OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
4546             pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
4547
4548         SYSCTL_ADD_PROC(ctx, debug_list,
4549             OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
4550             pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
4551
4552         if (pf->has_i2c) {
4553                 SYSCTL_ADD_PROC(ctx, debug_list,
4554                     OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4555                     pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
4556
4557                 SYSCTL_ADD_PROC(ctx, debug_list,
4558                     OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4559                     pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
4560
4561                 SYSCTL_ADD_PROC(ctx, debug_list,
4562                     OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
4563                     pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
4564         }
4565
4566 #ifdef PCI_IOV
4567         SYSCTL_ADD_UINT(ctx, debug_list,
4568             OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4569             0, "PF/VF Virtual Channel debug level");
4570 #endif
4571 }
4572
4573 /*
4574  * Primarily for finding out, at runtime, how many queues can be
4575  * assigned to VFs.
4576  */
4577 static int
4578 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4579 {
4580         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4581         int queues;
4582
4583         IXL_PF_LOCK(pf);
4584         queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
4585         IXL_PF_UNLOCK(pf);
4586
4587         return sysctl_handle_int(oidp, NULL, queues, req);
4588 }
4589
4590 /*
4591 ** Set flow control using sysctl:
4592 **      0 - off
4593 **      1 - rx pause
4594 **      2 - tx pause
4595 **      3 - full
4596 */
4597 int
4598 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4599 {
4600         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4601         struct i40e_hw *hw = &pf->hw;
4602         device_t dev = pf->dev;
4603         int requested_fc, error = 0;
4604         enum i40e_status_code aq_error = 0;
4605         u8 fc_aq_err = 0;
4606
4607         /* Get request */
4608         requested_fc = pf->fc;
4609         error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4610         if ((error) || (req->newptr == NULL))
4611                 return (error);
4612         if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
4613                 device_printf(dev, "Interface is currently in FW recovery mode. "
4614                                 "Setting flow control not supported\n");
4615                 return (EINVAL);
4616         }
4617         if (requested_fc < 0 || requested_fc > 3) {
4618                 device_printf(dev,
4619                     "Invalid fc mode; valid modes are 0 through 3\n");
4620                 return (EINVAL);
4621         }
4622
4623         /* Set fc ability for port */
4624         hw->fc.requested_mode = requested_fc;
4625         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4626         if (aq_error) {
4627                 device_printf(dev,
4628                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4629                     __func__, aq_error, fc_aq_err);
4630                 return (EIO);
4631         }
4632         pf->fc = requested_fc;
4633
4634         /* Get new link state */
4635         i40e_msec_delay(250);
4636         hw->phy.get_link_info = TRUE;
4637         i40e_get_link_status(hw, &pf->link_up);
4638
4639         return (0);
4640 }
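/*
 * Editorial note: a minimal userland sketch (illustrative only, not
 * part of the driver) of driving the handler above via sysctlbyname(3).
 * "dev.ixl.0" assumes unit 0, and setting the OID requires privilege.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
        int fc = 3;     /* 3 - full (rx and tx pause) */

        if (sysctlbyname("dev.ixl.0.fc", NULL, NULL, &fc, sizeof(fc)) == -1) {
                perror("sysctlbyname(dev.ixl.0.fc)");
                return (1);
        }
        printf("Requested full flow control\n");
        return (0);
}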
4641
4642
4643 static const char *
4644 ixl_link_speed_string(u8 link_speed)
4645 {
4646         const char * link_speed_str[] = {
4647                 "Unknown",
4648                 "100 Mbps",
4649                 "1 Gbps",
4650                 "10 Gbps",
4651                 "40 Gbps",
4652                 "20 Gbps",
4653                 "25 Gbps",
4654                 "2.5 Gbps",
4655                 "5 Gbps"
4656         };
4657         int index;
4658
4659         switch (link_speed) {
4660                 case I40E_LINK_SPEED_100MB:
4661                         index = 1;
4662                         break;
4663                 case I40E_LINK_SPEED_1GB:
4664                         index = 2;
4665                         break;
4666                 case I40E_LINK_SPEED_10GB:
4667                         index = 3;
4668                         break;
4669                 case I40E_LINK_SPEED_40GB:
4670                         index = 4;
4671                         break;
4672                 case I40E_LINK_SPEED_20GB:
4673                         index = 5;
4674                         break;
4675                 case I40E_LINK_SPEED_25GB:
4676                         index = 6;
4677                         break;
4678                 case I40E_LINK_SPEED_2_5GB:
4679                         index = 7;
4680                         break;
4681                 case I40E_LINK_SPEED_5GB:
4682                         index = 8;
4683                         break;
4684                 case I40E_LINK_SPEED_UNKNOWN:
4685                 default:
4686                         index = 0;
4687                         break;
4688         }
4689
4690         return (link_speed_str[index]);
4691 }
4692
4693 int
4694 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
4695 {
4696         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4697         struct i40e_hw *hw = &pf->hw;
4698         int error = 0;
4699
4700         ixl_update_link_status(pf);
4701
4702         error = sysctl_handle_string(oidp,
4703             __DECONST(void *,
4704                 ixl_link_speed_string(hw->phy.link_info.link_speed)),
4705             8, req);
4706
4707         return (error);
4708 }
4709
4710 /*
4711  * Converts an 8-bit link-speed bitmap between the sysctl flag
4712  * encoding and the Admin Queue flag encoding.
4713  */
4714 static u8
4715 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
4716 {
4717 #define SPEED_MAP_SIZE 8
4718         static u16 speedmap[SPEED_MAP_SIZE] = {
4719                 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
4720                 (I40E_LINK_SPEED_1GB   | (0x2 << 8)),
4721                 (I40E_LINK_SPEED_10GB  | (0x4 << 8)),
4722                 (I40E_LINK_SPEED_20GB  | (0x8 << 8)),
4723                 (I40E_LINK_SPEED_25GB  | (0x10 << 8)),
4724                 (I40E_LINK_SPEED_40GB  | (0x20 << 8)),
4725                 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
4726                 (I40E_LINK_SPEED_5GB   | (0x80 << 8)),
4727         };
4728         u8 retval = 0;
4729
4730         for (int i = 0; i < SPEED_MAP_SIZE; i++) {
4731                 if (to_aq)
4732                         retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
4733                 else
4734                         retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
4735         }
4736
4737         return (retval);
4738 }
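/*
 * Editorial note: each speedmap entry packs the Admin Queue flag in its
 * low byte and the matching sysctl flag in its high byte, so one table
 * converts in both directions.  For example, with to_aq == true, a
 * sysctl value of 0x6 (1G | 10G) matches the entries whose high bytes
 * are 0x2 and 0x4 and yields I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB.
 */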
4739
4740 int
4741 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
4742 {
4743         struct i40e_hw *hw = &pf->hw;
4744         device_t dev = pf->dev;
4745         struct i40e_aq_get_phy_abilities_resp abilities;
4746         struct i40e_aq_set_phy_config config;
4747         enum i40e_status_code aq_error = 0;
4748
4749         /* Get current capability information */
4750         aq_error = i40e_aq_get_phy_capabilities(hw,
4751             FALSE, FALSE, &abilities, NULL);
4752         if (aq_error) {
4753                 device_printf(dev,
4754                     "%s: Error getting phy capabilities %d,"
4755                     " aq error: %d\n", __func__, aq_error,
4756                     hw->aq.asq_last_status);
4757                 return (EIO);
4758         }
4759
4760         /* Prepare new config */
4761         bzero(&config, sizeof(config));
4762         if (from_aq)
4763                 config.link_speed = speeds;
4764         else
4765                 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
4766         config.phy_type = abilities.phy_type;
4767         config.phy_type_ext = abilities.phy_type_ext;
4768         config.abilities = abilities.abilities
4769             | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4770         config.eee_capability = abilities.eee_capability;
4771         config.eeer = abilities.eeer_val;
4772         config.low_power_ctrl = abilities.d3_lpan;
4773         config.fec_config = abilities.fec_cfg_curr_mod_ext_info
4774             & I40E_AQ_PHY_FEC_CONFIG_MASK;
4775
4776         /* Do aq command & restart link */
4777         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4778         if (aq_error) {
4779                 device_printf(dev,
4780                     "%s: Error setting new phy config %d,"
4781                     " aq error: %d\n", __func__, aq_error,
4782                     hw->aq.asq_last_status);
4783                 return (EIO);
4784         }
4785
4786         return (0);
4787 }
4788
4789 /*
4790 ** Supported link speeds
4791 **      Flags:
4792 **       0x1 - 100 Mb
4793 **       0x2 - 1G
4794 **       0x4 - 10G
4795 **       0x8 - 20G
4796 **      0x10 - 25G
4797 **      0x20 - 40G
4798 **      0x40 - 2.5G
4799 **      0x80 - 5G
4800 */
4801 static int
4802 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
4803 {
4804         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4805         int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
4806
4807         return sysctl_handle_int(oidp, NULL, supported, req);
4808 }
4809
4810 /*
4811 ** Control link advertise speed:
4812 **      Flags:
4813 **       0x1 - advertise 100 Mb
4814 **       0x2 - advertise 1G
4815 **       0x4 - advertise 10G
4816 **       0x8 - advertise 20G
4817 **      0x10 - advertise 25G
4818 **      0x20 - advertise 40G
4819 **      0x40 - advertise 2.5G
4820 **      0x80 - advertise 5G
4821 **
4822 **      Set to 0 to disable link
4823 */
4824 int
4825 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
4826 {
4827         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4828         device_t dev = pf->dev;
4829         u8 converted_speeds;
4830         int requested_ls = 0;
4831         int error = 0;
4832
4833         /* Read in new mode */
4834         requested_ls = pf->advertised_speed;
4835         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4836         if ((error) || (req->newptr == NULL))
4837                 return (error);
4838         if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
4839                 device_printf(dev, "Interface is currently in FW recovery mode. "
4840                                 "Setting advertise speed not supported\n");
4841                 return (EINVAL);
4842         }
4843
4844         /* Error out if bits outside of possible flag range are set */
4845         if ((requested_ls & ~((u8)0xFF)) != 0) {
4846                 device_printf(dev, "Input advertised speed out of range; "
4847                     "valid flags are: 0x%02x\n",
4848                     ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4849                 return (EINVAL);
4850         }
4851
4852         /* Check if adapter supports input value */
4853         converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
4854         if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
4855                 device_printf(dev, "Invalid advertised speed; "
4856                     "valid flags are: 0x%02x\n",
4857                     ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4858                 return (EINVAL);
4859         }
4860
4861         error = ixl_set_advertised_speeds(pf, requested_ls, false);
4862         if (error)
4863                 return (error);
4864
4865         pf->advertised_speed = requested_ls;
4866         ixl_update_link_status(pf);
4867         return (0);
4868 }
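/*
 * Editorial note: for example, "sysctl dev.ixl.0.advertise_speed=0x6"
 * (unit 0 is illustrative) advertises 1G and 10G only, while writing 0
 * disables link entirely, per the flag table above.
 */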
4869
4870 /*
4871  * Input: bitmap of enum i40e_aq_link_speed
4872  */
4873 static u64
4874 ixl_max_aq_speed_to_value(u8 link_speeds)
4875 {
4876         if (link_speeds & I40E_LINK_SPEED_40GB)
4877                 return IF_Gbps(40);
4878         if (link_speeds & I40E_LINK_SPEED_25GB)
4879                 return IF_Gbps(25);
4880         if (link_speeds & I40E_LINK_SPEED_20GB)
4881                 return IF_Gbps(20);
4882         if (link_speeds & I40E_LINK_SPEED_10GB)
4883                 return IF_Gbps(10);
4884         if (link_speeds & I40E_LINK_SPEED_5GB)
4885                 return IF_Gbps(5);
4886         if (link_speeds & I40E_LINK_SPEED_2_5GB)
4887                 return IF_Mbps(2500);
4888         if (link_speeds & I40E_LINK_SPEED_1GB)
4889                 return IF_Gbps(1);
4890         if (link_speeds & I40E_LINK_SPEED_100MB)
4891                 return IF_Mbps(100);
4892         else
4893                 /* Minimum supported link speed */
4894                 return IF_Mbps(100);
4895 }
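/*
 * Editorial note: the checks above run from fastest to slowest, so a
 * bitmap such as (I40E_LINK_SPEED_25GB | I40E_LINK_SPEED_10GB) resolves
 * to IF_Gbps(25), and an empty bitmap falls through to the 100 Mbps
 * floor.
 */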
4896
4897 /*
4898 ** Get the width and transaction speed of
4899 ** the bus this adapter is plugged into.
4900 */
4901 void
4902 ixl_get_bus_info(struct ixl_pf *pf)
4903 {
4904         struct i40e_hw *hw = &pf->hw;
4905         device_t dev = pf->dev;
4906         u16 link;
4907         u32 offset, num_ports;
4908         u64 max_speed;
4909
4910         /* Some devices don't use PCIE */
4911         if (hw->mac.type == I40E_MAC_X722)
4912                 return;
4913
4914         /* Read PCI Express Capabilities Link Status Register */
4915         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4916         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4917
4918         /* Fill out hw struct with PCIE info */
4919         i40e_set_pci_config_data(hw, link);
4920
4921         /* Use info to print out bandwidth messages */
4922         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4923             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4924             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4925             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4926             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4927             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4928             (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
4929             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4930             ("Unknown"));
4931
4932         /*
4933          * If adapter is in slot with maximum supported speed,
4934          * no warning message needs to be printed out.
4935          */
4936         if (hw->bus.speed >= i40e_bus_speed_8000 &&
4937             hw->bus.width >= i40e_bus_width_pcie_x8)
4938                 return;
4939
4940         num_ports = bitcount32(hw->func_caps.valid_functions);
4941         max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
4942
4943         if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
4944                 device_printf(dev, "PCI-Express bandwidth available"
4945                     " for this device may be insufficient for"
4946                     " optimal performance.\n");
4947                 device_printf(dev, "Please move the device to a different"
4948                     " PCI-e link with more lanes and/or higher"
4949                     " transfer rate.\n");
4950         }
4951 }
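/*
 * Editorial note on the bandwidth check above: assuming the shared-code
 * enums encode hw->bus.speed in MT/s and hw->bus.width in lanes, their
 * product approximates the slot's Mb/s, which is compared against the
 * ports' aggregate demand.  For example, four 10 Gb ports want
 * 4 * 10000 = 40000 Mb/s, while a 5.0GT/s x4 slot provides roughly
 * 5000 * 4 = 20000, so the warning would fire.
 */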
4952
4953 static int
4954 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4955 {
4956         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
4957         struct i40e_hw  *hw = &pf->hw;
4958         struct sbuf     *sbuf;
4959
4960         sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4961         ixl_nvm_version_str(hw, sbuf);
4962         sbuf_finish(sbuf);
4963         sbuf_delete(sbuf);
4964
4965         return (0);
4966 }
4967
4968 void
4969 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4970 {
4971         if ((nvma->command == I40E_NVM_READ) &&
4972             ((nvma->config & 0xFF) == 0xF) &&
4973             (((nvma->config & 0xF00) >> 8) == 0xF) &&
4974             (nvma->offset == 0) &&
4975             (nvma->data_size == 1)) {
4976                 /* Get Driver Status command; deliberately not logged. */
4977         }
4978         else if (nvma->command == I40E_NVM_READ) {
4979                 /* Other NVM reads are deliberately not logged. */
4980         }
4981         else {
4982                 switch (nvma->command) {
4983                 case 0xB:
4984                         device_printf(dev, "- command: I40E_NVM_READ\n");
4985                         break;
4986                 case 0xC:
4987                         device_printf(dev, "- command: I40E_NVM_WRITE\n");
4988                         break;
4989                 default:
4990                         device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
4991                         break;
4992                 }
4993
4994                 device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
4995                 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4996                 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4997                 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
4998         }
4999 }
5000
5001 int
5002 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
5003 {
5004         struct i40e_hw *hw = &pf->hw;
5005         struct i40e_nvm_access *nvma;
5006         device_t dev = pf->dev;
5007         enum i40e_status_code status = 0;
5008         size_t nvma_size, ifd_len, exp_len;
5009         int err, perrno;
5010
5011         DEBUGFUNC("ixl_handle_nvmupd_cmd");
5012
5013         /* Sanity checks */
5014         nvma_size = sizeof(struct i40e_nvm_access);
5015         ifd_len = ifd->ifd_len;
5016
5017         if (ifd_len < nvma_size ||
5018             ifd->ifd_data == NULL) {
5019                 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
5020                     __func__);
5021                 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
5022                     __func__, ifd_len, nvma_size);
5023                 device_printf(dev, "%s: data pointer: %p\n", __func__,
5024                     ifd->ifd_data);
5025                 return (EINVAL);
5026         }
5027
5028         nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
5029         err = copyin(ifd->ifd_data, nvma, ifd_len);
5030         if (err) {
5031                 device_printf(dev, "%s: Cannot get request from user space\n",
5032                     __func__);
5033                 free(nvma, M_DEVBUF);
5034                 return (err);
5035         }
5036
5037         if (pf->dbg_mask & IXL_DBG_NVMUPD)
5038                 ixl_print_nvm_cmd(dev, nvma);
5039
5040         if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
5041                 int count = 0;
5042                 while (count++ < 100) {
5043                         i40e_msec_delay(100);
5044                         if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
5045                                 break;
5046                 }
5047         }
5048
5049         if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
5050                 free(nvma, M_DEVBUF);
5051                 return (-EBUSY);
5052         }
5053
5054         if (nvma->data_size < 1 || nvma->data_size > 4096) {
5055                 device_printf(dev, "%s: invalid request, data size not in supported range\n",
5056                     __func__);
5057                 free(nvma, M_DEVBUF);
5058                 return (EINVAL);
5059         }
5060
5061         /*
5062          * Older versions of the NVM update tool don't set ifd_len to the size
5063          * of the entire buffer passed to the ioctl. Check the data_size field
5064          * in the contained i40e_nvm_access struct and ensure everything is
5065          * copied in from userspace.
5066          */
5067         exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
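        /*
         * Editorial note: the struct's last member is a one-byte data
         * array (hence the "- 1"), so e.g. a 512-byte payload needs
         * sizeof(struct i40e_nvm_access) + 511 bytes in all.
         */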
5068
5069         if (ifd_len < exp_len) {
5070                 ifd_len = exp_len;
5071                 nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
5072                 err = copyin(ifd->ifd_data, nvma, ifd_len);
5073                 if (err) {
5074                         device_printf(dev, "%s: Cannot get request from user space\n",
5075                                         __func__);
5076                         free(nvma, M_DEVBUF);
5077                         return (err);
5078                 }
5079         }
5080
5081         IXL_PF_LOCK(pf);
5082         status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
5083         IXL_PF_UNLOCK(pf);
5084
5085         err = copyout(nvma, ifd->ifd_data, ifd_len);
5086         free(nvma, M_DEVBUF);
5087         if (err) {
5088                 device_printf(dev, "%s: Cannot return data to user space\n",
5089                                 __func__);
5090                 return (err);
5091         }
5092
5093         /* Let the nvmupdate tool report errors; show them here only when debug is enabled */
5094         if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
5095                 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
5096                     i40e_stat_str(hw, status), perrno);
5097
5098         /*
5099          * -EPERM is actually ERESTART, which the kernel interprets as it needing
5100          * to run this ioctl again. So use -EACCES for -EPERM instead.
5101          */
5102         if (perrno == -EPERM)
5103                 return (-EACCES);
5104         else
5105                 return (perrno);
5106 }
5107
5108 int
5109 ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *pf, struct ifreq *ifr)
5110 {
5111         struct ifi2creq i2c;
5112         int error = 0;
5113         int i;
5114
5115         if (pf->read_i2c_byte == NULL)
5116                 return (EINVAL);
5117
5118 #ifdef ifr_data
5119         error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
5120 #else
5121         error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
5122 #endif
5123
5124         if (error != 0)
5125                 return (error);
5126         if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5127                 error = EINVAL;
5128                 return (error);
5129         }
5130         if (i2c.len > sizeof(i2c.data)) {
5131                 error = EINVAL;
5132                 return (error);
5133         }
5134
5135         for (i = 0; i < i2c.len; ++i) {
5136                 if (pf->read_i2c_byte(pf, i2c.offset + i,
5137                     i2c.dev_addr, &i2c.data[i]))
5138                         return (EIO);
5139         }
5140
5141 #ifdef ifr_data
5142         error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
5143 #else
5144         error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
5145 #endif
5146
5147         return (error);
5148 }
5149
5150 /*********************************************************************
5151  *
5152  *  Media Ioctl callback
5153  *
5154  *  This routine is called whenever the user queries the status of
5155  *  the interface using ifconfig.
5156  *
5157  *  When adding new media types here, make sure to add them to
5158  *  ixl_add_ifmedia(), too.
5159  *
5160  **********************************************************************/
5161 void
5162 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
5163 {
5164         struct ixl_vsi  *vsi = ifp->if_softc;
5165         struct ixl_pf   *pf = vsi->back;
5166         struct i40e_hw  *hw = &pf->hw;
5167
5168         INIT_DEBUGOUT("ixl_media_status: begin");
5169
5170         /* Don't touch PF during reset */
5171         if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING)
5172                 return;
5173
5174         IXL_PF_LOCK(pf);
5175
5176         i40e_get_link_status(hw, &pf->link_up);
5177         ixl_update_link_status(pf);
5178
5179         ifmr->ifm_status = IFM_AVALID;
5180         ifmr->ifm_active = IFM_ETHER;
5181
5182         if (!pf->link_up) {
5183                 IXL_PF_UNLOCK(pf);
5184                 return;
5185         }
5186
5187         ifmr->ifm_status |= IFM_ACTIVE;
5188
5189         /* Hardware always does full-duplex */
5190         ifmr->ifm_active |= IFM_FDX;
5191
5192         switch (hw->phy.link_info.phy_type) {
5193                 /* 100 M */
5194                 case I40E_PHY_TYPE_100BASE_TX:
5195                         ifmr->ifm_active |= IFM_100_TX;
5196                         break;
5197                 /* 1 G */
5198                 case I40E_PHY_TYPE_1000BASE_T:
5199                         ifmr->ifm_active |= IFM_1000_T;
5200                         break;
5201                 case I40E_PHY_TYPE_1000BASE_SX:
5202                         ifmr->ifm_active |= IFM_1000_SX;
5203                         break;
5204                 case I40E_PHY_TYPE_1000BASE_LX:
5205                         ifmr->ifm_active |= IFM_1000_LX;
5206                         break;
5207                 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
5208                         ifmr->ifm_active |= IFM_1000_T;
5209                         break;
5210                 /* 2.5 G */
5211                 case I40E_PHY_TYPE_2_5GBASE_T:
5212                         ifmr->ifm_active |= IFM_2500_T;
5213                         break;
5214                 /* 5 G */
5215                 case I40E_PHY_TYPE_5GBASE_T:
5216                         ifmr->ifm_active |= IFM_5000_T;
5217                         break;
5218                 /* 10 G */
5219                 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
5220                         ifmr->ifm_active |= IFM_10G_TWINAX;
5221                         break;
5222                 case I40E_PHY_TYPE_10GBASE_SR:
5223                         ifmr->ifm_active |= IFM_10G_SR;
5224                         break;
5225                 case I40E_PHY_TYPE_10GBASE_LR:
5226                         ifmr->ifm_active |= IFM_10G_LR;
5227                         break;
5228                 case I40E_PHY_TYPE_10GBASE_T:
5229                         ifmr->ifm_active |= IFM_10G_T;
5230                         break;
5231                 case I40E_PHY_TYPE_XAUI:
5232                 case I40E_PHY_TYPE_XFI:
5233                         ifmr->ifm_active |= IFM_10G_TWINAX;
5234                         break;
5235                 case I40E_PHY_TYPE_10GBASE_AOC:
5236                         ifmr->ifm_active |= IFM_10G_AOC;
5237                         break;
5238                 /* 25 G */
5239                 case I40E_PHY_TYPE_25GBASE_KR:
5240                         ifmr->ifm_active |= IFM_25G_KR;
5241                         break;
5242                 case I40E_PHY_TYPE_25GBASE_CR:
5243                         ifmr->ifm_active |= IFM_25G_CR;
5244                         break;
5245                 case I40E_PHY_TYPE_25GBASE_SR:
5246                         ifmr->ifm_active |= IFM_25G_SR;
5247                         break;
5248                 case I40E_PHY_TYPE_25GBASE_LR:
5249                         ifmr->ifm_active |= IFM_25G_LR;
5250                         break;
5251                 case I40E_PHY_TYPE_25GBASE_AOC:
5252                         ifmr->ifm_active |= IFM_25G_AOC;
5253                         break;
5254                 case I40E_PHY_TYPE_25GBASE_ACC:
5255                         ifmr->ifm_active |= IFM_25G_ACC;
5256                         break;
5257                 /* 40 G */
5258                 case I40E_PHY_TYPE_40GBASE_CR4:
5259                 case I40E_PHY_TYPE_40GBASE_CR4_CU:
5260                         ifmr->ifm_active |= IFM_40G_CR4;
5261                         break;
5262                 case I40E_PHY_TYPE_40GBASE_SR4:
5263                         ifmr->ifm_active |= IFM_40G_SR4;
5264                         break;
5265                 case I40E_PHY_TYPE_40GBASE_LR4:
5266                         ifmr->ifm_active |= IFM_40G_LR4;
5267                         break;
5268                 case I40E_PHY_TYPE_XLAUI:
5269                         ifmr->ifm_active |= IFM_OTHER;
5270                         break;
5271                 case I40E_PHY_TYPE_1000BASE_KX:
5272                         ifmr->ifm_active |= IFM_1000_KX;
5273                         break;
5274                 case I40E_PHY_TYPE_SGMII:
5275                         ifmr->ifm_active |= IFM_1000_SGMII;
5276                         break;
5277                 /* ERJ: What's the difference between these? */
5278                 case I40E_PHY_TYPE_10GBASE_CR1_CU:
5279                 case I40E_PHY_TYPE_10GBASE_CR1:
5280                         ifmr->ifm_active |= IFM_10G_CR1;
5281                         break;
5282                 case I40E_PHY_TYPE_10GBASE_KX4:
5283                         ifmr->ifm_active |= IFM_10G_KX4;
5284                         break;
5285                 case I40E_PHY_TYPE_10GBASE_KR:
5286                         ifmr->ifm_active |= IFM_10G_KR;
5287                         break;
5288                 case I40E_PHY_TYPE_SFI:
5289                         ifmr->ifm_active |= IFM_10G_SFI;
5290                         break;
5291                 /* Our single 20G media type */
5292                 case I40E_PHY_TYPE_20GBASE_KR2:
5293                         ifmr->ifm_active |= IFM_20G_KR2;
5294                         break;
5295                 case I40E_PHY_TYPE_40GBASE_KR4:
5296                         ifmr->ifm_active |= IFM_40G_KR4;
5297                         break;
5298                 case I40E_PHY_TYPE_XLPPI:
5299                 case I40E_PHY_TYPE_40GBASE_AOC:
5300                         ifmr->ifm_active |= IFM_40G_XLPPI;
5301                         break;
5302                 /* Unknown to driver */
5303                 default:
5304                         ifmr->ifm_active |= IFM_UNKNOWN;
5305                         break;
5306         }
5307         /* Report flow control status as well */
5308         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
5309                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
5310         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
5311                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
5312
5313         IXL_PF_UNLOCK(pf);
5314 }
5315
5316 void
5317 ixl_init(void *arg)
5318 {
5319         struct ixl_pf *pf = arg;
5320
5321         IXL_PF_LOCK(pf);
5322         ixl_init_locked(pf);
5323         IXL_PF_UNLOCK(pf);
5324 }
5325
5326 /*
5327  * NOTE: Fortville does not support forcing media speeds. Instead,
5328  * use the set_advertise sysctl to set the speeds Fortville
5329  * will advertise or be allowed to operate at.
5330  */
5331 int
5332 ixl_media_change(struct ifnet * ifp)
5333 {
5334         struct ixl_vsi *vsi = ifp->if_softc;
5335         struct ifmedia *ifm = &vsi->media;
5336
5337         INIT_DEBUGOUT("ixl_media_change: begin");
5338
5339         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5340                 return (EINVAL);
5341
5342         if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
5343
5344         return (ENODEV);
5345 }
5346
5347 /*********************************************************************
5348  *  Ioctl entry point
5349  *
5350  *  ixl_ioctl is called when the user wants to configure the
5351  *  interface.
5352  *
5353  *  return 0 on success, positive on failure
5354  **********************************************************************/
5355
5356 int
5357 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
5358 {
5359         struct ixl_vsi  *vsi = ifp->if_softc;
5360         struct ixl_pf   *pf = vsi->back;
5361         struct ifreq    *ifr = (struct ifreq *)data;
5362         struct ifdrv    *ifd = (struct ifdrv *)data;
5363 #if defined(INET) || defined(INET6)
5364         struct ifaddr *ifa = (struct ifaddr *)data;
5365         bool            avoid_reset = FALSE;
5366 #endif
5367         int             error = 0;
5368
5369         if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) != 0) {
5370                 /* We are in recovery mode supporting only NVM update */
5371                 switch (command) {
5372                 case SIOCSDRVSPEC:
5373                 case SIOCGDRVSPEC:
5374                         IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5375                             "Info)\n");
5376
5377                         /* NVM update command */
5378                         if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5379                                 error = ixl_handle_nvmupd_cmd(pf, ifd);
5380                         else
5381                                 error = EINVAL;
5382                         break;
5383                 default:
5384                         error = EINVAL;
5385                         break;
5386                 }
5387
5388                 return (error);
5389         }
5390
5391         switch (command) {
5392
5393         case SIOCSIFADDR:
5394                 IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
5395 #ifdef INET
5396                 if (ifa->ifa_addr->sa_family == AF_INET)
5397                         avoid_reset = TRUE;
5398 #endif
5399 #ifdef INET6
5400                 if (ifa->ifa_addr->sa_family == AF_INET6)
5401                         avoid_reset = TRUE;
5402 #endif
5403 #if defined(INET) || defined(INET6)
5404                 /*
5405                 ** Calling init results in link renegotiation,
5406                 ** so we avoid doing it when possible.
5407                 */
5408                 if (avoid_reset) {
5409                         ifp->if_flags |= IFF_UP;
5410                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
5411                                 ixl_init(pf);
5412 #ifdef INET
5413                         if (!(ifp->if_flags & IFF_NOARP))
5414                                 arp_ifinit(ifp, ifa);
5415 #endif
5416                 } else
5417                         error = ether_ioctl(ifp, command, data);
5418                 break;
5419 #endif
5420         case SIOCSIFMTU:
5421                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5422                 if (ifr->ifr_mtu > IXL_MAX_FRAME -
5423                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
5424                         error = EINVAL;
5425                 } else {
5426                         IXL_PF_LOCK(pf);
5427                         ifp->if_mtu = ifr->ifr_mtu;
5428                         vsi->max_frame_size =
5429                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
5430                             + ETHER_VLAN_ENCAP_LEN;
5431                         ixl_init_locked(pf);
5432                         IXL_PF_UNLOCK(pf);
5433                 }
5434                 break;
5435         case SIOCSIFFLAGS:
5436                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5437                 IXL_PF_LOCK(pf);
5438                 if (ifp->if_flags & IFF_UP) {
5439                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5440                                 if ((ifp->if_flags ^ pf->if_flags) &
5441                                     (IFF_PROMISC | IFF_ALLMULTI)) {
5442                                         ixl_set_promisc(vsi);
5443                                 }
5444                         } else
5445                                 ixl_init_locked(pf);
5446                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5447                         ixl_stop_locked(pf);
5448
5449                 pf->if_flags = ifp->if_flags;
5450                 IXL_PF_UNLOCK(pf);
5451                 break;
5452         case SIOCSDRVSPEC:
5453         case SIOCGDRVSPEC:
5454                 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5455                     "Info)\n");
5456
5457                 /* NVM update command */
5458                 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5459                         error = ixl_handle_nvmupd_cmd(pf, ifd);
5460                 else
5461                         error = EINVAL;
5462                 break;
5463         case SIOCADDMULTI:
5464                 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
5465                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5466                         IXL_PF_LOCK(pf);
5467                         ixl_disable_rings_intr(vsi);
5468                         ixl_add_multi(vsi);
5469                         ixl_enable_intr(vsi);
5470                         IXL_PF_UNLOCK(pf);
5471                 }
5472                 break;
5473         case SIOCDELMULTI:
5474                 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
5475                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5476                         IXL_PF_LOCK(pf);
5477                         ixl_disable_rings_intr(vsi);
5478                         ixl_del_multi(vsi);
5479                         ixl_enable_intr(vsi);
5480                         IXL_PF_UNLOCK(pf);
5481                 }
5482                 break;
5483         case SIOCSIFMEDIA:
5484         case SIOCGIFMEDIA:
5485         case SIOCGIFXMEDIA:
5486                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5487                 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5488                 break;
5489         case SIOCSIFCAP:
5490         {
5491                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5492                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5493
5494                 ixl_cap_txcsum_tso(vsi, ifp, mask);
5495
5496                 if (mask & IFCAP_RXCSUM)
5497                         ifp->if_capenable ^= IFCAP_RXCSUM;
5498                 if (mask & IFCAP_RXCSUM_IPV6)
5499                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5500                 if (mask & IFCAP_LRO)
5501                         ifp->if_capenable ^= IFCAP_LRO;
5502                 if (mask & IFCAP_VLAN_HWTAGGING)
5503                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5504                 if (mask & IFCAP_VLAN_HWFILTER)
5505                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5506                 if (mask & IFCAP_VLAN_HWTSO)
5507                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5508                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5509                         IXL_PF_LOCK(pf);
5510                         ixl_init_locked(pf);
5511                         IXL_PF_UNLOCK(pf);
5512                 }
5513                 VLAN_CAPABILITIES(ifp);
5514
5515                 break;
5516         }
5517 #if __FreeBSD_version >= 1003000
5518         case SIOCGI2C:
5519         {
5520                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5521                 if (!pf->has_i2c)
5522                         return (ENOTTY);
5523
5524                 error = ixl_handle_i2c_eeprom_read_cmd(pf, ifr);
5525                 break;
5526         }
5527 #endif
5528         default:
5529                 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5530                 error = ether_ioctl(ifp, command, data);
5531                 break;
5532         }
5533
5534         return (error);
5535 }
5536
5537 int
5538 ixl_find_i2c_interface(struct ixl_pf *pf)
5539 {
5540         struct i40e_hw *hw = &pf->hw;
5541         bool i2c_en, port_matched;
5542         u32 reg;
5543
5544         for (int i = 0; i < 4; i++) {
5545                 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
5546                 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
5547                 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
5548                     >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
5549                     & BIT(hw->port);
5550                 if (i2c_en && port_matched)
5551                         return (i);
5552         }
5553
5554         return (-1);
5555 }
5556
5557 static char *
5558 ixl_phy_type_string(u32 bit_pos, bool ext)
5559 {
5560         static char * phy_types_str[32] = {
5561                 "SGMII",
5562                 "1000BASE-KX",
5563                 "10GBASE-KX4",
5564                 "10GBASE-KR",
5565                 "40GBASE-KR4",
5566                 "XAUI",
5567                 "XFI",
5568                 "SFI",
5569                 "XLAUI",
5570                 "XLPPI",
5571                 "40GBASE-CR4",
5572                 "10GBASE-CR1",
5573                 "SFP+ Active DA",
5574                 "QSFP+ Active DA",
5575                 "Reserved (14)",
5576                 "Reserved (15)",
5577                 "Reserved (16)",
5578                 "100BASE-TX",
5579                 "1000BASE-T",
5580                 "10GBASE-T",
5581                 "10GBASE-SR",
5582                 "10GBASE-LR",
5583                 "10GBASE-SFP+Cu",
5584                 "10GBASE-CR1",
5585                 "40GBASE-CR4",
5586                 "40GBASE-SR4",
5587                 "40GBASE-LR4",
5588                 "1000BASE-SX",
5589                 "1000BASE-LX",
5590                 "1000BASE-T Optical",
5591                 "20GBASE-KR2",
5592                 "Reserved (31)"
5593         };
5594         static char * ext_phy_types_str[8] = {
5595                 "25GBASE-KR",
5596                 "25GBASE-CR",
5597                 "25GBASE-SR",
5598                 "25GBASE-LR",
5599                 "25GBASE-AOC",
5600                 "25GBASE-ACC",
5601                 "2.5GBASE-T",
5602                 "5GBASE-T"
5603         };
5604
5605         if (ext && bit_pos > 7) return "Invalid_Ext";
5606         if (bit_pos > 31) return "Invalid";
5607
5608         return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
5609 }
5610
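/*
 * Editorial note: in the link-status PHY type byte, values at or above
 * 0x1F index the extended (25G/2.5G/5G) table, which is why 0x1F is
 * subtracted before the extended lookup below.
 */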
5611 static char *
5612 ixl_phy_type_string_ls(u8 val)
5613 {
5614         if (val >= 0x1F)
5615                 return ixl_phy_type_string(val - 0x1F, true);
5616         else
5617                 return ixl_phy_type_string(val, false);
5618 }
5619
5620 int
5621 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
5622 {
5623         device_t dev = pf->dev;
5624         struct i40e_hw *hw = &pf->hw;
5625         struct i40e_aq_desc desc;
5626         enum i40e_status_code status;
5627
5628         struct i40e_aqc_get_link_status *aq_link_status =
5629                 (struct i40e_aqc_get_link_status *)&desc.params.raw;
5630
5631         i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
5632         link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
5633         status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
5634         if (status) {
5635                 device_printf(dev,
5636                     "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
5637                     __func__, i40e_stat_str(hw, status),
5638                     i40e_aq_str(hw, hw->aq.asq_last_status));
5639                 return (EIO);
5640         }
5641
5642         bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
5643         return (0);
5644 }
5645
5646 static int
5647 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5648 {
5649         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5650         device_t dev = pf->dev;
5651         struct sbuf *buf;
5652         int error = 0;
5653
5654         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5655         if (!buf) {
5656                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5657                 return (ENOMEM);
5658         }
5659
5660         struct i40e_aqc_get_link_status link_status;
5661         error = ixl_aq_get_link_status(pf, &link_status);
5662         if (error) {
5663                 sbuf_delete(buf);
5664                 return (error);
5665         }
5666
5667         sbuf_printf(buf, "\n"
5668             "PHY Type : 0x%02x<%s>\n"
5669             "Speed    : 0x%02x\n"
5670             "Link info: 0x%02x\n"
5671             "AN info  : 0x%02x\n"
5672             "Ext info : 0x%02x\n"
5673             "Loopback : 0x%02x\n"
5674             "Max Frame: %d\n"
5675             "Config   : 0x%02x\n"
5676             "Power    : 0x%02x",
5677             link_status.phy_type,
5678             ixl_phy_type_string_ls(link_status.phy_type),
5679             link_status.link_speed, 
5680             link_status.link_info,
5681             link_status.an_info,
5682             link_status.ext_info,
5683             link_status.loopback,
5684             link_status.max_frame_size,
5685             link_status.config,
5686             link_status.power_desc);
5687
5688         error = sbuf_finish(buf);
5689         if (error)
5690                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5691
5692         sbuf_delete(buf);
5693         return (error);
5694 }
5695
5696 static int
5697 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5698 {
5699         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5700         struct i40e_hw *hw = &pf->hw;
5701         device_t dev = pf->dev;
5702         enum i40e_status_code status;
5703         struct i40e_aq_get_phy_abilities_resp abilities;
5704         struct sbuf *buf;
5705         int error = 0;
5706
5707         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5708         if (!buf) {
5709                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5710                 return (ENOMEM);
5711         }
5712
5713         status = i40e_aq_get_phy_capabilities(hw,
5714             FALSE, FALSE, &abilities, NULL);
5715         if (status) {
5716                 device_printf(dev,
5717                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5718                     __func__, i40e_stat_str(hw, status),
5719                     i40e_aq_str(hw, hw->aq.asq_last_status));
5720                 sbuf_delete(buf);
5721                 return (EIO);
5722         }
5723
5724         sbuf_printf(buf, "\n"
5725             "PHY Type : %08x",
5726             abilities.phy_type);
5727
5728         if (abilities.phy_type != 0) {
5729                 sbuf_printf(buf, "<");
5730                 for (int i = 0; i < 32; i++)
5731                         if ((1 << i) & abilities.phy_type)
5732                                 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
5733                 sbuf_printf(buf, ">");
5734         }
5735
5736         sbuf_printf(buf, "\nPHY Ext  : %02x",
5737             abilities.phy_type_ext);
5738
5739         if (abilities.phy_type_ext != 0) {
5740                 sbuf_printf(buf, "<");
5741                 for (int i = 0; i < 8; i++)
5742                         if ((1 << i) & abilities.phy_type_ext)
5743                                 sbuf_printf(buf, "%s,",
5744                                     ixl_phy_type_string(i, true));
5745                 sbuf_printf(buf, ">");
5746         }
5747
5748         sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
5749         if (abilities.link_speed != 0) {
5750                 u8 link_speed;
5751                 sbuf_printf(buf, " <");
5752                 for (int i = 0; i < 8; i++) {
5753                         link_speed = (1 << i) & abilities.link_speed;
5754                         if (link_speed)
5755                                 sbuf_printf(buf, "%s, ",
5756                                     ixl_link_speed_string(link_speed));
5757                 }
5758                 sbuf_printf(buf, ">");
5759         }
5760
5761         sbuf_printf(buf, "\n"
5762             "Abilities: %02x\n"
5763             "EEE cap  : %04x\n"
5764             "EEER reg : %08x\n"
5765             "D3 Lpan  : %02x\n"
5766             "ID       : %02x %02x %02x %02x\n"
5767             "ModType  : %02x %02x %02x\n"
5768             "ModType E: %01x\n"
5769             "FEC Cfg  : %02x\n"
5770             "Ext CC   : %02x",
5771             abilities.abilities, abilities.eee_capability,
5772             abilities.eeer_val, abilities.d3_lpan,
5773             abilities.phy_id[0], abilities.phy_id[1],
5774             abilities.phy_id[2], abilities.phy_id[3],
5775             abilities.module_type[0], abilities.module_type[1],
5776             abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
5777             abilities.fec_cfg_curr_mod_ext_info & 0x1F,
5778             abilities.ext_comp_code);
5779
5780         error = sbuf_finish(buf);
5781         if (error)
5782                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5783
5784         sbuf_delete(buf);
5785         return (error);
5786 }
5787
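/*
** Render the VSI's software MAC filter list (ftl) as a string sysctl,
** one "MAC, vlan, flags" entry per line. entry_len (42) must match the
** formatted width of each entry printed below.
*/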
5788 static int
5789 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5790 {
5791         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5792         struct ixl_vsi *vsi = &pf->vsi;
5793         struct ixl_mac_filter *f;
5794         char *buf, *buf_i;
5795
5796         int error = 0;
5797         int ftl_len = 0;
5798         int ftl_counter = 0;
5799         int buf_len = 0;
5800         int entry_len = 42;
5801
5802         SLIST_FOREACH(f, &vsi->ftl, next) {
5803                 ftl_len++;
5804         }
5805
5806         if (ftl_len < 1) {
5807                 sysctl_handle_string(oidp, "(none)", 6, req);
5808                 return (0);
5809         }
5810
5811         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5812         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
             if (buf == NULL) {
                     sysctl_handle_string(oidp, "(malloc failed)", 15, req);
                     return (ENOMEM);
             }
5813
5814         sprintf(buf_i++, "\n");
5815         SLIST_FOREACH(f, &vsi->ftl, next) {
5816                 sprintf(buf_i,
5817                     MAC_FORMAT ", vlan %4d, flags %#06x",
5818                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5819                 buf_i += entry_len;
5820                 /* don't print '\n' for last entry */
5821                 if (++ftl_counter != ftl_len) {
5822                         sprintf(buf_i, "\n");
5823                         buf_i++;
5824                 }
5825         }
5826
5827         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5828         if (error)
5829                 printf("sysctl error: %d\n", error);
5830         free(buf, M_DEVBUF);
5831         return error;
5832 }
5833
5834 #define IXL_SW_RES_SIZE 0x14
5835 int
5836 ixl_res_alloc_cmp(const void *a, const void *b)
5837 {
5838         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5839         one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5840         two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5841
5842         return ((int)one->resource_type - (int)two->resource_type);
5843 }
5844
5845 /*
5846  * Longest string length: 25 
5847  */
5848 char *
5849 ixl_switch_res_type_string(u8 type)
5850 {
5851         static char * ixl_switch_res_type_strings[0x14] = {
5852                 "VEB",
5853                 "VSI",
5854                 "Perfect Match MAC address",
5855                 "S-tag",
5856                 "(Reserved)",
5857                 "Multicast hash entry",
5858                 "Unicast hash entry",
5859                 "VLAN",
5860                 "VSI List entry",
5861                 "(Reserved)",
5862                 "VLAN Statistic Pool",
5863                 "Mirror Rule",
5864                 "Queue Set",
5865                 "Inner VLAN Forward filter",
5866                 "(Reserved)",
5867                 "Inner MAC",
5868                 "IP",
5869                 "GRE/VN1 Key",
5870                 "VN2 Key",
5871                 "Tunneling Port"
5872         };
5873
5874         if (type < 0x14)
5875                 return ixl_switch_res_type_strings[type];
5876         else
5877                 return "(Reserved)";
5878 }
5879
5880 static int
5881 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5882 {
5883         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5884         struct i40e_hw *hw = &pf->hw;
5885         device_t dev = pf->dev;
5886         struct sbuf *buf;
5887         enum i40e_status_code status;
5888         int error = 0;
5889
5890         u8 num_entries;
5891         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5892
5893         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5894         if (!buf) {
5895                 device_printf(dev, "Could not allocate sbuf for output.\n");
5896                 return (ENOMEM);
5897         }
5898
5899         bzero(resp, sizeof(resp));
5900         status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5901                                 resp,
5902                                 IXL_SW_RES_SIZE,
5903                                 NULL);
5904         if (status) {
5905                 device_printf(dev,
5906                     "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5907                     __func__, i40e_stat_str(hw, status),
5908                     i40e_aq_str(hw, hw->aq.asq_last_status));
5909                 sbuf_delete(buf);
5910                 return (EIO);
5911         }
5912
5913         /* Sort entries by type for display */
5914         qsort(resp, num_entries,
5915             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5916             &ixl_res_alloc_cmp);
5917
5918         sbuf_cat(buf, "\n");
5919         sbuf_printf(buf, "# of entries: %d\n", num_entries);
5920         sbuf_printf(buf,
5921             "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
5922             "                          | (this)     | (all) | (this) | (all)       \n");
5923         for (int i = 0; i < num_entries; i++) {
5924                 sbuf_printf(buf,
5925                     "%25s | %10d   %5d   %6d   %12d",
5926                     ixl_switch_res_type_string(resp[i].resource_type),
5927                     resp[i].guaranteed,
5928                     resp[i].total,
5929                     resp[i].used,
5930                     resp[i].total_unalloced);
5931                 if (i < num_entries - 1)
5932                         sbuf_cat(buf, "\n");
5933         }
5934
5935         error = sbuf_finish(buf);
5936         if (error)
5937                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5938
5939         sbuf_delete(buf);
5940         return (error);
5941 }
5942
5943 /*
5944 ** Caller must init and delete sbuf; this function will clear and
5945 ** finish it for caller.
5946 **
5947 ** XXX: Cannot use the SEID for this, since there is no longer a 
5948 ** fixed mapping between SEID and element type.
5949 */
5950 char *
5951 ixl_switch_element_string(struct sbuf *s,
5952     struct i40e_aqc_switch_config_element_resp *element)
5953 {
5954         sbuf_clear(s);
5955
5956         switch (element->element_type) {
5957         case I40E_AQ_SW_ELEM_TYPE_MAC:
5958                 sbuf_printf(s, "MAC %3d", element->element_info);
5959                 break;
5960         case I40E_AQ_SW_ELEM_TYPE_PF:
5961                 sbuf_printf(s, "PF  %3d", element->element_info);
5962                 break;
5963         case I40E_AQ_SW_ELEM_TYPE_VF:
5964                 sbuf_printf(s, "VF  %3d", element->element_info);
5965                 break;
5966         case I40E_AQ_SW_ELEM_TYPE_EMP:
5967                 sbuf_cat(s, "EMP");
5968                 break;
5969         case I40E_AQ_SW_ELEM_TYPE_BMC:
5970                 sbuf_cat(s, "BMC");
5971                 break;
5972         case I40E_AQ_SW_ELEM_TYPE_PV:
5973                 sbuf_cat(s, "PV");
5974                 break;
5975         case I40E_AQ_SW_ELEM_TYPE_VEB:
5976                 sbuf_cat(s, "VEB");
5977                 break;
5978         case I40E_AQ_SW_ELEM_TYPE_PA:
5979                 sbuf_cat(s, "PA");
5980                 break;
5981         case I40E_AQ_SW_ELEM_TYPE_VSI:
5982                 sbuf_printf(s, "VSI %3d", element->element_info);
5983                 break;
5984         default:
5985                 sbuf_cat(s, "?");
5986                 break;
5987         }
5988
5989         sbuf_finish(s);
5990         return sbuf_data(s);
5991 }
5992
5993 static int
5994 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5995 {
5996         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5997         struct i40e_hw *hw = &pf->hw;
5998         device_t dev = pf->dev;
5999         struct sbuf *buf;
6000         struct sbuf *nmbuf;
6001         enum i40e_status_code status;
6002         int error = 0;
6003         u16 next = 0;
6004         u8 aq_buf[I40E_AQ_LARGE_BUF];
6005
6006         struct i40e_aqc_get_switch_config_resp *sw_config;
6007         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
6008
6009         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6010         if (!buf) {
6011                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
6012                 return (ENOMEM);
6013         }
6014
6015         status = i40e_aq_get_switch_config(hw, sw_config,
6016             sizeof(aq_buf), &next, NULL);
6017         if (status) {
6018                 device_printf(dev,
6019                     "%s: aq_get_switch_config() error %s, aq error %s\n",
6020                     __func__, i40e_stat_str(hw, status),
6021                     i40e_aq_str(hw, hw->aq.asq_last_status));
6022                 sbuf_delete(buf);
6023                 return error;
6024         }
6025         if (next)
6026                 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
6027                     __func__, next);
6028
6029         nmbuf = sbuf_new_auto();
6030         if (!nmbuf) {
6031                 device_printf(dev, "Could not allocate sbuf for name output.\n");
6032                 sbuf_delete(buf);
6033                 return (ENOMEM);
6034         }
6035
6036         sbuf_cat(buf, "\n");
6037         /* Assuming <= 255 elements in switch */
6038         sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
6039         sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
6040         /* Exclude:
6041         ** Revision -- all elements are revision 1 for now
6042         */
6043         sbuf_printf(buf,
6044             "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
6045             "                |          |          | (uplink)\n");
6046         for (int i = 0; i < sw_config->header.num_reported; i++) {
6047                 // "%4d (%8s) | %8s   %8s   %#8x",
6048                 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
6049                 sbuf_cat(buf, " ");
6050                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
6051                     &sw_config->element[i]));
6052                 sbuf_cat(buf, " | ");
6053                 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
6054                 sbuf_cat(buf, "   ");
6055                 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
6056                 sbuf_cat(buf, "   ");
6057                 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
6058                 if (i < sw_config->header.num_reported - 1)
6059                         sbuf_cat(buf, "\n");
6060         }
6061         sbuf_delete(nmbuf);
6062
6063         error = sbuf_finish(buf);
6064         if (error)
6065                 device_printf(dev, "Error finishing sbuf: %d\n", error);
6066
6067         sbuf_delete(buf);
6068
6069         return (error);
6070 }
6071
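/*
** Dump the RSS hash key. On X722 the key is owned by firmware and is
** read with an AQ command; on other MACs it is read directly from the
** PFQF_HKEY registers.
*/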
6072 static int
6073 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
6074 {
6075         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6076         struct i40e_hw *hw = &pf->hw;
6077         device_t dev = pf->dev;
6078         struct sbuf *buf;
6079         int error = 0;
6080         enum i40e_status_code status;
6081         u32 reg;
6082
6083         struct i40e_aqc_get_set_rss_key_data key_data;
6084
6085         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6086         if (!buf) {
6087                 device_printf(dev, "Could not allocate sbuf for output.\n");
6088                 return (ENOMEM);
6089         }
6090
6091         bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
6092
6093         sbuf_cat(buf, "\n");
6094         if (hw->mac.type == I40E_MAC_X722) {
6095                 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
6096                 if (status)
6097                         device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
6098                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6099         } else {
6100                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
6101                         reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
6102                         bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
6103                 }
6104         }
6105
6106         ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
6107
6108         error = sbuf_finish(buf);
6109         if (error)
6110                 device_printf(dev, "Error finishing sbuf: %d\n", error);
6111         sbuf_delete(buf);
6112
6113         return (error);
6114 }
6115
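/*
** Hex-dump 'length' bytes from 'buf' into the sbuf, 16 bytes per line,
** each line prefixed with its offset (biased by label_offset). When
** 'text' is set, an ASCII column is appended with non-printable bytes
** shown as '.'. A line looks roughly like:
**
**       0 | 48 65 6c 6c 6f ... Hello...
*/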
6116 static void
6117 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
6118 {
6119         int i, j, k, width;
6120         char c;
6121
6122         if (length < 1 || buf == NULL) return;
6123
6124         int byte_stride = 16;
6125         int lines = length / byte_stride;
6126         int rem = length % byte_stride;
6127         if (rem > 0)
6128                 lines++;
6129
6130         for (i = 0; i < lines; i++) {
6131                 width = (rem > 0 && i == lines - 1)
6132                     ? rem : byte_stride;
6133
6134                 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
6135
6136                 for (j = 0; j < width; j++)
6137                         sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
6138
6139                 if (width < byte_stride) {
6140                         for (k = 0; k < (byte_stride - width); k++)
6141                                 sbuf_printf(sb, "   ");
6142                 }
6143
6144                 if (!text) {
6145                         sbuf_printf(sb, "\n");
6146                         continue;
6147                 }
6148
6149                 for (j = 0; j < width; j++) {
6150                         c = (char)buf[i * byte_stride + j];
6151                         if (c < 32 || c > 126)
6152                                 sbuf_printf(sb, ".");
6153                         else
6154                                 sbuf_printf(sb, "%c", c);
6155
6156                         if (j == width - 1)
6157                                 sbuf_printf(sb, "\n");
6158                 }
6159         }
6160 }
6161
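/*
** Dump the RSS lookup table, via an AQ command on X722 and from the
** PFQF_HLUT registers (four entries per 32-bit word) otherwise.
*/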
6162 static int
6163 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
6164 {
6165         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6166         struct i40e_hw *hw = &pf->hw;
6167         device_t dev = pf->dev;
6168         struct sbuf *buf;
6169         int error = 0;
6170         enum i40e_status_code status;
6171         u8 hlut[512];
6172         u32 reg;
6173
6174         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6175         if (!buf) {
6176                 device_printf(dev, "Could not allocate sbuf for output.\n");
6177                 return (ENOMEM);
6178         }
6179
6180         bzero(hlut, sizeof(hlut));
6181         sbuf_cat(buf, "\n");
6182         if (hw->mac.type == I40E_MAC_X722) {
6183                 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
6184                 if (status)
6185                         device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
6186                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6187         } else {
6188                 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
6189                         reg = rd32(hw, I40E_PFQF_HLUT(i));
6190                         bcopy(&reg, &hlut[i << 2], 4);
6191                 }
6192         }
6193         ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
6194
6195         error = sbuf_finish(buf);
6196         if (error)
6197                 device_printf(dev, "Error finishing sbuf: %d\n", error);
6198         sbuf_delete(buf);
6199
6200         return (error);
6201 }
6202
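/*
** Report the 64-bit RSS hash-enable (HENA) mask assembled from the two
** 32-bit PFQF_HENA registers. Note the value is passed by value to
** sysctl_handle_long(), so it may be truncated on 32-bit platforms.
*/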
6203 static int
6204 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
6205 {
6206         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6207         struct i40e_hw *hw = &pf->hw;
6208         u64 hena;
6209
6210         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
6211             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
6212
6213         return sysctl_handle_long(oidp, NULL, hena, req);
6214 }
6215
6216 /*
6217  * Sysctl to disable firmware's link management
6218  *
6219  * 1 - Disable link management on this port
6220  * 0 - Re-enable link management
6221  *
6222  * On normal NVMs, firmware manages link by default.
6223  */
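/*
 * For example, assuming this handler is attached under the driver's
 * debug sysctl tree as dev.ixl.<unit>.debug.disable_fw_link_management,
 * link management on port 0 could be disabled with:
 *
 *      sysctl dev.ixl.0.debug.disable_fw_link_management=1
 */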
6224 static int
6225 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
6226 {
6227         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6228         struct i40e_hw *hw = &pf->hw;
6229         device_t dev = pf->dev;
6230         int requested_mode = -1;
6231         enum i40e_status_code status = 0;
6232         int error = 0;
6233
6234         /* Read in new mode */
6235         error = sysctl_handle_int(oidp, &requested_mode, 0, req);
6236         if ((error) || (req->newptr == NULL))
6237                 return (error);
6238         /* Check for sane value */
6239         if (requested_mode < 0 || requested_mode > 1) {
6240                 device_printf(dev, "Valid modes are 0 or 1\n");
6241                 return (EINVAL);
6242         }
6243
6244         /* Set new mode */
6245         status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
6246         if (status) {
6247                 device_printf(dev,
6248                     "%s: Error setting new phy debug mode %s,"
6249                     " aq error: %s\n", __func__, i40e_stat_str(hw, status),
6250                     i40e_aq_str(hw, hw->aq.asq_last_status));
6251                 return (EIO);
6252         }
6253
6254         return (0);
6255 }
6256
6257 /*
6258  * Read some diagnostic data from a (Q)SFP+ module
6259  *
6260  *             SFP A2   QSFP Lower Page
6261  * Temperature 96-97    22-23
6262  * Vcc         98-99    26-27
6263  * TX power    102-103  34-35..40-41
6264  * RX power    104-105  50-51..56-57
6265  */
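/*
 * The raw bytes follow the usual SFF-8472/SFF-8636 encodings: for an
 * internally calibrated module, temperature is a signed 16-bit value in
 * units of 1/256 degree C, Vcc is an unsigned 16-bit value in units of
 * 100 uV, and TX/RX power are unsigned 16-bit values in units of 0.1 uW.
 * For instance, temperature bytes 0x19 0x80 decode as
 * ((0x19 << 8) | 0x80) / 256.0 = 25.5 degrees C.
 */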
6266 static int
6267 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
6268 {
6269         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6270         device_t dev = pf->dev;
6271         struct sbuf *sbuf;
6272         int error = 0;
6273         u8 output;
6274
6275         if (req->oldptr == NULL) {
6276                 error = SYSCTL_OUT(req, 0, 128);
6277                 return (error);
6278         }
6279
6280         error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
6281         if (error) {
6282                 device_printf(dev, "Error reading from i2c\n");
6283                 return (error);
6284         }
6285
6286         /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
6287         if (output == 0x3) {
6288                 /*
6289                  * Check for:
6290                  * - Internally calibrated data
6291                  * - Diagnostic monitoring is implemented
6292                  */
6293                 pf->read_i2c_byte(pf, 92, 0xA0, &output);
6294                 if ((output & 0x60) != 0x60) {
6295                         device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
6296                         return (0);
6297                 }
6298
6299                 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
                     if (sbuf == NULL) {
                             device_printf(dev, "Could not allocate sbuf for output.\n");
                             return (ENOMEM);
                     }
6300
6301                 for (u8 offset = 96; offset < 100; offset++) {
6302                         pf->read_i2c_byte(pf, offset, 0xA2, &output);
6303                         sbuf_printf(sbuf, "%02X ", output);
6304                 }
6305                 for (u8 offset = 102; offset < 106; offset++) {
6306                         pf->read_i2c_byte(pf, offset, 0xA2, &output);
6307                         sbuf_printf(sbuf, "%02X ", output);
6308                 }
6309         } else if (output == 0xD || output == 0x11) {
6310                 /*
6311                  * QSFP+ modules are always internally calibrated, and must indicate
6312                  * what types of diagnostic monitoring are implemented
6313                  */
6314                 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
                     if (sbuf == NULL) {
                             device_printf(dev, "Could not allocate sbuf for output.\n");
                             return (ENOMEM);
                     }
6315
6316                 for (u8 offset = 22; offset < 24; offset++) {
6317                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
6318                         sbuf_printf(sbuf, "%02X ", output);
6319                 }
6320                 for (u8 offset = 26; offset < 28; offset++) {
6321                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
6322                         sbuf_printf(sbuf, "%02X ", output);
6323                 }
6324                 /* Read the data from the first lane */
6325                 for (u8 offset = 34; offset < 36; offset++) {
6326                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
6327                         sbuf_printf(sbuf, "%02X ", output);
6328                 }
6329                 for (u8 offset = 50; offset < 52; offset++) {
6330                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
6331                         sbuf_printf(sbuf, "%02X ", output);
6332                 }
6333         } else {
6334                 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
6335                 return (0);
6336         }
6337
6338         sbuf_finish(sbuf);
6339         sbuf_delete(sbuf);
6340
6341         return (0);
6342 }
6343
6344 /*
6345  * Sysctl to read a byte from I2C bus.
6346  *
6347  * Input: 32-bit value:
6348  *      bits 0-7:   device address (0xA0 or 0xA2)
6349  *      bits 8-15:  offset (0-255)
6350  *      bits 16-31: unused
6351  * Output: 8-bit value read
6352  */
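/*
 * For example, assuming this handler is attached as
 * dev.ixl.<unit>.debug.read_i2c_byte, reading offset 96 (0x60) from
 * device address 0xA0 packs as (96 << 8) | 0xA0:
 *
 *      sysctl dev.ixl.0.debug.read_i2c_byte=0x60A0
 */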
6353 static int
6354 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
6355 {
6356         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6357         device_t dev = pf->dev;
6358         int input = -1, error = 0;
6359
6360         u8 dev_addr, offset, output;
6361
6362         /* Read in I2C read parameters */
6363         error = sysctl_handle_int(oidp, &input, 0, req);
6364         if ((error) || (req->newptr == NULL))
6365                 return (error);
6366         /* Validate device address */
6367         dev_addr = input & 0xFF;
6368         if (dev_addr != 0xA0 && dev_addr != 0xA2) {
6369                 return (EINVAL);
6370         }
6371         offset = (input >> 8) & 0xFF;
6372
6373         error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
6374         if (error)
6375                 return (error);
6376
6377         device_printf(dev, "%02X\n", output);
6378         return (0);
6379 }
6380
6381 /*
6382  * Sysctl to write a byte to the I2C bus.
6383  *
6384  * Input: 32-bit value:
6385  *      bits 0-7:   device address (0xA0 or 0xA2)
6386  *      bits 8-15:  offset (0-255)
6387  *      bits 16-23: value to write
6388  *      bits 24-31: unused
6389  * Output: 8-bit value written
6390  */
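/*
 * For example, assuming this handler is attached as
 * dev.ixl.<unit>.debug.write_i2c_byte, writing the value 0x55 to offset
 * 8 of device address 0xA2 packs as (0x55 << 16) | (8 << 8) | 0xA2:
 *
 *      sysctl dev.ixl.0.debug.write_i2c_byte=0x5508A2
 */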
6391 static int
6392 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
6393 {
6394         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6395         device_t dev = pf->dev;
6396         int input = -1, error = 0;
6397
6398         u8 dev_addr, offset, value;
6399
6400         /* Read in I2C write parameters */
6401         error = sysctl_handle_int(oidp, &input, 0, req);
6402         if ((error) || (req->newptr == NULL))
6403                 return (error);
6404         /* Validate device address */
6405         dev_addr = input & 0xFF;
6406         if (dev_addr != 0xA0 && dev_addr != 0xA2) {
6407                 return (EINVAL);
6408         }
6409         offset = (input >> 8) & 0xFF;
6410         value = (input >> 16) & 0xFF;
6411
6412         error = pf->write_i2c_byte(pf, offset, dev_addr, value);
6413         if (error)
6414                 return (error);
6415
6416         device_printf(dev, "%02X written\n", value);
6417         return (0);
6418 }
6419
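/*
** Helpers for the FEC sysctls below: read the current PHY abilities and
** report whether a given bit of fec_cfg_curr_mod_ext_info is set, or
** rewrite the PHY configuration with that bit changed while keeping the
** remaining PHY settings intact.
*/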
6420 static int
6421 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
6422     u8 bit_pos, int *is_set)
6423 {
6424         device_t dev = pf->dev;
6425         struct i40e_hw *hw = &pf->hw;
6426         enum i40e_status_code status;
6427
6428         status = i40e_aq_get_phy_capabilities(hw,
6429             FALSE, FALSE, abilities, NULL);
6430         if (status) {
6431                 device_printf(dev,
6432                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
6433                     __func__, i40e_stat_str(hw, status),
6434                     i40e_aq_str(hw, hw->aq.asq_last_status));
6435                 return (EIO);
6436         }
6437
6438         *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
6439         return (0);
6440 }
6441
6442 static int
6443 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
6444     u8 bit_pos, int set)
6445 {
6446         device_t dev = pf->dev;
6447         struct i40e_hw *hw = &pf->hw;
6448         struct i40e_aq_set_phy_config config;
6449         enum i40e_status_code status;
6450
6451         /* Set new PHY config */
6452         memset(&config, 0, sizeof(config));
6453         config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
6454         if (set)
6455                 config.fec_config |= bit_pos;
6456         if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
6457                 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
6458                 config.phy_type = abilities->phy_type;
6459                 config.phy_type_ext = abilities->phy_type_ext;
6460                 config.link_speed = abilities->link_speed;
6461                 config.eee_capability = abilities->eee_capability;
6462                 config.eeer = abilities->eeer_val;
6463                 config.low_power_ctrl = abilities->d3_lpan;
6464                 status = i40e_aq_set_phy_config(hw, &config, NULL);
6465
6466                 if (status) {
6467                         device_printf(dev,
6468                             "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
6469                             __func__, i40e_stat_str(hw, status),
6470                             i40e_aq_str(hw, hw->aq.asq_last_status));
6471                         return (EIO);
6472                 }
6473         }
6474
6475         return (0);
6476 }
6477
6478 static int
6479 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
6480 {
6481         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6482         int mode, error = 0;
6483
6484         struct i40e_aq_get_phy_abilities_resp abilities;
6485         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
6486         if (error)
6487                 return (error);
6488         /* Read in new mode */
6489         error = sysctl_handle_int(oidp, &mode, 0, req);
6490         if ((error) || (req->newptr == NULL))
6491                 return (error);
6492
6493         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
6494 }
6495
6496 static int
6497 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
6498 {
6499         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6500         int mode, error = 0;
6501
6502         struct i40e_aq_get_phy_abilities_resp abilities;
6503         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
6504         if (error)
6505                 return (error);
6506         /* Read in new mode */
6507         error = sysctl_handle_int(oidp, &mode, 0, req);
6508         if ((error) || (req->newptr == NULL))
6509                 return (error);
6510
6511         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
6512 }
6513
6514 static int
6515 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
6516 {
6517         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6518         int mode, error = 0;
6519
6520         struct i40e_aq_get_phy_abilities_resp abilities;
6521         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
6522         if (error)
6523                 return (error);
6524         /* Read in new mode */
6525         error = sysctl_handle_int(oidp, &mode, 0, req);
6526         if ((error) || (req->newptr == NULL))
6527                 return (error);
6528
6529         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
6530 }
6531
6532 static int
6533 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
6534 {
6535         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6536         int mode, error = 0;
6537
6538         struct i40e_aq_get_phy_abilities_resp abilities;
6539         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
6540         if (error)
6541                 return (error);
6542         /* Read in new mode */
6543         error = sysctl_handle_int(oidp, &mode, 0, req);
6544         if ((error) || (req->newptr == NULL))
6545                 return (error);
6546
6547         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
6548 }
6549
6550 static int
6551 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
6552 {
6553         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6554         int mode, error = 0;
6555
6556         struct i40e_aq_get_phy_abilities_resp abilities;
6557         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
6558         if (error)
6559                 return (error);
6560         /* Read in new mode */
6561         error = sysctl_handle_int(oidp, &mode, 0, req);
6562         if ((error) || (req->newptr == NULL))
6563                 return (error);
6564
6565         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
6566 }
6567
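/*
** Dump internal FW/HW data for debugging. Cluster 1 is read with
** repeated i40e_aq_debug_dump() calls; each table in the cluster is
** accumulated in final_buff and hex-dumped once the AQ response moves
** on to the next table (ret_next_table == 0xFF marks the end).
*/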
6568 static int
6569 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
6570 {
6571         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6572         struct i40e_hw *hw = &pf->hw;
6573         device_t dev = pf->dev;
6574         struct sbuf *buf;
6575         int error = 0;
6576         enum i40e_status_code status;
6577
6578         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6579         if (!buf) {
6580                 device_printf(dev, "Could not allocate sbuf for output.\n");
6581                 return (ENOMEM);
6582         }
6583
6584         u8 *final_buff;
6585         /* This amount is only necessary if reading the entire cluster into memory */
6586 #define IXL_FINAL_BUFF_SIZE     (1280 * 1024)
6587         final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
6588         int final_buff_len = 0;
6589
6590         u8 cluster_id = 1;
6591         bool more = true;
6592
6593         u8 dump_buf[4096];
6594         u16 curr_buff_size = 4096;
6595         u8 curr_next_table = 0;
6596         u32 curr_next_index = 0;
6597
6598         u16 ret_buff_size;
6599         u8 ret_next_table;
6600         u32 ret_next_index;
6601
6602         sbuf_cat(buf, "\n");
6603
6604         while (more) {
6605                 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
6606                     dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
6607                 if (status) {
6608                         device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
6609                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6610                         goto free_out;
6611                 }
6612
6613                 /* copy info out of temp buffer, guarding against overflow */
                     if (final_buff_len + ret_buff_size > IXL_FINAL_BUFF_SIZE) {
                             device_printf(dev, "%s: dump exceeds %d byte buffer\n",
                                 __func__, IXL_FINAL_BUFF_SIZE);
                             goto free_out;
                     }
6614                 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
6615                 final_buff_len += ret_buff_size;
6616
6617                 if (ret_next_table != curr_next_table) {
6618                         /* We're done with the current table; we can dump out read data. */
6619                         sbuf_printf(buf, "%d:", curr_next_table);
6620                         int bytes_printed = 0;
6621                         while (bytes_printed < final_buff_len) {
6622                                 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
6623                                 bytes_printed += 16;
6624                         }
6625                         sbuf_cat(buf, "\n");
6626
6627                         /* The entire cluster has been read; we're finished */
6628                         if (ret_next_table == 0xFF)
6629                                 break;
6630
6631                         /* Otherwise clear the output buffer and continue reading */
6632                         bzero(final_buff, IXL_FINAL_BUFF_SIZE);
6633                         final_buff_len = 0;
6634                 }
6635
6636                 if (ret_next_index == 0xFFFFFFFF)
6637                         ret_next_index = 0;
6638
6639                 bzero(dump_buf, sizeof(dump_buf));
6640                 curr_next_table = ret_next_table;
6641                 curr_next_index = ret_next_index;
6642         }
6643
6644 free_out:
6645         free(final_buff, M_DEVBUF);
6646         error = sbuf_finish(buf);
6647         if (error)
6648                 device_printf(dev, "Error finishing sbuf: %d\n", error);
6649         sbuf_delete(buf);
6650
6651         return (error);
6652 }
6653
6654 static int
6655 ixl_start_fw_lldp(struct ixl_pf *pf)
6656 {
6657         struct i40e_hw *hw = &pf->hw;
6658         enum i40e_status_code status;
6659
6660         status = i40e_aq_start_lldp(hw, false, NULL);
6661         if (status != I40E_SUCCESS) {
6662                 switch (hw->aq.asq_last_status) {
6663                 case I40E_AQ_RC_EEXIST:
6664                         device_printf(pf->dev,
6665                             "FW LLDP agent is already running\n");
6666                         break;
6667                 case I40E_AQ_RC_EPERM:
6668                         device_printf(pf->dev,
6669                             "Device configuration forbids SW from starting "
6670                             "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
6671                             "attribute to \"Enabled\" to use this sysctl\n");
6672                         return (EINVAL);
6673                 default:
6674                         device_printf(pf->dev,
6675                             "Starting FW LLDP agent failed: error: %s, %s\n",
6676                             i40e_stat_str(hw, status),
6677                             i40e_aq_str(hw, hw->aq.asq_last_status));
6678                         return (EINVAL);
6679                 }
6680         }
6681
6682         atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6683         return (0);
6684 }
6685
6686 static int
6687 ixl_stop_fw_lldp(struct ixl_pf *pf)
6688 {
6689         struct i40e_hw *hw = &pf->hw;
6690         device_t dev = pf->dev;
6691         enum i40e_status_code status;
6692
6693         if (hw->func_caps.npar_enable != 0) {
6694                 device_printf(dev,
6695                     "Disabling FW LLDP agent is not supported on this device\n");
6696                 return (EINVAL);
6697         }
6698
6699         if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
6700                 device_printf(dev,
6701                     "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
6702                 return (EINVAL);
6703         }
6704
6705         status = i40e_aq_stop_lldp(hw, true, false, NULL);
6706         if (status != I40E_SUCCESS) {
6707                 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
6708                         device_printf(dev,
6709                             "Disabling FW LLDP agent failed: error: %s, %s\n",
6710                             i40e_stat_str(hw, status),
6711                             i40e_aq_str(hw, hw->aq.asq_last_status));
6712                         return (EINVAL);
6713                 }
6714
6715                 device_printf(dev, "FW LLDP agent is already stopped\n");
6716         }
6717
6718         i40e_aq_set_dcb_parameters(hw, true, NULL);
6719         atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6720         return (0);
6721 }
6722
6723 static int
6724 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
6725 {
6726         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6727         int state, new_state, error = 0;
6728
6729         state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
6730
6731         /* Read in new mode */
6732         error = sysctl_handle_int(oidp, &new_state, 0, req);
6733         if ((error) || (req->newptr == NULL))
6734                 return (error);
6735
6736         /* Already in requested state */
6737         if (new_state == state)
6738                 return (error);
6739
6740         if (new_state == 0)
6741                 return ixl_stop_fw_lldp(pf);
6742
6743         return ixl_start_fw_lldp(pf);
6744 }
6745
6746 static int
6747 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
6748 {
6749         struct ixl_pf         *pf = (struct ixl_pf *)arg1;
6750         int                   state, new_state;
6751         int                   sysctl_handle_status = 0;
6752         enum i40e_status_code cmd_status;
6753
6754         /* Init states' values */
6755         state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
6756
6757         /* Get requested mode */
6758         sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
6759         if ((sysctl_handle_status) || (req->newptr == NULL))
6760                 return (sysctl_handle_status);
6761
6762         /* Check if state has changed */
6763         if (new_state == state)
6764                 return (0);
6765
6766         /* Set new state */
6767         cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
6768
6769         /* Save new state or report error */
6770         if (!cmd_status) {
6771                 if (new_state == 0)
6772                         atomic_clear_int(&pf->state, IXL_PF_STATE_EEE_ENABLED);
6773                 else
6774                         atomic_set_int(&pf->state, IXL_PF_STATE_EEE_ENABLED);
6775         } else if (cmd_status == I40E_ERR_CONFIG)
6776                 return (EPERM);
6777         else
6778                 return (EIO);
6779
6780         return (0);
6781 }
6782
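/*
** Fetch the initial link state at attach time. Firmware older than
** v4.33 needs a short delay and an explicit link restart (with
** autonegotiation) before the link status can be queried reliably.
*/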
6783 int
6784 ixl_attach_get_link_status(struct ixl_pf *pf)
6785 {
6786         struct i40e_hw *hw = &pf->hw;
6787         device_t dev = pf->dev;
6788         int error = 0;
6789
6790         if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
6791             (hw->aq.fw_maj_ver < 4)) {
6792                 i40e_msec_delay(75);
6793                 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
6794                 if (error) {
6795                         device_printf(dev, "link restart failed, aq_err=%d\n",
6796                             pf->hw.aq.asq_last_status);
6797                         return error;
6798                 }
6799         }
6800
6801         /* Determine link state */
6802         hw->phy.get_link_info = TRUE;
6803         i40e_get_link_status(hw, &pf->link_up);
6804         return (0);
6805 }