/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */

static int      ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int);
static u64      ixl_max_aq_speed_to_value(u8);
static u8       ixl_convert_sysctl_aq_link_speed(u8, bool);
static void     ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);

/* Sysctls */
static int      ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int      ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

const char * const ixl_fc_string[6] = {
        "None",
        "Rx",
        "Tx",
        "Full",
        "Priority",
        "Default"
};

static char *ixl_fec_string[3] = {
       "CL108 RS-FEC",
       "CL74 FC-FEC/BASE-R",
       "None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

void
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
        va_list args;

        if (!(mask & pf->dbg_mask))
                return;

        /* Re-implement device_printf() */
        device_print_prettyname(pf->dev);
        va_start(args, fmt);
        vprintf(fmt, args);
        va_end(args);
}
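
/*
 * Illustrative usage sketch (not part of the driver): a message passed to
 * ixl_debug_core() is printed only when its mask bit is set in pf->dbg_mask,
 * which is controlled through the driver's debug-mask tunables/sysctls. The
 * mask bit shown here stands in for whichever enum ixl_dbg_mask value
 * applies to the message.
 *
 *     ixl_debug_core(pf, IXL_DBG_INFO, "%d queues configured\n",
 *         vsi->num_queues);
 */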

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
        u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
        u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
        u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

        sbuf_printf(buf,
            "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
            hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
            hw->aq.api_maj_ver, hw->aq.api_min_ver,
            (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
            IXL_NVM_VERSION_HI_SHIFT,
            (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
            IXL_NVM_VERSION_LO_SHIFT,
            hw->nvm.eetrack,
            oem_ver, oem_build, oem_patch);
}
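
/*
 * Illustrative output (the values are made up): a device with firmware
 * 6.0 build 48442, AQ API 1.7, NVM map version 6.01, EEtrackID 0x80003554
 * and OEM version fields 1.263.0 would produce the string:
 *
 *     fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.263.0
 */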

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;
        struct sbuf *sbuf;

        sbuf = sbuf_new_auto();
        ixl_nvm_version_str(hw, sbuf);
        sbuf_finish(sbuf);
        device_printf(dev, "%s\n", sbuf_data(sbuf));
        sbuf_delete(sbuf);
}

static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;

        vsi->tx_itr_setting = pf->tx_itr;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct tx_ring  *txr = &que->txr;

                wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
                    vsi->tx_itr_setting);
                txr->itr = vsi->tx_itr_setting;
                txr->latency = IXL_AVE_LATENCY;
        }
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;

        vsi->rx_itr_setting = pf->rx_itr;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct rx_ring  *rxr = &que->rxr;

                wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
                    vsi->rx_itr_setting);
                rxr->itr = vsi->rx_itr_setting;
                rxr->latency = IXL_AVE_LATENCY;
        }
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
        ixl_configure_tx_itr(pf);
        ixl_configure_rx_itr(pf);
}
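
/*
 * Note (sketch): the ITR registers count the interrupt interval in 2-usec
 * units, so a setting of 0x3E programs roughly 62 * 2 = 124 usecs between
 * interrupts (about 8K interrupts/sec); ixl_configure_intr0_msix() below
 * uses the same convention when programming vector 0.
 */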

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as the
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/
void
ixl_init_locked(struct ixl_pf *pf)
{
        struct i40e_hw  *hw = &pf->hw;
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ifnet    *ifp = vsi->ifp;
        device_t        dev = pf->dev;
        struct i40e_filter_control_settings     filter;
        u8              tmpaddr[ETHER_ADDR_LEN];
        int             ret;

        INIT_DEBUGOUT("ixl_init_locked: begin");
        IXL_PF_LOCK_ASSERT(pf);

        ixl_stop_locked(pf);

        /*
         * If the AQ is dead here, it probably means something outside of
         * the driver did something to the adapter, like a PF reset.
         * So rebuild the driver's state here if that occurs.
         */
        if (!i40e_check_asq_alive(&pf->hw)) {
                device_printf(dev, "Admin Queue is down; resetting...\n");
                ixl_teardown_hw_structs(pf);
                ixl_reset(pf);
        }

        /* Get the latest MAC address; the user might have set an LAA */
        bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
        if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
            (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
                device_printf(dev, "ixl_init_locked: reconfigure MAC addr\n");
                ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
                bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
                ret = i40e_aq_mac_address_write(hw,
                    I40E_AQC_WRITE_TYPE_LAA_ONLY,
                    hw->mac.addr, NULL);
                if (ret) {
                        device_printf(dev,
                            "LLA address change failed!\n");
                        return;
                }
        }

        ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

        /* Set the various hardware offload abilities */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

        /* Set up the device filtering */
        bzero(&filter, sizeof(filter));
        filter.enable_ethtype = TRUE;
        filter.enable_macvlan = TRUE;
        filter.enable_fdir = FALSE;
        filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
        if (i40e_set_filter_control(hw, &filter))
                device_printf(dev, "i40e_set_filter_control() failed\n");

        /* Prepare the VSI: rings, hmc contexts, etc... */
        if (ixl_initialize_vsi(vsi)) {
                device_printf(dev, "initialize vsi failed!\n");
                return;
        }

        /* Set up RSS */
        ixl_config_rss(pf);

        /* Add protocol filters to list */
        ixl_init_filters(vsi);

        /* Set up VLANs if needed */
        ixl_setup_vlan_filters(vsi);

        /* Set up MSI/X routing and the ITR settings */
        if (pf->msix > 1) {
                ixl_configure_queue_intr_msix(pf);
                ixl_configure_itr(pf);
        } else
                ixl_configure_legacy(pf);

        ixl_enable_rings(vsi);

        i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

        ixl_reconfigure_filters(vsi);

        /* And now turn on interrupts */
        ixl_enable_intr(vsi);

        /* Get link info */
        hw->phy.get_link_info = TRUE;
        i40e_get_link_status(hw, &pf->link_up);
        ixl_update_link_status(pf);

        /* Start the local timer */
        callout_reset(&pf->timer, hz, ixl_local_timer, pf);

        /* Now inform the stack we're ready */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;

#ifdef IXL_IW
        if (ixl_enable_iwarp && pf->iw_enabled) {
                ret = ixl_iw_pf_init(pf);
                if (ret)
                        device_printf(dev,
                            "initialize iwarp failed, code %d\n", ret);
        }
#endif
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
        struct i40e_aqc_list_capabilities_element_resp *buf;
        struct i40e_hw  *hw = &pf->hw;
        device_t        dev = pf->dev;
        int             error, len;
        u16             needed;
        bool            again = TRUE;

        len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
        if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
            malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
                device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
        }

        /* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
            &needed, i40e_aqc_opc_list_func_capabilities, NULL);
        free(buf, M_DEVBUF);
        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
            (again == TRUE)) {
                /* retry once with a larger buffer */
                again = FALSE;
                len = needed;
                goto retry;
        } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                device_printf(dev, "capability discovery failed: %d\n",
                    pf->hw.aq.asq_last_status);
                return (ENODEV);
        }

        /* Capture this PF's starting queue pair */
        pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
        device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
            "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
            hw->pf_id, hw->func_caps.num_vfs,
            hw->func_caps.num_msix_vectors,
            hw->func_caps.num_msix_vectors_vf,
            hw->func_caps.fd_filters_guaranteed,
            hw->func_caps.fd_filters_best_effort,
            hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp,
            hw->func_caps.base_queue);
#endif
        struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
        osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
        if (osdep->i2c_intfc_num != -1)
                pf->has_i2c = true;

        /* Print a subset of the capability information. */
        device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
            hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
            hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
            (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
            (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
            (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
            "MDIO shared");

        return (error);
}

void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        device_t        dev = vsi->dev;

        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                }
                else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        device_printf(dev,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        device_printf(dev,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        device_printf(dev,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        device_printf(dev,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}
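
/*
 * Dependency sketch for the transitions above: TSO requires the matching
 * checksum offload. Enabling TSO4 while TXCSUM is off turns both on;
 * disabling TXCSUM while TSO4 is on turns both off and records
 * IXL_FLAGS_KEEP_TSO4 so that a later TXCSUM re-enable restores TSO4.
 * The IPv6 block applies the same rule to TXCSUM_IPV6/TSO6.
 */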

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int err;

        /* Make sure to initialize the device to the complete list of
         * supported speeds on driver load, to ensure unloading and
         * reloading the driver will restore this value.
         */
        err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
        if (err) {
                /* Non-fatal error */
                device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
                              __func__, err);
                return;
        }

        pf->advertised_speed =
            ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
        enum i40e_status_code status = 0;
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;

        /* Shutdown LAN HMC */
        if (hw->hmc.hmc_obj) {
                status = i40e_shutdown_lan_hmc(hw);
                if (status) {
                        device_printf(dev,
                            "init: LAN HMC shutdown failure; status %d\n", status);
                        goto err_out;
                }
        }

        /* Shutdown admin queue */
        ixl_disable_intr0(hw);
        status = i40e_shutdown_adminq(hw);
        if (status)
                device_printf(dev,
                    "init: Admin Queue shutdown failure; status %d\n", status);

err_out:
        return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;
        u8 set_fc_err_mask;
        int error = 0;

        // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
        i40e_clear_hw(hw);
        error = i40e_pf_reset(hw);
        if (error) {
                device_printf(dev, "init: PF reset failure\n");
                error = EIO;
                goto err_out;
        }

        error = i40e_init_adminq(hw);
        if (error) {
                device_printf(dev, "init: Admin queue init failure;"
                    " status code %d\n", error);
                error = EIO;
                goto err_out;
        }

        i40e_clear_pxe_mode(hw);

        error = ixl_get_hw_capabilities(pf);
        if (error) {
                device_printf(dev, "init: Error retrieving HW capabilities;"
                    " status code %d\n", error);
                goto err_out;
        }

        error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp, 0, 0);
        if (error) {
                device_printf(dev, "init: LAN HMC init failed; status code %d\n",
                    error);
                error = EIO;
                goto err_out;
        }

        error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (error) {
                device_printf(dev, "init: LAN HMC config failed; status code %d\n",
                    error);
                error = EIO;
                goto err_out;
        }

        // XXX: possible fix for panic, but our failure recovery is still broken
        error = ixl_switch_config(pf);
        if (error) {
                device_printf(dev, "init: ixl_switch_config() failed: %d\n",
                     error);
                goto err_out;
        }

        error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
            NULL);
        if (error) {
                device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
                    " aq_err %d\n", error, hw->aq.asq_last_status);
                error = EIO;
                goto err_out;
        }

        error = i40e_set_fc(hw, &set_fc_err_mask, true);
        if (error) {
                device_printf(dev, "init: setting link flow control failed; retcode %d,"
                    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
                goto err_out;
        }

        // XXX: (Rebuild VSIs?)

        /* Firmware delay workaround */
        if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
            (hw->aq.fw_maj_ver < 4)) {
                i40e_msec_delay(75);
                error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
                if (error) {
                        device_printf(dev, "init: link restart failed, aq_err %d\n",
                            hw->aq.asq_last_status);
                        goto err_out;
                }
        }

        /* Re-enable admin queue interrupt */
        if (pf->msix > 1) {
                ixl_configure_intr0_msix(pf);
                ixl_enable_intr0(hw);
        }

err_out:
        return (error);
}

/*
** MSIX Interrupt Handlers and Tasklets
*/
void
ixl_handle_que(void *context, int pending)
{
        struct ixl_queue *que = context;
        struct ixl_vsi *vsi = que->vsi;
        struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        struct ifnet    *ifp = vsi->ifp;
        bool            more;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                more = ixl_rxeof(que, IXL_RX_LIMIT);
                IXL_TX_LOCK(txr);
                ixl_txeof(que);
                if (!drbr_empty(ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                IXL_TX_UNLOCK(txr);
                if (more) {
                        taskqueue_enqueue(que->tq, &que->task);
                        return;
                }
        }

        /* Re-enable queue interrupt */
        if (pf->msix > 1)
                ixl_enable_queue(hw, que->me);
        else
                ixl_enable_intr0(hw);
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        struct tx_ring          *txr = &que->txr;
        u32                     icr0;
        bool                    more;

        pf->admin_irq++;

        /* Clear PBA at start of ISR if using legacy interrupts */
        if (pf->msix == 0)
                wr32(hw, I40E_PFINT_DYN_CTL0,
                    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
                    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));

        icr0 = rd32(hw, I40E_PFINT_ICR0);

#ifdef PCI_IOV
        if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
                taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
                taskqueue_enqueue(pf->tq, &pf->adminq);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                ++que->irqs;

                more = ixl_rxeof(que, IXL_RX_LIMIT);

                IXL_TX_LOCK(txr);
                ixl_txeof(que);
                if (!drbr_empty(vsi->ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                IXL_TX_UNLOCK(txr);

                if (more)
                        taskqueue_enqueue(que->tq, &que->task);
        }

        ixl_enable_intr0(hw);
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
        struct ixl_queue *que = arg;
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        bool            more_tx, more_rx;

        /* Protect against spurious interrupts */
        if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        ++que->irqs;

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        /*
        ** Make certain that if the stack
        ** has anything queued the task gets
        ** scheduled to handle it.
        */
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        ixl_set_queue_rx_itr(que);
        ixl_set_queue_tx_itr(que);

        if (more_tx || more_rx)
                taskqueue_enqueue(que->tq, &que->task);
        else
                ixl_enable_queue(hw, que->me);

        return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_adminq(void *arg)
{
        struct ixl_pf   *pf = arg;
        struct i40e_hw  *hw = &pf->hw;
        device_t        dev = pf->dev;
        u32             reg, mask, rstat_reg;
        bool            do_task = FALSE;

        ++pf->admin_irq;

        reg = rd32(hw, I40E_PFINT_ICR0);
        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /* Check on the cause */
        if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
                mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
                do_task = TRUE;
        }

        if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
                ixl_handle_mdd_event(pf);
                mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
        }

        if (reg & I40E_PFINT_ICR0_GRST_MASK) {
                device_printf(dev, "Reset Requested!\n");
                rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
                rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
                    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
                device_printf(dev, "Reset type: ");
                switch (rstat_reg) {
                /* These others might be handled similarly to an EMPR reset */
                case I40E_RESET_CORER:
                        printf("CORER\n");
                        break;
                case I40E_RESET_GLOBR:
                        printf("GLOBR\n");
                        break;
                case I40E_RESET_EMPR:
                        printf("EMPR\n");
                        atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
                        break;
                default:
                        printf("POR\n");
                        break;
                }
                /* overload admin queue task to check reset progress */
                do_task = TRUE;
        }

        if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
                device_printf(dev, "ECC Error detected!\n");
        }

        if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
                reg = rd32(hw, I40E_PFHMC_ERRORINFO);
                if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
                        device_printf(dev, "HMC Error detected!\n");
                        device_printf(dev, "INFO 0x%08x\n", reg);
                        reg = rd32(hw, I40E_PFHMC_ERRORDATA);
                        device_printf(dev, "DATA 0x%08x\n", reg);
                        wr32(hw, I40E_PFHMC_ERRORINFO, 0);
                }
        }

        if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
                device_printf(dev, "PCI Exception detected!\n");
        }

#ifdef PCI_IOV
        if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
                mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
                taskqueue_enqueue(pf->tq, &pf->vflr_task);
        }
#endif

        if (do_task)
                taskqueue_enqueue(pf->tq, &pf->adminq);
        else
                ixl_enable_intr0(hw);
}

void
ixl_set_promisc(struct ixl_vsi *vsi)
{
        struct ifnet    *ifp = vsi->ifp;
        struct i40e_hw  *hw = vsi->hw;
        int             err, mcnt = 0;
        bool            uni = FALSE, multi = FALSE;

        if (ifp->if_flags & IFF_PROMISC)
                uni = multi = TRUE;
        else if (ifp->if_flags & IFF_ALLMULTI)
                multi = TRUE;
        else { /* Need to count the multicast addresses */
                struct  ifmultiaddr *ifma;
                if_maddr_rlock(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        if (mcnt == MAX_MULTICAST_ADDR) {
                                multi = TRUE;
                                break;
                        }
                        mcnt++;
                }
                if_maddr_runlock(ifp);
        }

        err = i40e_aq_set_vsi_unicast_promiscuous(hw,
            vsi->seid, uni, NULL, TRUE);
        err = i40e_aq_set_vsi_multicast_promiscuous(hw,
            vsi->seid, multi, NULL);
        return;
}

/*********************************************************************
 *      Filter Routines
 *
 *      Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
        struct  ifmultiaddr     *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct i40e_hw          *hw = vsi->hw;
        int                     mcnt = 0, flags;

        IOCTL_DEBUGOUT("ixl_add_multi: begin");

        if_maddr_rlock(ifp);
        /*
        ** First just get a count, to decide if
        ** we simply use multicast promiscuous.
        */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mcnt++;
        }
        if_maddr_runlock(ifp);

        if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
                /* delete existing MC filters */
                ixl_del_hw_filters(vsi, mcnt);
                i40e_aq_set_vsi_multicast_promiscuous(hw,
                    vsi->seid, TRUE, NULL);
                return;
        }

        mcnt = 0;
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                ixl_add_mc_filter(vsi,
                    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
                mcnt++;
        }
        if_maddr_runlock(ifp);
        if (mcnt > 0) {
                flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
                ixl_add_hw_filters(vsi, flags, mcnt);
        }

        IOCTL_DEBUGOUT("ixl_add_multi: end");
        return;
}

void
ixl_del_multi(struct ixl_vsi *vsi)
{
        struct ifnet            *ifp = vsi->ifp;
        struct ifmultiaddr      *ifma;
        struct ixl_mac_filter   *f;
        int                     mcnt = 0;
        bool                    match = FALSE;

        IOCTL_DEBUGOUT("ixl_del_multi: begin");

        /* Search for removed multicast addresses */
        if_maddr_rlock(ifp);
        SLIST_FOREACH(f, &vsi->ftl, next) {
                if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
                        match = FALSE;
                        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                                if (ifma->ifma_addr->sa_family != AF_LINK)
                                        continue;
                                u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
                                if (cmp_etheraddr(f->macaddr, mc_addr)) {
                                        match = TRUE;
                                        break;
                                }
                        }
                        if (match == FALSE) {
                                f->flags |= IXL_FILTER_DEL;
                                mcnt++;
                        }
                }
        }
        if_maddr_runlock(ifp);

        if (mcnt > 0)
                ixl_del_hw_filters(vsi, mcnt);
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine fires off the admin queue task (which checks the link
 *  status), updates statistics, and runs the queue-hang watchdog check.
 *
 *  Only runs when the driver is configured UP and RUNNING.
 *
 **********************************************************************/

void
ixl_local_timer(void *arg)
{
        struct ixl_pf           *pf = arg;

        IXL_PF_LOCK_ASSERT(pf);

        /* Fire off the adminq task */
        taskqueue_enqueue(pf->tq, &pf->adminq);

        /* Update stats */
        ixl_update_stats_counters(pf);

        /* Increment stat when a queue shows hung */
        if (ixl_queue_hang_check(&pf->vsi))
                pf->watchdog_events++;

        callout_reset(&pf->timer, hz, ixl_local_timer, pf);
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        struct ifnet *ifp = pf->vsi.ifp;
        char *req_fec_string, *neg_fec_string;
        u8 fec_abilities;

        fec_abilities = hw->phy.link_info.req_fec_info;
        /* If both RS and KR are requested, only show RS */
        if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
                req_fec_string = ixl_fec_string[0];
        else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
                req_fec_string = ixl_fec_string[1];
        else
                req_fec_string = ixl_fec_string[2];

        if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
                neg_fec_string = ixl_fec_string[0];
        else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
                neg_fec_string = ixl_fec_string[1];
        else
                neg_fec_string = ixl_fec_string[2];

        log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
            ifp->if_xname,
            ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
            req_fec_string, neg_fec_string,
            (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
            (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
                hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
                ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
                ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
                ixl_fc_string[1] : ixl_fc_string[0]);
}
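
/*
 * Illustrative log line produced by the format above (the values are
 * made up):
 *
 *     ixl0: Link is up, 10 Gbps Full Duplex, Requested FEC: None,
 *     Negotiated FEC: None, Autoneg: True, Flow Control: Full
 */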

/*
** Note: this routine updates the OS on the link state;
**      the real check of the hardware only happens with
**      a link interrupt.
*/
void
ixl_update_link_status(struct ixl_pf *pf)
{
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ifnet            *ifp = vsi->ifp;
        device_t                dev = pf->dev;

        if (pf->link_up) {
                if (vsi->link_active == FALSE) {
                        vsi->link_active = TRUE;
#if __FreeBSD_version >= 1100000
                        ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed);
#else
                        if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed));
#endif
                        if_link_state_change(ifp, LINK_STATE_UP);
                        ixl_link_up_msg(pf);
#ifdef PCI_IOV
                        ixl_broadcast_link_state(pf);
#endif
                }
        } else { /* Link down */
                if (vsi->link_active == TRUE) {
                        if (bootverbose)
                                device_printf(dev, "Link is Down\n");
                        if_link_state_change(ifp, LINK_STATE_DOWN);
                        vsi->link_active = FALSE;
#ifdef PCI_IOV
                        ixl_broadcast_link_state(pf);
#endif
                }
        }
}

/*********************************************************************
 *
 *  This routine stops all traffic on the adapter: it stops the local
 *  timer and disables the rings and their interrupts, then marks the
 *  interface as no longer running.
 *
 **********************************************************************/

void
ixl_stop_locked(struct ixl_pf *pf)
{
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ifnet    *ifp = vsi->ifp;

        INIT_DEBUGOUT("ixl_stop: begin\n");

        IXL_PF_LOCK_ASSERT(pf);

#ifdef IXL_IW
        /* Stop iWARP device */
        if (ixl_enable_iwarp && pf->iw_enabled)
                ixl_iw_pf_stop(pf);
#endif

        /* Stop the local timer */
        callout_stop(&pf->timer);

        ixl_disable_rings_intr(vsi);
        ixl_disable_rings(vsi);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}

void
ixl_stop(struct ixl_pf *pf)
{
        IXL_PF_LOCK(pf);
        ixl_stop_locked(pf);
        IXL_PF_UNLOCK(pf);
}

/*********************************************************************
 *
 *  Setup legacy (INTx/MSI) interrupt resources and handler
 *
 **********************************************************************/
int
ixl_setup_legacy(struct ixl_pf *pf)
{
        device_t        dev = pf->dev;
        int             error, rid = 0;

        if (pf->msix == 1)
                rid = 1;
        pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (pf->res == NULL) {
                device_printf(dev, "bus_alloc_resource_any() for"
                    " legacy/msi interrupt failed\n");
                return (ENXIO);
        }

        /* Set the handler function */
        error = bus_setup_intr(dev, pf->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixl_intr, pf, &pf->tag);
        if (error) {
                pf->res = NULL;
                device_printf(dev, "bus_setup_intr() for legacy/msi"
                    " interrupt handler failed, error %d\n", error);
                return (ENXIO);
        }
        error = bus_describe_intr(dev, pf->res, pf->tag, "irq");
        if (error) {
                /* non-fatal */
                device_printf(dev, "bus_describe_intr() for legacy/msi"
                    " interrupt name failed, error %d\n", error);
        }

        return (0);
}

int
ixl_setup_adminq_tq(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int error = 0;

        /* Tasklet for Admin Queue interrupts */
        TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
#ifdef PCI_IOV
        /* VFLR Tasklet */
        TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif
        /* Create and start Admin Queue taskqueue */
        pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
            taskqueue_thread_enqueue, &pf->tq);
        if (!pf->tq) {
                device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
                return (ENOMEM);
        }
        error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
            device_get_nameunit(dev));
        if (error) {
                device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
                    error);
                taskqueue_free(pf->tq);
                return (error);
        }
        return (0);
}

int
ixl_setup_queue_tqs(struct ixl_vsi *vsi)
{
        struct ixl_queue *que = vsi->queues;
        device_t dev = vsi->dev;
#ifdef  RSS
        int             cpu_id = 0;
        cpuset_t        cpu_mask;
#endif

        /* Create queue tasks and start queue taskqueues */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
                TASK_INIT(&que->task, 0, ixl_handle_que, que);
                que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
                CPU_SETOF(cpu_id, &cpu_mask);
                taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
                    &cpu_mask, "%s (bucket %d)",
                    device_get_nameunit(dev), cpu_id);
#else
                taskqueue_start_threads(&que->tq, 1, PI_NET,
                    "%s (que %d)", device_get_nameunit(dev), que->me);
#endif
        }

        return (0);
}

void
ixl_free_adminq_tq(struct ixl_pf *pf)
{
        if (pf->tq) {
                taskqueue_free(pf->tq);
                pf->tq = NULL;
        }
}

void
ixl_free_queue_tqs(struct ixl_vsi *vsi)
{
        struct ixl_queue *que = vsi->queues;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                if (que->tq) {
                        taskqueue_free(que->tq);
                        que->tq = NULL;
                }
        }
}

int
ixl_setup_adminq_msix(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int rid, error = 0;

        /* Admin IRQ rid is 1, vector is 0 */
        rid = 1;
        /* Get interrupt resource from bus */
        pf->res = bus_alloc_resource_any(dev,
            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
        if (!pf->res) {
                device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
                    " interrupt failed [rid=%d]\n", rid);
                return (ENXIO);
        }
        /* Then associate interrupt with handler */
        error = bus_setup_intr(dev, pf->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixl_msix_adminq, pf, &pf->tag);
        if (error) {
                pf->res = NULL;
                device_printf(dev, "bus_setup_intr() for Admin Queue"
                    " interrupt handler failed, error %d\n", error);
                return (ENXIO);
        }
        error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
        if (error) {
                /* non-fatal */
                device_printf(dev, "bus_describe_intr() for Admin Queue"
                    " interrupt name failed, error %d\n", error);
        }
        pf->admvec = 0;

        return (0);
}
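
/*
 * Illustrative vector/rid layout (derived from the code below): the admin
 * queue uses MSI-X vector 0 on rid 1, and queue i uses vector i + 1 on
 * rid i + 2. For a 4-queue VSI that is vectors 1..4 on rids 2..5.
 */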

/*
 * Allocate interrupt resources from the bus and associate an interrupt
 * handler with each of the VSI's queues.
 */
int
ixl_setup_queue_msix(struct ixl_vsi *vsi)
{
        device_t        dev = vsi->dev;
        struct          ixl_queue *que = vsi->queues;
        struct          tx_ring  *txr;
        int             error, rid, vector = 1;

        /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
        for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
                int cpu_id = i;
                rid = vector + 1;
                txr = &que->txr;
                que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (!que->res) {
                        device_printf(dev, "bus_alloc_resource_any() for"
                            " Queue %d interrupt failed [rid=%d]\n",
                            que->me, rid);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, que->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixl_msix_que, que, &que->tag);
                if (error) {
                        device_printf(dev, "bus_setup_intr() for Queue %d"
                            " interrupt handler failed, error %d\n",
                            que->me, error);
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
                        return (error);
                }
                error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
                if (error) {
                        device_printf(dev, "bus_describe_intr() for Queue %d"
                            " interrupt name failed, error %d\n",
                            que->me, error);
                }
                /* Bind the vector to a CPU */
#ifdef RSS
                cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
                error = bus_bind_intr(dev, que->res, cpu_id);
                if (error) {
                        device_printf(dev, "bus_bind_intr() for Queue %d"
                            " to CPU %d failed, error %d\n",
                            que->me, cpu_id, error);
                }
                que->msix = vector;
        }

        return (0);
}

/*
 * Allocate MSI/X vectors from the OS.
 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
 */
int
ixl_init_msix(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        struct i40e_hw *hw = &pf->hw;
#ifdef IXL_IW
#if __FreeBSD_version >= 1100000
        cpuset_t cpu_set;
#endif
#endif
        int auto_max_queues;
        int rid, want, vectors, queues, available;
#ifdef IXL_IW
        int iw_want = 0, iw_vectors;

        pf->iw_msix = 0;
#endif

        /* Override by tuneable */
        if (!pf->enable_msix)
                goto no_msix;

        /* First try MSI/X */
        rid = PCIR_BAR(IXL_MSIX_BAR);
        pf->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!pf->msix_mem) {
                /* May not be enabled */
                device_printf(pf->dev,
                    "Unable to map MSIX table\n");
                goto no_msix;
        }

        available = pci_msix_count(dev);
        if (available < 2) {
                /* system has msix disabled (0), or only one vector (1) */
                device_printf(pf->dev, "Less than two MSI-X vectors available\n");
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, pf->msix_mem);
                pf->msix_mem = NULL;
                goto no_msix;
        }

        /* Clamp max number of queues based on:
         * - # of MSI-X vectors available
         * - # of cpus available
         * - # of queues that can be assigned to the LAN VSI
         */
        auto_max_queues = min(mp_ncpus, available - 1);
        if (hw->mac.type == I40E_MAC_X722)
                auto_max_queues = min(auto_max_queues, 128);
        else
                auto_max_queues = min(auto_max_queues, 64);

        /* Override with tunable value if tunable is less than autoconfig count */
        if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
                queues = pf->max_queues;
        /* Use autoconfig amount if that's lower */
        else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
                device_printf(dev, "ixl_max_queues (%d) is too large, using "
                    "autoconfig amount (%d)...\n",
                    pf->max_queues, auto_max_queues);
                queues = auto_max_queues;
        }
        /* Limit maximum auto-configured queues to 8 if no user value is set */
        else
                queues = min(auto_max_queues, 8);

#ifdef  RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif
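
        /*
         * Worked example (illustrative): with 16 CPUs, 12 MSI-X vectors
         * available, no RSS and no tunables set, auto_max_queues =
         * min(16, 11) = 11 and queues = min(11, 8) = 8, so the code below
         * asks for want = 8 + 1 = 9 vectors.
         */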
1431
1432         /*
1433         ** Want one vector (RX/TX pair) per queue
1434         ** plus an additional for the admin queue.
1435         */
1436         want = queues + 1;
1437         if (want <= available)  /* Have enough */
1438                 vectors = want;
1439         else {
1440                 device_printf(pf->dev,
1441                     "MSIX Configuration Problem, "
1442                     "%d vectors available but %d wanted!\n",
1443                     available, want);
1444                 pf->msix_mem = NULL;
1445                 goto no_msix; /* Will go to Legacy setup */
1446         }
1447
1448 #ifdef IXL_IW
1449         if (ixl_enable_iwarp && hw->func_caps.iwarp) {
1450 #if __FreeBSD_version >= 1100000
1451                 if(bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0)
1452                 {
1453                         iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX);
1454                 }
1455 #endif
1456                 if(!iw_want)
1457                         iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX);
1458                 if(ixl_limit_iwarp_msix > 0)
1459                         iw_want = min(iw_want, ixl_limit_iwarp_msix);
1460                 else
1461                         iw_want = min(iw_want, 1);
1462
1463                 available -= vectors;
1464                 if (available > 0) {
1465                         iw_vectors = (available >= iw_want) ?
1466                                 iw_want : available;
1467                         vectors += iw_vectors;
1468                 } else
1469                         iw_vectors = 0;
1470         }
1471 #endif
1472
1473         ixl_set_msix_enable(dev);
1474         if (pci_alloc_msix(dev, &vectors) == 0) {
1475                 device_printf(pf->dev,
1476                     "Using MSIX interrupts with %d vectors\n", vectors);
1477                 pf->msix = vectors;
1478 #ifdef IXL_IW
1479                 if (ixl_enable_iwarp && hw->func_caps.iwarp)
1480                 {
1481                         pf->iw_msix = iw_vectors;
1482                         device_printf(pf->dev,
1483                                         "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n",
1484                                         iw_vectors);
1485                 }
1486 #endif
1487
1488                 pf->vsi.num_queues = queues;
1489 #ifdef RSS
1490                 /*
1491                  * If we're doing RSS, the number of queues needs to
1492                  * match the number of RSS buckets that are configured.
1493                  *
1494                  * + If there's more queues than RSS buckets, we'll end
1495                  *   up with queues that get no traffic.
1496                  *
1497                  * + If there's more RSS buckets than queues, we'll end
1498                  *   up having multiple RSS buckets map to the same queue,
1499                  *   so there'll be some contention.
1500                  */
1501                 if (queues != rss_getnumbuckets()) {
1502                         device_printf(dev,
1503                             "%s: queues (%d) != RSS buckets (%d)"
1504                             "; performance will be impacted.\n",
1505                             __func__, queues, rss_getnumbuckets());
1506                 }
1507 #endif
1508                 return (vectors);
1509         }
1510 no_msix:
1511         vectors = pci_msi_count(dev);
1512         pf->vsi.num_queues = 1;
1513         pf->max_queues = 1;
1514         if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
1515                 device_printf(pf->dev, "Using an MSI interrupt\n");
1516         else {
1517                 vectors = 0;
1518                 device_printf(pf->dev, "Using a Legacy interrupt\n");
1519         }
1520         return (vectors);
1521 }
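
/*
 * A minimal standalone sketch (not driver code) of the queue-count policy
 * implemented above, under hypothetical inputs; sketch_pick_queues() and
 * imin() are illustrative helpers, not part of ixl. One vector is always
 * reserved for the admin queue, the per-MAC ceiling is 128 (X722) or 64,
 * and an unset tunable falls back to a default cap of 8 queues.
 */
#if 0
#include <stdio.h>

static int imin(int a, int b) { return ((a < b) ? a : b); }

static int
sketch_pick_queues(int ncpus, int available, int mac_limit, int tunable)
{
        int auto_max = imin(ncpus, available - 1); /* admin queue reserved */

        auto_max = imin(auto_max, mac_limit);
        if (tunable != 0 && tunable <= auto_max)
                return (tunable);       /* user value wins when it fits */
        if (tunable != 0)
                return (auto_max);      /* tunable too large: autoconfig */
        return (imin(auto_max, 8));     /* no tunable: default cap */
}

int
main(void)
{
        /* 16 CPUs, 10 MSI-X vectors, 64-queue MAC, no tunable -> 8 */
        printf("%d\n", sketch_pick_queues(16, 10, 64, 0));
        return (0);
}
#endif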
1522
1523 /*
1524  * Configure admin queue/misc interrupt cause registers in hardware.
1525  */
1526 void
1527 ixl_configure_intr0_msix(struct ixl_pf *pf)
1528 {
1529         struct i40e_hw *hw = &pf->hw;
1530         u32 reg;
1531
1532         /* First set up the adminq - vector 0 */
1533         wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
1534         rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
1535
1536         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
1537             I40E_PFINT_ICR0_ENA_GRST_MASK |
1538             I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
1539             I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
1540             I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
1541             I40E_PFINT_ICR0_ENA_VFLR_MASK |
1542             I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
1543             I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
1544         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1545
1546         /*
1547          * 0x7FF is the end of the queue list.
1548          * This means we won't use MSI-X vector 0 for a queue interrupt
1549          * in MSIX mode.
1550          */
1551         wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1552         /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
1553         wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
1554
1555         wr32(hw, I40E_PFINT_DYN_CTL0,
1556             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
1557             I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
1558
1559         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
1560 }
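
/*
 * The ITR registers count in 2-microsecond units, so a conversion helper
 * would look like the sketch below (illustrative only, not driver code).
 * For example, the 0x3E written above is 62 * 2 = 124 usecs.
 */
#if 0
static inline unsigned int
itr_reg_to_usec(unsigned int reg)
{
        return (reg * 2);
}

static inline unsigned int
usec_to_itr_reg(unsigned int usec)
{
        return (usec / 2);      /* truncates to a whole 2-usec unit */
}
#endif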
1561
1562 /*
1563  * Configure queue interrupt cause registers in hardware.
1564  */
1565 void
1566 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
1567 {
1568         struct i40e_hw  *hw = &pf->hw;
1569         struct ixl_vsi *vsi = &pf->vsi;
1570         u32             reg;
1571         u16             vector = 1;
1572
1573         for (int i = 0; i < vsi->num_queues; i++, vector++) {
1574                 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
1575                 /* First queue type is RX / 0 */
1576                 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
1577
1578                 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
1579                     (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1580                     (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1581                     (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1582                     (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1583                 wr32(hw, I40E_QINT_RQCTL(i), reg);
1584
1585                 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
1586                     (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1587                     (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1588                     (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1589                     (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
1590                 wr32(hw, I40E_QINT_TQCTL(i), reg);
1591         }
1592 }
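
/*
 * The per-vector cause chain programmed above links each queue pair to one
 * MSI-X vector (vector i + 1 services queue pair i):
 *
 *   LNKLSTN(i) -> RX queue i -> TX queue i -> IXL_QUEUE_EOL
 *
 * The RX cause is listed first, its NEXTQ points at the matching TX queue,
 * and the TX cause terminates the list.
 */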
1593
1594 /*
1595  * Configure for MSI single vector operation 
1596  */
1597 void
1598 ixl_configure_legacy(struct ixl_pf *pf)
1599 {
1600         struct i40e_hw  *hw = &pf->hw;
1601         struct ixl_vsi  *vsi = &pf->vsi;
1602         struct ixl_queue *que = vsi->queues;
1603         struct rx_ring  *rxr = &que->rxr;
1604         struct tx_ring  *txr = &que->txr;
1605         u32 reg;
1606
1607         /* Configure ITR */
1608         vsi->tx_itr_setting = pf->tx_itr;
1609         wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
1610             vsi->tx_itr_setting);
1611         txr->itr = vsi->tx_itr_setting;
1612
1613         vsi->rx_itr_setting = pf->rx_itr;
1614         wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
1615             vsi->rx_itr_setting);
1616         rxr->itr = vsi->rx_itr_setting;
1617
1618         /* Setup "other" causes */
1619         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
1620             | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
1621             | I40E_PFINT_ICR0_ENA_GRST_MASK
1622             | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
1623             | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
1624             | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
1625             | I40E_PFINT_ICR0_ENA_VFLR_MASK
1626             | I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1628         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1629
1630         /* No ITR for non-queue interrupts */
1631         wr32(hw, I40E_PFINT_STAT_CTL0,
1632             IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1633
1634         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
1635         wr32(hw, I40E_PFINT_LNKLST0, 0);
1636
1637         /* Associate the queue pair to the vector and enable the q int */
1638         reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
1639             | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
1640             | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1641         wr32(hw, I40E_QINT_RQCTL(0), reg);
1642
1643         reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
1644             | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
1645             | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
1646         wr32(hw, I40E_QINT_TQCTL(0), reg);
1647 }
1648
1649 int
1650 ixl_allocate_pci_resources(struct ixl_pf *pf)
1651 {
1652         int             rid;
1653         struct i40e_hw *hw = &pf->hw;
1654         device_t        dev = pf->dev;
1655
1656         /* Map BAR0 */
1657         rid = PCIR_BAR(0);
1658         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1659             &rid, RF_ACTIVE);
1660
1661         if (!(pf->pci_mem)) {
1662                 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
1663                 return (ENXIO);
1664         }
1665         /* Ensure proper PCI device operation */
1666         ixl_set_busmaster(dev);
1667
1668         /* Save off the PCI information */
1669         hw->vendor_id = pci_get_vendor(dev);
1670         hw->device_id = pci_get_device(dev);
1671         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1672         hw->subsystem_vendor_id =
1673             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1674         hw->subsystem_device_id =
1675             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1676
1677         hw->bus.device = pci_get_slot(dev);
1678         hw->bus.func = pci_get_function(dev);
1679
1680         /* Save off register access information */
1681         pf->osdep.mem_bus_space_tag =
1682                 rman_get_bustag(pf->pci_mem);
1683         pf->osdep.mem_bus_space_handle =
1684                 rman_get_bushandle(pf->pci_mem);
1685         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
1686         pf->osdep.flush_reg = I40E_GLGEN_STAT;
1687         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
1688
1689         pf->hw.back = &pf->osdep;
1690
1691         return (0);
1692 }
1693
1694 /*
1695  * Teardown and release the admin queue/misc vector
1696  * interrupt.
1697  */
1698 int
1699 ixl_teardown_adminq_msix(struct ixl_pf *pf)
1700 {
1701         device_t                dev = pf->dev;
1702         int                     rid, error = 0;
1703
1704         if (pf->admvec) /* we are doing MSIX */
1705                 rid = pf->admvec + 1;
1706         else
1707                 rid = (pf->msix != 0) ? 1 : 0;
1708
1709         if (pf->tag != NULL) {
1710                 error = bus_teardown_intr(dev, pf->res, pf->tag);
1711                 if (error) {
1712                         device_printf(dev, "bus_teardown_intr() for"
1713                             " interrupt 0 failed\n");
1714                         // return (ENXIO);
1715                 }
1716                 pf->tag = NULL;
1717         }
1718         if (pf->res != NULL) {
1719                 error = bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
1720                 if (error) {
1721                         device_printf(dev, "bus_release_resource() for"
1722                             " interrupt 0 failed [rid=%d]\n", rid);
1723                         // return (ENXIO);
1724                 }
1725                 pf->res = NULL;
1726         }
1727
1728         return (0);
1729 }
1730
1731 int
1732 ixl_teardown_queue_msix(struct ixl_vsi *vsi)
1733 {
1734         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
1735         struct ixl_queue        *que = vsi->queues;
1736         device_t                dev = vsi->dev;
1737         int                     rid, error = 0;
1738
1739         /* We may get here before stations are set up */
1740         if ((pf->msix < 2) || (que == NULL))
1741                 return (0);
1742
1743         /* Release all MSIX queue resources */
1744         for (int i = 0; i < vsi->num_queues; i++, que++) {
1745                 rid = que->msix + 1;
1746                 if (que->tag != NULL) {
1747                         error = bus_teardown_intr(dev, que->res, que->tag);
1748                         if (error) {
1749                                 device_printf(dev, "bus_teardown_intr() for"
1750                                     " Queue %d interrupt failed\n",
1751                                     que->me);
1752                                 // return (ENXIO);
1753                         }
1754                         que->tag = NULL;
1755                 }
1756                 if (que->res != NULL) {
1757                         error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1758                         if (error) {
1759                                 device_printf(dev, "bus_release_resource() for"
1760                                     " Queue %d interrupt failed [rid=%d]\n",
1761                                     que->me, rid);
1762                                 // return (ENXIO);
1763                         }
1764                         que->res = NULL;
1765                 }
1766         }
1767
1768         return (0);
1769 }
1770
1771 void
1772 ixl_free_pci_resources(struct ixl_pf *pf)
1773 {
1774         device_t                dev = pf->dev;
1775         int                     memrid;
1776
1777         ixl_teardown_queue_msix(&pf->vsi);
1778         ixl_teardown_adminq_msix(pf);
1779
1780         if (pf->msix > 0)
1781                 pci_release_msi(dev);
1782         
1783         memrid = PCIR_BAR(IXL_MSIX_BAR);
1784
1785         if (pf->msix_mem != NULL)
1786                 bus_release_resource(dev, SYS_RES_MEMORY,
1787                     memrid, pf->msix_mem);
1788
1789         if (pf->pci_mem != NULL)
1790                 bus_release_resource(dev, SYS_RES_MEMORY,
1791                     PCIR_BAR(0), pf->pci_mem);
1792
1793         return;
1794 }
1795
1796 void
1797 ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
1798 {
1799         /* Register the supported media types with ifmedia */
1800         if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
1801                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1802
1803         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
1804                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1805         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
1806                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1807         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
1808                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1809
1810         if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
1811             phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
1812             phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
1813                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
1814
1815         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
1816                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1817         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
1818                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1819         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
1820                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1821
1822         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
1823             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
1824             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
1825             phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
1826             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1827                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
1828         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
1829                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1830         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
1831                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
1832
1833         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
1834                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1835
1836         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
1837             || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
1838                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
1839         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
1840                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
1841         if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
1842                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1843         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
1844                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1845         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
1846                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1847
1848         if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
1849                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
1850
1851         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
1852                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
1853         if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
1854                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
1855
1856         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
1857                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
1858         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
1859                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
1860         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
1861                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
1862         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
1863                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
1864         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
1865                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
1866         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
1867                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
1868 }
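
/*
 * The if-chain above amounts to a table walk from PHY-type bits to ifmedia
 * words; an equivalent sketch (illustrative only, shown with just two of
 * the mappings from ixl_add_ifmedia() above):
 */
#if 0
static void
sketch_add_media(struct ixl_vsi *vsi, u64 phy_types)
{
        static const struct {
                u64     phy_mask;
                int     ifm_subtype;
        } ixl_media_map[] = {
                { I40E_CAP_PHY_TYPE_100BASE_TX, IFM_100_TX },
                { I40E_CAP_PHY_TYPE_1000BASE_T, IFM_1000_T },
                /* ... one entry per mapping above ... */
        };

        for (size_t i = 0; i < nitems(ixl_media_map); i++)
                if (phy_types & ixl_media_map[i].phy_mask)
                        ifmedia_add(&vsi->media,
                            IFM_ETHER | ixl_media_map[i].ifm_subtype, 0, NULL);
}
#endif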
1869
1870 /*********************************************************************
1871  *
1872  *  Setup networking device structure and register an interface.
1873  *
1874  **********************************************************************/
1875 int
1876 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
1877 {
1878         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
1879         struct ifnet            *ifp;
1880         struct i40e_hw          *hw = vsi->hw;
1881         struct ixl_queue        *que = vsi->queues;
1882         struct i40e_aq_get_phy_abilities_resp abilities;
1883         enum i40e_status_code aq_error = 0;
1884
1885         INIT_DEBUGOUT("ixl_setup_interface: begin");
1886
1887         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1888         if (ifp == NULL) {
1889                 device_printf(dev, "cannot allocate ifnet structure\n");
1890                 return (-1);
1891         }
1892         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1893         ifp->if_mtu = ETHERMTU;
1894         ifp->if_init = ixl_init;
1895         ifp->if_softc = vsi;
1896         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1897         ifp->if_ioctl = ixl_ioctl;
1898
1899 #if __FreeBSD_version >= 1100036
1900         if_setgetcounterfn(ifp, ixl_get_counter);
1901 #endif
1902
1903         ifp->if_transmit = ixl_mq_start;
1904
1905         ifp->if_qflush = ixl_qflush;
1906
1907         ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
1908
1909         vsi->max_frame_size =
1910             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1911             + ETHER_VLAN_ENCAP_LEN;
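        /*
         * e.g. with the default 1500-byte MTU this is
         * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522.
         */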
1912
1913         /* Set TSO limits */
1914         ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1915         ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1916         ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
1917
1918         /*
1919          * Tell the upper layer(s) we support long frames.
1920          */
1921         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1922
1923         ifp->if_capabilities |= IFCAP_HWCSUM;
1924         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1925         ifp->if_capabilities |= IFCAP_TSO;
1926         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1927         ifp->if_capabilities |= IFCAP_LRO;
1928
1929         /* VLAN capabilities */
1930         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1931                              |  IFCAP_VLAN_HWTSO
1932                              |  IFCAP_VLAN_MTU
1933                              |  IFCAP_VLAN_HWCSUM;
1934         ifp->if_capenable = ifp->if_capabilities;
1935
1936         /*
1937         ** Advertise the VLAN HW filter capability, but leave it disabled
1938         ** by default: if VLANs are created on another pseudo-device
1939         ** (e.g. lagg), VLAN events are not passed through and filtering
1940         ** breaks, whereas it works with HW FILTER off. When using VLANs
1941         ** directly on the ixl driver, it can be enabled to get full
1942         ** hardware tag filtering.
1943         */
1944         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1945
1946         /*
1947          * Specify the media types supported by this adapter and register
1948          * callbacks to update media and link information
1949          */
1950         ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
1951                      ixl_media_status);
1952
1953         aq_error = i40e_aq_get_phy_capabilities(hw,
1954             FALSE, TRUE, &abilities, NULL);
1955         /* May need delay to detect fiber correctly */
1956         if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1957                 i40e_msec_delay(200);
1958                 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1959                     TRUE, &abilities, NULL);
1960         }
1961         if (aq_error) {
1962                 if (aq_error == I40E_ERR_UNKNOWN_PHY)
1963                         device_printf(dev, "Unknown PHY type detected!\n");
1964                 else
1965                         device_printf(dev,
1966                             "Error getting supported media types, err %d,"
1967                             " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1968         } else {
1969                 pf->supported_speeds = abilities.link_speed;
1970 #if __FreeBSD_version >= 1100000
1971                 ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
1972 #else
1973                 if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
1974 #endif
1975
1976                 ixl_add_ifmedia(vsi, hw->phy.phy_types);
1977         }
1978
1979         /* Use autoselect media by default */
1980         ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1981         ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
1982
1983         ether_ifattach(ifp, hw->mac.addr);
1984
1985         return (0);
1986 }
1987
1988 /*
1989 ** Run when the Admin Queue gets a link state change interrupt.
1990 */
1991 void
1992 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1993 {
1994         struct i40e_hw  *hw = &pf->hw; 
1995         device_t dev = pf->dev;
1996         struct i40e_aqc_get_link_status *status =
1997             (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1998
1999         /* Request link status from adapter */
2000         hw->phy.get_link_info = TRUE;
2001         i40e_get_link_status(hw, &pf->link_up);
2002
2003         /* Print out message if an unqualified module is found */
2004         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2005             (pf->advertised_speed) &&
2006             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2007             (!(status->link_info & I40E_AQ_LINK_UP)))
2008                 device_printf(dev, "Link failed because "
2009                     "an unqualified module was detected!\n");
2010
2011         /* Update OS link info */
2012         ixl_update_link_status(pf);
2013 }
2014
2015 /*********************************************************************
2016  *
2017  *  Get Firmware Switch configuration
2018  *      - this will need to be more robust when more complex
2019  *        switch configurations are enabled.
2020  *
2021  **********************************************************************/
2022 int
2023 ixl_switch_config(struct ixl_pf *pf)
2024 {
2025         struct i40e_hw  *hw = &pf->hw; 
2026         struct ixl_vsi  *vsi = &pf->vsi;
2027         device_t        dev = vsi->dev;
2028         struct i40e_aqc_get_switch_config_resp *sw_config;
2029         u8      aq_buf[I40E_AQ_LARGE_BUF];
2030         int     ret;
2031         u16     next = 0;
2032
2033         memset(&aq_buf, 0, sizeof(aq_buf));
2034         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2035         ret = i40e_aq_get_switch_config(hw, sw_config,
2036             sizeof(aq_buf), &next, NULL);
2037         if (ret) {
2038                 device_printf(dev, "aq_get_switch_config() failed, error %d,"
2039                     " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
2040                 return (ret);
2041         }
2042         if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
2043                 device_printf(dev,
2044                     "Switch config: header reported: %d in structure, %d total\n",
2045                     sw_config->header.num_reported, sw_config->header.num_total);
2046                 for (int i = 0; i < sw_config->header.num_reported; i++) {
2047                         device_printf(dev,
2048                             "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2049                             sw_config->element[i].element_type,
2050                             sw_config->element[i].seid,
2051                             sw_config->element[i].uplink_seid,
2052                             sw_config->element[i].downlink_seid);
2053                 }
2054         }
2055         /* Simplified due to a single VSI */
2056         vsi->uplink_seid = sw_config->element[0].uplink_seid;
2057         vsi->downlink_seid = sw_config->element[0].downlink_seid;
2058         vsi->seid = sw_config->element[0].seid;
2059         return (ret);
2060 }
2061
2062 /*********************************************************************
2063  *
2064  *  Initialize the VSI:  this handles contexts, which means things
2065  *                       like the number of descriptors and buffer size;
2066  *                       the rings are also initialized here.
2067  *
2068  **********************************************************************/
2069 int
2070 ixl_initialize_vsi(struct ixl_vsi *vsi)
2071 {
2072         struct ixl_pf           *pf = vsi->back;
2073         struct ixl_queue        *que = vsi->queues;
2074         device_t                dev = vsi->dev;
2075         struct i40e_hw          *hw = vsi->hw;
2076         struct i40e_vsi_context ctxt;
2077         int                     tc_queues;
2078         int                     err = 0;
2079
2080         memset(&ctxt, 0, sizeof(ctxt));
2081         ctxt.seid = vsi->seid;
2082         if (pf->veb_seid != 0)
2083                 ctxt.uplink_seid = pf->veb_seid;
2084         ctxt.pf_num = hw->pf_id;
2085         err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2086         if (err) {
2087                 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
2088                     " aq_error %d\n", err, hw->aq.asq_last_status);
2089                 return (err);
2090         }
2091         ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
2092             "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2093             "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2094             "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2095             ctxt.uplink_seid, ctxt.vsi_number,
2096             ctxt.vsis_allocated, ctxt.vsis_unallocated,
2097             ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2098             ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2099         /*
2100         ** Set the queue and traffic class bits
2101         **  - when multiple traffic classes are supported
2102         **    this will need to be more robust.
2103         */
2104         ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2105         ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2106         /* In contig mode, que_mapping[0] is first queue index used by this VSI */
2107         ctxt.info.queue_mapping[0] = 0;
2108         /*
2109          * This VSI will only use traffic class 0; start traffic class 0's
2110          * queue allocation at queue 0, and assign it 2^tc_queues queues (though
2111          * the driver may not use all of them).
2112          */
2113         tc_queues = bsrl(pf->qtag.num_allocated);
2114         ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
2115             & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
2116             ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
2117             & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
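        /*
         * Worked example: with pf->qtag.num_allocated == 16, bsrl() returns
         * 4, so TC0 is advertised 2^4 = 16 queues starting at offset 0.
         */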
2118
2119         /* Set VLAN receive stripping mode */
2120         ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2121         ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2122         if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2123                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2124         else
2125                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2126
2127 #ifdef IXL_IW
2128         /* Set TCP Enable for iWARP capable VSI */
2129         if (ixl_enable_iwarp && pf->iw_enabled) {
2130                 ctxt.info.valid_sections |=
2131                     htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
2132                 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
2133         }
2134 #endif
2135         /* Save VSI number and info for use later */
2136         vsi->vsi_num = ctxt.vsi_number;
2137         bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2138
2139         /* Reset VSI statistics */
2140         ixl_vsi_reset_stats(vsi);
2141         vsi->hw_filters_add = 0;
2142         vsi->hw_filters_del = 0;
2143
2144         ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2145
2146         err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2147         if (err) {
2148                 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
2149                     " aq_error %d\n", err, hw->aq.asq_last_status);
2150                 return (err);
2151         }
2152
2153         for (int i = 0; i < vsi->num_queues; i++, que++) {
2154                 struct tx_ring          *txr = &que->txr;
2155                 struct rx_ring          *rxr = &que->rxr;
2156                 struct i40e_hmc_obj_txq tctx;
2157                 struct i40e_hmc_obj_rxq rctx;
2158                 u32                     txctl;
2159                 u16                     size;
2160
2161                 /* Setup the HMC TX Context  */
2162                 size = que->num_tx_desc * sizeof(struct i40e_tx_desc);
2163                 bzero(&tctx, sizeof(tctx));
2164                 tctx.new_context = 1;
2165                 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2166                 tctx.qlen = que->num_tx_desc;
2167                 tctx.fc_ena = 0;        /* Disable FCoE */
2168                 /*
2169                  * This value needs to be pulled from the VSI that this queue
2170                  * is assigned to. Index into array is traffic class.
2171                  */
2172                 tctx.rdylist = vsi->info.qs_handle[0];
2173                 /*
2174                  * Set these to enable Head Writeback
2175                  * - Address is last entry in TX ring (reserved for HWB index)
2176                  * Leave these as 0 for Descriptor Writeback
2177                  */
2178                 if (vsi->enable_head_writeback) {
2179                         tctx.head_wb_ena = 1;
2180                         tctx.head_wb_addr = txr->dma.pa +
2181                             (que->num_tx_desc * sizeof(struct i40e_tx_desc));
2182                 }
2183                 tctx.rdylist_act = 0;
2184                 err = i40e_clear_lan_tx_queue_context(hw, i);
2185                 if (err) {
2186                         device_printf(dev, "Unable to clear TX context\n");
2187                         break;
2188                 }
2189                 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2190                 if (err) {
2191                         device_printf(dev, "Unable to set TX context\n");
2192                         break;
2193                 }
2194                 /* Associate the ring with this PF */
2195                 txctl = I40E_QTX_CTL_PF_QUEUE;
2196                 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2197                     I40E_QTX_CTL_PF_INDX_MASK);
2198                 wr32(hw, I40E_QTX_CTL(i), txctl);
2199                 ixl_flush(hw);
2200
2201                 /* Do ring (re)init */
2202                 ixl_init_tx_ring(que);
2203
2204                 /* Next setup the HMC RX Context  */
2205                 if (vsi->max_frame_size <= MCLBYTES)
2206                         rxr->mbuf_sz = MCLBYTES;
2207                 else
2208                         rxr->mbuf_sz = MJUMPAGESIZE;
2209
2210                 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2211
2212                 /* Set up an RX context for the HMC */
2213                 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2214                 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2215                 /* ignore header split for now */
2216                 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2217                 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2218                     vsi->max_frame_size : max_rxmax;
2219                 rctx.dtype = 0;
2220                 rctx.dsize = 1;         /* do 32byte descriptors */
2221                 rctx.hsplit_0 = 0;      /* no header split */
2222                 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2223                 rctx.qlen = que->num_rx_desc;
2224                 rctx.tphrdesc_ena = 1;
2225                 rctx.tphwdesc_ena = 1;
2226                 rctx.tphdata_ena = 0;   /* Header Split related */
2227                 rctx.tphhead_ena = 0;   /* Header Split related */
2228                 rctx.lrxqthresh = 2;    /* Interrupt at <128 desc avail */
2229                 rctx.crcstrip = 1;
2230                 rctx.l2tsel = 1;
2231                 rctx.showiv = 1;        /* Strip inner VLAN header */
2232                 rctx.fc_ena = 0;        /* Disable FCoE */
2233                 rctx.prefena = 1;       /* Prefetch descriptors */
2234
2235                 err = i40e_clear_lan_rx_queue_context(hw, i);
2236                 if (err) {
2237                         device_printf(dev,
2238                             "Unable to clear RX context %d\n", i);
2239                         break;
2240                 }
2241                 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2242                 if (err) {
2243                         device_printf(dev, "Unable to set RX context %d\n", i);
2244                         break;
2245                 }
2246                 err = ixl_init_rx_ring(que);
2247                 if (err) {
2248                         device_printf(dev, "Fail in init_rx_ring %d\n", i);
2249                         break;
2250                 }
2251 #ifdef DEV_NETMAP
2252                 /* preserve queue */
2253                 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2254                         struct netmap_adapter *na = NA(vsi->ifp);
2255                         struct netmap_kring *kring = na->rx_rings[i];
2256                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2257                         wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2258                 } else
2259 #endif /* DEV_NETMAP */
2260                 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1);
2261         }
2262         return (err);
2263 }
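
/*
 * A standalone sketch (not driver code) of the rxmax clamp used above: the
 * receive frame limit is the VSI max frame size, capped by what a chain of
 * rx_buf_chain_len buffers of mbuf_sz bytes can hold. The chain length of
 * 5 in the example is hypothetical.
 */
#if 0
#include <stdint.h>

static uint16_t
sketch_rxmax(uint16_t max_frame, uint16_t mbuf_sz, uint16_t chain_len)
{
        uint16_t max_chain = mbuf_sz * chain_len;

        return ((max_frame < max_chain) ? max_frame : max_chain);
}
/* e.g. sketch_rxmax(1522, 2048, 5) == 1522 (2 KB clusters, chain of 5) */
#endif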
2264
2265
2268 void
2269 ixl_vsi_free_queues(struct ixl_vsi *vsi)
2270 {
2271         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
2272         struct ixl_queue        *que = vsi->queues;
2273
2274         if (vsi->queues == NULL)
2275                 return;
2276
2277         for (int i = 0; i < vsi->num_queues; i++, que++) {
2278                 struct tx_ring *txr = &que->txr;
2279                 struct rx_ring *rxr = &que->rxr;
2280         
2281                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2282                         continue;
2283                 IXL_TX_LOCK(txr);
2284                 if (txr->br)
2285                         buf_ring_free(txr->br, M_DEVBUF);
2286                 ixl_free_que_tx(que);
2287                 if (txr->base)
2288                         i40e_free_dma_mem(&pf->hw, &txr->dma);
2289                 IXL_TX_UNLOCK(txr);
2290                 IXL_TX_LOCK_DESTROY(txr);
2291
2292                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2293                         continue;
2294                 IXL_RX_LOCK(rxr);
2295                 ixl_free_que_rx(que);
2296                 if (rxr->base)
2297                         i40e_free_dma_mem(&pf->hw, &rxr->dma);
2298                 IXL_RX_UNLOCK(rxr);
2299                 IXL_RX_LOCK_DESTROY(rxr);
2300         }
2301 }
2302
2303
2304 /*********************************************************************
2305  *
2306  *  Free all VSI structs.
2307  *
2308  **********************************************************************/
2309 void
2310 ixl_free_vsi(struct ixl_vsi *vsi)
2311 {
2312
2313         /* Free station queues */
2314         ixl_vsi_free_queues(vsi);
2315         if (vsi->queues)
2316                 free(vsi->queues, M_DEVBUF);
2317
2318         /* Free VSI filter list */
2319         ixl_free_mac_filters(vsi);
2320 }
2321
2322 void
2323 ixl_free_mac_filters(struct ixl_vsi *vsi)
2324 {
2325         struct ixl_mac_filter *f;
2326
2327         while (!SLIST_EMPTY(&vsi->ftl)) {
2328                 f = SLIST_FIRST(&vsi->ftl);
2329                 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2330                 free(f, M_DEVBUF);
2331         }
2332 }
2333
2334 /*
2335  * Fill out fields in queue struct and setup tx/rx memory and structs
2336  */
2337 static int
2338 ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index)
2339 {
2340         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2341         device_t dev = pf->dev;
2342         struct i40e_hw *hw = &pf->hw;
2343         struct tx_ring *txr = &que->txr;
2344         struct rx_ring *rxr = &que->rxr;
2345         int error = 0;
2346         int rsize, tsize;
2347
2348         que->num_tx_desc = vsi->num_tx_desc;
2349         que->num_rx_desc = vsi->num_rx_desc;
2350         que->me = index;
2351         que->vsi = vsi;
2352
2353         txr->que = que;
2354         txr->tail = I40E_QTX_TAIL(que->me);
2355
2356         /* Initialize the TX lock */
2357         snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2358             device_get_nameunit(dev), que->me);
2359         mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2360         /*
2361          * Create the TX descriptor ring
2362          *
2363          * In Head Writeback mode, the descriptor ring is one bigger
2364          * than the number of descriptors for space for the HW to
2365          * write back index of last completed descriptor.
2366          */
2367         if (vsi->enable_head_writeback) {
2368                 tsize = roundup2((que->num_tx_desc *
2369                     sizeof(struct i40e_tx_desc)) +
2370                     sizeof(u32), DBA_ALIGN);
2371         } else {
2372                 tsize = roundup2((que->num_tx_desc *
2373                     sizeof(struct i40e_tx_desc)), DBA_ALIGN);
2374         }
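        /*
         * e.g. 1024 TX descriptors at 16 bytes each take 16384 bytes; head
         * writeback adds one 4-byte slot for the hardware-written index of
         * the last completed descriptor before rounding up to DBA_ALIGN.
         */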
2375         if (i40e_allocate_dma_mem(hw,
2376             &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2377                 device_printf(dev,
2378                     "Unable to allocate TX Descriptor memory\n");
2379                 error = ENOMEM;
2380                 goto err_destroy_tx_mtx;
2381         }
2382         txr->base = (struct i40e_tx_desc *)txr->dma.va;
2383         bzero((void *)txr->base, tsize);
2384         /* Now allocate transmit soft structs for the ring */
2385         if (ixl_allocate_tx_data(que)) {
2386                 device_printf(dev,
2387                     "Critical Failure setting up TX structures\n");
2388                 error = ENOMEM;
2389                 goto err_free_tx_dma;
2390         }
2391         /* Allocate a buf ring */
2392         txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2393             M_NOWAIT, &txr->mtx);
2394         if (txr->br == NULL) {
2395                 device_printf(dev,
2396                     "Critical Failure setting up TX buf ring\n");
2397                 error = ENOMEM;
2398                 goto err_free_tx_data;
2399         }
2400
2401         rsize = roundup2(que->num_rx_desc *
2402             sizeof(union i40e_rx_desc), DBA_ALIGN);
2403         rxr->que = que;
2404         rxr->tail = I40E_QRX_TAIL(que->me);
2405
2406         /* Initialize the RX side lock */
2407         snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2408             device_get_nameunit(dev), que->me);
2409         mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2410
2411         if (i40e_allocate_dma_mem(hw,
2412             &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2413                 device_printf(dev,
2414                     "Unable to allocate RX Descriptor memory\n");
2415                 error = ENOMEM;
2416                 goto err_destroy_rx_mtx;
2417         }
2418         rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2419         bzero((void *)rxr->base, rsize);
2420         /* Allocate receive soft structs for the ring */
2421         if (ixl_allocate_rx_data(que)) {
2422                 device_printf(dev,
2423                     "Critical Failure setting up receive structs\n");
2424                 error = ENOMEM;
2425                 goto err_free_rx_dma;
2426         }
2427
2428         return (0);
2429
2430 err_free_rx_dma:
2431         i40e_free_dma_mem(&pf->hw, &rxr->dma);
2432 err_destroy_rx_mtx:
2433         mtx_destroy(&rxr->mtx);
2434         /* err_free_tx_buf_ring */
2435         buf_ring_free(txr->br, M_DEVBUF);
2436 err_free_tx_data:
2437         ixl_free_que_tx(que);
2438 err_free_tx_dma:
2439         i40e_free_dma_mem(&pf->hw, &txr->dma);
2440 err_destroy_tx_mtx:
2441         mtx_destroy(&txr->mtx);
2442
2443         return (error);
2444 }
2445
2446 int
2447 ixl_vsi_setup_queues(struct ixl_vsi *vsi)
2448 {
2449         struct ixl_queue        *que;
2450         int                     error = 0;
2451
2452         for (int i = 0; i < vsi->num_queues; i++) {
2453                 que = &vsi->queues[i];
2454                 error = ixl_vsi_setup_queue(vsi, que, i);
2455                 if (error)
2456                         break;
2457         }
2458         return (error);
2459 }
2460
2461
2462 /*********************************************************************
2463  *
2464  *  Allocate memory for the VSI (virtual station interface) and its
2465  *  associated queues, rings and the descriptors associated with each,
2466  *  called only once at attach.
2467  *
2468  **********************************************************************/
2469 int
2470 ixl_setup_stations(struct ixl_pf *pf)
2471 {
2472         device_t                dev = pf->dev;
2473         struct ixl_vsi          *vsi;
2474         int                     error = 0;
2475
2476         vsi = &pf->vsi;
2477         vsi->back = pf;
2478         vsi->hw = &pf->hw;
2479         vsi->id = 0;
2480         vsi->num_vlans = 0;
2482
2483         if (pf->msix > 1)
2484                 vsi->flags |= IXL_FLAGS_USES_MSIX;
2485
2486         /* Get memory for the station queues */
2487         if (!(vsi->queues =
2488             (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2489             vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2490                 device_printf(dev, "Unable to allocate queue memory\n");
2491                 error = ENOMEM;
2492                 goto ixl_setup_stations_err;
2493         }
2494
2495         /* Then setup each queue */
2496         error = ixl_vsi_setup_queues(vsi);
2497 ixl_setup_stations_err:
2498         return (error);
2499 }
2500
2501 /*
2502 ** Provide an update to the queue RX
2503 ** interrupt moderation value.
2504 */
2505 void
2506 ixl_set_queue_rx_itr(struct ixl_queue *que)
2507 {
2508         struct ixl_vsi  *vsi = que->vsi;
2509         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2510         struct i40e_hw  *hw = vsi->hw;
2511         struct rx_ring  *rxr = &que->rxr;
2512         u16             rx_itr;
2513         u16             rx_latency = 0;
2514         int             rx_bytes;
2515
2516         /* Idle, do nothing */
2517         if (rxr->bytes == 0)
2518                 return;
2519
2520         if (pf->dynamic_rx_itr) {
2521                 rx_bytes = rxr->bytes/rxr->itr;
2522                 rx_itr = rxr->itr;
2523
2524                 /* Adjust latency range */
2525                 switch (rxr->latency) {
2526                 case IXL_LOW_LATENCY:
2527                         if (rx_bytes > 10) {
2528                                 rx_latency = IXL_AVE_LATENCY;
2529                                 rx_itr = IXL_ITR_20K;
2530                         }
2531                         break;
2532                 case IXL_AVE_LATENCY:
2533                         if (rx_bytes > 20) {
2534                                 rx_latency = IXL_BULK_LATENCY;
2535                                 rx_itr = IXL_ITR_8K;
2536                         } else if (rx_bytes <= 10) {
2537                                 rx_latency = IXL_LOW_LATENCY;
2538                                 rx_itr = IXL_ITR_100K;
2539                         }
2540                         break;
2541                 case IXL_BULK_LATENCY:
2542                         if (rx_bytes <= 20) {
2543                                 rx_latency = IXL_AVE_LATENCY;
2544                                 rx_itr = IXL_ITR_20K;
2545                         }
2546                         break;
2547                 }
2548
2549                 rxr->latency = rx_latency;
2550
2551                 if (rx_itr != rxr->itr) {
2552                         /* do an exponential smoothing */
2553                         rx_itr = (10 * rx_itr * rxr->itr) /
2554                             ((9 * rx_itr) + rxr->itr);
2555                         rxr->itr = min(rx_itr, IXL_MAX_ITR);
2556                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2557                             que->me), rxr->itr);
2558                 }
2559         } else { /* We may have toggled to non-dynamic */
2560                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2561                         vsi->rx_itr_setting = pf->rx_itr;
2562                 /* Update the hardware if needed */
2563                 if (rxr->itr != vsi->rx_itr_setting) {
2564                         rxr->itr = vsi->rx_itr_setting;
2565                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2566                             que->me), rxr->itr);
2567                 }
2568         }
2569         rxr->bytes = 0;
2570         rxr->packets = 0;
2571         return;
2572 }
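
/*
 * A standalone sketch (not driver code) of the exponential smoothing step
 * above, with hypothetical ITR values: the programmed value moves toward
 * the newly selected target instead of jumping to it.
 */
#if 0
#include <stdio.h>

static unsigned int
smooth_itr(unsigned int target, unsigned int current)
{
        return ((10 * target * current) / ((9 * target) + current));
}

int
main(void)
{
        /* current 40, target 20: (10*20*40) / (9*20 + 40) = 8000/220 = 36 */
        printf("%u\n", smooth_itr(20, 40));
        return (0);
}
#endif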
2573
2574
2575 /*
2576 ** Provide an update to the queue TX
2577 ** interrupt moderation value.
2578 */
2579 void
2580 ixl_set_queue_tx_itr(struct ixl_queue *que)
2581 {
2582         struct ixl_vsi  *vsi = que->vsi;
2583         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2584         struct i40e_hw  *hw = vsi->hw;
2585         struct tx_ring  *txr = &que->txr;
2586         u16             tx_itr;
2587         u16             tx_latency = 0;
2588         int             tx_bytes;
2589
2591         /* Idle, do nothing */
2592         if (txr->bytes == 0)
2593                 return;
2594
2595         if (pf->dynamic_tx_itr) {
2596                 tx_bytes = txr->bytes/txr->itr;
2597                 tx_itr = txr->itr;
2598
2599                 switch (txr->latency) {
2600                 case IXL_LOW_LATENCY:
2601                         if (tx_bytes > 10) {
2602                                 tx_latency = IXL_AVE_LATENCY;
2603                                 tx_itr = IXL_ITR_20K;
2604                         }
2605                         break;
2606                 case IXL_AVE_LATENCY:
2607                         if (tx_bytes > 20) {
2608                                 tx_latency = IXL_BULK_LATENCY;
2609                                 tx_itr = IXL_ITR_8K;
2610                         } else if (tx_bytes <= 10) {
2611                                 tx_latency = IXL_LOW_LATENCY;
2612                                 tx_itr = IXL_ITR_100K;
2613                         }
2614                         break;
2615                 case IXL_BULK_LATENCY:
2616                         if (tx_bytes <= 20) {
2617                                 tx_latency = IXL_AVE_LATENCY;
2618                                 tx_itr = IXL_ITR_20K;
2619                         }
2620                         break;
2621                 }
2622
2623                 txr->latency = tx_latency;
2624
2625                 if (tx_itr != txr->itr) {
2626                         /* do an exponential smoothing */
2627                         tx_itr = (10 * tx_itr * txr->itr) /
2628                             ((9 * tx_itr) + txr->itr);
2629                         txr->itr = min(tx_itr, IXL_MAX_ITR);
2630                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2631                             que->me), txr->itr);
2632                 }
2634         } else { /* We may have toggled to non-dynamic */
2635                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2636                         vsi->tx_itr_setting = pf->tx_itr;
2637                 /* Update the hardware if needed */
2638                 if (txr->itr != vsi->tx_itr_setting) {
2639                         txr->itr = vsi->tx_itr_setting;
2640                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2641                             que->me), txr->itr);
2642                 }
2643         }
2644         txr->bytes = 0;
2645         txr->packets = 0;
2646         return;
2647 }
2648
2649 void
2650 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
2651     struct sysctl_ctx_list *ctx, const char *sysctl_name)
2652 {
2653         struct sysctl_oid *tree;
2654         struct sysctl_oid_list *child;
2655         struct sysctl_oid_list *vsi_list;
2656
2657         tree = device_get_sysctl_tree(pf->dev);
2658         child = SYSCTL_CHILDREN(tree);
2659         vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
2660                                    CTLFLAG_RD, NULL, "VSI Number");
2661         vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2662
2663         ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
2664 }
2665
2666 #ifdef IXL_DEBUG
2667 /**
2668  * ixl_sysctl_qtx_tail_handler
2669  * Retrieves I40E_QTX_TAIL value from hardware
2670  * for a sysctl.
2671  */
2672 static int
2673 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2674 {
2675         struct ixl_queue *que;
2676         int error;
2677         u32 val;
2678
2679         que = ((struct ixl_queue *)oidp->oid_arg1);
2680         if (!que) return (0);
2681
2682         val = rd32(que->vsi->hw, que->txr.tail);
2683         error = sysctl_handle_int(oidp, &val, 0, req);
2684         if (error || !req->newptr)
2685                 return (error);
2686         return (0);
2687 }
2688
2689 /**
2690  * ixl_sysctl_qrx_tail_handler
2691  * Retrieves I40E_QRX_TAIL value from hardware
2692  * for a sysctl.
2693  */
2694 static int
2695 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2696 {
2697         struct ixl_queue *que;
2698         int error;
2699         u32 val;
2700
2701         que = ((struct ixl_queue *)oidp->oid_arg1);
2702         if (!que) return (0);
2703
2704         val = rd32(que->vsi->hw, que->rxr.tail);
2705         error = sysctl_handle_int(oidp, &val, 0, req);
2706         if (error || !req->newptr)
2707                 return (error);
2708         return (0);
2709 }
2710 #endif
2711
2712 /*
2713  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
2714  * Writes to the ITR registers immediately.
2715  */
2716 static int
2717 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
2718 {
2719         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2720         device_t dev = pf->dev;
2721         int error = 0;
2722         int requested_tx_itr;
2723
2724         requested_tx_itr = pf->tx_itr;
2725         error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2726         if ((error) || (req->newptr == NULL))
2727                 return (error);
2728         if (pf->dynamic_tx_itr) {
2729                 device_printf(dev,
2730                     "Cannot set TX itr value while dynamic TX itr is enabled\n");
2731                 return (EINVAL);
2732         }
2733         if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2734                 device_printf(dev,
2735                     "Invalid TX itr value; value must be between 0 and %d\n",
2736                     IXL_MAX_ITR);
2737                 return (EINVAL);
2738         }
2739
2740         pf->tx_itr = requested_tx_itr;
2741         ixl_configure_tx_itr(pf);
2742
2743         return (error);
2744 }
2745
2746 /*
2747  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
2748  * Writes to the ITR registers immediately.
2749  */
2750 static int
2751 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
2752 {
2753         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2754         device_t dev = pf->dev;
2755         int error = 0;
2756         int requested_rx_itr;
2757
2758         requested_rx_itr = pf->rx_itr;
2759         error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2760         if ((error) || (req->newptr == NULL))
2761                 return (error);
2762         if (pf->dynamic_rx_itr) {
2763                 device_printf(dev,
2764                     "Cannot set RX itr value while dynamic RX itr is enabled\n");
2765                 return (EINVAL);
2766         }
2767         if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2768                 device_printf(dev,
2769                     "Invalid RX itr value; value must be between 0 and %d\n",
2770                     IXL_MAX_ITR);
2771                 return (EINVAL);
2772         }
2773
2774         pf->rx_itr = requested_rx_itr;
2775         ixl_configure_rx_itr(pf);
2776
2777         return (error);
2778 }
2779
2780 void
2781 ixl_add_hw_stats(struct ixl_pf *pf)
2782 {
2783         device_t dev = pf->dev;
2784         struct ixl_vsi *vsi = &pf->vsi;
2785         struct ixl_queue *queues = vsi->queues;
2786         struct i40e_hw_port_stats *pf_stats = &pf->stats;
2787
2788         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2789         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2790         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2791         struct sysctl_oid_list *vsi_list;
2792
2793         struct sysctl_oid *queue_node;
2794         struct sysctl_oid_list *queue_list;
2795
2796         struct tx_ring *txr;
2797         struct rx_ring *rxr;
2798         char queue_namebuf[QUEUE_NAME_LEN];
2799
2800         /* Driver statistics */
2801         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
2802                         CTLFLAG_RD, &pf->watchdog_events,
2803                         "Watchdog timeouts");
2804         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
2805                         CTLFLAG_RD, &pf->admin_irq,
2806                         "Admin Queue IRQ Handled");
2807
2808         ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
2809         vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
2810
2811         /* Queue statistics */
2812         for (int q = 0; q < vsi->num_queues; q++) {
2813                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2814                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
2815                     OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
2816                 queue_list = SYSCTL_CHILDREN(queue_node);
2817
2818                 txr = &(queues[q].txr);
2819                 rxr = &(queues[q].rxr);
2820
2821                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2822                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2823                                 "m_defrag() failed");
2824                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2825                                 CTLFLAG_RD, &(queues[q].irqs),
2826                                 "irqs on this queue");
2827                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2828                                 CTLFLAG_RD, &(queues[q].tso),
2829                                 "TSO packets transmitted");
2830                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2831                                 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2832                                 "Driver tx dma failure in xmit");
2833                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
2834                                 CTLFLAG_RD, &(queues[q].mss_too_small),
2835                                 "TSO sends with an MSS less than 64");
2836                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2837                                 CTLFLAG_RD, &(txr->no_desc),
2838                                 "Queue No Descriptor Available");
2839                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2840                                 CTLFLAG_RD, &(txr->total_packets),
2841                                 "Queue Packets Transmitted");
2842                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2843                                 CTLFLAG_RD, &(txr->tx_bytes),
2844                                 "Queue Bytes Transmitted");
2845                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2846                                 CTLFLAG_RD, &(rxr->rx_packets),
2847                                 "Queue Packets Received");
2848                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2849                                 CTLFLAG_RD, &(rxr->rx_bytes),
2850                                 "Queue Bytes Received");
2851                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
2852                                 CTLFLAG_RD, &(rxr->desc_errs),
2853                                 "Queue Rx Descriptor Errors");
2854                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
2855                                 CTLFLAG_RD, &(rxr->itr), 0,
2856                                 "Queue Rx ITR Interval");
2857                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
2858                                 CTLFLAG_RD, &(txr->itr), 0,
2859                                 "Queue Tx ITR Interval");
2860 #ifdef IXL_DEBUG
2861                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog",
2862                                 CTLFLAG_RD, &(txr->watchdog_timer), 0,
2863                                 "Ticks before watchdog timer causes interface reinit");
2864                 SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail",
2865                                 CTLFLAG_RD, &(txr->next_avail), 0,
2866                                 "Next TX descriptor to be used");
2867                 SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean",
2868                                 CTLFLAG_RD, &(txr->next_to_clean), 0,
2869                                 "Next TX descriptor to be cleaned");
2870                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
2871                                 CTLFLAG_RD, &(rxr->not_done),
2872                                 "Queue Rx Descriptors not Done");
2873                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
2874                                 CTLFLAG_RD, &(rxr->next_refresh), 0,
2875                                 "Next RX descriptor to be refreshed");
2876                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
2877                                 CTLFLAG_RD, &(rxr->next_check), 0,
2878                                 "Next RX descriptor to be checked");
2879                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
2880                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2881                                 sizeof(struct ixl_queue),
2882                                 ixl_sysctl_qrx_tail_handler, "IU",
2883                                 "Queue Receive Descriptor Tail");
2884                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail", 
2885                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2886                                 sizeof(struct ixl_queue),
2887                                 ixl_sysctl_qtx_tail_handler, "IU",
2888                                 "Queue Transmit Descriptor Tail");
2889 #endif
2890         }
2891
2892         /* MAC stats */
2893         ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2894 }
2895
2896 void
2897 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2898         struct sysctl_oid_list *child,
2899         struct i40e_eth_stats *eth_stats)
2900 {
2901         struct ixl_sysctl_info ctls[] =
2902         {
2903                 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2904                 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2905                         "Unicast Packets Received"},
2906                 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2907                         "Multicast Packets Received"},
2908                 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2909                         "Broadcast Packets Received"},
2910                 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2911                 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2912                 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2913                 {&eth_stats->tx_multicast, "mcast_pkts_txd",
2914                         "Multicast Packets Transmitted"},
2915                 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
2916                         "Broadcast Packets Transmitted"},
2917                 // end
2918                 {0,0,0}
2919         };
2920
2921         struct ixl_sysctl_info *entry = ctls;
2922         while (entry->stat != 0)
2923         {
2924                 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2925                                 CTLFLAG_RD, entry->stat,
2926                                 entry->description);
2927                 entry++;
2928         }
2929 }
2930
2931 void
2932 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2933         struct sysctl_oid_list *child,
2934         struct i40e_hw_port_stats *stats)
2935 {
2936         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2937                                     CTLFLAG_RD, NULL, "MAC Statistics");
2938         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
2939
2940         struct i40e_eth_stats *eth_stats = &stats->eth;
2941         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
2942
2943         struct ixl_sysctl_info ctls[] = 
2944         {
2945                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
2946                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
2947                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
2948                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
2949                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
2950                 /* Packet Reception Stats */
2951                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
2952                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
2953                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
2954                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
2955                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
2956                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
2957                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
2958                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
2959                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
2960                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
2961                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
2962                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
2963                 /* Packet Transmission Stats */
2964                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
2965                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
2966                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
2967                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
2968                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
2969                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
2970                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
2971                 /* Flow control */
2972                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
2973                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
2974                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
2975                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
2976                 /* End */
2977                 {0,0,0}
2978         };
2979
2980         struct ixl_sysctl_info *entry = ctls;
2981         while (entry->stat != 0)
2982         {
2983                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
2984                                 CTLFLAG_RD, entry->stat,
2985                                 entry->description);
2986                 entry++;
2987         }
2988 }
2989
2990 void
2991 ixl_set_rss_key(struct ixl_pf *pf)
2992 {
2993         struct i40e_hw *hw = &pf->hw;
2994         struct ixl_vsi *vsi = &pf->vsi;
2995         device_t        dev = pf->dev;
2996         u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
2997         enum i40e_status_code status;
2998
2999 #ifdef RSS
3000         /* Fetch the configured RSS key */
3001         rss_getkey((uint8_t *) &rss_seed);
3002 #else
3003         ixl_get_default_rss_key(rss_seed);
3004 #endif
3005         /* Fill out hash function seed */
3006         if (hw->mac.type == I40E_MAC_X722) {
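                /* The X722 sets the RSS key via an admin queue command */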
3007                 struct i40e_aqc_get_set_rss_key_data key_data;
3008                 bcopy(rss_seed, &key_data, sizeof(key_data));
3009                 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
3010                 if (status)
3011                         device_printf(dev,
3012                             "i40e_aq_set_rss_key status %s, error %s\n",
3013                             i40e_stat_str(hw, status),
3014                             i40e_aq_str(hw, hw->aq.asq_last_status));
3015         } else {
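                /* Other MACs write the key directly to the PFQF_HKEY registers */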
3016                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
3017                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3018         }
3019 }
3020
3021 /*
3022  * Configure enabled PCTYPES for RSS.
3023  */
3024 void
3025 ixl_set_rss_pctypes(struct ixl_pf *pf)
3026 {
3027         struct i40e_hw *hw = &pf->hw;
3028         u64             set_hena = 0, hena;
3029
3030 #ifdef RSS
3031         u32             rss_hash_config;
3032
3033         rss_hash_config = rss_gethashconfig();
3034         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3035                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3036         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3037                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3038         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3039                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3040         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3041                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3042         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3043                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3044         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3045                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3046         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3047                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3048 #else
3049         if (hw->mac.type == I40E_MAC_X722)
3050                 set_hena = IXL_DEFAULT_RSS_HENA_X722;
3051         else
3052                 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
3053 #endif
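        /*
         * HENA is a 64-bit hash-enable mask split across two 32-bit
         * registers; merge the new bits with what is already enabled.
         */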
3054         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3055             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3056         hena |= set_hena;
3057         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
3058         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3059
3060 }
3061
3062 void
3063 ixl_set_rss_hlut(struct ixl_pf *pf)
3064 {
3065         struct i40e_hw  *hw = &pf->hw;
3066         device_t        dev = pf->dev;
3067         struct ixl_vsi *vsi = &pf->vsi;
3068         int             i, que_id;
3069         int             lut_entry_width;
3070         u32             lut = 0;
3071         enum i40e_status_code status;
3072
3073         lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
3074
3075         /* Populate the LUT across queues in round-robin fashion */
3076         u8 hlut_buf[512];
3077         for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
3078 #ifdef RSS
3079                 /*
3080                  * Fetch the RSS bucket id for the given indirection entry.
3081                  * Cap it at the number of configured buckets (which is
3082                  * num_queues.)
3083                  */
3084                 que_id = rss_get_indirection_to_bucket(i);
3085                 que_id = que_id % vsi->num_queues;
3086 #else
3087                 que_id = i % vsi->num_queues;
3088 #endif
3089                 lut = (que_id & ((0x1 << lut_entry_width) - 1));
3090                 hlut_buf[i] = lut;
3091         }
3092
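        /*
         * The X722 loads the LUT via the admin queue; other MACs write
         * the HLUT registers directly, four 8-bit entries per register.
         */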
3093         if (hw->mac.type == I40E_MAC_X722) {
3094                 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
3095                 if (status)
3096                         device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
3097                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3098         } else {
3099                 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
3100                         wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
3101                 ixl_flush(hw);
3102         }
3103 }
3104
3105 /*
3106 ** Setup the PF's RSS parameters.
3107 */
3108 void
3109 ixl_config_rss(struct ixl_pf *pf)
3110 {
3111         ixl_set_rss_key(pf);
3112         ixl_set_rss_pctypes(pf);
3113         ixl_set_rss_hlut(pf);
3114 }
3115
3116 /*
3117 ** This routine is run via a vlan config EVENT; it
3118 ** enables us to use the HW filter table since we
3119 ** can get the vlan id. This just creates the entry
3120 ** in the soft version of the VFTA; init will
3121 ** repopulate the real table.
3122 */
3123 void
3124 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3125 {
3126         struct ixl_vsi  *vsi = ifp->if_softc;
3127         struct i40e_hw  *hw = vsi->hw;
3128         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3129
3130         if (ifp->if_softc !=  arg)   /* Not our event */
3131                 return;
3132
3133         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3134                 return;
3135
3136         IXL_PF_LOCK(pf);
3137         ++vsi->num_vlans;
3138         ixl_add_filter(vsi, hw->mac.addr, vtag);
3139         IXL_PF_UNLOCK(pf);
3140 }
3141
3142 /*
3143 ** This routine is run via a vlan
3144 ** unconfig EVENT; it removes our
3145 ** entry in the soft vfta.
3146 */
3147 void
3148 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3149 {
3150         struct ixl_vsi  *vsi = ifp->if_softc;
3151         struct i40e_hw  *hw = vsi->hw;
3152         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
3153
3154         if (ifp->if_softc !=  arg)
3155                 return;
3156
3157         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
3158                 return;
3159
3160         IXL_PF_LOCK(pf);
3161         --vsi->num_vlans;
3162         ixl_del_filter(vsi, hw->mac.addr, vtag);
3163         IXL_PF_UNLOCK(pf);
3164 }
3165
3166 /*
3167 ** This routine updates vlan filters. Called by init,
3168 ** it scans the filter table and then updates the hw
3169 ** after a soft reset.
3170 */
3171 void
3172 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3173 {
3174         struct ixl_mac_filter   *f;
3175         int                     cnt = 0, flags;
3176
3177         if (vsi->num_vlans == 0)
3178                 return;
3179         /*
3180         ** Scan the filter list for vlan entries,
3181         ** mark them for addition and then call
3182         ** for the AQ update.
3183         */
3184         SLIST_FOREACH(f, &vsi->ftl, next) {
3185                 if (f->flags & IXL_FILTER_VLAN) {
3186                         f->flags |=
3187                             (IXL_FILTER_ADD |
3188                             IXL_FILTER_USED);
3189                         cnt++;
3190                 }
3191         }
3192         if (cnt == 0) {
3193                 printf("setup vlan: no filters found!\n");
3194                 return;
3195         }
3196         flags = IXL_FILTER_VLAN;
3197         flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3198         ixl_add_hw_filters(vsi, flags, cnt);
3199         return;
3200 }
3201
3202 /*
3203 ** Initialize filter list and add filters that the hardware
3204 ** needs to know about.
3205 **
3206 ** Requires VSI's filter list & seid to be set before calling.
3207 */
3208 void
3209 ixl_init_filters(struct ixl_vsi *vsi)
3210 {
3211         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3212
3213         /* Add broadcast address */
3214         ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3215
3216         /*
3217          * Prevent Tx flow control frames from being sent out by
3218          * non-firmware transmitters.
3219          * This affects every VSI in the PF.
3220          */
3221         if (pf->enable_tx_fc_filter)
3222                 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3223 }
3224
3225 /*
3226 ** This routine adds multicast filters
3227 */
3228 void
3229 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3230 {
3231         struct ixl_mac_filter *f;
3232
3233         /* Does one already exist? */
3234         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3235         if (f != NULL)
3236                 return;
3237
3238         f = ixl_get_filter(vsi);
3239         if (f == NULL) {
3240                 printf("WARNING: no filter available!!\n");
3241                 return;
3242         }
3243         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3244         f->vlan = IXL_VLAN_ANY;
3245         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3246             | IXL_FILTER_MC);
3247
3248         return;
3249 }
3250
3251 void
3252 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3253 {
3254         ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3255 }
3256
3257 /*
3258 ** This routine adds macvlan filters
3259 */
3260 void
3261 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3262 {
3263         struct ixl_mac_filter   *f, *tmp;
3264         struct ixl_pf           *pf;
3265         device_t                dev;
3266
3267         DEBUGOUT("ixl_add_filter: begin");
3268
3269         pf = vsi->back;
3270         dev = pf->dev;
3271
3272         /* Does one already exist? */
3273         f = ixl_find_filter(vsi, macaddr, vlan);
3274         if (f != NULL)
3275                 return;
3276         /*
3277         ** If this is the first vlan being registered, we need
3278         ** to remove the ANY filter that indicates we are not
3279         ** in a vlan, and replace it with a 0 filter.
3280         */
3281         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3282                 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3283                 if (tmp != NULL) {
3284                         ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3285                         ixl_add_filter(vsi, macaddr, 0);
3286                 }
3287         }
3288
3289         f = ixl_get_filter(vsi);
3290         if (f == NULL) {
3291                 device_printf(dev, "WARNING: no filter available!!\n");
3292                 return;
3293         }
3294         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3295         f->vlan = vlan;
3296         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3297         if (f->vlan != IXL_VLAN_ANY)
3298                 f->flags |= IXL_FILTER_VLAN;
3299         else
3300                 vsi->num_macs++;
3301
3302         ixl_add_hw_filters(vsi, f->flags, 1);
3303         return;
3304 }
3305
3306 void
3307 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3308 {
3309         struct ixl_mac_filter *f;
3310
3311         f = ixl_find_filter(vsi, macaddr, vlan);
3312         if (f == NULL)
3313                 return;
3314
3315         f->flags |= IXL_FILTER_DEL;
3316         ixl_del_hw_filters(vsi, 1);
3317         vsi->num_macs--;
3318
3319         /* Check if this is the last vlan removal */
3320         if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3321                 /* Switch back to a non-vlan filter */
3322                 ixl_del_filter(vsi, macaddr, 0);
3323                 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3324         }
3325         return;
3326 }
3327
3328 /*
3329 ** Find the filter with both matching mac addr and vlan id
3330 */
3331 struct ixl_mac_filter *
3332 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
3333 {
3334         struct ixl_mac_filter   *f;
3335         bool                    match = FALSE;
3336
3337         SLIST_FOREACH(f, &vsi->ftl, next) {
3338                 if (!cmp_etheraddr(f->macaddr, macaddr))
3339                         continue;
3340                 if (f->vlan == vlan) {
3341                         match = TRUE;
3342                         break;
3343                 }
3344         }       
3345
3346         if (!match)
3347                 f = NULL;
3348         return (f);
3349 }
3350
3351 /*
3352 ** This routine takes additions to the vsi filter
3353 ** table and creates an Admin Queue call to create
3354 ** the filters in the hardware.
3355 */
3356 void
3357 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3358 {
3359         struct i40e_aqc_add_macvlan_element_data *a, *b;
3360         struct ixl_mac_filter   *f;
3361         struct ixl_pf           *pf;
3362         struct i40e_hw          *hw;
3363         device_t                dev;
3364         int                     err, j = 0;
3365
3366         pf = vsi->back;
3367         dev = pf->dev;
3368         hw = &pf->hw;
3369         IXL_PF_LOCK_ASSERT(pf);
3370
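        /* The PF lock is held, so this allocation must not sleep (M_NOWAIT) */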
3371         a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3372             M_DEVBUF, M_NOWAIT | M_ZERO);
3373         if (a == NULL) {
3374                 device_printf(dev, "add_hw_filters failed to get memory\n");
3375                 return;
3376         }
3377
3378         /*
3379         ** Scan the filter list, each time we find one
3380         ** we add it to the admin queue array and turn off
3381         ** the add bit.
3382         */
3383         SLIST_FOREACH(f, &vsi->ftl, next) {
3384                 if ((f->flags & flags) == flags) {
3385                         b = &a[j]; // a pox on fvl long names :)
3386                         bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3387                         if (f->vlan == IXL_VLAN_ANY) {
3388                                 b->vlan_tag = 0;
3389                                 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3390                         } else {
3391                                 b->vlan_tag = f->vlan;
3392                                 b->flags = 0;
3393                         }
3394                         b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3395                         f->flags &= ~IXL_FILTER_ADD;
3396                         j++;
3397                 }
3398                 if (j == cnt)
3399                         break;
3400         }
3401         if (j > 0) {
3402                 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3403                 if (err) 
3404                         device_printf(dev, "aq_add_macvlan err %d, "
3405                             "aq_error %d\n", err, hw->aq.asq_last_status);
3406                 else
3407                         vsi->hw_filters_add += j;
3408         }
3409         free(a, M_DEVBUF);
3410         return;
3411 }
3412
3413 /*
3414 ** This routine takes removals in the vsi filter
3415 ** table and creates an Admin Queue call to delete
3416 ** the filters in the hardware.
3417 */
3418 void
3419 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3420 {
3421         struct i40e_aqc_remove_macvlan_element_data *d, *e;
3422         struct ixl_pf           *pf;
3423         struct i40e_hw          *hw;
3424         device_t                dev;
3425         struct ixl_mac_filter   *f, *f_temp;
3426         int                     err, j = 0;
3427
3428         DEBUGOUT("ixl_del_hw_filters: begin\n");
3429
3430         pf = vsi->back;
3431         hw = &pf->hw;
3432         dev = pf->dev;
3433
3434         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3435             M_DEVBUF, M_NOWAIT | M_ZERO);
3436         if (d == NULL) {
3437                 printf("del hw filter failed to get memory\n");
3438                 return;
3439         }
3440
3441         SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3442                 if (f->flags & IXL_FILTER_DEL) {
3443                         e = &d[j]; // a pox on fvl long names :)
3444                         bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3445                         e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3446                         if (f->vlan == IXL_VLAN_ANY) {
3447                                 e->vlan_tag = 0;
3448                                 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3449                         } else {
3450                                 e->vlan_tag = f->vlan;
3451                         }
3452                         /* delete entry from vsi list */
3453                         SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3454                         free(f, M_DEVBUF);
3455                         j++;
3456                 }
3457                 if (j == cnt)
3458                         break;
3459         }
3460         if (j > 0) {
3461                 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
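                /*
                 * ENOENT just means a filter was already gone;
                 * only other AQ errors count as failures here.
                 */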
3462                 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3463                         int sc = 0;
3464                         for (int i = 0; i < j; i++)
3465                                 sc += (!d[i].error_code);
3466                         vsi->hw_filters_del += sc;
3467                         device_printf(dev,
3468                             "Failed to remove %d/%d filters, aq error %d\n",
3469                             j - sc, j, hw->aq.asq_last_status);
3470                 } else
3471                         vsi->hw_filters_del += j;
3472         }
3473         free(d, M_DEVBUF);
3474
3475         DEBUGOUT("ixl_del_hw_filters: end\n");
3476         return;
3477 }
3478
3479 int
3480 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3481 {
3482         struct i40e_hw  *hw = &pf->hw;
3483         int             error = 0;
3484         u32             reg;
3485         u16             pf_qidx;
3486
3487         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3488
3489         ixl_dbg(pf, IXL_DBG_EN_DIS,
3490             "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
3491             pf_qidx, vsi_qidx);
3492
3493         i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3494
3495         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3496         reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3497             I40E_QTX_ENA_QENA_STAT_MASK;
3498         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3499         /* Verify the enable took effect */
3500         for (int j = 0; j < 10; j++) {
3501                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3502                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3503                         break;
3504                 i40e_usec_delay(10);
3505         }
3506         if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3507                 device_printf(pf->dev, "TX queue %d still disabled!\n",
3508                     pf_qidx);
3509                 error = ETIMEDOUT;
3510         }
3511
3512         return (error);
3513 }
3514
3515 int
3516 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3517 {
3518         struct i40e_hw  *hw = &pf->hw;
3519         int             error = 0;
3520         u32             reg;
3521         u16             pf_qidx;
3522
3523         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3524
3525         ixl_dbg(pf, IXL_DBG_EN_DIS,
3526             "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3527             pf_qidx, vsi_qidx);
3528
3529         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3530         reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3531             I40E_QRX_ENA_QENA_STAT_MASK;
3532         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3533         /* Verify the enable took effect */
3534         for (int j = 0; j < 10; j++) {
3535                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3536                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3537                         break;
3538                 i40e_usec_delay(10);
3539         }
3540         if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3541                 device_printf(pf->dev, "RX queue %d still disabled!\n",
3542                     pf_qidx);
3543                 error = ETIMEDOUT;
3544         }
3545
3546         return (error);
3547 }
3548
3549 int
3550 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3551 {
3552         int error = 0;
3553
3554         error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3555         /* Called function already prints error message */
3556         if (error)
3557                 return (error);
3558         error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3559         return (error);
3560 }
3561
3562 /* For PF VSI only */
3563 int
3564 ixl_enable_rings(struct ixl_vsi *vsi)
3565 {
3566         struct ixl_pf   *pf = vsi->back;
3567         int             error = 0;
3568
3569         for (int i = 0; i < vsi->num_queues; i++) {
3570                 error = ixl_enable_ring(pf, &pf->qtag, i);
3571                 if (error)
3572                         return (error);
3573         }
3574
3575         return (error);
3576 }
3577
3578 /*
3579  * Returns error on first ring that is detected hung.
3580  */
3581 int
3582 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3583 {
3584         struct i40e_hw  *hw = &pf->hw;
3585         int             error = 0;
3586         u32             reg;
3587         u16             pf_qidx;
3588
3589         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3590
3591         i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
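        /* Give the hardware time to act on the pre-disable request */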
3592         i40e_usec_delay(500);
3593
3594         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3595         reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3596         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3597         /* Verify the disable took effect */
3598         for (int j = 0; j < 10; j++) {
3599                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3600                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3601                         break;
3602                 i40e_msec_delay(10);
3603         }
3604         if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3605                 device_printf(pf->dev, "TX queue %d still enabled!\n",
3606                     pf_qidx);
3607                 error = ETIMEDOUT;
3608         }
3609
3610         return (error);
3611 }
3612
3613 /*
3614  * Returns error on first ring that is detected hung.
3615  */
3616 int
3617 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3618 {
3619         struct i40e_hw  *hw = &pf->hw;
3620         int             error = 0;
3621         u32             reg;
3622         u16             pf_qidx;
3623
3624         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3625
3626         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3627         reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3628         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3629         /* Verify the disable took effect */
3630         for (int j = 0; j < 10; j++) {
3631                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3632                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3633                         break;
3634                 i40e_msec_delay(10);
3635         }
3636         if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3637                 device_printf(pf->dev, "RX queue %d still enabled!\n",
3638                     pf_qidx);
3639                 error = ETIMEDOUT;
3640         }
3641
3642         return (error);
3643 }
3644
3645 int
3646 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3647 {
3648         int error = 0;
3649
3650         error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3651         /* Called function already prints error message */
3652         if (error)
3653                 return (error);
3654         error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3655         return (error);
3656 }
3657
3658 /* For PF VSI only */
3659 int
3660 ixl_disable_rings(struct ixl_vsi *vsi)
3661 {
3662         struct ixl_pf   *pf = vsi->back;
3663         int             error = 0;
3664
3665         for (int i = 0; i < vsi->num_queues; i++) {
3666                 error = ixl_disable_ring(pf, &pf->qtag, i);
3667                 if (error)
3668                         return (error);
3669         }
3670
3671         return (error);
3672 }
3673
3674 /**
3675  * ixl_handle_mdd_event
3676  *
3677  * Called from the interrupt handler to identify possibly malicious VFs
3678  * (but it also detects events from the PF itself)
3679  **/
3680 void
3681 ixl_handle_mdd_event(struct ixl_pf *pf)
3682 {
3683         struct i40e_hw *hw = &pf->hw;
3684         device_t dev = pf->dev;
3685         bool mdd_detected = false;
3686         bool pf_mdd_detected = false;
3687         u32 reg;
3688
3689         /* find what triggered the MDD event */
3690         reg = rd32(hw, I40E_GL_MDET_TX);
3691         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3692                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3693                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3694                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3695                                 I40E_GL_MDET_TX_EVENT_SHIFT;
3696                 u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3697                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
3698                 device_printf(dev,
3699                     "Malicious Driver Detection event %d"
3700                     " on TX queue %d, pf number %d\n",
3701                     event, queue, pf_num);
3702                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3703                 mdd_detected = true;
3704         }
3705         reg = rd32(hw, I40E_GL_MDET_RX);
3706         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3707                 u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3708                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3709                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3710                                 I40E_GL_MDET_RX_EVENT_SHIFT;
3711                 u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3712                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
3713                 device_printf(dev,
3714                     "Malicious Driver Detection event %d"
3715                     " on RX queue %d, pf number %d\n",
3716                     event, queue, pf_num);
3717                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3718                 mdd_detected = true;
3719         }
3720
3721         if (mdd_detected) {
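                /* Check the PF-scoped registers to see if this function was the source */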
3722                 reg = rd32(hw, I40E_PF_MDET_TX);
3723                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3724                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3725                         device_printf(dev,
3726                             "MDD TX event is for this function!\n");
3727                         pf_mdd_detected = true;
3728                 }
3729                 reg = rd32(hw, I40E_PF_MDET_RX);
3730                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3731                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3732                         device_printf(dev,
3733                             "MDD RX event is for this function!\n");
3734                         pf_mdd_detected = true;
3735                 }
3736         }
3737
3738         /* re-enable mdd interrupt cause */
3739         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3740         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3741         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3742         ixl_flush(hw);
3743 }
3744
3745 void
3746 ixl_enable_intr(struct ixl_vsi *vsi)
3747 {
3748         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
3749         struct i40e_hw          *hw = vsi->hw;
3750         struct ixl_queue        *que = vsi->queues;
3751
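        /* In MSI-X mode each queue has its own interrupt; otherwise use vector 0 */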
3752         if (pf->msix > 1) {
3753                 for (int i = 0; i < vsi->num_queues; i++, que++)
3754                         ixl_enable_queue(hw, que->me);
3755         } else
3756                 ixl_enable_intr0(hw);
3757 }
3758
3759 void
3760 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3761 {
3762         struct i40e_hw          *hw = vsi->hw;
3763         struct ixl_queue        *que = vsi->queues;
3764
3765         for (int i = 0; i < vsi->num_queues; i++, que++)
3766                 ixl_disable_queue(hw, que->me);
3767 }
3768
3769 void
3770 ixl_enable_intr0(struct i40e_hw *hw)
3771 {
3772         u32             reg;
3773
3774         /* Use IXL_ITR_NONE so ITR isn't updated here */
3775         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3776             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3777             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3778         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3779 }
3780
3781 void
3782 ixl_disable_intr0(struct i40e_hw *hw)
3783 {
3784         u32             reg;
3785
3786         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3787         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3788         ixl_flush(hw);
3789 }
3790
3791 void
3792 ixl_enable_queue(struct i40e_hw *hw, int id)
3793 {
3794         u32             reg;
3795
3796         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3797             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3798             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3799         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3800 }
3801
3802 void
3803 ixl_disable_queue(struct i40e_hw *hw, int id)
3804 {
3805         u32             reg;
3806
3807         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3808         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3809 }
3810
3811 void
3812 ixl_update_stats_counters(struct ixl_pf *pf)
3813 {
3814         struct i40e_hw  *hw = &pf->hw;
3815         struct ixl_vsi  *vsi = &pf->vsi;
3816         struct ixl_vf   *vf;
3817
3818         struct i40e_hw_port_stats *nsd = &pf->stats;
3819         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3820
3821         /* Update hw stats */
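        /*
         * 48-bit counters span a high/low register pair and are read with
         * ixl_stat_update48(); 32-bit counters use a single register.
         */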
3822         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3823                            pf->stat_offsets_loaded,
3824                            &osd->crc_errors, &nsd->crc_errors);
3825         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3826                            pf->stat_offsets_loaded,
3827                            &osd->illegal_bytes, &nsd->illegal_bytes);
3828         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3829                            I40E_GLPRT_GORCL(hw->port),
3830                            pf->stat_offsets_loaded,
3831                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3832         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3833                            I40E_GLPRT_GOTCL(hw->port),
3834                            pf->stat_offsets_loaded,
3835                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3836         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3837                            pf->stat_offsets_loaded,
3838                            &osd->eth.rx_discards,
3839                            &nsd->eth.rx_discards);
3840         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3841                            I40E_GLPRT_UPRCL(hw->port),
3842                            pf->stat_offsets_loaded,
3843                            &osd->eth.rx_unicast,
3844                            &nsd->eth.rx_unicast);
3845         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3846                            I40E_GLPRT_UPTCL(hw->port),
3847                            pf->stat_offsets_loaded,
3848                            &osd->eth.tx_unicast,
3849                            &nsd->eth.tx_unicast);
3850         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3851                            I40E_GLPRT_MPRCL(hw->port),
3852                            pf->stat_offsets_loaded,
3853                            &osd->eth.rx_multicast,
3854                            &nsd->eth.rx_multicast);
3855         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3856                            I40E_GLPRT_MPTCL(hw->port),
3857                            pf->stat_offsets_loaded,
3858                            &osd->eth.tx_multicast,
3859                            &nsd->eth.tx_multicast);
3860         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3861                            I40E_GLPRT_BPRCL(hw->port),
3862                            pf->stat_offsets_loaded,
3863                            &osd->eth.rx_broadcast,
3864                            &nsd->eth.rx_broadcast);
3865         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3866                            I40E_GLPRT_BPTCL(hw->port),
3867                            pf->stat_offsets_loaded,
3868                            &osd->eth.tx_broadcast,
3869                            &nsd->eth.tx_broadcast);
3870
3871         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3872                            pf->stat_offsets_loaded,
3873                            &osd->tx_dropped_link_down,
3874                            &nsd->tx_dropped_link_down);
3875         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3876                            pf->stat_offsets_loaded,
3877                            &osd->mac_local_faults,
3878                            &nsd->mac_local_faults);
3879         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3880                            pf->stat_offsets_loaded,
3881                            &osd->mac_remote_faults,
3882                            &nsd->mac_remote_faults);
3883         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3884                            pf->stat_offsets_loaded,
3885                            &osd->rx_length_errors,
3886                            &nsd->rx_length_errors);
3887
3888         /* Flow control (LFC) stats */
3889         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3890                            pf->stat_offsets_loaded,
3891                            &osd->link_xon_rx, &nsd->link_xon_rx);
3892         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3893                            pf->stat_offsets_loaded,
3894                            &osd->link_xon_tx, &nsd->link_xon_tx);
3895         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3896                            pf->stat_offsets_loaded,
3897                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
3898         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3899                            pf->stat_offsets_loaded,
3900                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
3901
3902         /* Packet size stats rx */
3903         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3904                            I40E_GLPRT_PRC64L(hw->port),
3905                            pf->stat_offsets_loaded,
3906                            &osd->rx_size_64, &nsd->rx_size_64);
3907         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3908                            I40E_GLPRT_PRC127L(hw->port),
3909                            pf->stat_offsets_loaded,
3910                            &osd->rx_size_127, &nsd->rx_size_127);
3911         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3912                            I40E_GLPRT_PRC255L(hw->port),
3913                            pf->stat_offsets_loaded,
3914                            &osd->rx_size_255, &nsd->rx_size_255);
3915         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3916                            I40E_GLPRT_PRC511L(hw->port),
3917                            pf->stat_offsets_loaded,
3918                            &osd->rx_size_511, &nsd->rx_size_511);
3919         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3920                            I40E_GLPRT_PRC1023L(hw->port),
3921                            pf->stat_offsets_loaded,
3922                            &osd->rx_size_1023, &nsd->rx_size_1023);
3923         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3924                            I40E_GLPRT_PRC1522L(hw->port),
3925                            pf->stat_offsets_loaded,
3926                            &osd->rx_size_1522, &nsd->rx_size_1522);
3927         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3928                            I40E_GLPRT_PRC9522L(hw->port),
3929                            pf->stat_offsets_loaded,
3930                            &osd->rx_size_big, &nsd->rx_size_big);
3931
3932         /* Packet size stats tx */
3933         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3934                            I40E_GLPRT_PTC64L(hw->port),
3935                            pf->stat_offsets_loaded,
3936                            &osd->tx_size_64, &nsd->tx_size_64);
3937         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3938                            I40E_GLPRT_PTC127L(hw->port),
3939                            pf->stat_offsets_loaded,
3940                            &osd->tx_size_127, &nsd->tx_size_127);
3941         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3942                            I40E_GLPRT_PTC255L(hw->port),
3943                            pf->stat_offsets_loaded,
3944                            &osd->tx_size_255, &nsd->tx_size_255);
3945         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3946                            I40E_GLPRT_PTC511L(hw->port),
3947                            pf->stat_offsets_loaded,
3948                            &osd->tx_size_511, &nsd->tx_size_511);
3949         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3950                            I40E_GLPRT_PTC1023L(hw->port),
3951                            pf->stat_offsets_loaded,
3952                            &osd->tx_size_1023, &nsd->tx_size_1023);
3953         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3954                            I40E_GLPRT_PTC1522L(hw->port),
3955                            pf->stat_offsets_loaded,
3956                            &osd->tx_size_1522, &nsd->tx_size_1522);
3957         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3958                            I40E_GLPRT_PTC9522L(hw->port),
3959                            pf->stat_offsets_loaded,
3960                            &osd->tx_size_big, &nsd->tx_size_big);
3961
3962         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3963                            pf->stat_offsets_loaded,
3964                            &osd->rx_undersize, &nsd->rx_undersize);
3965         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3966                            pf->stat_offsets_loaded,
3967                            &osd->rx_fragments, &nsd->rx_fragments);
3968         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3969                            pf->stat_offsets_loaded,
3970                            &osd->rx_oversize, &nsd->rx_oversize);
3971         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3972                            pf->stat_offsets_loaded,
3973                            &osd->rx_jabber, &nsd->rx_jabber);
3974         pf->stat_offsets_loaded = true;
3975         /* End hw stats */
3976
3977         /* Update vsi stats */
3978         ixl_update_vsi_stats(vsi);
3979
3980         for (int i = 0; i < pf->num_vfs; i++) {
3981                 vf = &pf->vfs[i];
3982                 if (vf->vf_flags & VF_FLAG_ENABLED)
3983                         ixl_update_eth_stats(&pf->vfs[i].vsi);
3984         }
3985 }
3986
3987 int
3988 ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
3989 {
3990         struct i40e_hw *hw = &pf->hw;
3991         struct ixl_vsi *vsi = &pf->vsi;
3992         device_t dev = pf->dev;
3993         int error = 0;
3994
3995         /* Teardown */
3996         if (is_up)
3997                 ixl_stop(pf);
3998
3999         ixl_teardown_queue_msix(vsi);
4000
4001         error = i40e_shutdown_lan_hmc(hw);
4002         if (error)
4003                 device_printf(dev,
4004                     "Shutdown LAN HMC failed with code %d\n", error);
4005
4006         ixl_disable_intr0(hw);
4007         ixl_teardown_adminq_msix(pf);
4008
4009         error = i40e_shutdown_adminq(hw);
4010         if (error)
4011                 device_printf(dev,
4012                     "Shutdown Admin queue failed with code %d\n", error);
4013
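        /* Wait for the periodic timer to finish before freeing what it uses */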
4014         callout_drain(&pf->timer);
4015
4016         /* Free ring buffers, locks and filters */
4017         ixl_vsi_free_queues(vsi);
4018
4019         /* Free VSI filter list */
4020         ixl_free_mac_filters(vsi);
4021
4022         ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
4023
4024         return (error);
4025 }
4026
4027 int
4028 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
4029 {
4030         struct i40e_hw *hw = &pf->hw;
4031         struct ixl_vsi *vsi = &pf->vsi;
4032         device_t dev = pf->dev;
4033         int error = 0;
4034
4035         device_printf(dev, "Rebuilding driver state...\n");
4036
4037         error = i40e_pf_reset(hw);
4038         if (error) {
4039                 device_printf(dev, "PF reset failure %s\n",
4040                     i40e_stat_str(hw, error));
4041                 goto ixl_rebuild_hw_structs_after_reset_err;
4042         }
4043
4044         /* Setup */
4045         error = i40e_init_adminq(hw);
4046         if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
4047                 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
4048                     error);
4049                 goto ixl_rebuild_hw_structs_after_reset_err;
4050         }
4051
4052         i40e_clear_pxe_mode(hw);
4053
4054         error = ixl_get_hw_capabilities(pf);
4055         if (error) {
4056                 device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
4057                 goto ixl_rebuild_hw_structs_after_reset_err;
4058         }
4059
4060         error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
4061             hw->func_caps.num_rx_qp, 0, 0);
4062         if (error) {
4063                 device_printf(dev, "init_lan_hmc failed: %d\n", error);
4064                 goto ixl_rebuild_hw_structs_after_reset_err;
4065         }
4066
4067         error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
4068         if (error) {
4069                 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
4070                 goto ixl_rebuild_hw_structs_after_reset_err;
4071         }
4072
4073         /* reserve a contiguous allocation for the PF's VSI */
4074         error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
4075         if (error) {
4076                 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
4077                     error);
4078                 /* TODO: error handling */
4079         }
4080
4081         device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
4082             pf->qtag.num_allocated, pf->qtag.num_active);
4083
4084         error = ixl_switch_config(pf);
4085         if (error) {
4086                 device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
4087                      error);
4088                 goto ixl_rebuild_hw_structs_after_reset_err;
4089         }
4090
4091         if (ixl_vsi_setup_queues(vsi)) {
4092                 device_printf(dev, "setup queues failed!\n");
4093                 error = ENOMEM;
4094                 goto ixl_rebuild_hw_structs_after_reset_err;
4095         }
4096
4097         if (pf->msix > 1) {
4098                 error = ixl_setup_adminq_msix(pf);
4099                 if (error) {
4100                         device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
4101                             error);
4102                         goto ixl_rebuild_hw_structs_after_reset_err;
4103                 }
4104
4105                 ixl_configure_intr0_msix(pf);
4106                 ixl_enable_intr0(hw);
4107
4108                 error = ixl_setup_queue_msix(vsi);
4109                 if (error) {
4110                         device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
4111                             error);
4112                         goto ixl_rebuild_hw_structs_after_reset_err;
4113                 }
4114         } else {
4115                 error = ixl_setup_legacy(pf);
4116                 if (error) {
4117                         device_printf(dev, "ixl_setup_legacy() error: %d\n",
4118                             error);
4119                         goto ixl_rebuild_hw_structs_after_reset_err;
4120                 }
4121         }
4122
4123         /* Determine link state */
4124         if (ixl_attach_get_link_status(pf)) {
4125                 error = EINVAL;
4126                 /* TODO: error handling */
4127         }
4128
4129         i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
4130         ixl_get_fw_lldp_status(pf);
4131
4132         if (is_up)
4133                 ixl_init(pf);
4134
4135         device_printf(dev, "Rebuilding driver state done.\n");
4136         return (0);
4137
4138 ixl_rebuild_hw_structs_after_reset_err:
4139         device_printf(dev, "Reload the driver to recover\n");
4140         return (error);
4141 }
4142
4143 void
4144 ixl_handle_empr_reset(struct ixl_pf *pf)
4145 {
4146         struct ixl_vsi  *vsi = &pf->vsi;
4147         struct i40e_hw  *hw = &pf->hw;
4148         bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
4149         int count = 0;
4150         u32 reg;
4151
4152         ixl_prepare_for_reset(pf, is_up);
4153
4154         /* Typically finishes within 3-4 seconds */
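        /* Poll GLGEN_RSTAT until the DEVSTATE field clears */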
4155         while (count++ < 100) {
4156                 reg = rd32(hw, I40E_GLGEN_RSTAT)
4157                         & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
4158                 if (reg)
4159                         i40e_msec_delay(100);
4160                 else
4161                         break;
4162         }
4163         ixl_dbg(pf, IXL_DBG_INFO,
4164                         "EMPR reset wait count: %d\n", count);
4165
4166         ixl_rebuild_hw_structs_after_reset(pf, is_up);
4167
4168         atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
4169 }
4170
4171 /*
4172 ** Tasklet handler for MSIX Adminq interrupts
4173 **  - done outside the interrupt handler since it might sleep
4174 */
4175 void
4176 ixl_do_adminq(void *context, int pending)
4177 {
4178         struct ixl_pf                   *pf = context;
4179         struct i40e_hw                  *hw = &pf->hw;
4180         struct i40e_arq_event_info      event;
4181         i40e_status                     ret;
4182         device_t                        dev = pf->dev;
4183         u32                             loop = 0;
4184         u16                             opcode, result;
4185
4186         if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4187                 /* Flag cleared at end of this function */
4188                 ixl_handle_empr_reset(pf);
4189                 return;
4190         }
4191
4192         /* Admin Queue handling */
4193         event.buf_len = IXL_AQ_BUF_SZ;
4194         event.msg_buf = malloc(event.buf_len,
4195             M_DEVBUF, M_NOWAIT | M_ZERO);
4196         if (!event.msg_buf) {
4197                 device_printf(dev, "%s: Unable to allocate memory for Admin"
4198                     " Queue event!\n", __func__);
4199                 return;
4200         }
4201
4202         IXL_PF_LOCK(pf);
4203         /* clean and process any events */
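        /* Bounded by IXL_ADM_LIMIT so one pass cannot hold the PF lock forever */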
4204         do {
4205                 ret = i40e_clean_arq_element(hw, &event, &result);
4206                 if (ret)
4207                         break;
4208                 opcode = LE16_TO_CPU(event.desc.opcode);
4209                 ixl_dbg(pf, IXL_DBG_AQ,
4210                     "Admin Queue event: %#06x\n", opcode);
4211                 switch (opcode) {
4212                 case i40e_aqc_opc_get_link_status:
4213                         ixl_link_event(pf, &event);
4214                         break;
4215                 case i40e_aqc_opc_send_msg_to_pf:
4216 #ifdef PCI_IOV
4217                         ixl_handle_vf_msg(pf, &event);
4218 #endif
4219                         break;
4220                 case i40e_aqc_opc_event_lan_overflow:
4221                 default:
4222                         break;
4223                 }
4224
4225         } while (result && (loop++ < IXL_ADM_LIMIT));
4226
4227         free(event.msg_buf, M_DEVBUF);
4228
4229         /*
4230          * If there are still messages to process, reschedule ourselves.
4231          * Otherwise, re-enable our interrupt.
4232          */
4233         if (result > 0)
4234                 taskqueue_enqueue(pf->tq, &pf->adminq);
4235         else
4236                 ixl_enable_intr0(hw);
4237
4238         IXL_PF_UNLOCK(pf);
4239 }
4240
4241 /**
4242  * Update VSI-specific Ethernet statistics counters.
4243  **/
4244 void
4245 ixl_update_eth_stats(struct ixl_vsi *vsi)
4246 {
4247         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4248         struct i40e_hw *hw = &pf->hw;
4249         struct i40e_eth_stats *es;
4250         struct i40e_eth_stats *oes;
4251         struct i40e_hw_port_stats *nsd;
4252         u16 stat_idx = vsi->info.stat_counter_idx;
4253
4254         es = &vsi->eth_stats;
4255         oes = &vsi->eth_stats_offsets;
4256         nsd = &pf->stats;
4257
4258         /* Gather up the stats that the hw collects */
4259         ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4260                            vsi->stat_offsets_loaded,
4261                            &oes->tx_errors, &es->tx_errors);
4262         ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4263                            vsi->stat_offsets_loaded,
4264                            &oes->rx_discards, &es->rx_discards);
4265
4266         ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4267                            I40E_GLV_GORCL(stat_idx),
4268                            vsi->stat_offsets_loaded,
4269                            &oes->rx_bytes, &es->rx_bytes);
4270         ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4271                            I40E_GLV_UPRCL(stat_idx),
4272                            vsi->stat_offsets_loaded,
4273                            &oes->rx_unicast, &es->rx_unicast);
4274         ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4275                            I40E_GLV_MPRCL(stat_idx),
4276                            vsi->stat_offsets_loaded,
4277                            &oes->rx_multicast, &es->rx_multicast);
4278         ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4279                            I40E_GLV_BPRCL(stat_idx),
4280                            vsi->stat_offsets_loaded,
4281                            &oes->rx_broadcast, &es->rx_broadcast);
4282
4283         ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4284                            I40E_GLV_GOTCL(stat_idx),
4285                            vsi->stat_offsets_loaded,
4286                            &oes->tx_bytes, &es->tx_bytes);
4287         ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4288                            I40E_GLV_UPTCL(stat_idx),
4289                            vsi->stat_offsets_loaded,
4290                            &oes->tx_unicast, &es->tx_unicast);
4291         ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4292                            I40E_GLV_MPTCL(stat_idx),
4293                            vsi->stat_offsets_loaded,
4294                            &oes->tx_multicast, &es->tx_multicast);
4295         ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4296                            I40E_GLV_BPTCL(stat_idx),
4297                            vsi->stat_offsets_loaded,
4298                            &oes->tx_broadcast, &es->tx_broadcast);
4299         vsi->stat_offsets_loaded = true;
4300 }
4301
4302 void
4303 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4304 {
4305         struct ixl_pf           *pf;
4306         struct ifnet            *ifp;
4307         struct i40e_eth_stats   *es;
4308         u64                     tx_discards;
4309
4310         struct i40e_hw_port_stats *nsd;
4311
4312         pf = vsi->back;
4313         ifp = vsi->ifp;
4314         es = &vsi->eth_stats;
4315         nsd = &pf->stats;
4316
4317         ixl_update_eth_stats(vsi);
4318
4319         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4320         for (int i = 0; i < vsi->num_queues; i++)
4321                 tx_discards += vsi->queues[i].txr.br->br_drops;
4322
4323         /* Update ifnet stats */
4324         IXL_SET_IPACKETS(vsi, es->rx_unicast +
4325                            es->rx_multicast +
4326                            es->rx_broadcast);
4327         IXL_SET_OPACKETS(vsi, es->tx_unicast +
4328                            es->tx_multicast +
4329                            es->tx_broadcast);
4330         IXL_SET_IBYTES(vsi, es->rx_bytes);
4331         IXL_SET_OBYTES(vsi, es->tx_bytes);
4332         IXL_SET_IMCASTS(vsi, es->rx_multicast);
4333         IXL_SET_OMCASTS(vsi, es->tx_multicast);
4334
4335         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4336             nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4337             nsd->rx_jabber);
4338         IXL_SET_OERRORS(vsi, es->tx_errors);
4339         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4340         IXL_SET_OQDROPS(vsi, tx_discards);
4341         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4342         IXL_SET_COLLISIONS(vsi, 0);
4343 }
4344
4345 /**
4346  * Reset all of the stats for the given pf
4347  **/
4348 void
4349 ixl_pf_reset_stats(struct ixl_pf *pf)
4350 {
4351         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4352         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4353         pf->stat_offsets_loaded = false;
4354 }
4355
4356 /**
4357  * Resets all stats of the given vsi
4358  **/
4359 void
4360 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4361 {
4362         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4363         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4364         vsi->stat_offsets_loaded = false;
4365 }
4366
4367 /**
4368  * Read and update a 48 bit stat from the hw
4369  *
4370  * Since the device stats are not reset at PFReset, they likely will not
4371  * be zeroed when the driver starts.  We'll save the first values read
4372  * and use them as offsets to be subtracted from the raw values in order
4373  * to report stats that count from zero.
4374  **/
4375 void
4376 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4377         bool offset_loaded, u64 *offset, u64 *stat)
4378 {
4379         u64 new_data;
4380
4381 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4382         new_data = rd64(hw, loreg);
4383 #else
4384         /*
4385          * Use two rd32's instead of one rd64; FreeBSD versions before
4386          * 10 and 32-bit platforms don't support 64-bit bus reads/writes.
4387          */
4388         new_data = rd32(hw, loreg);
4389         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4390 #endif
4391
4392         if (!offset_loaded)
4393                 *offset = new_data;
4394         if (new_data >= *offset)
4395                 *stat = new_data - *offset;
4396         else
4397                 *stat = (new_data + ((u64)1 << 48)) - *offset;
4398         *stat &= 0xFFFFFFFFFFFFULL;
4399 }
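/*
 * Worked example of the 48-bit wrap handling above (values hypothetical):
 * with *offset == 0xFFFFFFFFFFF0 and a raw read of new_data == 0x10, the
 * counter has wrapped, so the reported value is
 * (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20, which the final mask
 * keeps within 48 bits.
 */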
4400
4401 /**
4402  * Read and update a 32 bit stat from the hw
4403  **/
4404 void
4405 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4406         bool offset_loaded, u64 *offset, u64 *stat)
4407 {
4408         u32 new_data;
4409
4410         new_data = rd32(hw, reg);
4411         if (!offset_loaded)
4412                 *offset = new_data;
4413         if (new_data >= *offset)
4414                 *stat = (u32)(new_data - *offset);
4415         else
4416                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4417 }
4418
4419 void
4420 ixl_add_device_sysctls(struct ixl_pf *pf)
4421 {
4422         device_t dev = pf->dev;
4423         struct i40e_hw *hw = &pf->hw;
4424
4425         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4426         struct sysctl_oid_list *ctx_list =
4427             SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4428
4429         struct sysctl_oid *debug_node;
4430         struct sysctl_oid_list *debug_list;
4431
4432         struct sysctl_oid *fec_node;
4433         struct sysctl_oid_list *fec_list;
4434
4435         /* Set up sysctls */
4436         SYSCTL_ADD_PROC(ctx, ctx_list,
4437             OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4438             pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4439
4440         SYSCTL_ADD_PROC(ctx, ctx_list,
4441             OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4442             pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4443
4444         SYSCTL_ADD_PROC(ctx, ctx_list,
4445             OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
4446             pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
4447
4448         SYSCTL_ADD_PROC(ctx, ctx_list,
4449             OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4450             pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");
4451
4452         SYSCTL_ADD_PROC(ctx, ctx_list,
4453             OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4454             pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4455
4456         SYSCTL_ADD_PROC(ctx, ctx_list,
4457             OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4458             pf, 0, ixl_sysctl_unallocated_queues, "I",
4459             "Queues not allocated to a PF or VF");
4460
4461         SYSCTL_ADD_PROC(ctx, ctx_list,
4462             OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4463             pf, 0, ixl_sysctl_pf_tx_itr, "I",
4464             "Immediately set TX ITR value for all queues");
4465
4466         SYSCTL_ADD_PROC(ctx, ctx_list,
4467             OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4468             pf, 0, ixl_sysctl_pf_rx_itr, "I",
4469             "Immediately set RX ITR value for all queues");
4470
4471         SYSCTL_ADD_INT(ctx, ctx_list,
4472             OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4473             &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4474
4475         SYSCTL_ADD_INT(ctx, ctx_list,
4476             OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4477             &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
4478
4479         SYSCTL_ADD_INT(ctx, ctx_list,
4480             OID_AUTO, "tx_ring_size", CTLFLAG_RD,
4481             &pf->vsi.num_tx_desc, 0, "TX ring size");
4482
4483         SYSCTL_ADD_INT(ctx, ctx_list,
4484             OID_AUTO, "rx_ring_size", CTLFLAG_RD,
4485             &pf->vsi.num_rx_desc, 0, "RX ring size");
4486
4487         /* Add FEC sysctls for 25G adapters */
4488         if (i40e_is_25G_device(hw->device_id)) {
4489                 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4490                     OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
4491                 fec_list = SYSCTL_CHILDREN(fec_node);
4492
4493                 SYSCTL_ADD_PROC(ctx, fec_list,
4494                     OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
4495                     pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
4496
4497                 SYSCTL_ADD_PROC(ctx, fec_list,
4498                     OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
4499                     pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
4500
4501                 SYSCTL_ADD_PROC(ctx, fec_list,
4502                     OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
4503                     pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");
4504
4505                 SYSCTL_ADD_PROC(ctx, fec_list,
4506                     OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
4507                     pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");
4508
4509                 SYSCTL_ADD_PROC(ctx, fec_list,
4510                     OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
4511                     pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
4512         }
4513
4514         SYSCTL_ADD_PROC(ctx, ctx_list,
4515             OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
4516             pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
4517
4518         /* Add sysctls meant to print debug information, but don't list them
4519          * in "sysctl -a" output. */
4520         debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4521             OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4522         debug_list = SYSCTL_CHILDREN(debug_node);
4523
4524         SYSCTL_ADD_UINT(ctx, debug_list,
4525             OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4526             &pf->hw.debug_mask, 0, "Shared code debug message level");
4527
4528         SYSCTL_ADD_UINT(ctx, debug_list,
4529             OID_AUTO, "core_debug_mask", CTLFLAG_RW,
4530             &pf->dbg_mask, 0, "Non-shared code debug message level");
4531
4532         SYSCTL_ADD_PROC(ctx, debug_list,
4533             OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4534             pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4535
4536         SYSCTL_ADD_PROC(ctx, debug_list,
4537             OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4538             pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4539
4540         SYSCTL_ADD_PROC(ctx, debug_list,
4541             OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4542             pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4543
4544         SYSCTL_ADD_PROC(ctx, debug_list,
4545             OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4546             pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4547
4548         SYSCTL_ADD_PROC(ctx, debug_list,
4549             OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4550             pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4551
4552         SYSCTL_ADD_PROC(ctx, debug_list,
4553             OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4554             pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4555
4556         SYSCTL_ADD_PROC(ctx, debug_list,
4557             OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4558             pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
4559
4560         SYSCTL_ADD_PROC(ctx, debug_list,
4561             OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
4562             pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
4563
4564         SYSCTL_ADD_PROC(ctx, debug_list,
4565             OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
4566             pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
4567
4568         SYSCTL_ADD_PROC(ctx, debug_list,
4569             OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
4570             pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
4571
4572         if (pf->has_i2c) {
4573                 SYSCTL_ADD_PROC(ctx, debug_list,
4574                     OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4575                     pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus");
4576
4577                 SYSCTL_ADD_PROC(ctx, debug_list,
4578                     OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
4579                     pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
4580         }
4581
4582 #ifdef PCI_IOV
4583         SYSCTL_ADD_UINT(ctx, debug_list,
4584             OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4585             0, "PF/VF Virtual Channel debug level");
4586 #endif
4587 }
4588
4589 /*
4590  * Primarily for finding out how many queues can be assigned to VFs,
4591  * at runtime.
4592  */
4593 static int
4594 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4595 {
4596         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4597         int queues;
4598
4599         IXL_PF_LOCK(pf);
4600         queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
4601         IXL_PF_UNLOCK(pf);
4602
4603         return sysctl_handle_int(oidp, NULL, queues, req);
4604 }
4605
4606 /*
4607 ** Set flow control using sysctl:
4608 **      0 - off
4609 **      1 - rx pause
4610 **      2 - tx pause
4611 **      3 - full
4612 */
4613 int
4614 ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4615 {
4616         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4617         struct i40e_hw *hw = &pf->hw;
4618         device_t dev = pf->dev;
4619         int requested_fc, error = 0;
4620         enum i40e_status_code aq_error = 0;
4621         u8 fc_aq_err = 0;
4622
4623         /* Get request */
4624         requested_fc = pf->fc;
4625         error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4626         if ((error) || (req->newptr == NULL))
4627                 return (error);
4628         if (requested_fc < 0 || requested_fc > 3) {
4629                 device_printf(dev,
4630                     "Invalid fc mode; valid modes are 0 through 3\n");
4631                 return (EINVAL);
4632         }
4633
4634         /* Set fc ability for port */
4635         hw->fc.requested_mode = requested_fc;
4636         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4637         if (aq_error) {
4638                 device_printf(dev,
4639                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4640                     __func__, aq_error, fc_aq_err);
4641                 return (EIO);
4642         }
4643         pf->fc = requested_fc;
4644
4645         /* Get new link state */
4646         i40e_msec_delay(250);
4647         hw->phy.get_link_info = TRUE;
4648         i40e_get_link_status(hw, &pf->link_up);
4649
4650         return (0);
4651 }
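/*
 * Example usage from userland, assuming the adapter is device unit 0
 * (the handler is attached under the device's sysctl tree, e.g.
 * dev.ixl.0):
 *
 *	# sysctl dev.ixl.0.fc=3		(request full rx/tx pause)
 *	# sysctl dev.ixl.0.fc		(read back the current mode)
 */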
4652
4653 char *
4654 ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
4655 {
4656         int index;
4657
4658         static char *speeds[] = {
4659                 "Unknown",
4660                 "100 Mbps",
4661                 "1 Gbps",
4662                 "10 Gbps",
4663                 "40 Gbps",
4664                 "20 Gbps",
4665                 "25 Gbps",
4666         };
4667
4668         switch (link_speed) {
4669         case I40E_LINK_SPEED_100MB:
4670                 index = 1;
4671                 break;
4672         case I40E_LINK_SPEED_1GB:
4673                 index = 2;
4674                 break;
4675         case I40E_LINK_SPEED_10GB:
4676                 index = 3;
4677                 break;
4678         case I40E_LINK_SPEED_40GB:
4679                 index = 4;
4680                 break;
4681         case I40E_LINK_SPEED_20GB:
4682                 index = 5;
4683                 break;
4684         case I40E_LINK_SPEED_25GB:
4685                 index = 6;
4686                 break;
4687         case I40E_LINK_SPEED_UNKNOWN:
4688         default:
4689                 index = 0;
4690                 break;
4691         }
4692
4693         return speeds[index];
4694 }
4695
4696 int
4697 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
4698 {
4699         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4700         struct i40e_hw *hw = &pf->hw;
4701         int error = 0;
4702
4703         ixl_update_link_status(pf);
4704
4705         error = sysctl_handle_string(oidp,
4706             ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
4707             8, req);
4708         return (error);
4709 }
4710
4711 /*
4712  * Converts 8-bit speeds value to and from sysctl flags and
4713  * Admin Queue flags.
4714  */
4715 static u8
4716 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
4717 {
4718         static u16 speedmap[6] = {
4719                 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
4720                 (I40E_LINK_SPEED_1GB   | (0x2 << 8)),
4721                 (I40E_LINK_SPEED_10GB  | (0x4 << 8)),
4722                 (I40E_LINK_SPEED_20GB  | (0x8 << 8)),
4723                 (I40E_LINK_SPEED_25GB  | (0x10 << 8)),
4724                 (I40E_LINK_SPEED_40GB  | (0x20 << 8))
4725         };
4726         u8 retval = 0;
4727
4728         for (int i = 0; i < 6; i++) {
4729                 if (to_aq)
4730                         retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
4731                 else
4732                         retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
4733         }
4734
4735         return (retval);
4736 }
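/*
 * Worked example (hypothetical input): each speedmap entry packs the AQ
 * flag in its low byte and the matching sysctl flag in its high byte.
 * With speeds == 0x6 (sysctl flags 0x2 | 0x4) and to_aq == true, the
 * loop matches the 1G and 10G entries and ORs in their low bytes,
 * returning I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB; with
 * to_aq == false it performs the inverse mapping.
 */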
4737
4738 int
4739 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
4740 {
4741         struct i40e_hw *hw = &pf->hw;
4742         device_t dev = pf->dev;
4743         struct i40e_aq_get_phy_abilities_resp abilities;
4744         struct i40e_aq_set_phy_config config;
4745         enum i40e_status_code aq_error = 0;
4746
4747         /* Get current capability information */
4748         aq_error = i40e_aq_get_phy_capabilities(hw,
4749             FALSE, FALSE, &abilities, NULL);
4750         if (aq_error) {
4751                 device_printf(dev,
4752                     "%s: Error getting phy capabilities %d,"
4753                     " aq error: %d\n", __func__, aq_error,
4754                     hw->aq.asq_last_status);
4755                 return (EIO);
4756         }
4757
4758         /* Prepare new config */
4759         bzero(&config, sizeof(config));
4760         if (from_aq)
4761                 config.link_speed = speeds;
4762         else
4763                 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
4764         config.phy_type = abilities.phy_type;
4765         config.phy_type_ext = abilities.phy_type_ext;
4766         config.abilities = abilities.abilities
4767             | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4768         config.eee_capability = abilities.eee_capability;
4769         config.eeer = abilities.eeer_val;
4770         config.low_power_ctrl = abilities.d3_lpan;
4771         config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
4772
4773         /* Do aq command & restart link */
4774         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4775         if (aq_error) {
4776                 device_printf(dev,
4777                     "%s: Error setting new phy config %d,"
4778                     " aq error: %d\n", __func__, aq_error,
4779                     hw->aq.asq_last_status);
4780                 return (EIO);
4781         }
4782
4783         return (0);
4784 }
4785
4786 /*
4787 ** Supported link speeds:
4788 **      Flags:
4789 **       0x1 - 100 Mb
4790 **       0x2 - 1G
4791 **       0x4 - 10G
4792 **       0x8 - 20G
4793 **      0x10 - 25G
4794 **      0x20 - 40G
4795 */
4796 static int
4797 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
4798 {
4799         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4800         int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
4801
4802         return sysctl_handle_int(oidp, NULL, supported, req);
4803 }
4804
4805 /*
4806 ** Control link advertise speed:
4807 **      Flags:
4808 **       0x1 - advertise 100 Mb
4809 **       0x2 - advertise 1G
4810 **       0x4 - advertise 10G
4811 **       0x8 - advertise 20G
4812 **      0x10 - advertise 25G
4813 **      0x20 - advertise 40G
4814 **
4815 **      Set to 0 to disable link
4816 */
4817 int
4818 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
4819 {
4820         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4821         device_t dev = pf->dev;
4822         u8 converted_speeds;
4823         int requested_ls = 0;
4824         int error = 0;
4825
4826         /* Read in new mode */
4827         requested_ls = pf->advertised_speed;
4828         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4829         if ((error) || (req->newptr == NULL))
4830                 return (error);
4831
4832         /* Error out if bits outside of possible flag range are set */
4833         if ((requested_ls & ~((u8)0x3F)) != 0) {
4834                 device_printf(dev, "Input advertised speed out of range; "
4835                     "valid flags are: 0x%02x\n",
4836                     ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4837                 return (EINVAL);
4838         }
4839
4840         /* Check if adapter supports input value */
4841         converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
4842         if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
4843                 device_printf(dev, "Invalid advertised speed; "
4844                     "valid flags are: 0x%02x\n",
4845                     ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
4846                 return (EINVAL);
4847         }
4848
4849         error = ixl_set_advertised_speeds(pf, requested_ls, false);
4850         if (error)
4851                 return (error);
4852
4853         pf->advertised_speed = requested_ls;
4854         ixl_update_link_status(pf);
4855         return (0);
4856 }
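/*
 * Example (hypothetical adapter with 1G/10G support): to advertise only
 * 1G and 10G, set the sysctl to 0x2 | 0x4:
 *
 *	# sysctl dev.ixl.0.advertise_speed=0x6
 *
 * Flags not present in pf->supported_speeds are rejected with EINVAL,
 * and 0 disables the link entirely.
 */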
4857
4858 /*
4859  * Input: bitmap of enum i40e_aq_link_speed
4860  */
4861 static u64
4862 ixl_max_aq_speed_to_value(u8 link_speeds)
4863 {
4864         if (link_speeds & I40E_LINK_SPEED_40GB)
4865                 return IF_Gbps(40);
4866         if (link_speeds & I40E_LINK_SPEED_25GB)
4867                 return IF_Gbps(25);
4868         if (link_speeds & I40E_LINK_SPEED_20GB)
4869                 return IF_Gbps(20);
4870         if (link_speeds & I40E_LINK_SPEED_10GB)
4871                 return IF_Gbps(10);
4872         if (link_speeds & I40E_LINK_SPEED_1GB)
4873                 return IF_Gbps(1);
4874         if (link_speeds & I40E_LINK_SPEED_100MB)
4875                 return IF_Mbps(100);
4876         else
4877                 /* Minimum supported link speed */
4878                 return IF_Mbps(100);
4879 }
4880
4881 /*
4882 ** Get the width and transaction speed of
4883 ** the bus this adapter is plugged into.
4884 */
4885 void
4886 ixl_get_bus_info(struct ixl_pf *pf)
4887 {
4888         struct i40e_hw *hw = &pf->hw;
4889         device_t dev = pf->dev;
4890         u16 link;
4891         u32 offset, num_ports;
4892         u64 max_speed;
4893
4894         /* Some devices don't use PCIE */
4895         if (hw->mac.type == I40E_MAC_X722)
4896                 return;
4897
4898         /* Read PCI Express Capabilities Link Status Register */
4899         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4900         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4901
4902         /* Fill out hw struct with PCIE info */
4903         i40e_set_pci_config_data(hw, link);
4904
4905         /* Use info to print out bandwidth messages */
4906         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4907             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4908             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4909             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4910             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4911             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4912             (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
4913             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4914             ("Unknown"));
4915
4916         /*
4917          * If adapter is in slot with maximum supported speed,
4918          * no warning message needs to be printed out.
4919          */
4920         if (hw->bus.speed >= i40e_bus_speed_8000
4921             && hw->bus.width >= i40e_bus_width_pcie_x8)
4922                 return;
4923
4924         num_ports = bitcount32(hw->func_caps.valid_functions);
4925         max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
4926
4927         if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
4928                 device_printf(dev, "PCI-Express bandwidth available"
4929                     " for this device may be insufficient for"
4930                     " optimal performance.\n");
4931                 device_printf(dev, "Please move the device to a different"
4932                     " PCI-e link with more lanes and/or higher"
4933                     " transfer rate.\n");
4934         }
4935 }
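/*
 * Worked example of the bandwidth heuristic above (hypothetical parts):
 * the comparison treats GT/s per lane as Mb/s, so a four-port 10G
 * device wants 4 * 10000 = 40000 while a 5.0GT/s x4 slot supplies
 * 5000 * 4 = 20000, and the warning fires; a single-port 10G device in
 * the same slot (10000 vs. 20000) stays quiet.  Slots running at
 * 8.0GT/s x8 or better are skipped by the early return above.
 */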
4936
4937 static int
4938 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4939 {
4940         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
4941         struct i40e_hw  *hw = &pf->hw;
4942         struct sbuf     *sbuf;
4943
4944         sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
        if (sbuf == NULL)
                return (ENOMEM);
4945         ixl_nvm_version_str(hw, sbuf);
4946         sbuf_finish(sbuf);
4947         sbuf_delete(sbuf);
4948
4949         return (0);
4950 }
4951
4952 void
4953 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4954 {
4955         if ((nvma->command == I40E_NVM_READ) &&
4956             ((nvma->config & 0xFF) == 0xF) &&
4957             (((nvma->config & 0xF00) >> 8) == 0xF) &&
4958             (nvma->offset == 0) &&
4959             (nvma->data_size == 1)) {
4960                 /* "Get Driver Status" command; nothing to print */
4961         }
4962         else if (nvma->command == I40E_NVM_READ) {
4963                 /* Other NVM read commands: nothing to print */
4964         }
4965         else {
4966                 switch (nvma->command) {
4967                 case 0xB:
4968                         device_printf(dev, "- command: I40E_NVM_READ\n");
4969                         break;
4970                 case 0xC:
4971                         device_printf(dev, "- command: I40E_NVM_WRITE\n");
4972                         break;
4973                 default:
4974                         device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
4975                         break;
4976                 }
4977
4978                 device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
4979                 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4980                 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4981                 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
4982         }
4983 }
4984
4985 int
4986 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
4987 {
4988         struct i40e_hw *hw = &pf->hw;
4989         struct i40e_nvm_access *nvma;
4990         device_t dev = pf->dev;
4991         enum i40e_status_code status = 0;
4992         int perrno;
4993
4994         DEBUGFUNC("ixl_handle_nvmupd_cmd");
4995
4996         /* Sanity checks */
4997         if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
4998             ifd->ifd_data == NULL) {
4999                 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
5000                     __func__);
5001                 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
5002                     __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
5003                 device_printf(dev, "%s: data pointer: %p\n", __func__,
5004                     ifd->ifd_data);
5005                 return (EINVAL);
5006         }
5007
5008         nvma = (struct i40e_nvm_access *)ifd->ifd_data;
5009
5010         if (pf->dbg_mask & IXL_DBG_NVMUPD)
5011                 ixl_print_nvm_cmd(dev, nvma);
5012
5013         if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
5014                 int count = 0;
5015                 while (count++ < 100) {
5016                         i40e_msec_delay(100);
5017                         if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
5018                                 break;
5019                 }
5020         }
5021
5022         if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
5023                 IXL_PF_LOCK(pf);
5024                 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
5025                 IXL_PF_UNLOCK(pf);
5026         } else {
5027                 perrno = -EBUSY;
5028         }
5029
5030         /* Let the nvmupdate report errors, show them only when debug is enabled */
5031         if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
5032                 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
5033                     i40e_stat_str(hw, status), perrno);
5034
5035         /*
5036          * -EPERM is actually ERESTART, which the kernel interprets as a
5037          * request to run this ioctl again, so return -EACCES instead.
5038          */
5039         if (perrno == -EPERM)
5040                 return (-EACCES);
5041         else
5042                 return (perrno);
5043 }
5044
5045 /*********************************************************************
5046  *
5047  *  Media Ioctl callback
5048  *
5049  *  This routine is called whenever the user queries the status of
5050  *  the interface using ifconfig.
5051  *
5052  *  When adding new media types here, make sure to add them to
5053  *  ixl_add_ifmedia(), too.
5054  *
5055  **********************************************************************/
5056 void
5057 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
5058 {
5059         struct ixl_vsi  *vsi = ifp->if_softc;
5060         struct ixl_pf   *pf = vsi->back;
5061         struct i40e_hw  *hw = &pf->hw;
5062
5063         INIT_DEBUGOUT("ixl_media_status: begin");
5064
5065         /* Don't touch PF during reset */
5066         if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING)
5067                 return;
5068
5069         IXL_PF_LOCK(pf);
5070
5071         i40e_get_link_status(hw, &pf->link_up);
5072         ixl_update_link_status(pf);
5073
5074         ifmr->ifm_status = IFM_AVALID;
5075         ifmr->ifm_active = IFM_ETHER;
5076
5077         if (!pf->link_up) {
5078                 IXL_PF_UNLOCK(pf);
5079                 return;
5080         }
5081
5082         ifmr->ifm_status |= IFM_ACTIVE;
5083
5084         /* Hardware always does full-duplex */
5085         ifmr->ifm_active |= IFM_FDX;
5086
5087         switch (hw->phy.link_info.phy_type) {
5088                 /* 100 M */
5089                 case I40E_PHY_TYPE_100BASE_TX:
5090                         ifmr->ifm_active |= IFM_100_TX;
5091                         break;
5092                 /* 1 G */
5093                 case I40E_PHY_TYPE_1000BASE_T:
5094                         ifmr->ifm_active |= IFM_1000_T;
5095                         break;
5096                 case I40E_PHY_TYPE_1000BASE_SX:
5097                         ifmr->ifm_active |= IFM_1000_SX;
5098                         break;
5099                 case I40E_PHY_TYPE_1000BASE_LX:
5100                         ifmr->ifm_active |= IFM_1000_LX;
5101                         break;
5102                 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
5103                         ifmr->ifm_active |= IFM_1000_T;
5104                         break;
5105                 /* 10 G */
5106                 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
5107                         ifmr->ifm_active |= IFM_10G_TWINAX;
5108                         break;
5109                 case I40E_PHY_TYPE_10GBASE_SR:
5110                         ifmr->ifm_active |= IFM_10G_SR;
5111                         break;
5112                 case I40E_PHY_TYPE_10GBASE_LR:
5113                         ifmr->ifm_active |= IFM_10G_LR;
5114                         break;
5115                 case I40E_PHY_TYPE_10GBASE_T:
5116                         ifmr->ifm_active |= IFM_10G_T;
5117                         break;
5118                 case I40E_PHY_TYPE_XAUI:
5119                 case I40E_PHY_TYPE_XFI:
5120                         ifmr->ifm_active |= IFM_10G_TWINAX;
5121                         break;
5122                 case I40E_PHY_TYPE_10GBASE_AOC:
5123                         ifmr->ifm_active |= IFM_10G_AOC;
5124                         break;
5125                 /* 25 G */
5126                 case I40E_PHY_TYPE_25GBASE_KR:
5127                         ifmr->ifm_active |= IFM_25G_KR;
5128                         break;
5129                 case I40E_PHY_TYPE_25GBASE_CR:
5130                         ifmr->ifm_active |= IFM_25G_CR;
5131                         break;
5132                 case I40E_PHY_TYPE_25GBASE_SR:
5133                         ifmr->ifm_active |= IFM_25G_SR;
5134                         break;
5135                 case I40E_PHY_TYPE_25GBASE_LR:
5136                         ifmr->ifm_active |= IFM_25G_LR;
5137                         break;
5138                 case I40E_PHY_TYPE_25GBASE_AOC:
5139                         ifmr->ifm_active |= IFM_25G_AOC;
5140                         break;
5141                 case I40E_PHY_TYPE_25GBASE_ACC:
5142                         ifmr->ifm_active |= IFM_25G_ACC;
5143                         break;
5144                 /* 40 G */
5145                 case I40E_PHY_TYPE_40GBASE_CR4:
5146                 case I40E_PHY_TYPE_40GBASE_CR4_CU:
5147                         ifmr->ifm_active |= IFM_40G_CR4;
5148                         break;
5149                 case I40E_PHY_TYPE_40GBASE_SR4:
5150                         ifmr->ifm_active |= IFM_40G_SR4;
5151                         break;
5152                 case I40E_PHY_TYPE_40GBASE_LR4:
5153                         ifmr->ifm_active |= IFM_40G_LR4;
5154                         break;
5155                 case I40E_PHY_TYPE_XLAUI:
5156                         ifmr->ifm_active |= IFM_OTHER;
5157                         break;
5158                 case I40E_PHY_TYPE_1000BASE_KX:
5159                         ifmr->ifm_active |= IFM_1000_KX;
5160                         break;
5161                 case I40E_PHY_TYPE_SGMII:
5162                         ifmr->ifm_active |= IFM_1000_SGMII;
5163                         break;
5164                 /* ERJ: What's the difference between these? */
5165                 case I40E_PHY_TYPE_10GBASE_CR1_CU:
5166                 case I40E_PHY_TYPE_10GBASE_CR1:
5167                         ifmr->ifm_active |= IFM_10G_CR1;
5168                         break;
5169                 case I40E_PHY_TYPE_10GBASE_KX4:
5170                         ifmr->ifm_active |= IFM_10G_KX4;
5171                         break;
5172                 case I40E_PHY_TYPE_10GBASE_KR:
5173                         ifmr->ifm_active |= IFM_10G_KR;
5174                         break;
5175                 case I40E_PHY_TYPE_SFI:
5176                         ifmr->ifm_active |= IFM_10G_SFI;
5177                         break;
5178                 /* Our single 20G media type */
5179                 case I40E_PHY_TYPE_20GBASE_KR2:
5180                         ifmr->ifm_active |= IFM_20G_KR2;
5181                         break;
5182                 case I40E_PHY_TYPE_40GBASE_KR4:
5183                         ifmr->ifm_active |= IFM_40G_KR4;
5184                         break;
5185                 case I40E_PHY_TYPE_XLPPI:
5186                 case I40E_PHY_TYPE_40GBASE_AOC:
5187                         ifmr->ifm_active |= IFM_40G_XLPPI;
5188                         break;
5189                 /* Unknown to driver */
5190                 default:
5191                         ifmr->ifm_active |= IFM_UNKNOWN;
5192                         break;
5193         }
5194         /* Report flow control status as well */
5195         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
5196                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
5197         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
5198                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
5199
5200         IXL_PF_UNLOCK(pf);
5201 }
5202
5203 void
5204 ixl_init(void *arg)
5205 {
5206         struct ixl_pf *pf = arg;
5207
5208         IXL_PF_LOCK(pf);
5209         ixl_init_locked(pf);
5210         IXL_PF_UNLOCK(pf);
5211 }
5212
5213 /*
5214  * NOTE: Fortville does not support forcing media speeds. Instead,
5215  * use the set_advertise sysctl to set the speeds Fortville
5216  * will advertise or be allowed to operate at.
5217  */
5218 int
5219 ixl_media_change(struct ifnet * ifp)
5220 {
5221         struct ixl_vsi *vsi = ifp->if_softc;
5222         struct ifmedia *ifm = &vsi->media;
5223
5224         INIT_DEBUGOUT("ixl_media_change: begin");
5225
5226         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5227                 return (EINVAL);
5228
5229         if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
5230
5231         return (ENODEV);
5232 }
5233
5234 /*********************************************************************
5235  *  Ioctl entry point
5236  *
5237  *  ixl_ioctl is called when the user wants to configure the
5238  *  interface.
5239  *
5240  *  return 0 on success, positive on failure
5241  **********************************************************************/
5242
5243 int
5244 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
5245 {
5246         struct ixl_vsi  *vsi = ifp->if_softc;
5247         struct ixl_pf   *pf = vsi->back;
5248         struct ifreq    *ifr = (struct ifreq *)data;
5249         struct ifdrv    *ifd = (struct ifdrv *)data;
5250 #if defined(INET) || defined(INET6)
5251         struct ifaddr *ifa = (struct ifaddr *)data;
5252         bool            avoid_reset = FALSE;
5253 #endif
5254         int             error = 0;
5255
5256         switch (command) {
5257
5258         case SIOCSIFADDR:
5259                 IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)");
5260 #ifdef INET
5261                 if (ifa->ifa_addr->sa_family == AF_INET)
5262                         avoid_reset = TRUE;
5263 #endif
5264 #ifdef INET6
5265                 if (ifa->ifa_addr->sa_family == AF_INET6)
5266                         avoid_reset = TRUE;
5267 #endif
5268 #if defined(INET) || defined(INET6)
5269                 /*
5270                 ** Calling init results in link renegotiation,
5271                 ** so we avoid doing it when possible.
5272                 */
5273                 if (avoid_reset) {
5274                         ifp->if_flags |= IFF_UP;
5275                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
5276                                 ixl_init(pf);
5277 #ifdef INET
5278                         if (!(ifp->if_flags & IFF_NOARP))
5279                                 arp_ifinit(ifp, ifa);
5280 #endif
5281                 } else
5282                         error = ether_ioctl(ifp, command, data);
5283                 break;
5284 #endif
5285         case SIOCSIFMTU:
5286                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
5287                 if (ifr->ifr_mtu > IXL_MAX_FRAME -
5288                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
5289                         error = EINVAL;
5290                 } else {
5291                         IXL_PF_LOCK(pf);
5292                         ifp->if_mtu = ifr->ifr_mtu;
5293                         vsi->max_frame_size =
5294                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
5295                             + ETHER_VLAN_ENCAP_LEN;
5296                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5297                                 ixl_init_locked(pf);
5298                         IXL_PF_UNLOCK(pf);
5299                 }
5300                 break;
5301         case SIOCSIFFLAGS:
5302                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
5303                 IXL_PF_LOCK(pf);
5304                 if (ifp->if_flags & IFF_UP) {
5305                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5306                                 if ((ifp->if_flags ^ pf->if_flags) &
5307                                     (IFF_PROMISC | IFF_ALLMULTI)) {
5308                                         ixl_set_promisc(vsi);
5309                                 }
5310                         } else {
5311                                 IXL_PF_UNLOCK(pf);
5312                                 ixl_init(pf);
5313                                 IXL_PF_LOCK(pf);
5314                         }
5315                 } else {
5316                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5317                                 ixl_stop_locked(pf);
5318                         }
5319                 }
5320                 pf->if_flags = ifp->if_flags;
5321                 IXL_PF_UNLOCK(pf);
5322                 break;
5323         case SIOCSDRVSPEC:
5324         case SIOCGDRVSPEC:
5325                 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
5326                     "Info)\n");
5327
5328                 /* NVM update command */
5329                 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
5330                         error = ixl_handle_nvmupd_cmd(pf, ifd);
5331                 else
5332                         error = EINVAL;
5333                 break;
5334         case SIOCADDMULTI:
5335                 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
5336                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5337                         IXL_PF_LOCK(pf);
5338                         ixl_disable_rings_intr(vsi);
5339                         ixl_add_multi(vsi);
5340                         ixl_enable_intr(vsi);
5341                         IXL_PF_UNLOCK(pf);
5342                 }
5343                 break;
5344         case SIOCDELMULTI:
5345                 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
5346                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5347                         IXL_PF_LOCK(pf);
5348                         ixl_disable_rings_intr(vsi);
5349                         ixl_del_multi(vsi);
5350                         ixl_enable_intr(vsi);
5351                         IXL_PF_UNLOCK(pf);
5352                 }
5353                 break;
5354         case SIOCSIFMEDIA:
5355         case SIOCGIFMEDIA:
5356         case SIOCGIFXMEDIA:
5357                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
5358                 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
5359                 break;
5360         case SIOCSIFCAP:
5361         {
5362                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5363                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
5364
5365                 ixl_cap_txcsum_tso(vsi, ifp, mask);
5366
5367                 if (mask & IFCAP_RXCSUM)
5368                         ifp->if_capenable ^= IFCAP_RXCSUM;
5369                 if (mask & IFCAP_RXCSUM_IPV6)
5370                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5371                 if (mask & IFCAP_LRO)
5372                         ifp->if_capenable ^= IFCAP_LRO;
5373                 if (mask & IFCAP_VLAN_HWTAGGING)
5374                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5375                 if (mask & IFCAP_VLAN_HWFILTER)
5376                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5377                 if (mask & IFCAP_VLAN_HWTSO)
5378                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5379                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5380                         IXL_PF_LOCK(pf);
5381                         ixl_init_locked(pf);
5382                         IXL_PF_UNLOCK(pf);
5383                 }
5384                 VLAN_CAPABILITIES(ifp);
5385
5386                 break;
5387         }
5388 #if __FreeBSD_version >= 1003000
5389         case SIOCGI2C:
5390         {
5391                 struct ifi2creq i2c;
5392                 int i;
5393
5394                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
5395                 if (!pf->has_i2c)
5396                         return (ENOTTY);
5397
5398                 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
5399                 if (error != 0)
5400                         break;
5401                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
5402                         error = EINVAL;
5403                         break;
5404                 }
5405                 if (i2c.len > sizeof(i2c.data)) {
5406                         error = EINVAL;
5407                         break;
5408                 }
5409
5410                 for (i = 0; i < i2c.len; i++)
5411                         if (ixl_read_i2c_byte(pf, i2c.offset + i,
5412                             i2c.dev_addr, &i2c.data[i]))
5413                                 return (EIO);
5414
5415                 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
5416                 break;
5417         }
5418 #endif
5419         default:
5420                 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5421                 error = ether_ioctl(ifp, command, data);
5422                 break;
5423         }
5424
5425         return (error);
5426 }
5427
5428 int
5429 ixl_find_i2c_interface(struct ixl_pf *pf)
5430 {
5431         struct i40e_hw *hw = &pf->hw;
5432         bool i2c_en, port_matched;
5433         u32 reg;
5434
5435         for (int i = 0; i < 4; i++) {
5436                 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
5437                 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
5438                 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
5439                     >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
5440                     & BIT(hw->port);
5441                 if (i2c_en && port_matched)
5442                         return (i);
5443         }
5444
5445         return (-1);
5446 }
5447
5448 static char *
5449 ixl_phy_type_string(u32 bit_pos, bool ext)
5450 {
5451         static char * phy_types_str[32] = {
5452                 "SGMII",
5453                 "1000BASE-KX",
5454                 "10GBASE-KX4",
5455                 "10GBASE-KR",
5456                 "40GBASE-KR4",
5457                 "XAUI",
5458                 "XFI",
5459                 "SFI",
5460                 "XLAUI",
5461                 "XLPPI",
5462                 "40GBASE-CR4",
5463                 "10GBASE-CR1",
5464                 "SFP+ Active DA",
5465                 "QSFP+ Active DA",
5466                 "Reserved (14)",
5467                 "Reserved (15)",
5468                 "Reserved (16)",
5469                 "100BASE-TX",
5470                 "1000BASE-T",
5471                 "10GBASE-T",
5472                 "10GBASE-SR",
5473                 "10GBASE-LR",
5474                 "10GBASE-SFP+Cu",
5475                 "10GBASE-CR1",
5476                 "40GBASE-CR4",
5477                 "40GBASE-SR4",
5478                 "40GBASE-LR4",
5479                 "1000BASE-SX",
5480                 "1000BASE-LX",
5481                 "1000BASE-T Optical",
5482                 "20GBASE-KR2",
5483                 "Reserved (31)"
5484         };
5485         static char * ext_phy_types_str[8] = {
5486                 "25GBASE-KR",
5487                 "25GBASE-CR",
5488                 "25GBASE-SR",
5489                 "25GBASE-LR",
5490                 "25GBASE-AOC",
5491                 "25GBASE-ACC",
5492                 "Reserved (6)",
5493                 "Reserved (7)"
5494         };
5495
5496         if (ext && bit_pos > 7) return "Invalid_Ext";
5497         if (bit_pos > 31) return "Invalid";
5498
5499         return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
5500 }
5501
5502 int
5503 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
5504 {
5505         device_t dev = pf->dev;
5506         struct i40e_hw *hw = &pf->hw;
5507         struct i40e_aq_desc desc;
5508         enum i40e_status_code status;
5509
5510         struct i40e_aqc_get_link_status *aq_link_status =
5511                 (struct i40e_aqc_get_link_status *)&desc.params.raw;
5512
5513         i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
5514         link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
5515         status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
5516         if (status) {
5517                 device_printf(dev,
5518                     "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
5519                     __func__, i40e_stat_str(hw, status),
5520                     i40e_aq_str(hw, hw->aq.asq_last_status));
5521                 return (EIO);
5522         }
5523
5524         bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
5525         return (0);
5526 }
5527
5528 static char *
5529 ixl_phy_type_string_ls(u8 val)
5530 {
5531         if (val >= 0x1F)
5532                 return ixl_phy_type_string(val - 0x1F, true);
5533         else
5534                 return ixl_phy_type_string(val, false);
5535 }
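/*
 * Example: a get_link_status phy_type of 0x20 maps to extended index
 * 0x20 - 0x1F = 1 ("25GBASE-CR"), while a value below 0x1F such as 0x12
 * indexes the base table directly ("1000BASE-T").
 */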
5536
5537 static int
5538 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5539 {
5540         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5541         device_t dev = pf->dev;
5542         struct sbuf *buf;
5543         int error = 0;
5544
5545         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5546         if (!buf) {
5547                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5548                 return (ENOMEM);
5549         }
5550
5551         struct i40e_aqc_get_link_status link_status;
5552         error = ixl_aq_get_link_status(pf, &link_status);
5553         if (error) {
5554                 sbuf_delete(buf);
5555                 return (error);
5556         }
5557
5558         sbuf_printf(buf, "\n"
5559             "PHY Type : 0x%02x<%s>\n"
5560             "Speed    : 0x%02x\n"
5561             "Link info: 0x%02x\n"
5562             "AN info  : 0x%02x\n"
5563             "Ext info : 0x%02x\n"
5564             "Loopback : 0x%02x\n"
5565             "Max Frame: %d\n"
5566             "Config   : 0x%02x\n"
5567             "Power    : 0x%02x",
5568             link_status.phy_type,
5569             ixl_phy_type_string_ls(link_status.phy_type),
5570             link_status.link_speed, 
5571             link_status.link_info,
5572             link_status.an_info,
5573             link_status.ext_info,
5574             link_status.loopback,
5575             link_status.max_frame_size,
5576             link_status.config,
5577             link_status.power_desc);
5578
5579         error = sbuf_finish(buf);
5580         if (error)
5581                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5582
5583         sbuf_delete(buf);
5584         return (error);
5585 }
5586
5587 static int
5588 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5589 {
5590         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5591         struct i40e_hw *hw = &pf->hw;
5592         device_t dev = pf->dev;
5593         enum i40e_status_code status;
5594         struct i40e_aq_get_phy_abilities_resp abilities;
5595         struct sbuf *buf;
5596         int error = 0;
5597
5598         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5599         if (!buf) {
5600                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5601                 return (ENOMEM);
5602         }
5603
5604         status = i40e_aq_get_phy_capabilities(hw,
5605             FALSE, FALSE, &abilities, NULL);
5606         if (status) {
5607                 device_printf(dev,
5608                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5609                     __func__, i40e_stat_str(hw, status),
5610                     i40e_aq_str(hw, hw->aq.asq_last_status));
5611                 sbuf_delete(buf);
5612                 return (EIO);
5613         }
5614
5615         sbuf_printf(buf, "\n"
5616             "PHY Type : %08x",
5617             abilities.phy_type);
5618
5619         if (abilities.phy_type != 0) {
5620                 sbuf_printf(buf, "<");
5621                 for (int i = 0; i < 32; i++)
5622                         if ((1 << i) & abilities.phy_type)
5623                                 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
5624                 sbuf_printf(buf, ">\n");
5625         }
5626
5627         sbuf_printf(buf, "PHY Ext  : %02x",
5628             abilities.phy_type_ext);
5629
5630         if (abilities.phy_type_ext != 0) {
5631                 sbuf_printf(buf, "<");
5632                 for (int i = 0; i < 4; i++)
5633                         if ((1 << i) & abilities.phy_type_ext)
5634                                 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
5635                 sbuf_printf(buf, ">");
5636         }
5637         sbuf_printf(buf, "\n");
5638
5639         sbuf_printf(buf,
5640             "Speed    : %02x\n"
5641             "Abilities: %02x\n"
5642             "EEE cap  : %04x\n"
5643             "EEER reg : %08x\n"
5644             "D3 Lpan  : %02x\n"
5645             "ID       : %02x %02x %02x %02x\n"
5646             "ModType  : %02x %02x %02x\n"
5647             "ModType E: %01x\n"
5648             "FEC Cfg  : %02x\n"
5649             "Ext CC   : %02x",
5650             abilities.link_speed, 
5651             abilities.abilities, abilities.eee_capability,
5652             abilities.eeer_val, abilities.d3_lpan,
5653             abilities.phy_id[0], abilities.phy_id[1],
5654             abilities.phy_id[2], abilities.phy_id[3],
5655             abilities.module_type[0], abilities.module_type[1],
5656             abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
5657             abilities.fec_cfg_curr_mod_ext_info & 0x1F,
5658             abilities.ext_comp_code);
5659
5660         error = sbuf_finish(buf);
5661         if (error)
5662                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5663
5664         sbuf_delete(buf);
5665         return (error);
5666 }
5667
5668 static int
5669 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5670 {
5671         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5672         struct ixl_vsi *vsi = &pf->vsi;
5673         struct ixl_mac_filter *f;
5674         char *buf, *buf_i;
5675
5676         int error = 0;
5677         int ftl_len = 0;
5678         int ftl_counter = 0;
5679         int buf_len = 0;
5680         int entry_len = 42;     /* formatted width of one filter entry */
5681
5682         SLIST_FOREACH(f, &vsi->ftl, next) {
5683                 ftl_len++;
5684         }
5685
5686         if (ftl_len < 1) {
5687                 sysctl_handle_string(oidp, "(none)", 6, req);
5688                 return (0);
5689         }
5690
5691         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5692         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
             /* M_NOWAIT allocations can fail; don't sprintf into a NULL buffer */
             if (buf == NULL)
                     return (ENOMEM);
5693
5694         sprintf(buf_i++, "\n");
5695         SLIST_FOREACH(f, &vsi->ftl, next) {
5696                 sprintf(buf_i,
5697                     MAC_FORMAT ", vlan %4d, flags %#06x",
5698                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5699                 buf_i += entry_len;
5700                 /* don't print '\n' for last entry */
5701                 if (++ftl_counter != ftl_len) {
5702                         sprintf(buf_i, "\n");
5703                         buf_i++;
5704                 }
5705         }
5706
5707         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5708         if (error)
5709                 printf("sysctl error: %d\n", error);
5710         free(buf, M_DEVBUF);
5711         return (error);
5712 }
5713
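     /* Number of known switch resource types (0x00-0x13); see ixl_switch_res_type_string() */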
5714 #define IXL_SW_RES_SIZE 0x14
5715 int
5716 ixl_res_alloc_cmp(const void *a, const void *b)
5717 {
5718         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5719         one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5720         two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5721
5722         return ((int)one->resource_type - (int)two->resource_type);
5723 }
5724
5725 /*
5726  * Longest resource-type string below is 25 characters.
5727  */
5728 char *
5729 ixl_switch_res_type_string(u8 type)
5730 {
5731         static char * ixl_switch_res_type_strings[0x14] = {
5732                 "VEB",
5733                 "VSI",
5734                 "Perfect Match MAC address",
5735                 "S-tag",
5736                 "(Reserved)",
5737                 "Multicast hash entry",
5738                 "Unicast hash entry",
5739                 "VLAN",
5740                 "VSI List entry",
5741                 "(Reserved)",
5742                 "VLAN Statistic Pool",
5743                 "Mirror Rule",
5744                 "Queue Set",
5745                 "Inner VLAN Forward filter",
5746                 "(Reserved)",
5747                 "Inner MAC",
5748                 "IP",
5749                 "GRE/VN1 Key",
5750                 "VN2 Key",
5751                 "Tunneling Port"
5752         };
5753
5754         if (type < 0x14)
5755                 return ixl_switch_res_type_strings[type];
5756         else
5757                 return "(Reserved)";
5758 }
5759
5760 static int
5761 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5762 {
5763         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5764         struct i40e_hw *hw = &pf->hw;
5765         device_t dev = pf->dev;
5766         struct sbuf *buf;
5767         enum i40e_status_code status;
5768         int error = 0;
5769
5770         u8 num_entries;
5771         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5772
5773         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5774         if (!buf) {
5775                 device_printf(dev, "Could not allocate sbuf for output.\n");
5776                 return (ENOMEM);
5777         }
5778
5779         bzero(resp, sizeof(resp));
5780         status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5781                                 resp,
5782                                 IXL_SW_RES_SIZE,
5783                                 NULL);
5784         if (status) {
5785                 device_printf(dev,
5786                     "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5787                     __func__, i40e_stat_str(hw, status),
5788                     i40e_aq_str(hw, hw->aq.asq_last_status));
5789                 sbuf_delete(buf);
5790                 return (EIO);
5791         }
5792
5793         /* Sort entries by type for display */
5794         qsort(resp, num_entries,
5795             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5796             &ixl_res_alloc_cmp);
5797
5798         sbuf_cat(buf, "\n");
5799         sbuf_printf(buf, "# of entries: %d\n", num_entries);
5800         sbuf_printf(buf,
5801             "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
5802             "                          | (this)     | (all) | (this) | (all)       \n");
5803         for (int i = 0; i < num_entries; i++) {
5804                 sbuf_printf(buf,
5805                     "%25s | %10d   %5d   %6d   %12d",
5806                     ixl_switch_res_type_string(resp[i].resource_type),
5807                     resp[i].guaranteed,
5808                     resp[i].total,
5809                     resp[i].used,
5810                     resp[i].total_unalloced);
5811                 if (i < num_entries - 1)
5812                         sbuf_cat(buf, "\n");
5813         }
5814
5815         error = sbuf_finish(buf);
5816         if (error)
5817                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5818
5819         sbuf_delete(buf);
5820         return (error);
5821 }
5822
5823 /*
5824 ** Caller must init and delete sbuf; this function will clear and
5825 ** finish it for caller.
5826 **
5827 ** XXX: Cannot use the SEID for this, since there is no longer a 
5828 ** fixed mapping between SEID and element type.
5829 */
5830 char *
5831 ixl_switch_element_string(struct sbuf *s,
5832     struct i40e_aqc_switch_config_element_resp *element)
5833 {
5834         sbuf_clear(s);
5835
5836         switch (element->element_type) {
5837         case I40E_AQ_SW_ELEM_TYPE_MAC:
5838                 sbuf_printf(s, "MAC %3d", element->element_info);
5839                 break;
5840         case I40E_AQ_SW_ELEM_TYPE_PF:
5841                 sbuf_printf(s, "PF  %3d", element->element_info);
5842                 break;
5843         case I40E_AQ_SW_ELEM_TYPE_VF:
5844                 sbuf_printf(s, "VF  %3d", element->element_info);
5845                 break;
5846         case I40E_AQ_SW_ELEM_TYPE_EMP:
5847                 sbuf_cat(s, "EMP");
5848                 break;
5849         case I40E_AQ_SW_ELEM_TYPE_BMC:
5850                 sbuf_cat(s, "BMC");
5851                 break;
5852         case I40E_AQ_SW_ELEM_TYPE_PV:
5853                 sbuf_cat(s, "PV");
5854                 break;
5855         case I40E_AQ_SW_ELEM_TYPE_VEB:
5856                 sbuf_cat(s, "VEB");
5857                 break;
5858         case I40E_AQ_SW_ELEM_TYPE_PA:
5859                 sbuf_cat(s, "PA");
5860                 break;
5861         case I40E_AQ_SW_ELEM_TYPE_VSI:
5862                 sbuf_printf(s, "VSI %3d", element->element_info);
5863                 break;
5864         default:
5865                 sbuf_cat(s, "?");
5866                 break;
5867         }
5868
5869         sbuf_finish(s);
5870         return sbuf_data(s);
5871 }
5872
5873 static int
5874 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5875 {
5876         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5877         struct i40e_hw *hw = &pf->hw;
5878         device_t dev = pf->dev;
5879         struct sbuf *buf;
5880         struct sbuf *nmbuf;
5881         enum i40e_status_code status;
5882         int error = 0;
5883         u16 next = 0;
5884         u8 aq_buf[I40E_AQ_LARGE_BUF];
5885
5886         struct i40e_aqc_get_switch_config_resp *sw_config;
5887         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5888
5889         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5890         if (!buf) {
5891                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5892                 return (ENOMEM);
5893         }
5894
5895         status = i40e_aq_get_switch_config(hw, sw_config,
5896             sizeof(aq_buf), &next, NULL);
5897         if (status) {
5898                 device_printf(dev,
5899                     "%s: aq_get_switch_config() error %s, aq error %s\n",
5900                     __func__, i40e_stat_str(hw, status),
5901                     i40e_aq_str(hw, hw->aq.asq_last_status));
5902                 sbuf_delete(buf);
5903                 return (error);
5904         }
5905         if (next)
5906                 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5907                     __func__, next);
5908
5909         nmbuf = sbuf_new_auto();
5910         if (!nmbuf) {
5911                 device_printf(dev, "Could not allocate sbuf for name output.\n");
5912                 sbuf_delete(buf);
5913                 return (ENOMEM);
5914         }
5915
5916         sbuf_cat(buf, "\n");
5917         /* Assuming <= 255 elements in switch */
5918         sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5919         sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5920         /* Exclude:
5921         ** Revision -- all elements are revision 1 for now
5922         */
5923         sbuf_printf(buf,
5924             "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5925             "                |          |          | (uplink)\n");
5926         for (int i = 0; i < sw_config->header.num_reported; i++) {
5927                 /* Row format: "%4d (%8s) | %8s   %8s   %#8x" */
5928                 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5929                 sbuf_cat(buf, " ");
5930                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5931                     &sw_config->element[i]));
5932                 sbuf_cat(buf, " | ");
5933                 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5934                 sbuf_cat(buf, "   ");
5935                 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5936                 sbuf_cat(buf, "   ");
5937                 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5938                 if (i < sw_config->header.num_reported - 1)
5939                         sbuf_cat(buf, "\n");
5940         }
5941         sbuf_delete(nmbuf);
5942
5943         error = sbuf_finish(buf);
5944         if (error)
5945                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5946
5947         sbuf_delete(buf);
5948
5949         return (error);
5950 }
5951
5952 static int
5953 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
5954 {
5955         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5956         struct i40e_hw *hw = &pf->hw;
5957         device_t dev = pf->dev;
5958         struct sbuf *buf;
5959         int error = 0;
5960         enum i40e_status_code status;
5961         u32 reg;
5962
5963         struct i40e_aqc_get_set_rss_key_data key_data;
5964
5965         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5966         if (!buf) {
5967                 device_printf(dev, "Could not allocate sbuf for output.\n");
5968                 return (ENOMEM);
5969         }
5970
5971         bzero(&key_data, sizeof(key_data));
5972
5973         sbuf_cat(buf, "\n");
5974         if (hw->mac.type == I40E_MAC_X722) {
5975                 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5976                 if (status)
5977                         device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
5978                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5979         } else {
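                     /* Other MACs expose the RSS key via the PFQF_HKEY registers, 32 bits per read */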
5980                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
5981                         reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
5982                         bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
5983                 }
5984         }
5985
5986         ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
5987
5988         error = sbuf_finish(buf);
5989         if (error)
5990                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5991         sbuf_delete(buf);
5992
5993         return (error);
5994 }
5995
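     /*
      * Print 'length' bytes of 'buf' into 'sb' as rows of up to 16 hex bytes,
      * each row prefixed with its offset (biased by 'label_offset'); when
      * 'text' is true, a printable-ASCII column follows the hex bytes.
      */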
5996 static void
5997 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
5998 {
5999         int i, j, k, width;
6000         char c;
6001
6002         if (length < 1 || buf == NULL) return;
6003
6004         int byte_stride = 16;
6005         int lines = length / byte_stride;
6006         int rem = length % byte_stride;
6007         if (rem > 0)
6008                 lines++;
6009
6010         for (i = 0; i < lines; i++) {
6011                 width = (rem > 0 && i == lines - 1)
6012                     ? rem : byte_stride;
6013
6014                 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
6015
6016                 for (j = 0; j < width; j++)
6017                         sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
6018
6019                 if (width < byte_stride) {
6020                         for (k = 0; k < (byte_stride - width); k++)
6021                                 sbuf_printf(sb, "   ");
6022                 }
6023
6024                 if (!text) {
6025                         sbuf_printf(sb, "\n");
6026                         continue;
6027                 }
6028
6029                 for (j = 0; j < width; j++) {
6030                         c = (char)buf[i * byte_stride + j];
6031                         if (c < 32 || c > 126)
6032                                 sbuf_printf(sb, ".");
6033                         else
6034                                 sbuf_printf(sb, "%c", c);
6035
6036                         if (j == width - 1)
6037                                 sbuf_printf(sb, "\n");
6038                 }
6039         }
6040 }
6041
6042 static int
6043 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
6044 {
6045         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6046         struct i40e_hw *hw = &pf->hw;
6047         device_t dev = pf->dev;
6048         struct sbuf *buf;
6049         int error = 0;
6050         enum i40e_status_code status;
6051         u8 hlut[512];
6052         u32 reg;
6053
6054         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6055         if (!buf) {
6056                 device_printf(dev, "Could not allocate sbuf for output.\n");
6057                 return (ENOMEM);
6058         }
6059
6060         bzero(hlut, sizeof(hlut));
6061         sbuf_cat(buf, "\n");
6062         if (hw->mac.type == I40E_MAC_X722) {
6063                 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
6064                 if (status)
6065                         device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
6066                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6067         } else {
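                     /* Other MACs expose the LUT via the PFQF_HLUT registers, four entries per read */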
6068                 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
6069                         reg = rd32(hw, I40E_PFQF_HLUT(i));
6070                         bcopy(&reg, &hlut[i << 2], 4);
6071                 }
6072         }
6073         ixl_sbuf_print_bytes(buf, hlut, sizeof(hlut), 0, false);
6074
6075         error = sbuf_finish(buf);
6076         if (error)
6077                 device_printf(dev, "Error finishing sbuf: %d\n", error);
6078         sbuf_delete(buf);
6079
6080         return (error);
6081 }
6082
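     /*
      * Report the 64-bit RSS hash-enable (HENA) bitmask, which the hardware
      * splits across the two 32-bit PFQF_HENA registers. With no backing
      * store passed to sysctl_handle_long(), writes are rejected, so this
      * node is effectively read-only.
      */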
6083 static int
6084 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
6085 {
6086         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6087         struct i40e_hw *hw = &pf->hw;
6088         u64 hena;
6089
6090         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
6091             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
6092
6093         return sysctl_handle_long(oidp, NULL, hena, req);
6094 }
6095
6096 /*
6097  * Sysctl to disable firmware's link management
6098  *
6099  * 1 - Disable link management on this port
6100  * 0 - Re-enable link management
6101  *
6102  * On normal NVMs, firmware manages link by default.
6103  */
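     /*
      * Usage sketch (the sysctl node name here is illustrative; the OID is
      * registered elsewhere in the driver):
      *     sysctl dev.ixl.0.fw_link_management=1   # FW stops managing link
      */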
6104 static int
6105 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
6106 {
6107         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6108         struct i40e_hw *hw = &pf->hw;
6109         device_t dev = pf->dev;
6110         int requested_mode = -1;
6111         enum i40e_status_code status = 0;
6112         int error = 0;
6113
6114         /* Read in new mode */
6115         error = sysctl_handle_int(oidp, &requested_mode, 0, req);
6116         if ((error) || (req->newptr == NULL))
6117                 return (error);
6118         /* Check for sane value */
6119         if (requested_mode < 0 || requested_mode > 1) {
6120                 device_printf(dev, "Valid modes are 0 or 1\n");
6121                 return (EINVAL);
6122         }
6123
6124         /* Set new mode; bit 4 of the PHY debug word disables FW link management */
6125         status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
6126         if (status) {
6127                 device_printf(dev,
6128                     "%s: Error setting new phy debug mode %s,"
6129                     " aq error: %s\n", __func__, i40e_stat_str(hw, status),
6130                     i40e_aq_str(hw, hw->aq.asq_last_status));
6131                 return (EIO);
6132         }
6133
6134         return (0);
6135 }
6136
6137 /*
6138  * Sysctl to read a byte from I2C bus.
6139  *
6140  * Input: 32-bit value:
6141  *      bits 0-7:   device address (0xA0 or 0xA2)
6142  *      bits 8-15:  offset (0-255)
6143  *      bits 16-31: unused
6144  * Output: 8-bit value read
6145  */
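     /*
      * Example (illustrative): to read offset 0x02 from device address 0xA0,
      * write input = (0x02 << 8) | 0xA0 = 0x02A0 to this sysctl.
      */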
6146 static int
6147 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
6148 {
6149         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6150         device_t dev = pf->dev;
6151         int input = -1, error = 0;
6155         u8 dev_addr, offset, output;
6156
6157         /* Read in I2C read parameters */
6158         error = sysctl_handle_int(oidp, &input, 0, req);
6159         if ((error) || (req->newptr == NULL))
6160                 return (error);
6161         /* Validate device address */
6162         dev_addr = input & 0xFF;
6163         if (dev_addr != 0xA0 && dev_addr != 0xA2) {
6164                 return (EINVAL);
6165         }
6166         offset = (input >> 8) & 0xFF;
6167
6168         error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
6169         if (error)
6170                 return (error);
6171
6172         device_printf(dev, "%02X\n", output);
6173         return (0);
6174 }
6175
6176 /*
6177  * Sysctl to write a byte to the I2C bus.
6178  *
6179  * Input: 32-bit value:
6180  *      bits 0-7:   device address (0xA0 or 0xA2)
6181  *      bits 8-15:  offset (0-255)
6182  *      bits 16-23: value to write
6183  *      bits 24-31: unused
6184  * Output: 8-bit value written
6185  */
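     /*
      * Example (illustrative): to write value 0x5A to offset 0x7F on device
      * address 0xA2, input = (0x5A << 16) | (0x7F << 8) | 0xA2 = 0x5A7FA2.
      */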
6186 static int
6187 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
6188 {
6189         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6190         device_t dev = pf->dev;
6191         int input = -1, error = 0;
6192
6193         u8 dev_addr, offset, value;
6194
6195         /* Read in I2C write parameters */
6196         error = sysctl_handle_int(oidp, &input, 0, req);
6197         if ((error) || (req->newptr == NULL))
6198                 return (error);
6199         /* Validate device address */
6200         dev_addr = input & 0xFF;
6201         if (dev_addr != 0xA0 && dev_addr != 0xA2) {
6202                 return (EINVAL);
6203         }
6204         offset = (input >> 8) & 0xFF;
6205         value = (input >> 16) & 0xFF;
6206
6207         error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
6208         if (error)
6209                 return (error);
6210
6211         device_printf(dev, "%02X written\n", value);
6212         return (0);
6213 }
6214
6215 static int
6216 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
6217     u8 bit_pos, int *is_set)
6218 {
6219         device_t dev = pf->dev;
6220         struct i40e_hw *hw = &pf->hw;
6221         enum i40e_status_code status;
6222
6223         status = i40e_aq_get_phy_capabilities(hw,
6224             FALSE, FALSE, abilities, NULL);
6225         if (status) {
6226                 device_printf(dev,
6227                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
6228                     __func__, i40e_stat_str(hw, status),
6229                     i40e_aq_str(hw, hw->aq.asq_last_status));
6230                 return (EIO);
6231         }
6232
6233         *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
6234         return (0);
6235 }
6236
6237 static int
6238 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
6239     u8 bit_pos, int set)
6240 {
6241         device_t dev = pf->dev;
6242         struct i40e_hw *hw = &pf->hw;
6243         struct i40e_aq_set_phy_config config;
6244         enum i40e_status_code status;
6245
6246         /* Set new PHY config */
6247         memset(&config, 0, sizeof(config));
6248         config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
6249         if (set)
6250                 config.fec_config |= bit_pos;
6251         if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
6252                 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
6253                 config.phy_type = abilities->phy_type;
6254                 config.phy_type_ext = abilities->phy_type_ext;
6255                 config.link_speed = abilities->link_speed;
6256                 config.eee_capability = abilities->eee_capability;
6257                 config.eeer = abilities->eeer_val;
6258                 config.low_power_ctrl = abilities->d3_lpan;
6259                 status = i40e_aq_set_phy_config(hw, &config, NULL);
6260
6261                 if (status) {
6262                         device_printf(dev,
6263                             "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
6264                             __func__, i40e_stat_str(hw, status),
6265                             i40e_aq_str(hw, hw->aq.asq_last_status));
6266                         return (EIO);
6267                 }
6268         }
6269
6270         return (0);
6271 }
6272
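     /*
      * The five FEC sysctls below share one pattern: read the current PHY
      * abilities, report whether the given FEC bit is set, and on a write
      * have ixl_set_fec_config() rebuild the PHY config with only that bit
      * changed (no AQ command is issued when nothing actually changes).
      */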
6273 static int
6274 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
6275 {
6276         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6277         int mode, error = 0;
6278
6279         struct i40e_aq_get_phy_abilities_resp abilities;
6280         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
6281         if (error)
6282                 return (error);
6283         /* Read in new mode */
6284         error = sysctl_handle_int(oidp, &mode, 0, req);
6285         if ((error) || (req->newptr == NULL))
6286                 return (error);
6287
6288         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
6289 }
6290
6291 static int
6292 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
6293 {
6294         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6295         int mode, error = 0;
6296
6297         struct i40e_aq_get_phy_abilities_resp abilities;
6298         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
6299         if (error)
6300                 return (error);
6301         /* Read in new mode */
6302         error = sysctl_handle_int(oidp, &mode, 0, req);
6303         if ((error) || (req->newptr == NULL))
6304                 return (error);
6305
6306         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
6307 }
6308
6309 static int
6310 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
6311 {
6312         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6313         int mode, error = 0;
6314
6315         struct i40e_aq_get_phy_abilities_resp abilities;
6316         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
6317         if (error)
6318                 return (error);
6319         /* Read in new mode */
6320         error = sysctl_handle_int(oidp, &mode, 0, req);
6321         if ((error) || (req->newptr == NULL))
6322                 return (error);
6323
6324         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
6325 }
6326
6327 static int
6328 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
6329 {
6330         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6331         int mode, error = 0;
6332
6333         struct i40e_aq_get_phy_abilities_resp abilities;
6334         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
6335         if (error)
6336                 return (error);
6337         /* Read in new mode */
6338         error = sysctl_handle_int(oidp, &mode, 0, req);
6339         if ((error) || (req->newptr == NULL))
6340                 return (error);
6341
6342         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
6343 }
6344
6345 static int
6346 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
6347 {
6348         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6349         int mode, error = 0;
6350
6351         struct i40e_aq_get_phy_abilities_resp abilities;
6352         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
6353         if (error)
6354                 return (error);
6355         /* Read in new mode */
6356         error = sysctl_handle_int(oidp, &mode, 0, req);
6357         if ((error) || (req->newptr == NULL))
6358                 return (error);
6359
6360         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
6361 }
6362
6363 static int
6364 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
6365 {
6366         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6367         struct i40e_hw *hw = &pf->hw;
6368         device_t dev = pf->dev;
6369         struct sbuf *buf;
6370         int error = 0;
6371         enum i40e_status_code status;
6372
6373         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6374         if (!buf) {
6375                 device_printf(dev, "Could not allocate sbuf for output.\n");
6376                 return (ENOMEM);
6377         }
6378
6379         u8 *final_buff;
6380         /* This amount is only necessary if reading the entire cluster into memory */
6381 #define IXL_FINAL_BUFF_SIZE     (1280 * 1024)
6382         final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
6383         if (final_buff == NULL) {
6384                 device_printf(dev, "Could not allocate memory for output.\n");
                     sbuf_delete(buf);
                     return (ENOMEM);
6386         }
6387         int final_buff_len = 0;
6388
6389         u8 cluster_id = 1;
6390         bool more = true;
6391
6392         u8 dump_buf[4096];
6393         u16 curr_buff_size = 4096;
6394         u8 curr_next_table = 0;
6395         u32 curr_next_index = 0;
6396
6397         u16 ret_buff_size;
6398         u8 ret_next_table;
6399         u32 ret_next_index;
6400
6401         sbuf_cat(buf, "\n");
6402
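             /*
              * Pull the debug dump from the firmware 4 KB at a time,
              * accumulating into final_buff; once the firmware advances to a
              * new table, print the accumulated bytes. A returned next-table
              * of 0xFF means the whole cluster has been read.
              */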
6403         while (more) {
6404                 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
6405                     dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
6406                 if (status) {
6407                         device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
6408                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
6409                         goto free_out;
6410                 }
6411
6412                 /* copy info out of temp buffer, bounding to the output buffer size */
                     if (final_buff_len + ret_buff_size > IXL_FINAL_BUFF_SIZE)
                             goto free_out;
6413                 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
6414                 final_buff_len += ret_buff_size;
6415
6416                 if (ret_next_table != curr_next_table) {
6417                         /* We're done with the current table; we can dump out read data. */
6418                         sbuf_printf(buf, "%d:", curr_next_table);
6419                         int bytes_printed = 0;
6420                         while (bytes_printed < final_buff_len) {
6421                                 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
6422                                 bytes_printed += 16;
6423                         }
6424                         sbuf_cat(buf, "\n");
6425
6426                         /* The entire cluster has been read; we're finished */
6427                         if (ret_next_table == 0xFF)
6428                                 break;
6429
6430                         /* Otherwise clear the output buffer and continue reading */
6431                         bzero(final_buff, IXL_FINAL_BUFF_SIZE);
6432                         final_buff_len = 0;
6433                 }
6434
6435                 if (ret_next_index == 0xFFFFFFFF)
6436                         ret_next_index = 0;
6437
6438                 bzero(dump_buf, sizeof(dump_buf));
6439                 curr_next_table = ret_next_table;
6440                 curr_next_index = ret_next_index;
6441         }
6442
6443 free_out:
6444         free(final_buff, M_DEVBUF);
6446         error = sbuf_finish(buf);
6447         if (error)
6448                 device_printf(dev, "Error finishing sbuf: %d\n", error);
6449         sbuf_delete(buf);
6450
6451         return (error);
6452 }
6453
6454 static int
6455 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
6456 {
6457         struct ixl_pf *pf = (struct ixl_pf *)arg1;
6458         struct i40e_hw *hw = &pf->hw;
6459         device_t dev = pf->dev;
6460         int error = 0;
6461         int state, new_state;
6462         enum i40e_status_code status;
6463         state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
6464
6465         /* Read in new mode */
6466         error = sysctl_handle_int(oidp, &new_state, 0, req);
6467         if ((error) || (req->newptr == NULL))
6468                 return (error);
6469
6470         /* Already in requested state */
6471         if (new_state == state)
6472                 return (error);
6473
6474         if (new_state == 0) {
6475                 if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
6476                         device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
6477                         return (EINVAL);
6478                 }
6479
6480                 if (pf->hw.aq.api_maj_ver < 1 ||
6481                     (pf->hw.aq.api_maj_ver == 1 &&
6482                     pf->hw.aq.api_min_ver < 7)) {
6483                         device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
6484                         return (EINVAL);
6485                 }
6486
6487                 i40e_aq_stop_lldp(&pf->hw, true, NULL);
6488                 i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
6489                 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6490         } else {
6491                 status = i40e_aq_start_lldp(&pf->hw, NULL);
6492                 if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
6493                         device_printf(dev, "FW LLDP agent is already running\n");
6494                 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6495         }
6496
6497         return (0);
6498 }
6499
6500 /*
6501  * Get FW LLDP Agent status
6502  */
6503 int
6504 ixl_get_fw_lldp_status(struct ixl_pf *pf)
6505 {
6506         enum i40e_status_code ret = I40E_SUCCESS;
6507         struct i40e_lldp_variables lldp_cfg;
6508         struct i40e_hw *hw = &pf->hw;
6509         u8 adminstatus = 0;
6510
6511         ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
6512         if (ret)
6513                 return (ret);
6514
6515         /* Get the LLDP AdminStatus for the current port (one 4-bit nibble per port) */
6516         adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
6517         adminstatus &= 0xf;
6518
6519         /* Check if LLDP agent is disabled */
6520         if (!adminstatus) {
6521                 device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
6522                 atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6523         } else
6524                 atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
6525
6526         return (0);
6527 }
6528
6529 int
6530 ixl_attach_get_link_status(struct ixl_pf *pf)
6531 {
6532         struct i40e_hw *hw = &pf->hw;
6533         device_t dev = pf->dev;
6534         int error = 0;
6535
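             /*
              * On firmware older than 4.33, restart autonegotiation after a
              * short delay so link comes up reliably after attach; the exact
              * erratum is not documented here, so this rationale is inferred
              * from the version gate.
              */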
6536         if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
6537             (hw->aq.fw_maj_ver < 4)) {
6538                 i40e_msec_delay(75);
6539                 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
6540                 if (error) {
6541                         device_printf(dev, "link restart failed, aq_err=%d\n",
6542                             pf->hw.aq.asq_last_status);
6543                         return (error);
6544                 }
6545         }
6546
6547         /* Determine link state */
6548         hw->phy.get_link_info = TRUE;
6549         i40e_get_link_status(hw, &pf->link_up);
6550         return (0);
6551 }