1 /******************************************************************************
2
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #include "ixl_pf.h"
37
38 #ifdef PCI_IOV
39 #include "ixl_pf_iov.h"
40 #endif
41
42 #ifdef IXL_IW
43 #include "ixl_iw.h"
44 #include "ixl_iw_int.h"
45 #endif
46
47 static u8       ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void     ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50 static u_int    ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51 static u_int    ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52 static char *   ixl_switch_element_string(struct sbuf *, u8, u16);
53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
54
55 /* Sysctls */
56 static int      ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57 static int      ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58 static int      ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59 static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60 static int      ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61 static int      ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62 static int      ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
63
64 static int      ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
65 static int      ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
66
67 /* Debug Sysctls */
68 static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
69 static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
70 static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
71 static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
72 static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
73 static int      ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
74 static int      ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
75 static int      ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
76 static int      ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
77 static int      ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
78 static int      ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
79 static int      ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
80 static int      ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
81 static int      ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
82 static int      ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
83 static int      ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
84 static int      ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
85 static int      ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
86 static int      ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
87 static int      ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
88
89 /* Debug Sysctls */
90 static int      ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
91 static int      ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
92 static int      ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
93 static int      ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
94 #ifdef IXL_DEBUG
95 static int      ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
96 static int      ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
97 #endif
98
99 #ifdef IXL_IW
100 extern int ixl_enable_iwarp;
101 extern int ixl_limit_iwarp_msix;
102 #endif
103
104 static const char * const ixl_fc_string[6] = {
105         "None",
106         "Rx",
107         "Tx",
108         "Full",
109         "Priority",
110         "Default"
111 };
112
113 static char *ixl_fec_string[3] = {
114        "CL108 RS-FEC",
115        "CL74 FC-FEC/BASE-R",
116        "None"
117 };
118
119 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
120
121 /*
122 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
123 */
124 void
125 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
126 {
127         u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
128         u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
129         u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
130
131         sbuf_printf(buf,
132             "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
133             hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
134             hw->aq.api_maj_ver, hw->aq.api_min_ver,
135             (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
136             IXL_NVM_VERSION_HI_SHIFT,
137             (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
138             IXL_NVM_VERSION_LO_SHIFT,
139             hw->nvm.eetrack,
140             oem_ver, oem_build, oem_patch);
141 }
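/*
 * For illustration only, with hypothetical values: the string built above
 * would read e.g. "fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.262.0".
 */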
142
143 void
144 ixl_print_nvm_version(struct ixl_pf *pf)
145 {
146         struct i40e_hw *hw = &pf->hw;
147         device_t dev = pf->dev;
148         struct sbuf *sbuf;
149
150         sbuf = sbuf_new_auto();
151         ixl_nvm_version_str(hw, sbuf);
152         sbuf_finish(sbuf);
153         device_printf(dev, "%s\n", sbuf_data(sbuf));
154         sbuf_delete(sbuf);
155 }
156
157 /**
158  * ixl_get_fw_mode - Check the state of FW
159  * @hw: device hardware structure
160  *
161  * Identify state of FW. It might be in a recovery mode
162  * which limits functionality and requires special handling
163  * from the driver.
164  *
165  * @returns FW mode (normal, recovery, unexpected EMP reset)
166  */
167 static enum ixl_fw_mode
168 ixl_get_fw_mode(struct ixl_pf *pf)
169 {
170         struct i40e_hw *hw = &pf->hw;
171         enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
172         u32 fwsts;
173
174 #ifdef IXL_DEBUG
175         if (pf->recovery_mode)
176                 return IXL_FW_MODE_RECOVERY;
177 #endif
178         fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
179
180         /* FWS1B is set and has one of the expected recovery-mode values */
181         if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
182             fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
183             fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
184             fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
185                 fw_mode = IXL_FW_MODE_RECOVERY;
186         else {
187                 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
188                     fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
189                         fw_mode = IXL_FW_MODE_UEMPR;
190         }
191         return (fw_mode);
192 }
193
194 /**
195  * ixl_pf_reset - Reset the PF
196  * @pf: PF structure
197  *
198  * Ensure that FW is in the right state and do the reset
199  * if needed.
200  *
201  * @returns zero on success, or an error code on failure.
202  */
203 int
204 ixl_pf_reset(struct ixl_pf *pf)
205 {
206         struct i40e_hw *hw = &pf->hw;
207         enum i40e_status_code status;
208         enum ixl_fw_mode fw_mode;
209
210         fw_mode = ixl_get_fw_mode(pf);
211         ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
212         if (fw_mode == IXL_FW_MODE_RECOVERY) {
213                 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
214                 /* Don't try to reset device if it's in recovery mode */
215                 return (0);
216         }
217
218         status = i40e_pf_reset(hw);
219         if (status == I40E_SUCCESS)
220                 return (0);
221
222         /* Check FW mode again in case it has changed while
223          * waiting for reset to complete */
224         fw_mode = ixl_get_fw_mode(pf);
225         ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
226         if (fw_mode == IXL_FW_MODE_RECOVERY) {
227                 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
228                 return (0);
229         }
230
231         if (fw_mode == IXL_FW_MODE_UEMPR)
232                 device_printf(pf->dev,
233                     "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
234         else
235                 device_printf(pf->dev, "PF reset failure %s\n",
236                     i40e_stat_str(hw, status));
237         return (EIO);
238 }
239
240 /**
241  * ixl_setup_hmc - Setup LAN Host Memory Cache
242  * @pf: PF structure
243  *
244  * Init and configure LAN Host Memory Cache
245  *
246  * @returns 0 on success, EIO on error
247  */
248 int
249 ixl_setup_hmc(struct ixl_pf *pf)
250 {
251         struct i40e_hw *hw = &pf->hw;
252         enum i40e_status_code status;
253
254         status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
255             hw->func_caps.num_rx_qp, 0, 0);
256         if (status) {
257                 device_printf(pf->dev, "init_lan_hmc failed: %s\n",
258                     i40e_stat_str(hw, status));
259                 return (EIO);
260         }
261
262         status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
263         if (status) {
264                 device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
265                     i40e_stat_str(hw, status));
266                 return (EIO);
267         }
268
269         return (0);
270 }
271
272 /**
273  * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
274  * @pf: PF structure
275  *
276  * Shutdown Host Memory Cache if configured.
277  *
278  */
279 void
280 ixl_shutdown_hmc(struct ixl_pf *pf)
281 {
282         struct i40e_hw *hw = &pf->hw;
283         enum i40e_status_code status;
284
285         /* HMC not configured, no need to shutdown */
286         if (hw->hmc.hmc_obj == NULL)
287                 return;
288
289         status = i40e_shutdown_lan_hmc(hw);
290         if (status)
291                 device_printf(pf->dev,
292                     "Shutdown LAN HMC failed with code %s\n",
293                     i40e_stat_str(hw, status));
294 }
295 /*
296  * Write PF ITR values to queue ITR registers.
297  */
298 void
299 ixl_configure_itr(struct ixl_pf *pf)
300 {
301         ixl_configure_tx_itr(pf);
302         ixl_configure_rx_itr(pf);
303 }
304
305 /*********************************************************************
306  *
307  *  Get the hardware capabilities
308  *
309  **********************************************************************/
310
311 int
312 ixl_get_hw_capabilities(struct ixl_pf *pf)
313 {
314         struct i40e_aqc_list_capabilities_element_resp *buf;
315         struct i40e_hw  *hw = &pf->hw;
316         device_t        dev = pf->dev;
317         enum i40e_status_code status;
318         int len, i2c_intfc_num;
319         bool again = TRUE;
320         u16 needed;
321
322         if (IXL_PF_IN_RECOVERY_MODE(pf)) {
323                 hw->func_caps.iwarp = 0;
324                 return (0);
325         }
326
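        /*
         * Start with room for 40 capability elements; if the firmware
         * responds with ENOMEM it also reports the buffer size it needs,
         * which is used for the single retry below.
         */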
327         len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
328 retry:
329         if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
330             malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
331                 device_printf(dev, "Unable to allocate cap memory\n");
332                 return (ENOMEM);
333         }
334
335         /* This populates the hw struct */
336         status = i40e_aq_discover_capabilities(hw, buf, len,
337             &needed, i40e_aqc_opc_list_func_capabilities, NULL);
338         free(buf, M_IXL);
339         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
340             (again == TRUE)) {
341                 /* retry once with a larger buffer */
342                 again = FALSE;
343                 len = needed;
344                 goto retry;
345         } else if (status != I40E_SUCCESS) {
346                 device_printf(dev, "capability discovery failed; status %s, error %s\n",
347                     i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
348                 return (ENODEV);
349         }
350
351         /*
352          * Some devices have both MDIO and I2C; since this isn't reported
353          * by the FW, check registers to see if an I2C interface exists.
354          */
355         i2c_intfc_num = ixl_find_i2c_interface(pf);
356         if (i2c_intfc_num != -1)
357                 pf->has_i2c = true;
358
359         /* Determine functions to use for driver I2C accesses */
360         switch (pf->i2c_access_method) {
361         case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
362                 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
363                         pf->read_i2c_byte = ixl_read_i2c_byte_aq;
364                         pf->write_i2c_byte = ixl_write_i2c_byte_aq;
365                 } else {
366                         pf->read_i2c_byte = ixl_read_i2c_byte_reg;
367                         pf->write_i2c_byte = ixl_write_i2c_byte_reg;
368                 }
369                 break;
370         }
371         case IXL_I2C_ACCESS_METHOD_AQ:
372                 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
373                 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
374                 break;
375         case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
376                 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
377                 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
378                 break;
379         case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
380                 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
381                 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
382                 break;
383         default:
384                 /* Should not happen */
385                 device_printf(dev, "Error setting I2C access functions\n");
386                 break;
387         }
388
389         /* Keep link active by default */
390         atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
391
392         /* Print a subset of the capability information. */
393         device_printf(dev,
394             "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
395             hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
396             hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
397             (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
398             (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
399             (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
400             "MDIO shared");
401
402         return (0);
403 }
404
405 /* For the set_advertise sysctl */
406 void
407 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
408 {
409         device_t dev = pf->dev;
410         int err;
411
412         /* Make sure to initialize the device to the complete list of
413          * supported speeds on driver load, to ensure unloading and
414          * reloading the driver will restore this value.
415          */
416         err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
417         if (err) {
418                 /* Non-fatal error */
419                 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
420                               __func__, err);
421                 return;
422         }
423
424         pf->advertised_speed =
425             ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
426 }
427
428 int
429 ixl_teardown_hw_structs(struct ixl_pf *pf)
430 {
431         enum i40e_status_code status = 0;
432         struct i40e_hw *hw = &pf->hw;
433         device_t dev = pf->dev;
434
435         /* Shutdown LAN HMC */
436         if (hw->hmc.hmc_obj) {
437                 status = i40e_shutdown_lan_hmc(hw);
438                 if (status) {
439                         device_printf(dev,
440                             "init: LAN HMC shutdown failure; status %s\n",
441                             i40e_stat_str(hw, status));
442                         goto err_out;
443                 }
444         }
445
446         /* Shutdown admin queue */
447         ixl_disable_intr0(hw);
448         status = i40e_shutdown_adminq(hw);
449         if (status)
450                 device_printf(dev,
451                     "init: Admin Queue shutdown failure; status %s\n",
452                     i40e_stat_str(hw, status));
453
454         ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
455 err_out:
456         return (status);
457 }
458
459 /*
460 ** Creates new filter with given MAC address and VLAN ID
461 */
462 static struct ixl_mac_filter *
463 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
464 {
465         struct ixl_mac_filter  *f;
466
467         /* create a new empty filter */
468         f = malloc(sizeof(struct ixl_mac_filter),
469             M_IXL, M_NOWAIT | M_ZERO);
470         if (f) {
471                 LIST_INSERT_HEAD(headp, f, ftle);
472                 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
473                 f->vlan = vlan;
474         }
475
476         return (f);
477 }
478
479 /**
480  * ixl_free_filters - Free all filters in given list
481  * @headp: pointer to list head
482  *
483  * Frees memory used by each entry in the list.
484  * Does not remove filters from HW.
485  */
486 void
487 ixl_free_filters(struct ixl_ftl_head *headp)
488 {
489         struct ixl_mac_filter *f, *nf;
490
491         f = LIST_FIRST(headp);
492         while (f != NULL) {
493                 nf = LIST_NEXT(f, ftle);
494                 free(f, M_IXL);
495                 f = nf;
496         }
497
498         LIST_INIT(headp);
499 }
500
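/*
 * if_foreach_llmaddr() callback: queue a filter for this link-level
 * multicast address unless one already exists.  The caller sums the
 * non-zero return values to learn how many new filters to program.
 */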
501 static u_int
502 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
503 {
504         struct ixl_add_maddr_arg *ama = arg;
505         struct ixl_vsi *vsi = ama->vsi;
506         const u8 *macaddr = (u8*)LLADDR(sdl);
507         struct ixl_mac_filter *f;
508
509         /* Does one already exist */
510         f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
511         if (f != NULL)
512                 return (0);
513
514         f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
515         if (f == NULL) {
516                 device_printf(vsi->dev, "WARNING: no filter available!!\n");
517                 return (0);
518         }
519         f->flags |= IXL_FILTER_MC;
520
521         return (1);
522 }
523
524 /*********************************************************************
525  *      Filter Routines
526  *
527  *      Routines for multicast and vlan filter management.
528  *
529  *********************************************************************/
530 void
531 ixl_add_multi(struct ixl_vsi *vsi)
532 {
533         struct ifnet            *ifp = vsi->ifp;
534         struct i40e_hw          *hw = vsi->hw;
535         int                     mcnt = 0;
536         struct ixl_add_maddr_arg cb_arg;
537
538         IOCTL_DEBUGOUT("ixl_add_multi: begin");
539
540         mcnt = if_llmaddr_count(ifp);
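        /*
         * Too many multicast groups for the HW filter table: fall back to
         * multicast promiscuous mode and drop the individual MC filters.
         */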
541         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
542                 i40e_aq_set_vsi_multicast_promiscuous(hw,
543                     vsi->seid, TRUE, NULL);
544                 /* delete all existing MC filters */
545                 ixl_del_multi(vsi, true);
546                 return;
547         }
548
549         cb_arg.vsi = vsi;
550         LIST_INIT(&cb_arg.to_add);
551
552         mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
553         if (mcnt > 0)
554                 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
555
556         IOCTL_DEBUGOUT("ixl_add_multi: end");
557 }
558
559 static u_int
560 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
561 {
562         struct ixl_mac_filter *f = arg;
563
564         if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
565                 return (1);
566         else
567                 return (0);
568 }
569
570 void
571 ixl_del_multi(struct ixl_vsi *vsi, bool all)
572 {
573         struct ixl_ftl_head     to_del;
574         struct ifnet            *ifp = vsi->ifp;
575         struct ixl_mac_filter   *f, *fn;
576         int                     mcnt = 0;
577
578         IOCTL_DEBUGOUT("ixl_del_multi: begin");
579
580         LIST_INIT(&to_del);
581         /* Search for removed multicast addresses */
582         LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
583                 if ((f->flags & IXL_FILTER_MC) == 0 ||
584                     (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
585                         continue;
586
587                 LIST_REMOVE(f, ftle);
588                 LIST_INSERT_HEAD(&to_del, f, ftle);
589                 mcnt++;
590         }
591
592         if (mcnt > 0)
593                 ixl_del_hw_filters(vsi, &to_del, mcnt);
594 }
595
596 void
597 ixl_link_up_msg(struct ixl_pf *pf)
598 {
599         struct i40e_hw *hw = &pf->hw;
600         struct ifnet *ifp = pf->vsi.ifp;
601         char *req_fec_string, *neg_fec_string;
602         u8 fec_abilities;
603
604         fec_abilities = hw->phy.link_info.req_fec_info;
605         /* If both RS and KR are requested, only show RS */
606         if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
607                 req_fec_string = ixl_fec_string[0];
608         else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
609                 req_fec_string = ixl_fec_string[1];
610         else
611                 req_fec_string = ixl_fec_string[2];
612
613         if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
614                 neg_fec_string = ixl_fec_string[0];
615         else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
616                 neg_fec_string = ixl_fec_string[1];
617         else
618                 neg_fec_string = ixl_fec_string[2];
619
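        /*
         * The flow control ternary below maps the negotiated pause bits to
         * ixl_fc_string: TX and RX pause -> "Full", TX only -> "Tx",
         * RX only -> "Rx", neither -> "None".
         */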
620         log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
621             ifp->if_xname,
622             ixl_link_speed_string(hw->phy.link_info.link_speed),
623             req_fec_string, neg_fec_string,
624             (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
625             (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
626                 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
627                 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
628                 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
629                 ixl_fc_string[1] : ixl_fc_string[0]);
630 }
631
632 /*
633  * Configure admin queue/misc interrupt cause registers in hardware.
634  */
635 void
636 ixl_configure_intr0_msix(struct ixl_pf *pf)
637 {
638         struct i40e_hw *hw = &pf->hw;
639         u32 reg;
640
641         /* First set up the adminq - vector 0 */
642         wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
643         rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
644
645         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
646             I40E_PFINT_ICR0_ENA_GRST_MASK |
647             I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
648             I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
649             I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
650             I40E_PFINT_ICR0_ENA_VFLR_MASK |
651             I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
652             I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
653         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
654
655         /*
656          * 0x7FF is the end of the queue list.
657          * This means we won't use MSI-X vector 0 for a queue interrupt
658          * in MSI-X mode.
659          */
660         wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
661         /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
662         wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
663
664         wr32(hw, I40E_PFINT_DYN_CTL0,
665             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
666             I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
667
668         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
669 }
670
671 void
672 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
673 {
674         /* Display supported media types */
675         if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
676                 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
677
678         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
679                 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
680         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
681                 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
682         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
683                 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
684
685         if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
686                 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
687
688         if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
689                 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
690
691         if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
692             phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
693             phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
694                 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
695
696         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
697                 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
698         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
699                 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
700         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
701                 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
702
703         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
704             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
705             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
706             phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
707             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
708                 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
709         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
710                 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
711         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
712                 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
713
714         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
715                 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
716
717         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
718             || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
719                 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
720         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
721                 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
722         if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
723                 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
724         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
725                 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
726         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
727                 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
728
729         if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
730                 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
731
732         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
733                 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
734         if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
735                 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
736
737         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
738                 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
739         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
740                 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
741         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
742                 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
743         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
744                 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
745         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
746                 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
747         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
748                 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
749 }
750
751 /*********************************************************************
752  *
753  *  Get Firmware Switch configuration
754  *      - this will need to be more robust when more complex
755  *        switch configurations are enabled.
756  *
757  **********************************************************************/
758 int
759 ixl_switch_config(struct ixl_pf *pf)
760 {
761         struct i40e_hw  *hw = &pf->hw; 
762         struct ixl_vsi  *vsi = &pf->vsi;
763         device_t        dev = iflib_get_dev(vsi->ctx);
764         struct i40e_aqc_get_switch_config_resp *sw_config;
765         u8      aq_buf[I40E_AQ_LARGE_BUF];
766         int     ret;
767         u16     next = 0;
768
769         memset(&aq_buf, 0, sizeof(aq_buf));
770         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
771         ret = i40e_aq_get_switch_config(hw, sw_config,
772             sizeof(aq_buf), &next, NULL);
773         if (ret) {
774                 device_printf(dev, "aq_get_switch_config() failed, error %d,"
775                     " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
776                 return (ret);
777         }
778         if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
779                 device_printf(dev,
780                     "Switch config: header reported: %d in structure, %d total\n",
781                     LE16_TO_CPU(sw_config->header.num_reported),
782                     LE16_TO_CPU(sw_config->header.num_total));
783                 for (int i = 0;
784                     i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
785                         device_printf(dev,
786                             "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
787                             sw_config->element[i].element_type,
788                             LE16_TO_CPU(sw_config->element[i].seid),
789                             LE16_TO_CPU(sw_config->element[i].uplink_seid),
790                             LE16_TO_CPU(sw_config->element[i].downlink_seid));
791                 }
792         }
793         /* Simplified due to a single VSI */
794         vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
795         vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
796         vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
797         return (ret);
798 }
799
800 void
801 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
802 {
803         struct sysctl_oid *tree;
804         struct sysctl_oid_list *child;
805         struct sysctl_oid_list *vsi_list;
806
807         tree = device_get_sysctl_tree(vsi->dev);
808         child = SYSCTL_CHILDREN(tree);
809         vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
810                         CTLFLAG_RD, NULL, "VSI Number");
811
812         vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
813         ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
814
815         /* Copy of netstat RX errors counter for validation purposes */
816         SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
817                         CTLFLAG_RD, &vsi->ierrors,
818                         "RX packet errors");
819
820         if (queues_sysctls)
821                 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
822 }
823
824 /*
825  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
826  * Writes to the ITR registers immediately.
827  */
828 static int
829 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
830 {
831         struct ixl_pf *pf = (struct ixl_pf *)arg1;
832         device_t dev = pf->dev;
833         int error = 0;
834         int requested_tx_itr;
835
836         requested_tx_itr = pf->tx_itr;
837         error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
838         if ((error) || (req->newptr == NULL))
839                 return (error);
840         if (pf->dynamic_tx_itr) {
841                 device_printf(dev,
842                     "Cannot set TX itr value while dynamic TX itr is enabled\n");
843                     return (EINVAL);
844         }
845         if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
846                 device_printf(dev,
847                     "Invalid TX itr value; value must be between 0 and %d\n",
848                         IXL_MAX_ITR);
849                 return (EINVAL);
850         }
851
852         pf->tx_itr = requested_tx_itr;
853         ixl_configure_tx_itr(pf);
854
855         return (error);
856 }
857
858 /*
859  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
860  * Writes to the ITR registers immediately.
861  */
862 static int
863 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
864 {
865         struct ixl_pf *pf = (struct ixl_pf *)arg1;
866         device_t dev = pf->dev;
867         int error = 0;
868         int requested_rx_itr;
869
870         requested_rx_itr = pf->rx_itr;
871         error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
872         if ((error) || (req->newptr == NULL))
873                 return (error);
874         if (pf->dynamic_rx_itr) {
875                 device_printf(dev,
876                     "Cannot set RX itr value while dynamic RX itr is enabled\n");
877                     return (EINVAL);
878         }
879         if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
880                 device_printf(dev,
881                     "Invalid RX itr value; value must be between 0 and %d\n",
882                         IXL_MAX_ITR);
883                 return (EINVAL);
884         }
885
886         pf->rx_itr = requested_rx_itr;
887         ixl_configure_rx_itr(pf);
888
889         return (error);
890 }
891
892 void
893 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
894         struct sysctl_oid_list *child,
895         struct i40e_hw_port_stats *stats)
896 {
897         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
898             "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
899         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
900
901         struct i40e_eth_stats *eth_stats = &stats->eth;
902         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
903
904         struct ixl_sysctl_info ctls[] = 
905         {
906                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
907                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
908                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
909                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
910                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
911                 /* Packet Reception Stats */
912                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
913                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
914                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
915                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
916                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
917                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
918                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
919                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
920                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
921                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
922                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
923                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
924                 /* Packet Transmission Stats */
925                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
926                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
927                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
928                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
929                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
930                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
931                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
932                 /* Flow control */
933                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
934                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
935                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
936                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
937                 /* End */
938                 {0,0,0}
939         };
940
941         struct ixl_sysctl_info *entry = ctls;
942         while (entry->stat != 0)
943         {
944                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
945                                 CTLFLAG_RD, entry->stat,
946                                 entry->description);
947                 entry++;
948         }
949 }
950
951 void
952 ixl_set_rss_key(struct ixl_pf *pf)
953 {
954         struct i40e_hw *hw = &pf->hw;
955         struct ixl_vsi *vsi = &pf->vsi;
956         device_t        dev = pf->dev;
957         u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
958         enum i40e_status_code status;
959
960 #ifdef RSS
961         /* Fetch the configured RSS key */
962         rss_getkey((uint8_t *) &rss_seed);
963 #else
964         ixl_get_default_rss_key(rss_seed);
965 #endif
966         /* Fill out hash function seed */
967         if (hw->mac.type == I40E_MAC_X722) {
968                 struct i40e_aqc_get_set_rss_key_data key_data;
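                /*
                 * The AQ key structure is 52 bytes: a 40-byte standard RSS
                 * key followed by a 12-byte extended hash key, hence the
                 * size used in the bcopy() below.
                 */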
969                 bcopy(rss_seed, &key_data, 52);
970                 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
971                 if (status)
972                         device_printf(dev,
973                             "i40e_aq_set_rss_key status %s, error %s\n",
974                             i40e_stat_str(hw, status),
975                             i40e_aq_str(hw, hw->aq.asq_last_status));
976         } else {
977                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
978                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
979         }
980 }
981
982 /*
983  * Configure enabled PCTYPES for RSS.
984  */
985 void
986 ixl_set_rss_pctypes(struct ixl_pf *pf)
987 {
988         struct i40e_hw *hw = &pf->hw;
989         u64             set_hena = 0, hena;
990
991 #ifdef RSS
992         u32             rss_hash_config;
993
994         rss_hash_config = rss_gethashconfig();
995         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
996                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
997         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
998                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
999         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1000                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1001         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1002                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1003         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1004                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1005         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1006                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1007         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1008                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1009 #else
1010         if (hw->mac.type == I40E_MAC_X722)
1011                 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1012         else
1013                 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1014 #endif
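        /*
         * HENA is a 64-bit value split across the PFQF_HENA(0)/(1) register
         * pair; read the current value and OR in the new PCTYPE bits so any
         * already-enabled hash types remain enabled.
         */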
1015         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1016             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1017         hena |= set_hena;
1018         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1019         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1020
1021 }
1022
1023 /*
1024 ** Setup the PF's RSS parameters.
1025 */
1026 void
1027 ixl_config_rss(struct ixl_pf *pf)
1028 {
1029         ixl_set_rss_key(pf);
1030         ixl_set_rss_pctypes(pf);
1031         ixl_set_rss_hlut(pf);
1032 }
1033
1034 /*
1035  * Some firmware versions have a default MAC/VLAN filter
1036  * configured that interferes with the filters managed by the driver.
1037  * Make sure it's removed.
1038  */
1039 void
1040 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1041 {
1042         struct i40e_aqc_remove_macvlan_element_data e;
1043
1044         bzero(&e, sizeof(e));
1045         bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1046         e.vlan_tag = 0;
1047         e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1048         i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1049
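        /*
         * Second attempt with IGNORE_VLAN set, in case the default filter
         * matches the MAC address regardless of VLAN tag.
         */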
1050         bzero(&e, sizeof(e));
1051         bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1052         e.vlan_tag = 0;
1053         e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1054                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1055         i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1056 }
1057
1058 /*
1059 ** Initialize filter list and add filters that the hardware
1060 ** needs to know about.
1061 **
1062 ** Requires VSI's seid to be set before calling.
1063 */
1064 void
1065 ixl_init_filters(struct ixl_vsi *vsi)
1066 {
1067         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1068
1069         ixl_dbg_filter(pf, "%s: start\n", __func__);
1070
1071         /* Initialize mac filter list for VSI */
1072         LIST_INIT(&vsi->ftl);
1073         vsi->num_hw_filters = 0;
1074
1075         /* Receive broadcast Ethernet frames */
1076         i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1077
1078         if (IXL_VSI_IS_VF(vsi))
1079                 return;
1080
1081         ixl_del_default_hw_filters(vsi);
1082
1083         ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1084
1085         /*
1086          * Prevent Tx flow control frames from being sent out by
1087          * non-firmware transmitters.
1088          * This affects every VSI in the PF.
1089          */
1090 #ifndef IXL_DEBUG_FC
1091         i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1092 #else
1093         if (pf->enable_tx_fc_filter)
1094                 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1095 #endif
1096 }
1097
1098 void
1099 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1100 {
1101         struct i40e_hw *hw = vsi->hw;
1102         struct ixl_ftl_head tmp;
1103         int cnt;
1104
1105         /*
1106          * The ixl_add_hw_filters function adds filters configured
1107          * in HW to a list in VSI. Move all filters to a temporary
1108          * list to avoid corrupting it by concatenating to itself.
1109          */
1110         LIST_INIT(&tmp);
1111         LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1112         cnt = vsi->num_hw_filters;
1113         vsi->num_hw_filters = 0;
1114
1115         ixl_add_hw_filters(vsi, &tmp, cnt);
1116
1117         /* Filter could be removed if MAC address was changed */
1118         ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1119
1120         if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1121                 return;
1122         /*
1123          * VLAN HW filtering is enabled, make sure that filters
1124          * for all registered VLAN tags are configured
1125          */
1126         ixl_add_vlan_filters(vsi, hw->mac.addr);
1127 }
1128
1129 /*
1130  * This routine adds a MAC/VLAN filter to the software filter
1131  * list, then adds that new filter to the HW if it doesn't already
1132  * exist in the SW filter list.
1133  */
1134 void
1135 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1136 {
1137         struct ixl_mac_filter   *f, *tmp;
1138         struct ixl_pf           *pf;
1139         device_t                dev;
1140         struct ixl_ftl_head     to_add;
1141         int                     to_add_cnt;
1142
1143         pf = vsi->back;
1144         dev = pf->dev;
1145         to_add_cnt = 1;
1146
1147         ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1148             MAC_FORMAT_ARGS(macaddr), vlan);
1149
1150         /* Does one already exist */
1151         f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1152         if (f != NULL)
1153                 return;
1154
1155         LIST_INIT(&to_add);
1156         f = ixl_new_filter(&to_add, macaddr, vlan);
1157         if (f == NULL) {
1158                 device_printf(dev, "WARNING: no filter available!!\n");
1159                 return;
1160         }
1161         if (f->vlan != IXL_VLAN_ANY)
1162                 f->flags |= IXL_FILTER_VLAN;
1163         else
1164                 vsi->num_macs++;
1165
1166         /*
1167         ** Is this the first vlan being registered, if so we
1168         ** need to remove the ANY filter that indicates we are
1169         ** not in a vlan, and replace that with a 0 filter.
1170         */
1171         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1172                 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1173                 if (tmp != NULL) {
1174                         struct ixl_ftl_head to_del;
1175
1176                         /* Prepare new filter first to avoid removing
1177                          * VLAN_ANY filter if allocation fails */
1178                         f = ixl_new_filter(&to_add, macaddr, 0);
1179                         if (f == NULL) {
1180                                 device_printf(dev, "WARNING: no filter available!!\n");
1181                                 free(LIST_FIRST(&to_add), M_IXL);
1182                                 return;
1183                         }
1184                         to_add_cnt++;
1185
1186                         LIST_REMOVE(tmp, ftle);
1187                         LIST_INIT(&to_del);
1188                         LIST_INSERT_HEAD(&to_del, tmp, ftle);
1189                         ixl_del_hw_filters(vsi, &to_del, 1);
1190                 }
1191         }
1192
1193         ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1194 }
1195
1196 /**
1197  * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1198  * @vsi: pointer to VSI
1199  * @macaddr: MAC address
1200  *
1201  * Adds MAC/VLAN filter for each VLAN configured on the interface
1202  * if there are enough HW filters. Otherwise adds a single filter
1203  * for all tagged and untagged frames to allow all configured VLANs
1204  * to receive traffic.
1205  */
1206 void
1207 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1208 {
1209         struct ixl_ftl_head to_add;
1210         struct ixl_mac_filter *f;
1211         int to_add_cnt = 0;
1212         int i, vlan = 0;
1213
1214         if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1215                 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1216                 return;
1217         }
1218         LIST_INIT(&to_add);
1219
1220         /* Add filter for untagged frames if it does not exist yet */
1221         f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1222         if (f == NULL) {
1223                 f = ixl_new_filter(&to_add, macaddr, 0);
1224                 if (f == NULL) {
1225                         device_printf(vsi->dev, "WARNING: no filter available!!\n");
1226                         return;
1227                 }
1228                 to_add_cnt++;
1229         }
1230
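        /*
         * bit_ffs_at() stores in vlan the index of the first bit set at or
         * after i in the VLAN bitmap, or -1 when no more VLAN IDs are
         * registered, which ends the scan.
         */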
1231         for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
1232                 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1233                 if (vlan == -1)
1234                         break;
1235
1236                 /* Does one already exist */
1237                 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1238                 if (f != NULL)
1239                         continue;
1240
1241                 f = ixl_new_filter(&to_add, macaddr, vlan);
1242                 if (f == NULL) {
1243                         device_printf(vsi->dev, "WARNING: no filter available!!\n");
1244                         ixl_free_filters(&to_add);
1245                         return;
1246                 }
1247                 to_add_cnt++;
1248         }
1249
1250         ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1251 }
1252
1253 void
1254 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1255 {
1256         struct ixl_mac_filter *f, *tmp;
1257         struct ixl_ftl_head ftl_head;
1258         int to_del_cnt = 1;
1259
1260         ixl_dbg_filter((struct ixl_pf *)vsi->back,
1261             "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1262             MAC_FORMAT_ARGS(macaddr), vlan);
1263
1264         f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1265         if (f == NULL)
1266                 return;
1267
1268         LIST_REMOVE(f, ftle);
1269         LIST_INIT(&ftl_head);
1270         LIST_INSERT_HEAD(&ftl_head, f, ftle);
1271         if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1272                 vsi->num_macs--;
1273
1274         /* If this is not the last vlan just remove the filter */
1275         if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1276                 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1277                 return;
1278         }
1279
1280         /* It's the last vlan, we need to switch back to a non-vlan filter */
1281         tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
1282         if (tmp != NULL) {
1283                 LIST_REMOVE(tmp, ftle);
1284                 LIST_INSERT_AFTER(f, tmp, ftle);
1285                 to_del_cnt++;
1286         }
1287         ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1288
1289         ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1290 }
1291
1292 /**
1293  * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1294  * @vsi: VSI which filters need to be removed
1295  * @macaddr: MAC address
1296  *
1297  * Remove all MAC/VLAN filters with a given MAC address. For multicast
1298  * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY)
1299  * so skip them to speed up processing. Those filters should be removed
1300  * using ixl_del_filter function.
1301  */
1302 void
1303 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1304 {
1305         struct ixl_mac_filter *f, *tmp;
1306         struct ixl_ftl_head to_del;
1307         int to_del_cnt = 0;
1308
1309         LIST_INIT(&to_del);
1310
1311         LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1312                 if ((f->flags & IXL_FILTER_MC) != 0 ||
1313                     !ixl_ether_is_equal(f->macaddr, macaddr))
1314                         continue;
1315
1316                 LIST_REMOVE(f, ftle);
1317                 LIST_INSERT_HEAD(&to_del, f, ftle);
1318                 to_del_cnt++;
1319         }
1320
1321         ixl_dbg_filter((struct ixl_pf *)vsi->back,
1322             "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1323             __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1324         if (to_del_cnt > 0)
1325                 ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1326 }
1327
1328 /*
1329 ** Find the filter with both matching mac addr and vlan id
1330 */
1331 struct ixl_mac_filter *
1332 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1333 {
1334         struct ixl_mac_filter   *f;
1335
1336         LIST_FOREACH(f, headp, ftle) {
1337                 if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1338                     (f->vlan == vlan)) {
1339                         return (f);
1340                 }
1341         }
1342
1343         return (NULL);
1344 }
1345
1346 /*
1347 ** This routine takes additions to the vsi filter
1348 ** table and creates an Admin Queue call to create
1349 ** the filters in the hardware.
1350 */
1351 void
1352 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1353 {
1354         struct i40e_aqc_add_macvlan_element_data *a, *b;
1355         struct ixl_mac_filter   *f, *fn;
1356         struct ixl_pf           *pf;
1357         struct i40e_hw          *hw;
1358         device_t                dev;
1359         enum i40e_status_code   status;
1360         int                     j = 0;
1361
1362         pf = vsi->back;
1363         dev = vsi->dev;
1364         hw = &pf->hw;
1365
1366         ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1367
1368         if (cnt < 1) {
1369                 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1370                 return;
1371         }
1372
1373         a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1374             M_IXL, M_NOWAIT | M_ZERO);
1375         if (a == NULL) {
1376                 device_printf(dev, "add_hw_filters failed to get memory\n");
1377                 return;
1378         }
1379
1380         LIST_FOREACH(f, to_add, ftle) {
1381                 b = &a[j]; // a pox on fvl long names :)
1382                 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1383                 if (f->vlan == IXL_VLAN_ANY) {
1384                         b->vlan_tag = 0;
1385                         b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1386                 } else {
1387                         b->vlan_tag = f->vlan;
1388                         b->flags = 0;
1389                 }
1390                 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1391                 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1392                     MAC_FORMAT_ARGS(f->macaddr));
1393
1394                 if (++j == cnt)
1395                         break;
1396         }
1397         if (j != cnt) {
1398                 /* Something went wrong */
1399                 device_printf(dev,
1400                     "%s ERROR: list of filters too short; expected: %d, found: %d\n",
1401                     __func__, cnt, j);
1402                 ixl_free_filters(to_add);
1403                 goto out_free;
1404         }
1405
1406         status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1407         if (status == I40E_SUCCESS) {
1408                 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1409                 vsi->num_hw_filters += j;
1410                 goto out_free;
1411         }
1412
1413         device_printf(dev,
1414             "i40e_aq_add_macvlan status %s, error %s\n",
1415             i40e_stat_str(hw, status),
1416             i40e_aq_str(hw, hw->aq.asq_last_status));
1417         j = 0;
1418
1419         /* Verify which filters were actually configured in HW
1420          * and add them to the list */
1421         LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1422                 LIST_REMOVE(f, ftle);
1423                 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1424                         ixl_dbg_filter(pf,
1425                             "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1426                             __func__,
1427                             MAC_FORMAT_ARGS(f->macaddr),
1428                             f->vlan);
1429                         free(f, M_IXL);
1430                 } else {
1431                         LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1432                         vsi->num_hw_filters++;
1433                 }
1434                 j++;
1435         }
1436
1437 out_free:
1438         free(a, M_IXL);
1439 }
1440
1441 /*
1442 ** This routine takes removals from the vsi filter
1443 ** table and creates an Admin Queue call to delete
1444 ** the filters in the hardware.
1445 */
1446 void
1447 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1448 {
1449         struct i40e_aqc_remove_macvlan_element_data *d, *e;
1450         struct ixl_pf           *pf;
1451         struct i40e_hw          *hw;
1452         device_t                dev;
1453         struct ixl_mac_filter   *f, *f_temp;
1454         enum i40e_status_code   status;
1455         int                     j = 0;
1456
1457         pf = vsi->back;
1458         hw = &pf->hw;
1459         dev = vsi->dev;
1460
1461         ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
1462
1463         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1464             M_IXL, M_NOWAIT | M_ZERO);
1465         if (d == NULL) {
1466                 device_printf(dev, "%s: failed to get memory\n", __func__);
1467                 return;
1468         }
1469
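        /*
         * Build one remove_macvlan element per software filter; the
         * software entries are unlinked and freed as they are converted.
         */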
1470         LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1471                 e = &d[j]; // a pox on fvl long names :)
1472                 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1473                 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1474                 if (f->vlan == IXL_VLAN_ANY) {
1475                         e->vlan_tag = 0;
1476                         e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1477                 } else {
1478                         e->vlan_tag = f->vlan;
1479                 }
1480
1481                 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1482                     MAC_FORMAT_ARGS(f->macaddr));
1483
1484                 /* delete entry from the list */
1485                 LIST_REMOVE(f, ftle);
1486                 free(f, M_IXL);
1487                 if (++j == cnt)
1488                         break;
1489         }
1490         if (j != cnt || !LIST_EMPTY(to_del)) {
1491                 /* Something went wrong */
1492                 device_printf(dev,
1493                     "%s ERROR: filter list size mismatch, expected: %d, found: %d\n",
1494                     __func__, cnt, j);
1495                 ixl_free_filters(to_del);
1496                 goto out_free;
1497         }
1498         status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
1499         if (status) {
1500                 device_printf(dev,
1501                     "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1502                     __func__, i40e_stat_str(hw, status),
1503                     i40e_aq_str(hw, hw->aq.asq_last_status));
1504                 for (int i = 0; i < j; i++) {
1505                         if (d[i].error_code == 0)
1506                                 continue;
1507                         device_printf(dev,
1508                             "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1509                             __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1510                             d[i].vlan_tag);
1511                 }
1512         }
1513
1514         vsi->num_hw_filters -= j;
1515
1516 out_free:
1517         free(d, M_IXL);
1518
1519         ixl_dbg_filter(pf, "%s: end\n", __func__);
1520 }
1521
1522 int
1523 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1524 {
1525         struct i40e_hw  *hw = &pf->hw;
1526         int             error = 0;
1527         u32             reg;
1528         u16             pf_qidx;
1529
1530         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1531
1532         ixl_dbg(pf, IXL_DBG_EN_DIS,
1533             "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1534             pf_qidx, vsi_qidx);
1535
1536         i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1537
1538         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1539         reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1540             I40E_QTX_ENA_QENA_STAT_MASK;
1541         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1542         /* Verify the enable took */
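        /* Poll up to 10 times, 10 us apart (~100 us total). */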
1543         for (int j = 0; j < 10; j++) {
1544                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1545                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1546                         break;
1547                 i40e_usec_delay(10);
1548         }
1549         if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1550                 device_printf(pf->dev, "TX queue %d still disabled!\n",
1551                     pf_qidx);
1552                 error = ETIMEDOUT;
1553         }
1554
1555         return (error);
1556 }
1557
1558 int
1559 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1560 {
1561         struct i40e_hw  *hw = &pf->hw;
1562         int             error = 0;
1563         u32             reg;
1564         u16             pf_qidx;
1565
1566         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1567
1568         ixl_dbg(pf, IXL_DBG_EN_DIS,
1569             "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1570             pf_qidx, vsi_qidx);
1571
1572         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1573         reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1574             I40E_QRX_ENA_QENA_STAT_MASK;
1575         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1576         /* Verify the enable took */
1577         for (int j = 0; j < 10; j++) {
1578                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1579                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1580                         break;
1581                 i40e_usec_delay(10);
1582         }
1583         if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1584                 device_printf(pf->dev, "RX queue %d still disabled!\n",
1585                     pf_qidx);
1586                 error = ETIMEDOUT;
1587         }
1588
1589         return (error);
1590 }
1591
1592 int
1593 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1594 {
1595         int error = 0;
1596
1597         error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1598         /* Called function already prints error message */
1599         if (error)
1600                 return (error);
1601         error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1602         return (error);
1603 }
1604
1605 /*
1606  * Returns error on first ring that is detected hung.
1607  */
1608 int
1609 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1610 {
1611         struct i40e_hw  *hw = &pf->hw;
1612         int             error = 0;
1613         u32             reg;
1614         u16             pf_qidx;
1615
1616         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1617
1618         ixl_dbg(pf, IXL_DBG_EN_DIS,
1619             "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1620             pf_qidx, vsi_qidx);
1621
1622         i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1623         i40e_usec_delay(500);
1624
1625         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1626         reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1627         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1628         /* Verify the disable took */
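        /* Poll up to 10 times, 10 ms apart (~100 ms total). */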
1629         for (int j = 0; j < 10; j++) {
1630                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1631                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1632                         break;
1633                 i40e_msec_delay(10);
1634         }
1635         if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1636                 device_printf(pf->dev, "TX queue %d still enabled!\n",
1637                     pf_qidx);
1638                 error = ETIMEDOUT;
1639         }
1640
1641         return (error);
1642 }
1643
1644 /*
1645  * Returns error on first ring that is detected hung.
1646  */
1647 int
1648 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1649 {
1650         struct i40e_hw  *hw = &pf->hw;
1651         int             error = 0;
1652         u32             reg;
1653         u16             pf_qidx;
1654
1655         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1656
1657         ixl_dbg(pf, IXL_DBG_EN_DIS,
1658             "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1659             pf_qidx, vsi_qidx);
1660
1661         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1662         reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1663         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1664         /* Verify the disable took */
1665         for (int j = 0; j < 10; j++) {
1666                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1667                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1668                         break;
1669                 i40e_msec_delay(10);
1670         }
1671         if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1672                 device_printf(pf->dev, "RX queue %d still enabled!\n",
1673                     pf_qidx);
1674                 error = ETIMEDOUT;
1675         }
1676
1677         return (error);
1678 }
1679
1680 int
1681 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1682 {
1683         int error = 0;
1684
1685         error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1686         /* Called function already prints error message */
1687         if (error)
1688                 return (error);
1689         error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1690         return (error);
1691 }
1692
1693 static void
1694 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1695 {
1696         struct i40e_hw *hw = &pf->hw;
1697         device_t dev = pf->dev;
1698         struct ixl_vf *vf;
1699         bool mdd_detected = false;
1700         bool pf_mdd_detected = false;
1701         bool vf_mdd_detected = false;
1702         u16 vf_num, queue;
1703         u8 pf_num, event;
1704         u8 pf_mdet_num, vp_mdet_num;
1705         u32 reg;
1706
1707         /* find what triggered the MDD event */
1708         reg = rd32(hw, I40E_GL_MDET_TX);
1709         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1710                 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1711                     I40E_GL_MDET_TX_PF_NUM_SHIFT;
1712                 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1713                     I40E_GL_MDET_TX_VF_NUM_SHIFT;
1714                 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1715                     I40E_GL_MDET_TX_EVENT_SHIFT;
1716                 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1717                     I40E_GL_MDET_TX_QUEUE_SHIFT;
1718                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1719                 mdd_detected = true;
1720         }
1721
1722         if (!mdd_detected)
1723                 return;
1724
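        /*
         * The global register only reports that an event occurred; read
         * the per-PF and per-VF detection registers to attribute it.
         */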
1725         reg = rd32(hw, I40E_PF_MDET_TX);
1726         if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1727                 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1728                 pf_mdet_num = hw->pf_id;
1729                 pf_mdd_detected = true;
1730         }
1731
1732         /* Check if MDD was caused by a VF */
1733         for (int i = 0; i < pf->num_vfs; i++) {
1734                 vf = &(pf->vfs[i]);
1735                 reg = rd32(hw, I40E_VP_MDET_TX(i));
1736                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1737                         wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1738                         vp_mdet_num = i;
1739                         vf->num_mdd_events++;
1740                         vf_mdd_detected = true;
1741                 }
1742         }
1743
1744         /* Print out an error message */
1745         if (vf_mdd_detected && pf_mdd_detected)
1746                 device_printf(dev,
1747                     "Malicious Driver Detection event %d"
1748                     " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1749                     event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1750         else if (vf_mdd_detected && !pf_mdd_detected)
1751                 device_printf(dev,
1752                     "Malicious Driver Detection event %d"
1753                     " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1754                     event, queue, pf_num, vf_num, vp_mdet_num);
1755         else if (!vf_mdd_detected && pf_mdd_detected)
1756                 device_printf(dev,
1757                     "Malicious Driver Detection event %d"
1758                     " on TX queue %d, pf number %d (PF-%d)\n",
1759                     event, queue, pf_num, pf_mdet_num);
1760         /* Theoretically shouldn't happen */
1761         else
1762                 device_printf(dev,
1763                     "TX Malicious Driver Detection event (unknown)\n");
1764 }
1765
1766 static void
1767 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1768 {
1769         struct i40e_hw *hw = &pf->hw;
1770         device_t dev = pf->dev;
1771         struct ixl_vf *vf;
1772         bool mdd_detected = false;
1773         bool pf_mdd_detected = false;
1774         bool vf_mdd_detected = false;
1775         u16 queue;
1776         u8 pf_num, event;
1777         u8 pf_mdet_num, vp_mdet_num;
1778         u32 reg;
1779
1780         /*
1781          * GL_MDET_RX doesn't contain VF number information, unlike
1782          * GL_MDET_TX.
1783          */
1784         reg = rd32(hw, I40E_GL_MDET_RX);
1785         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1786                 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1787                     I40E_GL_MDET_RX_FUNCTION_SHIFT;
1788                 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1789                     I40E_GL_MDET_RX_EVENT_SHIFT;
1790                 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1791                     I40E_GL_MDET_RX_QUEUE_SHIFT;
1792                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1793                 mdd_detected = true;
1794         }
1795
1796         if (!mdd_detected)
1797                 return;
1798
1799         reg = rd32(hw, I40E_PF_MDET_RX);
1800         if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1801                 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1802                 pf_mdet_num = hw->pf_id;
1803                 pf_mdd_detected = true;
1804         }
1805
1806         /* Check if MDD was caused by a VF */
1807         for (int i = 0; i < pf->num_vfs; i++) {
1808                 vf = &(pf->vfs[i]);
1809                 reg = rd32(hw, I40E_VP_MDET_RX(i));
1810                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1811                         wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1812                         vp_mdet_num = i;
1813                         vf->num_mdd_events++;
1814                         vf_mdd_detected = true;
1815                 }
1816         }
1817
1818         /* Print out an error message */
1819         if (vf_mdd_detected && pf_mdd_detected)
1820                 device_printf(dev,
1821                     "Malicious Driver Detection event %d"
1822                     " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1823                     event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1824         else if (vf_mdd_detected && !pf_mdd_detected)
1825                 device_printf(dev,
1826                     "Malicious Driver Detection event %d"
1827                     " on RX queue %d, pf number %d, (VF-%d)\n",
1828                     event, queue, pf_num, vp_mdet_num);
1829         else if (!vf_mdd_detected && pf_mdd_detected)
1830                 device_printf(dev,
1831                     "Malicious Driver Detection event %d"
1832                     " on RX queue %d, pf number %d (PF-%d)\n",
1833                     event, queue, pf_num, pf_mdet_num);
1834         /* Theoretically shouldn't happen */
1835         else
1836                 device_printf(dev,
1837                     "RX Malicious Driver Detection event (unknown)\n");
1838 }
1839
1840 /**
1841  * ixl_handle_mdd_event
1842  *
1843  * Called from the interrupt handler to identify possibly malicious VFs
1844  * (it also detects events caused by the PF itself).
1845  **/
1846 void
1847 ixl_handle_mdd_event(struct ixl_pf *pf)
1848 {
1849         struct i40e_hw *hw = &pf->hw;
1850         u32 reg;
1851
1852         /*
1853          * Handle both TX/RX because it's possible they could
1854          * both trigger in the same interrupt.
1855          */
1856         ixl_handle_tx_mdd_event(pf);
1857         ixl_handle_rx_mdd_event(pf);
1858
1859         atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1860
1861         /* re-enable mdd interrupt cause */
1862         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1863         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1864         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1865         ixl_flush(hw);
1866 }
1867
1868 void
1869 ixl_enable_intr0(struct i40e_hw *hw)
1870 {
1871         u32             reg;
1872
1873         /* Use IXL_ITR_NONE so ITR isn't updated here */
1874         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1875             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1876             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1877         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1878 }
1879
1880 void
1881 ixl_disable_intr0(struct i40e_hw *hw)
1882 {
1883         u32             reg;
1884
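        /* Writing DYN_CTL0 without INTENA set disables the interrupt. */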
1885         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1886         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1887         ixl_flush(hw);
1888 }
1889
1890 void
1891 ixl_enable_queue(struct i40e_hw *hw, int id)
1892 {
1893         u32             reg;
1894
1895         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1896             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1897             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1898         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1899 }
1900
1901 void
1902 ixl_disable_queue(struct i40e_hw *hw, int id)
1903 {
1904         u32             reg;
1905
1906         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1907         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1908 }
1909
1910 void
1911 ixl_handle_empr_reset(struct ixl_pf *pf)
1912 {
1913         struct ixl_vsi  *vsi = &pf->vsi;
1914         bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
1915
1916         ixl_prepare_for_reset(pf, is_up);
1917         /*
1918          * i40e_pf_reset checks the type of reset and acts
1919          * accordingly. If an EMP or Core reset was already
1920          * performed, a PF reset is not necessary and can
1921          * sometimes fail.
1922          */
1923         ixl_pf_reset(pf);
1924
1925         if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
1926             ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
1927                 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
1928                 device_printf(pf->dev,
1929                     "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1930                 pf->link_up = FALSE;
1931                 ixl_update_link_status(pf);
1932         }
1933
1934         ixl_rebuild_hw_structs_after_reset(pf, is_up);
1935
1936         atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING);
1937 }
1938
1939 void
1940 ixl_update_stats_counters(struct ixl_pf *pf)
1941 {
1942         struct i40e_hw  *hw = &pf->hw;
1943         struct ixl_vsi  *vsi = &pf->vsi;
1944         struct ixl_vf   *vf;
1945         u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
1946
1947         struct i40e_hw_port_stats *nsd = &pf->stats;
1948         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1949
1950         /* Update hw stats */
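        /*
         * The ixl_stat_update32/48 helpers below subtract the offsets
         * captured on the first pass, so each counter is reported
         * relative to when its offset was recorded.
         */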
1951         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1952                            pf->stat_offsets_loaded,
1953                            &osd->crc_errors, &nsd->crc_errors);
1954         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1955                            pf->stat_offsets_loaded,
1956                            &osd->illegal_bytes, &nsd->illegal_bytes);
1957         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1958                            I40E_GLPRT_GORCL(hw->port),
1959                            pf->stat_offsets_loaded,
1960                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1961         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1962                            I40E_GLPRT_GOTCL(hw->port),
1963                            pf->stat_offsets_loaded,
1964                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1965         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1966                            pf->stat_offsets_loaded,
1967                            &osd->eth.rx_discards,
1968                            &nsd->eth.rx_discards);
1969         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1970                            I40E_GLPRT_UPRCL(hw->port),
1971                            pf->stat_offsets_loaded,
1972                            &osd->eth.rx_unicast,
1973                            &nsd->eth.rx_unicast);
1974         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1975                            I40E_GLPRT_UPTCL(hw->port),
1976                            pf->stat_offsets_loaded,
1977                            &osd->eth.tx_unicast,
1978                            &nsd->eth.tx_unicast);
1979         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1980                            I40E_GLPRT_MPRCL(hw->port),
1981                            pf->stat_offsets_loaded,
1982                            &osd->eth.rx_multicast,
1983                            &nsd->eth.rx_multicast);
1984         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1985                            I40E_GLPRT_MPTCL(hw->port),
1986                            pf->stat_offsets_loaded,
1987                            &osd->eth.tx_multicast,
1988                            &nsd->eth.tx_multicast);
1989         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1990                            I40E_GLPRT_BPRCL(hw->port),
1991                            pf->stat_offsets_loaded,
1992                            &osd->eth.rx_broadcast,
1993                            &nsd->eth.rx_broadcast);
1994         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1995                            I40E_GLPRT_BPTCL(hw->port),
1996                            pf->stat_offsets_loaded,
1997                            &osd->eth.tx_broadcast,
1998                            &nsd->eth.tx_broadcast);
1999
2000         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2001                            pf->stat_offsets_loaded,
2002                            &osd->tx_dropped_link_down,
2003                            &nsd->tx_dropped_link_down);
2004         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2005                            pf->stat_offsets_loaded,
2006                            &osd->mac_local_faults,
2007                            &nsd->mac_local_faults);
2008         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2009                            pf->stat_offsets_loaded,
2010                            &osd->mac_remote_faults,
2011                            &nsd->mac_remote_faults);
2012         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2013                            pf->stat_offsets_loaded,
2014                            &osd->rx_length_errors,
2015                            &nsd->rx_length_errors);
2016
2017         /* Flow control (LFC) stats */
2018         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2019                            pf->stat_offsets_loaded,
2020                            &osd->link_xon_rx, &nsd->link_xon_rx);
2021         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2022                            pf->stat_offsets_loaded,
2023                            &osd->link_xon_tx, &nsd->link_xon_tx);
2024         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2025                            pf->stat_offsets_loaded,
2026                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
2027         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2028                            pf->stat_offsets_loaded,
2029                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
2030
2031         /*
2032          * For watchdog management we need to know if we have been paused
2033          * during the last interval, so capture that here.
2034          */
2035         if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2036                 vsi->shared->isc_pause_frames = 1;
2037
2038         /* Packet size stats rx */
2039         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2040                            I40E_GLPRT_PRC64L(hw->port),
2041                            pf->stat_offsets_loaded,
2042                            &osd->rx_size_64, &nsd->rx_size_64);
2043         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2044                            I40E_GLPRT_PRC127L(hw->port),
2045                            pf->stat_offsets_loaded,
2046                            &osd->rx_size_127, &nsd->rx_size_127);
2047         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2048                            I40E_GLPRT_PRC255L(hw->port),
2049                            pf->stat_offsets_loaded,
2050                            &osd->rx_size_255, &nsd->rx_size_255);
2051         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2052                            I40E_GLPRT_PRC511L(hw->port),
2053                            pf->stat_offsets_loaded,
2054                            &osd->rx_size_511, &nsd->rx_size_511);
2055         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2056                            I40E_GLPRT_PRC1023L(hw->port),
2057                            pf->stat_offsets_loaded,
2058                            &osd->rx_size_1023, &nsd->rx_size_1023);
2059         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2060                            I40E_GLPRT_PRC1522L(hw->port),
2061                            pf->stat_offsets_loaded,
2062                            &osd->rx_size_1522, &nsd->rx_size_1522);
2063         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2064                            I40E_GLPRT_PRC9522L(hw->port),
2065                            pf->stat_offsets_loaded,
2066                            &osd->rx_size_big, &nsd->rx_size_big);
2067
2068         /* Packet size stats tx */
2069         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2070                            I40E_GLPRT_PTC64L(hw->port),
2071                            pf->stat_offsets_loaded,
2072                            &osd->tx_size_64, &nsd->tx_size_64);
2073         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2074                            I40E_GLPRT_PTC127L(hw->port),
2075                            pf->stat_offsets_loaded,
2076                            &osd->tx_size_127, &nsd->tx_size_127);
2077         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2078                            I40E_GLPRT_PTC255L(hw->port),
2079                            pf->stat_offsets_loaded,
2080                            &osd->tx_size_255, &nsd->tx_size_255);
2081         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2082                            I40E_GLPRT_PTC511L(hw->port),
2083                            pf->stat_offsets_loaded,
2084                            &osd->tx_size_511, &nsd->tx_size_511);
2085         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2086                            I40E_GLPRT_PTC1023L(hw->port),
2087                            pf->stat_offsets_loaded,
2088                            &osd->tx_size_1023, &nsd->tx_size_1023);
2089         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2090                            I40E_GLPRT_PTC1522L(hw->port),
2091                            pf->stat_offsets_loaded,
2092                            &osd->tx_size_1522, &nsd->tx_size_1522);
2093         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2094                            I40E_GLPRT_PTC9522L(hw->port),
2095                            pf->stat_offsets_loaded,
2096                            &osd->tx_size_big, &nsd->tx_size_big);
2097
2098         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2099                            pf->stat_offsets_loaded,
2100                            &osd->rx_undersize, &nsd->rx_undersize);
2101         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2102                            pf->stat_offsets_loaded,
2103                            &osd->rx_fragments, &nsd->rx_fragments);
2104         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2105                            pf->stat_offsets_loaded,
2106                            &osd->rx_oversize, &nsd->rx_oversize);
2107         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2108                            pf->stat_offsets_loaded,
2109                            &osd->rx_jabber, &nsd->rx_jabber);
2110         /* EEE */
2111         i40e_get_phy_lpi_status(hw, nsd);
2112
2113         i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2114                           &osd->tx_lpi_count, &nsd->tx_lpi_count,
2115                           &osd->rx_lpi_count, &nsd->rx_lpi_count);
2116
2117         pf->stat_offsets_loaded = true;
2118         /* End hw stats */
2119
2120         /* Update vsi stats */
2121         ixl_update_vsi_stats(vsi);
2122
2123         for (int i = 0; i < pf->num_vfs; i++) {
2124                 vf = &pf->vfs[i];
2125                 if (vf->vf_flags & VF_FLAG_ENABLED)
2126                         ixl_update_eth_stats(&pf->vfs[i].vsi);
2127         }
2128 }
2129
2130 /**
2131  * Update VSI-specific ethernet statistics counters.
2132  **/
2133 void
2134 ixl_update_eth_stats(struct ixl_vsi *vsi)
2135 {
2136         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2137         struct i40e_hw *hw = &pf->hw;
2138         struct i40e_eth_stats *es;
2139         struct i40e_eth_stats *oes;
2140         u16 stat_idx = vsi->info.stat_counter_idx;
2141
2142         es = &vsi->eth_stats;
2143         oes = &vsi->eth_stats_offsets;
2144
2145         /* Gather up the stats that the hw collects */
2146         ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2147                            vsi->stat_offsets_loaded,
2148                            &oes->tx_errors, &es->tx_errors);
2149         ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2150                            vsi->stat_offsets_loaded,
2151                            &oes->rx_discards, &es->rx_discards);
2152
2153         ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2154                            I40E_GLV_GORCL(stat_idx),
2155                            vsi->stat_offsets_loaded,
2156                            &oes->rx_bytes, &es->rx_bytes);
2157         ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2158                            I40E_GLV_UPRCL(stat_idx),
2159                            vsi->stat_offsets_loaded,
2160                            &oes->rx_unicast, &es->rx_unicast);
2161         ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2162                            I40E_GLV_MPRCL(stat_idx),
2163                            vsi->stat_offsets_loaded,
2164                            &oes->rx_multicast, &es->rx_multicast);
2165         ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2166                            I40E_GLV_BPRCL(stat_idx),
2167                            vsi->stat_offsets_loaded,
2168                            &oes->rx_broadcast, &es->rx_broadcast);
2169
2170         ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2171                            I40E_GLV_GOTCL(stat_idx),
2172                            vsi->stat_offsets_loaded,
2173                            &oes->tx_bytes, &es->tx_bytes);
2174         ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2175                            I40E_GLV_UPTCL(stat_idx),
2176                            vsi->stat_offsets_loaded,
2177                            &oes->tx_unicast, &es->tx_unicast);
2178         ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2179                            I40E_GLV_MPTCL(stat_idx),
2180                            vsi->stat_offsets_loaded,
2181                            &oes->tx_multicast, &es->tx_multicast);
2182         ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2183                            I40E_GLV_BPTCL(stat_idx),
2184                            vsi->stat_offsets_loaded,
2185                            &oes->tx_broadcast, &es->tx_broadcast);
2186         vsi->stat_offsets_loaded = true;
2187 }
2188
2189 void
2190 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2191 {
2192         struct ixl_pf           *pf;
2193         struct i40e_eth_stats   *es;
2194         u64                     tx_discards, csum_errs;
2195
2196         struct i40e_hw_port_stats *nsd;
2197
2198         pf = vsi->back;
2199         es = &vsi->eth_stats;
2200         nsd = &pf->stats;
2201
2202         ixl_update_eth_stats(vsi);
2203
2204         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2205
2206         csum_errs = 0;
2207         for (int i = 0; i < vsi->num_rx_queues; i++)
2208                 csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2209         nsd->checksum_error = csum_errs;
2210
2211         /* Update ifnet stats */
2212         IXL_SET_IPACKETS(vsi, es->rx_unicast +
2213                            es->rx_multicast +
2214                            es->rx_broadcast);
2215         IXL_SET_OPACKETS(vsi, es->tx_unicast +
2216                            es->tx_multicast +
2217                            es->tx_broadcast);
2218         IXL_SET_IBYTES(vsi, es->rx_bytes);
2219         IXL_SET_OBYTES(vsi, es->tx_bytes);
2220         IXL_SET_IMCASTS(vsi, es->rx_multicast);
2221         IXL_SET_OMCASTS(vsi, es->tx_multicast);
2222
2223         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2224             nsd->checksum_error + nsd->rx_length_errors +
2225             nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2226             nsd->rx_jabber);
2227         IXL_SET_OERRORS(vsi, es->tx_errors);
2228         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2229         IXL_SET_OQDROPS(vsi, tx_discards);
2230         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2231         IXL_SET_COLLISIONS(vsi, 0);
2232 }
2233
2234 /**
2235  * Reset all of the stats for the given pf
2236  **/
2237 void
2238 ixl_pf_reset_stats(struct ixl_pf *pf)
2239 {
2240         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2241         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2242         pf->stat_offsets_loaded = false;
2243 }
2244
2245 /**
2246  * Resets all stats of the given vsi
2247  **/
2248 void
2249 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2250 {
2251         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2252         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2253         vsi->stat_offsets_loaded = false;
2254 }
2255
2256 /**
2257  * Read and update a 48 bit stat from the hw
2258  *
2259  * Since the device stats are not reset at PFReset, they likely will not
2260  * be zeroed when the driver starts.  We'll save the first values read
2261  * and use them as offsets to be subtracted from the raw values in order
2262  * to report stats that count from zero.
2263  **/
2264 void
2265 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2266         bool offset_loaded, u64 *offset, u64 *stat)
2267 {
2268         u64 new_data;
2269
2270 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2271         new_data = rd64(hw, loreg);
2272 #else
2273         /*
2274          * Use two rd32's instead of one rd64; FreeBSD versions before
2275          * 10 don't support 64-bit bus reads/writes.
2276          */
2277         new_data = rd32(hw, loreg);
2278         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2279 #endif
2280
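        /*
         * Report the delta from the saved offset; if the 48-bit counter
         * wrapped since the offset was captured, add 2^48 before
         * subtracting, then mask the result back to 48 bits.
         */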
2281         if (!offset_loaded)
2282                 *offset = new_data;
2283         if (new_data >= *offset)
2284                 *stat = new_data - *offset;
2285         else
2286                 *stat = (new_data + ((u64)1 << 48)) - *offset;
2287         *stat &= 0xFFFFFFFFFFFFULL;
2288 }
2289
2290 /**
2291  * Read and update a 32 bit stat from the hw
2292  **/
2293 void
2294 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2295         bool offset_loaded, u64 *offset, u64 *stat)
2296 {
2297         u32 new_data;
2298
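        /* Same offset scheme as the 48-bit variant, with wrap at 2^32. */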
2299         new_data = rd32(hw, reg);
2300         if (!offset_loaded)
2301                 *offset = new_data;
2302         if (new_data >= *offset)
2303                 *stat = (u32)(new_data - *offset);
2304         else
2305                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2306 }
2307
2308 /**
2309  * Add subset of device sysctls safe to use in recovery mode
2310  */
2311 void
2312 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2313 {
2314         device_t dev = pf->dev;
2315
2316         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2317         struct sysctl_oid_list *ctx_list =
2318             SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2319
2320         struct sysctl_oid *debug_node;
2321         struct sysctl_oid_list *debug_list;
2322
2323         SYSCTL_ADD_PROC(ctx, ctx_list,
2324             OID_AUTO, "fw_version",
2325             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2326             ixl_sysctl_show_fw, "A", "Firmware version");
2327
2328         /* Add sysctls meant to print debug information, but don't list them
2329          * in "sysctl -a" output. */
2330         debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2331             OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2332             "Debug Sysctls");
2333         debug_list = SYSCTL_CHILDREN(debug_node);
2334
2335         SYSCTL_ADD_UINT(ctx, debug_list,
2336             OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2337             &pf->hw.debug_mask, 0, "Shared code debug message level");
2338
2339         SYSCTL_ADD_UINT(ctx, debug_list,
2340             OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2341             &pf->dbg_mask, 0, "Non-shared code debug message level");
2342
2343         SYSCTL_ADD_PROC(ctx, debug_list,
2344             OID_AUTO, "dump_debug_data",
2345             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2346             pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2347
2348         SYSCTL_ADD_PROC(ctx, debug_list,
2349             OID_AUTO, "do_pf_reset",
2350             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2351             pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2352
2353         SYSCTL_ADD_PROC(ctx, debug_list,
2354             OID_AUTO, "do_core_reset",
2355             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2356             pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2357
2358         SYSCTL_ADD_PROC(ctx, debug_list,
2359             OID_AUTO, "do_global_reset",
2360             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2361             pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2362
2363         SYSCTL_ADD_PROC(ctx, debug_list,
2364             OID_AUTO, "queue_interrupt_table",
2365             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2366             pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2367 }
2368
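/*
 * Usage note (hypothetical unit number): the nodes registered by these
 * ixl_add_*_sysctls functions appear under the device's sysctl tree,
 * e.g. "sysctl dev.ixl.0.fw_version".
 */
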
2369 void
2370 ixl_add_device_sysctls(struct ixl_pf *pf)
2371 {
2372         device_t dev = pf->dev;
2373         struct i40e_hw *hw = &pf->hw;
2374
2375         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2376         struct sysctl_oid_list *ctx_list =
2377             SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2378
2379         struct sysctl_oid *debug_node;
2380         struct sysctl_oid_list *debug_list;
2381
2382         struct sysctl_oid *fec_node;
2383         struct sysctl_oid_list *fec_list;
2384         struct sysctl_oid *eee_node;
2385         struct sysctl_oid_list *eee_list;
2386
2387         /* Set up sysctls */
2388         SYSCTL_ADD_PROC(ctx, ctx_list,
2389             OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2390             pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2391
2392         SYSCTL_ADD_PROC(ctx, ctx_list,
2393             OID_AUTO, "advertise_speed",
2394             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2395             ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2396
2397         SYSCTL_ADD_PROC(ctx, ctx_list,
2398             OID_AUTO, "supported_speeds",
2399             CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2400             ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2401
2402         SYSCTL_ADD_PROC(ctx, ctx_list,
2403             OID_AUTO, "current_speed",
2404             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2405             ixl_sysctl_current_speed, "A", "Current Port Speed");
2406
2407         SYSCTL_ADD_PROC(ctx, ctx_list,
2408             OID_AUTO, "fw_version",
2409             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2410             ixl_sysctl_show_fw, "A", "Firmware version");
2411
2412         SYSCTL_ADD_PROC(ctx, ctx_list,
2413             OID_AUTO, "unallocated_queues",
2414             CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2415             ixl_sysctl_unallocated_queues, "I",
2416             "Queues not allocated to a PF or VF");
2417
2418         SYSCTL_ADD_PROC(ctx, ctx_list,
2419             OID_AUTO, "tx_itr",
2420             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2421             ixl_sysctl_pf_tx_itr, "I",
2422             "Immediately set TX ITR value for all queues");
2423
2424         SYSCTL_ADD_PROC(ctx, ctx_list,
2425             OID_AUTO, "rx_itr",
2426             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2427             ixl_sysctl_pf_rx_itr, "I",
2428             "Immediately set RX ITR value for all queues");
2429
2430         SYSCTL_ADD_INT(ctx, ctx_list,
2431             OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2432             &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2433
2434         SYSCTL_ADD_INT(ctx, ctx_list,
2435             OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2436             &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2437
2438         /* Add FEC sysctls for 25G adapters */
2439         if (i40e_is_25G_device(hw->device_id)) {
2440                 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2441                     OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2442                     "FEC Sysctls");
2443                 fec_list = SYSCTL_CHILDREN(fec_node);
2444
2445                 SYSCTL_ADD_PROC(ctx, fec_list,
2446                     OID_AUTO, "fc_ability",
2447                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2448                     ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2449
2450                 SYSCTL_ADD_PROC(ctx, fec_list,
2451                     OID_AUTO, "rs_ability",
2452                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2453                     ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2454
2455                 SYSCTL_ADD_PROC(ctx, fec_list,
2456                     OID_AUTO, "fc_requested",
2457                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2458                     ixl_sysctl_fec_fc_request, "I",
2459                     "FC FEC mode requested on link");
2460
2461                 SYSCTL_ADD_PROC(ctx, fec_list,
2462                     OID_AUTO, "rs_requested",
2463                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2464                     ixl_sysctl_fec_rs_request, "I",
2465                     "RS FEC mode requested on link");
2466
2467                 SYSCTL_ADD_PROC(ctx, fec_list,
2468                     OID_AUTO, "auto_fec_enabled",
2469                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2470                     ixl_sysctl_fec_auto_enable, "I",
2471                     "Let FW decide FEC ability/request modes");
2472         }
2473
2474         SYSCTL_ADD_PROC(ctx, ctx_list,
2475             OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2476             pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2477
2478         eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2479             OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2480             "Energy Efficient Ethernet (EEE) Sysctls");
2481         eee_list = SYSCTL_CHILDREN(eee_node);
2482
2483         SYSCTL_ADD_PROC(ctx, eee_list,
2484             OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2485             pf, 0, ixl_sysctl_eee_enable, "I",
2486             "Enable Energy Efficient Ethernet (EEE)");
2487
2488         SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2489             CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2490             "TX LPI status");
2491
2492         SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2493             CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2494             "RX LPI status");
2495
2496         SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2497             CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2498             "TX LPI count");
2499
2500         SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2501             CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2502             "RX LPI count");
2503
2504         SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2505             "link_active_on_if_down",
2506             CTLTYPE_INT | CTLFLAG_RWTUN,
2507             pf, 0, ixl_sysctl_set_link_active, "I",
2508             IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2509
2510         /* Add sysctls meant to print debug information, but don't list them
2511          * in "sysctl -a" output. */
2512         debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2513             OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2514             "Debug Sysctls");
2515         debug_list = SYSCTL_CHILDREN(debug_node);
2516
2517         SYSCTL_ADD_UINT(ctx, debug_list,
2518             OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2519             &pf->hw.debug_mask, 0, "Shared code debug message level");
2520
2521         SYSCTL_ADD_UINT(ctx, debug_list,
2522             OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2523             &pf->dbg_mask, 0, "Non-shared code debug message level");
2524
2525         SYSCTL_ADD_PROC(ctx, debug_list,
2526             OID_AUTO, "link_status",
2527             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2528             pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2529
2530         SYSCTL_ADD_PROC(ctx, debug_list,
2531             OID_AUTO, "phy_abilities_init",
2532             CTLTYPE_STRING | CTLFLAG_RD,
2533             pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2534
2535         SYSCTL_ADD_PROC(ctx, debug_list,
2536             OID_AUTO, "phy_abilities",
2537             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2538             pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2539
2540         SYSCTL_ADD_PROC(ctx, debug_list,
2541             OID_AUTO, "filter_list",
2542             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2543             pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2544
2545         SYSCTL_ADD_PROC(ctx, debug_list,
2546             OID_AUTO, "hw_res_alloc",
2547             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2548             pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2549
2550         SYSCTL_ADD_PROC(ctx, debug_list,
2551             OID_AUTO, "switch_config",
2552             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2553             pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2554
2555         SYSCTL_ADD_PROC(ctx, debug_list,
2556             OID_AUTO, "switch_vlans",
2557             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2558             pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2559
2560         SYSCTL_ADD_PROC(ctx, debug_list,
2561             OID_AUTO, "rss_key",
2562             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2563             pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2564
2565         SYSCTL_ADD_PROC(ctx, debug_list,
2566             OID_AUTO, "rss_lut",
2567             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2568             pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2569
2570         SYSCTL_ADD_PROC(ctx, debug_list,
2571             OID_AUTO, "rss_hena",
2572             CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2573             pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2574
2575         SYSCTL_ADD_PROC(ctx, debug_list,
2576             OID_AUTO, "disable_fw_link_management",
2577             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2578             pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2579
2580         SYSCTL_ADD_PROC(ctx, debug_list,
2581             OID_AUTO, "dump_debug_data",
2582             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2583             pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2584
2585         SYSCTL_ADD_PROC(ctx, debug_list,
2586             OID_AUTO, "do_pf_reset",
2587             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2588             pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2589
2590         SYSCTL_ADD_PROC(ctx, debug_list,
2591             OID_AUTO, "do_core_reset",
2592             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2593             pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2594
2595         SYSCTL_ADD_PROC(ctx, debug_list,
2596             OID_AUTO, "do_global_reset",
2597             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2598             pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2599
2600         SYSCTL_ADD_PROC(ctx, debug_list,
2601             OID_AUTO, "queue_interrupt_table",
2602             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2603             pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2604
2605         if (pf->has_i2c) {
2606                 SYSCTL_ADD_PROC(ctx, debug_list,
2607                     OID_AUTO, "read_i2c_byte",
2608                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2609                     pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2610
2611                 SYSCTL_ADD_PROC(ctx, debug_list,
2612                     OID_AUTO, "write_i2c_byte",
2613                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2614                     pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2615
2616                 SYSCTL_ADD_PROC(ctx, debug_list,
2617                     OID_AUTO, "read_i2c_diag_data",
2618                     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2619                     pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2620         }
2621 }
2622
2623 /*
2624  * Primarily for finding out how many queues can be assigned to VFs
2625  * at runtime.
2626  */
2627 static int
2628 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2629 {
2630         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2631         int queues;
2632
2633         queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2634
2635         return sysctl_handle_int(oidp, NULL, queues, req);
2636 }
2637
2638 static const char *
2639 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2640 {
2641         const char * link_speed_str[] = {
2642                 "Unknown",
2643                 "100 Mbps",
2644                 "1 Gbps",
2645                 "10 Gbps",
2646                 "40 Gbps",
2647                 "20 Gbps",
2648                 "25 Gbps",
2649                 "2.5 Gbps",
2650                 "5 Gbps"
2651         };
2652         int index;
2653
2654         switch (link_speed) {
2655         case I40E_LINK_SPEED_100MB:
2656                 index = 1;
2657                 break;
2658         case I40E_LINK_SPEED_1GB:
2659                 index = 2;
2660                 break;
2661         case I40E_LINK_SPEED_10GB:
2662                 index = 3;
2663                 break;
2664         case I40E_LINK_SPEED_40GB:
2665                 index = 4;
2666                 break;
2667         case I40E_LINK_SPEED_20GB:
2668                 index = 5;
2669                 break;
2670         case I40E_LINK_SPEED_25GB:
2671                 index = 6;
2672                 break;
2673         case I40E_LINK_SPEED_2_5GB:
2674                 index = 7;
2675                 break;
2676         case I40E_LINK_SPEED_5GB:
2677                 index = 8;
2678                 break;
2679         case I40E_LINK_SPEED_UNKNOWN:
2680         default:
2681                 index = 0;
2682                 break;
2683         }
2684
2685         return (link_speed_str[index]);
2686 }
2687
2688 int
2689 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2690 {
2691         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2692         struct i40e_hw *hw = &pf->hw;
2693         int error = 0;
2694
2695         ixl_update_link_status(pf);
2696
2697         error = sysctl_handle_string(oidp,
2698             __DECONST(void *,
2699                 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2700             8, req);
2701
2702         return (error);
2703 }
2704
2705 /*
2706  * Converts an 8-bit speed bitmap between the sysctl flag encoding
2707  * and the Admin Queue flag encoding.
2708  */
2709 static u8
2710 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2711 {
2712 #define SPEED_MAP_SIZE 8
2713         static u16 speedmap[SPEED_MAP_SIZE] = {
2714                 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2715                 (I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2716                 (I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2717                 (I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2718                 (I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2719                 (I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2720                 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2721                 (I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2722         };
2723         u8 retval = 0;
2724
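        /*
         * Each table entry packs the AQ flag in the low byte and the
         * matching sysctl flag in the high byte; the loop translates a
         * set bit from one encoding to the other depending on to_aq.
         */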
2725         for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2726                 if (to_aq)
2727                         retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2728                 else
2729                         retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2730         }
2731
2732         return (retval);
2733 }
2734
2735 int
2736 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2737 {
2738         struct i40e_hw *hw = &pf->hw;
2739         device_t dev = pf->dev;
2740         struct i40e_aq_get_phy_abilities_resp abilities;
2741         struct i40e_aq_set_phy_config config;
2742         enum i40e_status_code aq_error = 0;
2743
2744         /* Get current capability information */
2745         aq_error = i40e_aq_get_phy_capabilities(hw,
2746             FALSE, FALSE, &abilities, NULL);
2747         if (aq_error) {
2748                 device_printf(dev,
2749                     "%s: Error getting phy capabilities %d,"
2750                     " aq error: %d\n", __func__, aq_error,
2751                     hw->aq.asq_last_status);
2752                 return (EIO);
2753         }
2754
2755         /* Prepare new config */
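        /*
         * Copy the current PHY abilities into the new config so the
         * set_phy_config call changes only the advertised speeds.
         */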
2756         bzero(&config, sizeof(config));
2757         if (from_aq)
2758                 config.link_speed = speeds;
2759         else
2760                 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2761         config.phy_type = abilities.phy_type;
2762         config.phy_type_ext = abilities.phy_type_ext;
2763         config.abilities = abilities.abilities
2764             | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2765         config.eee_capability = abilities.eee_capability;
2766         config.eeer = abilities.eeer_val;
2767         config.low_power_ctrl = abilities.d3_lpan;
2768         config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2769             & I40E_AQ_PHY_FEC_CONFIG_MASK;
2770
2771         /* Do aq command & restart link */
2772         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2773         if (aq_error) {
2774                 device_printf(dev,
2775                     "%s: Error setting new phy config %d,"
2776                     " aq error: %d\n", __func__, aq_error,
2777                     hw->aq.asq_last_status);
2778                 return (EIO);
2779         }
2780
2781         return (0);
2782 }
2783
2784 /*
2785 ** Supported link speeds
2786 **      Flags:
2787 **       0x1 - 100 Mb
2788 **       0x2 - 1G
2789 **       0x4 - 10G
2790 **       0x8 - 20G
2791 **      0x10 - 25G
2792 **      0x20 - 40G
2793 **      0x40 - 2.5G
2794 **      0x80 - 5G
2795 */
2796 static int
2797 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2798 {
2799         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2800         int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2801
2802         return sysctl_handle_int(oidp, NULL, supported, req);
2803 }
2804
2805 /*
2806 ** Control link advertise speed:
2807 **      Flags:
2808 **       0x1 - advertise 100 Mb
2809 **       0x2 - advertise 1G
2810 **       0x4 - advertise 10G
2811 **       0x8 - advertise 20G
2812 **      0x10 - advertise 25G
2813 **      0x20 - advertise 40G
2814 **      0x40 - advertise 2.5G
2815 **      0x80 - advertise 5G
2816 **
2817 **      Set to 0 to disable link
2818 */
2819 int
2820 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2821 {
2822         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2823         device_t dev = pf->dev;
2824         u8 converted_speeds;
2825         int requested_ls = 0;
2826         int error = 0;
2827
2828         /* Read in new mode */
2829         requested_ls = pf->advertised_speed;
2830         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2831         if ((error) || (req->newptr == NULL))
2832                 return (error);
2833         if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2834                 device_printf(dev, "Interface is currently in FW recovery mode. "
2835                                 "Setting advertise speed not supported\n");
2836                 return (EINVAL);
2837         }
2838
2839         /* Error out if bits outside of possible flag range are set */
2840         if ((requested_ls & ~((u8)0xFF)) != 0) {
2841                 device_printf(dev, "Input advertised speed out of range; "
2842                     "valid flags are: 0x%02x\n",
2843                     ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2844                 return (EINVAL);
2845         }
2846
2847         /* Check if adapter supports input value */
2848         converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2849         if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2850                 device_printf(dev, "Invalid advertised speed; "
2851                     "valid flags are: 0x%02x\n",
2852                     ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2853                 return (EINVAL);
2854         }
2855
2856         error = ixl_set_advertised_speeds(pf, requested_ls, false);
2857         if (error)
2858                 return (error);
2859
2860         pf->advertised_speed = requested_ls;
2861         ixl_update_link_status(pf);
2862         return (0);
2863 }
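
/*
 * Example use from userland, assuming this handler is attached under the
 * device's sysctl tree as "advertise_speed" (done elsewhere in this file):
 *
 *     sysctl dev.ixl.0.advertise_speed=0x6
 *
 * requests 1G and 10G advertisement, provided both bits are set in
 * pf->supported_speeds; unsupported bits are rejected with EINVAL above.
 */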
2864
2865 /*
2866  * Input: bitmap of enum i40e_aq_link_speed
2867  */
2868 u64
2869 ixl_max_aq_speed_to_value(u8 link_speeds)
2870 {
2871         if (link_speeds & I40E_LINK_SPEED_40GB)
2872                 return IF_Gbps(40);
2873         if (link_speeds & I40E_LINK_SPEED_25GB)
2874                 return IF_Gbps(25);
2875         if (link_speeds & I40E_LINK_SPEED_20GB)
2876                 return IF_Gbps(20);
2877         if (link_speeds & I40E_LINK_SPEED_10GB)
2878                 return IF_Gbps(10);
2879         if (link_speeds & I40E_LINK_SPEED_5GB)
2880                 return IF_Gbps(5);
2881         if (link_speeds & I40E_LINK_SPEED_2_5GB)
2882                 return IF_Mbps(2500);
2883         if (link_speeds & I40E_LINK_SPEED_1GB)
2884                 return IF_Gbps(1);
2885         if (link_speeds & I40E_LINK_SPEED_100MB)
2886                 return IF_Mbps(100);
2887         else
2888                 /* Minimum supported link speed */
2889                 return IF_Mbps(100);
2890 }
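
/*
 * For example, a bitmap of (I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB)
 * returns IF_Gbps(10): the if-chain above is ordered fastest-first, so the
 * highest speed bit that is set wins.
 */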
2891
2892 /*
2893 ** Get the width and transaction speed of
2894 ** the bus this adapter is plugged into.
2895 */
2896 void
2897 ixl_get_bus_info(struct ixl_pf *pf)
2898 {
2899         struct i40e_hw *hw = &pf->hw;
2900         device_t dev = pf->dev;
2901         u16 link;
2902         u32 offset, num_ports;
2903         u64 max_speed;
2904
2905         /* Some devices don't use PCIE */
2906         if (hw->mac.type == I40E_MAC_X722)
2907                 return;
2908
2909         /* Read PCI Express Capabilities Link Status Register */
2910         pci_find_cap(dev, PCIY_EXPRESS, &offset);
2911         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2912
2913         /* Fill out hw struct with PCIE info */
2914         i40e_set_pci_config_data(hw, link);
2915
2916         /* Use info to print out bandwidth messages */
2917         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2918             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2919             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2920             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2921             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2922             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2923             (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2924             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2925             ("Unknown"));
2926
2927         /*
2928          * If the adapter is in a slot running at its maximum supported
2929          * speed and width, no warning message needs to be printed out.
2930          */
2931         if (hw->bus.speed >= i40e_bus_speed_8000
2932             && hw->bus.width >= i40e_bus_width_pcie_x8)
2933                 return;
2934
2935         num_ports = bitcount32(hw->func_caps.valid_functions);
2936         max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2937
2938         if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2939                 device_printf(dev, "PCI-Express bandwidth available"
2940                     " for this device may be insufficient for"
2941                     " optimal performance.\n");
2942                 device_printf(dev, "Please move the device to a different"
2943                     " PCI-e link with more lanes and/or higher"
2944                     " transfer rate.\n");
2945         }
2946 }
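
/*
 * The bandwidth check above is a rough heuristic, assuming the i40e_bus_speed
 * enum values are MT/s per lane: it compares the sum of the ports' maximum
 * link rates (in Mb/s) against bus.speed * bus.width. For instance, a
 * single-port adapter capable of 40G (40000) in an 8.0GT/s x4 slot
 * (8000 * 4 = 32000) triggers the warning, while the same adapter in an x8
 * slot (8000 * 8 = 64000) does not.
 */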
2947
2948 static int
2949 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2950 {
2951         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
2952         struct i40e_hw  *hw = &pf->hw;
2953         struct sbuf     *sbuf;
2954
2955         sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2956         ixl_nvm_version_str(hw, sbuf);
2957         sbuf_finish(sbuf);
2958         sbuf_delete(sbuf);
2959
2960         return (0);
2961 }
2962
2963 void
2964 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2965 {
2966         u8 nvma_ptr = nvma->config & 0xFF;
2967         u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2968         const char * cmd_str;
2969
2970         switch (nvma->command) {
2971         case I40E_NVM_READ:
2972                 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2973                     nvma->offset == 0 && nvma->data_size == 1) {
2974                         device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2975                         return;
2976                 }
2977                 cmd_str = "READ ";
2978                 break;
2979         case I40E_NVM_WRITE:
2980                 cmd_str = "WRITE";
2981                 break;
2982         default:
2983                 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2984                 return;
2985         }
2986         device_printf(dev,
2987             "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2988             cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
2989 }
2990
2991 int
2992 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
2993 {
2994         struct i40e_hw *hw = &pf->hw;
2995         struct i40e_nvm_access *nvma;
2996         device_t dev = pf->dev;
2997         enum i40e_status_code status = 0;
2998         size_t nvma_size, ifd_len, exp_len;
2999         int err, perrno;
3000
3001         DEBUGFUNC("ixl_handle_nvmupd_cmd");
3002
3003         /* Sanity checks */
3004         nvma_size = sizeof(struct i40e_nvm_access);
3005         ifd_len = ifd->ifd_len;
3006
3007         if (ifd_len < nvma_size ||
3008             ifd->ifd_data == NULL) {
3009                 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3010                     __func__);
3011                 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3012                     __func__, ifd_len, nvma_size);
3013                 device_printf(dev, "%s: data pointer: %p\n", __func__,
3014                     ifd->ifd_data);
3015                 return (EINVAL);
3016         }
3017
3018         nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3019         err = copyin(ifd->ifd_data, nvma, ifd_len);
3020         if (err) {
3021                 device_printf(dev, "%s: Cannot get request from user space\n",
3022                     __func__);
3023                 free(nvma, M_IXL);
3024                 return (err);
3025         }
3026
3027         if (pf->dbg_mask & IXL_DBG_NVMUPD)
3028                 ixl_print_nvm_cmd(dev, nvma);
3029
3030         if (IXL_PF_IS_RESETTING(pf)) {
3031                 int count = 0;
3032                 while (count++ < 100) {
3033                         i40e_msec_delay(100);
3034                         if (!(IXL_PF_IS_RESETTING(pf)))
3035                                 break;
3036                 }
3037         }
3038
3039         if (IXL_PF_IS_RESETTING(pf)) {
3040                 device_printf(dev,
3041                     "%s: timeout waiting for EMP reset to finish\n",
3042                     __func__);
3043                 free(nvma, M_IXL);
3044                 return (-EBUSY);
3045         }
3046
3047         if (nvma->data_size < 1 || nvma->data_size > 4096) {
3048                 device_printf(dev,
3049                     "%s: invalid request, data size not in supported range\n",
3050                     __func__);
3051                 free(nvma, M_IXL);
3052                 return (EINVAL);
3053         }
3054
3055         /*
3056          * Older versions of the NVM update tool don't set ifd_len to the size
3057          * of the entire buffer passed to the ioctl. Check the data_size field
3058          * in the contained i40e_nvm_access struct and ensure everything is
3059          * copied in from userspace.
3060          */
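        /*
         * For example, a request with data_size == 8 must supply
         * sizeof(struct i40e_nvm_access) + 7 bytes in total, since the first
         * data byte is already accounted for inside the structure itself.
         */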
3061         exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3062
3063         if (ifd_len < exp_len) {
3064                 ifd_len = exp_len;
3065                 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3066                 err = copyin(ifd->ifd_data, nvma, ifd_len);
3067                 if (err) {
3068                         device_printf(dev, "%s: Cannot get request from user space\n",
3069                                         __func__);
3070                         free(nvma, M_IXL);
3071                         return (err);
3072                 }
3073         }
3074
3075         /* TODO: Might need a different lock here */
3076         /* IXL_PF_LOCK(pf); */
3077         status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3078         /* IXL_PF_UNLOCK(pf); */
3079
3080         err = copyout(nvma, ifd->ifd_data, ifd_len);
3081         free(nvma, M_IXL);
3082         if (err) {
3083                 device_printf(dev, "%s: Cannot return data to user space\n",
3084                                 __func__);
3085                 return (err);
3086         }
3087
3088         /* Let the nvmupdate tool report errors; only print them here when debugging is enabled */
3089         if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3090                 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3091                     i40e_stat_str(hw, status), perrno);
3092
3093         /*
3094          * -EPERM is actually ERESTART, which the kernel interprets as a request
3095          * to restart this ioctl, so return -EACCES in place of -EPERM instead.
3096          */
3097         if (perrno == -EPERM)
3098                 return (-EACCES);
3099         else
3100                 return (perrno);
3101 }
3102
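/*
 * Scan the four MDIO/I2C select registers and return the index (0-3) of the
 * I2C interface that is enabled and mapped to this PF's port, or -1 if none
 * is found.
 */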
3103 int
3104 ixl_find_i2c_interface(struct ixl_pf *pf)
3105 {
3106         struct i40e_hw *hw = &pf->hw;
3107         bool i2c_en, port_matched;
3108         u32 reg;
3109
3110         for (int i = 0; i < 4; i++) {
3111                 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3112                 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3113                 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3114                     >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3115                     & BIT(hw->port);
3116                 if (i2c_en && port_matched)
3117                         return (i);
3118         }
3119
3120         return (-1);
3121 }
3122
3123 void
3124 ixl_set_link(struct ixl_pf *pf, bool enable)
3125 {
3126         struct i40e_hw *hw = &pf->hw;
3127         device_t dev = pf->dev;
3128         struct i40e_aq_get_phy_abilities_resp abilities;
3129         struct i40e_aq_set_phy_config config;
3130         enum i40e_status_code aq_error = 0;
3131         u32 phy_type, phy_type_ext;
3132
3133         /* Get initial capability information */
3134         aq_error = i40e_aq_get_phy_capabilities(hw,
3135             FALSE, TRUE, &abilities, NULL);
3136         if (aq_error) {
3137                 device_printf(dev,
3138                     "%s: Error getting phy capabilities %d,"
3139                     " aq error: %d\n", __func__, aq_error,
3140                     hw->aq.asq_last_status);
3141                 return;
3142         }
3143
3144         phy_type = abilities.phy_type;
3145         phy_type_ext = abilities.phy_type_ext;
3146
3147         /* Get current capability information */
3148         aq_error = i40e_aq_get_phy_capabilities(hw,
3149             FALSE, FALSE, &abilities, NULL);
3150         if (aq_error) {
3151                 device_printf(dev,
3152                     "%s: Error getting phy capabilities %d,"
3153                     " aq error: %d\n", __func__, aq_error,
3154                     hw->aq.asq_last_status);
3155                 return;
3156         }
3157
3158         /* Prepare new config */
3159         memset(&config, 0, sizeof(config));
3160         config.link_speed = abilities.link_speed;
3161         config.abilities = abilities.abilities;
3162         config.eee_capability = abilities.eee_capability;
3163         config.eeer = abilities.eeer_val;
3164         config.low_power_ctrl = abilities.d3_lpan;
3165         config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3166             & I40E_AQ_PHY_FEC_CONFIG_MASK;
3167         config.phy_type = 0;
3168         config.phy_type_ext = 0;
3169
3170         config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3171                         I40E_AQ_PHY_FLAG_PAUSE_RX);
3172
3173         switch (pf->fc) {
3174         case I40E_FC_FULL:
3175                 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3176                         I40E_AQ_PHY_FLAG_PAUSE_RX;
3177                 break;
3178         case I40E_FC_RX_PAUSE:
3179                 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3180                 break;
3181         case I40E_FC_TX_PAUSE:
3182                 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3183                 break;
3184         default:
3185                 break;
3186         }
3187
3188         if (enable) {
3189                 config.phy_type = phy_type;
3190                 config.phy_type_ext = phy_type_ext;
3192         }
3193
3194         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3195         if (aq_error) {
3196                 device_printf(dev,
3197                     "%s: Error setting new phy config %d,"
3198                     " aq error: %d\n", __func__, aq_error,
3199                     hw->aq.asq_last_status);
3200                 return;
3201         }
3202
3203         aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3204         if (aq_error) {
3205                 device_printf(dev,
3206                     "%s: Error set link config %d,"
3207                     " aq error: %d\n", __func__, aq_error,
3208                     hw->aq.asq_last_status);
3209                 return;
3210         }
3211 }
3212
3213 static char *
3214 ixl_phy_type_string(u32 bit_pos, bool ext)
3215 {
3216         static char * phy_types_str[32] = {
3217                 "SGMII",
3218                 "1000BASE-KX",
3219                 "10GBASE-KX4",
3220                 "10GBASE-KR",
3221                 "40GBASE-KR4",
3222                 "XAUI",
3223                 "XFI",
3224                 "SFI",
3225                 "XLAUI",
3226                 "XLPPI",
3227                 "40GBASE-CR4",
3228                 "10GBASE-CR1",
3229                 "SFP+ Active DA",
3230                 "QSFP+ Active DA",
3231                 "Reserved (14)",
3232                 "Reserved (15)",
3233                 "Reserved (16)",
3234                 "100BASE-TX",
3235                 "1000BASE-T",
3236                 "10GBASE-T",
3237                 "10GBASE-SR",
3238                 "10GBASE-LR",
3239                 "10GBASE-SFP+Cu",
3240                 "10GBASE-CR1",
3241                 "40GBASE-CR4",
3242                 "40GBASE-SR4",
3243                 "40GBASE-LR4",
3244                 "1000BASE-SX",
3245                 "1000BASE-LX",
3246                 "1000BASE-T Optical",
3247                 "20GBASE-KR2",
3248                 "Reserved (31)"
3249         };
3250         static char * ext_phy_types_str[8] = {
3251                 "25GBASE-KR",
3252                 "25GBASE-CR",
3253                 "25GBASE-SR",
3254                 "25GBASE-LR",
3255                 "25GBASE-AOC",
3256                 "25GBASE-ACC",
3257                 "2.5GBASE-T",
3258                 "5GBASE-T"
3259         };
3260
3261         if (ext && bit_pos > 7) return "Invalid_Ext";
3262         if (bit_pos > 31) return "Invalid";
3263
3264         return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3265 }
3266
3267 /* TODO: ERJ: I don't think this is necessary anymore. */
3268 int
3269 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3270 {
3271         device_t dev = pf->dev;
3272         struct i40e_hw *hw = &pf->hw;
3273         struct i40e_aq_desc desc;
3274         enum i40e_status_code status;
3275
3276         struct i40e_aqc_get_link_status *aq_link_status =
3277                 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3278
3279         i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3280         link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3281         status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3282         if (status) {
3283                 device_printf(dev,
3284                     "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3285                     __func__, i40e_stat_str(hw, status),
3286                     i40e_aq_str(hw, hw->aq.asq_last_status));
3287                 return (EIO);
3288         }
3289
3290         bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3291         return (0);
3292 }
3293
3294 static char *
3295 ixl_phy_type_string_ls(u8 val)
3296 {
3297         if (val >= 0x1F)
3298                 return ixl_phy_type_string(val - 0x1F, true);
3299         else
3300                 return ixl_phy_type_string(val, false);
3301 }
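
/*
 * Link-status PHY type values at or above 0x1F index the extended table in
 * ixl_phy_type_string(); e.g. a raw value of 0x1F maps to extended bit
 * position 0 ("25GBASE-KR") in the tables above.
 */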
3302
3303 static int
3304 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3305 {
3306         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3307         device_t dev = pf->dev;
3308         struct sbuf *buf;
3309         int error = 0;
3310
3311         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3312         if (!buf) {
3313                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3314                 return (ENOMEM);
3315         }
3316
3317         struct i40e_aqc_get_link_status link_status;
3318         error = ixl_aq_get_link_status(pf, &link_status);
3319         if (error) {
3320                 sbuf_delete(buf);
3321                 return (error);
3322         }
3323
3324         sbuf_printf(buf, "\n"
3325             "PHY Type : 0x%02x<%s>\n"
3326             "Speed    : 0x%02x\n"
3327             "Link info: 0x%02x\n"
3328             "AN info  : 0x%02x\n"
3329             "Ext info : 0x%02x\n"
3330             "Loopback : 0x%02x\n"
3331             "Max Frame: %d\n"
3332             "Config   : 0x%02x\n"
3333             "Power    : 0x%02x",
3334             link_status.phy_type,
3335             ixl_phy_type_string_ls(link_status.phy_type),
3336             link_status.link_speed,
3337             link_status.link_info,
3338             link_status.an_info,
3339             link_status.ext_info,
3340             link_status.loopback,
3341             link_status.max_frame_size,
3342             link_status.config,
3343             link_status.power_desc);
3344
3345         error = sbuf_finish(buf);
3346         if (error)
3347                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3348
3349         sbuf_delete(buf);
3350         return (error);
3351 }
3352
3353 static int
3354 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3355 {
3356         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3357         struct i40e_hw *hw = &pf->hw;
3358         device_t dev = pf->dev;
3359         enum i40e_status_code status;
3360         struct i40e_aq_get_phy_abilities_resp abilities;
3361         struct sbuf *buf;
3362         int error = 0;
3363
3364         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3365         if (!buf) {
3366                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3367                 return (ENOMEM);
3368         }
3369
3370         status = i40e_aq_get_phy_capabilities(hw,
3371             FALSE, arg2 != 0, &abilities, NULL);
3372         if (status) {
3373                 device_printf(dev,
3374                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3375                     __func__, i40e_stat_str(hw, status),
3376                     i40e_aq_str(hw, hw->aq.asq_last_status));
3377                 sbuf_delete(buf);
3378                 return (EIO);
3379         }
3380
3381         sbuf_printf(buf, "\n"
3382             "PHY Type : %08x",
3383             abilities.phy_type);
3384
3385         if (abilities.phy_type != 0) {
3386                 sbuf_printf(buf, "<");
3387                 for (int i = 0; i < 32; i++)
3388                         if ((1 << i) & abilities.phy_type)
3389                                 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3390                 sbuf_printf(buf, ">");
3391         }
3392
3393         sbuf_printf(buf, "\nPHY Ext  : %02x",
3394             abilities.phy_type_ext);
3395
3396         if (abilities.phy_type_ext != 0) {
3397                 sbuf_printf(buf, "<");
3398                 for (int i = 0; i < 4; i++)
3399                         if ((1 << i) & abilities.phy_type_ext)
3400                                 sbuf_printf(buf, "%s,",
3401                                     ixl_phy_type_string(i, true));
3402                 sbuf_printf(buf, ">");
3403         }
3404
3405         sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
3406         if (abilities.link_speed != 0) {
3407                 u8 link_speed;
3408                 sbuf_printf(buf, " <");
3409                 for (int i = 0; i < 8; i++) {
3410                         link_speed = (1 << i) & abilities.link_speed;
3411                         if (link_speed)
3412                                 sbuf_printf(buf, "%s, ",
3413                                     ixl_link_speed_string(link_speed));
3414                 }
3415                 sbuf_printf(buf, ">");
3416         }
3417
3418         sbuf_printf(buf, "\n"
3419             "Abilities: %02x\n"
3420             "EEE cap  : %04x\n"
3421             "EEER reg : %08x\n"
3422             "D3 Lpan  : %02x\n"
3423             "ID       : %02x %02x %02x %02x\n"
3424             "ModType  : %02x %02x %02x\n"
3425             "ModType E: %01x\n"
3426             "FEC Cfg  : %02x\n"
3427             "Ext CC   : %02x",
3428             abilities.abilities, abilities.eee_capability,
3429             abilities.eeer_val, abilities.d3_lpan,
3430             abilities.phy_id[0], abilities.phy_id[1],
3431             abilities.phy_id[2], abilities.phy_id[3],
3432             abilities.module_type[0], abilities.module_type[1],
3433             abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3434             abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3435             abilities.ext_comp_code);
3436
3437         error = sbuf_finish(buf);
3438         if (error)
3439                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3440
3441         sbuf_delete(buf);
3442         return (error);
3443 }
3444
3445 static int
3446 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3447 {
3448         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3449         struct ixl_vsi *vsi = &pf->vsi;
3450         struct ixl_mac_filter *f;
3451         device_t dev = pf->dev;
3452         int error = 0, ftl_len = 0, ftl_counter = 0;
3453
3454         struct sbuf *buf;
3455
3456         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3457         if (!buf) {
3458                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3459                 return (ENOMEM);
3460         }
3461
3462         sbuf_printf(buf, "\n");
3463
3464         /* Print MAC filters */
3465         sbuf_printf(buf, "PF Filters:\n");
3466         LIST_FOREACH(f, &vsi->ftl, ftle)
3467                 ftl_len++;
3468
3469         if (ftl_len < 1)
3470                 sbuf_printf(buf, "(none)\n");
3471         else {
3472                 LIST_FOREACH(f, &vsi->ftl, ftle) {
3473                         sbuf_printf(buf,
3474                             MAC_FORMAT ", vlan %4d, flags %#06x",
3475                             MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3476                         /* don't print '\n' for last entry */
3477                         if (++ftl_counter != ftl_len)
3478                                 sbuf_printf(buf, "\n");
3479                 }
3480         }
3481
3482 #ifdef PCI_IOV
3483         /* TODO: Give each VF its own filter list sysctl */
3484         struct ixl_vf *vf;
3485         if (pf->num_vfs > 0) {
3486                 sbuf_printf(buf, "\n\n");
3487                 for (int i = 0; i < pf->num_vfs; i++) {
3488                         vf = &pf->vfs[i];
3489                         if (!(vf->vf_flags & VF_FLAG_ENABLED))
3490                                 continue;
3491
3492                         vsi = &vf->vsi;
3493                         ftl_len = 0, ftl_counter = 0;
3494                         sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3495                         LIST_FOREACH(f, &vsi->ftl, ftle)
3496                                 ftl_len++;
3497
3498                         if (ftl_len < 1)
3499                                 sbuf_printf(buf, "(none)\n");
3500                         else {
3501                                 LIST_FOREACH(f, &vsi->ftl, ftle) {
3502                                         sbuf_printf(buf,
3503                                             MAC_FORMAT ", vlan %4d, flags %#06x\n",
3504                                             MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3505                                 }
3506                         }
3507                 }
3508         }
3509 #endif
3510
3511         error = sbuf_finish(buf);
3512         if (error)
3513                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3514         sbuf_delete(buf);
3515
3516         return (error);
3517 }
3518
3519 #define IXL_SW_RES_SIZE 0x14
3520 int
3521 ixl_res_alloc_cmp(const void *a, const void *b)
3522 {
3523         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3524         one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3525         two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3526
3527         return ((int)one->resource_type - (int)two->resource_type);
3528 }
3529
3530 /*
3531  * Longest string in the table below ("Perfect Match MAC address"): 25 characters
3532  */
3533 const char *
3534 ixl_switch_res_type_string(u8 type)
3535 {
3536         static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3537                 "VEB",
3538                 "VSI",
3539                 "Perfect Match MAC address",
3540                 "S-tag",
3541                 "(Reserved)",
3542                 "Multicast hash entry",
3543                 "Unicast hash entry",
3544                 "VLAN",
3545                 "VSI List entry",
3546                 "(Reserved)",
3547                 "VLAN Statistic Pool",
3548                 "Mirror Rule",
3549                 "Queue Set",
3550                 "Inner VLAN Forward filter",
3551                 "(Reserved)",
3552                 "Inner MAC",
3553                 "IP",
3554                 "GRE/VN1 Key",
3555                 "VN2 Key",
3556                 "Tunneling Port"
3557         };
3558
3559         if (type < IXL_SW_RES_SIZE)
3560                 return ixl_switch_res_type_strings[type];
3561         else
3562                 return "(Reserved)";
3563 }
3564
3565 static int
3566 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3567 {
3568         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3569         struct i40e_hw *hw = &pf->hw;
3570         device_t dev = pf->dev;
3571         struct sbuf *buf;
3572         enum i40e_status_code status;
3573         int error = 0;
3574
3575         u8 num_entries;
3576         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3577
3578         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3579         if (!buf) {
3580                 device_printf(dev, "Could not allocate sbuf for output.\n");
3581                 return (ENOMEM);
3582         }
3583
3584         bzero(resp, sizeof(resp));
3585         status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3586                                 resp,
3587                                 IXL_SW_RES_SIZE,
3588                                 NULL);
3589         if (status) {
3590                 device_printf(dev,
3591                     "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3592                     __func__, i40e_stat_str(hw, status),
3593                     i40e_aq_str(hw, hw->aq.asq_last_status));
3594                 sbuf_delete(buf);
3595                 return (error);
3596         }
3597
3598         /* Sort entries by type for display */
3599         qsort(resp, num_entries,
3600             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3601             &ixl_res_alloc_cmp);
3602
3603         sbuf_cat(buf, "\n");
3604         sbuf_printf(buf, "# of entries: %d\n", num_entries);
3605         sbuf_printf(buf,
3606             "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3607             "                          | (this)     | (all) | (this) | (all)       \n");
3608         for (int i = 0; i < num_entries; i++) {
3609                 sbuf_printf(buf,
3610                     "%25s | %10d   %5d   %6d   %12d",
3611                     ixl_switch_res_type_string(resp[i].resource_type),
3612                     resp[i].guaranteed,
3613                     resp[i].total,
3614                     resp[i].used,
3615                     resp[i].total_unalloced);
3616                 if (i < num_entries - 1)
3617                         sbuf_cat(buf, "\n");
3618         }
3619
3620         error = sbuf_finish(buf);
3621         if (error)
3622                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3623
3624         sbuf_delete(buf);
3625         return (error);
3626 }
3627
3628 enum ixl_sw_seid_offset {
3629         IXL_SW_SEID_EMP = 1,
3630         IXL_SW_SEID_MAC_START = 2,
3631         IXL_SW_SEID_MAC_END = 5,
3632         IXL_SW_SEID_PF_START = 16,
3633         IXL_SW_SEID_PF_END = 31,
3634         IXL_SW_SEID_VF_START = 32,
3635         IXL_SW_SEID_VF_END = 159,
3636 };
3637
3638 /*
3639  * Caller must init and delete sbuf; this function will clear and
3640  * finish it for caller.
3641  *
3642  * Note: The SEID argument only applies for elements defined by FW at
3643  * power-on; these include the EMP, Ports, PFs and VFs.
3644  */
3645 static char *
3646 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3647 {
3648         sbuf_clear(s);
3649
3650         /* If SEID is in certain ranges, then we can infer the
3651          * mapping of SEID to switch element.
3652          */
3653         if (seid == IXL_SW_SEID_EMP) {
3654                 sbuf_cat(s, "EMP");
3655                 goto out;
3656         } else if (seid >= IXL_SW_SEID_MAC_START &&
3657             seid <= IXL_SW_SEID_MAC_END) {
3658                 sbuf_printf(s, "MAC  %2d",
3659                     seid - IXL_SW_SEID_MAC_START);
3660                 goto out;
3661         } else if (seid >= IXL_SW_SEID_PF_START &&
3662             seid <= IXL_SW_SEID_PF_END) {
3663                 sbuf_printf(s, "PF  %3d",
3664                     seid - IXL_SW_SEID_PF_START);
3665                 goto out;
3666         } else if (seid >= IXL_SW_SEID_VF_START &&
3667             seid <= IXL_SW_SEID_VF_END) {
3668                 sbuf_printf(s, "VF  %3d",
3669                     seid - IXL_SW_SEID_VF_START);
3670                 goto out;
3671         }
3672
3673         switch (element_type) {
3674         case I40E_AQ_SW_ELEM_TYPE_BMC:
3675                 sbuf_cat(s, "BMC");
3676                 break;
3677         case I40E_AQ_SW_ELEM_TYPE_PV:
3678                 sbuf_cat(s, "PV");
3679                 break;
3680         case I40E_AQ_SW_ELEM_TYPE_VEB:
3681                 sbuf_cat(s, "VEB");
3682                 break;
3683         case I40E_AQ_SW_ELEM_TYPE_PA:
3684                 sbuf_cat(s, "PA");
3685                 break;
3686         case I40E_AQ_SW_ELEM_TYPE_VSI:
3687                 sbuf_printf(s, "VSI");
3688                 break;
3689         default:
3690                 sbuf_cat(s, "?");
3691                 break;
3692         }
3693
3694 out:
3695         sbuf_finish(s);
3696         return sbuf_data(s);
3697 }
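
/*
 * For example, with the SEID ranges above, a SEID of 34 is printed as VF
 * index 2 (34 - IXL_SW_SEID_VF_START), while SEIDs outside the fixed ranges
 * fall through to the element_type switch.
 */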
3698
3699 static int
3700 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3701 {
3702         const struct i40e_aqc_switch_config_element_resp *one, *two;
3703         one = (const struct i40e_aqc_switch_config_element_resp *)a;
3704         two = (const struct i40e_aqc_switch_config_element_resp *)b;
3705
3706         return ((int)one->seid - (int)two->seid);
3707 }
3708
3709 static int
3710 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3711 {
3712         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3713         struct i40e_hw *hw = &pf->hw;
3714         device_t dev = pf->dev;
3715         struct sbuf *buf;
3716         struct sbuf *nmbuf;
3717         enum i40e_status_code status;
3718         int error = 0;
3719         u16 next = 0;
3720         u8 aq_buf[I40E_AQ_LARGE_BUF];
3721
3722         struct i40e_aqc_switch_config_element_resp *elem;
3723         struct i40e_aqc_get_switch_config_resp *sw_config;
3724         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3725
3726         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3727         if (!buf) {
3728                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3729                 return (ENOMEM);
3730         }
3731
3732         status = i40e_aq_get_switch_config(hw, sw_config,
3733             sizeof(aq_buf), &next, NULL);
3734         if (status) {
3735                 device_printf(dev,
3736                     "%s: aq_get_switch_config() error %s, aq error %s\n",
3737                     __func__, i40e_stat_str(hw, status),
3738                     i40e_aq_str(hw, hw->aq.asq_last_status));
3739                 sbuf_delete(buf);
3740                 return error;
3741         }
3742         if (next)
3743                 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3744                     __func__, next);
3745
3746         nmbuf = sbuf_new_auto();
3747         if (!nmbuf) {
3748                 device_printf(dev, "Could not allocate sbuf for name output.\n");
3749                 sbuf_delete(buf);
3750                 return (ENOMEM);
3751         }
3752
3753         /* Sort entries by SEID for display */
3754         qsort(sw_config->element, sw_config->header.num_reported,
3755             sizeof(struct i40e_aqc_switch_config_element_resp),
3756             &ixl_sw_cfg_elem_seid_cmp);
3757
3758         sbuf_cat(buf, "\n");
3759         /* Assuming <= 255 elements in switch */
3760         sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3761         sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3762         /* Exclude:
3763          * Revision -- all elements are revision 1 for now
3764          */
3765         sbuf_printf(buf,
3766             "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
3767             "                |                 |                 | (uplink)\n");
3768         for (int i = 0; i < sw_config->header.num_reported; i++) {
3769                 elem = &sw_config->element[i];
3770
3771                 /* Row format: "%4d (%8s) | %8s   %8s   %#8x" */
3772                 sbuf_printf(buf, "%4d", elem->seid);
3773                 sbuf_cat(buf, " ");
3774                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3775                     elem->element_type, elem->seid));
3776                 sbuf_cat(buf, " | ");
3777                 sbuf_printf(buf, "%4d", elem->uplink_seid);
3778                 sbuf_cat(buf, " ");
3779                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3780                     0, elem->uplink_seid));
3781                 sbuf_cat(buf, " | ");
3782                 sbuf_printf(buf, "%4d", elem->downlink_seid);
3783                 sbuf_cat(buf, " ");
3784                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3785                     0, elem->downlink_seid));
3786                 sbuf_cat(buf, " | ");
3787                 sbuf_printf(buf, "%8d", elem->connection_type);
3788                 if (i < sw_config->header.num_reported - 1)
3789                         sbuf_cat(buf, "\n");
3790         }
3791         sbuf_delete(nmbuf);
3792
3793         error = sbuf_finish(buf);
3794         if (error)
3795                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3796
3797         sbuf_delete(buf);
3798
3799         return (error);
3800 }
3801
3802 static int
3803 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3804 {
3805         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3806         struct i40e_hw *hw = &pf->hw;
3807         device_t dev = pf->dev;
3808         int requested_vlan = -1;
3809         enum i40e_status_code status = 0;
3810         int error = 0;
3811
3812         error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
3813         if ((error) || (req->newptr == NULL))
3814             return (error);
3815
3816         if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3817                 device_printf(dev, "Flags disallow setting of vlans\n");
3818                 return (ENODEV);
3819         }
3820
3821         hw->switch_tag = requested_vlan;
3822         device_printf(dev,
3823             "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3824             hw->switch_tag, hw->first_tag, hw->second_tag);
3825         status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3826         if (status) {
3827                 device_printf(dev,
3828                     "%s: aq_set_switch_config() error %s, aq error %s\n",
3829                     __func__, i40e_stat_str(hw, status),
3830                     i40e_aq_str(hw, hw->aq.asq_last_status));
3831                 return (status);
3832         }
3833         return (0);
3834 }
3835
3836 static int
3837 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
3838 {
3839         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3840         struct i40e_hw *hw = &pf->hw;
3841         device_t dev = pf->dev;
3842         struct sbuf *buf;
3843         int error = 0;
3844         enum i40e_status_code status;
3845         u32 reg;
3846
3847         struct i40e_aqc_get_set_rss_key_data key_data;
3848
3849         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3850         if (!buf) {
3851                 device_printf(dev, "Could not allocate sbuf for output.\n");
3852                 return (ENOMEM);
3853         }
3854
3855         bzero(&key_data, sizeof(key_data));
3856
3857         sbuf_cat(buf, "\n");
3858         if (hw->mac.type == I40E_MAC_X722) {
3859                 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
3860                 if (status)
3861                         device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
3862                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3863         } else {
3864                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
3865                         reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
3866                         bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
3867                 }
3868         }
3869
3870         ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
3871
3872         error = sbuf_finish(buf);
3873         if (error)
3874                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3875         sbuf_delete(buf);
3876
3877         return (error);
3878 }
3879
3880 static void
3881 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3882 {
3883         int i, j, k, width;
3884         char c;
3885
3886         if (length < 1 || buf == NULL) return;
3887
3888         int byte_stride = 16;
3889         int lines = length / byte_stride;
3890         int rem = length % byte_stride;
3891         if (rem > 0)
3892                 lines++;
3893
3894         for (i = 0; i < lines; i++) {
3895                 width = (rem > 0 && i == lines - 1)
3896                     ? rem : byte_stride;
3897
3898                 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3899
3900                 for (j = 0; j < width; j++)
3901                         sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
3902
3903                 if (width < byte_stride) {
3904                         for (k = 0; k < (byte_stride - width); k++)
3905                                 sbuf_printf(sb, "   ");
3906                 }
3907
3908                 if (!text) {
3909                         sbuf_printf(sb, "\n");
3910                         continue;
3911                 }
3912
3913                 for (j = 0; j < width; j++) {
3914                         c = (char)buf[i * byte_stride + j];
3915                         if (c < 32 || c > 126)
3916                                 sbuf_printf(sb, ".");
3917                         else
3918                                 sbuf_printf(sb, "%c", c);
3919
3920                         if (j == width - 1)
3921                                 sbuf_printf(sb, "\n");
3922                 }
3923         }
3924 }
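
/*
 * Example output (illustrative): dumping the three bytes { 0x52, 0x53, 0x53 }
 * with label_offset 0 and text == true produces a single line of the form
 *
 *     0 | 52 53 53 <padding> RSS
 *
 * i.e. a label column, up to 16 hex bytes per row, and an optional ASCII
 * column in which non-printable bytes are shown as '.'.
 */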
3925
3926 static int
3927 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3928 {
3929         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3930         struct i40e_hw *hw = &pf->hw;
3931         device_t dev = pf->dev;
3932         struct sbuf *buf;
3933         int error = 0;
3934         enum i40e_status_code status;
3935         u8 hlut[512];
3936         u32 reg;
3937
3938         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3939         if (!buf) {
3940                 device_printf(dev, "Could not allocate sbuf for output.\n");
3941                 return (ENOMEM);
3942         }
3943
3944         bzero(hlut, sizeof(hlut));
3945         sbuf_cat(buf, "\n");
3946         if (hw->mac.type == I40E_MAC_X722) {
3947                 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
3948                 if (status)
3949                         device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
3950                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3951         } else {
3952                 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
3953                         reg = rd32(hw, I40E_PFQF_HLUT(i));
3954                         bcopy(&reg, &hlut[i << 2], 4);
3955                 }
3956         }
3957         ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
3958
3959         error = sbuf_finish(buf);
3960         if (error)
3961                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3962         sbuf_delete(buf);
3963
3964         return (error);
3965 }
3966
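/*
 * Report the 64-bit RSS hash enable (HENA) value, assembled from the two
 * 32-bit I40E_PFQF_HENA registers.
 */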
3967 static int
3968 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
3969 {
3970         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3971         struct i40e_hw *hw = &pf->hw;
3972         u64 hena;
3973
3974         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3975             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3976
3977         return sysctl_handle_long(oidp, NULL, hena, req);
3978 }
3979
3980 /*
3981  * Sysctl to disable firmware's link management
3982  *
3983  * 1 - Disable link management on this port
3984  * 0 - Re-enable link management
3985  *
3986  * On normal NVMs, firmware manages link by default.
3987  */
3988 static int
3989 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3990 {
3991         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3992         struct i40e_hw *hw = &pf->hw;
3993         device_t dev = pf->dev;
3994         int requested_mode = -1;
3995         enum i40e_status_code status = 0;
3996         int error = 0;
3997
3998         /* Read in new mode */
3999         error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4000         if ((error) || (req->newptr == NULL))
4001                 return (error);
4002         /* Check for sane value */
4003         if (requested_mode < 0 || requested_mode > 1) {
4004                 device_printf(dev, "Valid modes are 0 or 1\n");
4005                 return (EINVAL);
4006         }
4007
4008         /* Set new mode */
4009         status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4010         if (status) {
4011                 device_printf(dev,
4012                     "%s: Error setting new phy debug mode %s,"
4013                     " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4014                     i40e_aq_str(hw, hw->aq.asq_last_status));
4015                 return (EIO);
4016         }
4017
4018         return (0);
4019 }
4020
4021 /*
4022  * Read some diagnostic data from a (Q)SFP+ module
4023  *
4024  *             SFP A2   QSFP Lower Page
4025  * Temperature 96-97    22-23
4026  * Vcc         98-99    26-27
4027  * TX power    102-103  34-35..40-41
4028  * RX power    104-105  50-51..56-57
4029  */
4030 static int
4031 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4032 {
4033         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4034         device_t dev = pf->dev;
4035         struct sbuf *sbuf;
4036         int error = 0;
4037         u8 output;
4038
4039         if (req->oldptr == NULL) {
4040                 error = SYSCTL_OUT(req, 0, 128);
4041                 return (0);
4042         }
4043
4044         error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4045         if (error) {
4046                 device_printf(dev, "Error reading from i2c\n");
4047                 return (error);
4048         }
4049
4050         /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4051         if (output == 0x3) {
4052                 /*
4053                  * Check for:
4054                  * - Internally calibrated data
4055                  * - Diagnostic monitoring is implemented
4056                  */
4057                 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4058                 if (!(output & 0x60)) {
4059                         device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4060                         return (0);
4061                 }
4062
4063                 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4064
4065                 for (u8 offset = 96; offset < 100; offset++) {
4066                         pf->read_i2c_byte(pf, offset, 0xA2, &output);
4067                         sbuf_printf(sbuf, "%02X ", output);
4068                 }
4069                 for (u8 offset = 102; offset < 106; offset++) {
4070                         pf->read_i2c_byte(pf, offset, 0xA2, &output);
4071                         sbuf_printf(sbuf, "%02X ", output);
4072                 }
4073         } else if (output == 0xD || output == 0x11) {
4074                 /*
4075                  * QSFP+ modules are always internally calibrated, and must indicate
4076                  * what types of diagnostic monitoring are implemented
4077                  */
4078                 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4079
4080                 for (u8 offset = 22; offset < 24; offset++) {
4081                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
4082                         sbuf_printf(sbuf, "%02X ", output);
4083                 }
4084                 for (u8 offset = 26; offset < 28; offset++) {
4085                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
4086                         sbuf_printf(sbuf, "%02X ", output);
4087                 }
4088                 /* Read the data from the first lane */
4089                 for (u8 offset = 34; offset < 36; offset++) {
4090                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
4091                         sbuf_printf(sbuf, "%02X ", output);
4092                 }
4093                 for (u8 offset = 50; offset < 52; offset++) {
4094                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
4095                         sbuf_printf(sbuf, "%02X ", output);
4096                 }
4097         } else {
4098                 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4099                 return (0);
4100         }
4101
4102         sbuf_finish(sbuf);
4103         sbuf_delete(sbuf);
4104
4105         return (0);
4106 }
4107
4108 /*
4109  * Sysctl to read a byte from I2C bus.
4110  *
4111  * Input: 32-bit value:
4112  *      bits 0-7:   device address (0xA0 or 0xA2)
4113  *      bits 8-15:  offset (0-255)
4114  *      bits 16-31: unused
4115  * Output: 8-bit value read
4116  */
4117 static int
4118 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4119 {
4120         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4121         device_t dev = pf->dev;
4122         int input = -1, error = 0;
4123         u8 dev_addr, offset, output;
4124
4125         /* Read in I2C read parameters */
4126         error = sysctl_handle_int(oidp, &input, 0, req);
4127         if ((error) || (req->newptr == NULL))
4128                 return (error);
4129         /* Validate device address */
4130         dev_addr = input & 0xFF;
4131         if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4132                 return (EINVAL);
4133         }
4134         offset = (input >> 8) & 0xFF;
4135
4136         error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4137         if (error)
4138                 return (error);
4139
4140         device_printf(dev, "%02X\n", output);
4141         return (0);
4142 }
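
/*
 * Example: writing 0x5CA0 to this sysctl reads offset 0x5C from device
 * address 0xA0 and logs the byte that was read.
 */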
4143
4144 /*
4145  * Sysctl to write a byte to the I2C bus.
4146  *
4147  * Input: 32-bit value:
4148  *      bits 0-7:   device address (0xA0 or 0xA2)
4149  *      bits 8-15:  offset (0-255)
4150  *      bits 16-23: value to write
4151  *      bits 24-31: unused
4152  * Output: 8-bit value written
4153  */
4154 static int
4155 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4156 {
4157         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4158         device_t dev = pf->dev;
4159         int input = -1, error = 0;
4160         u8 dev_addr, offset, value;
4161
4162         /* Read in I2C write parameters */
4163         error = sysctl_handle_int(oidp, &input, 0, req);
4164         if ((error) || (req->newptr == NULL))
4165                 return (error);
4166         /* Validate device address */
4167         dev_addr = input & 0xFF;
4168         if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4169                 return (EINVAL);
4170         }
4171         offset = (input >> 8) & 0xFF;
4172         value = (input >> 16) & 0xFF;
4173
4174         error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4175         if (error)
4176                 return (error);
4177
4178         device_printf(dev, "%02X written\n", value);
4179         return (0);
4180 }
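
/*
 * Example: writing 0xFF10A2 to this sysctl writes the value 0xFF to offset
 * 0x10 of device address 0xA2.
 */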
4181
4182 static int
4183 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4184     u8 bit_pos, int *is_set)
4185 {
4186         device_t dev = pf->dev;
4187         struct i40e_hw *hw = &pf->hw;
4188         enum i40e_status_code status;
4189
4190         if (IXL_PF_IN_RECOVERY_MODE(pf))
4191                 return (EIO);
4192
4193         status = i40e_aq_get_phy_capabilities(hw,
4194             FALSE, FALSE, abilities, NULL);
4195         if (status) {
4196                 device_printf(dev,
4197                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4198                     __func__, i40e_stat_str(hw, status),
4199                     i40e_aq_str(hw, hw->aq.asq_last_status));
4200                 return (EIO);
4201         }
4202
4203         *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4204         return (0);
4205 }
4206
4207 static int
4208 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4209     u8 bit_pos, int set)
4210 {
4211         device_t dev = pf->dev;
4212         struct i40e_hw *hw = &pf->hw;
4213         struct i40e_aq_set_phy_config config;
4214         enum i40e_status_code status;
4215
4216         /* Set new PHY config */
4217         memset(&config, 0, sizeof(config));
4218         config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4219         if (set)
4220                 config.fec_config |= bit_pos;
4221         if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4222                 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4223                 config.phy_type = abilities->phy_type;
4224                 config.phy_type_ext = abilities->phy_type_ext;
4225                 config.link_speed = abilities->link_speed;
4226                 config.eee_capability = abilities->eee_capability;
4227                 config.eeer = abilities->eeer_val;
4228                 config.low_power_ctrl = abilities->d3_lpan;
4229                 status = i40e_aq_set_phy_config(hw, &config, NULL);
4230
4231                 if (status) {
4232                         device_printf(dev,
4233                             "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4234                             __func__, i40e_stat_str(hw, status),
4235                             i40e_aq_str(hw, hw->aq.asq_last_status));
4236                         return (EIO);
4237                 }
4238         }
4239
4240         return (0);
4241 }
4242
4243 static int
4244 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4245 {
4246         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4247         int mode, error = 0;
4248
4249         struct i40e_aq_get_phy_abilities_resp abilities;
4250         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4251         if (error)
4252                 return (error);
4253         /* Read in new mode */
4254         error = sysctl_handle_int(oidp, &mode, 0, req);
4255         if ((error) || (req->newptr == NULL))
4256                 return (error);
4257
4258         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4259 }
4260
4261 static int
4262 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4263 {
4264         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4265         int mode, error = 0;
4266
4267         struct i40e_aq_get_phy_abilities_resp abilities;
4268         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4269         if (error)
4270                 return (error);
4271         /* Read in new mode */
4272         error = sysctl_handle_int(oidp, &mode, 0, req);
4273         if ((error) || (req->newptr == NULL))
4274                 return (error);
4275
4276         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4277 }
4278
4279 static int
4280 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4281 {
4282         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4283         int mode, error = 0;
4284
4285         struct i40e_aq_get_phy_abilities_resp abilities;
4286         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4287         if (error)
4288                 return (error);
4289         /* Read in new mode */
4290         error = sysctl_handle_int(oidp, &mode, 0, req);
4291         if ((error) || (req->newptr == NULL))
4292                 return (error);
4293
4294         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4295 }
4296
4297 static int
4298 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4299 {
4300         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4301         int mode, error = 0;
4302
4303         struct i40e_aq_get_phy_abilities_resp abilities;
4304         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4305         if (error)
4306                 return (error);
4307         /* Read in new mode */
4308         error = sysctl_handle_int(oidp, &mode, 0, req);
4309         if ((error) || (req->newptr == NULL))
4310                 return (error);
4311
4312         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4313 }
4314
4315 static int
4316 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4317 {
4318         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4319         int mode, error = 0;
4320
4321         struct i40e_aq_get_phy_abilities_resp abilities;
4322         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4323         if (error)
4324                 return (error);
4325         /* Read in new mode */
4326         error = sysctl_handle_int(oidp, &mode, 0, req);
4327         if ((error) || (req->newptr == NULL))
4328                 return (error);
4329
4330         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4331 }
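
The five FEC sysctl handlers above share one pattern: ixl_get_fec_config() reports the current bit from fec_cfg_curr_mod_ext_info, sysctl_handle_int() reads the requested state, and ixl_set_fec_config() issues a set-PHY-config AQ command only when the bit actually changes. A minimal userland sketch of driving one of them, assuming an OID path of "dev.ixl.0.fec.rs_requested" (the real node names are registered elsewhere in this file):

/* Hypothetical sketch: read the current RS-FEC request bit and set it to 1. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int cur = 0, req = 1;
	size_t len = sizeof(cur);

	if (sysctlbyname("dev.ixl.0.fec.rs_requested", &cur, &len,
	    &req, sizeof(req)) == -1)
		err(1, "fec.rs_requested");	/* assumed OID path */
	printf("RS-FEC request was %d, now 1\n", cur);
	return (0);
}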
4332
4333 static int
4334 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4335 {
4336         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4337         struct i40e_hw *hw = &pf->hw;
4338         device_t dev = pf->dev;
4339         struct sbuf *buf;
4340         int error = 0;
4341         enum i40e_status_code status;
4342
4343         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4344         if (!buf) {
4345                 device_printf(dev, "Could not allocate sbuf for output.\n");
4346                 return (ENOMEM);
4347         }
4348
4349         u8 *final_buff;
4350         /* This amount is only necessary if reading the entire cluster into memory */
4351 #define IXL_FINAL_BUFF_SIZE     (1280 * 1024)
4352         final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4353         if (final_buff == NULL) {
4354                 device_printf(dev, "Could not allocate memory for output.\n");
4355                 goto out;
4356         }
4357         int final_buff_len = 0;
4358
4359         u8 cluster_id = 1;
4360         bool more = true;
4361
4362         u8 dump_buf[4096];
4363         u16 curr_buff_size = 4096;
4364         u8 curr_next_table = 0;
4365         u32 curr_next_index = 0;
4366
4367         u16 ret_buff_size;
4368         u8 ret_next_table;
4369         u32 ret_next_index;
4370
4371         sbuf_cat(buf, "\n");
4372
4373         while (more) {
4374                 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4375                     dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4376                 if (status) {
4377                         device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4378                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4379                         goto free_out;
4380                 }
4381
4382                 /* copy info out of temp buffer */
4383                 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4384                 final_buff_len += ret_buff_size;
4385
4386                 if (ret_next_table != curr_next_table) {
4387                         /* We're done with the current table; we can dump out read data. */
4388                         sbuf_printf(buf, "%d:", curr_next_table);
4389                         int bytes_printed = 0;
4390                         while (bytes_printed < final_buff_len) {
4391                                 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4392                                 bytes_printed += 16;
4393                         }
4394                         sbuf_cat(buf, "\n");
4395
4396                         /* The entire cluster has been read; we're finished */
4397                         if (ret_next_table == 0xFF)
4398                                 break;
4399
4400                         /* Otherwise clear the output buffer and continue reading */
4401                         bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4402                         final_buff_len = 0;
4403                 }
4404
4405                 if (ret_next_index == 0xFFFFFFFF)
4406                         ret_next_index = 0;
4407
4408                 bzero(dump_buf, sizeof(dump_buf));
4409                 curr_next_table = ret_next_table;
4410                 curr_next_index = ret_next_index;
4411         }
4412
4413 free_out:
4414         free(final_buff, M_IXL);
4415 out:
4416         error = sbuf_finish(buf);
4417         if (error)
4418                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4419         sbuf_delete(buf);
4420
4421         return (error);
4422 }
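
The dump loop above walks a debug cluster table by table: i40e_aq_debug_dump() returns next-table/next-index cookies, the data accumulates in final_buff (which, per the comment at its allocation, is assumed large enough to hold an entire table), and each completed table is hex-dumped into the sbuf. Because the output is sbuf-backed and variable-length, a userland reader sizes the buffer first; a minimal sketch, assuming an OID path of "dev.ixl.0.debug.dump_debug_data":

/* Hypothetical sketch: two-step read of a variable-length sysctl. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char *oid = "dev.ixl.0.debug.dump_debug_data";	/* assumed OID path */
	size_t len = 0;
	char *out;

	/* First call reports the required length; it may grow before the second call. */
	if (sysctlbyname(oid, NULL, &len, NULL, 0) == -1)
		err(1, "%s (size)", oid);
	if ((out = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctlbyname(oid, out, &len, NULL, 0) == -1)
		err(1, "%s (read)", oid);
	fwrite(out, 1, len, stdout);
	free(out);
	return (0);
}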
4423
4424 static int
4425 ixl_start_fw_lldp(struct ixl_pf *pf)
4426 {
4427         struct i40e_hw *hw = &pf->hw;
4428         enum i40e_status_code status;
4429
4430         status = i40e_aq_start_lldp(hw, false, NULL);
4431         if (status != I40E_SUCCESS) {
4432                 switch (hw->aq.asq_last_status) {
4433                 case I40E_AQ_RC_EEXIST:
4434                         device_printf(pf->dev,
4435                             "FW LLDP agent is already running\n");
4436                         break;
4437                 case I40E_AQ_RC_EPERM:
4438                         device_printf(pf->dev,
4439                             "Device configuration forbids SW from starting "
4440                             "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4441                             "attribute to \"Enabled\" to use this sysctl\n");
4442                         return (EINVAL);
4443                 default:
4444                         device_printf(pf->dev,
4445                             "Starting FW LLDP agent failed: error: %s, %s\n",
4446                             i40e_stat_str(hw, status),
4447                             i40e_aq_str(hw, hw->aq.asq_last_status));
4448                         return (EINVAL);
4449                 }
4450         }
4451
4452         atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4453         return (0);
4454 }
4455
4456 static int
4457 ixl_stop_fw_lldp(struct ixl_pf *pf)
4458 {
4459         struct i40e_hw *hw = &pf->hw;
4460         device_t dev = pf->dev;
4461         enum i40e_status_code status;
4462
4463         if (hw->func_caps.npar_enable != 0) {
4464                 device_printf(dev,
4465                     "Disabling FW LLDP agent is not supported on this device\n");
4466                 return (EINVAL);
4467         }
4468
4469         if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4470                 device_printf(dev,
4471                     "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4472                 return (EINVAL);
4473         }
4474
4475         status = i40e_aq_stop_lldp(hw, true, false, NULL);
4476         if (status != I40E_SUCCESS) {
4477                 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4478                         device_printf(dev,
4479                             "Disabling FW LLDP agent failed: error: %s, %s\n",
4480                             i40e_stat_str(hw, status),
4481                             i40e_aq_str(hw, hw->aq.asq_last_status));
4482                         return (EINVAL);
4483                 }
4484
4485                 device_printf(dev, "FW LLDP agent is already stopped\n");
4486         }
4487
4488         i40e_aq_set_dcb_parameters(hw, true, NULL);
4489         atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4490         return (0);
4491 }
4492
4493 static int
4494 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4495 {
4496         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4497         int state, new_state, error = 0;
4498
4499         state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4500
4501         /* Read in new mode */
4502         error = sysctl_handle_int(oidp, &new_state, 0, req);
4503         if ((error) || (req->newptr == NULL))
4504                 return (error);
4505
4506         /* Already in requested state */
4507         if (new_state == state)
4508                 return (error);
4509
4510         if (new_state == 0)
4511                 return ixl_stop_fw_lldp(pf);
4512
4513         return ixl_start_fw_lldp(pf);
4514 }
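
ixl_sysctl_fw_lldp() exposes the agent state as a 0/1 value (1 means the FW LLDP agent is running) and returns early when the requested state matches the current one, so repeated writes are harmless. A minimal sketch of disabling the agent, assuming an OID path of "dev.ixl.0.fw_lldp":

/* Hypothetical sketch: stop the firmware LLDP agent. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>

int
main(void)
{
	int off = 0;

	if (sysctlbyname("dev.ixl.0.fw_lldp", NULL, NULL,
	    &off, sizeof(off)) == -1)
		err(1, "fw_lldp");	/* assumed OID path */
	return (0);
}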
4515
4516 static int
4517 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4518 {
4519         struct ixl_pf         *pf = (struct ixl_pf *)arg1;
4520         int                   state, new_state;
4521         int                   sysctl_handle_status = 0;
4522         enum i40e_status_code cmd_status;
4523
4524         /* Init states' values */
4525         state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
4526
4527         /* Get requested mode */
4528         sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4529         if ((sysctl_handle_status) || (req->newptr == NULL))
4530                 return (sysctl_handle_status);
4531
4532         /* Check if state has changed */
4533         if (new_state == state)
4534                 return (0);
4535
4536         /* Set new state */
4537         cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4538
4539         /* Save new state or report error */
4540         if (!cmd_status) {
4541                 if (new_state == 0)
4542                         atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4543                 else
4544                         atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4545         } else if (cmd_status == I40E_ERR_CONFIG)
4546                 return (EPERM);
4547         else
4548                 return (EIO);
4549
4550         return (0);
4551 }
4552
4553 static int
4554 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4555 {
4556         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4557         int error, state;
4558
4559         state = !!(atomic_load_acq_32(&pf->state) &
4560             IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4561
4562         error = sysctl_handle_int(oidp, &state, 0, req);
4563         if ((error) || (req->newptr == NULL))
4564                 return (error);
4565
4566         if (state == 0)
4567                 atomic_clear_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4568         else
4569                 atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4570
4571         return (0);
4572 }
4573
4574
4575 int
4576 ixl_attach_get_link_status(struct ixl_pf *pf)
4577 {
4578         struct i40e_hw *hw = &pf->hw;
4579         device_t dev = pf->dev;
4580         int error = 0;
4581
4582         if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4583             (hw->aq.fw_maj_ver < 4)) {
4584                 i40e_msec_delay(75);
4585                 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4586                 if (error) {
4587                         device_printf(dev, "link restart failed, aq_err=%d\n",
4588                             hw->aq.asq_last_status);
4589                         return (error);
4590                 }
4591         }
4592
4593         /* Determine link state */
4594         hw->phy.get_link_info = TRUE;
4595         i40e_get_link_status(hw, &pf->link_up);
4596
4597         /* Flow Control mode not set by user, read current FW settings */
4598         if (pf->fc == -1)
4599                 pf->fc = hw->fc.current_mode;
4600
4601         return (0);
4602 }
4603
4604 static int
4605 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4606 {
4607         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4608         int requested = 0, error = 0;
4609
4610         /* Read in new mode */
4611         error = sysctl_handle_int(oidp, &requested, 0, req);
4612         if ((error) || (req->newptr == NULL))
4613                 return (error);
4614
4615         /* Initiate the PF reset later in the admin task */
4616         atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4617
4618         return (error);
4619 }
4620
4621 static int
4622 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4623 {
4624         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4625         struct i40e_hw *hw = &pf->hw;
4626         int requested = 0, error = 0;
4627
4628         /* Read in new mode */
4629         error = sysctl_handle_int(oidp, &requested, 0, req);
4630         if ((error) || (req->newptr == NULL))
4631                 return (error);
4632
4633         wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4634
4635         return (error);
4636 }
4637
4638 static int
4639 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4640 {
4641         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4642         struct i40e_hw *hw = &pf->hw;
4643         int requested = 0, error = 0;
4644
4645         /* Read in new mode */
4646         error = sysctl_handle_int(oidp, &requested, 0, req);
4647         if ((error) || (req->newptr == NULL))
4648                 return (error);
4649
4650         wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4651
4652         return (error);
4653 }
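
For all three reset sysctls the written value itself is ignored; any successful write is the trigger. The PF reset is only flagged here (IXL_PF_STATE_PF_RESET_REQ) and performed later by the admin task, while the CORE and GLOBAL variants write I40E_GLGEN_RTRIG immediately. A minimal sketch of requesting the deferred PF reset, assuming an OID path of "dev.ixl.0.debug.do_pf_reset":

/* Hypothetical sketch: arm a PF reset; the value written is ignored. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>

int
main(void)
{
	int v = 1;

	if (sysctlbyname("dev.ixl.0.debug.do_pf_reset", NULL, NULL,
	    &v, sizeof(v)) == -1)
		err(1, "do_pf_reset");	/* assumed OID path */
	return (0);
}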
4654
4655 /*
4656  * Print out the mapping of Tx and Rx queue indexes
4657  * to MSI-X vectors.
4658  */
4659 static int
4660 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4661 {
4662         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4663         struct ixl_vsi *vsi = &pf->vsi;
4664         device_t dev = pf->dev;
4665         struct sbuf *buf;
4666         int error = 0;
4667
4668         struct ixl_rx_queue *rx_que = vsi->rx_queues;
4669         struct ixl_tx_queue *tx_que = vsi->tx_queues;
4670
4671         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4672         if (!buf) {
4673                 device_printf(dev, "Could not allocate sbuf for output.\n");
4674                 return (ENOMEM);
4675         }
4676
4677         sbuf_cat(buf, "\n");
4678         for (int i = 0; i < vsi->num_rx_queues; i++) {
4679                 rx_que = &vsi->rx_queues[i];
4680                 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4681         }
4682         for (int i = 0; i < vsi->num_tx_queues; i++) {
4683                 tx_que = &vsi->tx_queues[i];
4684                 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4685         }
4686
4687         error = sbuf_finish(buf);
4688         if (error)
4689                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4690         sbuf_delete(buf);
4691
4692         return (error);
4693 }