ixl(4): Report RX errors as sum of all RX error counters
1 /******************************************************************************
2
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #include "ixl_pf.h"
37
38 #ifdef PCI_IOV
39 #include "ixl_pf_iov.h"
40 #endif
41
42 #ifdef IXL_IW
43 #include "ixl_iw.h"
44 #include "ixl_iw_int.h"
45 #endif
46
47 static u8       ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void     ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50 static u_int    ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51 static u_int    ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52 static char *   ixl_switch_element_string(struct sbuf *, u8, u16);
53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
54
55 /* Sysctls */
56 static int      ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57 static int      ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58 static int      ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59 static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60 static int      ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61 static int      ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62 static int      ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
63
64 static int      ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
65
66 /* Debug Sysctls */
67 static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
68 static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
69 static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
70 static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
71 static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
72 static int      ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
73 static int      ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
74 static int      ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
75 static int      ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
76 static int      ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
77 static int      ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
78 static int      ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
79 static int      ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
80 static int      ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
81 static int      ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
82 static int      ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
83 static int      ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
84 static int      ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
85 static int      ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
86 static int      ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
87
88 /* Debug Sysctls: resets and queue state */
89 static int      ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
90 static int      ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
91 static int      ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
92 static int      ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
93 #ifdef IXL_DEBUG
94 static int      ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
95 static int      ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
96 #endif
97
98 #ifdef IXL_IW
99 extern int ixl_enable_iwarp;
100 extern int ixl_limit_iwarp_msix;
101 #endif
102
103 static const char * const ixl_fc_string[6] = {
104         "None",
105         "Rx",
106         "Tx",
107         "Full",
108         "Priority",
109         "Default"
110 };
111
112 static char *ixl_fec_string[3] = {
113        "CL108 RS-FEC",
114        "CL74 FC-FEC/BASE-R",
115        "None"
116 };
117
118 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
119
120 /*
121 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
122 */
123 void
124 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
125 {
126         u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
127         u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
128         u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
129
130         sbuf_printf(buf,
131             "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
132             hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
133             hw->aq.api_maj_ver, hw->aq.api_min_ver,
134             (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
135             IXL_NVM_VERSION_HI_SHIFT,
136             (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
137             IXL_NVM_VERSION_LO_SHIFT,
138             hw->nvm.eetrack,
139             oem_ver, oem_build, oem_patch);
140 }
141
142 void
143 ixl_print_nvm_version(struct ixl_pf *pf)
144 {
145         struct i40e_hw *hw = &pf->hw;
146         device_t dev = pf->dev;
147         struct sbuf *sbuf;
148
149         sbuf = sbuf_new_auto();
150         ixl_nvm_version_str(hw, sbuf);
151         sbuf_finish(sbuf);
152         device_printf(dev, "%s\n", sbuf_data(sbuf));
153         sbuf_delete(sbuf);
154 }
155
156 /**
157  * ixl_get_fw_mode - Check the state of FW
158  * @pf: PF structure
159  *
160  * Identify state of FW. It might be in a recovery mode
161  * which limits functionality and requires special handling
162  * from the driver.
163  *
164  * @returns FW mode (normal, recovery, unexpected EMP reset)
165  */
166 static enum ixl_fw_mode
167 ixl_get_fw_mode(struct ixl_pf *pf)
168 {
169         struct i40e_hw *hw = &pf->hw;
170         enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
171         u32 fwsts;
172
173 #ifdef IXL_DEBUG
174         if (pf->recovery_mode)
175                 return IXL_FW_MODE_RECOVERY;
176 #endif
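        /*
         * Read the FWS1B field of the global FW status register; the checks
         * below classify its value as normal operation, recovery mode, or an
         * unexpected EMP reset condition.
         */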
177         fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
178
179         /* FWS1B is set and holds one of the expected recovery-mode values */
180         if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
181             fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
182             fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
183             fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
184                 fw_mode = IXL_FW_MODE_RECOVERY;
185         else {
186                 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
187                     fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
188                         fw_mode = IXL_FW_MODE_UEMPR;
189         }
190         return (fw_mode);
191 }
192
193 /**
194  * ixl_pf_reset - Reset the PF
195  * @pf: PF structure
196  *
197  * Ensure that FW is in the right state and do the reset
198  * if needed.
199  *
200  * @returns zero on success, or an error code on failure.
201  */
202 int
203 ixl_pf_reset(struct ixl_pf *pf)
204 {
205         struct i40e_hw *hw = &pf->hw;
206         enum i40e_status_code status;
207         enum ixl_fw_mode fw_mode;
208
209         fw_mode = ixl_get_fw_mode(pf);
210         ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
211         if (fw_mode == IXL_FW_MODE_RECOVERY) {
212                 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
213                 /* Don't try to reset device if it's in recovery mode */
214                 return (0);
215         }
216
217         status = i40e_pf_reset(hw);
218         if (status == I40E_SUCCESS)
219                 return (0);
220
221         /* Check FW mode again in case it has changed while
222          * waiting for reset to complete */
223         fw_mode = ixl_get_fw_mode(pf);
224         ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
225         if (fw_mode == IXL_FW_MODE_RECOVERY) {
226                 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
227                 return (0);
228         }
229
230         if (fw_mode == IXL_FW_MODE_UEMPR)
231                 device_printf(pf->dev,
232                     "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
233         else
234                 device_printf(pf->dev, "PF reset failure %s\n",
235                     i40e_stat_str(hw, status));
236         return (EIO);
237 }
238
239 /**
240  * ixl_setup_hmc - Setup LAN Host Memory Cache
241  * @pf: PF structure
242  *
243  * Init and configure LAN Host Memory Cache
244  *
245  * @returns 0 on success, EIO on error
246  */
247 int
248 ixl_setup_hmc(struct ixl_pf *pf)
249 {
250         struct i40e_hw *hw = &pf->hw;
251         enum i40e_status_code status;
252
253         status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
254             hw->func_caps.num_rx_qp, 0, 0);
255         if (status) {
256                 device_printf(pf->dev, "init_lan_hmc failed: %s\n",
257                     i40e_stat_str(hw, status));
258                 return (EIO);
259         }
260
261         status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
262         if (status) {
263                 device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
264                     i40e_stat_str(hw, status));
265                 return (EIO);
266         }
267
268         return (0);
269 }
270
271 /**
272  * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
273  * @pf: PF structure
274  *
275  * Shutdown Host Memory Cache if configured.
276  *
277  */
278 void
279 ixl_shutdown_hmc(struct ixl_pf *pf)
280 {
281         struct i40e_hw *hw = &pf->hw;
282         enum i40e_status_code status;
283
284         /* HMC not configured, no need to shutdown */
285         if (hw->hmc.hmc_obj == NULL)
286                 return;
287
288         status = i40e_shutdown_lan_hmc(hw);
289         if (status)
290                 device_printf(pf->dev,
291                     "Shutdown LAN HMC failed with code %s\n",
292                     i40e_stat_str(hw, status));
293 }
294 /*
295  * Write PF ITR values to queue ITR registers.
296  */
297 void
298 ixl_configure_itr(struct ixl_pf *pf)
299 {
300         ixl_configure_tx_itr(pf);
301         ixl_configure_rx_itr(pf);
302 }
303
304 /*********************************************************************
305  *
306  *  Get the hardware capabilities
307  *
308  **********************************************************************/
309
310 int
311 ixl_get_hw_capabilities(struct ixl_pf *pf)
312 {
313         struct i40e_aqc_list_capabilities_element_resp *buf;
314         struct i40e_hw  *hw = &pf->hw;
315         device_t        dev = pf->dev;
316         enum i40e_status_code status;
317         int len, i2c_intfc_num;
318         bool again = TRUE;
319         u16 needed;
320
321         if (IXL_PF_IN_RECOVERY_MODE(pf)) {
322                 hw->func_caps.iwarp = 0;
323                 return (0);
324         }
325
326         len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
327 retry:
328         if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
329             malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
330                 device_printf(dev, "Unable to allocate cap memory\n");
331                 return (ENOMEM);
332         }
333
334         /* This populates the hw struct */
335         status = i40e_aq_discover_capabilities(hw, buf, len,
336             &needed, i40e_aqc_opc_list_func_capabilities, NULL);
337         free(buf, M_IXL);
338         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
339             (again == TRUE)) {
340                 /* retry once with a larger buffer */
341                 again = FALSE;
342                 len = needed;
343                 goto retry;
344         } else if (status != I40E_SUCCESS) {
345                 device_printf(dev, "capability discovery failed; status %s, error %s\n",
346                     i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
347                 return (ENODEV);
348         }
349
350         /*
351          * Some devices have both MDIO and I2C; since this isn't reported
352          * by the FW, check registers to see if an I2C interface exists.
353          */
354         i2c_intfc_num = ixl_find_i2c_interface(pf);
355         if (i2c_intfc_num != -1)
356                 pf->has_i2c = true;
357
358         /* Determine functions to use for driver I2C accesses */
359         switch (pf->i2c_access_method) {
360         case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
361                 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
362                         pf->read_i2c_byte = ixl_read_i2c_byte_aq;
363                         pf->write_i2c_byte = ixl_write_i2c_byte_aq;
364                 } else {
365                         pf->read_i2c_byte = ixl_read_i2c_byte_reg;
366                         pf->write_i2c_byte = ixl_write_i2c_byte_reg;
367                 }
368                 break;
369         }
370         case IXL_I2C_ACCESS_METHOD_AQ:
371                 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
372                 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
373                 break;
374         case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
375                 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
376                 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
377                 break;
378         case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
379                 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
380                 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
381                 break;
382         default:
383                 /* Should not happen */
384                 device_printf(dev, "Error setting I2C access functions\n");
385                 break;
386         }
387
388         /* Print a subset of the capability information. */
389         device_printf(dev,
390             "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
391             hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
392             hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
393             (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
394             (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
395             (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
396             "MDIO shared");
397
398         return (0);
399 }
400
401 /* For the set_advertise sysctl */
402 void
403 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
404 {
405         device_t dev = pf->dev;
406         int err;
407
408         /* Make sure to initialize the device to the complete list of
409          * supported speeds on driver load, to ensure unloading and
410          * reloading the driver will restore this value.
411          */
412         err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
413         if (err) {
414                 /* Non-fatal error */
415                 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
416                               __func__, err);
417                 return;
418         }
419
420         pf->advertised_speed =
421             ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
422 }
423
424 int
425 ixl_teardown_hw_structs(struct ixl_pf *pf)
426 {
427         enum i40e_status_code status = 0;
428         struct i40e_hw *hw = &pf->hw;
429         device_t dev = pf->dev;
430
431         /* Shutdown LAN HMC */
432         if (hw->hmc.hmc_obj) {
433                 status = i40e_shutdown_lan_hmc(hw);
434                 if (status) {
435                         device_printf(dev,
436                             "init: LAN HMC shutdown failure; status %s\n",
437                             i40e_stat_str(hw, status));
438                         goto err_out;
439                 }
440         }
441
442         /* Shutdown admin queue */
443         ixl_disable_intr0(hw);
444         status = i40e_shutdown_adminq(hw);
445         if (status)
446                 device_printf(dev,
447                     "init: Admin Queue shutdown failure; status %s\n",
448                     i40e_stat_str(hw, status));
449
450         ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
451 err_out:
452         return (status);
453 }
454
455 /*
456 ** Creates a new filter with the given MAC address and VLAN ID
457 */
458 static struct ixl_mac_filter *
459 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
460 {
461         struct ixl_mac_filter  *f;
462
463         /* create a new empty filter */
464         f = malloc(sizeof(struct ixl_mac_filter),
465             M_IXL, M_NOWAIT | M_ZERO);
466         if (f) {
467                 LIST_INSERT_HEAD(headp, f, ftle);
468                 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
469                 f->vlan = vlan;
470         }
471
472         return (f);
473 }
474
475 /**
476  * ixl_free_filters - Free all filters in given list
477  * @headp: pointer to list head
478  *
479  * Frees memory used by each entry in the list.
480  * Does not remove filters from HW.
481  */
482 void
483 ixl_free_filters(struct ixl_ftl_head *headp)
484 {
485         struct ixl_mac_filter *f, *nf;
486
487         f = LIST_FIRST(headp);
488         while (f != NULL) {
489                 nf = LIST_NEXT(f, ftle);
490                 free(f, M_IXL);
491                 f = nf;
492         }
493
494         LIST_INIT(headp);
495 }
496
497 static u_int
498 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
499 {
500         struct ixl_add_maddr_arg *ama = arg;
501         struct ixl_vsi *vsi = ama->vsi;
502         const u8 *macaddr = (u8*)LLADDR(sdl);
503         struct ixl_mac_filter *f;
504
505         /* Does one already exist */
506         f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
507         if (f != NULL)
508                 return (0);
509
510         f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
511         if (f == NULL) {
512                 device_printf(vsi->dev, "WARNING: no filter available!!\n");
513                 return (0);
514         }
515         f->flags |= IXL_FILTER_MC;
516
517         return (1);
518 }
519
520 /*********************************************************************
521  *      Filter Routines
522  *
523  *      Routines for multicast and vlan filter management.
524  *
525  *********************************************************************/
526 void
527 ixl_add_multi(struct ixl_vsi *vsi)
528 {
529         struct ifnet            *ifp = vsi->ifp;
530         struct i40e_hw          *hw = vsi->hw;
531         int                     mcnt = 0;
532         struct ixl_add_maddr_arg cb_arg;
533
534         IOCTL_DEBUGOUT("ixl_add_multi: begin");
535
536         mcnt = if_llmaddr_count(ifp);
537         if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
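                /*
                 * More multicast addresses than HW filter slots: fall back to
                 * multicast promiscuous mode on this VSI and drop the
                 * individual MC filters.
                 */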
538                 i40e_aq_set_vsi_multicast_promiscuous(hw,
539                     vsi->seid, TRUE, NULL);
540                 /* delete all existing MC filters */
541                 ixl_del_multi(vsi, true);
542                 return;
543         }
544
545         cb_arg.vsi = vsi;
546         LIST_INIT(&cb_arg.to_add);
547
548         mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
549         if (mcnt > 0)
550                 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
551
552         IOCTL_DEBUGOUT("ixl_add_multi: end");
553 }
554
555 static u_int
556 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
557 {
558         struct ixl_mac_filter *f = arg;
559
560         if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
561                 return (1);
562         else
563                 return (0);
564 }
565
566 void
567 ixl_del_multi(struct ixl_vsi *vsi, bool all)
568 {
569         struct ixl_ftl_head     to_del;
570         struct ifnet            *ifp = vsi->ifp;
571         struct ixl_mac_filter   *f, *fn;
572         int                     mcnt = 0;
573
574         IOCTL_DEBUGOUT("ixl_del_multi: begin");
575
576         LIST_INIT(&to_del);
577         /* Search for removed multicast addresses */
578         LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
579                 if ((f->flags & IXL_FILTER_MC) == 0 ||
580                     (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
581                         continue;
582
583                 LIST_REMOVE(f, ftle);
584                 LIST_INSERT_HEAD(&to_del, f, ftle);
585                 mcnt++;
586         }
587
588         if (mcnt > 0)
589                 ixl_del_hw_filters(vsi, &to_del, mcnt);
590 }
591
592 void
593 ixl_link_up_msg(struct ixl_pf *pf)
594 {
595         struct i40e_hw *hw = &pf->hw;
596         struct ifnet *ifp = pf->vsi.ifp;
597         char *req_fec_string, *neg_fec_string;
598         u8 fec_abilities;
599
600         fec_abilities = hw->phy.link_info.req_fec_info;
601         /* If both RS and KR are requested, only show RS */
602         if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
603                 req_fec_string = ixl_fec_string[0];
604         else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
605                 req_fec_string = ixl_fec_string[1];
606         else
607                 req_fec_string = ixl_fec_string[2];
608
609         if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
610                 neg_fec_string = ixl_fec_string[0];
611         else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
612                 neg_fec_string = ixl_fec_string[1];
613         else
614                 neg_fec_string = ixl_fec_string[2];
615
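        /*
         * The flow control string below comes from ixl_fc_string: "Full" when
         * both PAUSE directions were negotiated, "Tx" or "Rx" for a single
         * direction, otherwise "None".
         */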
616         log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
617             ifp->if_xname,
618             ixl_link_speed_string(hw->phy.link_info.link_speed),
619             req_fec_string, neg_fec_string,
620             (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
621             (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
622                 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
623                 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
624                 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
625                 ixl_fc_string[1] : ixl_fc_string[0]);
626 }
627
628 /*
629  * Configure admin queue/misc interrupt cause registers in hardware.
630  */
631 void
632 ixl_configure_intr0_msix(struct ixl_pf *pf)
633 {
634         struct i40e_hw *hw = &pf->hw;
635         u32 reg;
636
637         /* First set up the adminq - vector 0 */
638         wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
639         rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
640
641         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
642             I40E_PFINT_ICR0_ENA_GRST_MASK |
643             I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
644             I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
645             I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
646             I40E_PFINT_ICR0_ENA_VFLR_MASK |
647             I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
648             I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
649         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
650
651         /*
652          * 0x7FF is the end of the queue list.
653          * This means we won't use MSI-X vector 0 for a queue interrupt
654          * in MSI-X mode.
655          */
656         wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
657         /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
658         wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
659
660         wr32(hw, I40E_PFINT_DYN_CTL0,
661             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
662             I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
663
664         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
665 }
666
667 void
668 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
669 {
670         /* Display supported media types */
671         if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
672                 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
673
674         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
675                 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
676         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
677                 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
678         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
679                 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
680
681         if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
682                 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
683
684         if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
685                 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
686
687         if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
688             phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
689             phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
690                 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
691
692         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
693                 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
694         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
695                 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
696         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
697                 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
698
699         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
700             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
701             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
702             phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
703             phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
704                 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
705         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
706                 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
707         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
708                 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
709
710         if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
711                 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
712
713         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
714             || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
715                 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
716         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
717                 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
718         if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
719                 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
720         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
721                 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
722         if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
723                 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
724
725         if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
726                 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
727
728         if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
729                 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
730         if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
731                 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
732
733         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
734                 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
735         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
736                 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
737         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
738                 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
739         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
740                 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
741         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
742                 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
743         if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
744                 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
745 }
746
747 /*********************************************************************
748  *
749  *  Get Firmware Switch configuration
750  *      - this will need to be more robust when more complex
751  *        switch configurations are enabled.
752  *
753  **********************************************************************/
754 int
755 ixl_switch_config(struct ixl_pf *pf)
756 {
757         struct i40e_hw  *hw = &pf->hw; 
758         struct ixl_vsi  *vsi = &pf->vsi;
759         device_t        dev = iflib_get_dev(vsi->ctx);
760         struct i40e_aqc_get_switch_config_resp *sw_config;
761         u8      aq_buf[I40E_AQ_LARGE_BUF];
762         int     ret;
763         u16     next = 0;
764
765         memset(&aq_buf, 0, sizeof(aq_buf));
766         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
767         ret = i40e_aq_get_switch_config(hw, sw_config,
768             sizeof(aq_buf), &next, NULL);
769         if (ret) {
770                 device_printf(dev, "aq_get_switch_config() failed, error %d,"
771                     " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
772                 return (ret);
773         }
774         if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
775                 device_printf(dev,
776                     "Switch config: header reported: %d in structure, %d total\n",
777                     LE16_TO_CPU(sw_config->header.num_reported),
778                     LE16_TO_CPU(sw_config->header.num_total));
779                 for (int i = 0;
780                     i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
781                         device_printf(dev,
782                             "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
783                             sw_config->element[i].element_type,
784                             LE16_TO_CPU(sw_config->element[i].seid),
785                             LE16_TO_CPU(sw_config->element[i].uplink_seid),
786                             LE16_TO_CPU(sw_config->element[i].downlink_seid));
787                 }
788         }
789         /* Simplified due to a single VSI */
790         vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
791         vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
792         vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
793         return (ret);
794 }
795
796 void
797 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
798 {
799         struct sysctl_oid *tree;
800         struct sysctl_oid_list *child;
801         struct sysctl_oid_list *vsi_list;
802
803         tree = device_get_sysctl_tree(vsi->dev);
804         child = SYSCTL_CHILDREN(tree);
805         vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
806                         CTLFLAG_RD, NULL, "VSI Number");
807
808         vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
809         ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
810
811         /* Copy of netstat RX errors counter for validation purposes */
812         SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
813                         CTLFLAG_RD, &vsi->ierrors,
814                         "RX packet errors");
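        /*
         * Illustrative read (hypothetical names): if this VSI was registered
         * with sysctl_name "pf" on device ixl0, the counter would be read
         * with `sysctl dev.ixl.0.pf.rx_errors`.
         */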
815
816         if (queues_sysctls)
817                 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
818 }
819
820 /*
821  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
822  * Writes to the ITR registers immediately.
823  */
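/*
 * Illustrative usage (hypothetical node name, assuming the handler is
 * attached as dev.ixl.<unit>.tx_itr): `sysctl dev.ixl.0.tx_itr=100`
 * would immediately program a static TX ITR value of 100 for every queue.
 */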
824 static int
825 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
826 {
827         struct ixl_pf *pf = (struct ixl_pf *)arg1;
828         device_t dev = pf->dev;
829         int error = 0;
830         int requested_tx_itr;
831
832         requested_tx_itr = pf->tx_itr;
833         error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
834         if ((error) || (req->newptr == NULL))
835                 return (error);
836         if (pf->dynamic_tx_itr) {
837                 device_printf(dev,
838                     "Cannot set TX itr value while dynamic TX itr is enabled\n");
839                     return (EINVAL);
840         }
841         if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
842                 device_printf(dev,
843                     "Invalid TX itr value; value must be between 0 and %d\n",
844                         IXL_MAX_ITR);
845                 return (EINVAL);
846         }
847
848         pf->tx_itr = requested_tx_itr;
849         ixl_configure_tx_itr(pf);
850
851         return (error);
852 }
853
854 /*
855  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
856  * Writes to the ITR registers immediately.
857  */
858 static int
859 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
860 {
861         struct ixl_pf *pf = (struct ixl_pf *)arg1;
862         device_t dev = pf->dev;
863         int error = 0;
864         int requested_rx_itr;
865
866         requested_rx_itr = pf->rx_itr;
867         error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
868         if ((error) || (req->newptr == NULL))
869                 return (error);
870         if (pf->dynamic_rx_itr) {
871                 device_printf(dev,
872                     "Cannot set RX itr value while dynamic RX itr is enabled\n");
873                     return (EINVAL);
874         }
875         if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
876                 device_printf(dev,
877                     "Invalid RX itr value; value must be between 0 and %d\n",
878                         IXL_MAX_ITR);
879                 return (EINVAL);
880         }
881
882         pf->rx_itr = requested_rx_itr;
883         ixl_configure_rx_itr(pf);
884
885         return (error);
886 }
887
888 void
889 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
890         struct sysctl_oid_list *child,
891         struct i40e_hw_port_stats *stats)
892 {
893         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
894             "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
895         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
896
897         struct i40e_eth_stats *eth_stats = &stats->eth;
898         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
899
900         struct ixl_sysctl_info ctls[] = 
901         {
902                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
903                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
904                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
905                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
906                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
907                 /* Packet Reception Stats */
908                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
909                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
910                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
911                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
912                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
913                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
914                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
915                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
916                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
917                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
918                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
919                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
920                 /* Packet Transmission Stats */
921                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
922                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
923                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
924                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
925                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
926                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
927                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
928                 /* Flow control */
929                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
930                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
931                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
932                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
933                 /* End */
934                 {0,0,0}
935         };
936
937         struct ixl_sysctl_info *entry = ctls;
938         while (entry->stat != 0)
939         {
940                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
941                                 CTLFLAG_RD, entry->stat,
942                                 entry->description);
943                 entry++;
944         }
945 }
946
947 void
948 ixl_set_rss_key(struct ixl_pf *pf)
949 {
950         struct i40e_hw *hw = &pf->hw;
951         struct ixl_vsi *vsi = &pf->vsi;
952         device_t        dev = pf->dev;
953         u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
954         enum i40e_status_code status;
955
956 #ifdef RSS
957         /* Fetch the configured RSS key */
958         rss_getkey((uint8_t *) &rss_seed);
959 #else
960         ixl_get_default_rss_key(rss_seed);
961 #endif
962         /* Fill out hash function seed */
963         if (hw->mac.type == I40E_MAC_X722) {
964                 struct i40e_aqc_get_set_rss_key_data key_data;
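                /*
                 * The AQ key structure holds a 40-byte standard RSS key plus
                 * a 12-byte extended hash key, hence the 52-byte copy below.
                 */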
965                 bcopy(rss_seed, &key_data, 52);
966                 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
967                 if (status)
968                         device_printf(dev,
969                             "i40e_aq_set_rss_key status %s, error %s\n",
970                             i40e_stat_str(hw, status),
971                             i40e_aq_str(hw, hw->aq.asq_last_status));
972         } else {
973                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
974                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
975         }
976 }
977
978 /*
979  * Configure enabled PCTYPES for RSS.
980  */
981 void
982 ixl_set_rss_pctypes(struct ixl_pf *pf)
983 {
984         struct i40e_hw *hw = &pf->hw;
985         u64             set_hena = 0, hena;
986
987 #ifdef RSS
988         u32             rss_hash_config;
989
990         rss_hash_config = rss_gethashconfig();
991         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
992                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
993         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
994                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
995         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
996                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
997         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
998                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
999         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1000                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1001         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1002                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1003         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1004                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1005 #else
1006         if (hw->mac.type == I40E_MAC_X722)
1007                 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1008         else
1009                 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1010 #endif
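        /*
         * HENA is a 64-bit packet-classifier (PCTYPE) enable mask split
         * across two 32-bit registers; OR the selected PCTYPEs into the
         * currently enabled set instead of overwriting it.
         */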
1011         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1012             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1013         hena |= set_hena;
1014         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1015         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1016
1017 }
1018
1019 /*
1020 ** Setup the PF's RSS parameters.
1021 */
1022 void
1023 ixl_config_rss(struct ixl_pf *pf)
1024 {
1025         ixl_set_rss_key(pf);
1026         ixl_set_rss_pctypes(pf);
1027         ixl_set_rss_hlut(pf);
1028 }
1029
1030 /*
1031  * In some firmware versions there is a default MAC/VLAN filter
1032  * configured which interferes with filters managed by the driver.
1033  * Make sure it's removed.
1034  */
1035 void
1036 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1037 {
1038         struct i40e_aqc_remove_macvlan_element_data e;
1039
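        /*
         * Attempt the removal twice: once as an exact VLAN 0 match and once
         * with the VLAN field ignored, to cover either form of the filter.
         */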
1040         bzero(&e, sizeof(e));
1041         bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1042         e.vlan_tag = 0;
1043         e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1044         i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1045
1046         bzero(&e, sizeof(e));
1047         bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1048         e.vlan_tag = 0;
1049         e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1050                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1051         i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1052 }
1053
1054 /*
1055 ** Initialize filter list and add filters that the hardware
1056 ** needs to know about.
1057 **
1058 ** Requires VSI's seid to be set before calling.
1059 */
1060 void
1061 ixl_init_filters(struct ixl_vsi *vsi)
1062 {
1063         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1064
1065         ixl_dbg_filter(pf, "%s: start\n", __func__);
1066
1067         /* Initialize mac filter list for VSI */
1068         LIST_INIT(&vsi->ftl);
1069         vsi->num_hw_filters = 0;
1070
1071         /* Receive broadcast Ethernet frames */
1072         i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1073
1074         if (IXL_VSI_IS_VF(vsi))
1075                 return;
1076
1077         ixl_del_default_hw_filters(vsi);
1078
1079         ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1080
1081         /*
1082          * Prevent Tx flow control frames from being sent out by
1083          * non-firmware transmitters.
1084          * This affects every VSI in the PF.
1085          */
1086 #ifndef IXL_DEBUG_FC
1087         i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1088 #else
1089         if (pf->enable_tx_fc_filter)
1090                 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1091 #endif
1092 }
1093
1094 void
1095 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1096 {
1097         struct i40e_hw *hw = vsi->hw;
1098         struct ixl_ftl_head tmp;
1099         int cnt;
1100
1101         /*
1102          * The ixl_add_hw_filters function adds filters configured
1103          * in HW to a list in VSI. Move all filters to a temporary
1104          * list to avoid corrupting it by concatenating to itself.
1105          */
1106         LIST_INIT(&tmp);
1107         LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1108         cnt = vsi->num_hw_filters;
1109         vsi->num_hw_filters = 0;
1110
1111         ixl_add_hw_filters(vsi, &tmp, cnt);
1112
1113         /* Filter could be removed if MAC address was changed */
1114         ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1115
1116         if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1117                 return;
1118         /*
1119          * VLAN HW filtering is enabled, make sure that filters
1120          * for all registered VLAN tags are configured
1121          */
1122         ixl_add_vlan_filters(vsi, hw->mac.addr);
1123 }
1124
1125 /*
1126  * This routine adds a MAC/VLAN filter to the software filter
1127  * list, then adds that new filter to the HW if it doesn't already
1128  * exist in the SW filter list.
1129  */
1130 void
1131 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1132 {
1133         struct ixl_mac_filter   *f, *tmp;
1134         struct ixl_pf           *pf;
1135         device_t                dev;
1136         struct ixl_ftl_head     to_add;
1137         int                     to_add_cnt;
1138
1139         pf = vsi->back;
1140         dev = pf->dev;
1141         to_add_cnt = 1;
1142
1143         ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1144             MAC_FORMAT_ARGS(macaddr), vlan);
1145
1146         /* Does one already exist */
1147         f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1148         if (f != NULL)
1149                 return;
1150
1151         LIST_INIT(&to_add);
1152         f = ixl_new_filter(&to_add, macaddr, vlan);
1153         if (f == NULL) {
1154                 device_printf(dev, "WARNING: no filter available!!\n");
1155                 return;
1156         }
1157         if (f->vlan != IXL_VLAN_ANY)
1158                 f->flags |= IXL_FILTER_VLAN;
1159         else
1160                 vsi->num_macs++;
1161
1162         /*
1163         ** If this is the first VLAN being registered, we need to
1164         ** remove the ANY filter that indicates we are not in a VLAN
1165         ** and replace it with a VLAN 0 filter.
1166         */
1167         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1168                 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1169                 if (tmp != NULL) {
1170                         struct ixl_ftl_head to_del;
1171
1172                         /* Prepare new filter first to avoid removing
1173                          * VLAN_ANY filter if allocation fails */
1174                         f = ixl_new_filter(&to_add, macaddr, 0);
1175                         if (f == NULL) {
1176                                 device_printf(dev, "WARNING: no filter available!!\n");
1177                                 free(LIST_FIRST(&to_add), M_IXL);
1178                                 return;
1179                         }
1180                         to_add_cnt++;
1181
1182                         LIST_REMOVE(tmp, ftle);
1183                         LIST_INIT(&to_del);
1184                         LIST_INSERT_HEAD(&to_del, tmp, ftle);
1185                         ixl_del_hw_filters(vsi, &to_del, 1);
1186                 }
1187         }
1188
1189         ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1190 }
1191
1192 /**
1193  * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1194  * @vsi: pointer to VSI
1195  * @macaddr: MAC address
1196  *
1197  * Adds a MAC/VLAN filter for each VLAN configured on the interface
1198  * if there are enough HW filters. Otherwise adds a single filter
1199  * for all tagged and untagged frames to allow all configured VLANs
1200  * to receive traffic.
1201  */
1202 void
1203 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1204 {
1205         struct ixl_ftl_head to_add;
1206         struct ixl_mac_filter *f;
1207         int to_add_cnt = 0;
1208         int i, vlan = 0;
1209
1210         if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1211                 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1212                 return;
1213         }
1214         LIST_INIT(&to_add);
1215
1216         /* Add filter for untagged frames if it does not exist yet */
1217         f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1218         if (f == NULL) {
1219                 f = ixl_new_filter(&to_add, macaddr, 0);
1220                 if (f == NULL) {
1221                         device_printf(vsi->dev, "WARNING: no filter available!!\n");
1222                         return;
1223                 }
1224                 to_add_cnt++;
1225         }
1226
1227         for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
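                /*
                 * bit_ffs_at(3) finds the next set bit (registered VLAN ID)
                 * in vlans_map at or after index i, storing -1 in vlan when
                 * none remain.
                 */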
1228                 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1229                 if (vlan == -1)
1230                         break;
1231
1232                 /* Does one already exist */
1233                 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1234                 if (f != NULL)
1235                         continue;
1236
1237                 f = ixl_new_filter(&to_add, macaddr, vlan);
1238                 if (f == NULL) {
1239                         device_printf(vsi->dev, "WARNING: no filter available!!\n");
1240                         ixl_free_filters(&to_add);
1241                         return;
1242                 }
1243                 to_add_cnt++;
1244         }
1245
1246         ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1247 }
1248
1249 void
1250 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1251 {
1252         struct ixl_mac_filter *f, *tmp;
1253         struct ixl_ftl_head ftl_head;
1254         int to_del_cnt = 1;
1255
1256         ixl_dbg_filter((struct ixl_pf *)vsi->back,
1257             "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1258             MAC_FORMAT_ARGS(macaddr), vlan);
1259
1260         f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1261         if (f == NULL)
1262                 return;
1263
1264         LIST_REMOVE(f, ftle);
1265         LIST_INIT(&ftl_head);
1266         LIST_INSERT_HEAD(&ftl_head, f, ftle);
1267         if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1268                 vsi->num_macs--;
1269
1270         /* If this is not the last vlan just remove the filter */
1271         if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1272                 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1273                 return;
1274         }
1275
1276         /* It's the last vlan, we need to switch back to a non-vlan filter */
1277         tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
1278         if (tmp != NULL) {
1279                 LIST_REMOVE(tmp, ftle);
1280                 LIST_INSERT_AFTER(f, tmp, ftle);
1281                 to_del_cnt++;
1282         }
1283         ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1284
1285         ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1286 }
1287
1288 /**
1289  * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1290  * @vsi: VSI which filters need to be removed
1291  * @macaddr: MAC address
1292  *
1293  * Remove all MAC/VLAN filters with a given MAC address. For multicast
1294  * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY)
1295  * so skip them to speed up processing. Those filters should be removed
1296  * using ixl_del_filter function.
1297  */
1298 void
1299 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1300 {
1301         struct ixl_mac_filter *f, *tmp;
1302         struct ixl_ftl_head to_del;
1303         int to_del_cnt = 0;
1304
1305         LIST_INIT(&to_del);
1306
1307         LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1308                 if ((f->flags & IXL_FILTER_MC) != 0 ||
1309                     !ixl_ether_is_equal(f->macaddr, macaddr))
1310                         continue;
1311
1312                 LIST_REMOVE(f, ftle);
1313                 LIST_INSERT_HEAD(&to_del, f, ftle);
1314                 to_del_cnt++;
1315         }
1316
1317         ixl_dbg_filter((struct ixl_pf *)vsi->back,
1318             "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1319             __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1320         if (to_del_cnt > 0)
1321                 ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1322 }
1323
1324 /*
1325 ** Find the filter with both matching mac addr and vlan id
1326 */
1327 struct ixl_mac_filter *
1328 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1329 {
1330         struct ixl_mac_filter   *f;
1331
1332         LIST_FOREACH(f, headp, ftle) {
1333                 if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1334                     (f->vlan == vlan)) {
1335                         return (f);
1336                 }
1337         }
1338
1339         return (NULL);
1340 }
1341
1342 /*
1343 ** This routine takes additions to the vsi filter
1344 ** table and creates an Admin Queue call to create
1345 ** the filters in the hardware.
1346 */
1347 void
1348 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1349 {
1350         struct i40e_aqc_add_macvlan_element_data *a, *b;
1351         struct ixl_mac_filter   *f, *fn;
1352         struct ixl_pf           *pf;
1353         struct i40e_hw          *hw;
1354         device_t                dev;
1355         enum i40e_status_code   status;
1356         int                     j = 0;
1357
1358         pf = vsi->back;
1359         dev = vsi->dev;
1360         hw = &pf->hw;
1361
1362         ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1363
1364         if (cnt < 1) {
1365                 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1366                 return;
1367         }
1368
1369         a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1370             M_IXL, M_NOWAIT | M_ZERO);
1371         if (a == NULL) {
1372                 device_printf(dev, "add_hw_filters failed to get memory\n");
1373                 return;
1374         }
1375
1376         LIST_FOREACH(f, to_add, ftle) {
1377                 b = &a[j]; // a pox on fvl long names :)
1378                 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1379                 if (f->vlan == IXL_VLAN_ANY) {
1380                         b->vlan_tag = 0;
1381                         b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1382                 } else {
1383                         b->vlan_tag = f->vlan;
1384                         b->flags = 0;
1385                 }
1386                 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1387                 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1388                     MAC_FORMAT_ARGS(f->macaddr));
1389
1390                 if (++j == cnt)
1391                         break;
1392         }
1393         if (j != cnt) {
1394                 /* Something went wrong */
1395                 device_printf(dev,
1396                     "%s ERROR: list of filters too short, expected: %d, found: %d\n",
1397                     __func__, cnt, j);
1398                 ixl_free_filters(to_add);
1399                 goto out_free;
1400         }
1401
1402         status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1403         if (status == I40E_SUCCESS) {
1404                 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1405                 vsi->num_hw_filters += j;
1406                 goto out_free;
1407         }
1408
1409         device_printf(dev,
1410             "i40e_aq_add_macvlan status %s, error %s\n",
1411             i40e_stat_str(hw, status),
1412             i40e_aq_str(hw, hw->aq.asq_last_status));
1413         j = 0;
1414
1415         /* Verify which filters were actually configured in HW
1416          * and add them to the list */
1417         LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1418                 LIST_REMOVE(f, ftle);
1419                 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1420                         ixl_dbg_filter(pf,
1421                             "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1422                             __func__,
1423                             MAC_FORMAT_ARGS(f->macaddr),
1424                             f->vlan);
1425                         free(f, M_IXL);
1426                 } else {
1427                         LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1428                         vsi->num_hw_filters++;
1429                 }
1430                 j++;
1431         }
1432
1433 out_free:
1434         free(a, M_IXL);
1435 }
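
/*
 * Illustrative call pattern (a sketch for readers, not code from the
 * driver): callers collect ixl_mac_filter entries on a local list head
 * and pass the list together with its element count, e.g.:
 *
 *	struct ixl_ftl_head to_add = LIST_HEAD_INITIALIZER(to_add);
 *	int to_add_cnt = 0;
 *	...
 *	LIST_INSERT_HEAD(&to_add, f, ftle);
 *	to_add_cnt++;
 *	...
 *	if (to_add_cnt > 0)
 *		ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
 *
 * On success the entries are concatenated onto vsi->ftl; if the Admin
 * Queue command fails, only the filters the firmware actually accepted
 * are kept and the rest are freed.
 */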
1436
1437 /*
1438 ** This routine takes removals from the VSI filter
1439 ** table and issues an Admin Queue command to delete
1440 ** the filters from the hardware.
1441 */
1442 void
1443 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1444 {
1445         struct i40e_aqc_remove_macvlan_element_data *d, *e;
1446         struct ixl_pf           *pf;
1447         struct i40e_hw          *hw;
1448         device_t                dev;
1449         struct ixl_mac_filter   *f, *f_temp;
1450         enum i40e_status_code   status;
1451         int                     j = 0;
1452
1453         pf = vsi->back;
1454         hw = &pf->hw;
1455         dev = vsi->dev;
1456
1457         ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
1458
1459         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1460             M_IXL, M_NOWAIT | M_ZERO);
1461         if (d == NULL) {
1462                 device_printf(dev, "%s: failed to get memory\n", __func__);
1463                 return;
1464         }
1465
1466         LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1467                 e = &d[j]; // a pox on fvl long names :)
1468                 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1469                 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1470                 if (f->vlan == IXL_VLAN_ANY) {
1471                         e->vlan_tag = 0;
1472                         e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1473                 } else {
1474                         e->vlan_tag = f->vlan;
1475                 }
1476
1477                 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1478                     MAC_FORMAT_ARGS(f->macaddr));
1479
1480                 /* delete entry from the list */
1481                 LIST_REMOVE(f, ftle);
1482                 free(f, M_IXL);
1483                 if (++j == cnt)
1484                         break;
1485         }
1486         if (j != cnt || !LIST_EMPTY(to_del)) {
1487                 /* Something went wrong */
1488                 device_printf(dev,
1489                     "%s ERROR: filter list has wrong size; expected: %d, found: %d\n",
1490                     __func__, cnt, j);
1491                 ixl_free_filters(to_del);
1492                 goto out_free;
1493         }
1494         status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
1495         if (status) {
1496                 device_printf(dev,
1497                     "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1498                     __func__, i40e_stat_str(hw, status),
1499                     i40e_aq_str(hw, hw->aq.asq_last_status));
1500                 for (int i = 0; i < j; i++) {
1501                         if (d[i].error_code == 0)
1502                                 continue;
1503                         device_printf(dev,
1504                             "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1505                             __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1506                             d[i].vlan_tag);
1507                 }
1508         }
1509
1510         vsi->num_hw_filters -= j;
1511
1512 out_free:
1513         free(d, M_IXL);
1514
1515         ixl_dbg_filter(pf, "%s: end\n", __func__);
1516 }
1517
1518 int
1519 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1520 {
1521         struct i40e_hw  *hw = &pf->hw;
1522         int             error = 0;
1523         u32             reg;
1524         u16             pf_qidx;
1525
1526         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1527
1528         ixl_dbg(pf, IXL_DBG_EN_DIS,
1529             "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1530             pf_qidx, vsi_qidx);
1531
1532         i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1533
1534         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1535         reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1536             I40E_QTX_ENA_QENA_STAT_MASK;
1537         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1538         /* Verify the enable took */
1539         for (int j = 0; j < 10; j++) {
1540                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1541                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1542                         break;
1543                 i40e_usec_delay(10);
1544         }
1545         if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1546                 device_printf(pf->dev, "TX queue %d still disabled!\n",
1547                     pf_qidx);
1548                 error = ETIMEDOUT;
1549         }
1550
1551         return (error);
1552 }
1553
1554 int
1555 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1556 {
1557         struct i40e_hw  *hw = &pf->hw;
1558         int             error = 0;
1559         u32             reg;
1560         u16             pf_qidx;
1561
1562         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1563
1564         ixl_dbg(pf, IXL_DBG_EN_DIS,
1565             "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1566             pf_qidx, vsi_qidx);
1567
1568         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1569         reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1570             I40E_QRX_ENA_QENA_STAT_MASK;
1571         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1572         /* Verify the enable took */
1573         for (int j = 0; j < 10; j++) {
1574                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1575                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1576                         break;
1577                 i40e_usec_delay(10);
1578         }
1579         if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1580                 device_printf(pf->dev, "RX queue %d still disabled!\n",
1581                     pf_qidx);
1582                 error = ETIMEDOUT;
1583         }
1584
1585         return (error);
1586 }
1587
1588 int
1589 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1590 {
1591         int error = 0;
1592
1593         error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1594         /* Called function already prints error message */
1595         if (error)
1596                 return (error);
1597         error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1598         return (error);
1599 }
1600
1601 /*
1602  * Returns an error if the ring fails to report as disabled (i.e. it appears hung).
1603  */
1604 int
1605 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1606 {
1607         struct i40e_hw  *hw = &pf->hw;
1608         int             error = 0;
1609         u32             reg;
1610         u16             pf_qidx;
1611
1612         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1613
1614         ixl_dbg(pf, IXL_DBG_EN_DIS,
1615             "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1616             pf_qidx, vsi_qidx);
1617
1618         i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1619         i40e_usec_delay(500);
1620
1621         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1622         reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1623         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1624         /* Verify the disable took */
1625         for (int j = 0; j < 10; j++) {
1626                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1627                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1628                         break;
1629                 i40e_msec_delay(10);
1630         }
1631         if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1632                 device_printf(pf->dev, "TX queue %d still enabled!\n",
1633                     pf_qidx);
1634                 error = ETIMEDOUT;
1635         }
1636
1637         return (error);
1638 }
1639
1640 /*
1641  * Returns an error if the ring fails to report as disabled (i.e. it appears hung).
1642  */
1643 int
1644 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1645 {
1646         struct i40e_hw  *hw = &pf->hw;
1647         int             error = 0;
1648         u32             reg;
1649         u16             pf_qidx;
1650
1651         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1652
1653         ixl_dbg(pf, IXL_DBG_EN_DIS,
1654             "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1655             pf_qidx, vsi_qidx);
1656
1657         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1658         reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1659         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1660         /* Verify the disable took */
1661         for (int j = 0; j < 10; j++) {
1662                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1663                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1664                         break;
1665                 i40e_msec_delay(10);
1666         }
1667         if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1668                 device_printf(pf->dev, "RX queue %d still enabled!\n",
1669                     pf_qidx);
1670                 error = ETIMEDOUT;
1671         }
1672
1673         return (error);
1674 }
1675
1676 int
1677 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1678 {
1679         int error = 0;
1680
1681         error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1682         /* Called function already prints error message */
1683         if (error)
1684                 return (error);
1685         error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1686         return (error);
1687 }
1688
1689 static void
1690 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1691 {
1692         struct i40e_hw *hw = &pf->hw;
1693         device_t dev = pf->dev;
1694         struct ixl_vf *vf;
1695         bool mdd_detected = false;
1696         bool pf_mdd_detected = false;
1697         bool vf_mdd_detected = false;
1698         u16 vf_num, queue;
1699         u8 pf_num, event;
1700         u8 pf_mdet_num, vp_mdet_num;
1701         u32 reg;
1702
1703         /* find what triggered the MDD event */
1704         reg = rd32(hw, I40E_GL_MDET_TX);
1705         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1706                 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1707                     I40E_GL_MDET_TX_PF_NUM_SHIFT;
1708                 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1709                     I40E_GL_MDET_TX_VF_NUM_SHIFT;
1710                 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1711                     I40E_GL_MDET_TX_EVENT_SHIFT;
1712                 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1713                     I40E_GL_MDET_TX_QUEUE_SHIFT;
1714                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1715                 mdd_detected = true;
1716         }
1717
1718         if (!mdd_detected)
1719                 return;
1720
1721         reg = rd32(hw, I40E_PF_MDET_TX);
1722         if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1723                 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1724                 pf_mdet_num = hw->pf_id;
1725                 pf_mdd_detected = true;
1726         }
1727
1728         /* Check if MDD was caused by a VF */
1729         for (int i = 0; i < pf->num_vfs; i++) {
1730                 vf = &(pf->vfs[i]);
1731                 reg = rd32(hw, I40E_VP_MDET_TX(i));
1732                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1733                         wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1734                         vp_mdet_num = i;
1735                         vf->num_mdd_events++;
1736                         vf_mdd_detected = true;
1737                 }
1738         }
1739
1740         /* Print out an error message */
1741         if (vf_mdd_detected && pf_mdd_detected)
1742                 device_printf(dev,
1743                     "Malicious Driver Detection event %d"
1744                     " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1745                     event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1746         else if (vf_mdd_detected && !pf_mdd_detected)
1747                 device_printf(dev,
1748                     "Malicious Driver Detection event %d"
1749                     " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1750                     event, queue, pf_num, vf_num, vp_mdet_num);
1751         else if (!vf_mdd_detected && pf_mdd_detected)
1752                 device_printf(dev,
1753                     "Malicious Driver Detection event %d"
1754                     " on TX queue %d, pf number %d (PF-%d)\n",
1755                     event, queue, pf_num, pf_mdet_num);
1756         /* Theoretically shouldn't happen */
1757         else
1758                 device_printf(dev,
1759                     "TX Malicious Driver Detection event (unknown)\n");
1760 }
1761
1762 static void
1763 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1764 {
1765         struct i40e_hw *hw = &pf->hw;
1766         device_t dev = pf->dev;
1767         struct ixl_vf *vf;
1768         bool mdd_detected = false;
1769         bool pf_mdd_detected = false;
1770         bool vf_mdd_detected = false;
1771         u16 queue;
1772         u8 pf_num, event;
1773         u8 pf_mdet_num, vp_mdet_num;
1774         u32 reg;
1775
1776         /*
1777          * GL_MDET_RX doesn't contain VF number information, unlike
1778          * GL_MDET_TX.
1779          */
1780         reg = rd32(hw, I40E_GL_MDET_RX);
1781         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1782                 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1783                     I40E_GL_MDET_RX_FUNCTION_SHIFT;
1784                 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1785                     I40E_GL_MDET_RX_EVENT_SHIFT;
1786                 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1787                     I40E_GL_MDET_RX_QUEUE_SHIFT;
1788                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1789                 mdd_detected = true;
1790         }
1791
1792         if (!mdd_detected)
1793                 return;
1794
1795         reg = rd32(hw, I40E_PF_MDET_RX);
1796         if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1797                 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1798                 pf_mdet_num = hw->pf_id;
1799                 pf_mdd_detected = true;
1800         }
1801
1802         /* Check if MDD was caused by a VF */
1803         for (int i = 0; i < pf->num_vfs; i++) {
1804                 vf = &(pf->vfs[i]);
1805                 reg = rd32(hw, I40E_VP_MDET_RX(i));
1806                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1807                         wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1808                         vp_mdet_num = i;
1809                         vf->num_mdd_events++;
1810                         vf_mdd_detected = true;
1811                 }
1812         }
1813
1814         /* Print out an error message */
1815         if (vf_mdd_detected && pf_mdd_detected)
1816                 device_printf(dev,
1817                     "Malicious Driver Detection event %d"
1818                     " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1819                     event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1820         else if (vf_mdd_detected && !pf_mdd_detected)
1821                 device_printf(dev,
1822                     "Malicious Driver Detection event %d"
1823                     " on RX queue %d, pf number %d, (VF-%d)\n",
1824                     event, queue, pf_num, vp_mdet_num);
1825         else if (!vf_mdd_detected && pf_mdd_detected)
1826                 device_printf(dev,
1827                     "Malicious Driver Detection event %d"
1828                     " on RX queue %d, pf number %d (PF-%d)\n",
1829                     event, queue, pf_num, pf_mdet_num);
1830         /* Theoretically shouldn't happen */
1831         else
1832                 device_printf(dev,
1833                     "RX Malicious Driver Detection event (unknown)\n");
1834 }
1835
1836 /**
1837  * ixl_handle_mdd_event
1838  *
1839  * Called from the interrupt handler to identify possibly malicious VFs
1840  * (but also detects events from the PF).
1841  **/
1842 void
1843 ixl_handle_mdd_event(struct ixl_pf *pf)
1844 {
1845         struct i40e_hw *hw = &pf->hw;
1846         u32 reg;
1847
1848         /*
1849          * Handle both TX/RX because it's possible they could
1850          * both trigger in the same interrupt.
1851          */
1852         ixl_handle_tx_mdd_event(pf);
1853         ixl_handle_rx_mdd_event(pf);
1854
1855         atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1856
1857         /* re-enable mdd interrupt cause */
1858         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1859         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1860         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1861         ixl_flush(hw);
1862 }
1863
1864 void
1865 ixl_enable_intr0(struct i40e_hw *hw)
1866 {
1867         u32             reg;
1868
1869         /* Use IXL_ITR_NONE so ITR isn't updated here */
1870         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1871             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1872             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1873         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1874 }
1875
1876 void
1877 ixl_disable_intr0(struct i40e_hw *hw)
1878 {
1879         u32             reg;
1880
1881         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1882         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1883         ixl_flush(hw);
1884 }
1885
1886 void
1887 ixl_enable_queue(struct i40e_hw *hw, int id)
1888 {
1889         u32             reg;
1890
1891         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1892             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1893             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1894         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1895 }
1896
1897 void
1898 ixl_disable_queue(struct i40e_hw *hw, int id)
1899 {
1900         u32             reg;
1901
1902         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1903         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1904 }
1905
1906 void
1907 ixl_handle_empr_reset(struct ixl_pf *pf)
1908 {
1909         struct ixl_vsi  *vsi = &pf->vsi;
1910         bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
1911
1912         ixl_prepare_for_reset(pf, is_up);
1913         /*
1914          * i40e_pf_reset checks the type of reset and acts
1915          * accordingly. If an EMP or Core reset was performed,
1916          * doing a PF reset is not necessary and it sometimes
1917          * fails.
1918          */
1919         ixl_pf_reset(pf);
1920
1921         if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
1922             ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
1923                 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
1924                 device_printf(pf->dev,
1925                     "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1926                 pf->link_up = FALSE;
1927                 ixl_update_link_status(pf);
1928         }
1929
1930         ixl_rebuild_hw_structs_after_reset(pf, is_up);
1931
1932         atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING);
1933 }
1934
1935 void
1936 ixl_update_stats_counters(struct ixl_pf *pf)
1937 {
1938         struct i40e_hw  *hw = &pf->hw;
1939         struct ixl_vsi  *vsi = &pf->vsi;
1940         struct ixl_vf   *vf;
1941         u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
1942
1943         struct i40e_hw_port_stats *nsd = &pf->stats;
1944         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1945
1946         /* Update hw stats */
1947         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1948                            pf->stat_offsets_loaded,
1949                            &osd->crc_errors, &nsd->crc_errors);
1950         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1951                            pf->stat_offsets_loaded,
1952                            &osd->illegal_bytes, &nsd->illegal_bytes);
1953         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1954                            I40E_GLPRT_GORCL(hw->port),
1955                            pf->stat_offsets_loaded,
1956                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1957         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1958                            I40E_GLPRT_GOTCL(hw->port),
1959                            pf->stat_offsets_loaded,
1960                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1961         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1962                            pf->stat_offsets_loaded,
1963                            &osd->eth.rx_discards,
1964                            &nsd->eth.rx_discards);
1965         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1966                            I40E_GLPRT_UPRCL(hw->port),
1967                            pf->stat_offsets_loaded,
1968                            &osd->eth.rx_unicast,
1969                            &nsd->eth.rx_unicast);
1970         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1971                            I40E_GLPRT_UPTCL(hw->port),
1972                            pf->stat_offsets_loaded,
1973                            &osd->eth.tx_unicast,
1974                            &nsd->eth.tx_unicast);
1975         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1976                            I40E_GLPRT_MPRCL(hw->port),
1977                            pf->stat_offsets_loaded,
1978                            &osd->eth.rx_multicast,
1979                            &nsd->eth.rx_multicast);
1980         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1981                            I40E_GLPRT_MPTCL(hw->port),
1982                            pf->stat_offsets_loaded,
1983                            &osd->eth.tx_multicast,
1984                            &nsd->eth.tx_multicast);
1985         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1986                            I40E_GLPRT_BPRCL(hw->port),
1987                            pf->stat_offsets_loaded,
1988                            &osd->eth.rx_broadcast,
1989                            &nsd->eth.rx_broadcast);
1990         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1991                            I40E_GLPRT_BPTCL(hw->port),
1992                            pf->stat_offsets_loaded,
1993                            &osd->eth.tx_broadcast,
1994                            &nsd->eth.tx_broadcast);
1995
1996         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
1997                            pf->stat_offsets_loaded,
1998                            &osd->tx_dropped_link_down,
1999                            &nsd->tx_dropped_link_down);
2000         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2001                            pf->stat_offsets_loaded,
2002                            &osd->mac_local_faults,
2003                            &nsd->mac_local_faults);
2004         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2005                            pf->stat_offsets_loaded,
2006                            &osd->mac_remote_faults,
2007                            &nsd->mac_remote_faults);
2008         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2009                            pf->stat_offsets_loaded,
2010                            &osd->rx_length_errors,
2011                            &nsd->rx_length_errors);
2012
2013         /* Flow control (LFC) stats */
2014         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2015                            pf->stat_offsets_loaded,
2016                            &osd->link_xon_rx, &nsd->link_xon_rx);
2017         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2018                            pf->stat_offsets_loaded,
2019                            &osd->link_xon_tx, &nsd->link_xon_tx);
2020         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2021                            pf->stat_offsets_loaded,
2022                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
2023         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2024                            pf->stat_offsets_loaded,
2025                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
2026
2027         /*
2028          * For watchdog management we need to know if we have been paused
2029          * during the last interval, so capture that here.
2030          */
2031         if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2032                 vsi->shared->isc_pause_frames = 1;
2033
2034         /* Packet size stats rx */
2035         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2036                            I40E_GLPRT_PRC64L(hw->port),
2037                            pf->stat_offsets_loaded,
2038                            &osd->rx_size_64, &nsd->rx_size_64);
2039         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2040                            I40E_GLPRT_PRC127L(hw->port),
2041                            pf->stat_offsets_loaded,
2042                            &osd->rx_size_127, &nsd->rx_size_127);
2043         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2044                            I40E_GLPRT_PRC255L(hw->port),
2045                            pf->stat_offsets_loaded,
2046                            &osd->rx_size_255, &nsd->rx_size_255);
2047         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2048                            I40E_GLPRT_PRC511L(hw->port),
2049                            pf->stat_offsets_loaded,
2050                            &osd->rx_size_511, &nsd->rx_size_511);
2051         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2052                            I40E_GLPRT_PRC1023L(hw->port),
2053                            pf->stat_offsets_loaded,
2054                            &osd->rx_size_1023, &nsd->rx_size_1023);
2055         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2056                            I40E_GLPRT_PRC1522L(hw->port),
2057                            pf->stat_offsets_loaded,
2058                            &osd->rx_size_1522, &nsd->rx_size_1522);
2059         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2060                            I40E_GLPRT_PRC9522L(hw->port),
2061                            pf->stat_offsets_loaded,
2062                            &osd->rx_size_big, &nsd->rx_size_big);
2063
2064         /* Packet size stats tx */
2065         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2066                            I40E_GLPRT_PTC64L(hw->port),
2067                            pf->stat_offsets_loaded,
2068                            &osd->tx_size_64, &nsd->tx_size_64);
2069         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2070                            I40E_GLPRT_PTC127L(hw->port),
2071                            pf->stat_offsets_loaded,
2072                            &osd->tx_size_127, &nsd->tx_size_127);
2073         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2074                            I40E_GLPRT_PTC255L(hw->port),
2075                            pf->stat_offsets_loaded,
2076                            &osd->tx_size_255, &nsd->tx_size_255);
2077         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2078                            I40E_GLPRT_PTC511L(hw->port),
2079                            pf->stat_offsets_loaded,
2080                            &osd->tx_size_511, &nsd->tx_size_511);
2081         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2082                            I40E_GLPRT_PTC1023L(hw->port),
2083                            pf->stat_offsets_loaded,
2084                            &osd->tx_size_1023, &nsd->tx_size_1023);
2085         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2086                            I40E_GLPRT_PTC1522L(hw->port),
2087                            pf->stat_offsets_loaded,
2088                            &osd->tx_size_1522, &nsd->tx_size_1522);
2089         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2090                            I40E_GLPRT_PTC9522L(hw->port),
2091                            pf->stat_offsets_loaded,
2092                            &osd->tx_size_big, &nsd->tx_size_big);
2093
2094         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2095                            pf->stat_offsets_loaded,
2096                            &osd->rx_undersize, &nsd->rx_undersize);
2097         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2098                            pf->stat_offsets_loaded,
2099                            &osd->rx_fragments, &nsd->rx_fragments);
2100         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2101                            pf->stat_offsets_loaded,
2102                            &osd->rx_oversize, &nsd->rx_oversize);
2103         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2104                            pf->stat_offsets_loaded,
2105                            &osd->rx_jabber, &nsd->rx_jabber);
2106         /* EEE */
2107         i40e_get_phy_lpi_status(hw, nsd);
2108
2109         i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2110                           &osd->tx_lpi_count, &nsd->tx_lpi_count,
2111                           &osd->rx_lpi_count, &nsd->rx_lpi_count);
2112
2113         pf->stat_offsets_loaded = true;
2114         /* End hw stats */
2115
2116         /* Update vsi stats */
2117         ixl_update_vsi_stats(vsi);
2118
2119         for (int i = 0; i < pf->num_vfs; i++) {
2120                 vf = &pf->vfs[i];
2121                 if (vf->vf_flags & VF_FLAG_ENABLED)
2122                         ixl_update_eth_stats(&pf->vfs[i].vsi);
2123         }
2124 }
2125
2126 /**
2127  * Update VSI-specific ethernet statistics counters.
2128  **/
2129 void
2130 ixl_update_eth_stats(struct ixl_vsi *vsi)
2131 {
2132         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2133         struct i40e_hw *hw = &pf->hw;
2134         struct i40e_eth_stats *es;
2135         struct i40e_eth_stats *oes;
2136         u16 stat_idx = vsi->info.stat_counter_idx;
2137
2138         es = &vsi->eth_stats;
2139         oes = &vsi->eth_stats_offsets;
2140
2141         /* Gather up the stats that the hw collects */
2142         ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2143                            vsi->stat_offsets_loaded,
2144                            &oes->tx_errors, &es->tx_errors);
2145         ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2146                            vsi->stat_offsets_loaded,
2147                            &oes->rx_discards, &es->rx_discards);
2148
2149         ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2150                            I40E_GLV_GORCL(stat_idx),
2151                            vsi->stat_offsets_loaded,
2152                            &oes->rx_bytes, &es->rx_bytes);
2153         ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2154                            I40E_GLV_UPRCL(stat_idx),
2155                            vsi->stat_offsets_loaded,
2156                            &oes->rx_unicast, &es->rx_unicast);
2157         ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2158                            I40E_GLV_MPRCL(stat_idx),
2159                            vsi->stat_offsets_loaded,
2160                            &oes->rx_multicast, &es->rx_multicast);
2161         ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2162                            I40E_GLV_BPRCL(stat_idx),
2163                            vsi->stat_offsets_loaded,
2164                            &oes->rx_broadcast, &es->rx_broadcast);
2165
2166         ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2167                            I40E_GLV_GOTCL(stat_idx),
2168                            vsi->stat_offsets_loaded,
2169                            &oes->tx_bytes, &es->tx_bytes);
2170         ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2171                            I40E_GLV_UPTCL(stat_idx),
2172                            vsi->stat_offsets_loaded,
2173                            &oes->tx_unicast, &es->tx_unicast);
2174         ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2175                            I40E_GLV_MPTCL(stat_idx),
2176                            vsi->stat_offsets_loaded,
2177                            &oes->tx_multicast, &es->tx_multicast);
2178         ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2179                            I40E_GLV_BPTCL(stat_idx),
2180                            vsi->stat_offsets_loaded,
2181                            &oes->tx_broadcast, &es->tx_broadcast);
2182         vsi->stat_offsets_loaded = true;
2183 }
2184
2185 void
2186 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2187 {
2188         struct ixl_pf           *pf;
2189         struct ifnet            *ifp;
2190         struct i40e_eth_stats   *es;
2191         u64                     tx_discards, csum_errs;
2192
2193         struct i40e_hw_port_stats *nsd;
2194
2195         pf = vsi->back;
2196         ifp = vsi->ifp;
2197         es = &vsi->eth_stats;
2198         nsd = &pf->stats;
2199
2200         ixl_update_eth_stats(vsi);
2201
2202         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2203
2204         csum_errs = 0;
2205         for (int i = 0; i < vsi->num_rx_queues; i++)
2206                 csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2207         nsd->checksum_error = csum_errs;
2208
2209         /* Update ifnet stats */
2210         IXL_SET_IPACKETS(vsi, es->rx_unicast +
2211                            es->rx_multicast +
2212                            es->rx_broadcast);
2213         IXL_SET_OPACKETS(vsi, es->tx_unicast +
2214                            es->tx_multicast +
2215                            es->tx_broadcast);
2216         IXL_SET_IBYTES(vsi, es->rx_bytes);
2217         IXL_SET_OBYTES(vsi, es->tx_bytes);
2218         IXL_SET_IMCASTS(vsi, es->rx_multicast);
2219         IXL_SET_OMCASTS(vsi, es->tx_multicast);
2220
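	/*
	 * Input errors are reported as the sum of all RX error counters
	 * tracked above: CRC errors, illegal bytes, checksum errors, RX
	 * length errors, undersized and fragmented frames, and oversized
	 * and jabber frames.
	 */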
2221         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2222             nsd->checksum_error + nsd->rx_length_errors +
2223             nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2224             nsd->rx_jabber);
2225         IXL_SET_OERRORS(vsi, es->tx_errors);
2226         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2227         IXL_SET_OQDROPS(vsi, tx_discards);
2228         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2229         IXL_SET_COLLISIONS(vsi, 0);
2230 }
2231
2232 /**
2233  * Reset all of the stats for the given pf
2234  **/
2235 void
2236 ixl_pf_reset_stats(struct ixl_pf *pf)
2237 {
2238         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2239         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2240         pf->stat_offsets_loaded = false;
2241 }
2242
2243 /**
2244  * Resets all stats of the given vsi
2245  **/
2246 void
2247 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2248 {
2249         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2250         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2251         vsi->stat_offsets_loaded = false;
2252 }
2253
2254 /**
2255  * Read and update a 48-bit stat from the hw
2256  *
2257  * Since the device stats are not reset at PF reset, they likely will not
2258  * be zeroed when the driver starts.  We'll save the first values read
2259  * and use them as offsets to be subtracted from the raw values in order
2260  * to report stats that count from zero.
2261  **/
2262 void
2263 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2264         bool offset_loaded, u64 *offset, u64 *stat)
2265 {
2266         u64 new_data;
2267
2268 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2269         new_data = rd64(hw, loreg);
2270 #else
2271         /*
2272          * Use two rd32's instead of one rd64; FreeBSD versions before
2273          * 10 don't support 64-bit bus reads/writes.
2274          */
2275         new_data = rd32(hw, loreg);
2276         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2277 #endif
2278
2279         if (!offset_loaded)
2280                 *offset = new_data;
2281         if (new_data >= *offset)
2282                 *stat = new_data - *offset;
2283         else
2284                 *stat = (new_data + ((u64)1 << 48)) - *offset;
2285         *stat &= 0xFFFFFFFFFFFFULL;
2286 }
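
/*
 * Worked example (illustrative only; the numbers are made up): if the
 * first read of a counter returns 0x0000FFFFFFF0, that value is saved as
 * the offset and the stat reports zero.  A later read of 0x000100000010
 * then reports 0x20.  If the 48-bit hardware counter wraps so that a new
 * reading is smaller than the saved offset, 2^48 is added to the new
 * value before subtracting, and the result is masked back to 48 bits.
 */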
2287
2288 /**
2289  * Read and update a 32-bit stat from the hw
2290  **/
2291 void
2292 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2293         bool offset_loaded, u64 *offset, u64 *stat)
2294 {
2295         u32 new_data;
2296
2297         new_data = rd32(hw, reg);
2298         if (!offset_loaded)
2299                 *offset = new_data;
2300         if (new_data >= *offset)
2301                 *stat = (u32)(new_data - *offset);
2302         else
2303                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2304 }
2305
2306 /**
2307  * Add subset of device sysctls safe to use in recovery mode
2308  */
2309 void
2310 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2311 {
2312         device_t dev = pf->dev;
2313
2314         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2315         struct sysctl_oid_list *ctx_list =
2316             SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2317
2318         struct sysctl_oid *debug_node;
2319         struct sysctl_oid_list *debug_list;
2320
2321         SYSCTL_ADD_PROC(ctx, ctx_list,
2322             OID_AUTO, "fw_version",
2323             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2324             ixl_sysctl_show_fw, "A", "Firmware version");
2325
2326         /* Add sysctls meant to print debug information, but don't list them
2327          * in "sysctl -a" output. */
2328         debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2329             OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2330             "Debug Sysctls");
2331         debug_list = SYSCTL_CHILDREN(debug_node);
2332
2333         SYSCTL_ADD_UINT(ctx, debug_list,
2334             OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2335             &pf->hw.debug_mask, 0, "Shared code debug message level");
2336
2337         SYSCTL_ADD_UINT(ctx, debug_list,
2338             OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2339             &pf->dbg_mask, 0, "Non-shared code debug message level");
2340
2341         SYSCTL_ADD_PROC(ctx, debug_list,
2342             OID_AUTO, "dump_debug_data",
2343             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2344             pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2345
2346         SYSCTL_ADD_PROC(ctx, debug_list,
2347             OID_AUTO, "do_pf_reset",
2348             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2349             pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2350
2351         SYSCTL_ADD_PROC(ctx, debug_list,
2352             OID_AUTO, "do_core_reset",
2353             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2354             pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2355
2356         SYSCTL_ADD_PROC(ctx, debug_list,
2357             OID_AUTO, "do_global_reset",
2358             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2359             pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2360
2361         SYSCTL_ADD_PROC(ctx, debug_list,
2362             OID_AUTO, "queue_interrupt_table",
2363             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2364             pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2365 }
2366
2367 void
2368 ixl_add_device_sysctls(struct ixl_pf *pf)
2369 {
2370         device_t dev = pf->dev;
2371         struct i40e_hw *hw = &pf->hw;
2372
2373         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2374         struct sysctl_oid_list *ctx_list =
2375             SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2376
2377         struct sysctl_oid *debug_node;
2378         struct sysctl_oid_list *debug_list;
2379
2380         struct sysctl_oid *fec_node;
2381         struct sysctl_oid_list *fec_list;
2382         struct sysctl_oid *eee_node;
2383         struct sysctl_oid_list *eee_list;
2384
2385         /* Set up sysctls */
2386         SYSCTL_ADD_PROC(ctx, ctx_list,
2387             OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2388             pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2389
2390         SYSCTL_ADD_PROC(ctx, ctx_list,
2391             OID_AUTO, "advertise_speed",
2392             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2393             ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2394
2395         SYSCTL_ADD_PROC(ctx, ctx_list,
2396             OID_AUTO, "supported_speeds",
2397             CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2398             ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2399
2400         SYSCTL_ADD_PROC(ctx, ctx_list,
2401             OID_AUTO, "current_speed",
2402             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2403             ixl_sysctl_current_speed, "A", "Current Port Speed");
2404
2405         SYSCTL_ADD_PROC(ctx, ctx_list,
2406             OID_AUTO, "fw_version",
2407             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2408             ixl_sysctl_show_fw, "A", "Firmware version");
2409
2410         SYSCTL_ADD_PROC(ctx, ctx_list,
2411             OID_AUTO, "unallocated_queues",
2412             CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2413             ixl_sysctl_unallocated_queues, "I",
2414             "Queues not allocated to a PF or VF");
2415
2416         SYSCTL_ADD_PROC(ctx, ctx_list,
2417             OID_AUTO, "tx_itr",
2418             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2419             ixl_sysctl_pf_tx_itr, "I",
2420             "Immediately set TX ITR value for all queues");
2421
2422         SYSCTL_ADD_PROC(ctx, ctx_list,
2423             OID_AUTO, "rx_itr",
2424             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2425             ixl_sysctl_pf_rx_itr, "I",
2426             "Immediately set RX ITR value for all queues");
2427
2428         SYSCTL_ADD_INT(ctx, ctx_list,
2429             OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2430             &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2431
2432         SYSCTL_ADD_INT(ctx, ctx_list,
2433             OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2434             &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2435
2436         /* Add FEC sysctls for 25G adapters */
2437         if (i40e_is_25G_device(hw->device_id)) {
2438                 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2439                     OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2440                     "FEC Sysctls");
2441                 fec_list = SYSCTL_CHILDREN(fec_node);
2442
2443                 SYSCTL_ADD_PROC(ctx, fec_list,
2444                     OID_AUTO, "fc_ability",
2445                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2446                     ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2447
2448                 SYSCTL_ADD_PROC(ctx, fec_list,
2449                     OID_AUTO, "rs_ability",
2450                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2451                     ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2452
2453                 SYSCTL_ADD_PROC(ctx, fec_list,
2454                     OID_AUTO, "fc_requested",
2455                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2456                     ixl_sysctl_fec_fc_request, "I",
2457                     "FC FEC mode requested on link");
2458
2459                 SYSCTL_ADD_PROC(ctx, fec_list,
2460                     OID_AUTO, "rs_requested",
2461                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2462                     ixl_sysctl_fec_rs_request, "I",
2463                     "RS FEC mode requested on link");
2464
2465                 SYSCTL_ADD_PROC(ctx, fec_list,
2466                     OID_AUTO, "auto_fec_enabled",
2467                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2468                     ixl_sysctl_fec_auto_enable, "I",
2469                     "Let FW decide FEC ability/request modes");
2470         }
2471
2472         SYSCTL_ADD_PROC(ctx, ctx_list,
2473             OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2474             pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2475
2476         eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2477             OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2478             "Energy Efficient Ethernet (EEE) Sysctls");
2479         eee_list = SYSCTL_CHILDREN(eee_node);
2480
2481         SYSCTL_ADD_PROC(ctx, eee_list,
2482             OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2483             pf, 0, ixl_sysctl_eee_enable, "I",
2484             "Enable Energy Efficient Ethernet (EEE)");
2485
2486         SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2487             CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2488             "TX LPI status");
2489
2490         SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2491             CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2492             "RX LPI status");
2493
2494         SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2495             CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2496             "TX LPI count");
2497
2498         SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2499             CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2500             "RX LPI count");
2501
2502         /* Add sysctls meant to print debug information, but don't list them
2503          * in "sysctl -a" output. */
2504         debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2505             OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2506             "Debug Sysctls");
2507         debug_list = SYSCTL_CHILDREN(debug_node);
2508
2509         SYSCTL_ADD_UINT(ctx, debug_list,
2510             OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2511             &pf->hw.debug_mask, 0, "Shared code debug message level");
2512
2513         SYSCTL_ADD_UINT(ctx, debug_list,
2514             OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2515             &pf->dbg_mask, 0, "Non-shared code debug message level");
2516
2517         SYSCTL_ADD_PROC(ctx, debug_list,
2518             OID_AUTO, "link_status",
2519             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2520             pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2521
2522         SYSCTL_ADD_PROC(ctx, debug_list,
2523             OID_AUTO, "phy_abilities",
2524             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2525             pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2526
2527         SYSCTL_ADD_PROC(ctx, debug_list,
2528             OID_AUTO, "filter_list",
2529             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2530             pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2531
2532         SYSCTL_ADD_PROC(ctx, debug_list,
2533             OID_AUTO, "hw_res_alloc",
2534             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2535             pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2536
2537         SYSCTL_ADD_PROC(ctx, debug_list,
2538             OID_AUTO, "switch_config",
2539             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2540             pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2541
2542         SYSCTL_ADD_PROC(ctx, debug_list,
2543             OID_AUTO, "switch_vlans",
2544             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2545             pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2546
2547         SYSCTL_ADD_PROC(ctx, debug_list,
2548             OID_AUTO, "rss_key",
2549             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2550             pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2551
2552         SYSCTL_ADD_PROC(ctx, debug_list,
2553             OID_AUTO, "rss_lut",
2554             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2555             pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2556
2557         SYSCTL_ADD_PROC(ctx, debug_list,
2558             OID_AUTO, "rss_hena",
2559             CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2560             pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2561
2562         SYSCTL_ADD_PROC(ctx, debug_list,
2563             OID_AUTO, "disable_fw_link_management",
2564             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2565             pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2566
2567         SYSCTL_ADD_PROC(ctx, debug_list,
2568             OID_AUTO, "dump_debug_data",
2569             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2570             pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2571
2572         SYSCTL_ADD_PROC(ctx, debug_list,
2573             OID_AUTO, "do_pf_reset",
2574             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2575             pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2576
2577         SYSCTL_ADD_PROC(ctx, debug_list,
2578             OID_AUTO, "do_core_reset",
2579             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2580             pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2581
2582         SYSCTL_ADD_PROC(ctx, debug_list,
2583             OID_AUTO, "do_global_reset",
2584             CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2585             pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2586
2587         SYSCTL_ADD_PROC(ctx, debug_list,
2588             OID_AUTO, "queue_interrupt_table",
2589             CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2590             pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2591
2592         if (pf->has_i2c) {
2593                 SYSCTL_ADD_PROC(ctx, debug_list,
2594                     OID_AUTO, "read_i2c_byte",
2595                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2596                     pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2597
2598                 SYSCTL_ADD_PROC(ctx, debug_list,
2599                     OID_AUTO, "write_i2c_byte",
2600                     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2601                     pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2602
2603                 SYSCTL_ADD_PROC(ctx, debug_list,
2604                     OID_AUTO, "read_i2c_diag_data",
2605                     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2606                     pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2607         }
2608 }
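
/*
 * Usage note (assumes the first ixl(4) instance, unit 0): because the
 * "debug" node is created with CTLFLAG_SKIP, its children are hidden from
 * "sysctl -a" listings but remain readable by full name, e.g.
 * "sysctl dev.ixl.0.debug.link_status".
 */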
2609
2610 /*
2611  * Primarily for finding out how many queues can be assigned to VFs
2612  * at runtime.
2613  */
2614 static int
2615 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2616 {
2617         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2618         int queues;
2619
2620         queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2621
2622         return sysctl_handle_int(oidp, NULL, queues, req);
2623 }
2624
2625 static const char *
2626 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2627 {
2628         const char * link_speed_str[] = {
2629                 "Unknown",
2630                 "100 Mbps",
2631                 "1 Gbps",
2632                 "10 Gbps",
2633                 "40 Gbps",
2634                 "20 Gbps",
2635                 "25 Gbps",
2636                 "2.5 Gbps",
2637                 "5 Gbps"
2638         };
2639         int index;
2640
2641         switch (link_speed) {
2642         case I40E_LINK_SPEED_100MB:
2643                 index = 1;
2644                 break;
2645         case I40E_LINK_SPEED_1GB:
2646                 index = 2;
2647                 break;
2648         case I40E_LINK_SPEED_10GB:
2649                 index = 3;
2650                 break;
2651         case I40E_LINK_SPEED_40GB:
2652                 index = 4;
2653                 break;
2654         case I40E_LINK_SPEED_20GB:
2655                 index = 5;
2656                 break;
2657         case I40E_LINK_SPEED_25GB:
2658                 index = 6;
2659                 break;
2660         case I40E_LINK_SPEED_2_5GB:
2661                 index = 7;
2662                 break;
2663         case I40E_LINK_SPEED_5GB:
2664                 index = 8;
2665                 break;
2666         case I40E_LINK_SPEED_UNKNOWN:
2667         default:
2668                 index = 0;
2669                 break;
2670         }
2671
2672         return (link_speed_str[index]);
2673 }
2674
2675 int
2676 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2677 {
2678         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2679         struct i40e_hw *hw = &pf->hw;
2680         int error = 0;
2681
2682         ixl_update_link_status(pf);
2683
2684         error = sysctl_handle_string(oidp,
2685             __DECONST(void *,
2686                 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2687             8, req);
2688
2689         return (error);
2690 }
2691
2692 /*
2693  * Converts an 8-bit link-speed bitmap between the sysctl flag format
2694  * and the Admin Queue flag format.
2695  */
2696 static u8
2697 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2698 {
2699 #define SPEED_MAP_SIZE 8
2700         static u16 speedmap[SPEED_MAP_SIZE] = {
2701                 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2702                 (I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2703                 (I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2704                 (I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2705                 (I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2706                 (I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2707                 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2708                 (I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2709         };
2710         u8 retval = 0;
2711
2712         for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2713                 if (to_aq)
2714                         retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2715                 else
2716                         retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2717         }
2718
2719         return (retval);
2720 }
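
/*
 * Worked example (illustrative only): each speedmap entry packs the Admin
 * Queue flag in its low byte and the corresponding sysctl flag in its
 * high byte, so with to_aq == false an input of I40E_LINK_SPEED_10GB maps
 * to the sysctl flag 0x4, and with to_aq == true a sysctl value of 0x4
 * maps back to I40E_LINK_SPEED_10GB.
 */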
2721
2722 int
2723 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2724 {
2725         struct i40e_hw *hw = &pf->hw;
2726         device_t dev = pf->dev;
2727         struct i40e_aq_get_phy_abilities_resp abilities;
2728         struct i40e_aq_set_phy_config config;
2729         enum i40e_status_code aq_error = 0;
2730
2731         /* Get current capability information */
2732         aq_error = i40e_aq_get_phy_capabilities(hw,
2733             FALSE, FALSE, &abilities, NULL);
2734         if (aq_error) {
2735                 device_printf(dev,
2736                     "%s: Error getting phy capabilities %d,"
2737                     " aq error: %d\n", __func__, aq_error,
2738                     hw->aq.asq_last_status);
2739                 return (EIO);
2740         }
2741
2742         /* Prepare new config */
2743         bzero(&config, sizeof(config));
2744         if (from_aq)
2745                 config.link_speed = speeds;
2746         else
2747                 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2748         config.phy_type = abilities.phy_type;
2749         config.phy_type_ext = abilities.phy_type_ext;
2750         config.abilities = abilities.abilities
2751             | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2752         config.eee_capability = abilities.eee_capability;
2753         config.eeer = abilities.eeer_val;
2754         config.low_power_ctrl = abilities.d3_lpan;
2755         config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2756             & I40E_AQ_PHY_FEC_CONFIG_MASK;
2757
2758         /* Do aq command & restart link */
2759         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2760         if (aq_error) {
2761                 device_printf(dev,
2762                     "%s: Error setting new phy config %d,"
2763                     " aq error: %d\n", __func__, aq_error,
2764                     hw->aq.asq_last_status);
2765                 return (EIO);
2766         }
2767
2768         return (0);
2769 }
2770
2771 /*
2772 ** Supported link speeds
2773 **      Flags:
2774 **       0x1 - 100 Mb
2775 **       0x2 - 1G
2776 **       0x4 - 10G
2777 **       0x8 - 20G
2778 **      0x10 - 25G
2779 **      0x20 - 40G
2780 **      0x40 - 2.5G
2781 **      0x80 - 5G
2782 */
2783 static int
2784 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2785 {
2786         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2787         int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2788
2789         return sysctl_handle_int(oidp, NULL, supported, req);
2790 }
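/*
 * Minimal usage sketch, assuming this handler is attached as the
 * read-only dev.ixl.<unit>.supported_speeds node (the exact OID name
 * depends on how the driver registers it):
 *
 *   # sysctl dev.ixl.0.supported_speeds
 *   dev.ixl.0.supported_speeds: 38
 *
 * Decimal 38 is 0x26 = 0x2 | 0x4 | 0x20, i.e. the port supports 1G, 10G
 * and 40G according to the flag table above.
 */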
2791
2792 /*
2793 ** Control link advertise speed:
2794 **      Flags:
2795 **       0x1 - advertise 100 Mb
2796 **       0x2 - advertise 1G
2797 **       0x4 - advertise 10G
2798 **       0x8 - advertise 20G
2799 **      0x10 - advertise 25G
2800 **      0x20 - advertise 40G
2801 **      0x40 - advertise 2.5G
2802 **      0x80 - advertise 5G
2803 **
2804 **      Set to 0 to disable link
2805 */
2806 int
2807 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2808 {
2809         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2810         device_t dev = pf->dev;
2811         u8 converted_speeds;
2812         int requested_ls = 0;
2813         int error = 0;
2814
2815         /* Read in new mode */
2816         requested_ls = pf->advertised_speed;
2817         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2818         if ((error) || (req->newptr == NULL))
2819                 return (error);
2820         if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2821                 device_printf(dev, "Interface is currently in FW recovery mode. "
2822                                 "Setting advertised speed is not supported\n");
2823                 return (EINVAL);
2824         }
2825
2826         /* Error out if bits outside of possible flag range are set */
2827         if ((requested_ls & ~((u8)0xFF)) != 0) {
2828                 device_printf(dev, "Input advertised speed out of range; "
2829                     "valid flags are: 0x%02x\n",
2830                     ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2831                 return (EINVAL);
2832         }
2833
2834         /* Check if adapter supports input value */
2835         converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2836         if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2837                 device_printf(dev, "Invalid advertised speed; "
2838                     "valid flags are: 0x%02x\n",
2839                     ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2840                 return (EINVAL);
2841         }
2842
2843         error = ixl_set_advertised_speeds(pf, requested_ls, false);
2844         if (error)
2845                 return (error);
2846
2847         pf->advertised_speed = requested_ls;
2848         ixl_update_link_status(pf);
2849         return (0);
2850 }
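/*
 * Minimal usage sketch, assuming this handler is attached as the
 * read-write dev.ixl.<unit>.advertise_speed node (the exact OID name
 * depends on how the driver registers it):
 *
 *   # sysctl dev.ixl.0.advertise_speed=4      # advertise 10G only (flag 0x4)
 *   # sysctl dev.ixl.0.advertise_speed=0      # disable link
 *
 * The value must be a subset of the flags reported by supported_speeds,
 * otherwise this handler returns EINVAL.
 */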
2851
2852 /*
2853  * Input: bitmap of enum i40e_aq_link_speed
2854  */
2855 u64
2856 ixl_max_aq_speed_to_value(u8 link_speeds)
2857 {
2858         if (link_speeds & I40E_LINK_SPEED_40GB)
2859                 return IF_Gbps(40);
2860         if (link_speeds & I40E_LINK_SPEED_25GB)
2861                 return IF_Gbps(25);
2862         if (link_speeds & I40E_LINK_SPEED_20GB)
2863                 return IF_Gbps(20);
2864         if (link_speeds & I40E_LINK_SPEED_10GB)
2865                 return IF_Gbps(10);
2866         if (link_speeds & I40E_LINK_SPEED_5GB)
2867                 return IF_Gbps(5);
2868         if (link_speeds & I40E_LINK_SPEED_2_5GB)
2869                 return IF_Mbps(2500);
2870         if (link_speeds & I40E_LINK_SPEED_1GB)
2871                 return IF_Gbps(1);
2872         if (link_speeds & I40E_LINK_SPEED_100MB)
2873                 return IF_Mbps(100);
2874         else
2875                 /* Minimum supported link speed */
2876                 return IF_Mbps(100);
2877 }
2878
2879 /*
2880 ** Get the width and transaction speed of
2881 ** the bus this adapter is plugged into.
2882 */
2883 void
2884 ixl_get_bus_info(struct ixl_pf *pf)
2885 {
2886         struct i40e_hw *hw = &pf->hw;
2887         device_t dev = pf->dev;
2888         u16 link;
2889         u32 offset, num_ports;
2890         u64 max_speed;
2891
2892         /* Some devices don't use PCIE */
2893         if (hw->mac.type == I40E_MAC_X722)
2894                 return;
2895
2896         /* Read PCI Express Capabilities Link Status Register */
2897         pci_find_cap(dev, PCIY_EXPRESS, &offset);
2898         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2899
2900         /* Fill out hw struct with PCIE info */
2901         i40e_set_pci_config_data(hw, link);
2902
2903         /* Use info to print out bandwidth messages */
2904         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2905             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2906             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2907             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2908             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2909             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2910             (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2911             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2912             ("Unknown"));
2913
2914         /*
2915          * If adapter is in slot with maximum supported speed,
2916          * no warning message needs to be printed out.
2917          */
2918         if (hw->bus.speed >= i40e_bus_speed_8000
2919             && hw->bus.width >= i40e_bus_width_pcie_x8)
2920                 return;
2921
2922         num_ports = bitcount32(hw->func_caps.valid_functions);
2923         max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2924
2925         if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2926                 device_printf(dev, "PCI-Express bandwidth available"
2927                     " for this device may be insufficient for"
2928                     " optimal performance.\n");
2929                 device_printf(dev, "Please move the device to a different"
2930                     " PCI-e link with more lanes and/or higher"
2931                     " transfer rate.\n");
2932         }
2933 }
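/*
 * Worked example of the bandwidth check above, treating the enum values
 * numerically (i40e_bus_speed_8000 == 8000, i40e_bus_width_pcie_x4 == 4):
 * a two-port adapter whose fastest supported speed is 40G gives
 * num_ports * max_speed = 2 * 40000 = 80000, while a PCIe 3.0 x4 slot
 * gives hw->bus.speed * hw->bus.width = 8000 * 4 = 32000, so both
 * warnings are printed.  A Gen3 x8 slot never reaches this point because
 * of the early return above.  This is a rough heuristic, not a real
 * throughput calculation.
 */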
2934
2935 static int
2936 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2937 {
2938         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
2939         struct i40e_hw  *hw = &pf->hw;
2940         struct sbuf     *sbuf;
2941
2942         sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2943         ixl_nvm_version_str(hw, sbuf);
2944         sbuf_finish(sbuf);
2945         sbuf_delete(sbuf);
2946
2947         return (0);
2948 }
2949
2950 void
2951 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2952 {
2953         u8 nvma_ptr = nvma->config & 0xFF;
2954         u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2955         const char * cmd_str;
2956
2957         switch (nvma->command) {
2958         case I40E_NVM_READ:
2959                 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2960                     nvma->offset == 0 && nvma->data_size == 1) {
2961                         device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2962                         return;
2963                 }
2964                 cmd_str = "READ ";
2965                 break;
2966         case I40E_NVM_WRITE:
2967                 cmd_str = "WRITE";
2968                 break;
2969         default:
2970                 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2971                 return;
2972         }
2973         device_printf(dev,
2974             "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2975             cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
2976 }
2977
2978 int
2979 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
2980 {
2981         struct i40e_hw *hw = &pf->hw;
2982         struct i40e_nvm_access *nvma;
2983         device_t dev = pf->dev;
2984         enum i40e_status_code status = 0;
2985         size_t nvma_size, ifd_len, exp_len;
2986         int err, perrno;
2987
2988         DEBUGFUNC("ixl_handle_nvmupd_cmd");
2989
2990         /* Sanity checks */
2991         nvma_size = sizeof(struct i40e_nvm_access);
2992         ifd_len = ifd->ifd_len;
2993
2994         if (ifd_len < nvma_size ||
2995             ifd->ifd_data == NULL) {
2996                 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
2997                     __func__);
2998                 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
2999                     __func__, ifd_len, nvma_size);
3000                 device_printf(dev, "%s: data pointer: %p\n", __func__,
3001                     ifd->ifd_data);
3002                 return (EINVAL);
3003         }
3004
3005         nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3006         err = copyin(ifd->ifd_data, nvma, ifd_len);
3007         if (err) {
3008                 device_printf(dev, "%s: Cannot get request from user space\n",
3009                     __func__);
3010                 free(nvma, M_IXL);
3011                 return (err);
3012         }
3013
3014         if (pf->dbg_mask & IXL_DBG_NVMUPD)
3015                 ixl_print_nvm_cmd(dev, nvma);
3016
3017         if (IXL_PF_IS_RESETTING(pf)) {
3018                 int count = 0;
3019                 while (count++ < 100) {
3020                         i40e_msec_delay(100);
3021                         if (!(IXL_PF_IS_RESETTING(pf)))
3022                                 break;
3023                 }
3024         }
3025
3026         if (IXL_PF_IS_RESETTING(pf)) {
3027                 device_printf(dev,
3028                     "%s: timeout waiting for EMP reset to finish\n",
3029                     __func__);
3030                 free(nvma, M_IXL);
3031                 return (-EBUSY);
3032         }
3033
3034         if (nvma->data_size < 1 || nvma->data_size > 4096) {
3035                 device_printf(dev,
3036                     "%s: invalid request, data size not in supported range\n",
3037                     __func__);
3038                 free(nvma, M_IXL);
3039                 return (EINVAL);
3040         }
3041
3042         /*
3043          * Older versions of the NVM update tool don't set ifd_len to the size
3044          * of the entire buffer passed to the ioctl. Check the data_size field
3045          * in the contained i40e_nvm_access struct and ensure everything is
3046          * copied in from userspace.
3047          */
3048         exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
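        /*
         * For example, a request carrying 4096 data bytes needs
         * sizeof(struct i40e_nvm_access) + 4095 bytes in total, since the
         * struct's trailing one-byte data member already accounts for the
         * first data byte.
         */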
3049
3050         if (ifd_len < exp_len) {
3051                 ifd_len = exp_len;
3052                 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3053                 err = copyin(ifd->ifd_data, nvma, ifd_len);
3054                 if (err) {
3055                         device_printf(dev, "%s: Cannot get request from user space\n",
3056                                         __func__);
3057                         free(nvma, M_IXL);
3058                         return (err);
3059                 }
3060         }
3061
3062         // TODO: Might need a different lock here
3063         // IXL_PF_LOCK(pf);
3064         status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3065         // IXL_PF_UNLOCK(pf);
3066
3067         err = copyout(nvma, ifd->ifd_data, ifd_len);
3068         free(nvma, M_IXL);
3069         if (err) {
3070                 device_printf(dev, "%s: Cannot return data to user space\n",
3071                                 __func__);
3072                 return (err);
3073         }
3074
3075         /* Let the nvmupdate report errors, show them only when debug is enabled */
3076         if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3077                 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3078                     i40e_stat_str(hw, status), perrno);
3079
3080         /*
3081          * -EPERM is actually ERESTART, which the kernel interprets as a request
3082          * to run this ioctl again, so return -EACCES instead of -EPERM.
3083          */
3084         if (perrno == -EPERM)
3085                 return (-EACCES);
3086         else
3087                 return (perrno);
3088 }
3089
3090 int
3091 ixl_find_i2c_interface(struct ixl_pf *pf)
3092 {
3093         struct i40e_hw *hw = &pf->hw;
3094         bool i2c_en, port_matched;
3095         u32 reg;
3096
3097         for (int i = 0; i < 4; i++) {
3098                 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3099                 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3100                 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3101                     >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3102                     & BIT(hw->port);
3103                 if (i2c_en && port_matched)
3104                         return (i);
3105         }
3106
3107         return (-1);
3108 }
3109
3110 static char *
3111 ixl_phy_type_string(u32 bit_pos, bool ext)
3112 {
3113         static char * phy_types_str[32] = {
3114                 "SGMII",
3115                 "1000BASE-KX",
3116                 "10GBASE-KX4",
3117                 "10GBASE-KR",
3118                 "40GBASE-KR4",
3119                 "XAUI",
3120                 "XFI",
3121                 "SFI",
3122                 "XLAUI",
3123                 "XLPPI",
3124                 "40GBASE-CR4",
3125                 "10GBASE-CR1",
3126                 "SFP+ Active DA",
3127                 "QSFP+ Active DA",
3128                 "Reserved (14)",
3129                 "Reserved (15)",
3130                 "Reserved (16)",
3131                 "100BASE-TX",
3132                 "1000BASE-T",
3133                 "10GBASE-T",
3134                 "10GBASE-SR",
3135                 "10GBASE-LR",
3136                 "10GBASE-SFP+Cu",
3137                 "10GBASE-CR1",
3138                 "40GBASE-CR4",
3139                 "40GBASE-SR4",
3140                 "40GBASE-LR4",
3141                 "1000BASE-SX",
3142                 "1000BASE-LX",
3143                 "1000BASE-T Optical",
3144                 "20GBASE-KR2",
3145                 "Reserved (31)"
3146         };
3147         static char * ext_phy_types_str[8] = {
3148                 "25GBASE-KR",
3149                 "25GBASE-CR",
3150                 "25GBASE-SR",
3151                 "25GBASE-LR",
3152                 "25GBASE-AOC",
3153                 "25GBASE-ACC",
3154                 "2.5GBASE-T",
3155                 "5GBASE-T"
3156         };
3157
3158         if (ext && bit_pos > 7) return "Invalid_Ext";
3159         if (bit_pos > 31) return "Invalid";
3160
3161         return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3162 }
3163
3164 /* TODO: ERJ: I don't think this is necessary anymore. */
3165 int
3166 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3167 {
3168         device_t dev = pf->dev;
3169         struct i40e_hw *hw = &pf->hw;
3170         struct i40e_aq_desc desc;
3171         enum i40e_status_code status;
3172
3173         struct i40e_aqc_get_link_status *aq_link_status =
3174                 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3175
3176         i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3177         link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3178         status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3179         if (status) {
3180                 device_printf(dev,
3181                     "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3182                     __func__, i40e_stat_str(hw, status),
3183                     i40e_aq_str(hw, hw->aq.asq_last_status));
3184                 return (EIO);
3185         }
3186
3187         bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3188         return (0);
3189 }
3190
3191 static char *
3192 ixl_phy_type_string_ls(u8 val)
3193 {
3194         if (val >= 0x1F)
3195                 return ixl_phy_type_string(val - 0x1F, true);
3196         else
3197                 return ixl_phy_type_string(val, false);
3198 }
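/*
 * Example, using the tables above: a link-status phy_type of 0x22 is
 * 0x1F + 3, so it is looked up in the extended table and prints as
 * "25GBASE-LR", while a value of 0x03 stays in the base table and prints
 * as "10GBASE-KR".
 */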
3199
3200 static int
3201 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3202 {
3203         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3204         device_t dev = pf->dev;
3205         struct sbuf *buf;
3206         int error = 0;
3207
3208         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3209         if (!buf) {
3210                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3211                 return (ENOMEM);
3212         }
3213
3214         struct i40e_aqc_get_link_status link_status;
3215         error = ixl_aq_get_link_status(pf, &link_status);
3216         if (error) {
3217                 sbuf_delete(buf);
3218                 return (error);
3219         }
3220
3221         sbuf_printf(buf, "\n"
3222             "PHY Type : 0x%02x<%s>\n"
3223             "Speed    : 0x%02x\n"
3224             "Link info: 0x%02x\n"
3225             "AN info  : 0x%02x\n"
3226             "Ext info : 0x%02x\n"
3227             "Loopback : 0x%02x\n"
3228             "Max Frame: %d\n"
3229             "Config   : 0x%02x\n"
3230             "Power    : 0x%02x",
3231             link_status.phy_type,
3232             ixl_phy_type_string_ls(link_status.phy_type),
3233             link_status.link_speed,
3234             link_status.link_info,
3235             link_status.an_info,
3236             link_status.ext_info,
3237             link_status.loopback,
3238             link_status.max_frame_size,
3239             link_status.config,
3240             link_status.power_desc);
3241
3242         error = sbuf_finish(buf);
3243         if (error)
3244                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3245
3246         sbuf_delete(buf);
3247         return (error);
3248 }
3249
3250 static int
3251 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3252 {
3253         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3254         struct i40e_hw *hw = &pf->hw;
3255         device_t dev = pf->dev;
3256         enum i40e_status_code status;
3257         struct i40e_aq_get_phy_abilities_resp abilities;
3258         struct sbuf *buf;
3259         int error = 0;
3260
3261         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3262         if (!buf) {
3263                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3264                 return (ENOMEM);
3265         }
3266
3267         status = i40e_aq_get_phy_capabilities(hw,
3268             FALSE, FALSE, &abilities, NULL);
3269         if (status) {
3270                 device_printf(dev,
3271                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3272                     __func__, i40e_stat_str(hw, status),
3273                     i40e_aq_str(hw, hw->aq.asq_last_status));
3274                 sbuf_delete(buf);
3275                 return (EIO);
3276         }
3277
3278         sbuf_printf(buf, "\n"
3279             "PHY Type : %08x",
3280             abilities.phy_type);
3281
3282         if (abilities.phy_type != 0) {
3283                 sbuf_printf(buf, "<");
3284                 for (int i = 0; i < 32; i++)
3285                         if ((1 << i) & abilities.phy_type)
3286                                 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3287                 sbuf_printf(buf, ">");
3288         }
3289
3290         sbuf_printf(buf, "\nPHY Ext  : %02x",
3291             abilities.phy_type_ext);
3292
3293         if (abilities.phy_type_ext != 0) {
3294                 sbuf_printf(buf, "<");
3295                 for (int i = 0; i < 4; i++)
3296                         if ((1 << i) & abilities.phy_type_ext)
3297                                 sbuf_printf(buf, "%s,",
3298                                     ixl_phy_type_string(i, true));
3299                 sbuf_printf(buf, ">");
3300         }
3301
3302         sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
3303         if (abilities.link_speed != 0) {
3304                 u8 link_speed;
3305                 sbuf_printf(buf, " <");
3306                 for (int i = 0; i < 8; i++) {
3307                         link_speed = (1 << i) & abilities.link_speed;
3308                         if (link_speed)
3309                                 sbuf_printf(buf, "%s, ",
3310                                     ixl_link_speed_string(link_speed));
3311                 }
3312                 sbuf_printf(buf, ">");
3313         }
3314
3315         sbuf_printf(buf, "\n"
3316             "Abilities: %02x\n"
3317             "EEE cap  : %04x\n"
3318             "EEER reg : %08x\n"
3319             "D3 Lpan  : %02x\n"
3320             "ID       : %02x %02x %02x %02x\n"
3321             "ModType  : %02x %02x %02x\n"
3322             "ModType E: %01x\n"
3323             "FEC Cfg  : %02x\n"
3324             "Ext CC   : %02x",
3325             abilities.abilities, abilities.eee_capability,
3326             abilities.eeer_val, abilities.d3_lpan,
3327             abilities.phy_id[0], abilities.phy_id[1],
3328             abilities.phy_id[2], abilities.phy_id[3],
3329             abilities.module_type[0], abilities.module_type[1],
3330             abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3331             abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3332             abilities.ext_comp_code);
3333
3334         error = sbuf_finish(buf);
3335         if (error)
3336                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3337
3338         sbuf_delete(buf);
3339         return (error);
3340 }
3341
3342 static int
3343 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3344 {
3345         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3346         struct ixl_vsi *vsi = &pf->vsi;
3347         struct ixl_mac_filter *f;
3348         device_t dev = pf->dev;
3349         int error = 0, ftl_len = 0, ftl_counter = 0;
3350
3351         struct sbuf *buf;
3352
3353         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3354         if (!buf) {
3355                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3356                 return (ENOMEM);
3357         }
3358
3359         sbuf_printf(buf, "\n");
3360
3361         /* Print MAC filters */
3362         sbuf_printf(buf, "PF Filters:\n");
3363         LIST_FOREACH(f, &vsi->ftl, ftle)
3364                 ftl_len++;
3365
3366         if (ftl_len < 1)
3367                 sbuf_printf(buf, "(none)\n");
3368         else {
3369                 LIST_FOREACH(f, &vsi->ftl, ftle) {
3370                         sbuf_printf(buf,
3371                             MAC_FORMAT ", vlan %4d, flags %#06x",
3372                             MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3373                         /* don't print '\n' for last entry */
3374                         if (++ftl_counter != ftl_len)
3375                                 sbuf_printf(buf, "\n");
3376                 }
3377         }
3378
3379 #ifdef PCI_IOV
3380         /* TODO: Give each VF its own filter list sysctl */
3381         struct ixl_vf *vf;
3382         if (pf->num_vfs > 0) {
3383                 sbuf_printf(buf, "\n\n");
3384                 for (int i = 0; i < pf->num_vfs; i++) {
3385                         vf = &pf->vfs[i];
3386                         if (!(vf->vf_flags & VF_FLAG_ENABLED))
3387                                 continue;
3388
3389                         vsi = &vf->vsi;
3390                         ftl_len = 0, ftl_counter = 0;
3391                         sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3392                         LIST_FOREACH(f, &vsi->ftl, ftle)
3393                                 ftl_len++;
3394
3395                         if (ftl_len < 1)
3396                                 sbuf_printf(buf, "(none)\n");
3397                         else {
3398                                 LIST_FOREACH(f, &vsi->ftl, ftle) {
3399                                         sbuf_printf(buf,
3400                                             MAC_FORMAT ", vlan %4d, flags %#06x\n",
3401                                             MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3402                                 }
3403                         }
3404                 }
3405         }
3406 #endif
3407
3408         error = sbuf_finish(buf);
3409         if (error)
3410                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3411         sbuf_delete(buf);
3412
3413         return (error);
3414 }
3415
3416 #define IXL_SW_RES_SIZE 0x14
3417 int
3418 ixl_res_alloc_cmp(const void *a, const void *b)
3419 {
3420         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3421         one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3422         two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3423
3424         return ((int)one->resource_type - (int)two->resource_type);
3425 }
3426
3427 /*
3428  * Longest string length: 25
3429  */
3430 const char *
3431 ixl_switch_res_type_string(u8 type)
3432 {
3433         static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3434                 "VEB",
3435                 "VSI",
3436                 "Perfect Match MAC address",
3437                 "S-tag",
3438                 "(Reserved)",
3439                 "Multicast hash entry",
3440                 "Unicast hash entry",
3441                 "VLAN",
3442                 "VSI List entry",
3443                 "(Reserved)",
3444                 "VLAN Statistic Pool",
3445                 "Mirror Rule",
3446                 "Queue Set",
3447                 "Inner VLAN Forward filter",
3448                 "(Reserved)",
3449                 "Inner MAC",
3450                 "IP",
3451                 "GRE/VN1 Key",
3452                 "VN2 Key",
3453                 "Tunneling Port"
3454         };
3455
3456         if (type < IXL_SW_RES_SIZE)
3457                 return ixl_switch_res_type_strings[type];
3458         else
3459                 return "(Reserved)";
3460 }
3461
3462 static int
3463 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3464 {
3465         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3466         struct i40e_hw *hw = &pf->hw;
3467         device_t dev = pf->dev;
3468         struct sbuf *buf;
3469         enum i40e_status_code status;
3470         int error = 0;
3471
3472         u8 num_entries;
3473         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3474
3475         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3476         if (!buf) {
3477                 device_printf(dev, "Could not allocate sbuf for output.\n");
3478                 return (ENOMEM);
3479         }
3480
3481         bzero(resp, sizeof(resp));
3482         status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3483                                 resp,
3484                                 IXL_SW_RES_SIZE,
3485                                 NULL);
3486         if (status) {
3487                 device_printf(dev,
3488                     "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3489                     __func__, i40e_stat_str(hw, status),
3490                     i40e_aq_str(hw, hw->aq.asq_last_status));
3491                 sbuf_delete(buf);
3492                 return (EIO);
3493         }
3494
3495         /* Sort entries by type for display */
3496         qsort(resp, num_entries,
3497             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3498             &ixl_res_alloc_cmp);
3499
3500         sbuf_cat(buf, "\n");
3501         sbuf_printf(buf, "# of entries: %d\n", num_entries);
3502         sbuf_printf(buf,
3503             "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3504             "                          | (this)     | (all) | (this) | (all)       \n");
3505         for (int i = 0; i < num_entries; i++) {
3506                 sbuf_printf(buf,
3507                     "%25s | %10d   %5d   %6d   %12d",
3508                     ixl_switch_res_type_string(resp[i].resource_type),
3509                     resp[i].guaranteed,
3510                     resp[i].total,
3511                     resp[i].used,
3512                     resp[i].total_unalloced);
3513                 if (i < num_entries - 1)
3514                         sbuf_cat(buf, "\n");
3515         }
3516
3517         error = sbuf_finish(buf);
3518         if (error)
3519                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3520
3521         sbuf_delete(buf);
3522         return (error);
3523 }
3524
3525 enum ixl_sw_seid_offset {
3526         IXL_SW_SEID_EMP = 1,
3527         IXL_SW_SEID_MAC_START = 2,
3528         IXL_SW_SEID_MAC_END = 5,
3529         IXL_SW_SEID_PF_START = 16,
3530         IXL_SW_SEID_PF_END = 31,
3531         IXL_SW_SEID_VF_START = 32,
3532         IXL_SW_SEID_VF_END = 159,
3533 };
3534
3535 /*
3536  * Caller must init and delete sbuf; this function will clear and
3537  * finish it for caller.
3538  *
3539  * Note: The SEID argument only applies for elements defined by FW at
3540  * power-on; these include the EMP, Ports, PFs and VFs.
3541  */
3542 static char *
3543 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3544 {
3545         sbuf_clear(s);
3546
3547         /* If SEID is in certain ranges, then we can infer the
3548          * mapping of SEID to switch element.
3549          */
3550         if (seid == IXL_SW_SEID_EMP) {
3551                 sbuf_cat(s, "EMP");
3552                 goto out;
3553         } else if (seid >= IXL_SW_SEID_MAC_START &&
3554             seid <= IXL_SW_SEID_MAC_END) {
3555                 sbuf_printf(s, "MAC  %2d",
3556                     seid - IXL_SW_SEID_MAC_START);
3557                 goto out;
3558         } else if (seid >= IXL_SW_SEID_PF_START &&
3559             seid <= IXL_SW_SEID_PF_END) {
3560                 sbuf_printf(s, "PF  %3d",
3561                     seid - IXL_SW_SEID_PF_START);
3562                 goto out;
3563         } else if (seid >= IXL_SW_SEID_VF_START &&
3564             seid <= IXL_SW_SEID_VF_END) {
3565                 sbuf_printf(s, "VF  %3d",
3566                     seid - IXL_SW_SEID_VF_START);
3567                 goto out;
3568         }
3569
3570         switch (element_type) {
3571         case I40E_AQ_SW_ELEM_TYPE_BMC:
3572                 sbuf_cat(s, "BMC");
3573                 break;
3574         case I40E_AQ_SW_ELEM_TYPE_PV:
3575                 sbuf_cat(s, "PV");
3576                 break;
3577         case I40E_AQ_SW_ELEM_TYPE_VEB:
3578                 sbuf_cat(s, "VEB");
3579                 break;
3580         case I40E_AQ_SW_ELEM_TYPE_PA:
3581                 sbuf_cat(s, "PA");
3582                 break;
3583         case I40E_AQ_SW_ELEM_TYPE_VSI:
3584                 sbuf_printf(s, "VSI");
3585                 break;
3586         default:
3587                 sbuf_cat(s, "?");
3588                 break;
3589         }
3590
3591 out:
3592         sbuf_finish(s);
3593         return sbuf_data(s);
3594 }
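/*
 * Examples of the SEID-based mapping above: SEID 1 prints as "EMP",
 * SEID 18 prints as PF 2 (18 - IXL_SW_SEID_PF_START) and SEID 33 prints
 * as VF 1 (33 - IXL_SW_SEID_VF_START).  SEIDs outside those fixed ranges
 * fall through to the element_type switch, e.g. a VEB element prints as
 * "VEB".
 */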
3595
3596 static int
3597 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3598 {
3599         const struct i40e_aqc_switch_config_element_resp *one, *two;
3600         one = (const struct i40e_aqc_switch_config_element_resp *)a;
3601         two = (const struct i40e_aqc_switch_config_element_resp *)b;
3602
3603         return ((int)one->seid - (int)two->seid);
3604 }
3605
3606 static int
3607 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3608 {
3609         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3610         struct i40e_hw *hw = &pf->hw;
3611         device_t dev = pf->dev;
3612         struct sbuf *buf;
3613         struct sbuf *nmbuf;
3614         enum i40e_status_code status;
3615         int error = 0;
3616         u16 next = 0;
3617         u8 aq_buf[I40E_AQ_LARGE_BUF];
3618
3619         struct i40e_aqc_switch_config_element_resp *elem;
3620         struct i40e_aqc_get_switch_config_resp *sw_config;
3621         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3622
3623         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3624         if (!buf) {
3625                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3626                 return (ENOMEM);
3627         }
3628
3629         status = i40e_aq_get_switch_config(hw, sw_config,
3630             sizeof(aq_buf), &next, NULL);
3631         if (status) {
3632                 device_printf(dev,
3633                     "%s: aq_get_switch_config() error %s, aq error %s\n",
3634                     __func__, i40e_stat_str(hw, status),
3635                     i40e_aq_str(hw, hw->aq.asq_last_status));
3636                 sbuf_delete(buf);
3637                 return (EIO);
3638         }
3639         if (next)
3640                 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3641                     __func__, next);
3642
3643         nmbuf = sbuf_new_auto();
3644         if (!nmbuf) {
3645                 device_printf(dev, "Could not allocate sbuf for name output.\n");
3646                 sbuf_delete(buf);
3647                 return (ENOMEM);
3648         }
3649
3650         /* Sort entries by SEID for display */
3651         qsort(sw_config->element, sw_config->header.num_reported,
3652             sizeof(struct i40e_aqc_switch_config_element_resp),
3653             &ixl_sw_cfg_elem_seid_cmp);
3654
3655         sbuf_cat(buf, "\n");
3656         /* Assuming <= 255 elements in switch */
3657         sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3658         sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3659         /* Exclude:
3660          * Revision -- all elements are revision 1 for now
3661          */
3662         sbuf_printf(buf,
3663             "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
3664             "                |                 |                 | (uplink)\n");
3665         for (int i = 0; i < sw_config->header.num_reported; i++) {
3666                 elem = &sw_config->element[i];
3667
3668                 // "%4d (%8s) | %8s   %8s   %#8x",
3669                 sbuf_printf(buf, "%4d", elem->seid);
3670                 sbuf_cat(buf, " ");
3671                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3672                     elem->element_type, elem->seid));
3673                 sbuf_cat(buf, " | ");
3674                 sbuf_printf(buf, "%4d", elem->uplink_seid);
3675                 sbuf_cat(buf, " ");
3676                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3677                     0, elem->uplink_seid));
3678                 sbuf_cat(buf, " | ");
3679                 sbuf_printf(buf, "%4d", elem->downlink_seid);
3680                 sbuf_cat(buf, " ");
3681                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3682                     0, elem->downlink_seid));
3683                 sbuf_cat(buf, " | ");
3684                 sbuf_printf(buf, "%8d", elem->connection_type);
3685                 if (i < sw_config->header.num_reported - 1)
3686                         sbuf_cat(buf, "\n");
3687         }
3688         sbuf_delete(nmbuf);
3689
3690         error = sbuf_finish(buf);
3691         if (error)
3692                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3693
3694         sbuf_delete(buf);
3695
3696         return (error);
3697 }
3698
3699 static int
3700 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3701 {
3702         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3703         struct i40e_hw *hw = &pf->hw;
3704         device_t dev = pf->dev;
3705         int requested_vlan = -1;
3706         enum i40e_status_code status = 0;
3707         int error = 0;
3708
3709         error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
3710         if ((error) || (req->newptr == NULL))
3711             return (error);
3712
3713         if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3714                 device_printf(dev, "Flags disallow setting of vlans\n");
3715                 return (ENODEV);
3716         }
3717
3718         hw->switch_tag = requested_vlan;
3719         device_printf(dev,
3720             "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3721             hw->switch_tag, hw->first_tag, hw->second_tag);
3722         status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3723         if (status) {
3724                 device_printf(dev,
3725                     "%s: aq_set_switch_config() error %s, aq error %s\n",
3726                     __func__, i40e_stat_str(hw, status),
3727                     i40e_aq_str(hw, hw->aq.asq_last_status));
3728                 return (status);
3729         }
3730         return (0);
3731 }
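/*
 * Minimal usage sketch (the OID name is not shown here): writing an
 * outer TPID such as 0x88a8 (34984 decimal) to the switch-VLAN sysctl
 * stores it in hw->switch_tag and pushes the new tag configuration to
 * firmware via i40e_aq_set_switch_config(), provided the adapter reports
 * I40E_HW_FLAG_802_1AD_CAPABLE.
 */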
3732
3733 static int
3734 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
3735 {
3736         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3737         struct i40e_hw *hw = &pf->hw;
3738         device_t dev = pf->dev;
3739         struct sbuf *buf;
3740         int error = 0;
3741         enum i40e_status_code status;
3742         u32 reg;
3743
3744         struct i40e_aqc_get_set_rss_key_data key_data;
3745
3746         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3747         if (!buf) {
3748                 device_printf(dev, "Could not allocate sbuf for output.\n");
3749                 return (ENOMEM);
3750         }
3751
3752         bzero(&key_data, sizeof(key_data));
3753
3754         sbuf_cat(buf, "\n");
3755         if (hw->mac.type == I40E_MAC_X722) {
3756                 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
3757                 if (status)
3758                         device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
3759                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3760         } else {
3761                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
3762                         reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
3763                         bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
3764                 }
3765         }
3766
3767         ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
3768
3769         error = sbuf_finish(buf);
3770         if (error)
3771                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3772         sbuf_delete(buf);
3773
3774         return (error);
3775 }
3776
3777 static void
3778 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3779 {
3780         int i, j, k, width;
3781         char c;
3782
3783         if (length < 1 || buf == NULL) return;
3784
3785         int byte_stride = 16;
3786         int lines = length / byte_stride;
3787         int rem = length % byte_stride;
3788         if (rem > 0)
3789                 lines++;
3790
3791         for (i = 0; i < lines; i++) {
3792                 width = (rem > 0 && i == lines - 1)
3793                     ? rem : byte_stride;
3794
3795                 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3796
3797                 for (j = 0; j < width; j++)
3798                         sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
3799
3800                 if (width < byte_stride) {
3801                         for (k = 0; k < (byte_stride - width); k++)
3802                                 sbuf_printf(sb, "   ");
3803                 }
3804
3805                 if (!text) {
3806                         sbuf_printf(sb, "\n");
3807                         continue;
3808                 }
3809
3810                 for (j = 0; j < width; j++) {
3811                         c = (char)buf[i * byte_stride + j];
3812                         if (c < 32 || c > 126)
3813                                 sbuf_printf(sb, ".");
3814                         else
3815                                 sbuf_printf(sb, "%c", c);
3816
3817                         if (j == width - 1)
3818                                 sbuf_printf(sb, "\n");
3819                 }
3820         }
3821 }
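/*
 * Example of the output format produced above for one 16-byte row at
 * label_offset 0 with text == true (as used by the RSS key sysctl):
 *
 *    0 | 6b 65 79 2d 64 61 74 61 00 00 00 00 00 00 00 00 key-data........
 *
 * With text == false (as used by the RSS LUT sysctl) the trailing ASCII
 * column is omitted.
 */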
3822
3823 static int
3824 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3825 {
3826         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3827         struct i40e_hw *hw = &pf->hw;
3828         device_t dev = pf->dev;
3829         struct sbuf *buf;
3830         int error = 0;
3831         enum i40e_status_code status;
3832         u8 hlut[512];
3833         u32 reg;
3834
3835         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3836         if (!buf) {
3837                 device_printf(dev, "Could not allocate sbuf for output.\n");
3838                 return (ENOMEM);
3839         }
3840
3841         bzero(hlut, sizeof(hlut));
3842         sbuf_cat(buf, "\n");
3843         if (hw->mac.type == I40E_MAC_X722) {
3844                 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
3845                 if (status)
3846                         device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
3847                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3848         } else {
3849                 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
3850                         reg = rd32(hw, I40E_PFQF_HLUT(i));
3851                         bcopy(&reg, &hlut[i << 2], 4);
3852                 }
3853         }
3854         ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
3855
3856         error = sbuf_finish(buf);
3857         if (error)
3858                 device_printf(dev, "Error finishing sbuf: %d\n", error);
3859         sbuf_delete(buf);
3860
3861         return (error);
3862 }
3863
3864 static int
3865 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
3866 {
3867         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3868         struct i40e_hw *hw = &pf->hw;
3869         u64 hena;
3870
3871         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3872             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3873
3874         return sysctl_handle_long(oidp, NULL, hena, req);
3875 }
3876
3877 /*
3878  * Sysctl to disable firmware's link management
3879  *
3880  * 1 - Disable link management on this port
3881  * 0 - Re-enable link management
3882  *
3883  * On normal NVMs, firmware manages link by default.
3884  */
3885 static int
3886 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3887 {
3888         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3889         struct i40e_hw *hw = &pf->hw;
3890         device_t dev = pf->dev;
3891         int requested_mode = -1;
3892         enum i40e_status_code status = 0;
3893         int error = 0;
3894
3895         /* Read in new mode */
3896         error = sysctl_handle_int(oidp, &requested_mode, 0, req);
3897         if ((error) || (req->newptr == NULL))
3898                 return (error);
3899         /* Check for sane value */
3900         if (requested_mode < 0 || requested_mode > 1) {
3901                 device_printf(dev, "Valid modes are 0 or 1\n");
3902                 return (EINVAL);
3903         }
3904
3905         /* Set new mode */
3906         status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
3907         if (status) {
3908                 device_printf(dev,
3909                     "%s: Error setting new phy debug mode %s,"
3910                     " aq error: %s\n", __func__, i40e_stat_str(hw, status),
3911                     i40e_aq_str(hw, hw->aq.asq_last_status));
3912                 return (EIO);
3913         }
3914
3915         return (0);
3916 }
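/*
 * Minimal usage sketch (the OID name shown is illustrative):
 *
 *   # sysctl dev.ixl.0.debug.disable_fw_link_management=1
 *
 * Writing 1 sets the "disable link" PHY debug bit (the value shifted
 * left by 4) through i40e_aq_set_phy_debug(); writing 0 clears it and
 * returns link management to firmware.
 */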
3917
3918 /*
3919  * Read some diagnostic data from a (Q)SFP+ module
3920  *
3921  *             SFP A2   QSFP Lower Page
3922  * Temperature 96-97    22-23
3923  * Vcc         98-99    26-27
3924  * TX power    102-103  34-35..40-41
3925  * RX power    104-105  50-51..56-57
3926  */
3927 static int
3928 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
3929 {
3930         struct ixl_pf *pf = (struct ixl_pf *)arg1;
3931         device_t dev = pf->dev;
3932         struct sbuf *sbuf;
3933         int error = 0;
3934         u8 output;
3935
3936         if (req->oldptr == NULL) {
3937                 error = SYSCTL_OUT(req, 0, 128);
3938                 return (0);
3939         }
3940
3941         error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
3942         if (error) {
3943                 device_printf(dev, "Error reading from i2c\n");
3944                 return (error);
3945         }
3946
3947         /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
3948         if (output == 0x3) {
3949                 /*
3950                  * Check for:
3951                  * - Internally calibrated data
3952                  * - Diagnostic monitoring is implemented
3953                  */
3954                 pf->read_i2c_byte(pf, 92, 0xA0, &output);
3955                 if (!(output & 0x60)) {
3956                         device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
3957                         return (0);
3958                 }
3959
3960                 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3961
3962                 for (u8 offset = 96; offset < 100; offset++) {
3963                         pf->read_i2c_byte(pf, offset, 0xA2, &output);
3964                         sbuf_printf(sbuf, "%02X ", output);
3965                 }
3966                 for (u8 offset = 102; offset < 106; offset++) {
3967                         pf->read_i2c_byte(pf, offset, 0xA2, &output);
3968                         sbuf_printf(sbuf, "%02X ", output);
3969                 }
3970         } else if (output == 0xD || output == 0x11) {
3971                 /*
3972                  * QSFP+ modules are always internally calibrated, and must indicate
3973                  * what types of diagnostic monitoring are implemented
3974                  */
3975                 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3976
3977                 for (u8 offset = 22; offset < 24; offset++) {
3978                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
3979                         sbuf_printf(sbuf, "%02X ", output);
3980                 }
3981                 for (u8 offset = 26; offset < 28; offset++) {
3982                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
3983                         sbuf_printf(sbuf, "%02X ", output);
3984                 }
3985                 /* Read the data from the first lane */
3986                 for (u8 offset = 34; offset < 36; offset++) {
3987                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
3988                         sbuf_printf(sbuf, "%02X ", output);
3989                 }
3990                 for (u8 offset = 50; offset < 52; offset++) {
3991                         pf->read_i2c_byte(pf, offset, 0xA0, &output);
3992                         sbuf_printf(sbuf, "%02X ", output);
3993                 }
3994         } else {
3995                 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
3996                 return (0);
3997         }
3998
3999         sbuf_finish(sbuf);
4000         sbuf_delete(sbuf);
4001
4002         return (0);
4003 }
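/*
 * The handler above emits eight space-separated hex bytes.  For an SFP+
 * module these are A2h bytes 96-99 and 102-105: temperature, Vcc, TX
 * power and RX power as raw big-endian 16-bit words.  Per SFF-8472 the
 * temperature word is 1/256 degC per LSB, so a leading "1A 50" would
 * correspond to roughly 26.3 degC.
 */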
4004
4005 /*
4006  * Sysctl to read a byte from the I2C bus.
4007  *
4008  * Input: 32-bit value:
4009  *      bits 0-7:   device address (0xA0 or 0xA2)
4010  *      bits 8-15:  offset (0-255)
4011  *      bits 16-31: unused
4012  * Output: 8-bit value read
4013  */
4014 static int
4015 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4016 {
4017         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4018         device_t dev = pf->dev;
4019         int input = -1, error = 0;
4020         u8 dev_addr, offset, output;
4021
4022         /* Read in I2C read parameters */
4023         error = sysctl_handle_int(oidp, &input, 0, req);
4024         if ((error) || (req->newptr == NULL))
4025                 return (error);
4026         /* Validate device address */
4027         dev_addr = input & 0xFF;
4028         if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4029                 return (EINVAL);
4030         }
4031         offset = (input >> 8) & 0xFF;
4032
4033         error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4034         if (error)
4035                 return (error);
4036
4037         device_printf(dev, "%02X\n", output);
4038         return (0);
4039 }
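/*
 * Worked example of the input encoding above: to read offset 96 (0x60)
 * from the diagnostic device at address 0xA2, write
 * (0x60 << 8) | 0xA2 = 0x60A2 (24738 decimal) to the sysctl; the byte
 * read back is reported on the console via device_printf().
 */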
4040
4041 /*
4042  * Sysctl to write a byte to the I2C bus.
4043  *
4044  * Input: 32-bit value:
4045  *      bits 0-7:   device address (0xA0 or 0xA2)
4046  *      bits 8-15:  offset (0-255)
4047  *      bits 16-23: value to write
4048  *      bits 24-31: unused
4049  * Output: 8-bit value written
4050  */
4051 static int
4052 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4053 {
4054         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4055         device_t dev = pf->dev;
4056         int input = -1, error = 0;
4057         u8 dev_addr, offset, value;
4058
4059         /* Read in I2C write parameters */
4060         error = sysctl_handle_int(oidp, &input, 0, req);
4061         if ((error) || (req->newptr == NULL))
4062                 return (error);
4063         /* Validate device address */
4064         dev_addr = input & 0xFF;
4065         if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4066                 return (EINVAL);
4067         }
4068         offset = (input >> 8) & 0xFF;
4069         value = (input >> 16) & 0xFF;
4070
4071         error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4072         if (error)
4073                 return (error);
4074
4075         device_printf(dev, "%02X written\n", value);
4076         return (0);
4077 }
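/*
 * Worked example of the input encoding above: to write the byte 0x55 to
 * offset 0x7B of the device at address 0xA0, the input value is
 * (0x55 << 16) | (0x7B << 8) | 0xA0 = 0x557BA0.
 */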
4078
4079 static int
4080 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4081     u8 bit_pos, int *is_set)
4082 {
4083         device_t dev = pf->dev;
4084         struct i40e_hw *hw = &pf->hw;
4085         enum i40e_status_code status;
4086
4087         if (IXL_PF_IN_RECOVERY_MODE(pf))
4088                 return (EIO);
4089
4090         status = i40e_aq_get_phy_capabilities(hw,
4091             FALSE, FALSE, abilities, NULL);
4092         if (status) {
4093                 device_printf(dev,
4094                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4095                     __func__, i40e_stat_str(hw, status),
4096                     i40e_aq_str(hw, hw->aq.asq_last_status));
4097                 return (EIO);
4098         }
4099
4100         *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4101         return (0);
4102 }
4103
4104 static int
4105 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4106     u8 bit_pos, int set)
4107 {
4108         device_t dev = pf->dev;
4109         struct i40e_hw *hw = &pf->hw;
4110         struct i40e_aq_set_phy_config config;
4111         enum i40e_status_code status;
4112
4113         /* Set new PHY config */
4114         memset(&config, 0, sizeof(config));
4115         config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4116         if (set)
4117                 config.fec_config |= bit_pos;
4118         if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4119                 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4120                 config.phy_type = abilities->phy_type;
4121                 config.phy_type_ext = abilities->phy_type_ext;
4122                 config.link_speed = abilities->link_speed;
4123                 config.eee_capability = abilities->eee_capability;
4124                 config.eeer = abilities->eeer_val;
4125                 config.low_power_ctrl = abilities->d3_lpan;
4126                 status = i40e_aq_set_phy_config(hw, &config, NULL);
4127
4128                 if (status) {
4129                         device_printf(dev,
4130                             "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4131                             __func__, i40e_stat_str(hw, status),
4132                             i40e_aq_str(hw, hw->aq.asq_last_status));
4133                         return (EIO);
4134                 }
4135         }
4136
4137         return (0);
4138 }
4139
4140 static int
4141 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4142 {
4143         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4144         int mode, error = 0;
4145
4146         struct i40e_aq_get_phy_abilities_resp abilities;
4147         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4148         if (error)
4149                 return (error);
4150         /* Read in new mode */
4151         error = sysctl_handle_int(oidp, &mode, 0, req);
4152         if ((error) || (req->newptr == NULL))
4153                 return (error);
4154
4155         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4156 }
4157
4158 static int
4159 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4160 {
4161         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4162         int mode, error = 0;
4163
4164         struct i40e_aq_get_phy_abilities_resp abilities;
4165         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4166         if (error)
4167                 return (error);
4168         /* Read in new mode */
4169         error = sysctl_handle_int(oidp, &mode, 0, req);
4170         if ((error) || (req->newptr == NULL))
4171                 return (error);
4172
4173         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4174 }
4175
4176 static int
4177 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4178 {
4179         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4180         int mode, error = 0;
4181
4182         struct i40e_aq_get_phy_abilities_resp abilities;
4183         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4184         if (error)
4185                 return (error);
4186         /* Read in new mode */
4187         error = sysctl_handle_int(oidp, &mode, 0, req);
4188         if ((error) || (req->newptr == NULL))
4189                 return (error);
4190
4191         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4192 }
4193
4194 static int
4195 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4196 {
4197         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4198         int mode, error = 0;
4199
4200         struct i40e_aq_get_phy_abilities_resp abilities;
4201         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4202         if (error)
4203                 return (error);
4204         /* Read in new mode */
4205         error = sysctl_handle_int(oidp, &mode, 0, req);
4206         if ((error) || (req->newptr == NULL))
4207                 return (error);
4208
4209         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4210 }
4211
4212 static int
4213 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4214 {
4215         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4216         int mode, error = 0;
4217
4218         struct i40e_aq_get_phy_abilities_resp abilities;
4219         error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4220         if (error)
4221                 return (error);
4222         /* Read in new mode */
4223         error = sysctl_handle_int(oidp, &mode, 0, req);
4224         if ((error) || (req->newptr == NULL))
4225                 return (error);
4226
4227         return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4228 }
4229
4230 static int
4231 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4232 {
4233         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4234         struct i40e_hw *hw = &pf->hw;
4235         device_t dev = pf->dev;
4236         struct sbuf *buf;
4237         int error = 0;
4238         enum i40e_status_code status;
4239
4240         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4241         if (!buf) {
4242                 device_printf(dev, "Could not allocate sbuf for output.\n");
4243                 return (ENOMEM);
4244         }
4245
4246         u8 *final_buff;
4247         /* This amount is only necessary if reading the entire cluster into memory */
4248 #define IXL_FINAL_BUFF_SIZE     (1280 * 1024)
4249         final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4250         if (final_buff == NULL) {
4251                 device_printf(dev, "Could not allocate memory for output.\n");
4252                 goto out;
4253         }
4254         int final_buff_len = 0;
4255
4256         u8 cluster_id = 1;
4257         bool more = true;
4258
4259         u8 dump_buf[4096];
4260         u16 curr_buff_size = 4096;
4261         u8 curr_next_table = 0;
4262         u32 curr_next_index = 0;
4263
4264         u16 ret_buff_size;
4265         u8 ret_next_table;
4266         u32 ret_next_index;
4267
4268         sbuf_cat(buf, "\n");
4269
4270         while (more) {
4271                 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4272                     dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4273                 if (status) {
4274                         device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4275                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4276                         goto free_out;
4277                 }
4278
4279                 /* copy info out of temp buffer */
4280                 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4281                 final_buff_len += ret_buff_size;
4282
4283                 if (ret_next_table != curr_next_table) {
4284                         /* We're done with the current table; we can dump out read data. */
4285                         sbuf_printf(buf, "%d:", curr_next_table);
4286                         int bytes_printed = 0;
4287                         while (bytes_printed < final_buff_len) {
4288                                 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4289                                 bytes_printed += 16;
4290                         }
4291                         sbuf_cat(buf, "\n");
4292
4293                         /* The entire cluster has been read; we're finished */
4294                         if (ret_next_table == 0xFF)
4295                                 break;
4296
4297                         /* Otherwise clear the output buffer and continue reading */
4298                         bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4299                         final_buff_len = 0;
4300                 }
4301
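                     /* Firmware uses all-ones as a sentinel; wrap back to index 0. */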
4302                 if (ret_next_index == 0xFFFFFFFF)
4303                         ret_next_index = 0;
4304
4305                 bzero(dump_buf, sizeof(dump_buf));
4306                 curr_next_table = ret_next_table;
4307                 curr_next_index = ret_next_index;
4308         }
4309
4310 free_out:
4311         free(final_buff, M_IXL);
4312 out:
4313         error = sbuf_finish(buf);
4314         if (error)
4315                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4316         sbuf_delete(buf);
4317
4318         return (error);
4319 }
4320
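     /*
      * Ask the firmware to start its LLDP agent.  On success, or if the
      * agent is already running, clear the FW_LLDP_DISABLED state flag.
      */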
4321 static int
4322 ixl_start_fw_lldp(struct ixl_pf *pf)
4323 {
4324         struct i40e_hw *hw = &pf->hw;
4325         enum i40e_status_code status;
4326
4327         status = i40e_aq_start_lldp(hw, false, NULL);
4328         if (status != I40E_SUCCESS) {
4329                 switch (hw->aq.asq_last_status) {
4330                 case I40E_AQ_RC_EEXIST:
4331                         device_printf(pf->dev,
4332                             "FW LLDP agent is already running\n");
4333                         break;
4334                 case I40E_AQ_RC_EPERM:
4335                         device_printf(pf->dev,
4336                             "Device configuration forbids SW from starting "
4337                             "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4338                             "attribute to \"Enabled\" to use this sysctl\n");
4339                         return (EINVAL);
4340                 default:
4341                         device_printf(pf->dev,
4342                             "Starting FW LLDP agent failed: error: %s, %s\n",
4343                             i40e_stat_str(hw, status),
4344                             i40e_aq_str(hw, hw->aq.asq_last_status));
4345                         return (EINVAL);
4346                 }
4347         }
4348
4349         atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4350         return (0);
4351 }
4352
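     /*
      * Ask the firmware to stop its LLDP agent.  This is not supported with
      * NPAR enabled or on firmware that cannot stop the agent; on success
      * the FW_LLDP_DISABLED state flag is set.
      */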
4353 static int
4354 ixl_stop_fw_lldp(struct ixl_pf *pf)
4355 {
4356         struct i40e_hw *hw = &pf->hw;
4357         device_t dev = pf->dev;
4358         enum i40e_status_code status;
4359
4360         if (hw->func_caps.npar_enable != 0) {
4361                 device_printf(dev,
4362                     "Disabling FW LLDP agent is not supported on this device\n");
4363                 return (EINVAL);
4364         }
4365
4366         if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4367                 device_printf(dev,
4368                     "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4369                 return (EINVAL);
4370         }
4371
4372         status = i40e_aq_stop_lldp(hw, true, false, NULL);
4373         if (status != I40E_SUCCESS) {
4374                 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4375                         device_printf(dev,
4376                             "Disabling FW LLDP agent failed: error: %s, %s\n",
4377                             i40e_stat_str(hw, status),
4378                             i40e_aq_str(hw, hw->aq.asq_last_status));
4379                         return (EINVAL);
4380                 }
4381
4382                 device_printf(dev, "FW LLDP agent is already stopped\n");
4383         }
4384
4385         i40e_aq_set_dcb_parameters(hw, true, NULL);
4386         atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4387         return (0);
4388 }
4389
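     /*
      * Sysctl to control the firmware LLDP agent: write 1 to start it,
      * 0 to stop it.
      */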
4390 static int
4391 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4392 {
4393         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4394         int state, new_state, error = 0;
4395
4396         state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4397
4398         /* Read in new mode */
4399         error = sysctl_handle_int(oidp, &new_state, 0, req);
4400         if ((error) || (req->newptr == NULL))
4401                 return (error);
4402
4403         /* Already in requested state */
4404         if (new_state == state)
4405                 return (error);
4406
4407         if (new_state == 0)
4408                 return ixl_stop_fw_lldp(pf);
4409
4410         return ixl_start_fw_lldp(pf);
4411 }
4412
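     /*
      * Sysctl to enable or disable Energy Efficient Ethernet (EEE) and
      * track the result in the IXL_PF_STATE_EEE_ENABLED state flag.
      */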
4413 static int
4414 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4415 {
4416         struct ixl_pf         *pf = (struct ixl_pf *)arg1;
4417         int                   state, new_state;
4418         int                   sysctl_handle_status = 0;
4419         enum i40e_status_code cmd_status;
4420
4421         /* Init states' values */
4422         state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
4423
4424         /* Get requested mode */
4425         sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4426         if ((sysctl_handle_status) || (req->newptr == NULL))
4427                 return (sysctl_handle_status);
4428
4429         /* Check if state has changed */
4430         if (new_state == state)
4431                 return (0);
4432
4433         /* Set new state */
4434         cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4435
4436         /* Save new state or report error */
4437         if (!cmd_status) {
4438                 if (new_state == 0)
4439                         atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4440                 else
4441                         atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4442         } else if (cmd_status == I40E_ERR_CONFIG)
4443                 return (EPERM);
4444         else
4445                 return (EIO);
4446
4447         return (0);
4448 }
4449
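     /*
      * Determine the initial link state at attach time.  On firmware older
      * than 4.33, wait briefly and restart link autonegotiation before
      * reading the link status.
      */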
4450 int
4451 ixl_attach_get_link_status(struct ixl_pf *pf)
4452 {
4453         struct i40e_hw *hw = &pf->hw;
4454         device_t dev = pf->dev;
4455         int error = 0;
4456
4457         if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4458             (hw->aq.fw_maj_ver < 4)) {
4459                 i40e_msec_delay(75);
4460                 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4461                 if (error) {
4462                         device_printf(dev, "link restart failed, aq_err=%d\n",
4463                             pf->hw.aq.asq_last_status);
4464                         return (error);
4465                 }
4466         }
4467
4468         /* Determine link state */
4469         hw->phy.get_link_info = TRUE;
4470         i40e_get_link_status(hw, &pf->link_up);
4471         return (0);
4472 }
4473
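     /*
      * Sysctl to request a PF reset; the reset itself is performed later
      * by the admin task.
      */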
4474 static int
4475 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4476 {
4477         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4478         int requested = 0, error = 0;
4479
4480         /* Read in new mode */
4481         error = sysctl_handle_int(oidp, &requested, 0, req);
4482         if ((error) || (req->newptr == NULL))
4483                 return (error);
4484
4485         /* Initiate the PF reset later in the admin task */
4486         atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4487
4488         return (error);
4489 }
4490
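     /*
      * Sysctl to trigger a core (CORER) reset immediately by writing
      * the GLGEN_RTRIG register.
      */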
4491 static int
4492 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4493 {
4494         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4495         struct i40e_hw *hw = &pf->hw;
4496         int requested = 0, error = 0;
4497
4498         /* Read in new mode */
4499         error = sysctl_handle_int(oidp, &requested, 0, req);
4500         if ((error) || (req->newptr == NULL))
4501                 return (error);
4502
4503         wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4504
4505         return (error);
4506 }
4507
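     /*
      * Sysctl to trigger a global (GLOBR) reset immediately by writing
      * the GLGEN_RTRIG register.
      */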
4508 static int
4509 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4510 {
4511         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4512         struct i40e_hw *hw = &pf->hw;
4513         int requested = 0, error = 0;
4514
4515         /* Read in new mode */
4516         error = sysctl_handle_int(oidp, &requested, 0, req);
4517         if ((error) || (req->newptr == NULL))
4518                 return (error);
4519
4520         wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4521
4522         return (error);
4523 }
4524
4525 /*
4526  * Print out the mapping of TX queue indexes and RX queue indexes
4527  * to MSI-X vectors.
4528  */
4529 static int
4530 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4531 {
4532         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4533         struct ixl_vsi *vsi = &pf->vsi;
4534         device_t dev = pf->dev;
4535         struct sbuf *buf;
4536         int error = 0;
4537
4538         struct ixl_rx_queue *rx_que = vsi->rx_queues;
4539         struct ixl_tx_queue *tx_que = vsi->tx_queues;
4540
4541         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4542         if (!buf) {
4543                 device_printf(dev, "Could not allocate sbuf for output.\n");
4544                 return (ENOMEM);
4545         }
4546
4547         sbuf_cat(buf, "\n");
4548         for (int i = 0; i < vsi->num_rx_queues; i++) {
4549                 rx_que = &vsi->rx_queues[i];
4550                 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4551         }
4552         for (int i = 0; i < vsi->num_tx_queues; i++) {
4553                 tx_que = &vsi->tx_queues[i];
4554                 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4555         }
4556
4557         error = sbuf_finish(buf);
4558         if (error)
4559                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4560         sbuf_delete(buf);
4561
4562         return (error);
4563 }