/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef DEV_NETMAP
#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */

static int      ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);

/* Sysctls */
static int      ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

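/*
 * These handlers are registered at device attach (not shown here); assuming
 * the standard FreeBSD device sysctl tree, they surface under dev.ixl.<unit>,
 * e.g. "sysctl dev.ixl.0.fc=3" to request full flow control, or
 * "sysctl dev.ixl.0.current_speed" to report the current link speed.
 */
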
/* Debug Sysctls */
static int      ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int      ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);

void
ixl_dbg(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
{
        va_list args;

        if (!(mask & pf->dbg_mask))
                return;

        /*
         * device_printf() is variadic and cannot consume a va_list, so
         * print the device name ourselves and hand the va_list to vprintf().
         */
        device_print_prettyname(pf->dev);
        va_start(args, fmt);
        vprintf(fmt, args);
        va_end(args);
}

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
        u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
        u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
        u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

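        /*
         * Produces a line like (values illustrative only):
         * "fw 4.40.35115 api 1.4 nvm 4.53 etid 80001cd8 oem 1.38.0"
         */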
        sbuf_printf(buf,
            "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
            hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
            hw->aq.api_maj_ver, hw->aq.api_min_ver,
            (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
            IXL_NVM_VERSION_HI_SHIFT,
            (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
            IXL_NVM_VERSION_LO_SHIFT,
            hw->nvm.eetrack,
            oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;
        struct sbuf *sbuf;

        sbuf = sbuf_new_auto();
        ixl_nvm_version_str(hw, sbuf);
        sbuf_finish(sbuf);
        device_printf(dev, "%s\n", sbuf_data(sbuf));
        sbuf_delete(sbuf);
}

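/*
 * ITR (Interrupt Throttle Rate) registers limit how often each queue's
 * interrupt may fire; the values are programmed in 2-usec units (see the
 * ITR0 setup in ixl_configure_intr0_msix() below).
 */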
static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;

        vsi->tx_itr_setting = pf->tx_itr;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct tx_ring  *txr = &que->txr;

                wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
                    vsi->tx_itr_setting);
                txr->itr = vsi->tx_itr_setting;
                txr->latency = IXL_AVE_LATENCY;
        }
}

static void
ixl_configure_rx_itr(struct ixl_pf *pf)
{
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;

        vsi->rx_itr_setting = pf->rx_itr;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                struct rx_ring  *rxr = &que->rxr;

                wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
                    vsi->rx_itr_setting);
                rxr->itr = vsi->rx_itr_setting;
                rxr->latency = IXL_AVE_LATENCY;
        }
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
        ixl_configure_tx_itr(pf);
        ixl_configure_rx_itr(pf);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: the stack calls it as the init
 *  entry point in the network interface structure, and the driver
 *  calls it as a hw/sw initialization routine to return the hardware
 *  and software to a consistent state.
 *
 **********************************************************************/
void
ixl_init_locked(struct ixl_pf *pf)
{
        struct i40e_hw  *hw = &pf->hw;
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ifnet    *ifp = vsi->ifp;
        device_t        dev = pf->dev;
        struct i40e_filter_control_settings     filter;
        u8              tmpaddr[ETHER_ADDR_LEN];
        int             ret;

        mtx_assert(&pf->pf_mtx, MA_OWNED);
        INIT_DEBUGOUT("ixl_init_locked: begin");

        ixl_stop_locked(pf);

        /* Get the latest mac address... User might use a LAA */
        bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
              I40E_ETH_LENGTH_OF_ADDRESS);
        if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
            (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
                ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
                bcopy(tmpaddr, hw->mac.addr,
                    I40E_ETH_LENGTH_OF_ADDRESS);
                ret = i40e_aq_mac_address_write(hw,
                    I40E_AQC_WRITE_TYPE_LAA_ONLY,
                    hw->mac.addr, NULL);
                if (ret) {
                        device_printf(dev, "LLA address change failed!!\n");
                        return;
                }
        }

        ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

        /* Set the various hardware offload abilities */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

        /* Set up the device filtering */
        bzero(&filter, sizeof(filter));
        filter.enable_ethtype = TRUE;
        filter.enable_macvlan = TRUE;
        filter.enable_fdir = FALSE;
        filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
        if (i40e_set_filter_control(hw, &filter))
                device_printf(dev, "i40e_set_filter_control() failed\n");

        /* Prepare the VSI: rings, hmc contexts, etc... */
        if (ixl_initialize_vsi(vsi)) {
                device_printf(dev, "initialize vsi failed!!\n");
                return;
        }

        /* Set up RSS */
        ixl_config_rss(pf);

        /* Add protocol filters to list */
        ixl_init_filters(vsi);

        /* Set up VLAN filters if needed */
        ixl_setup_vlan_filters(vsi);

        /* Set up MSI-X routing and the ITR settings */
        if (pf->enable_msix) {
                ixl_configure_queue_intr_msix(pf);
                ixl_configure_itr(pf);
        } else
                ixl_configure_legacy(pf);

        ixl_enable_rings(vsi);

        i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

        ixl_reconfigure_filters(vsi);

        /* And now turn on interrupts */
        ixl_enable_intr(vsi);

        /* Get link info */
        hw->phy.get_link_info = TRUE;
        i40e_get_link_status(hw, &pf->link_up);
        ixl_update_link_status(pf);

        /* Set initial advertised speed sysctl value */
        ixl_get_initial_advertised_speeds(pf);

        /* Start the local timer */
        callout_reset(&pf->timer, hz, ixl_local_timer, pf);

        /* Now inform the stack we're ready */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
        struct i40e_aqc_list_capabilities_element_resp *buf;
        struct i40e_hw  *hw = &pf->hw;
        device_t        dev = pf->dev;
        int             error, len;
        u16             needed;
        bool            again = TRUE;

        len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
        if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
            malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
                device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
        }

        /* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
            &needed, i40e_aqc_opc_list_func_capabilities, NULL);
        free(buf, M_DEVBUF);
        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
            (again == TRUE)) {
                /* retry once with a larger buffer */
                again = FALSE;
                len = needed;
                goto retry;
        } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                device_printf(dev, "capability discovery failed: %d\n",
                    pf->hw.aq.asq_last_status);
                return (ENODEV);
        }

        /* Capture this PF's starting queue pair */
        pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
        device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
            "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
            hw->pf_id, hw->func_caps.num_vfs,
            hw->func_caps.num_msix_vectors,
            hw->func_caps.num_msix_vectors_vf,
            hw->func_caps.fd_filters_guaranteed,
            hw->func_caps.fd_filters_best_effort,
            hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp,
            hw->func_caps.base_queue);
#endif
        /* Print a subset of the capability information. */
        device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
            hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
            hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
            (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
            (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
            "MDIO shared");

        return (error);
}

void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
        device_t        dev = vsi->dev;

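        /*
         * The hardware requires TX checksum offload whenever TSO is enabled;
         * the IXL_FLAGS_KEEP_TSO[46] flags remember that TSO was disabled
         * along with txcsum, so it can be restored if txcsum is re-enabled.
         */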
        /* Enable/disable TXCSUM/TSO4 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable |= IFCAP_TXCSUM;
                        /* enable TXCSUM, restore TSO if previously enabled */
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                                ifp->if_capenable |= IFCAP_TSO4;
                        }
                } else if (mask & IFCAP_TSO4) {
                        ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
                        device_printf(dev,
                            "TSO4 requires txcsum, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && !(ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable &= ~IFCAP_TXCSUM;
                else if (mask & IFCAP_TSO4)
                        ifp->if_capenable |= IFCAP_TSO4;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM)
            && (ifp->if_capenable & IFCAP_TSO4)) {
                if (mask & IFCAP_TXCSUM) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO4;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
                        device_printf(dev,
                            "TSO4 requires txcsum, disabling both...\n");
                } else if (mask & IFCAP_TSO4)
                        ifp->if_capenable &= ~IFCAP_TSO4;
        }

        /* Enable/disable TXCSUM_IPV6/TSO6 */
        if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
                        if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
                                vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                                ifp->if_capenable |= IFCAP_TSO6;
                        }
                } else if (mask & IFCAP_TSO6) {
                        ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
                        device_printf(dev,
                            "TSO6 requires txcsum6, enabling both...\n");
                }
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && !(ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6)
                        ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
                else if (mask & IFCAP_TSO6)
                        ifp->if_capenable |= IFCAP_TSO6;
        } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
            && (ifp->if_capenable & IFCAP_TSO6)) {
                if (mask & IFCAP_TXCSUM_IPV6) {
                        vsi->flags |= IXL_FLAGS_KEEP_TSO6;
                        ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
                        device_printf(dev,
                            "TSO6 requires txcsum6, disabling both...\n");
                } else if (mask & IFCAP_TSO6)
                        ifp->if_capenable &= ~IFCAP_TSO6;
        }
}

/* For the set_advertise sysctl */
void
ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;
        enum i40e_status_code status;
        struct i40e_aq_get_phy_abilities_resp abilities;

        /* Set initial sysctl values */
        status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
            NULL);
        if (status) {
                /* Non-fatal error */
                device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
                    __func__, status);
                return;
        }

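        /*
         * Translate the AQ link-speed flags into the driver's
         * advertised_speed bitmask: 0x1 = 100 Mb/s, 0x2 = 1 Gb/s,
         * 0x4 = 10 Gb/s, 0x8 = 20 Gb/s, 0x10 = 40 Gb/s.
         */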
        if (abilities.link_speed & I40E_LINK_SPEED_40GB)
                pf->advertised_speed |= 0x10;
        if (abilities.link_speed & I40E_LINK_SPEED_20GB)
                pf->advertised_speed |= 0x8;
        if (abilities.link_speed & I40E_LINK_SPEED_10GB)
                pf->advertised_speed |= 0x4;
        if (abilities.link_speed & I40E_LINK_SPEED_1GB)
                pf->advertised_speed |= 0x2;
        if (abilities.link_speed & I40E_LINK_SPEED_100MB)
                pf->advertised_speed |= 0x1;
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
        enum i40e_status_code status = 0;
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;

        /* Shutdown LAN HMC */
        if (hw->hmc.hmc_obj) {
                status = i40e_shutdown_lan_hmc(hw);
                if (status) {
                        device_printf(dev,
                            "init: LAN HMC shutdown failure; status %d\n", status);
                        goto err_out;
                }
        }

        // XXX: This gets called when we know the adminq is inactive;
        // so we already know it's setup when we get here.

        /* Shutdown admin queue */
        status = i40e_shutdown_adminq(hw);
        if (status)
                device_printf(dev,
                    "init: Admin Queue shutdown failure; status %d\n", status);

err_out:
        return (status);
}

int
ixl_reset(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        device_t dev = pf->dev;
        u8 set_fc_err_mask;
        int error = 0;

        // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
        i40e_clear_hw(hw);
        error = i40e_pf_reset(hw);
        if (error) {
                device_printf(dev, "init: PF reset failure\n");
                error = EIO;
                goto err_out;
        }

        error = i40e_init_adminq(hw);
        if (error) {
                device_printf(dev, "init: Admin queue init failure;"
                    " status code %d\n", error);
                error = EIO;
                goto err_out;
        }

        i40e_clear_pxe_mode(hw);

        error = ixl_get_hw_capabilities(pf);
        if (error) {
                device_printf(dev, "init: Error retrieving HW capabilities;"
                    " status code %d\n", error);
                goto err_out;
        }

        error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
            hw->func_caps.num_rx_qp, 0, 0);
        if (error) {
                device_printf(dev, "init: LAN HMC init failed; status code %d\n",
                    error);
                error = EIO;
                goto err_out;
        }

        error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (error) {
                device_printf(dev, "init: LAN HMC config failed; status code %d\n",
                    error);
                error = EIO;
                goto err_out;
        }

        // XXX: possible fix for panic, but our failure recovery is still broken
        error = ixl_switch_config(pf);
        if (error) {
                device_printf(dev, "init: ixl_switch_config() failed: %d\n",
                    error);
                goto err_out;
        }

        error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
            NULL);
        if (error) {
                device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
                    " aq_err %d\n", error, hw->aq.asq_last_status);
                error = EIO;
                goto err_out;
        }

        error = i40e_set_fc(hw, &set_fc_err_mask, true);
        if (error) {
                device_printf(dev, "init: setting link flow control failed; retcode %d,"
                    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
                goto err_out;
        }

        // XXX: (Rebuild VSIs?)

        /* Firmware delay workaround */
        if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
            (hw->aq.fw_maj_ver < 4)) {
                i40e_msec_delay(75);
                error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
                if (error) {
                        device_printf(dev, "init: link restart failed, aq_err %d\n",
                            hw->aq.asq_last_status);
                        goto err_out;
                }
        }

err_out:
        return (error);
}

/*
** MSIX Interrupt Handlers and Tasklets
*/
void
ixl_handle_que(void *context, int pending)
{
        struct ixl_queue *que = context;
        struct ixl_vsi *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        struct ifnet    *ifp = vsi->ifp;
        bool            more;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                more = ixl_rxeof(que, IXL_RX_LIMIT);
                IXL_TX_LOCK(txr);
                ixl_txeof(que);
                if (!drbr_empty(ifp, txr->br))
                        ixl_mq_start_locked(ifp, txr);
                IXL_TX_UNLOCK(txr);
                if (more) {
                        taskqueue_enqueue(que->tq, &que->task);
                        return;
                }
        }

        /* Re-enable this queue's interrupt */
        ixl_enable_queue(hw, que->me);
        return;
}

/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        struct ifnet            *ifp = vsi->ifp;
        struct tx_ring          *txr = &que->txr;
        u32                     reg, icr0, mask;
        bool                    more_tx, more_rx;

        ++que->irqs;

        /* Protect against spurious interrupts */
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;

        icr0 = rd32(hw, I40E_PFINT_ICR0);
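        /* Note: reading ICR0 clears the latched interrupt causes. */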

        reg = rd32(hw, I40E_PFINT_DYN_CTL0);
        reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

#ifdef PCI_IOV
        if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
                taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
                taskqueue_enqueue(pf->tq, &pf->adminq);
                return;
        }

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        /* re-enable other interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, mask);

        /* And now the queues */
        reg = rd32(hw, I40E_QINT_RQCTL(0));
        reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
        wr32(hw, I40E_QINT_RQCTL(0), reg);

        reg = rd32(hw, I40E_QINT_TQCTL(0));
        reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
        reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
        wr32(hw, I40E_QINT_TQCTL(0), reg);

        ixl_enable_legacy(hw);

        return;
}

/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
        struct ixl_queue        *que = arg;
        struct ixl_vsi  *vsi = que->vsi;
        struct i40e_hw  *hw = vsi->hw;
        struct tx_ring  *txr = &que->txr;
        bool            more_tx, more_rx;

        /* Protect against spurious interrupts */
        if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
                return;

        ++que->irqs;

        more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

        IXL_TX_LOCK(txr);
        more_tx = ixl_txeof(que);
        /*
        ** Make certain that if the stack
        ** has anything queued the task gets
        ** scheduled to handle it.
        */
        if (!drbr_empty(vsi->ifp, txr->br))
                more_tx = 1;
        IXL_TX_UNLOCK(txr);

        ixl_set_queue_rx_itr(que);
        ixl_set_queue_tx_itr(que);

        if (more_tx || more_rx)
                taskqueue_enqueue(que->tq, &que->task);
        else
                ixl_enable_queue(hw, que->me);

        return;
}

/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_adminq(void *arg)
{
        struct ixl_pf   *pf = arg;
        struct i40e_hw  *hw = &pf->hw;
        device_t        dev = pf->dev;
        u32             reg, mask, rstat_reg;
        bool            do_task = FALSE;

        ++pf->admin_irq;

        reg = rd32(hw, I40E_PFINT_ICR0);
        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /* Check on the cause */
        if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
                mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
                do_task = TRUE;
        }

        if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
                ixl_handle_mdd_event(pf);
                mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
        }

        if (reg & I40E_PFINT_ICR0_GRST_MASK) {
                device_printf(dev, "Reset Requested!\n");
                rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
                rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
                    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
                device_printf(dev, "Reset type: ");
                switch (rstat_reg) {
                /* These others might be handled similarly to an EMPR reset */
                case I40E_RESET_CORER:
                        printf("CORER\n");
                        break;
                case I40E_RESET_GLOBR:
                        printf("GLOBR\n");
                        break;
                case I40E_RESET_EMPR:
                        printf("EMPR\n");
                        atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
                        break;
                default:
                        printf("POR\n");
                        break;
                }
                /* overload admin queue task to check reset progress */
                do_task = TRUE;
        }

        if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
                device_printf(dev, "ECC Error detected!\n");
        }

        if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
                reg = rd32(hw, I40E_PFHMC_ERRORINFO);
                if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
                        device_printf(dev, "HMC Error detected!\n");
                        device_printf(dev, "INFO 0x%08x\n", reg);
                        reg = rd32(hw, I40E_PFHMC_ERRORDATA);
                        device_printf(dev, "DATA 0x%08x\n", reg);
                        wr32(hw, I40E_PFHMC_ERRORINFO, 0);
                }
        }

        if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
                device_printf(dev, "PCI Exception detected!\n");
        }

#ifdef PCI_IOV
        if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
                mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
                taskqueue_enqueue(pf->tq, &pf->vflr_task);
        }
#endif

        if (do_task)
                taskqueue_enqueue(pf->tq, &pf->adminq);
        else
                ixl_enable_adminq(hw);
}

void
ixl_set_promisc(struct ixl_vsi *vsi)
{
        struct ifnet    *ifp = vsi->ifp;
        struct i40e_hw  *hw = vsi->hw;
        int             err, mcnt = 0;
        bool            uni = FALSE, multi = FALSE;

        if (ifp->if_flags & IFF_ALLMULTI)
                multi = TRUE;
        else { /* Need to count the multicast addresses */
                struct  ifmultiaddr *ifma;
                if_maddr_rlock(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        if (mcnt == MAX_MULTICAST_ADDR)
                                break;
                        mcnt++;
                }
                if_maddr_runlock(ifp);
        }

        if (mcnt >= MAX_MULTICAST_ADDR)
                multi = TRUE;
        if (ifp->if_flags & IFF_PROMISC)
                uni = TRUE;

        err = i40e_aq_set_vsi_unicast_promiscuous(hw,
            vsi->seid, uni, NULL, TRUE);
        err = i40e_aq_set_vsi_multicast_promiscuous(hw,
            vsi->seid, multi, NULL);
        return;
}

/*********************************************************************
 *      Filter Routines
 *
 *      Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
        struct  ifmultiaddr     *ifma;
        struct ifnet            *ifp = vsi->ifp;
        struct i40e_hw          *hw = vsi->hw;
        int                     mcnt = 0, flags;

        IOCTL_DEBUGOUT("ixl_add_multi: begin");

        if_maddr_rlock(ifp);
        /*
        ** First just get a count, to decide if we
        ** should simply use multicast promiscuous.
        */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mcnt++;
        }
        if_maddr_runlock(ifp);

        if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
                /* delete existing MC filters */
                ixl_del_hw_filters(vsi, mcnt);
                i40e_aq_set_vsi_multicast_promiscuous(hw,
                    vsi->seid, TRUE, NULL);
                return;
        }

        mcnt = 0;
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                ixl_add_mc_filter(vsi,
                    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
                mcnt++;
        }
        if_maddr_runlock(ifp);
        if (mcnt > 0) {
                flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
                ixl_add_hw_filters(vsi, flags, mcnt);
        }

        IOCTL_DEBUGOUT("ixl_add_multi: end");
        return;
}

void
ixl_del_multi(struct ixl_vsi *vsi)
{
        struct ifnet            *ifp = vsi->ifp;
        struct ifmultiaddr      *ifma;
        struct ixl_mac_filter   *f;
        int                     mcnt = 0;
        bool                    match = FALSE;

        IOCTL_DEBUGOUT("ixl_del_multi: begin");

        /* Search for removed multicast addresses */
        if_maddr_rlock(ifp);
        SLIST_FOREACH(f, &vsi->ftl, next) {
                if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
                        match = FALSE;
                        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                                if (ifma->ifma_addr->sa_family != AF_LINK)
                                        continue;
                                u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
                                if (cmp_etheraddr(f->macaddr, mc_addr)) {
                                        match = TRUE;
                                        break;
                                }
                        }
                        if (match == FALSE) {
                                f->flags |= IXL_FILTER_DEL;
                                mcnt++;
                        }
                }
        }
        if_maddr_runlock(ifp);

        if (mcnt > 0)
                ixl_del_hw_filters(vsi, mcnt);
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 *  Only runs when the driver is configured UP and RUNNING.
 *
 **********************************************************************/

void
ixl_local_timer(void *arg)
{
        struct ixl_pf           *pf = arg;
        struct i40e_hw          *hw = &pf->hw;
        struct ixl_vsi          *vsi = &pf->vsi;
        struct ixl_queue        *que = vsi->queues;
        device_t                dev = pf->dev;
        int                     hung = 0;
        u32                     mask;

        mtx_assert(&pf->pf_mtx, MA_OWNED);

        /* Fire off the adminq task */
        taskqueue_enqueue(pf->tq, &pf->adminq);

        /* Update stats */
        ixl_update_stats_counters(pf);

        /* Check status of the queues */
        mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
                I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

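        /*
         * Writing DYN_CTLN with SWINT_TRIG set fires a software interrupt
         * on that queue's vector, forcing the cleanup path to run for any
         * queue that still has outstanding work.
         */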
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                /* Any queues with outstanding work get a sw irq */
                if (que->busy)
                        wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
                /*
                ** Each time txeof runs without cleaning, while uncleaned
                ** descriptors remain, it increments busy; once busy
                ** reaches IXL_MAX_TX_BUSY we declare the queue hung.
                */
                if (que->busy == IXL_QUEUE_HUNG) {
                        ++hung;
                        continue;
                }
                if (que->busy >= IXL_MAX_TX_BUSY) {
#ifdef IXL_DEBUG
                        device_printf(dev, "Warning queue %d "
                            "appears to be hung!\n", i);
#endif
                        que->busy = IXL_QUEUE_HUNG;
                        ++hung;
                }
        }
        /* Only reinit if all queues show hung */
        if (hung == vsi->num_queues)
                goto hung;

        callout_reset(&pf->timer, hz, ixl_local_timer, pf);
        return;

hung:
        device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
        ixl_init_locked(pf);
}

/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
ixl_update_link_status(struct ixl_pf *pf)
{
        struct ixl_vsi          *vsi = &pf->vsi;
        struct i40e_hw          *hw = &pf->hw;
        struct ifnet            *ifp = vsi->ifp;
        device_t                dev = pf->dev;

        if (pf->link_up) {
                if (vsi->link_active == FALSE) {
                        pf->fc = hw->fc.current_mode;
                        if (bootverbose) {
                                device_printf(dev, "Link is up %d Gbps %s,"
                                    " Flow Control: %s\n",
                                    ((pf->link_speed ==
                                    I40E_LINK_SPEED_40GB)? 40:10),
                                    "Full Duplex", ixl_fc_string[pf->fc]);
                        }
                        vsi->link_active = TRUE;
                        if_link_state_change(ifp, LINK_STATE_UP);
                }
        } else { /* Link down */
                if (vsi->link_active == TRUE) {
                        if (bootverbose)
                                device_printf(dev, "Link is Down\n");
                        if_link_state_change(ifp, LINK_STATE_DOWN);
                        vsi->link_active = FALSE;
                }
        }

        return;
}

/*********************************************************************
 *
 *  This routine stops all traffic on the adapter: it stops the local
 *  timer, disables the rings and their interrupts, and marks the
 *  interface as no longer running.
 *
 **********************************************************************/

void
ixl_stop_locked(struct ixl_pf *pf)
{
        struct ixl_vsi  *vsi = &pf->vsi;
        struct ifnet    *ifp = vsi->ifp;

        INIT_DEBUGOUT("ixl_stop: begin\n");

        IXL_PF_LOCK_ASSERT(pf);

        /* Stop the local timer */
        callout_stop(&pf->timer);

        ixl_disable_rings_intr(vsi);
        ixl_disable_rings(vsi);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}

void
ixl_stop(struct ixl_pf *pf)
{
        IXL_PF_LOCK(pf);
        ixl_stop_locked(pf);
        IXL_PF_UNLOCK(pf);

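        /*
         * Interrupt teardown and taskqueue_free() may sleep, so they are
         * done after the PF lock has been dropped.
         */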
        ixl_teardown_queue_msix(&pf->vsi);
        ixl_free_queue_tqs(&pf->vsi);
}

/*********************************************************************
 *
 *  Setup legacy/MSI interrupt resources and handlers
 *
 **********************************************************************/
int
ixl_assign_vsi_legacy(struct ixl_pf *pf)
{
        device_t        dev = pf->dev;
        struct          ixl_vsi *vsi = &pf->vsi;
        struct          ixl_queue *que = vsi->queues;
        int             error, rid = 0;

        if (pf->msix == 1)
                rid = 1;
        pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &rid, RF_SHAREABLE | RF_ACTIVE);
        if (pf->res == NULL) {
                device_printf(dev, "Unable to allocate"
                    " bus resource: vsi legacy/msi interrupt\n");
                return (ENXIO);
        }

        /* Set the handler function */
        error = bus_setup_intr(dev, pf->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixl_intr, pf, &pf->tag);
        if (error) {
                pf->res = NULL;
                device_printf(dev, "Failed to register legacy/msi handler\n");
                return (error);
        }
        bus_describe_intr(dev, pf->res, pf->tag, "irq0");
        TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
        TASK_INIT(&que->task, 0, ixl_handle_que, que);
        que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(dev));
        TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);

        pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
            taskqueue_thread_enqueue, &pf->tq);
        taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
            device_get_nameunit(dev));

        return (0);
}

int
ixl_setup_adminq_tq(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int error = 0;

        /* Tasklet for Admin Queue interrupts */
        TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
#ifdef PCI_IOV
        /* VFLR Tasklet */
        TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif
        /* Create and start Admin Queue taskqueue */
        pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
            taskqueue_thread_enqueue, &pf->tq);
        if (!pf->tq) {
                device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
                return (ENOMEM);
        }
        error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
            device_get_nameunit(dev));
        if (error) {
                device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
                    error);
                taskqueue_free(pf->tq);
                return (error);
        }
        return (0);
}

int
ixl_setup_queue_tqs(struct ixl_vsi *vsi)
{
        struct ixl_queue *que = vsi->queues;
        device_t dev = vsi->dev;
#ifdef  RSS
        int             cpu_id = 0;
        cpuset_t        cpu_mask;
#endif

        /* Create queue tasks and start queue taskqueues */
        for (int i = 0; i < vsi->num_queues; i++, que++) {
                TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
                TASK_INIT(&que->task, 0, ixl_handle_que, que);
                que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
                CPU_SETOF(cpu_id, &cpu_mask);
                taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
                    &cpu_mask, "%s (bucket %d)",
                    device_get_nameunit(dev), cpu_id);
#else
                taskqueue_start_threads(&que->tq, 1, PI_NET,
                    "%s (que %d)", device_get_nameunit(dev), que->me);
#endif
        }

        return (0);
}

void
ixl_free_adminq_tq(struct ixl_pf *pf)
{
        if (pf->tq) {
                taskqueue_free(pf->tq);
                pf->tq = NULL;
        }
}

void
ixl_free_queue_tqs(struct ixl_vsi *vsi)
{
        struct ixl_queue *que = vsi->queues;

        for (int i = 0; i < vsi->num_queues; i++, que++) {
                if (que->tq) {
                        taskqueue_free(que->tq);
                        que->tq = NULL;
                }
        }
}

int
ixl_setup_adminq_msix(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        int rid, error = 0;

        /* Admin IRQ rid is 1, vector is 0 */
        rid = 1;
        /* Get interrupt resource from bus */
        pf->res = bus_alloc_resource_any(dev,
            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
        if (!pf->res) {
                device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
                    " interrupt failed [rid=%d]\n", rid);
                return (ENXIO);
        }
        /* Then associate interrupt with handler */
        error = bus_setup_intr(dev, pf->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixl_msix_adminq, pf, &pf->tag);
        if (error) {
                pf->res = NULL;
                device_printf(dev, "bus_setup_intr() for Admin Queue"
                    " interrupt handler failed, error %d\n", error);
                return (ENXIO);
        }
        error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
        if (error) {
                /* Probably non-fatal? */
                device_printf(dev, "bus_describe_intr() for Admin Queue"
                    " interrupt name failed, error %d\n", error);
        }
        pf->admvec = 0;

        return (0);
}

/*
 * Allocate interrupt resources from the bus and associate an interrupt
 * handler with each of the VSI's queues.
 */
int
ixl_setup_queue_msix(struct ixl_vsi *vsi)
{
        device_t        dev = vsi->dev;
        struct          ixl_queue *que = vsi->queues;
        struct          tx_ring  *txr;
        int             error, rid, vector = 1;

        /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
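        /*
         * IRQ rids are offset by one from MSI-X vectors: the adminq owns
         * rid 1 / vector 0, so queue vector N maps to rid N + 1.
         */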
        for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
                int cpu_id = i;
                rid = vector + 1;
                txr = &que->txr;
                que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (!que->res) {
                        device_printf(dev, "bus_alloc_resource_any() for"
                            " Queue %d interrupt failed [rid=%d]\n",
                            que->me, rid);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, que->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixl_msix_que, que, &que->tag);
                if (error) {
                        device_printf(dev, "bus_setup_intr() for Queue %d"
                            " interrupt handler failed, error %d\n",
                            que->me, error);
                        return (error);
                }
                error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
                if (error) {
                        device_printf(dev, "bus_describe_intr() for Queue %d"
                            " interrupt name failed, error %d\n",
                            que->me, error);
                }
                /* Bind the vector to a CPU */
#ifdef RSS
                cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
                error = bus_bind_intr(dev, que->res, cpu_id);
                if (error) {
                        device_printf(dev, "bus_bind_intr() for Queue %d"
                            " to CPU %d failed, error %d\n",
                            que->me, cpu_id, error);
                }
                que->msix = vector;
        }

        return (0);
}

/*
 * When used in a virtualized environment, the PCI BUSMASTER capability may
 * not be set, so explicitly set it here and rewrite the ENABLE bit in the
 * MSIX control register at this point so the host can initialize us
 * successfully.
 */
void
ixl_set_busmaster(device_t dev)
{
        u16 pci_cmd_word;
        int msix_ctrl, rid;

        pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
        pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
        pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);

        /* Rewrite the ENABLE bit only if the MSIX capability exists */
        if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0) {
                rid += PCIR_MSIX_CTRL;
                msix_ctrl = pci_read_config(dev, rid, 2);
                msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
                pci_write_config(dev, rid, msix_ctrl, 2);
        }
}

/*
 * Allocate MSI/X vectors from the OS.
 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
 */
int
ixl_init_msix(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        struct i40e_hw *hw = &pf->hw;
        int auto_max_queues;
        int rid, want, vectors, queues, available;

        /* Override by tunable */
        if (!pf->enable_msix)
                goto no_msix;

        /* Ensure proper operation in virtualized environment */
        ixl_set_busmaster(dev);

        /* First try MSI/X */
        rid = PCIR_BAR(IXL_BAR);
        pf->msix_mem = bus_alloc_resource_any(dev,
            SYS_RES_MEMORY, &rid, RF_ACTIVE);
        if (!pf->msix_mem) {
                /* May not be enabled */
                device_printf(pf->dev,
                    "Unable to map MSIX table\n");
                goto no_msix;
        }

        available = pci_msix_count(dev);
        if (available < 2) {
                /* system has msix disabled (0), or only one vector (1) */
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rid, pf->msix_mem);
                pf->msix_mem = NULL;
                goto no_msix;
        }

        /* Clamp the max number of queues based on:
         * - # of MSI-X vectors available
         * - # of cpus available
         * - # of queues that can be assigned to the LAN VSI
         */
        auto_max_queues = min(mp_ncpus, available - 1);
        if (hw->mac.type == I40E_MAC_X722)
                auto_max_queues = min(auto_max_queues, 128);
        else
                auto_max_queues = min(auto_max_queues, 64);

        /* Override with tunable value if tunable is less than autoconfig count */
        if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
                queues = pf->max_queues;
        /* Use autoconfig amount if that's lower */
        else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
                device_printf(dev, "ixl_max_queues (%d) is too large, using "
                    "autoconfig amount (%d)...\n",
                    pf->max_queues, auto_max_queues);
                queues = auto_max_queues;
        }
        /* Limit maximum auto-configured queues to 8 if no user value is set */
        else
                queues = min(auto_max_queues, 8);

#ifdef  RSS
        /* If we're doing RSS, clamp at the number of RSS buckets */
        if (queues > rss_getnumbuckets())
                queues = rss_getnumbuckets();
#endif

        /*
        ** Want one vector (RX/TX pair) per queue
        ** plus an additional for the admin queue.
        */
        want = queues + 1;
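        /*
         * Example: on an 8-CPU system with 16 vectors available and no
         * tunables set, auto_max_queues = min(8, 15) = 8, queues = 8,
         * and want = 9 (one per queue pair plus one for the admin queue).
         */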
        if (want <= available)  /* Have enough */
                vectors = want;
        else {
                device_printf(pf->dev,
                    "MSIX Configuration Problem, "
                    "%d vectors available but %d wanted!\n",
                    available, want);
                return (0); /* Will go to Legacy setup */
        }

        if (pci_alloc_msix(dev, &vectors) == 0) {
                device_printf(pf->dev,
                    "Using MSIX interrupts with %d vectors\n", vectors);
                pf->msix = vectors;
                pf->vsi.num_queues = queues;
#ifdef RSS
                /*
                 * If we're doing RSS, the number of queues needs to
                 * match the number of RSS buckets that are configured.
                 *
                 * + If there's more queues than RSS buckets, we'll end
                 *   up with queues that get no traffic.
                 *
                 * + If there's more RSS buckets than queues, we'll end
                 *   up having multiple RSS buckets map to the same queue,
                 *   so there'll be some contention.
                 */
                if (queues != rss_getnumbuckets()) {
                        device_printf(dev,
                            "%s: queues (%d) != RSS buckets (%d)"
                            "; performance will be impacted.\n",
                            __func__, queues, rss_getnumbuckets());
                }
#endif
                return (vectors);
        }
no_msix:
        vectors = pci_msi_count(dev);
        pf->vsi.num_queues = 1;
        pf->max_queues = 1;
        pf->enable_msix = 0;
        if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
                device_printf(pf->dev, "Using an MSI interrupt\n");
        else {
                vectors = 0;
                device_printf(pf->dev, "Using a Legacy interrupt\n");
        }
        return (vectors);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        u32 reg;

        /* First set up the adminq - vector 0 */
        wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
        rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

        reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
            I40E_PFINT_ICR0_ENA_GRST_MASK |
            I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
            I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
            I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
            I40E_PFINT_ICR0_ENA_VFLR_MASK |
            I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, reg);

        /*
         * 0x7FF is the end of the queue list.
         * This means we won't use MSI-X vector 0 for a queue interrupt
         * in MSIX mode.
         */
        wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
        /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
        wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

        wr32(hw, I40E_PFINT_DYN_CTL0,
            I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
            I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

        wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
1483
1484 /*
1485  * Configure queue interrupt cause registers in hardware.
1486  */
1487 void
1488 ixl_configure_queue_intr_msix(struct ixl_pf *pf)
1489 {
1490         struct i40e_hw  *hw = &pf->hw;
1491         struct ixl_vsi *vsi = &pf->vsi;
1492         u32             reg;
1493         u16             vector = 1;
1494
1495         for (int i = 0; i < vsi->num_queues; i++, vector++) {
1496                 wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
1497                 /* First queue type is RX / 0 */
1498                 wr32(hw, I40E_PFINT_LNKLSTN(i), i);
1499
1500                 reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
1501                 (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1502                 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1503                 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1504                 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1505                 wr32(hw, I40E_QINT_RQCTL(i), reg);
1506
1507                 reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
1508                 (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1509                 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1510                 (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1511                 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
1512                 wr32(hw, I40E_QINT_TQCTL(i), reg);
1513         }
1514 }
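/*
 * Editorial summary of the loop above (not code from the driver): each
 * MSI-X vector, starting at 1, is given a two-element interrupt cause
 * list in hardware:
 *
 *   PFINT_LNKLSTN(i)      -> RX queue i        (first cause, type RX)
 *   QINT_RQCTL(i).NEXTQ   -> TX queue i        (second cause, type TX)
 *   QINT_TQCTL(i).NEXTQ   -> IXL_QUEUE_EOL     (end of list)
 *
 * so one vector services exactly one RX/TX queue pair.
 */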
1515
1516 /*
1517  * Configure for single-vector MSI or INTx (legacy) operation.
1518  */
1519 void
1520 ixl_configure_legacy(struct ixl_pf *pf)
1521 {
1522         struct i40e_hw  *hw = &pf->hw;
1523         u32             reg;
1524
1525         wr32(hw, I40E_PFINT_ITR0(0), 0);
1526         wr32(hw, I40E_PFINT_ITR0(1), 0);
1527
1528         /* Setup "other" causes */
1529         reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
1530             | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
1531             | I40E_PFINT_ICR0_ENA_GRST_MASK
1532             | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
1533             | I40E_PFINT_ICR0_ENA_GPIO_MASK
1534             | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
1535             | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
1536             | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
1537             | I40E_PFINT_ICR0_ENA_VFLR_MASK
1538             | I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1540         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1541
1542         /* SW_ITR_IDX = 0, but don't change INTENA */
1543         wr32(hw, I40E_PFINT_DYN_CTL0,
1544             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
1545             I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
1546         /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
1547         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
1548
1549         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
1550         wr32(hw, I40E_PFINT_LNKLST0, 0);
1551
1552         /* Associate the queue pair to the vector and enable the q int */
1553         reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
1554             | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
1555             | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
1556         wr32(hw, I40E_QINT_RQCTL(0), reg);
1557
1558         reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
1559             | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
1560             | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
1561         wr32(hw, I40E_QINT_TQCTL(0), reg);
1562 }
1563
1564 int
1565 ixl_allocate_pci_resources(struct ixl_pf *pf)
1566 {
1567         int             rid;
1568         struct i40e_hw *hw = &pf->hw;
1569         device_t        dev = pf->dev;
1570
1571         /* Map BAR0 */
1572         rid = PCIR_BAR(0);
1573         pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1574             &rid, RF_ACTIVE);
1575
1576         if (!(pf->pci_mem)) {
1577                 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
1578                 return (ENXIO);
1579         }
1580
1581         /* Save off the PCI information */
1582         hw->vendor_id = pci_get_vendor(dev);
1583         hw->device_id = pci_get_device(dev);
1584         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1585         hw->subsystem_vendor_id =
1586             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1587         hw->subsystem_device_id =
1588             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1589
1590         hw->bus.device = pci_get_slot(dev);
1591         hw->bus.func = pci_get_function(dev);
1592
1593         /* Save off register access information */
1594         pf->osdep.mem_bus_space_tag =
1595                 rman_get_bustag(pf->pci_mem);
1596         pf->osdep.mem_bus_space_handle =
1597                 rman_get_bushandle(pf->pci_mem);
1598         pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
1599         pf->osdep.flush_reg = I40E_GLGEN_STAT;
1600         pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
1601
1602         pf->hw.back = &pf->osdep;
1603
1604         return (0);
1605 }
1606
1607 /*
1608  * Teardown and release the admin queue/misc vector
1609  * interrupt.
1610  */
1611 int
1612 ixl_teardown_adminq_msix(struct ixl_pf *pf)
1613 {
1614         device_t                dev = pf->dev;
1615         int                     rid;
1616
1617         if (pf->admvec) /* we are doing MSIX */
1618                 rid = pf->admvec + 1;
1619         else
1620                 rid = (pf->msix != 0) ? 1 : 0;
1621
1622         if (pf->tag != NULL) {
1623                 bus_teardown_intr(dev, pf->res, pf->tag);
1624                 pf->tag = NULL;
1625         }
1626         if (pf->res != NULL) {
1627                 bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
1628                 pf->res = NULL;
1629         }
1630
1631         return (0);
1632 }
1633
1634 int
1635 ixl_teardown_queue_msix(struct ixl_vsi *vsi)
1636 {
1637         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
1638         struct ixl_queue        *que = vsi->queues;
1639         device_t                dev = vsi->dev;
1640         int                     rid, error = 0;
1641
1642         /* We may get here before stations are setup */
1643         if ((!pf->enable_msix) || (que == NULL))
1644                 return (0);
1645
1646         /* Release all MSIX queue resources */
1647         for (int i = 0; i < vsi->num_queues; i++, que++) {
1648                 rid = que->msix + 1;
1649                 if (que->tag != NULL) {
1650                         error = bus_teardown_intr(dev, que->res, que->tag);
1651                         if (error) {
1652                                 device_printf(dev, "bus_teardown_intr() for"
1653                                     " Queue %d interrupt failed\n",
1654                                     que->me);
1655                                 // return (ENXIO);
1656                         }
1657                         que->tag = NULL;
1658                 }
1659                 if (que->res != NULL) {
1660                         error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1661                         if (error) {
1662                                 device_printf(dev, "bus_release_resource() for"
1663                                     " Queue %d interrupt failed [rid=%d]\n",
1664                                     que->me, rid);
1665                                 // return (ENXIO);
1666                         }
1667                         que->res = NULL;
1668                 }
1669         }
1670
1671         return (0);
1672 }
1673
1674 void
1675 ixl_free_pci_resources(struct ixl_pf *pf)
1676 {
1677         device_t                dev = pf->dev;
1678         int                     memrid;
1679
1680         ixl_teardown_queue_msix(&pf->vsi);
1681         ixl_teardown_adminq_msix(pf);
1682
1683         if (pf->msix)
1684                 pci_release_msi(dev);
1685         
1686         memrid = PCIR_BAR(IXL_BAR);
1687
1688         if (pf->msix_mem != NULL)
1689                 bus_release_resource(dev, SYS_RES_MEMORY,
1690                     memrid, pf->msix_mem);
1691
1692         if (pf->pci_mem != NULL)
1693                 bus_release_resource(dev, SYS_RES_MEMORY,
1694                     PCIR_BAR(0), pf->pci_mem);
1695
1696         return;
1697 }
1698
1699 void
1700 ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
1701 {
1702         /* Display supported media types */
1703         if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
1704                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1705
1706         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
1707                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1708         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
1709                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1710         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
1711                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
1712
1713         if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
1714             phy_type & (1 << I40E_PHY_TYPE_XFI) ||
1715             phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
1716                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
1717
1718         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
1719                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1720         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
1721                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1722         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
1723                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1724
1725         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
1726             phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
1727             phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
1728             phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
1729             phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
1730                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
1731         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
1732                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
1733         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
1734                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
1735
1736         if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
1737                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1738
1739         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
1740             || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
1741                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
1742         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
1743                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
1744         if (phy_type & (1 << I40E_PHY_TYPE_SFI))
1745                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
1746         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
1747                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1748         if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
1749                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1750
1751         if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
1752                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
1753
1754         if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
1755                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
1756         if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
1757                 ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
1758 }
1759
1760 /*********************************************************************
1761  *
1762  *  Setup networking device structure and register an interface.
1763  *
1764  **********************************************************************/
1765 int
1766 ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
1767 {
1768         struct ifnet            *ifp;
1769         struct i40e_hw          *hw = vsi->hw;
1770         struct ixl_queue        *que = vsi->queues;
1771         struct i40e_aq_get_phy_abilities_resp abilities;
1772         enum i40e_status_code aq_error = 0;
1773
1774         INIT_DEBUGOUT("ixl_setup_interface: begin");
1775
1776         ifp = vsi->ifp = if_alloc(IFT_ETHER);
1777         if (ifp == NULL) {
1778                 device_printf(dev, "can not allocate ifnet structure\n");
1779                 return (-1);
1780         }
1781         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1782         ifp->if_mtu = ETHERMTU;
1783         ifp->if_baudrate = IF_Gbps(40);
1784         ifp->if_init = ixl_init;
1785         ifp->if_softc = vsi;
1786         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1787         ifp->if_ioctl = ixl_ioctl;
1788
1789 #if __FreeBSD_version >= 1100036
1790         if_setgetcounterfn(ifp, ixl_get_counter);
1791 #endif
1792
1793         ifp->if_transmit = ixl_mq_start;
1794
1795         ifp->if_qflush = ixl_qflush;
1796
1797         ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1798
1799         vsi->max_frame_size =
1800             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1801             + ETHER_VLAN_ENCAP_LEN;
1802
1803         /* Set TSO limits */
1804         ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
1805         ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
1806         ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
1807
1808         /*
1809          * Tell the upper layer(s) we support long frames.
1810          */
1811         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1812
1813         ifp->if_capabilities |= IFCAP_HWCSUM;
1814         ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1815         ifp->if_capabilities |= IFCAP_TSO;
1816         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1817         ifp->if_capabilities |= IFCAP_LRO;
1818
1819         /* VLAN capabilities */
1820         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1821                              |  IFCAP_VLAN_HWTSO
1822                              |  IFCAP_VLAN_MTU
1823                              |  IFCAP_VLAN_HWCSUM;
1824         ifp->if_capenable = ifp->if_capabilities;
1825
1826         /*
1827         ** Don't turn this on by default: if VLANs are
1828         ** created on another pseudo device (e.g. lagg),
1829         ** VLAN events are not passed through, which breaks
1830         ** operation; with HW FILTER off it works. If you are
1831         ** using VLANs directly on the ixl driver, you can
1832         ** enable this to get full hardware tag filtering.
1833         */
1834         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
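        /*
         * Editorial note: this capability can also be toggled at runtime
         * from userland; assuming the standard ifconfig name for
         * IFCAP_VLAN_HWFILTER, something like:
         *
         *   # ifconfig ixl0 vlanhwfilter     (enable)
         *   # ifconfig ixl0 -vlanhwfilter    (disable)
         */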
1835
1836         /*
1837          * Specify the media types supported by this adapter and register
1838          * callbacks to update media and link information
1839          */
1840         ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
1841                      ixl_media_status);
1842
1843         aq_error = i40e_aq_get_phy_capabilities(hw,
1844             FALSE, TRUE, &abilities, NULL);
1845         /* May need delay to detect fiber correctly */
1846         if (aq_error == I40E_ERR_UNKNOWN_PHY) {
1847                 i40e_msec_delay(200);
1848                 aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
1849                     TRUE, &abilities, NULL);
1850         }
1851         if (aq_error) {
1852                 if (aq_error == I40E_ERR_UNKNOWN_PHY)
1853                         device_printf(dev, "Unknown PHY type detected!\n");
1854                 else
1855                         device_printf(dev,
1856                             "Error getting supported media types, err %d,"
1857                             " AQ error %d\n", aq_error, hw->aq.asq_last_status);
1858                 return (0);
1859         }
1860
1861         ixl_add_ifmedia(vsi, abilities.phy_type);
1862
1863         /* Use autoselect media by default */
1864         ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1865         ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
1866
1867         ether_ifattach(ifp, hw->mac.addr);
1868
1869         return (0);
1870 }
1871
1872 /*
1873 ** Run when the Admin Queue gets a link state change interrupt.
1874 */
1875 void
1876 ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1877 {
1878         struct i40e_hw  *hw = &pf->hw; 
1879         device_t dev = pf->dev;
1880         struct i40e_aqc_get_link_status *status =
1881             (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
1882
1883         /* Request link status from adapter */
1884         hw->phy.get_link_info = TRUE;
1885         i40e_get_link_status(hw, &pf->link_up);
1886
1887         /* Print out message if an unqualified module is found */
1888         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
1889             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
1890             (!(status->link_info & I40E_AQ_LINK_UP)))
1891                 device_printf(dev, "Link failed because "
1892                     "an unqualified module was detected!\n");
1893
1894         /* Update OS link info */
1895         ixl_update_link_status(pf);
1896 }
1897
1898 /*********************************************************************
1899  *
1900  *  Get Firmware Switch configuration
1901  *      - this will need to be more robust when more complex
1902  *        switch configurations are enabled.
1903  *
1904  **********************************************************************/
1905 int
1906 ixl_switch_config(struct ixl_pf *pf)
1907 {
1908         struct i40e_hw  *hw = &pf->hw; 
1909         struct ixl_vsi  *vsi = &pf->vsi;
1910         device_t        dev = vsi->dev;
1911         struct i40e_aqc_get_switch_config_resp *sw_config;
1912         u8      aq_buf[I40E_AQ_LARGE_BUF];
1913         int     ret;
1914         u16     next = 0;
1915
1916         memset(&aq_buf, 0, sizeof(aq_buf));
1917         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
1918         ret = i40e_aq_get_switch_config(hw, sw_config,
1919             sizeof(aq_buf), &next, NULL);
1920         if (ret) {
1921                 device_printf(dev, "aq_get_switch_config() failed, error %d,"
1922                     " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
1923                 return (ret);
1924         }
1925         if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
1926                 device_printf(dev,
1927                     "Switch config: header reported: %d in structure, %d total\n",
1928                     sw_config->header.num_reported, sw_config->header.num_total);
1929                 for (int i = 0; i < sw_config->header.num_reported; i++) {
1930                         device_printf(dev,
1931                             "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
1932                             sw_config->element[i].element_type,
1933                             sw_config->element[i].seid,
1934                             sw_config->element[i].uplink_seid,
1935                             sw_config->element[i].downlink_seid);
1936                 }
1937         }
1938         /* Simplified due to a single VSI */
1939         vsi->uplink_seid = sw_config->element[0].uplink_seid;
1940         vsi->downlink_seid = sw_config->element[0].downlink_seid;
1941         vsi->seid = sw_config->element[0].seid;
1942         return (ret);
1943 }
1944
1945 /*********************************************************************
1946  *
1947  *  Initialize the VSI:  this handles contexts, which means things
1948  *                       like the number of descriptors and buffer size;
1949  *                       the rings are also initialized here.
1950  *
1951  **********************************************************************/
1952 int
1953 ixl_initialize_vsi(struct ixl_vsi *vsi)
1954 {
1955         struct ixl_pf           *pf = vsi->back;
1956         struct ixl_queue        *que = vsi->queues;
1957         device_t                dev = vsi->dev;
1958         struct i40e_hw          *hw = vsi->hw;
1959         struct i40e_vsi_context ctxt;
1960         int                     tc_queues;
1961         int                     err = 0;
1962
1963         memset(&ctxt, 0, sizeof(ctxt));
1964         ctxt.seid = vsi->seid;
1965         if (pf->veb_seid != 0)
1966                 ctxt.uplink_seid = pf->veb_seid;
1967         ctxt.pf_num = hw->pf_id;
1968         err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
1969         if (err) {
1970                 device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
1971                     " aq_error %d\n", err, hw->aq.asq_last_status);
1972                 return (err);
1973         }
1974         ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
1975             "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
1976             "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
1977             "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
1978             ctxt.uplink_seid, ctxt.vsi_number,
1979             ctxt.vsis_allocated, ctxt.vsis_unallocated,
1980             ctxt.flags, ctxt.pf_num, ctxt.vf_num,
1981             ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
1982         /*
1983         ** Set the queue and traffic class bits
1984         **  - when multiple traffic classes are supported
1985         **    this will need to be more robust.
1986         */
1987         ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1988         ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
1989         /* In contig mode, que_mapping[0] is first queue index used by this VSI */
1990         ctxt.info.queue_mapping[0] = 0;
1991         /*
1992          * This VSI will only use traffic class 0; start traffic class 0's
1993          * queue allocation at queue 0, and assign it 2^tc_queues queues (though
1994          * the driver may not use all of them).
1995          */
1996         tc_queues = bsrl(pf->qtag.num_allocated);
1997         ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
1998             & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
1999             ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
2000             & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
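        /*
         * Editorial example (illustrative numbers): with
         * pf->qtag.num_allocated == 8, bsrl(8) == 3, so TC0 is assigned
         * 2^3 = 8 queues starting at queue offset 0.
         */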
2001
2002         /* Set VLAN receive stripping mode */
2003         ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2004         ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2005         if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2006                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2007         else
2008                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2009
2010         /* Save VSI number and info for use later */
2011         vsi->vsi_num = ctxt.vsi_number;
2012         bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2013
2014         /* Reset VSI statistics */
2015         ixl_vsi_reset_stats(vsi);
2016         vsi->hw_filters_add = 0;
2017         vsi->hw_filters_del = 0;
2018
2019         ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2020
2021         err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2022         if (err) {
2023                 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
2024                     " aq_error %d\n", err, hw->aq.asq_last_status);
2025                 return (err);
2026         }
2027
2028         for (int i = 0; i < vsi->num_queues; i++, que++) {
2029                 struct tx_ring          *txr = &que->txr;
2030                 struct rx_ring          *rxr = &que->rxr;
2031                 struct i40e_hmc_obj_txq tctx;
2032                 struct i40e_hmc_obj_rxq rctx;
2033                 u32                     txctl;
2034                 u16                     size;
2035
2036                 /* Setup the HMC TX Context  */
2037                 size = que->num_desc * sizeof(struct i40e_tx_desc);
2038                 memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2039                 tctx.new_context = 1;
2040                 tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2041                 tctx.qlen = que->num_desc;
2042                 tctx.fc_ena = 0;
2043                 tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2044                 /* Enable HEAD writeback */
2045                 tctx.head_wb_ena = 1;
2046                 tctx.head_wb_addr = txr->dma.pa +
2047                     (que->num_desc * sizeof(struct i40e_tx_desc));
2048                 tctx.rdylist_act = 0;
2049                 err = i40e_clear_lan_tx_queue_context(hw, i);
2050                 if (err) {
2051                         device_printf(dev, "Unable to clear TX context %d\n", i);
2052                         break;
2053                 }
2054                 err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2055                 if (err) {
2056                         device_printf(dev, "Unable to set TX context %d\n", i);
2057                         break;
2058                 }
2059                 /* Associate the ring with this PF */
2060                 txctl = I40E_QTX_CTL_PF_QUEUE;
2061                 txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2062                     I40E_QTX_CTL_PF_INDX_MASK);
2063                 wr32(hw, I40E_QTX_CTL(i), txctl);
2064                 ixl_flush(hw);
2065
2066                 /* Do ring (re)init */
2067                 ixl_init_tx_ring(que);
2068
2069                 /* Next setup the HMC RX Context  */
2070                 if (vsi->max_frame_size <= MCLBYTES)
2071                         rxr->mbuf_sz = MCLBYTES;
2072                 else
2073                         rxr->mbuf_sz = MJUMPAGESIZE;
2074
2075                 u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2076
2077                 /* Set up an RX context for the HMC */
2078                 memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2079                 rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2080                 /* ignore header split for now */
2081                 rctx.hbuff = 0;
2082                 rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2083                     vsi->max_frame_size : max_rxmax;
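                /*
                 * Editorial example (illustrative numbers): for a 1500-byte
                 * MTU, mbuf_sz is MCLBYTES (2048); with a rx_buf_chain_len
                 * of 5, max_rxmax is 10240 and rxmax becomes
                 * min(max_frame_size, 10240) = 1522.
                 */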
2084                 rctx.dtype = 0;
2085                 rctx.dsize = 1; /* do 32byte descriptors */
2086                 rctx.hsplit_0 = 0;  /* no HDR split initially */
2087                 rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2088                 rctx.qlen = que->num_desc;
2089                 rctx.tphrdesc_ena = 1;
2090                 rctx.tphwdesc_ena = 1;
2091                 rctx.tphdata_ena = 0;
2092                 rctx.tphhead_ena = 0;
2093                 rctx.lrxqthresh = 2;
2094                 rctx.crcstrip = 1;
2095                 rctx.l2tsel = 1;
2096                 rctx.showiv = 1;
2097                 rctx.fc_ena = 0;
2098                 rctx.prefena = 1;
2099
2100                 err = i40e_clear_lan_rx_queue_context(hw, i);
2101                 if (err) {
2102                         device_printf(dev,
2103                             "Unable to clear RX context %d\n", i);
2104                         break;
2105                 }
2106                 err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2107                 if (err) {
2108                         device_printf(dev, "Unable to set RX context %d\n", i);
2109                         break;
2110                 }
2111                 err = ixl_init_rx_ring(que);
2112                 if (err) {
2113                         device_printf(dev, "ixl_init_rx_ring() failed for queue %d\n", i);
2114                         break;
2115                 }
2116 #ifdef DEV_NETMAP
2117                 /* preserve queue */
2118                 if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2119                         struct netmap_adapter *na = NA(vsi->ifp);
2120                         struct netmap_kring *kring = &na->rx_rings[i];
2121                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2122                         wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2123                 } else
2124 #endif /* DEV_NETMAP */
2125                 wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2126         }
2127         return (err);
2128 }
2129
2130
2131 /*********************************************************************
2132  *
2133  *  Free all VSI structs.
2134  *
2135  **********************************************************************/
2136 void
2137 ixl_free_vsi(struct ixl_vsi *vsi)
2138 {
2139         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
2140         struct ixl_queue        *que = vsi->queues;
2141
2142         /* Free station queues */
2143         if (!vsi->queues)
2144                 goto free_filters;
2145
2146         for (int i = 0; i < vsi->num_queues; i++, que++) {
2147                 struct tx_ring *txr = &que->txr;
2148                 struct rx_ring *rxr = &que->rxr;
2149         
2150                 if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2151                         continue;
2152                 IXL_TX_LOCK(txr);
2153                 ixl_free_que_tx(que);
2154                 if (txr->base)
2155                         i40e_free_dma_mem(&pf->hw, &txr->dma);
2156                 IXL_TX_UNLOCK(txr);
2157                 IXL_TX_LOCK_DESTROY(txr);
2158
2159                 if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2160                         continue;
2161                 IXL_RX_LOCK(rxr);
2162                 ixl_free_que_rx(que);
2163                 if (rxr->base)
2164                         i40e_free_dma_mem(&pf->hw, &rxr->dma);
2165                 IXL_RX_UNLOCK(rxr);
2166                 IXL_RX_LOCK_DESTROY(rxr);
2167         }
2168         free(vsi->queues, M_DEVBUF);
2169
2170 free_filters:
2171         /* Free VSI filter list */
2172         ixl_free_mac_filters(vsi);
2173 }
2174
2175 void
2176 ixl_free_mac_filters(struct ixl_vsi *vsi)
2177 {
2178         struct ixl_mac_filter *f;
2179
2180         while (!SLIST_EMPTY(&vsi->ftl)) {
2181                 f = SLIST_FIRST(&vsi->ftl);
2182                 SLIST_REMOVE_HEAD(&vsi->ftl, next);
2183                 free(f, M_DEVBUF);
2184         }
2185 }
2186
2187 /*
2188  * Fill out fields in queue struct and setup tx/rx memory and structs
2189  */
2190 static int
2191 ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
2192 {
2193         device_t dev = pf->dev;
2194         struct i40e_hw *hw = &pf->hw;
2195         struct ixl_vsi *vsi = &pf->vsi;
2196         struct tx_ring *txr = &que->txr;
2197         struct rx_ring *rxr = &que->rxr;
2198         int error = 0;
2199         int rsize, tsize;
2200
2201         /* ERJ: A lot of references to external objects... */
2202         que->num_desc = pf->ringsz;
2203         que->me = index;
2204         que->vsi = vsi;
2205
2206         txr->que = que;
2207         txr->tail = I40E_QTX_TAIL(que->me);
2208
2209         /* Initialize the TX lock */
2210         snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2211             device_get_nameunit(dev), que->me);
2212         mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2213         /* Create the TX descriptor ring */
2214         tsize = roundup2((que->num_desc *
2215             sizeof(struct i40e_tx_desc)) +
2216             sizeof(u32), DBA_ALIGN);
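        /*
         * Editorial note: the extra sizeof(u32) reserves room for the
         * head-writeback word placed just past the descriptors (see
         * ixl_initialize_vsi(), where head_wb_addr is set to
         * base + num_desc * sizeof(struct i40e_tx_desc)).  Illustrative
         * numbers: 1024 descriptors * 16 bytes + 4, rounded up to
         * DBA_ALIGN.
         */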
2217         if (i40e_allocate_dma_mem(hw,
2218             &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2219                 device_printf(dev,
2220                     "Unable to allocate TX Descriptor memory\n");
2221                 error = ENOMEM;
2222                 goto fail;
2223         }
2224         txr->base = (struct i40e_tx_desc *)txr->dma.va;
2225         bzero((void *)txr->base, tsize);
2226         /* Now allocate transmit soft structs for the ring */
2227         if (ixl_allocate_tx_data(que)) {
2228                 device_printf(dev,
2229                     "Critical Failure setting up TX structures\n");
2230                 error = ENOMEM;
2231                 goto fail;
2232         }
2233         /* Allocate a buf ring */
2234         txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
2235             M_NOWAIT, &txr->mtx);
2236         if (txr->br == NULL) {
2237                 device_printf(dev,
2238                     "Critical Failure setting up TX buf ring\n");
2239                 error = ENOMEM;
2240                 goto fail;
2241         }
2242
2243         rsize = roundup2(que->num_desc *
2244             sizeof(union i40e_rx_desc), DBA_ALIGN);
2245         rxr->que = que;
2246         rxr->tail = I40E_QRX_TAIL(que->me);
2247
2248         /* Initialize the RX side lock */
2249         snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2250             device_get_nameunit(dev), que->me);
2251         mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2252
2253         if (i40e_allocate_dma_mem(hw,
2254             &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2255                 device_printf(dev,
2256                     "Unable to allocate RX Descriptor memory\n");
2257                 error = ENOMEM;
2258                 goto fail;
2259         }
2260         rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2261         bzero((void *)rxr->base, rsize);
2262         /* Allocate receive soft structs for the ring*/
2263         if (ixl_allocate_rx_data(que)) {
2264                 device_printf(dev,
2265                     "Critical Failure setting up receive structs\n");
2266                 error = ENOMEM;
2267                 goto fail;
2268         }
2269
2270         return (0);
2271 fail:
2272         if (rxr->base)
2273                 i40e_free_dma_mem(&pf->hw, &rxr->dma);
2274         if (mtx_initialized(&rxr->mtx))
2275                 mtx_destroy(&rxr->mtx);
2276         if (txr->br) {
2277                 buf_ring_free(txr->br, M_DEVBUF);
2278                 txr->br = NULL;
2279         }
2280         if (txr->base)
2281                 i40e_free_dma_mem(&pf->hw, &txr->dma);
2282         if (mtx_initialized(&txr->mtx))
2283                 mtx_destroy(&txr->mtx);
2284
2285         return (error);
2286 }
2287
2288 /*********************************************************************
2289  *
2290  *  Allocate memory for the VSI (virtual station interface) and its
2291  *  associated queues, rings, and the descriptors for each;
2292  *  called only once at attach.
2293  *
2294  **********************************************************************/
2295 int
2296 ixl_setup_stations(struct ixl_pf *pf)
2297 {
2298         device_t                dev = pf->dev;
2299         struct ixl_vsi          *vsi;
2300         struct ixl_queue        *que;
2301         int                     error = 0;
2302
2303         vsi = &pf->vsi;
2304         vsi->back = (void *)pf;
2305         vsi->hw = &pf->hw;
2306         vsi->id = 0;
2307         vsi->num_vlans = 0;
2309
2310         /* Get memory for the station queues */
2311         if (!(vsi->queues =
2312             (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2313             vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2314                 device_printf(dev, "Unable to allocate queue memory\n");
2315                 error = ENOMEM;
2316                 return (error);
2317         }
2318
2319         for (int i = 0; i < vsi->num_queues; i++) {
2320                 que = &vsi->queues[i];
2321                 error = ixl_setup_queue(que, pf, i);
2322                 if (error)
2323                         return (error);
2324         }
2325
2326         return (0);
2327 }
2328
2329 /*
2330 ** Provide an update to the queue RX
2331 ** interrupt moderation value.
2332 */
2333 void
2334 ixl_set_queue_rx_itr(struct ixl_queue *que)
2335 {
2336         struct ixl_vsi  *vsi = que->vsi;
2337         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2338         struct i40e_hw  *hw = vsi->hw;
2339         struct rx_ring  *rxr = &que->rxr;
2340         u16             rx_itr;
2341         u16             rx_latency = 0;
2342         int             rx_bytes;
2343
2344         /* Idle, do nothing */
2345         if (rxr->bytes == 0)
2346                 return;
2347
2348         if (pf->dynamic_rx_itr) {
2349                 rx_bytes = rxr->bytes/rxr->itr;
2350                 rx_itr = rxr->itr;
2351
2352                 /* Adjust latency range */
2353                 switch (rxr->latency) {
2354                 case IXL_LOW_LATENCY:
2355                         if (rx_bytes > 10) {
2356                                 rx_latency = IXL_AVE_LATENCY;
2357                                 rx_itr = IXL_ITR_20K;
2358                         }
2359                         break;
2360                 case IXL_AVE_LATENCY:
2361                         if (rx_bytes > 20) {
2362                                 rx_latency = IXL_BULK_LATENCY;
2363                                 rx_itr = IXL_ITR_8K;
2364                         } else if (rx_bytes <= 10) {
2365                                 rx_latency = IXL_LOW_LATENCY;
2366                                 rx_itr = IXL_ITR_100K;
2367                         }
2368                         break;
2369                 case IXL_BULK_LATENCY:
2370                         if (rx_bytes <= 20) {
2371                                 rx_latency = IXL_AVE_LATENCY;
2372                                 rx_itr = IXL_ITR_20K;
2373                         }
2374                         break;
2375                 }
2376
2377                 rxr->latency = rx_latency;
2378
2379                 if (rx_itr != rxr->itr) {
2380                         /* do an exponential smoothing */
2381                         rx_itr = (10 * rx_itr * rxr->itr) /
2382                             ((9 * rx_itr) + rxr->itr);
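                        /*
                         * Editorial example (illustrative numbers): the
                         * smoothing above steps toward the new target
                         * rather than jumping.  With a target rx_itr of
                         * 122 and a current itr of 40:
                         * (10*122*40) / ((9*122)+40) = 48800/1138 = 42.
                         */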
2383                         rxr->itr = rx_itr & IXL_MAX_ITR;
2384                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2385                             que->me), rxr->itr);
2386                 }
2387         } else { /* We may have toggled to non-dynamic */
2388                 if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2389                         vsi->rx_itr_setting = pf->rx_itr;
2390                 /* Update the hardware if needed */
2391                 if (rxr->itr != vsi->rx_itr_setting) {
2392                         rxr->itr = vsi->rx_itr_setting;
2393                         wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2394                             que->me), rxr->itr);
2395                 }
2396         }
2397         rxr->bytes = 0;
2398         rxr->packets = 0;
2399         return;
2400 }
2401
2402
2403 /*
2404 ** Provide an update to the queue TX
2405 ** interrupt moderation value.
2406 */
2407 void
2408 ixl_set_queue_tx_itr(struct ixl_queue *que)
2409 {
2410         struct ixl_vsi  *vsi = que->vsi;
2411         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2412         struct i40e_hw  *hw = vsi->hw;
2413         struct tx_ring  *txr = &que->txr;
2414         u16             tx_itr;
2415         u16             tx_latency = 0;
2416         int             tx_bytes;
2417
2419         /* Idle, do nothing */
2420         if (txr->bytes == 0)
2421                 return;
2422
2423         if (pf->dynamic_tx_itr) {
2424                 tx_bytes = txr->bytes/txr->itr;
2425                 tx_itr = txr->itr;
2426
2427                 switch (txr->latency) {
2428                 case IXL_LOW_LATENCY:
2429                         if (tx_bytes > 10) {
2430                                 tx_latency = IXL_AVE_LATENCY;
2431                                 tx_itr = IXL_ITR_20K;
2432                         }
2433                         break;
2434                 case IXL_AVE_LATENCY:
2435                         if (tx_bytes > 20) {
2436                                 tx_latency = IXL_BULK_LATENCY;
2437                                 tx_itr = IXL_ITR_8K;
2438                         } else if (tx_bytes <= 10) {
2439                                 tx_latency = IXL_LOW_LATENCY;
2440                                 tx_itr = IXL_ITR_100K;
2441                         }
2442                         break;
2443                 case IXL_BULK_LATENCY:
2444                         if (tx_bytes <= 20) {
2445                                 tx_latency = IXL_AVE_LATENCY;
2446                                 tx_itr = IXL_ITR_20K;
2447                         }
2448                         break;
2449                 }
2450
2451                 txr->latency = tx_latency;
2452
2453                 if (tx_itr != txr->itr) {
2454                         /* do an exponential smoothing */
2455                         tx_itr = (10 * tx_itr * txr->itr) /
2456                             ((9 * tx_itr) + txr->itr);
2457                         txr->itr = tx_itr & IXL_MAX_ITR;
2458                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2459                             que->me), txr->itr);
2460                 }
2461
2462         } else { /* We may have toggled to non-dynamic */
2463                 if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2464                         vsi->tx_itr_setting = pf->tx_itr;
2465                 /* Update the hardware if needed */
2466                 if (txr->itr != vsi->tx_itr_setting) {
2467                         txr->itr = vsi->tx_itr_setting;
2468                         wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2469                             que->me), txr->itr);
2470                 }
2471         }
2472         txr->bytes = 0;
2473         txr->packets = 0;
2474         return;
2475 }
2476
2477 void
2478 ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
2479     struct sysctl_ctx_list *ctx, const char *sysctl_name)
2480 {
2481         struct sysctl_oid *tree;
2482         struct sysctl_oid_list *child;
2483         struct sysctl_oid_list *vsi_list;
2484
2485         tree = device_get_sysctl_tree(pf->dev);
2486         child = SYSCTL_CHILDREN(tree);
2487         vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
2488                                    CTLFLAG_RD, NULL, "VSI Number");
2489         vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
2490
2491         ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
2492 }
2493
2494 #ifdef IXL_DEBUG
2495 /**
2496  * ixl_sysctl_qtx_tail_handler
2497  * Retrieves I40E_QTX_TAIL value from hardware
2498  * for a sysctl.
2499  */
2500 int
2501 ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
2502 {
2503         struct ixl_queue *que;
2504         int error;
2505         u32 val;
2506
2507         que = ((struct ixl_queue *)oidp->oid_arg1);
2508         if (!que) return (0);
2509
2510         val = rd32(que->vsi->hw, que->txr.tail);
2511         error = sysctl_handle_int(oidp, &val, 0, req);
2512         if (error || !req->newptr)
2513                 return (error);
2514         return (0);
2515 }
2516
2517 /**
2518  * ixl_sysctl_qrx_tail_handler
2519  * Retrieves I40E_QRX_TAIL value from hardware
2520  * for a sysctl.
2521  */
2522 int
2523 ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
2524 {
2525         struct ixl_queue *que;
2526         int error;
2527         u32 val;
2528
2529         que = ((struct ixl_queue *)oidp->oid_arg1);
2530         if (!que) return (0);
2531
2532         val = rd32(que->vsi->hw, que->rxr.tail);
2533         error = sysctl_handle_int(oidp, &val, 0, req);
2534         if (error || !req->newptr)
2535                 return (error);
2536         return (0);
2537 }
2538 #endif
2539
2540 /*
2541  * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
2542  * Writes to the ITR registers immediately.
2543  */
2544 static int
2545 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
2546 {
2547         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2548         device_t dev = pf->dev;
2549         int error = 0;
2550         int requested_tx_itr;
2551
2552         requested_tx_itr = pf->tx_itr;
2553         error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2554         if ((error) || (req->newptr == NULL))
2555                 return (error);
2556         if (pf->dynamic_tx_itr) {
2557                 device_printf(dev,
2558                     "Cannot set TX itr value while dynamic TX itr is enabled\n");
2559                     return (EINVAL);
2560         }
2561         if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2562                 device_printf(dev,
2563                     "Invalid TX itr value; value must be between 0 and %d\n",
2564                         IXL_MAX_ITR);
2565                 return (EINVAL);
2566         }
2567
2568         pf->tx_itr = requested_tx_itr;
2569         ixl_configure_tx_itr(pf);
2570
2571         return (error);
2572 }
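/*
 * Editorial usage note: assuming this handler is registered under the
 * device sysctl tree as "tx_itr" (the registration lives elsewhere in
 * this file), it would be driven from userland roughly as:
 *
 *   # sysctl dev.ixl.0.tx_itr=122
 *
 * The write is rejected while dynamic TX ITR is enabled, or when the
 * value falls outside [0, IXL_MAX_ITR].
 */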
2573
2574 /*
2575  * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
2576  * Writes to the ITR registers immediately.
2577  */
2578 static int
2579 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
2580 {
2581         struct ixl_pf *pf = (struct ixl_pf *)arg1;
2582         device_t dev = pf->dev;
2583         int error = 0;
2584         int requested_rx_itr;
2585
2586         requested_rx_itr = pf->rx_itr;
2587         error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2588         if ((error) || (req->newptr == NULL))
2589                 return (error);
2590         if (pf->dynamic_rx_itr) {
2591                 device_printf(dev,
2592                     "Cannot set RX itr value while dynamic RX itr is enabled\n");
2593                     return (EINVAL);
2594         }
2595         if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2596                 device_printf(dev,
2597                     "Invalid RX itr value; value must be between 0 and %d\n",
2598                         IXL_MAX_ITR);
2599                 return (EINVAL);
2600         }
2601
2602         pf->rx_itr = requested_rx_itr;
2603         ixl_configure_rx_itr(pf);
2604
2605         return (error);
2606 }
2607
2608 void
2609 ixl_add_hw_stats(struct ixl_pf *pf)
2610 {
2611         device_t dev = pf->dev;
2612         struct ixl_vsi *vsi = &pf->vsi;
2613         struct ixl_queue *queues = vsi->queues;
2614         struct i40e_hw_port_stats *pf_stats = &pf->stats;
2615
2616         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2617         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2618         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2619         struct sysctl_oid_list *vsi_list;
2620
2621         struct sysctl_oid *queue_node;
2622         struct sysctl_oid_list *queue_list;
2623
2624         struct tx_ring *txr;
2625         struct rx_ring *rxr;
2626         char queue_namebuf[QUEUE_NAME_LEN];
2627
2628         /* Driver statistics */
2629         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2630                         CTLFLAG_RD, &pf->watchdog_events,
2631                         "Watchdog timeouts");
2632         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2633                         CTLFLAG_RD, &pf->admin_irq,
2634                         "Admin Queue IRQ Handled");
2635
2636         ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
2637         vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
2638
2639         /* Queue statistics */
2640         for (int q = 0; q < vsi->num_queues; q++) {
2641                 snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2642                 queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
2643                     OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
2644                 queue_list = SYSCTL_CHILDREN(queue_node);
2645
2646                 txr = &(queues[q].txr);
2647                 rxr = &(queues[q].rxr);
2648
2649                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2650                                 CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2651                                 "m_defrag() failed");
2652                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2653                                 CTLFLAG_RD, &(queues[q].irqs),
2654                                 "irqs on this queue");
2655                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2656                                 CTLFLAG_RD, &(queues[q].tso),
2657                                 "TSO");
2658                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
2659                                 CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
2660                                 "Driver tx dma failure in xmit");
2661                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2662                                 CTLFLAG_RD, &(txr->no_desc),
2663                                 "Queue No Descriptor Available");
2664                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2665                                 CTLFLAG_RD, &(txr->total_packets),
2666                                 "Queue Packets Transmitted");
2667                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2668                                 CTLFLAG_RD, &(txr->tx_bytes),
2669                                 "Queue Bytes Transmitted");
2670                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2671                                 CTLFLAG_RD, &(rxr->rx_packets),
2672                                 "Queue Packets Received");
2673                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2674                                 CTLFLAG_RD, &(rxr->rx_bytes),
2675                                 "Queue Bytes Received");
2676                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
2677                                 CTLFLAG_RD, &(rxr->desc_errs),
2678                                 "Queue Rx Descriptor Errors");
2679                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
2680                                 CTLFLAG_RD, &(rxr->itr), 0,
2681                                 "Queue Rx ITR Interval");
2682                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
2683                                 CTLFLAG_RD, &(txr->itr), 0,
2684                                 "Queue Tx ITR Interval");
2685 #ifdef IXL_DEBUG
2686                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
2687                                 CTLFLAG_RD, &(rxr->not_done),
2688                                 "Queue Rx Descriptors not Done");
2689                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
2690                                 CTLFLAG_RD, &(rxr->next_refresh), 0,
2691                                 "Queue Rx index of next descriptor to refresh");
2692                 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
2693                                 CTLFLAG_RD, &(rxr->next_check), 0,
2694                                 "Queue Rx index of next descriptor to check");
2695                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail", 
2696                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2697                                 sizeof(struct ixl_queue),
2698                                 ixl_sysctl_qtx_tail_handler, "IU",
2699                                 "Queue Transmit Descriptor Tail");
2700                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail", 
2701                                 CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
2702                                 sizeof(struct ixl_queue),
2703                                 ixl_sysctl_qrx_tail_handler, "IU",
2704                                 "Queue Receive Descriptor Tail");
2705 #endif
2706         }
2707
2708         /* MAC stats */
2709         ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2710 }
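/*
 * Editorial note: given the node names used above ("pf" for the PF VSI,
 * "queN" per queue, "mac" for port statistics), the resulting sysctl
 * layout looks roughly like (unit number illustrative):
 *
 *   dev.ixl.0.pf.que0.tx_packets
 *   dev.ixl.0.pf.que0.rx_bytes
 *   dev.ixl.0.mac.crc_errors
 */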
2711
2712 void
2713 ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2714         struct sysctl_oid_list *child,
2715         struct i40e_eth_stats *eth_stats)
2716 {
2717         struct ixl_sysctl_info ctls[] =
2718         {
2719                 {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2720                 {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2721                         "Unicast Packets Received"},
2722                 {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2723                         "Multicast Packets Received"},
2724                 {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2725                         "Broadcast Packets Received"},
2726                 {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2727                 {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2728                 {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2729                 {&eth_stats->tx_multicast, "mcast_pkts_txd",
2730                         "Multicast Packets Transmitted"},
2731                 {&eth_stats->tx_broadcast, "bcast_pkts_txd",
2732                         "Broadcast Packets Transmitted"},
2733                 /* end */
2734                 {0,0,0}
2735         };
2736
2737         struct ixl_sysctl_info *entry = ctls;
2738         while (entry->stat != 0)
2739         {
2740                 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2741                                 CTLFLAG_RD, entry->stat,
2742                                 entry->description);
2743                 entry++;
2744         }
2745 }
2746
2747 void
2748 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2749         struct sysctl_oid_list *child,
2750         struct i40e_hw_port_stats *stats)
2751 {
2752         struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2753                                     CTLFLAG_RD, NULL, "MAC Statistics");
2754         struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
2755
2756         struct i40e_eth_stats *eth_stats = &stats->eth;
2757         ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
2758
2759         struct ixl_sysctl_info ctls[] = 
2760         {
2761                 {&stats->crc_errors, "crc_errors", "CRC Errors"},
2762                 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
2763                 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
2764                 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
2765                 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
2766                 /* Packet Reception Stats */
2767                 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
2768                 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
2769                 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
2770                 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
2771                 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
2772                 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
2773                 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
2774                 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
2775                 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
2776                 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
2777                 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
2778                 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
2779                 /* Packet Transmission Stats */
2780                 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
2781                 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
2782                 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
2783                 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
2784                 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
2785                 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
2786                 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
2787                 /* Flow control */
2788                 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
2789                 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
2790                 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
2791                 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
2792                 /* End */
2793                 {0,0,0}
2794         };
2795
2796         struct ixl_sysctl_info *entry = ctls;
2797         while (entry->stat != 0)
2798         {
2799                 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
2800                                 CTLFLAG_RD, entry->stat,
2801                                 entry->description);
2802                 entry++;
2803         }
2804 }
2805
2806 void
2807 ixl_set_rss_key(struct ixl_pf *pf)
2808 {
2809         struct i40e_hw *hw = &pf->hw;
2810         struct ixl_vsi *vsi = &pf->vsi;
2811         device_t        dev = pf->dev;
2812         enum i40e_status_code status;
2813 #ifdef RSS
2814         u32             rss_seed[IXL_RSS_KEY_SIZE_REG];
2815 #else
2816         u32             rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
2817                             0x183cfd8c, 0xce880440, 0x580cbc3c,
2818                             0x35897377, 0x328b25e1, 0x4fa98922,
2819                             0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
2820                             0x0, 0x0, 0x0};
2821 #endif
2822
2823 #ifdef RSS
2824         /* Fetch the configured RSS key */
2825         rss_getkey((uint8_t *) &rss_seed);
2826 #endif
2827         /* Fill out hash function seed */
2828         if (hw->mac.type == I40E_MAC_X722) {
2829                 struct i40e_aqc_get_set_rss_key_data key_data = {0}; /* zero unused extended-key bytes */
2830                 bcopy(rss_seed, key_data.standard_rss_key, 40);
2831                 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
2832                 if (status)
2833                         device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
2834                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
2835         } else {
2836                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2837                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
2838         }
2839 }
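
/*
 * Illustrative sketch, not driver code: how a byte-oriented key (as
 * returned by rss_getkey() above) maps onto the 32-bit words written
 * into the HKEY registers. "pack_rss_key" is a hypothetical helper;
 * IXL_RSS_KEY_SIZE_REG is assumed to be 13 (a 52-byte register key),
 * of which the X722 AQ path above consumes only the first 40 bytes
 * (standard_rss_key).
 */
#if 0
static void
pack_rss_key(const u8 *key, size_t keylen, u32 *words, int nwords)
{
	size_t n = nwords * sizeof(u32);

	/* Zero-fill, then copy at most n key bytes. */
	bzero(words, n);
	bcopy(key, words, keylen < n ? keylen : n);
}
#endif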
2840
2841 /*
2842  * Configure enabled PCTYPES for RSS.
2843  */
2844 void
2845 ixl_set_rss_pctypes(struct ixl_pf *pf)
2846 {
2847         struct i40e_hw *hw = &pf->hw;
2848         u64             set_hena = 0, hena;
2849
2850 #ifdef RSS
2851         u32             rss_hash_config;
2852
2853         rss_hash_config = rss_gethashconfig();
2854         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2855                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2856         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2857                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2858         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2859                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2860         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2861                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2862         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2863                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2864         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2865                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2866         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2867                 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2868 #else
2869         set_hena = IXL_DEFAULT_RSS_HENA;
2870 #endif
2871         hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
2872             ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
2873         hena |= set_hena;
2874         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
2875         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
2876
2877 }
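
/*
 * Illustrative sketch, not driver code: enabling one additional
 * PCTYPE at runtime would use the same read-modify-write as above,
 * since the 64-bit HENA bitmask is split across two 32-bit
 * registers. Written as the body of a hypothetical helper with "hw"
 * in scope:
 */
#if 0
	u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= (u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
#endif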
2878
2879 void
2880 ixl_set_rss_hlut(struct ixl_pf *pf)
2881 {
2882         struct i40e_hw  *hw = &pf->hw;
2883         device_t        dev = pf->dev;
2884         struct ixl_vsi *vsi = &pf->vsi;
2885         int             i, que_id;
2886         int             lut_entry_width;
2887         u32             lut = 0;
2888         enum i40e_status_code status;
2889
2890         if (hw->mac.type == I40E_MAC_X722)
2891                 lut_entry_width = 7;
2892         else
2893                 lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
2894
2895         /* Populate the LUT round-robin across all configured queues */
2896         u8 hlut_buf[512];
2897         for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
2898 #ifdef RSS
2899                 /*
2900                  * Fetch the RSS bucket id for the given indirection entry.
2901                  * Cap it at the number of configured buckets (which is
2902                  * num_queues.)
2903                  * num_queues).
2904                 que_id = rss_get_indirection_to_bucket(i);
2905                 que_id = que_id % vsi->num_queues;
2906 #else
2907                 que_id = i % vsi->num_queues;
2908 #endif
2909                 lut = (que_id & ((0x1 << lut_entry_width) - 1));
2910                 hlut_buf[i] = lut;
2911         }
2912
2913         if (hw->mac.type == I40E_MAC_X722) {
2914                 status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
2915                 if (status)
2916                         device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
2917                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
2918         } else {
2919                 for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
2920                         wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
2921                 ixl_flush(hw);
2922         }
2923 }
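
/*
 * Illustrative sketch, not driver code: the round-robin fill above,
 * factored out. With num_queues == 4 and a 512-entry table this
 * yields 0,1,2,3,0,1,2,3,...; a 7-bit entry width (X722) masks each
 * value with 0x7f.
 */
#if 0
static void
fill_hlut(u8 *lut, int lut_size, int num_queues, int entry_width)
{
	for (int i = 0; i < lut_size; i++)
		lut[i] = (i % num_queues) & ((1 << entry_width) - 1);
}
#endif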
2924
2925 /*
2926 ** Setup the PF's RSS parameters.
2927 */
2928 void
2929 ixl_config_rss(struct ixl_pf *pf)
2930 {
2931         ixl_set_rss_key(pf);
2932         ixl_set_rss_pctypes(pf);
2933         ixl_set_rss_hlut(pf);
2934 }
2935
2936 /*
2937 ** This routine is run via a vlan config EVENT;
2938 ** it enables us to use the HW Filter table since
2939 ** we can get the vlan id. This just creates the
2940 ** entry in the soft version of the VFTA; init will
2941 ** repopulate the real table.
2942 */
2943 void
2944 ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2945 {
2946         struct ixl_vsi  *vsi = ifp->if_softc;
2947         struct i40e_hw  *hw = vsi->hw;
2948         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2949
2950         if (ifp->if_softc !=  arg)   /* Not our event */
2951                 return;
2952
2953         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2954                 return;
2955
2956         IXL_PF_LOCK(pf);
2957         ++vsi->num_vlans;
2958         ixl_add_filter(vsi, hw->mac.addr, vtag);
2959         IXL_PF_UNLOCK(pf);
2960 }
2961
2962 /*
2963 ** This routine is run via a vlan
2964 ** unconfig EVENT; it removes our entry
2965 ** from the soft vfta.
2966 */
2967 void
2968 ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2969 {
2970         struct ixl_vsi  *vsi = ifp->if_softc;
2971         struct i40e_hw  *hw = vsi->hw;
2972         struct ixl_pf   *pf = (struct ixl_pf *)vsi->back;
2973
2974         if (ifp->if_softc !=  arg)
2975                 return;
2976
2977         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2978                 return;
2979
2980         IXL_PF_LOCK(pf);
2981         --vsi->num_vlans;
2982         ixl_del_filter(vsi, hw->mac.addr, vtag);
2983         IXL_PF_UNLOCK(pf);
2984 }
2985
2986 /*
2987 ** This routine updates vlan filters; called by init,
2988 ** it scans the filter table and then updates the hw
2989 ** after a soft reset.
2990 */
2991 void
2992 ixl_setup_vlan_filters(struct ixl_vsi *vsi)
2993 {
2994         struct ixl_mac_filter   *f;
2995         int                     cnt = 0, flags;
2996
2997         if (vsi->num_vlans == 0)
2998                 return;
2999         /*
3000         ** Scan the filter list for vlan entries,
3001         ** mark them for addition and then call
3002         ** for the AQ update.
3003         */
3004         SLIST_FOREACH(f, &vsi->ftl, next) {
3005                 if (f->flags & IXL_FILTER_VLAN) {
3006                         f->flags |=
3007                             (IXL_FILTER_ADD |
3008                             IXL_FILTER_USED);
3009                         cnt++;
3010                 }
3011         }
3012         if (cnt == 0) {
3013                 printf("setup vlan: no filters found!\n");
3014                 return;
3015         }
3016         flags = IXL_FILTER_VLAN;
3017         flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3018         ixl_add_hw_filters(vsi, flags, cnt);
3019         return;
3020 }
3021
3022 /*
3023 ** Initialize filter list and add filters that the hardware
3024 ** needs to know about.
3025 **
3026 ** Requires VSI's filter list & seid to be set before calling.
3027 */
3028 void
3029 ixl_init_filters(struct ixl_vsi *vsi)
3030 {
3031         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3032
3033         /* Add broadcast address */
3034         ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3035
3036         /*
3037          * Prevent Tx flow control frames from being sent out by
3038          * non-firmware transmitters.
3039          * This affects every VSI in the PF.
3040          */
3041         if (pf->enable_tx_fc_filter)
3042                 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3043 }
3044
3045 /*
3046 ** This routine adds multicast filters
3047 */
3048 void
3049 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3050 {
3051         struct ixl_mac_filter *f;
3052
3053         /* Does one already exist */
3054         f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3055         if (f != NULL)
3056                 return;
3057
3058         f = ixl_get_filter(vsi);
3059         if (f == NULL) {
3060                 printf("WARNING: no filter available!!\n");
3061                 return;
3062         }
3063         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3064         f->vlan = IXL_VLAN_ANY;
3065         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3066             | IXL_FILTER_MC);
3067
3068         return;
3069 }
3070
3071 void
3072 ixl_reconfigure_filters(struct ixl_vsi *vsi)
3073 {
3074         ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3075 }
3076
3077 /*
3078 ** This routine adds macvlan filters
3079 */
3080 void
3081 ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3082 {
3083         struct ixl_mac_filter   *f, *tmp;
3084         struct ixl_pf           *pf;
3085         device_t                dev;
3086
3087         DEBUGOUT("ixl_add_filter: begin");
3088
3089         pf = vsi->back;
3090         dev = pf->dev;
3091
3092         /* Does one already exist */
3093         f = ixl_find_filter(vsi, macaddr, vlan);
3094         if (f != NULL)
3095                 return;
3096         /*
3097         ** Is this the first vlan being registered?  If so, we
3098         ** need to remove the ANY filter that indicates we are
3099         ** not in a vlan and replace it with a 0 filter.
3100         */
3101         if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3102                 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3103                 if (tmp != NULL) {
3104                         ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3105                         ixl_add_filter(vsi, macaddr, 0);
3106                 }
3107         }
3108
3109         f = ixl_get_filter(vsi);
3110         if (f == NULL) {
3111                 device_printf(dev, "WARNING: no filter available!!\n");
3112                 return;
3113         }
3114         bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3115         f->vlan = vlan;
3116         f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3117         if (f->vlan != IXL_VLAN_ANY)
3118                 f->flags |= IXL_FILTER_VLAN;
3119         else
3120                 vsi->num_macs++;
3121
3122         ixl_add_hw_filters(vsi, f->flags, 1);
3123         return;
3124 }
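
/*
 * Illustrative sketch, not driver code: the ANY -> 0 swap above in
 * action. A VSI starts with (mac, IXL_VLAN_ANY); when the first vlan
 * is registered (vsi->num_vlans becomes 1 in ixl_register_vlan()),
 * the ANY filter is replaced by (mac, 0) for untagged traffic plus
 * the new tagged filter. Written as it would appear inside a
 * function with vsi/hw in scope and the PF lock held:
 */
#if 0
	ixl_add_filter(vsi, hw->mac.addr, 100);	/* -> (mac,0) + (mac,100) */
	ixl_del_filter(vsi, hw->mac.addr, 100);	/* -> back to (mac,ANY)   */
#endif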
3125
3126 void
3127 ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3128 {
3129         struct ixl_mac_filter *f;
3130
3131         f = ixl_find_filter(vsi, macaddr, vlan);
3132         if (f == NULL)
3133                 return;
3134
3135         f->flags |= IXL_FILTER_DEL;
3136         ixl_del_hw_filters(vsi, 1);
3137         vsi->num_macs--;
3138
3139         /* Check if this is the last vlan removal */
3140         if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3141                 /* Switch back to a non-vlan filter */
3142                 ixl_del_filter(vsi, macaddr, 0);
3143                 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3144         }
3145         return;
3146 }
3147
3148 /*
3149 ** Find the filter with both matching mac addr and vlan id
3150 */
3151 struct ixl_mac_filter *
3152 ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3153 {
3154         struct ixl_mac_filter   *f;
3155         bool                    match = FALSE;
3156
3157         SLIST_FOREACH(f, &vsi->ftl, next) {
3158                 if (!cmp_etheraddr(f->macaddr, macaddr))
3159                         continue;
3160                 if (f->vlan == vlan) {
3161                         match = TRUE;
3162                         break;
3163                 }
3164         }       
3165
3166         if (!match)
3167                 f = NULL;
3168         return (f);
3169 }
3170
3171 /*
3172 ** This routine takes additions to the vsi filter
3173 ** table and creates an Admin Queue call to create
3174 ** the filters in the hardware.
3175 */
3176 void
3177 ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3178 {
3179         struct i40e_aqc_add_macvlan_element_data *a, *b;
3180         struct ixl_mac_filter   *f;
3181         struct ixl_pf           *pf;
3182         struct i40e_hw          *hw;
3183         device_t                dev;
3184         int                     err, j = 0;
3185
3186         pf = vsi->back;
3187         dev = pf->dev;
3188         hw = &pf->hw;
3189         IXL_PF_LOCK_ASSERT(pf);
3190
3191         a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3192             M_DEVBUF, M_NOWAIT | M_ZERO);
3193         if (a == NULL) {
3194                 device_printf(dev, "add_hw_filters failed to get memory\n");
3195                 return;
3196         }
3197
3198         /*
3199         ** Scan the filter list, each time we find one
3200         ** we add it to the admin queue array and turn off
3201         ** the add bit.
3202         */
3203         SLIST_FOREACH(f, &vsi->ftl, next) {
3204                 if (f->flags == flags) {
3205                         b = &a[j]; // a pox on fvl long names :)
3206                         bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3207                         if (f->vlan == IXL_VLAN_ANY) {
3208                                 b->vlan_tag = 0;
3209                                 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3210                         } else {
3211                                 b->vlan_tag = f->vlan;
3212                                 b->flags = 0;
3213                         }
3214                         b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3215                         f->flags &= ~IXL_FILTER_ADD;
3216                         j++;
3217                 }
3218                 if (j == cnt)
3219                         break;
3220         }
3221         if (j > 0) {
3222                 err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3223                 if (err) 
3224                         device_printf(dev, "aq_add_macvlan err %d, "
3225                             "aq_error %d\n", err, hw->aq.asq_last_status);
3226                 else
3227                         vsi->hw_filters_add += j;
3228         }
3229         free(a, M_DEVBUF);
3230         return;
3231 }
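
/*
 * Illustrative sketch, not driver code: shape of one admin-queue
 * element as built in the loop above, for a filter on VLAN 100
 * (example MAC address; only the fields the loop touches are shown):
 */
#if 0
	struct i40e_aqc_add_macvlan_element_data el = {
		.mac_addr = { 0x00, 0xa0, 0xc9, 0x00, 0x00, 0x01 },
		.vlan_tag = 100,
		.flags    = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH,
	};
#endif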
3232
3233 /*
3234 ** This routine takes removals in the vsi filter
3235 ** table and creates an Admin Queue call to delete
3236 ** the filters in the hardware.
3237 */
3238 void
3239 ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3240 {
3241         struct i40e_aqc_remove_macvlan_element_data *d, *e;
3242         struct ixl_pf           *pf;
3243         struct i40e_hw          *hw;
3244         device_t                dev;
3245         struct ixl_mac_filter   *f, *f_temp;
3246         int                     err, j = 0;
3247
3248         DEBUGOUT("ixl_del_hw_filters: begin\n");
3249
3250         pf = vsi->back;
3251         hw = &pf->hw;
3252         dev = pf->dev;
3253
3254         d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3255             M_DEVBUF, M_NOWAIT | M_ZERO);
3256         if (d == NULL) {
3257                 printf("del hw filter failed to get memory\n");
3258                 return;
3259         }
3260
3261         SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3262                 if (f->flags & IXL_FILTER_DEL) {
3263                         e = &d[j]; // a pox on fvl long names :)
3264                         bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3265                         e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3266                         e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3267                         /* delete entry from vsi list */
3268                         SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3269                         free(f, M_DEVBUF);
3270                         j++;
3271                 }
3272                 if (j == cnt)
3273                         break;
3274         }
3275         if (j > 0) {
3276                 err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3277                 if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3278                         int sc = 0;
3279                         for (int i = 0; i < j; i++)
3280                                 sc += (!d[i].error_code);
3281                         vsi->hw_filters_del += sc;
3282                         device_printf(dev,
3283                             "Failed to remove %d/%d filters, aq error %d\n",
3284                             j - sc, j, hw->aq.asq_last_status);
3285                 } else
3286                         vsi->hw_filters_del += j;
3287         }
3288         free(d, M_DEVBUF);
3289
3290         DEBUGOUT("ixl_del_hw_filters: end\n");
3291         return;
3292 }
3293
3294 int
3295 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3296 {
3297         struct i40e_hw  *hw = &pf->hw;
3298         int             error = 0;
3299         u32             reg;
3300         u16             pf_qidx;
3301
3302         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3303
3304         ixl_dbg(pf, IXL_DBG_EN_DIS,
3305             "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
3306             pf_qidx, vsi_qidx);
3307
3308         i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
3309
3310         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3311         reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3312             I40E_QTX_ENA_QENA_STAT_MASK;
3313         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3314         /* Verify the enable took */
3315         for (int j = 0; j < 10; j++) {
3316                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3317                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3318                         break;
3319                 i40e_msec_delay(10);
3320         }
3321         if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3322                 device_printf(pf->dev, "TX queue %d still disabled!\n",
3323                     pf_qidx);
3324                 error = ETIMEDOUT;
3325         }
3326
3327         return (error);
3328 }
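
/*
 * Illustrative sketch, not driver code: the ring enable/disable
 * routines here all share one pattern -- request the state change,
 * then poll the QENA_STAT bit for up to ~100 ms (10 polls x 10 ms)
 * before reporting ETIMEDOUT. A hypothetical generic form:
 */
#if 0
static int
ixl_poll_qena_stat(struct i40e_hw *hw, u32 reg_addr, u32 stat_mask,
    bool want_set)
{
	u32 reg;

	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, reg_addr);
		if (!!(reg & stat_mask) == want_set)
			return (0);
		i40e_msec_delay(10);
	}
	return (ETIMEDOUT);
}
#endif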
3329
3330 int
3331 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3332 {
3333         struct i40e_hw  *hw = &pf->hw;
3334         int             error = 0;
3335         u32             reg;
3336         u16             pf_qidx;
3337
3338         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3339
3340         ixl_dbg(pf, IXL_DBG_EN_DIS,
3341             "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
3342             pf_qidx, vsi_qidx);
3343
3344         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3345         reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3346             I40E_QRX_ENA_QENA_STAT_MASK;
3347         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3348         /* Verify the enable took */
3349         for (int j = 0; j < 10; j++) {
3350                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3351                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3352                         break;
3353                 i40e_msec_delay(10);
3354         }
3355         if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3356                 device_printf(pf->dev, "RX queue %d still disabled!\n",
3357                     pf_qidx);
3358                 error = ETIMEDOUT;
3359         }
3360
3361         return (error);
3362 }
3363
3364 int
3365 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3366 {
3367         int error = 0;
3368
3369         error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
3370         /* Called function already prints error message */
3371         if (error)
3372                 return (error);
3373         error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
3374         return (error);
3375 }
3376
3377 /* For PF VSI only */
3378 int
3379 ixl_enable_rings(struct ixl_vsi *vsi)
3380 {
3381         struct ixl_pf   *pf = vsi->back;
3382         int             error = 0;
3383
3384         for (int i = 0; i < vsi->num_queues; i++) {
3385                 error = ixl_enable_ring(pf, &pf->qtag, i);
3386                 if (error)
3387                         return (error);
3388         }
3389
3390         return (error);
3391 }
3392
3393 int
3394 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3395 {
3396         struct i40e_hw  *hw = &pf->hw;
3397         int             error = 0;
3398         u32             reg;
3399         u16             pf_qidx;
3400
3401         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3402
3403         i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
3404         i40e_usec_delay(500);
3405
3406         reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3407         reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3408         wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
3409         /* Verify the disable took */
3410         for (int j = 0; j < 10; j++) {
3411                 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
3412                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3413                         break;
3414                 i40e_msec_delay(10);
3415         }
3416         if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3417                 device_printf(pf->dev, "TX queue %d still enabled!\n",
3418                     pf_qidx);
3419                 error = ETIMEDOUT;
3420         }
3421
3422         return (error);
3423 }
3424
3425 int
3426 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3427 {
3428         struct i40e_hw  *hw = &pf->hw;
3429         int             error = 0;
3430         u32             reg;
3431         u16             pf_qidx;
3432
3433         pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
3434
3435         reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3436         reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3437         wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
3438         /* Verify the disable took */
3439         for (int j = 0; j < 10; j++) {
3440                 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
3441                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3442                         break;
3443                 i40e_msec_delay(10);
3444         }
3445         if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3446                 device_printf(pf->dev, "RX queue %d still enabled!\n",
3447                     pf_qidx);
3448                 error = ETIMEDOUT;
3449         }
3450
3451         return (error);
3452 }
3453
3454 int
3455 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
3456 {
3457         int error = 0;
3458
3459         error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
3460         /* Called function already prints error message */
3461         if (error)
3462                 return (error);
3463         error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
3464         return (error);
3465 }
3466
3467 /* For PF VSI only */
3468 int
3469 ixl_disable_rings(struct ixl_vsi *vsi)
3470 {
3471         struct ixl_pf   *pf = vsi->back;
3472         int             error = 0;
3473
3474         for (int i = 0; i < vsi->num_queues; i++) {
3475                 error = ixl_disable_ring(pf, &pf->qtag, i);
3476                 if (error)
3477                         return (error);
3478         }
3479
3480         return (error);
3481 }
3482
3483 /**
3484  * ixl_handle_mdd_event
3485  *
3486  * Called from the interrupt handler to identify possibly malicious VFs
3487  * (but it also detects events from the PF)
3488  **/
3489 void
3490 ixl_handle_mdd_event(struct ixl_pf *pf)
3491 {
3492         struct i40e_hw *hw = &pf->hw;
3493         device_t dev = pf->dev;
3494         bool mdd_detected = false;
3495         bool pf_mdd_detected = false;
3496         u32 reg;
3497
3498         /* find what triggered the MDD event */
3499         reg = rd32(hw, I40E_GL_MDET_TX);
3500         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3501                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3502                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
3503                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3504                                 I40E_GL_MDET_TX_EVENT_SHIFT;
3505                 u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3506                                 I40E_GL_MDET_TX_QUEUE_SHIFT;
3507                 device_printf(dev,
3508                     "Malicious Driver Detection event %d"
3509                     " on TX queue %d, pf number %d\n",
3510                     event, queue, pf_num);
3511                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3512                 mdd_detected = true;
3513         }
3514         reg = rd32(hw, I40E_GL_MDET_RX);
3515         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3516                 u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3517                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
3518                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3519                                 I40E_GL_MDET_RX_EVENT_SHIFT;
3520                 u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3521                                 I40E_GL_MDET_RX_QUEUE_SHIFT;
3522                 device_printf(dev,
3523                     "Malicious Driver Detection event %d"
3524                     " on RX queue %d, pf number %d\n",
3525                     event, queue, pf_num);
3526                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3527                 mdd_detected = true;
3528         }
3529
3530         if (mdd_detected) {
3531                 reg = rd32(hw, I40E_PF_MDET_TX);
3532                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3533                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3534                         device_printf(dev,
3535                             "MDD TX event is for this function!\n");
3536                         pf_mdd_detected = true;
3537                 }
3538                 reg = rd32(hw, I40E_PF_MDET_RX);
3539                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3540                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3541                         device_printf(dev,
3542                             "MDD RX event is for this function!\n");
3543                         pf_mdd_detected = true;
3544                 }
3545         }
3546
3547         /* re-enable mdd interrupt cause */
3548         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3549         reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3550         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3551         ixl_flush(hw);
3552 }
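
/*
 * Illustrative sketch, not driver code: the MDD decode above is the
 * usual mask-then-shift idiom for packed register fields. A
 * hypothetical helper makes the pattern explicit, e.g.
 * queue = ixl_reg_field(reg, I40E_GL_MDET_TX_QUEUE_MASK,
 * I40E_GL_MDET_TX_QUEUE_SHIFT):
 */
#if 0
static inline u32
ixl_reg_field(u32 reg, u32 mask, u32 shift)
{
	return ((reg & mask) >> shift);
}
#endif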
3553
3554 void
3555 ixl_enable_intr(struct ixl_vsi *vsi)
3556 {
3557         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
3558         struct i40e_hw          *hw = vsi->hw;
3559         struct ixl_queue        *que = vsi->queues;
3560
3561         if (pf->enable_msix) {
3562                 for (int i = 0; i < vsi->num_queues; i++, que++)
3563                         ixl_enable_queue(hw, que->me);
3564         } else
3565                 ixl_enable_legacy(hw);
3566 }
3567
3568 void
3569 ixl_disable_rings_intr(struct ixl_vsi *vsi)
3570 {
3571         struct i40e_hw          *hw = vsi->hw;
3572         struct ixl_queue        *que = vsi->queues;
3573
3574         for (int i = 0; i < vsi->num_queues; i++, que++)
3575                 ixl_disable_queue(hw, que->me);
3576 }
3577
3578 void
3579 ixl_disable_intr(struct ixl_vsi *vsi)
3580 {
3581         struct ixl_pf           *pf = (struct ixl_pf *)vsi->back;
3582         struct i40e_hw          *hw = vsi->hw;
3583
3584         if (pf->enable_msix)
3585                 ixl_disable_adminq(hw);
3586         else
3587                 ixl_disable_legacy(hw);
3588 }
3589
3590 void
3591 ixl_enable_adminq(struct i40e_hw *hw)
3592 {
3593         u32             reg;
3594
3595         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3596             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3597             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3598         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3599         ixl_flush(hw);
3600 }
3601
3602 void
3603 ixl_disable_adminq(struct i40e_hw *hw)
3604 {
3605         u32             reg;
3606
3607         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3608         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3609         ixl_flush(hw);
3610 }
3611
3612 void
3613 ixl_enable_queue(struct i40e_hw *hw, int id)
3614 {
3615         u32             reg;
3616
3617         reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3618             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3619             (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3620         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3621 }
3622
3623 void
3624 ixl_disable_queue(struct i40e_hw *hw, int id)
3625 {
3626         u32             reg;
3627
3628         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3629         wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3630 }
3631
3632 void
3633 ixl_enable_legacy(struct i40e_hw *hw)
3634 {
3635         u32             reg;
3636         reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3637             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3638             (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3639         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3640 }
3641
3642 void
3643 ixl_disable_legacy(struct i40e_hw *hw)
3644 {
3645         u32             reg;
3646
3647         reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3648         wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3649 }
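
/*
 * Illustrative usage, not driver code: the interrupt helpers above
 * write a complete DYN_CTL0/DYN_CTLN image rather than doing a
 * read-modify-write. Enabling sets INTENA | CLEARPBA with an ITR
 * index of IXL_ITR_NONE; disabling writes the same ITR index with
 * INTENA left clear. Written as it would appear inside a function
 * with hw/que in scope:
 */
#if 0
	ixl_disable_queue(hw, que->me);	/* mask this queue's vector */
	/* ... touch state the ISR also reads ... */
	ixl_enable_queue(hw, que->me);	/* unmask it again */
#endif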
3650
3651 void
3652 ixl_update_stats_counters(struct ixl_pf *pf)
3653 {
3654         struct i40e_hw  *hw = &pf->hw;
3655         struct ixl_vsi  *vsi = &pf->vsi;
3656         struct ixl_vf   *vf;
3657
3658         struct i40e_hw_port_stats *nsd = &pf->stats;
3659         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3660
3661         /* Update hw stats */
3662         ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3663                            pf->stat_offsets_loaded,
3664                            &osd->crc_errors, &nsd->crc_errors);
3665         ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3666                            pf->stat_offsets_loaded,
3667                            &osd->illegal_bytes, &nsd->illegal_bytes);
3668         ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3669                            I40E_GLPRT_GORCL(hw->port),
3670                            pf->stat_offsets_loaded,
3671                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3672         ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3673                            I40E_GLPRT_GOTCL(hw->port),
3674                            pf->stat_offsets_loaded,
3675                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3676         ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3677                            pf->stat_offsets_loaded,
3678                            &osd->eth.rx_discards,
3679                            &nsd->eth.rx_discards);
3680         ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3681                            I40E_GLPRT_UPRCL(hw->port),
3682                            pf->stat_offsets_loaded,
3683                            &osd->eth.rx_unicast,
3684                            &nsd->eth.rx_unicast);
3685         ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3686                            I40E_GLPRT_UPTCL(hw->port),
3687                            pf->stat_offsets_loaded,
3688                            &osd->eth.tx_unicast,
3689                            &nsd->eth.tx_unicast);
3690         ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3691                            I40E_GLPRT_MPRCL(hw->port),
3692                            pf->stat_offsets_loaded,
3693                            &osd->eth.rx_multicast,
3694                            &nsd->eth.rx_multicast);
3695         ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3696                            I40E_GLPRT_MPTCL(hw->port),
3697                            pf->stat_offsets_loaded,
3698                            &osd->eth.tx_multicast,
3699                            &nsd->eth.tx_multicast);
3700         ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3701                            I40E_GLPRT_BPRCL(hw->port),
3702                            pf->stat_offsets_loaded,
3703                            &osd->eth.rx_broadcast,
3704                            &nsd->eth.rx_broadcast);
3705         ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3706                            I40E_GLPRT_BPTCL(hw->port),
3707                            pf->stat_offsets_loaded,
3708                            &osd->eth.tx_broadcast,
3709                            &nsd->eth.tx_broadcast);
3710
3711         ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3712                            pf->stat_offsets_loaded,
3713                            &osd->tx_dropped_link_down,
3714                            &nsd->tx_dropped_link_down);
3715         ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3716                            pf->stat_offsets_loaded,
3717                            &osd->mac_local_faults,
3718                            &nsd->mac_local_faults);
3719         ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3720                            pf->stat_offsets_loaded,
3721                            &osd->mac_remote_faults,
3722                            &nsd->mac_remote_faults);
3723         ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3724                            pf->stat_offsets_loaded,
3725                            &osd->rx_length_errors,
3726                            &nsd->rx_length_errors);
3727
3728         /* Flow control (LFC) stats */
3729         ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3730                            pf->stat_offsets_loaded,
3731                            &osd->link_xon_rx, &nsd->link_xon_rx);
3732         ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3733                            pf->stat_offsets_loaded,
3734                            &osd->link_xon_tx, &nsd->link_xon_tx);
3735         ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3736                            pf->stat_offsets_loaded,
3737                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
3738         ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3739                            pf->stat_offsets_loaded,
3740                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
3741
3742         /* Packet size stats rx */
3743         ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3744                            I40E_GLPRT_PRC64L(hw->port),
3745                            pf->stat_offsets_loaded,
3746                            &osd->rx_size_64, &nsd->rx_size_64);
3747         ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3748                            I40E_GLPRT_PRC127L(hw->port),
3749                            pf->stat_offsets_loaded,
3750                            &osd->rx_size_127, &nsd->rx_size_127);
3751         ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3752                            I40E_GLPRT_PRC255L(hw->port),
3753                            pf->stat_offsets_loaded,
3754                            &osd->rx_size_255, &nsd->rx_size_255);
3755         ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3756                            I40E_GLPRT_PRC511L(hw->port),
3757                            pf->stat_offsets_loaded,
3758                            &osd->rx_size_511, &nsd->rx_size_511);
3759         ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3760                            I40E_GLPRT_PRC1023L(hw->port),
3761                            pf->stat_offsets_loaded,
3762                            &osd->rx_size_1023, &nsd->rx_size_1023);
3763         ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3764                            I40E_GLPRT_PRC1522L(hw->port),
3765                            pf->stat_offsets_loaded,
3766                            &osd->rx_size_1522, &nsd->rx_size_1522);
3767         ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3768                            I40E_GLPRT_PRC9522L(hw->port),
3769                            pf->stat_offsets_loaded,
3770                            &osd->rx_size_big, &nsd->rx_size_big);
3771
3772         /* Packet size stats tx */
3773         ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3774                            I40E_GLPRT_PTC64L(hw->port),
3775                            pf->stat_offsets_loaded,
3776                            &osd->tx_size_64, &nsd->tx_size_64);
3777         ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3778                            I40E_GLPRT_PTC127L(hw->port),
3779                            pf->stat_offsets_loaded,
3780                            &osd->tx_size_127, &nsd->tx_size_127);
3781         ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3782                            I40E_GLPRT_PTC255L(hw->port),
3783                            pf->stat_offsets_loaded,
3784                            &osd->tx_size_255, &nsd->tx_size_255);
3785         ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3786                            I40E_GLPRT_PTC511L(hw->port),
3787                            pf->stat_offsets_loaded,
3788                            &osd->tx_size_511, &nsd->tx_size_511);
3789         ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3790                            I40E_GLPRT_PTC1023L(hw->port),
3791                            pf->stat_offsets_loaded,
3792                            &osd->tx_size_1023, &nsd->tx_size_1023);
3793         ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3794                            I40E_GLPRT_PTC1522L(hw->port),
3795                            pf->stat_offsets_loaded,
3796                            &osd->tx_size_1522, &nsd->tx_size_1522);
3797         ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3798                            I40E_GLPRT_PTC9522L(hw->port),
3799                            pf->stat_offsets_loaded,
3800                            &osd->tx_size_big, &nsd->tx_size_big);
3801
3802         ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3803                            pf->stat_offsets_loaded,
3804                            &osd->rx_undersize, &nsd->rx_undersize);
3805         ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3806                            pf->stat_offsets_loaded,
3807                            &osd->rx_fragments, &nsd->rx_fragments);
3808         ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3809                            pf->stat_offsets_loaded,
3810                            &osd->rx_oversize, &nsd->rx_oversize);
3811         ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3812                            pf->stat_offsets_loaded,
3813                            &osd->rx_jabber, &nsd->rx_jabber);
3814         pf->stat_offsets_loaded = true;
3815         /* End hw stats */
3816
3817         /* Update vsi stats */
3818         ixl_update_vsi_stats(vsi);
3819
3820         for (int i = 0; i < pf->num_vfs; i++) {
3821                 vf = &pf->vfs[i];
3822                 if (vf->vf_flags & VF_FLAG_ENABLED)
3823                         ixl_update_eth_stats(&pf->vfs[i].vsi);
3824         }
3825 }
3826
3827 int
3828 ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
3829 {
3830         struct i40e_hw *hw = &pf->hw;
3831         struct ixl_vsi *vsi = &pf->vsi;
3832         device_t dev = pf->dev;
3833         bool is_up = false;
3834         int error = 0;
3835
3836         is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
3837
3838         /* Teardown */
3839         if (is_up)
3840                 ixl_stop(pf);
3841         error = i40e_shutdown_lan_hmc(hw);
3842         if (error)
3843                 device_printf(dev,
3844                     "Shutdown LAN HMC failed with code %d\n", error);
3845         ixl_disable_adminq(hw);
3846         ixl_teardown_adminq_msix(pf);
3847         error = i40e_shutdown_adminq(hw);
3848         if (error)
3849                 device_printf(dev,
3850                     "Shutdown Admin queue failed with code %d\n", error);
3851
3852         /* Setup */
3853         error = i40e_init_adminq(hw);
3854         if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
3855                 device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
3856                     error);
3857         }
3858         error = ixl_setup_adminq_msix(pf);
3859         if (error) {
3860                 device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
3861                     error);
3862         }
3863         ixl_configure_intr0_msix(pf);
3864         ixl_enable_adminq(hw);
3865         error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
3866             hw->func_caps.num_rx_qp, 0, 0);
3867         if (error) {
3868                 device_printf(dev, "init_lan_hmc failed: %d\n", error);
3869         }
3870         error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
3871         if (error) {
3872                 device_printf(dev, "configure_lan_hmc failed: %d\n", error);
3873         }
3874         if (is_up)
3875                 ixl_init(pf);
3876
3877         return (0);
3878 }
3879
3880 void
3881 ixl_handle_empr_reset(struct ixl_pf *pf)
3882 {
3883         struct i40e_hw *hw = &pf->hw;
3884         device_t dev = pf->dev;
3885         int count = 0;
3886         u32 reg;
3887
3888         /* Typically finishes within 3-4 seconds */
3889         while (count++ < 100) {
3890                 reg = rd32(hw, I40E_GLGEN_RSTAT)
3891                     & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
3892                 if (reg)
3893                         i40e_msec_delay(100);
3894                 else
3895                         break;
3896         }
3897         ixl_dbg(pf, IXL_DBG_INFO,
3898             "EMPR reset wait count: %d\n", count);
3899
3900         device_printf(dev, "Rebuilding driver state...\n");
3901         ixl_rebuild_hw_structs_after_reset(pf);
3902         device_printf(dev, "Rebuilding driver state done.\n");
3903
3904         atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
3905 }
3906
3907 /*
3908 ** Tasklet handler for MSIX Adminq interrupts
3909 **  - run outside interrupt context since it might sleep
3910 */
3911 void
3912 ixl_do_adminq(void *context, int pending)
3913 {
3914         struct ixl_pf                   *pf = context;
3915         struct i40e_hw                  *hw = &pf->hw;
3916         struct i40e_arq_event_info      event;
3917         i40e_status                     ret;
3918         device_t                        dev = pf->dev;
3919         u32                             loop = 0;
3920         u16                             opcode, result;
3921
3922         if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
3923                 /* Flag cleared at end of this function */
3924                 ixl_handle_empr_reset(pf);
3925                 return;
3926         }
3927
3928         /* Admin Queue handling */
3929         event.buf_len = IXL_AQ_BUF_SZ;
3930         event.msg_buf = malloc(event.buf_len,
3931             M_DEVBUF, M_NOWAIT | M_ZERO);
3932         if (!event.msg_buf) {
3933                 device_printf(dev, "%s: Unable to allocate memory for Admin"
3934                     " Queue event!\n", __func__);
3935                 return;
3936         }
3937
3938         IXL_PF_LOCK(pf);
3939         /* clean and process any events */
3940         do {
3941                 ret = i40e_clean_arq_element(hw, &event, &result);
3942                 if (ret)
3943                         break;
3944                 opcode = LE16_TO_CPU(event.desc.opcode);
3945                 ixl_dbg(pf, IXL_DBG_AQ,
3946                     "%s: Admin Queue event: %#06x\n", __func__, opcode);
3947                 switch (opcode) {
3948                 case i40e_aqc_opc_get_link_status:
3949                         ixl_link_event(pf, &event);
3950                         break;
3951                 case i40e_aqc_opc_send_msg_to_pf:
3952 #ifdef PCI_IOV
3953                         ixl_handle_vf_msg(pf, &event);
3954 #endif
3955                         break;
3956                 case i40e_aqc_opc_event_lan_overflow:
3957                 default:
3958                         break;
3959                 }
3960
3961         } while (result && (loop++ < IXL_ADM_LIMIT));
3962
3963         free(event.msg_buf, M_DEVBUF);
3964
3965         /*
3966          * If there are still messages to process, reschedule ourselves.
3967          * Otherwise, re-enable our interrupt.
3968          */
3969         if (result > 0)
3970                 taskqueue_enqueue(pf->tq, &pf->adminq);
3971         else
3972                 ixl_enable_adminq(hw);
3973
3974         IXL_PF_UNLOCK(pf);
3975 }
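
/*
 * Illustrative sketch, not driver code: skeleton of the ARQ drain
 * loop above. In the i40e shared code, i40e_clean_arq_element()
 * fills "event" with one firmware message and sets its out-parameter
 * ("result" here) to the number of events still pending, which is
 * why the task re-queues itself when it stops at IXL_ADM_LIMIT with
 * work left over.
 */
#if 0
	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		/* ... dispatch on opcode ... */
	} while (result && (loop++ < IXL_ADM_LIMIT));
#endif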
3976
3977 /**
3978  * Update VSI-specific ethernet statistics counters.
3979  **/
3980 void
3981 ixl_update_eth_stats(struct ixl_vsi *vsi)
3982 {
3983         struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
3984         struct i40e_hw *hw = &pf->hw;
3985         struct i40e_eth_stats *es;
3986         struct i40e_eth_stats *oes;
3987         struct i40e_hw_port_stats *nsd;
3988         u16 stat_idx = vsi->info.stat_counter_idx;
3989
3990         es = &vsi->eth_stats;
3991         oes = &vsi->eth_stats_offsets;
3992         nsd = &pf->stats;
3993
3994         /* Gather up the stats that the hw collects */
3995         ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
3996                            vsi->stat_offsets_loaded,
3997                            &oes->tx_errors, &es->tx_errors);
3998         ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
3999                            vsi->stat_offsets_loaded,
4000                            &oes->rx_discards, &es->rx_discards);
4001
4002         ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4003                            I40E_GLV_GORCL(stat_idx),
4004                            vsi->stat_offsets_loaded,
4005                            &oes->rx_bytes, &es->rx_bytes);
4006         ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4007                            I40E_GLV_UPRCL(stat_idx),
4008                            vsi->stat_offsets_loaded,
4009                            &oes->rx_unicast, &es->rx_unicast);
4010         ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4011                            I40E_GLV_MPRCL(stat_idx),
4012                            vsi->stat_offsets_loaded,
4013                            &oes->rx_multicast, &es->rx_multicast);
4014         ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4015                            I40E_GLV_BPRCL(stat_idx),
4016                            vsi->stat_offsets_loaded,
4017                            &oes->rx_broadcast, &es->rx_broadcast);
4018
4019         ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4020                            I40E_GLV_GOTCL(stat_idx),
4021                            vsi->stat_offsets_loaded,
4022                            &oes->tx_bytes, &es->tx_bytes);
4023         ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4024                            I40E_GLV_UPTCL(stat_idx),
4025                            vsi->stat_offsets_loaded,
4026                            &oes->tx_unicast, &es->tx_unicast);
4027         ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4028                            I40E_GLV_MPTCL(stat_idx),
4029                            vsi->stat_offsets_loaded,
4030                            &oes->tx_multicast, &es->tx_multicast);
4031         ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4032                            I40E_GLV_BPTCL(stat_idx),
4033                            vsi->stat_offsets_loaded,
4034                            &oes->tx_broadcast, &es->tx_broadcast);
4035         vsi->stat_offsets_loaded = true;
4036 }
4037
4038 void
4039 ixl_update_vsi_stats(struct ixl_vsi *vsi)
4040 {
4041         struct ixl_pf           *pf;
4042         struct ifnet            *ifp;
4043         struct i40e_eth_stats   *es;
4044         u64                     tx_discards;
4045
4046         struct i40e_hw_port_stats *nsd;
4047
4048         pf = vsi->back;
4049         ifp = vsi->ifp;
4050         es = &vsi->eth_stats;
4051         nsd = &pf->stats;
4052
4053         ixl_update_eth_stats(vsi);
4054
4055         tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4056         for (int i = 0; i < vsi->num_queues; i++)
4057                 tx_discards += vsi->queues[i].txr.br->br_drops;
4058
4059         /* Update ifnet stats */
4060         IXL_SET_IPACKETS(vsi, es->rx_unicast +
4061                            es->rx_multicast +
4062                            es->rx_broadcast);
4063         IXL_SET_OPACKETS(vsi, es->tx_unicast +
4064                            es->tx_multicast +
4065                            es->tx_broadcast);
4066         IXL_SET_IBYTES(vsi, es->rx_bytes);
4067         IXL_SET_OBYTES(vsi, es->tx_bytes);
4068         IXL_SET_IMCASTS(vsi, es->rx_multicast);
4069         IXL_SET_OMCASTS(vsi, es->tx_multicast);
4070
4071         IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4072             nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4073             nsd->rx_jabber);
4074         IXL_SET_OERRORS(vsi, es->tx_errors);
4075         IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4076         IXL_SET_OQDROPS(vsi, tx_discards);
4077         IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4078         IXL_SET_COLLISIONS(vsi, 0);
4079 }
4080
4081 /**
4082  * Reset all of the stats for the given pf
4083  **/
4084 void
4085 ixl_pf_reset_stats(struct ixl_pf *pf)
4086 {
4087         bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4088         bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4089         pf->stat_offsets_loaded = false;
4090 }
4091
4092 /**
4093  * Resets all stats of the given vsi
4094  **/
4095 void
4096 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4097 {
4098         bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4099         bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4100         vsi->stat_offsets_loaded = false;
4101 }
4102
4103 /**
4104  * Read and update a 48 bit stat from the hw
4105  *
4106  * Since the device stats are not reset at PFReset, they likely will not
4107  * be zeroed when the driver starts.  We'll save the first values read
4108  * and use them as offsets to be subtracted from the raw values in order
4109  * to report stats that count from zero.
4110  **/
4111 void
4112 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4113         bool offset_loaded, u64 *offset, u64 *stat)
4114 {
4115         u64 new_data;
4116
4117 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4118         new_data = rd64(hw, loreg);
4119 #else
4120         /*
4121          * Use two rd32's instead of one rd64; FreeBSD versions before
4122          * 10 don't support 64-bit bus reads/writes.
4123          */
4124         new_data = rd32(hw, loreg);
4125         new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4126 #endif
4127
4128         if (!offset_loaded)
4129                 *offset = new_data;
4130         if (new_data >= *offset)
4131                 *stat = new_data - *offset;
4132         else
4133                 *stat = (new_data + ((u64)1 << 48)) - *offset;
4134         *stat &= 0xFFFFFFFFFFFFULL;
4135 }
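
/*
 * Worked example of the 48-bit rollover handling above: with a saved
 * offset of 0xFFFF00000000 and a new raw reading of 0x1000,
 * new_data < *offset, so the stat becomes
 *
 *	(0x1000 + (1ULL << 48)) - 0xFFFF00000000 = 0x100001000
 *
 * i.e. the counter is treated as having wrapped once past 2^48, and
 * the final mask keeps the reported value within 48 bits.
 */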
4136
4137 /**
4138  * Read and update a 32 bit stat from the hw
4139  **/
4140 void
4141 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4142         bool offset_loaded, u64 *offset, u64 *stat)
4143 {
4144         u32 new_data;
4145
4146         new_data = rd32(hw, reg);
4147         if (!offset_loaded)
4148                 *offset = new_data;
4149         if (new_data >= *offset)
4150                 *stat = (u32)(new_data - *offset);
4151         else
4152                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4153 }
4154
4155 void
4156 ixl_add_device_sysctls(struct ixl_pf *pf)
4157 {
4158         device_t dev = pf->dev;
4159
4160         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4161         struct sysctl_oid_list *ctx_list =
4162             SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
4163
4164         struct sysctl_oid *debug_node;
4165         struct sysctl_oid_list *debug_list;
4166
4167         /* Set up sysctls */
4168         SYSCTL_ADD_PROC(ctx, ctx_list,
4169             OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4170             pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4171
4172         SYSCTL_ADD_PROC(ctx, ctx_list,
4173             OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4174             pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4175
4176         SYSCTL_ADD_PROC(ctx, ctx_list,
4177             OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4178             pf, 0, ixl_current_speed, "A", "Current Port Speed");
4179
4180         SYSCTL_ADD_PROC(ctx, ctx_list,
4181             OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4182             pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4183
4184         SYSCTL_ADD_PROC(ctx, ctx_list,
4185             OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
4186             pf, 0, ixl_sysctl_unallocated_queues, "I",
4187             "Queues not allocated to a PF or VF");
4188
4189         SYSCTL_ADD_PROC(ctx, ctx_list,
4190             OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
4191             pf, 0, ixl_sysctl_pf_tx_itr, "I",
4192             "Immediately set TX ITR value for all queues");
4193
4194         SYSCTL_ADD_PROC(ctx, ctx_list,
4195             OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
4196             pf, 0, ixl_sysctl_pf_rx_itr, "I",
4197             "Immediately set RX ITR value for all queues");
4198
4199         SYSCTL_ADD_INT(ctx, ctx_list,
4200             OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4201             &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
4202
4203         SYSCTL_ADD_INT(ctx, ctx_list,
4204             OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4205             &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
4206
4207         /* Add sysctls meant to print debug information, but don't list them
4208          * in "sysctl -a" output. */
4209         debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
4210             OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
4211         debug_list = SYSCTL_CHILDREN(debug_node);
4212
4213         SYSCTL_ADD_UINT(ctx, debug_list,
4214             OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
4215             &pf->hw.debug_mask, 0, "Shared code debug message level");
4216
4217         SYSCTL_ADD_UINT(ctx, debug_list,
4218             OID_AUTO, "core_debug_mask", CTLFLAG_RW,
4219             &pf->dbg_mask, 0, "Non-hared code debug message level");
4220
4221         SYSCTL_ADD_PROC(ctx, debug_list,
4222             OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4223             pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4224
4225         SYSCTL_ADD_PROC(ctx, debug_list,
4226             OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4227             pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4228
4229         SYSCTL_ADD_PROC(ctx, debug_list,
4230             OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4231             pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4232
4233         SYSCTL_ADD_PROC(ctx, debug_list,
4234             OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4235             pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4236
4237         SYSCTL_ADD_PROC(ctx, debug_list,
4238             OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4239             pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4240
4241         SYSCTL_ADD_PROC(ctx, debug_list,
4242             OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
4243             pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
4244
4245         SYSCTL_ADD_PROC(ctx, debug_list,
4246             OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
4247             pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
4248 #ifdef PCI_IOV
4249         SYSCTL_ADD_UINT(ctx, debug_list,
4250             OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4251             0, "PF/VF Virtual Channel debug level");
4252 #endif
4253 }
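
     /*
      * The knobs registered above land under this device's sysctl tree; for
      * example, assuming unit 0:
      *
      *   sysctl dev.ixl.0.fc=3                # request full flow control
      *   sysctl dev.ixl.0.current_speed       # read the current port speed
      *   sysctl dev.ixl.0.debug.link_status   # works, but CTLFLAG_SKIP keeps
      *                                        # the debug tree out of listings
      */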
4254
4255 /*
4256  * Primarily for finding out how many queues can be assigned to VFs
4257  * at runtime.
4258  */
4259 static int
4260 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
4261 {
4262         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4263         int queues;
4264
4265         IXL_PF_LOCK(pf);
4266         queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
4267         IXL_PF_UNLOCK(pf);
4268
4269         return sysctl_handle_int(oidp, NULL, queues, req);
4270 }
4271
4272 /*
4273 ** Set flow control using sysctl:
4274 **      0 - off
4275 **      1 - rx pause
4276 **      2 - tx pause
4277 **      3 - full
4278 */
4279 int
4280 ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4281 {
4282         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4283         struct i40e_hw *hw = &pf->hw;
4284         device_t dev = pf->dev;
4285         int requested_fc, error = 0;
4286         enum i40e_status_code aq_error = 0;
4287         u8 fc_aq_err = 0;
4288
4289         /* Get request */
4290         requested_fc = pf->fc;
4291         error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4292         if ((error) || (req->newptr == NULL))
4293                 return (error);
4294         if (requested_fc < 0 || requested_fc > 3) {
4295                 device_printf(dev,
4296                     "Invalid fc mode; valid modes are 0 through 3\n");
4297                 return (EINVAL);
4298         }
4299
4300         /* Set fc ability for port */
4301         hw->fc.requested_mode = requested_fc;
4302         aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4303         if (aq_error) {
4304                 device_printf(dev,
4305                     "%s: Error setting new fc mode %d; fc_err %#x\n",
4306                     __func__, aq_error, fc_aq_err);
4307                 return (EIO);
4308         }
4309         pf->fc = requested_fc;
4310
4311         /* Get new link state */
4312         i40e_msec_delay(250);
4313         hw->phy.get_link_info = TRUE;
4314         i40e_get_link_status(hw, &pf->link_up);
4315
4316         return (0);
4317 }
4318
4319 int
4320 ixl_current_speed(SYSCTL_HANDLER_ARGS)
4321 {
4322         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4323         struct i40e_hw *hw = &pf->hw;
4324         int error = 0, index = 0;
4325
4326         char *speeds[] = {
4327                 "Unknown",
4328                 "100M",
4329                 "1G",
4330                 "10G",
4331                 "40G",
4332                 "20G"
4333         };
4334
4335         ixl_update_link_status(pf);
4336
4337         switch (hw->phy.link_info.link_speed) {
4338         case I40E_LINK_SPEED_100MB:
4339                 index = 1;
4340                 break;
4341         case I40E_LINK_SPEED_1GB:
4342                 index = 2;
4343                 break;
4344         case I40E_LINK_SPEED_10GB:
4345                 index = 3;
4346                 break;
4347         case I40E_LINK_SPEED_40GB:
4348                 index = 4;
4349                 break;
4350         case I40E_LINK_SPEED_20GB:
4351                 index = 5;
4352                 break;
4353         case I40E_LINK_SPEED_UNKNOWN:
4354         default:
4355                 index = 0;
4356                 break;
4357         }
4358
4359         error = sysctl_handle_string(oidp, speeds[index],
4360             strlen(speeds[index]), req);
4361         return (error);
4362 }
4363
4364 int
4365 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4366 {
4367         struct i40e_hw *hw = &pf->hw;
4368         device_t dev = pf->dev;
4369         struct i40e_aq_get_phy_abilities_resp abilities;
4370         struct i40e_aq_set_phy_config config;
4371         enum i40e_status_code aq_error = 0;
4372
4373         /* Get current capability information */
4374         aq_error = i40e_aq_get_phy_capabilities(hw,
4375             FALSE, FALSE, &abilities, NULL);
4376         if (aq_error) {
4377                 device_printf(dev,
4378                     "%s: Error getting phy capabilities %d,"
4379                     " aq error: %d\n", __func__, aq_error,
4380                     hw->aq.asq_last_status);
4381                 return (EIO);
4382         }
4383
4384         /* Prepare new config */
4385         bzero(&config, sizeof(config));
4386         config.phy_type = abilities.phy_type;
4387         config.abilities = abilities.abilities
4388             | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4389         config.eee_capability = abilities.eee_capability;
4390         config.eeer = abilities.eeer_val;
4391         config.low_power_ctrl = abilities.d3_lpan;
4392         /* Translate into aq cmd link_speed */
4393         if (speeds & 0x10)
4394                 config.link_speed |= I40E_LINK_SPEED_40GB;
4395         if (speeds & 0x8)
4396                 config.link_speed |= I40E_LINK_SPEED_20GB;
4397         if (speeds & 0x4)
4398                 config.link_speed |= I40E_LINK_SPEED_10GB;
4399         if (speeds & 0x2)
4400                 config.link_speed |= I40E_LINK_SPEED_1GB;
4401         if (speeds & 0x1)
4402                 config.link_speed |= I40E_LINK_SPEED_100MB;
4403
4404         /* Do aq command & restart link */
4405         aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4406         if (aq_error) {
4407                 device_printf(dev,
4408                     "%s: Error setting new phy config %d,"
4409                     " aq error: %d\n", __func__, aq_error,
4410                     hw->aq.asq_last_status);
4411                 return (EAGAIN);
4412         }
4413
4414         /*
4415         ** This seems a bit heavy handed, but we
4416         ** need to get a reinit on some devices
4417         */
4418         IXL_PF_LOCK(pf);
4419         ixl_stop_locked(pf);
4420         ixl_init_locked(pf);
4421         IXL_PF_UNLOCK(pf);
4422
4423         return (0);
4424 }
4425
4426 /*
4427 ** Control link advertise speed:
4428 **      Flags:
4429 **       0x1 - advertise 100 Mb
4430 **       0x2 - advertise 1G
4431 **       0x4 - advertise 10G
4432 **       0x8 - advertise 20G
4433 **      0x10 - advertise 40G
4434 **
4435 **      Set to 0 to disable link
4436 */
4437 int
4438 ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4439 {
4440         struct ixl_pf *pf = (struct ixl_pf *)arg1;
4441         struct i40e_hw *hw = &pf->hw;
4442         device_t dev = pf->dev;
4443         int requested_ls = 0;
4444         int error = 0;
4445
4446         /* Read in new mode */
4447         requested_ls = pf->advertised_speed;
4448         error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4449         if ((error) || (req->newptr == NULL))
4450                 return (error);
4451         /* Check for sane value */
4452         if (requested_ls > 0x10) {
4453                 device_printf(dev, "Invalid advertised speed; "
4454                     "valid modes are 0x1 through 0x10\n");
4455                 return (EINVAL);
4456         }
4457         /* Then check for validity based on adapter type */
4458         switch (hw->device_id) {
4459         case I40E_DEV_ID_1G_BASE_T_X722:
4460                 /* 1G BaseT */
4461                 if (requested_ls & ~(0x2)) {
4462                         device_printf(dev,
4463                                 "Only 1G speeds supported on this device.\n");
4464                         return (EINVAL);
4465                 }
4466                 break;
4467         case I40E_DEV_ID_10G_BASE_T:
4468         case I40E_DEV_ID_10G_BASE_T4:
4469                 /* 10G BaseT */
4470                 if (requested_ls & ~(0x7)) {
4471                         device_printf(dev,
4472                             "Only 100M/1G/10G speeds supported on this device.\n");
4473                         return (EINVAL);
4474                 }
4475                 break;
4476         case I40E_DEV_ID_20G_KR2:
4477         case I40E_DEV_ID_20G_KR2_A:
4478                 /* 20G */
4479                 if (requested_ls & ~(0xE)) {
4480                         device_printf(dev,
4481                             "Only 1G/10G/20G speeds supported on this device.\n");
4482                         return (EINVAL);
4483                 }
4484                 break;
4485         case I40E_DEV_ID_KX_B:
4486         case I40E_DEV_ID_QSFP_A:
4487         case I40E_DEV_ID_QSFP_B:
4488                 /* 40G */
4489                 if (requested_ls & ~(0x10)) {
4490                         device_printf(dev,
4491                             "Only 40G speeds supported on this device.\n");
4492                         return (EINVAL);
4493                 }
4494                 break;
4495         default:
4496                 /* 1G/10G */
4497                 if (requested_ls & ~(0x6)) {
4498                         device_printf(dev,
4499                             "Only 1G/10G speeds supported on this device.\n");
4500                         return (EINVAL);
4501                 }
4502                 break;
4503         }
4504
4505         /* Exit if no change */
4506         if (pf->advertised_speed == requested_ls)
4507                 return (0);
4508
4509         error = ixl_set_advertised_speeds(pf, requested_ls);
4510         if (error)
4511                 return (error);
4512
4513         pf->advertised_speed = requested_ls;
4514         ixl_update_link_status(pf);
4515         return (0);
4516 }
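
     /*
      * Example: to advertise only 1G and 10G on a 10GBASE-T device, OR the
      * flags above together (unit 0 assumed):
      *
      *   sysctl dev.ixl.0.advertise_speed=0x6    # 0x2 (1G) | 0x4 (10G)
      */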
4517
4518 /*
4519 ** Get the width and transaction speed of
4520 ** the bus this adapter is plugged into.
4521 */
4522 void
4523 ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4524 {
4525         u16                     link;
4526         u32                     offset;
4527
4528         /* Some devices don't use PCIE */
4529         if (hw->mac.type == I40E_MAC_X722)
4530                 return;
4531
4532         /* Read PCI Express Capabilities Link Status Register */
4533         pci_find_cap(dev, PCIY_EXPRESS, &offset);
4534         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4535
4536         /* Fill out hw struct with PCIE info */
4537         i40e_set_pci_config_data(hw, link);
4538
4539         /* Use info to print out bandwidth messages */
4540         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4541             ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4542             (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4543             (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4544             (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4545             (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4546             (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4547             ("Unknown"));
4548
4549         if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4550             (hw->bus.speed < i40e_bus_speed_8000)) {
4551                 device_printf(dev, "PCI-Express bandwidth available"
4552                     " for this device may be insufficient for"
4553                     " optimal performance.\n");
4554                 device_printf(dev, "For optimal performance, a x8 "
4555                     "PCIE Gen3 slot is required.\n");
4556         }
4557 }
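
     /*
      * Example decode of the link status register read above (field layout
      * per the PCIe spec): link = 0x0083 has bits [3:0] = 3, an 8.0GT/s link
      * speed, and bits [9:4] = 8, a negotiated width of x8 -- the x8 Gen3
      * combination the message above recommends.
      */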
4558
4559 static int
4560 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4561 {
4562         struct ixl_pf   *pf = (struct ixl_pf *)arg1;
4563         struct i40e_hw  *hw = &pf->hw;
4564         struct sbuf     *sbuf;
4565
4566         sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
             if (!sbuf) {
                     device_printf(pf->dev,
                         "Could not allocate sbuf for sysctl output.\n");
                     return (ENOMEM);
             }
4567         ixl_nvm_version_str(hw, sbuf);
4568         sbuf_finish(sbuf);
4569         sbuf_delete(sbuf);
4570
4571         return (0);
4572 }
4573
4574 void
4575 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
4576 {
4577         if ((nvma->command == I40E_NVM_READ) &&
4578             ((nvma->config & 0xFF) == 0xF) &&
4579             (((nvma->config & 0xF00) >> 8) == 0xF) &&
4580             (nvma->offset == 0) &&
4581             (nvma->data_size == 1)) {
4582                 /* The frequent "Get Driver Status" poll; stay quiet. */
4583         }
4584         else if (nvma->command == I40E_NVM_READ) {
4585                 /* Other NVM reads are not worth logging either. */
4586         }
4587         else {
4588                 switch (nvma->command) {
4589                 case 0xB:
4590                         device_printf(dev, "- command: I40E_NVM_READ\n");
4591                         break;
4592                 case 0xC:
4593                         device_printf(dev, "- command: I40E_NVM_WRITE\n");
4594                         break;
4595                 default:
4596                         device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
4597                         break;
4598                 }
4599
4600                 device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
4601                 device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
4602                 device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
4603                 device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
4604         }
4605 }
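
     /*
      * Example of the config field split printed above: config = 0x050B
      * decodes as pointer byte (config & 0xFF) = 0x0B and flags nibble
      * ((config & 0xF00) >> 8) = 0x5.
      */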
4606
4607 int
4608 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
4609 {
4610         struct i40e_hw *hw = &pf->hw;
4611         struct i40e_nvm_access *nvma;
4612         device_t dev = pf->dev;
4613         enum i40e_status_code status = 0;
4614         int perrno;
4615
4616         DEBUGFUNC("ixl_handle_nvmupd_cmd");
4617
4618         /* Sanity checks */
4619         if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
4620             ifd->ifd_data == NULL) {
4621                 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
4622                     __func__);
4623                 device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
4624                     __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
4625                 device_printf(dev, "%s: data pointer: %p\n", __func__,
4626                     ifd->ifd_data);
4627                 return (EINVAL);
4628         }
4629
4630         nvma = (struct i40e_nvm_access *)ifd->ifd_data;
4631
4632         if (pf->dbg_mask & IXL_DBG_NVMUPD)
4633                 ixl_print_nvm_cmd(dev, nvma);
4634
4635         if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4636                 int count = 0;
4637                 while (count++ < 100) {
4638                         i40e_msec_delay(100);
4639                         if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
4640                                 break;
4641                 }
4642         }
4643
4644         if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
4645                 IXL_PF_LOCK(pf);
4646                 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
4647                 IXL_PF_UNLOCK(pf);
4648         } else {
4649                 perrno = -EBUSY;
4650         }
4651
4652         if (status)
4653                 device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
4654                     status, perrno);
4655
4656         /*
4657          * -EPERM is actually ERESTART, which the kernel interprets as a request
4658          * to restart this ioctl. So return -EACCES for -EPERM instead.
4659          */
4660         if (perrno == -EPERM)
4661                 return (-EACCES);
4662         else
4663                 return (perrno);
4664 }
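
     /*
      * Userland sketch (illustrative only; variable names and the omitted
      * error handling are the editor's, not part of this driver). The path
      * above is reached through the SIOCSDRVSPEC ioctl with a struct ifdrv:
      *
      *   struct i40e_nvm_access nvma = { .command = I40E_NVM_READ };
      *   struct ifdrv ifd;
      *
      *   memset(&ifd, 0, sizeof(ifd));
      *   strlcpy(ifd.ifd_name, "ixl0", sizeof(ifd.ifd_name));
      *   ifd.ifd_cmd = I40E_NVM_ACCESS;
      *   ifd.ifd_len = sizeof(nvma);
      *   ifd.ifd_data = &nvma;
      *   ioctl(s, SIOCSDRVSPEC, &ifd);   -- s is any AF_INET datagram socket
      */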
4665
4666 /*********************************************************************
4667  *
4668  *  Media Ioctl callback
4669  *
4670  *  This routine is called whenever the user queries the status of
4671  *  the interface using ifconfig.
4672  *
4673  **********************************************************************/
4674 void
4675 ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
4676 {
4677         struct ixl_vsi  *vsi = ifp->if_softc;
4678         struct ixl_pf   *pf = vsi->back;
4679         struct i40e_hw  *hw = &pf->hw;
4680
4681         INIT_DEBUGOUT("ixl_media_status: begin");
4682         IXL_PF_LOCK(pf);
4683
4684         hw->phy.get_link_info = TRUE;
4685         i40e_get_link_status(hw, &pf->link_up);
4686         ixl_update_link_status(pf);
4687
4688         ifmr->ifm_status = IFM_AVALID;
4689         ifmr->ifm_active = IFM_ETHER;
4690
4691         if (!pf->link_up) {
4692                 IXL_PF_UNLOCK(pf);
4693                 return;
4694         }
4695
4696         ifmr->ifm_status |= IFM_ACTIVE;
4697
4698         /* Hardware always does full-duplex */
4699         ifmr->ifm_active |= IFM_FDX;
4700
4701         switch (hw->phy.link_info.phy_type) {
4702                 /* 100 M */
4703                 case I40E_PHY_TYPE_100BASE_TX:
4704                         ifmr->ifm_active |= IFM_100_TX;
4705                         break;
4706                 /* 1 G */
4707                 case I40E_PHY_TYPE_1000BASE_T:
4708                         ifmr->ifm_active |= IFM_1000_T;
4709                         break;
4710                 case I40E_PHY_TYPE_1000BASE_SX:
4711                         ifmr->ifm_active |= IFM_1000_SX;
4712                         break;
4713                 case I40E_PHY_TYPE_1000BASE_LX:
4714                         ifmr->ifm_active |= IFM_1000_LX;
4715                         break;
4716                 case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
4717                         ifmr->ifm_active |= IFM_OTHER;
4718                         break;
4719                 /* 10 G */
4720                 case I40E_PHY_TYPE_10GBASE_SFPP_CU:
4721                         ifmr->ifm_active |= IFM_10G_TWINAX;
4722                         break;
4723                 case I40E_PHY_TYPE_10GBASE_SR:
4724                         ifmr->ifm_active |= IFM_10G_SR;
4725                         break;
4726                 case I40E_PHY_TYPE_10GBASE_LR:
4727                         ifmr->ifm_active |= IFM_10G_LR;
4728                         break;
4729                 case I40E_PHY_TYPE_10GBASE_T:
4730                         ifmr->ifm_active |= IFM_10G_T;
4731                         break;
4732                 case I40E_PHY_TYPE_XAUI:
4733                 case I40E_PHY_TYPE_XFI:
4734                 case I40E_PHY_TYPE_10GBASE_AOC:
4735                         ifmr->ifm_active |= IFM_OTHER;
4736                         break;
4737                 /* 40 G */
4738                 case I40E_PHY_TYPE_40GBASE_CR4:
4739                 case I40E_PHY_TYPE_40GBASE_CR4_CU:
4740                         ifmr->ifm_active |= IFM_40G_CR4;
4741                         break;
4742                 case I40E_PHY_TYPE_40GBASE_SR4:
4743                         ifmr->ifm_active |= IFM_40G_SR4;
4744                         break;
4745                 case I40E_PHY_TYPE_40GBASE_LR4:
4746                         ifmr->ifm_active |= IFM_40G_LR4;
4747                         break;
4748                 case I40E_PHY_TYPE_XLAUI:
4749                         ifmr->ifm_active |= IFM_OTHER;
4750                         break;
4751                 case I40E_PHY_TYPE_1000BASE_KX:
4752                         ifmr->ifm_active |= IFM_1000_KX;
4753                         break;
4754                 case I40E_PHY_TYPE_SGMII:
4755                         ifmr->ifm_active |= IFM_1000_SGMII;
4756                         break;
4757                 /* ERJ: What's the difference between these? */
4758                 case I40E_PHY_TYPE_10GBASE_CR1_CU:
4759                 case I40E_PHY_TYPE_10GBASE_CR1:
4760                         ifmr->ifm_active |= IFM_10G_CR1;
4761                         break;
4762                 case I40E_PHY_TYPE_10GBASE_KX4:
4763                         ifmr->ifm_active |= IFM_10G_KX4;
4764                         break;
4765                 case I40E_PHY_TYPE_10GBASE_KR:
4766                         ifmr->ifm_active |= IFM_10G_KR;
4767                         break;
4768                 case I40E_PHY_TYPE_SFI:
4769                         ifmr->ifm_active |= IFM_10G_SFI;
4770                         break;
4771                 /* Our single 20G media type */
4772                 case I40E_PHY_TYPE_20GBASE_KR2:
4773                         ifmr->ifm_active |= IFM_20G_KR2;
4774                         break;
4775                 case I40E_PHY_TYPE_40GBASE_KR4:
4776                         ifmr->ifm_active |= IFM_40G_KR4;
4777                         break;
4778                 case I40E_PHY_TYPE_XLPPI:
4779                 case I40E_PHY_TYPE_40GBASE_AOC:
4780                         ifmr->ifm_active |= IFM_40G_XLPPI;
4781                         break;
4782                 /* Unknown to driver */
4783                 default:
4784                         ifmr->ifm_active |= IFM_UNKNOWN;
4785                         break;
4786         }
4787         /* Report flow control status as well */
4788         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
4789                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
4790         if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
4791                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
4792
4793         IXL_PF_UNLOCK(pf);
4794 }
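
     /*
      * The bits set here are what ifconfig reports on its "media:" line,
      * e.g. (illustrative output):
      *
      *   media: Ethernet autoselect (40Gbase-CR4 <full-duplex,rxpause,txpause>)
      */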
4795
4796 void
4797 ixl_init(void *arg)
4798 {
4799         struct ixl_pf *pf = arg;
4800         struct ixl_vsi *vsi = &pf->vsi;
4801         device_t dev = pf->dev;
4802         int error = 0;
4803
4804         /*
4805          * If the aq is dead here, it probably means something outside of the driver
4806          * did something to the adapter, like a PF reset.
4807          * So rebuild the driver's state here if that occurs.
4808          */
4809         if (!i40e_check_asq_alive(&pf->hw)) {
4810                 device_printf(dev, "Admin Queue is down; resetting...\n");
4811                 IXL_PF_LOCK(pf);
4812                 ixl_teardown_hw_structs(pf);
4813                 ixl_reset(pf);
4814                 IXL_PF_UNLOCK(pf);
4815         }
4816
4817         /*
4818          * Set up LAN queue interrupts here.
4819          * Kernel interrupt setup functions cannot be called while holding a lock,
4820          * so this is done outside of init_locked().
4821          */
4822         if (pf->msix > 1) {
4823                 /* Teardown existing interrupts, if they exist */
4824                 ixl_teardown_queue_msix(vsi);
4825                 ixl_free_queue_tqs(vsi);
4826                 /* Then set them up again */
4827                 error = ixl_setup_queue_msix(vsi);
4828                 if (error)
4829                         device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
4830                             error);
4831                 error = ixl_setup_queue_tqs(vsi);
4832                 if (error)
4833                         device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
4834                             error);
4835         } else
4836                 /* Legacy/MSI interrupt setup; possibly broken */
4837                 error = ixl_assign_vsi_legacy(pf);
4838         if (error) {
4839                 device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
4840                 return;
4841         }
4842
4843         IXL_PF_LOCK(pf);
4844         ixl_init_locked(pf);
4845         IXL_PF_UNLOCK(pf);
4846 }
4847
4848 /*
4849  * NOTE: Fortville does not support forcing media speeds. Instead,
4850  * use the set_advertise sysctl to set the speeds Fortville
4851  * will advertise or be allowed to operate at.
4852  */
4853 int
4854 ixl_media_change(struct ifnet * ifp)
4855 {
4856         struct ixl_vsi *vsi = ifp->if_softc;
4857         struct ifmedia *ifm = &vsi->media;
4858
4859         INIT_DEBUGOUT("ixl_media_change: begin");
4860
4861         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4862                 return (EINVAL);
4863
4864         if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
4865
4866         return (ENODEV);
4867 }
4868
4869 /*********************************************************************
4870  *  Ioctl entry point
4871  *
4872  *  ixl_ioctl is called when the user wants to configure the
4873  *  interface.
4874  *
4875  *  return 0 on success, positive on failure
4876  **********************************************************************/
4877
4878 int
4879 ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
4880 {
4881         struct ixl_vsi  *vsi = ifp->if_softc;
4882         struct ixl_pf   *pf = vsi->back;
4883         struct ifreq    *ifr = (struct ifreq *)data;
4884         struct ifdrv    *ifd = (struct ifdrv *)data;
4885 #if defined(INET) || defined(INET6)
4886         struct ifaddr *ifa = (struct ifaddr *)data;
4887         bool            avoid_reset = FALSE;
4888 #endif
4889         int             error = 0;
4890
4891         switch (command) {
4892
4893         case SIOCSIFADDR:
4894 #ifdef INET
4895                 if (ifa->ifa_addr->sa_family == AF_INET)
4896                         avoid_reset = TRUE;
4897 #endif
4898 #ifdef INET6
4899                 if (ifa->ifa_addr->sa_family == AF_INET6)
4900                         avoid_reset = TRUE;
4901 #endif
4902 #if defined(INET) || defined(INET6)
4903                 /*
4904                 ** Calling init results in link renegotiation,
4905                 ** so we avoid doing it when possible.
4906                 */
4907                 if (avoid_reset) {
4908                         ifp->if_flags |= IFF_UP;
4909                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4910                                 ixl_init(pf);
4911 #ifdef INET
4912                         if (!(ifp->if_flags & IFF_NOARP))
4913                                 arp_ifinit(ifp, ifa);
4914 #endif
4915                 } else
4916                         error = ether_ioctl(ifp, command, data);
     #else
                     error = ether_ioctl(ifp, command, data);
4917 #endif
4918                 break;
4919         case SIOCSIFMTU:
4920                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4921                 if (ifr->ifr_mtu > IXL_MAX_FRAME -
4922                    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
4923                         error = EINVAL;
4924                 } else {
4925                         IXL_PF_LOCK(pf);
4926                         ifp->if_mtu = ifr->ifr_mtu;
4927                         vsi->max_frame_size =
4928                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
4929                             + ETHER_VLAN_ENCAP_LEN;
4930                         ixl_init_locked(pf);
4931                         IXL_PF_UNLOCK(pf);
4932                 }
4933                 break;
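             /*
              * Example: assuming IXL_MAX_FRAME is 9728 (the value in ixl.h of
              * this era; editor's assumption), the largest MTU accepted above
              * is 9728 - 14 (header) - 4 (CRC) - 4 (VLAN tag) = 9706 bytes.
              */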
4934         case SIOCSIFFLAGS:
4935                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4936                 IXL_PF_LOCK(pf);
4937                 if (ifp->if_flags & IFF_UP) {
4938                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4939                                 if ((ifp->if_flags ^ pf->if_flags) &
4940                                     (IFF_PROMISC | IFF_ALLMULTI)) {
4941                                         ixl_set_promisc(vsi);
4942                                 }
4943                         } else {
4944                                 IXL_PF_UNLOCK(pf);
4945                                 ixl_init(pf);
4946                                 IXL_PF_LOCK(pf);
4947                         }
4948                 } else {
4949                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4950                                 IXL_PF_UNLOCK(pf);
4951                                 ixl_stop(pf);
4952                                 IXL_PF_LOCK(pf);
4953                         }
4954                 }
4955                 pf->if_flags = ifp->if_flags;
4956                 IXL_PF_UNLOCK(pf);
4957                 break;
4958         case SIOCSDRVSPEC:
4959         case SIOCGDRVSPEC:
4960                 IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
4961                     "Info)\n");
4962
4963                 /* NVM update command */
4964                 if (ifd->ifd_cmd == I40E_NVM_ACCESS)
4965                         error = ixl_handle_nvmupd_cmd(pf, ifd);
4966                 else
4967                         error = EINVAL;
4968                 break;
4969         case SIOCADDMULTI:
4970                 IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
4971                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4972                         IXL_PF_LOCK(pf);
4973                         ixl_disable_intr(vsi);
4974                         ixl_add_multi(vsi);
4975                         ixl_enable_intr(vsi);
4976                         IXL_PF_UNLOCK(pf);
4977                 }
4978                 break;
4979         case SIOCDELMULTI:
4980                 IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
4981                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4982                         IXL_PF_LOCK(pf);
4983                         ixl_disable_intr(vsi);
4984                         ixl_del_multi(vsi);
4985                         ixl_enable_intr(vsi);
4986                         IXL_PF_UNLOCK(pf);
4987                 }
4988                 break;
4989         case SIOCSIFMEDIA:
4990         case SIOCGIFMEDIA:
4991         case SIOCGIFXMEDIA:
4992                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4993                 error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
4994                 break;
4995         case SIOCSIFCAP:
4996         {
4997                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4998                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
4999
5000                 ixl_cap_txcsum_tso(vsi, ifp, mask);
5001
5002                 if (mask & IFCAP_RXCSUM)
5003                         ifp->if_capenable ^= IFCAP_RXCSUM;
5004                 if (mask & IFCAP_RXCSUM_IPV6)
5005                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
5006                 if (mask & IFCAP_LRO)
5007                         ifp->if_capenable ^= IFCAP_LRO;
5008                 if (mask & IFCAP_VLAN_HWTAGGING)
5009                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
5010                 if (mask & IFCAP_VLAN_HWFILTER)
5011                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
5012                 if (mask & IFCAP_VLAN_HWTSO)
5013                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
5014                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
5015                         IXL_PF_LOCK(pf);
5016                         ixl_init_locked(pf);
5017                         IXL_PF_UNLOCK(pf);
5018                 }
5019                 VLAN_CAPABILITIES(ifp);
5020
5021                 break;
5022         }
5023
5024         default:
5025                 IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
5026                 error = ether_ioctl(ifp, command, data);
5027                 break;
5028         }
5029
5030         return (error);
5031 }
5032
5033 static char *
5034 ixl_phy_type_string(u32 bit_pos)
5035 {
5036         static char * phy_types_str[32] = {
5037                 "SGMII",
5038                 "1000BASE-KX",
5039                 "10GBASE-KX4",
5040                 "10GBASE-KR",
5041                 "40GBASE-KR4",
5042                 "XAUI",
5043                 "XFI",
5044                 "SFI",
5045                 "XLAUI",
5046                 "XLPPI",
5047                 "40GBASE-CR4",
5048                 "10GBASE-CR1",
5049                 "Reserved (12)",
5050                 "Reserved (13)",
5051                 "Reserved (14)",
5052                 "Reserved (15)",
5053                 "Reserved (16)",
5054                 "100BASE-TX",
5055                 "1000BASE-T",
5056                 "10GBASE-T",
5057                 "10GBASE-SR",
5058                 "10GBASE-LR",
5059                 "10GBASE-SFP+Cu",
5060                 "10GBASE-CR1",
5061                 "40GBASE-CR4",
5062                 "40GBASE-SR4",
5063                 "40GBASE-LR4",
5064                 "1000BASE-SX",
5065                 "1000BASE-LX",
5066                 "1000BASE-T Optical",
5067                 "20GBASE-KR2",
5068                 "Reserved (31)"
5069         };
5070
5071         if (bit_pos > 31) return "Invalid";
5072         return phy_types_str[bit_pos];
5073 }
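
     /*
      * Example: a phy_type bitmask of 0x00000180, as reported by the PHY
      * abilities query below, has bits 7 and 8 set, which this table decodes
      * as SFI and XLAUI.
      */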
5074
5075
5076 static int
5077 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5078 {
5079         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5080         struct i40e_hw *hw = &pf->hw;
5081         device_t dev = pf->dev;
5082         struct i40e_link_status link_status;
5083         enum i40e_status_code status;
5084         struct sbuf *buf;
5085         int error = 0;
5086
5087         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5088         if (!buf) {
5089                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5090                 return (ENOMEM);
5091         }
5092
5093         status = i40e_aq_get_link_info(hw, true, &link_status, NULL);
5094         if (status) {
5095                 device_printf(dev,
5096                     "%s: i40e_aq_get_link_info() status %s, aq error %s\n",
5097                     __func__, i40e_stat_str(hw, status),
5098                     i40e_aq_str(hw, hw->aq.asq_last_status));
5099                 sbuf_delete(buf);
5100                 return (EIO);
5101         }
5102
5103         sbuf_printf(buf, "\n"
5104             "PHY Type : 0x%02x<%s>\n"
5105             "Speed    : 0x%02x\n"
5106             "Link info: 0x%02x\n"
5107             "AN info  : 0x%02x\n"
5108             "Ext info : 0x%02x\n"
5109             "Max Frame: %d\n"
5110             "Pacing   : 0x%02x\n"
5111             "CRC En?  : %s\n",
5112             link_status.phy_type, ixl_phy_type_string(link_status.phy_type),
5113             link_status.link_speed, 
5114             link_status.link_info, link_status.an_info,
5115             link_status.ext_info, link_status.max_frame_size,
5116             link_status.pacing,
5117             (link_status.crc_enable) ? "Yes" : "No");
5118
5119         error = sbuf_finish(buf);
5120         if (error)
5121                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5122
5123         sbuf_delete(buf);
5124         return (error);
5125 }
5126
5127 static int
5128 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5129 {
5130         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5131         struct i40e_hw *hw = &pf->hw;
5132         device_t dev = pf->dev;
5133         enum i40e_status_code status;
5134         struct i40e_aq_get_phy_abilities_resp abilities;
5135         struct sbuf *buf;
5136         int error = 0;
5137
5138         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5139         if (!buf) {
5140                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5141                 return (ENOMEM);
5142         }
5143
5144         status = i40e_aq_get_phy_capabilities(hw,
5145             TRUE, FALSE, &abilities, NULL);
5146         if (status) {
5147                 device_printf(dev,
5148                     "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
5149                     __func__, i40e_stat_str(hw, status),
5150                     i40e_aq_str(hw, hw->aq.asq_last_status));
5151                 sbuf_delete(buf);
5152                 return (EIO);
5153         }
5154
5155         sbuf_printf(buf, "\n"
5156             "PHY Type : %08x",
5157             abilities.phy_type);
5158
5159         if (abilities.phy_type != 0) {
5160                 sbuf_printf(buf, "<");
5161                 for (int i = 0; i < 32; i++)
5162                         if ((1 << i) & abilities.phy_type)
5163                                 sbuf_printf(buf, "%s,", ixl_phy_type_string(i));
5164                 sbuf_printf(buf, ">\n");
5165         }
5166
5167         sbuf_printf(buf,
5168             "Speed    : %02x\n"
5169             "Abilities: %02x\n"
5170             "EEE cap  : %04x\n"
5171             "EEER reg : %08x\n"
5172             "D3 Lpan  : %02x\n"
5173             "ID       : %02x %02x %02x %02x\n"
5174             "ModType  : %02x %02x %02x",
5175             abilities.link_speed, 
5176             abilities.abilities, abilities.eee_capability,
5177             abilities.eeer_val, abilities.d3_lpan,
5178             abilities.phy_id[0], abilities.phy_id[1],
5179             abilities.phy_id[2], abilities.phy_id[3],
5180             abilities.module_type[0], abilities.module_type[1],
5181             abilities.module_type[2]);
5182
5183         error = sbuf_finish(buf);
5184         if (error)
5185                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5186
5187         sbuf_delete(buf);
5188         return (error);
5189 }
5190
5191 static int
5192 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5193 {
5194         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5195         struct ixl_vsi *vsi = &pf->vsi;
5196         struct ixl_mac_filter *f;
5197         char *buf, *buf_i;
5198
5199         int error = 0;
5200         int ftl_len = 0;
5201         int ftl_counter = 0;
5202         int buf_len = 0;
5203         int entry_len = 42;
5204
5205         SLIST_FOREACH(f, &vsi->ftl, next) {
5206                 ftl_len++;
5207         }
5208
5209         if (ftl_len < 1) {
5210                 sysctl_handle_string(oidp, "(none)", 6, req);
5211                 return (0);
5212         }
5213
5214         buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5215         buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
             if (buf == NULL)
                     return (ENOMEM);
5216
5217         sprintf(buf_i++, "\n");
5218         SLIST_FOREACH(f, &vsi->ftl, next) {
5219                 sprintf(buf_i,
5220                     MAC_FORMAT ", vlan %4d, flags %#06x",
5221                     MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5222                 buf_i += entry_len;
5223                 /* don't print '\n' for last entry */
5224                 if (++ftl_counter != ftl_len) {
5225                         sprintf(buf_i, "\n");
5226                         buf_i++;
5227                 }
5228         }
5229
5230         error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5231         if (error)
5232                 printf("sysctl error: %d\n", error);
5233         free(buf, M_DEVBUF);
5234         return (error);
5235 }
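
     /*
      * Each entry renders as MAC_FORMAT plus the vlan and flags fields; a
      * line looks like (values illustrative):
      *
      *   00:1b:21:aa:bb:cc, vlan   -1, flags 0x000a
      */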
5236
5237 #define IXL_SW_RES_SIZE 0x14
5238 int
5239 ixl_res_alloc_cmp(const void *a, const void *b)
5240 {
5241         const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5242         one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5243         two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5244
5245         return ((int)one->resource_type - (int)two->resource_type);
5246 }
5247
5248 /*
5249  * Longest string length: 25 
5250  */
5251 char *
5252 ixl_switch_res_type_string(u8 type)
5253 {
5254         static char * ixl_switch_res_type_strings[0x14] = {
5255                 "VEB",
5256                 "VSI",
5257                 "Perfect Match MAC address",
5258                 "S-tag",
5259                 "(Reserved)",
5260                 "Multicast hash entry",
5261                 "Unicast hash entry",
5262                 "VLAN",
5263                 "VSI List entry",
5264                 "(Reserved)",
5265                 "VLAN Statistic Pool",
5266                 "Mirror Rule",
5267                 "Queue Set",
5268                 "Inner VLAN Forward filter",
5269                 "(Reserved)",
5270                 "Inner MAC",
5271                 "IP",
5272                 "GRE/VN1 Key",
5273                 "VN2 Key",
5274                 "Tunneling Port"
5275         };
5276
5277         if (type < 0x14)
5278                 return ixl_switch_res_type_strings[type];
5279         else
5280                 return "(Reserved)";
5281 }
5282
5283 static int
5284 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5285 {
5286         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5287         struct i40e_hw *hw = &pf->hw;
5288         device_t dev = pf->dev;
5289         struct sbuf *buf;
5290         enum i40e_status_code status;
5291         int error = 0;
5292
5293         u8 num_entries;
5294         struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5295
5296         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5297         if (!buf) {
5298                 device_printf(dev, "Could not allocate sbuf for output.\n");
5299                 return (ENOMEM);
5300         }
5301
5302         bzero(resp, sizeof(resp));
5303         status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5304                                 resp,
5305                                 IXL_SW_RES_SIZE,
5306                                 NULL);
5307         if (status) {
5308                 device_printf(dev,
5309                     "%s: get_switch_resource_alloc() error %s, aq error %s\n",
5310                     __func__, i40e_stat_str(hw, status),
5311                     i40e_aq_str(hw, hw->aq.asq_last_status));
5312                 sbuf_delete(buf);
5313                 return (EIO);
5314         }
5315
5316         /* Sort entries by type for display */
5317         qsort(resp, num_entries,
5318             sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5319             &ixl_res_alloc_cmp);
5320
5321         sbuf_cat(buf, "\n");
5322         sbuf_printf(buf, "# of entries: %d\n", num_entries);
5323         sbuf_printf(buf,
5324             "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
5325             "                          | (this)     | (all) | (this) | (all)       \n");
5326         for (int i = 0; i < num_entries; i++) {
5327                 sbuf_printf(buf,
5328                     "%25s | %10d   %5d   %6d   %12d",
5329                     ixl_switch_res_type_string(resp[i].resource_type),
5330                     resp[i].guaranteed,
5331                     resp[i].total,
5332                     resp[i].used,
5333                     resp[i].total_unalloced);
5334                 if (i < num_entries - 1)
5335                         sbuf_cat(buf, "\n");
5336         }
5337
5338         error = sbuf_finish(buf);
5339         if (error)
5340                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5341
5342         sbuf_delete(buf);
5343         return (error);
5344 }
5345
5346 /*
5347 ** Caller must init and delete sbuf; this function will clear and
5348 ** finish it for caller.
5349 **
5350 ** XXX: Cannot use the SEID for this, since there is no longer a 
5351 ** fixed mapping between SEID and element type.
5352 */
5353 char *
5354 ixl_switch_element_string(struct sbuf *s,
5355     struct i40e_aqc_switch_config_element_resp *element)
5356 {
5357         sbuf_clear(s);
5358
5359         switch (element->element_type) {
5360         case I40E_AQ_SW_ELEM_TYPE_MAC:
5361                 sbuf_printf(s, "MAC %3d", element->element_info);
5362                 break;
5363         case I40E_AQ_SW_ELEM_TYPE_PF:
5364                 sbuf_printf(s, "PF  %3d", element->element_info);
5365                 break;
5366         case I40E_AQ_SW_ELEM_TYPE_VF:
5367                 sbuf_printf(s, "VF  %3d", element->element_info);
5368                 break;
5369         case I40E_AQ_SW_ELEM_TYPE_EMP:
5370                 sbuf_cat(s, "EMP");
5371                 break;
5372         case I40E_AQ_SW_ELEM_TYPE_BMC:
5373                 sbuf_cat(s, "BMC");
5374                 break;
5375         case I40E_AQ_SW_ELEM_TYPE_PV:
5376                 sbuf_cat(s, "PV");
5377                 break;
5378         case I40E_AQ_SW_ELEM_TYPE_VEB:
5379                 sbuf_cat(s, "VEB");
5380                 break;
5381         case I40E_AQ_SW_ELEM_TYPE_PA:
5382                 sbuf_cat(s, "PA");
5383                 break;
5384         case I40E_AQ_SW_ELEM_TYPE_VSI:
5385                 sbuf_printf(s, "VSI %3d", element->element_info);
5386                 break;
5387         default:
5388                 sbuf_cat(s, "?");
5389                 break;
5390         }
5391
5392         sbuf_finish(s);
5393         return sbuf_data(s);
5394 }
5395
5396 static int
5397 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5398 {
5399         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5400         struct i40e_hw *hw = &pf->hw;
5401         device_t dev = pf->dev;
5402         struct sbuf *buf;
5403         struct sbuf *nmbuf;
5404         enum i40e_status_code status;
5405         int error = 0;
5406         u16 next = 0;
5407         u8 aq_buf[I40E_AQ_LARGE_BUF];
5408
5409         struct i40e_aqc_get_switch_config_resp *sw_config;
5410         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5411
5412         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5413         if (!buf) {
5414                 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5415                 return (ENOMEM);
5416         }
5417
5418         status = i40e_aq_get_switch_config(hw, sw_config,
5419             sizeof(aq_buf), &next, NULL);
5420         if (status) {
5421                 device_printf(dev,
5422                     "%s: aq_get_switch_config() error %s, aq error %s\n",
5423                     __func__, i40e_stat_str(hw, status),
5424                     i40e_aq_str(hw, hw->aq.asq_last_status));
5425                 sbuf_delete(buf);
5426                 return (EIO);
5427         }
5428         if (next)
5429                 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5430                     __func__, next);
5431
5432         nmbuf = sbuf_new_auto();
5433         if (!nmbuf) {
5434                 device_printf(dev, "Could not allocate sbuf for name output.\n");
5435                 sbuf_delete(buf);
5436                 return (ENOMEM);
5437         }
5438
5439         sbuf_cat(buf, "\n");
5440         /* Assuming <= 255 elements in switch */
5441         sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5442         sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5443         /*
5444         ** Revision is omitted -- all elements are revision 1 for now.
5445         */
5446         sbuf_printf(buf,
5447             "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5448             "                |          |          | (uplink)\n");
5449         for (int i = 0; i < sw_config->header.num_reported; i++) {
5450                 // "%4d (%8s) | %8s   %8s   %#8x",
5451                 sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5452                 sbuf_cat(buf, " ");
5453                 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5454                     &sw_config->element[i]));
5455                 sbuf_cat(buf, " | ");
5456                 sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5457                 sbuf_cat(buf, "   ");
5458                 sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5459                 sbuf_cat(buf, "   ");
5460                 sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5461                 if (i < sw_config->header.num_reported - 1)
5462                         sbuf_cat(buf, "\n");
5463         }
5464         sbuf_delete(nmbuf);
5465
5466         error = sbuf_finish(buf);
5467         if (error)
5468                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5469
5470         sbuf_delete(buf);
5471
5472         return (error);
5473 }
5474
5475 static int
5476 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
5477 {
5478         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5479         struct i40e_hw *hw = &pf->hw;
5480         device_t dev = pf->dev;
5481         struct sbuf *buf;
5482         int error = 0;
5483         enum i40e_status_code status;
5484         u32 reg;
5485
5486         struct i40e_aqc_get_set_rss_key_data key_data;
5487
5488         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5489         if (!buf) {
5490                 device_printf(dev, "Could not allocate sbuf for output.\n");
5491                 return (ENOMEM);
5492         }
5493
5494         sbuf_cat(buf, "\n");
5495         if (hw->mac.type == I40E_MAC_X722) {
5496                 bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
5497                 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
5498                 if (status)
5499                         device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
5500                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5501                 sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
5502         } else {
5503                 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
5504                         reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
5505                         sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5506                 }
5507         }
5508
5509         error = sbuf_finish(buf);
5510         if (error)
5511                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5512         sbuf_delete(buf);
5513
5514         return (error);
5515 }
5516
5517 static int
5518 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
5519 {
5520         struct ixl_pf *pf = (struct ixl_pf *)arg1;
5521         struct i40e_hw *hw = &pf->hw;
5522         device_t dev = pf->dev;
5523         struct sbuf *buf;
5524         int error = 0;
5525         enum i40e_status_code status;
5526         u8 hlut[512];
5527         u32 reg;
5528
5529         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5530         if (!buf) {
5531                 device_printf(dev, "Could not allocate sbuf for output.\n");
5532                 return (ENOMEM);
5533         }
5534
5535         sbuf_cat(buf, "\n");
5536         if (hw->mac.type == I40E_MAC_X722) {
5537                 bzero(hlut, sizeof(hlut));
5538                 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
5539                 if (status)
5540                         device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
5541                             i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
5542                 sbuf_printf(buf, "%512D", (u_char *)hlut, "");
5543         } else {
5544                 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
5545                         reg = rd32(hw, I40E_PFQF_HLUT(i));
5546                         sbuf_printf(buf, "%4D", (u_char *)&reg, "");
5547                 }
5548         }
5549
5550         error = sbuf_finish(buf);
5551         if (error)
5552                 device_printf(dev, "Error finishing sbuf: %d\n", error);
5553         sbuf_delete(buf);
5554
5555         return (error);
5556 }
5557