]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - sys/dev/ixgbe/if_ixv.c
MFC r368207,368607:
[FreeBSD/stable/10.git] / sys / dev / ixgbe / if_ixv.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixv.h"
42
43 /************************************************************************
44  *  Driver version
45  ************************************************************************/
46 char ixv_driver_version[] = "1.5.9-k";
47
48 /************************************************************************
49  *  PCI Device ID Table
50  *
51  *  Used by probe to select devices to load on
52  *  Last field stores an index into ixv_strings
53  *  Last entry must be all 0s
54  *
55  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56  ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/*
	 * Subvendor/subdevice of 0 acts as a wildcard in ixv_probe();
	 * the final field indexes into ixv_strings[] below.
	 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 *  Table of branding strings
 *  (referenced by the string-index field of ixv_vendor_info_array)
 ************************************************************************/
static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
74
75 /************************************************************************
76  *  Function prototypes
77  ************************************************************************/
78 static int  ixv_probe(device_t);
79 static int  ixv_attach(device_t);
80 static int  ixv_detach(device_t);
81 static int  ixv_shutdown(device_t);
82 static int  ixv_ioctl(struct ifnet *, u_long, caddr_t);
83 static void ixv_init(void *);
84 static void ixv_stop(void *);
85 static void ixv_init_device_features(struct adapter *);
86 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
87 static int  ixv_media_change(struct ifnet *);
88 static int  ixv_allocate_pci_resources(struct adapter *);
89 static int  ixv_allocate_msix(struct adapter *);
90 static int  ixv_configure_interrupts(struct adapter *);
91 static void ixv_free_pci_resources(struct adapter *);
92 static void ixv_local_timer(void *);
93 static void ixv_setup_interface(device_t, struct adapter *);
94
95 static void ixv_initialize_transmit_units(struct adapter *);
96 static void ixv_initialize_receive_units(struct adapter *);
97 static void ixv_initialize_rss_mapping(struct adapter *);
98 static void ixv_check_link(struct adapter *);
99
100 static void ixv_enable_intr(struct adapter *);
101 static void ixv_disable_intr(struct adapter *);
102 static void ixv_set_multi(struct adapter *);
103 static void ixv_update_link_status(struct adapter *);
104 static int  ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
105 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
106 static void ixv_configure_ivars(struct adapter *);
107 static u8   *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
108
109 static void ixv_setup_vlan_support(struct adapter *);
110 static void ixv_register_vlan(void *, struct ifnet *, u16);
111 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
112
113 static void ixv_save_stats(struct adapter *);
114 static void ixv_init_stats(struct adapter *);
115 static void ixv_update_stats(struct adapter *);
116 static void ixv_add_stats_sysctls(struct adapter *);
117 static void ixv_set_sysctl_value(struct adapter *, const char *,
118                                  const char *, int *, int);
119
120 /* The MSI-X Interrupt handlers */
121 static void ixv_msix_que(void *);
122 static void ixv_msix_mbx(void *);
123
124 /* Deferred interrupt tasklets */
125 static void ixv_handle_que(void *, int);
126 static void ixv_handle_link(void *, int);
127
128 /************************************************************************
129  *  FreeBSD Device Interface Entry Points
130  ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#if __FreeBSD_version >= 1100000
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 * All of the following are loader tunables (CTLFLAG_RDTUN), read once
 * at load time under the hw.ixv sysctl node.
 */

static SYSCTL_NODE(_hw, OID_AUTO, ixv, CTLFLAG_RD, 0, "IXV driver parameters");

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
SYSCTL_INT(_hw_ixv, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixv_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
SYSCTL_INT(_hw_ixv, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixv_enable_aim, 0,
    "Adaptive Interrupt Moderation");

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
SYSCTL_INT(_hw_ixv, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixv_rx_process_limit, 0, "Limit to RX packet processing");

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
SYSCTL_INT(_hw_ixv, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixv_tx_process_limit, 0, "Limit to TX packet processing");

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
SYSCTL_INT(_hw_ixv, OID_AUTO, flow_control, CTLFLAG_RDTUN, &ixv_flow_control, 0,
    "Flow Control");

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
SYSCTL_INT(_hw_ixv, OID_AUTO, header_split, CTLFLAG_RDTUN, &ixv_header_split, 0,
    "Header Split: DMA header into separate mbuf");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);
SYSCTL_INT(_hw_ixv, OID_AUTO, txd, CTLFLAG_RDTUN, &ixv_txd, 0,
    "Number of Transmit descriptors");

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
SYSCTL_INT(_hw_ixv, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixv_rxd, 0,
    "Number of Receive descriptors");

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
SYSCTL_INT(_hw_ixv, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixv_enable_legacy_tx, 0, "Enable Legacy TX flow");

/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];

/*
 * Indirect TX entry points, selected at attach time based on the
 * IXGBE_FEATURE_LEGACY_TX feature flag (legacy if_start vs. mq transmit).
 */
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);

/* malloc(9) type for all allocations made by this driver */
MALLOC_DEFINE(M_IXV, "ixv", "ixv driver allocations");
237
238 /************************************************************************
239  * ixv_probe - Device identification routine
240  *
241  *   Determines if the driver should be loaded on
242  *   adapter based on its PCI vendor/device ID.
243  *
244  *   return BUS_PROBE_DEFAULT on success, positive on failure
245  ************************************************************************/
246 static int
247 ixv_probe(device_t dev)
248 {
249         ixgbe_vendor_info_t *ent;
250         u16                 pci_vendor_id = 0;
251         u16                 pci_device_id = 0;
252         u16                 pci_subvendor_id = 0;
253         u16                 pci_subdevice_id = 0;
254         char                adapter_name[256];
255
256
257         pci_vendor_id = pci_get_vendor(dev);
258         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
259                 return (ENXIO);
260
261         pci_device_id = pci_get_device(dev);
262         pci_subvendor_id = pci_get_subvendor(dev);
263         pci_subdevice_id = pci_get_subdevice(dev);
264
265         ent = ixv_vendor_info_array;
266         while (ent->vendor_id != 0) {
267                 if ((pci_vendor_id == ent->vendor_id) &&
268                     (pci_device_id == ent->device_id) &&
269                     ((pci_subvendor_id == ent->subvendor_id) ||
270                      (ent->subvendor_id == 0)) &&
271                     ((pci_subdevice_id == ent->subdevice_id) ||
272                      (ent->subdevice_id == 0))) {
273                         sprintf(adapter_name, "%s, Version - %s",
274                             ixv_strings[ent->index], ixv_driver_version);
275                         device_set_desc_copy(dev, adapter_name);
276                         return (BUS_PROBE_DEFAULT);
277                 }
278                 ent++;
279         }
280
281         return (ENXIO);
282 } /* ixv_probe */
283
284 /************************************************************************
285  * ixv_attach - Device initialization routine
286  *
287  *   Called when the driver is being loaded.
288  *   Identifies the type of hardware, allocates all resources
289  *   and initializes the hardware.
290  *
291  *   return 0 on success, positive on failure
292  ************************************************************************/
static int
ixv_attach(device_t dev)
{
	struct adapter  *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	pci_enable_busmaster(dev);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	/* Shared-code entry points used by common ix/ixv paths */
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs (per-device, under this device's sysctl tree) */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
	    "Debug Info");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "enable_aim", CTLFLAG_RW, &ixv_enable_aim, 1,
	    "Interrupt Moderation");

	/* Set up the timer callout (runs ixv_local_timer under core_mtx) */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		device_printf(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}

	/* Must run after mac.type is known */
	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox (PF<->VF communication channel) */
	ixv_init_mbx_params_vf(hw);

	//hw->mac.max_tx_queues = 2;
	//hw->mac.max_rx_queues = 2;

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version (1.2 required; older PFs fail here) */
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_12);
	if (error) {
		device_printf(dev, "MBX API 1.2 negotiation failed! Error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/*
	 * If no mac address was assigned by the PF, make a random one:
	 * clear the multicast bit and set the locally-administered bit.
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
	}

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN_TXD, MAX_TXD].
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixv_allocate_queues(adapter)) {
		device_printf(dev, "ixv_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	error = ixv_allocate_msix(adapter);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixv_netmap_attach(adapter);

	INIT_DEBUGOUT("ixv_attach: end");

	return (0);

	/* err_late: queues were allocated; err_out: only PCI/lock to undo */
err_late:
	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
	free(adapter->queues, M_IXV);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (error);
} /* ixv_attach */
503
504 /************************************************************************
505  * ixv_detach - Device removal routine
506  *
507  *   Called when the driver is being removed.
508  *   Stops the adapter and deallocates all the resources
509  *   that were allocated for driver operation.
510  *
511  *   return 0 on success, positive on failure
512  ************************************************************************/
static int
ixv_detach(device_t dev)
{
	struct adapter  *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Detach from the stack first so no new work arrives */
	ether_ifdetach(adapter->ifp);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues (TX + RX deferred work) */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring  *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox(link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Wait out any in-flight watchdog timer before freeing resources */
	callout_drain(&adapter->timer);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
	free(adapter->queues, M_IXV);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
570
571 /************************************************************************
572  * ixv_shutdown - Shutdown entry point
573  ************************************************************************/
574 static int
575 ixv_shutdown(device_t dev)
576 {
577         struct adapter *adapter = device_get_softc(dev);
578         IXGBE_CORE_LOCK(adapter);
579         ixv_stop(adapter);
580         IXGBE_CORE_UNLOCK(adapter);
581
582         return (0);
583 } /* ixv_shutdown */
584
585
586 /************************************************************************
587  * ixv_ioctl - Ioctl entry point
588  *
589  *   Called when the user wants to configure the interface.
590  *
591  *   return 0 on success, positive on failure
592  ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq   *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr  *ifa = (struct ifaddr *)data;
	bool           avoid_reset = FALSE;
#endif
	int            error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		 * Calling init results in link renegotiation,
		 * so we avoid doing it when possible.
		 */
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
		/*
		 * NOTE(review): this break is inside the INET/INET6
		 * conditional, so with neither option defined SIOCSIFADDR
		 * falls through into SIOCSIFMTU — presumably never built
		 * that way in practice; confirm before relying on it.
		 */
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			/* max_frame_size includes L2 header overhead */
			adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reprogram the multicast filter with intrs masked */
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* mask holds only the capability bits that changed */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		/* Reinit so the hardware picks up the new offload config */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
} /* ixv_ioctl */
699
700 /************************************************************************
701  * ixv_init_device_features
702  ************************************************************************/
703 static void
704 ixv_init_device_features(struct adapter *adapter)
705 {
706         adapter->feat_cap = IXGBE_FEATURE_NETMAP
707                           | IXGBE_FEATURE_RSS
708                           | IXGBE_FEATURE_LEGACY_TX;
709
710         /* A tad short on feature flags for VFs, atm. */
711         switch (adapter->hw.mac.type) {
712         case ixgbe_mac_82599_vf:
713                 adapter->feat_cap |= IXGBE_FEATURE_FRAME_LIMIT;
714                 break;
715         case ixgbe_mac_X540_vf:
716         case ixgbe_mac_X550_vf:
717         case ixgbe_mac_X550EM_x_vf:
718         case ixgbe_mac_X550EM_a_vf:
719         default:
720                 break;
721         }
722
723         /* Enabled by default... */
724         /* Netmap */
725         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
726                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
727         /* Receive-Side Scaling (RSS) */
728         if (adapter->feat_cap & IXGBE_FEATURE_RSS)
729                 adapter->feat_en |= IXGBE_FEATURE_RSS;
730         /* Frame size limitation */
731         if (adapter->feat_cap & IXGBE_FEATURE_FRAME_LIMIT)
732                 adapter->feat_en |= IXGBE_FEATURE_FRAME_LIMIT;
733
734         /* Enabled via sysctl... */
735         /* Legacy (single queue) transmit */
736         if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
737             ixv_enable_legacy_tx)
738                 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
739 } /* ixv_init_device_features */
740
741 /************************************************************************
742  * ixv_init_locked - Init entry point
743  *
744  *   Used in two ways: It is used by the stack as init entry
745  *   point in network interface structure. It is also used
746  *   by the driver as a hw/sw initialization routine to get
747  *   to a consistent state.
748  *
749  *   return 0 on success, positive on failure
750  ************************************************************************/
751 #define IXGBE_MHADD_MFS_SHIFT 16
752
753 void
754 ixv_init_locked(struct adapter *adapter)
755 {
756         struct ifnet    *ifp = adapter->ifp;
757         device_t        dev = adapter->dev;
758         struct ixgbe_hw *hw = &adapter->hw;
759         int             error = 0;
760
761         INIT_DEBUGOUT("ixv_init_locked: begin");
762         mtx_assert(&adapter->core_mtx, MA_OWNED);
763         hw->adapter_stopped = FALSE;
764         hw->mac.ops.stop_adapter(hw);
765         callout_stop(&adapter->timer);
766
767         /* reprogram the RAR[0] in case user changed it. */
768         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
769
770         /* Get the latest mac address, User can use a LAA */
771         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
772             IXGBE_ETH_LENGTH_OF_ADDRESS);
773         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
774
775         /* Prepare transmit descriptors and buffers */
776         if (ixv_setup_transmit_structures(adapter)) {
777                 device_printf(dev, "Could not setup transmit structures\n");
778                 ixv_stop(adapter);
779                 return;
780         }
781
782         /* Reset VF and renegotiate mailbox API version */
783         hw->mac.ops.reset_hw(hw);
784         error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_12);
785         if (error)
786                 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n",
787                     error);
788
789         ixv_initialize_transmit_units(adapter);
790
791         /* Setup Multicast table */
792         ixv_set_multi(adapter);
793
794         /*
795          * Determine the correct mbuf pool
796          * for doing jumbo/headersplit
797          */
798         if (ifp->if_mtu > ETHERMTU)
799                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
800         else
801                 adapter->rx_mbuf_sz = MCLBYTES;
802
803         /* Prepare receive descriptors and buffers */
804         if (ixv_setup_receive_structures(adapter)) {
805                 device_printf(dev, "Could not setup receive structures\n");
806                 ixv_stop(adapter);
807                 return;
808         }
809
810         /* Configure RX settings */
811         ixv_initialize_receive_units(adapter);
812
813         /* Set the various hardware offload abilities */
814         ifp->if_hwassist = 0;
815         if (ifp->if_capenable & IFCAP_TSO4)
816                 ifp->if_hwassist |= CSUM_TSO;
817         if (ifp->if_capenable & IFCAP_TXCSUM) {
818                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
819 #if __FreeBSD_version >= 800000
820                 ifp->if_hwassist |= CSUM_SCTP;
821 #endif
822         }
823
824         /* Set up VLAN offload and filter */
825         ixv_setup_vlan_support(adapter);
826
827         /* Set up MSI-X routing */
828         ixv_configure_ivars(adapter);
829
830         /* Set up auto-mask */
831         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
832
833         /* Set moderation on the Link interrupt */
834         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
835
836         /* Stats init */
837         ixv_init_stats(adapter);
838
839         /* Config/Enable Link */
840         hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
841             FALSE);
842
843         /* Start watchdog */
844         callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
845
846         /* And now turn on interrupts */
847         ixv_enable_intr(adapter);
848
849         /* Now inform the stack we're ready */
850         ifp->if_drv_flags |= IFF_DRV_RUNNING;
851         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
852
853         return;
854 } /* ixv_init_locked */
855
856 /************************************************************************
857  * ixv_init
858  ************************************************************************/
static void
ixv_init(void *arg)
{
        struct adapter *sc = arg;

        /* Serialize with other core-lock holders and do the real work. */
        IXGBE_CORE_LOCK(sc);
        ixv_init_locked(sc);
        IXGBE_CORE_UNLOCK(sc);
} /* ixv_init */
870
871
872 /*
873  * MSI-X Interrupt Handlers and Tasklets
874  */
875
876 static inline void
877 ixv_enable_queue(struct adapter *adapter, u32 vector)
878 {
879         struct ixgbe_hw *hw = &adapter->hw;
880         u32           queue = 1 << vector;
881         u32           mask;
882
883         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
884         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
885 } /* ixv_enable_queue */
886
887 static inline void
888 ixv_disable_queue(struct adapter *adapter, u32 vector)
889 {
890         struct ixgbe_hw *hw = &adapter->hw;
891         u64           queue = (u64)(1 << vector);
892         u32           mask;
893
894         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
895         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
896 } /* ixv_disable_queue */
897
898 static inline void
899 ixv_rearm_queues(struct adapter *adapter, u64 queues)
900 {
901         u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
902         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
903 } /* ixv_rearm_queues */
904
905
906 static void
907 ixv_handle_que(void *context, int pending)
908 {
909         struct ix_queue *que = context;
910         struct adapter  *adapter = que->adapter;
911         struct tx_ring  *txr = que->txr;
912         struct ifnet    *ifp = adapter->ifp;
913         bool            more;
914
915         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
916                 more = ixv_rxeof(que);
917                 IXGBE_TX_LOCK(txr);
918                 ixv_txeof(txr);
919                 if (!ixv_ring_empty(ifp, txr->br))
920                         ixv_start_locked(ifp, txr);
921                 IXGBE_TX_UNLOCK(txr);
922                 if (more) {
923                         taskqueue_enqueue(que->tq, &que->que_task);
924                         return;
925                 }
926         }
927
928         /* Reenable this interrupt */
929         ixv_enable_queue(adapter, que->msix);
930
931         return;
932 } /* ixv_handle_que */
933
934 /************************************************************************
935  * ixv_msix_que - MSI Queue Interrupt Service routine
936  ************************************************************************/
void
ixv_msix_que(void *arg)
{
        struct ix_queue *que = arg;
        struct adapter  *adapter = que->adapter;
        struct ifnet    *ifp = adapter->ifp;
        struct tx_ring  *txr = que->txr;
        struct rx_ring  *rxr = que->rxr;
        bool            more;
        u32             newitr = 0;

        /* Mask this vector while we process and count the interrupt. */
        ixv_disable_queue(adapter, que->msix);
        ++que->irqs;

        /* Clean the RX ring; 'more' is true if work remains. */
        more = ixv_rxeof(que);

        IXGBE_TX_LOCK(txr);
        ixv_txeof(txr);
        /*
         * Make certain that if the stack
         * has anything queued the task gets
         * scheduled to handle it.
         */
        if (!ixv_ring_empty(adapter->ifp, txr->br))
                ixv_start_locked(ifp, txr);
        IXGBE_TX_UNLOCK(txr);

        /* Do AIM now? */

        if (ixv_enable_aim == FALSE)
                goto no_calc;
        /*
         * Do Adaptive Interrupt Moderation:
         *  - Write out last calculated setting
         *  - Calculate based on average size over
         *    the last interval.
         */
        if (que->eitr_setting)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
                    que->eitr_setting);

        que->eitr_setting = 0;

        /* Idle, do nothing */
        if ((txr->bytes == 0) && (rxr->bytes == 0))
                goto no_calc;

        /* Average bytes per packet since the last interrupt, per direction. */
        if ((txr->bytes) && (txr->packets))
                newitr = txr->bytes/txr->packets;
        if ((rxr->bytes) && (rxr->packets))
                newitr = max(newitr, (rxr->bytes / rxr->packets));
        newitr += 24; /* account for hardware frame, crc */

        /* set an upper boundary */
        newitr = min(newitr, 3000);

        /* Be nice to the mid range */
        if ((newitr > 300) && (newitr < 1200))
                newitr = (newitr / 3);
        else
                newitr = (newitr / 2);

        /* Replicate the interval into both halves of the EITR value. */
        newitr |= newitr << 16;

        /* save for next interrupt */
        que->eitr_setting = newitr;

        /* Reset state */
        txr->bytes = 0;
        txr->packets = 0;
        rxr->bytes = 0;
        rxr->packets = 0;

no_calc:
        /* RX work pending goes to the que task; otherwise unmask the IRQ. */
        if (more)
                taskqueue_enqueue(que->tq, &que->que_task);
        else /* Reenable this interrupt */
                ixv_enable_queue(adapter, que->msix);

        return;
} /* ixv_msix_que */
1018
1019 /************************************************************************
1020  * ixv_msix_mbx
1021  ************************************************************************/
1022 static void
1023 ixv_msix_mbx(void *arg)
1024 {
1025         struct adapter  *adapter = arg;
1026         struct ixgbe_hw *hw = &adapter->hw;
1027         u32             reg;
1028
1029         ++adapter->link_irq;
1030
1031         /* First get the cause */
1032         reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1033         /* Clear interrupt with write */
1034         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1035
1036         /* Link status change */
1037         if (reg & IXGBE_EICR_LSC)
1038                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1039
1040         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1041
1042         return;
1043 } /* ixv_msix_mbx */
1044
1045 /************************************************************************
1046  * ixv_media_status - Media Ioctl callback
1047  *
1048  *   Called whenever the user queries the status of
1049  *   the interface using ifconfig.
1050  ************************************************************************/
1051 static void
1052 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1053 {
1054         struct adapter *adapter = ifp->if_softc;
1055
1056         INIT_DEBUGOUT("ixv_media_status: begin");
1057         IXGBE_CORE_LOCK(adapter);
1058         ixv_update_link_status(adapter);
1059
1060         ifmr->ifm_status = IFM_AVALID;
1061         ifmr->ifm_active = IFM_ETHER;
1062
1063         if (!adapter->link_active) {
1064                 IXGBE_CORE_UNLOCK(adapter);
1065                 return;
1066         }
1067
1068         ifmr->ifm_status |= IFM_ACTIVE;
1069
1070         switch (adapter->link_speed) {
1071                 case IXGBE_LINK_SPEED_1GB_FULL:
1072                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1073                         break;
1074                 case IXGBE_LINK_SPEED_10GB_FULL:
1075                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1076                         break;
1077                 case IXGBE_LINK_SPEED_100_FULL:
1078                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1079                         break;
1080                 case IXGBE_LINK_SPEED_10_FULL:
1081                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1082                         break;
1083         }
1084
1085         IXGBE_CORE_UNLOCK(adapter);
1086
1087         return;
1088 } /* ixv_media_status */
1089
1090 /************************************************************************
1091  * ixv_media_change - Media Ioctl callback
1092  *
1093  *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
1095  ************************************************************************/
1096 static int
1097 ixv_media_change(struct ifnet *ifp)
1098 {
1099         struct adapter *adapter = ifp->if_softc;
1100         struct ifmedia *ifm = &adapter->media;
1101
1102         INIT_DEBUGOUT("ixv_media_change: begin");
1103
1104         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1105                 return (EINVAL);
1106
1107         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1108         case IFM_AUTO:
1109                 break;
1110         default:
1111                 device_printf(adapter->dev, "Only auto media type\n");
1112                 return (EINVAL);
1113         }
1114
1115         return (0);
1116 } /* ixv_media_change */
1117
1118
1119 /************************************************************************
1120  * ixv_set_multi - Multicast Update
1121  *
1122  *   Called whenever multicast address list is updated.
1123  ************************************************************************/
1124 static void
1125 ixv_set_multi(struct adapter *adapter)
1126 {
1127         u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1128         u8                 *update_ptr;
1129         struct ifmultiaddr *ifma;
1130         struct ifnet       *ifp = adapter->ifp;
1131         int                mcnt = 0;
1132
1133         IOCTL_DEBUGOUT("ixv_set_multi: begin");
1134
1135 #if __FreeBSD_version < 800000
1136         IF_ADDR_LOCK(ifp);
1137 #else
1138         if_maddr_rlock(ifp);
1139 #endif
1140         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1141                 if (ifma->ifma_addr->sa_family != AF_LINK)
1142                         continue;
1143                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1144                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1145                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1146                 mcnt++;
1147         }
1148 #if __FreeBSD_version < 800000
1149         IF_ADDR_UNLOCK(ifp);
1150 #else
1151         if_maddr_runlock(ifp);
1152 #endif
1153
1154         update_ptr = mta;
1155
1156         adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1157             ixv_mc_array_itr, TRUE);
1158
1159         return;
1160 } /* ixv_set_multi */
1161
1162 /************************************************************************
1163  * ixv_mc_array_itr
1164  *
1165  *   An iterator function needed by the multicast shared code.
1166  *   It feeds the shared code routine the addresses in the
1167  *   array of ixv_set_multi() one by one.
1168  ************************************************************************/
1169 static u8 *
1170 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1171 {
1172         u8 *addr = *update_ptr;
1173         u8 *newptr;
1174         *vmdq = 0;
1175
1176         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1177         *update_ptr = newptr;
1178
1179         return addr;
1180 } /* ixv_mc_array_itr */
1181
1182 /************************************************************************
1183  * ixv_local_timer - Timer routine
1184  *
1185  *   Checks for link status, updates statistics,
1186  *   and runs the watchdog check.
1187  ************************************************************************/
static void
ixv_local_timer(void *arg)
{
        struct adapter  *adapter = arg;
        device_t        dev = adapter->dev;
        struct ix_queue *que = adapter->queues;
        u64             queues = 0;  /* bitmask of queues with pending TX work */
        int             hung = 0;    /* count of queues declared hung */

        mtx_assert(&adapter->core_mtx, MA_OWNED);

        ixv_check_link(adapter);

        /* Stats Update */
        ixv_update_stats(adapter);

        /*
         * Check the TX queues status
         *      - mark hung queues so we don't schedule on them
         *      - watchdog only if all queues show hung
         */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                /* Keep track of queues with work for soft irq */
                if (que->txr->busy)
                        queues |= ((u64)1 << que->me);
                /*
                 * Each time txeof runs without cleaning, but there
                 * are uncleaned descriptors it increments busy. If
                 * we get to the MAX we declare it hung.
                 *
                 * NOTE(review): this test reads que->busy while the
                 * marker below sets que->txr->busy — confirm the two
                 * counters are intentionally distinct.
                 */
                if (que->busy == IXGBE_QUEUE_HUNG) {
                        ++hung;
                        /* Mark the queue as inactive */
                        adapter->active_queues &= ~((u64)1 << que->me);
                        continue;
                } else {
                        /* Check if we've come back from hung */
                        if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
                }
                if (que->busy >= IXGBE_MAX_TX_BUSY) {
                        device_printf(dev,
                            "Warning queue %d appears to be hung!\n", i);
                        que->txr->busy = IXGBE_QUEUE_HUNG;
                        ++hung;
                }

        }

        /* Only truly watchdog if all queues show hung */
        if (hung == adapter->num_queues)
                goto watchdog;
        else if (queues != 0) { /* Force an IRQ on queues with work */
                ixv_rearm_queues(adapter, queues);
        }

        /* Rearm ourselves to run again in one second. */
        callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

        return;

watchdog:

        /* Full reset: mark the interface down and reinitialize. */
        device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
        adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        adapter->watchdog_events++;
        ixv_init_locked(adapter);
} /* ixv_local_timer */
1255
1256 /************************************************************************
1257  * ixv_update_link_status - Update OS on link state
1258  *
1259  * Note: Only updates the OS on the cached link state.
1260  *       The real check of the hardware only happens with
1261  *       a link interrupt.
1262  ************************************************************************/
1263 static void
1264 ixv_update_link_status(struct adapter *adapter)
1265 {
1266         struct ifnet *ifp = adapter->ifp;
1267         device_t     dev = adapter->dev;
1268
1269         if (adapter->link_up) {
1270                 if (adapter->link_active == FALSE) {
1271                         if (bootverbose)
1272                                 device_printf(dev,"Link is up %d Gbps %s \n",
1273                                     ((adapter->link_speed == 128) ? 10 : 1),
1274                                     "Full Duplex");
1275                         adapter->link_active = TRUE;
1276                         if_link_state_change(ifp, LINK_STATE_UP);
1277                 }
1278         } else { /* Link down */
1279                 if (adapter->link_active == TRUE) {
1280                         if (bootverbose)
1281                                 device_printf(dev,"Link is Down\n");
1282                         if_link_state_change(ifp, LINK_STATE_DOWN);
1283                         adapter->link_active = FALSE;
1284                 }
1285         }
1286
1287         return;
1288 } /* ixv_update_link_status */
1289
1290
1291 /************************************************************************
1292  * ixv_stop - Stop the hardware
1293  *
1294  *   Disables all traffic on the adapter by issuing a
1295  *   global reset on the MAC and deallocates TX/RX buffers.
1296  ************************************************************************/
1297 static void
1298 ixv_stop(void *arg)
1299 {
1300         struct ifnet    *ifp;
1301         struct adapter  *adapter = arg;
1302         struct ixgbe_hw *hw = &adapter->hw;
1303
1304         ifp = adapter->ifp;
1305
1306         mtx_assert(&adapter->core_mtx, MA_OWNED);
1307
1308         INIT_DEBUGOUT("ixv_stop: begin\n");
1309         ixv_disable_intr(adapter);
1310
1311         /* Tell the stack that the interface is no longer active */
1312         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1313
1314         hw->mac.ops.reset_hw(hw);
1315         adapter->hw.adapter_stopped = FALSE;
1316         hw->mac.ops.stop_adapter(hw);
1317         callout_stop(&adapter->timer);
1318
1319         /* reprogram the RAR[0] in case user changed it. */
1320         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1321
1322         return;
1323 } /* ixv_stop */
1324
1325
1326 /************************************************************************
1327  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
1328  ************************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
        device_t        dev = adapter->dev;
        struct ix_queue *que = adapter->queues;
        struct tx_ring  *txr = adapter->tx_rings;
        int             error, msix_ctrl, rid, vector = 0;

        /* One MSI-X vector per queue, allocated in order. */
        for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
                rid = vector + 1;
                que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (que->res == NULL) {
                        device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
                            vector);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, que->res,
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixv_msix_que, que, &que->tag);
                if (error) {
                        /*
                         * NOTE(review): que->res is nulled without a
                         * bus_release_resource(), and free_pci_resources
                         * skips the queue loop when adapter->res is NULL —
                         * confirm this IRQ resource is not leaked.
                         */
                        que->res = NULL;
                        device_printf(dev, "Failed to register QUE handler");
                        return (error);
                }
#if __FreeBSD_version >= 800504
                bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
                que->msix = vector;
                adapter->active_queues |= (u64)(1 << que->msix);
                /*
                 * Bind the MSI-X vector, and thus the
                 * ring to the corresponding CPU.
                 */
                if (adapter->num_queues > 1)
                        bus_bind_intr(dev, que->res, i);
                TASK_INIT(&txr->txq_task, 0, ixv_deferred_mq_start, txr);
                TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
                que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
                    taskqueue_thread_enqueue, &que->tq);
                taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
                    device_get_nameunit(adapter->dev));
        }

        /* and Mailbox */
        rid = vector + 1;
        adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (!adapter->res) {
                device_printf(dev,
                    "Unable to allocate bus resource: MBX interrupt [%d]\n",
                    rid);
                return (ENXIO);
        }
        /* Set the mbx handler function */
        error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, ixv_msix_mbx, adapter, &adapter->tag);
        if (error) {
                adapter->res = NULL;
                device_printf(dev, "Failed to register LINK handler");
                return (error);
        }
#if __FreeBSD_version >= 800504
        bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
        /* Remember which vector carries mailbox/link interrupts. */
        adapter->vector = vector;
        /* Tasklets for Mailbox */
        TASK_INIT(&adapter->link_task, 0, ixv_handle_link, adapter);
        adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
            taskqueue_thread_enqueue, &adapter->tq);
        taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
            device_get_nameunit(adapter->dev));
        /*
         * Due to a broken design QEMU will fail to properly
         * enable the guest for MSI-X unless the vectors in
         * the table are all set up, so we must rewrite the
         * ENABLE in the MSI-X control register again at this
         * point to cause it to successfully initialize us.
         */
        if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
                pci_find_cap(dev, PCIY_MSIX, &rid);
                rid += PCIR_MSIX_CTRL;
                msix_ctrl = pci_read_config(dev, rid, 2);
                msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
                pci_write_config(dev, rid, msix_ctrl, 2);
        }

        return (0);
} /* ixv_allocate_msix */
1419
1420 /************************************************************************
1421  * ixv_configure_interrupts - Setup MSI-X resources
1422  *
1423  *   Note: The VF device MUST use MSI-X, there is no fallback.
1424  ************************************************************************/
static int
ixv_configure_interrupts(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        int      rid, want, msgs;

        /* Must have at least 2 MSI-X vectors */
        msgs = pci_msix_count(dev);
        if (msgs < 2)
                goto out;
        /* Map the MSI-X table BAR (BAR3 on these VFs). */
        rid = PCIR_BAR(3);
        adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (adapter->msix_mem == NULL) {
                device_printf(adapter->dev, "Unable to map MSI-X table \n");
                goto out;
        }

        /*
         * Want vectors for the queues,
         * plus an additional for mailbox.
         */
        want = adapter->num_queues + 1;
        if (want > msgs) {
                /* Shrink the queue count to fit the available vectors. */
                want = msgs;
                adapter->num_queues = msgs - 1;
        } else
                msgs = want;
        if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
                device_printf(adapter->dev,
                    "Using MSI-X interrupts with %d vectors\n", want);
                /* reflect correct sysctl value */
                ixv_num_queues = adapter->num_queues;

                return (0);
        }
        /* Release in case alloc was insufficient */
        pci_release_msi(dev);
out:
        /*
         * 'rid' is only meaningful here when msix_mem was allocated,
         * i.e. on the paths where it was assigned above.
         */
        if (adapter->msix_mem != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, rid,
                    adapter->msix_mem);
                adapter->msix_mem = NULL;
        }
        device_printf(adapter->dev, "MSI-X config error\n");

        return (ENXIO);
} /* ixv_configure_interrupts */
1473
1474
1475 /************************************************************************
1476  * ixv_allocate_pci_resources
1477  ************************************************************************/
1478 static int
1479 ixv_allocate_pci_resources(struct adapter *adapter)
1480 {
1481         device_t dev = adapter->dev;
1482         int      rid;
1483
1484         rid = PCIR_BAR(0);
1485         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1486             RF_ACTIVE);
1487
1488         if (!(adapter->pci_mem)) {
1489                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1490                 return (ENXIO);
1491         }
1492
1493         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1494         adapter->osdep.mem_bus_space_handle =
1495             rman_get_bushandle(adapter->pci_mem);
1496         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1497
1498         /* Pick up the tuneable queues */
1499         adapter->num_queues = ixv_num_queues;
1500
1501         return (0);
1502 } /* ixv_allocate_pci_resources */
1503
1504 /************************************************************************
1505  * ixv_free_pci_resources
1506  ************************************************************************/
static void
ixv_free_pci_resources(struct adapter * adapter)
{
        struct ix_queue *que = adapter->queues;
        device_t        dev = adapter->dev;
        int             rid, memrid;

        /* BAR used for the MSI-X table mapping. */
        memrid = PCIR_BAR(MSIX_82598_BAR);

        /*
         * There is a slight possibility of a failure mode
         * in attach that will result in entering this function
         * before interrupt resources have been initialized, and
         * in that case we do not want to execute the loops below
         * We can detect this reliably by the state of the adapter
         * res pointer.
         */
        if (adapter->res == NULL)
                goto mem;

        /*
         *  Release all msix queue resources:
         */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                /* rid mirrors the allocation in ixv_allocate_msix(). */
                rid = que->msix + 1;
                if (que->tag != NULL) {
                        bus_teardown_intr(dev, que->res, que->tag);
                        que->tag = NULL;
                }
                if (que->res != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
        }


        /* Clean the Mailbox interrupt last */
        rid = adapter->vector + 1;

        if (adapter->tag != NULL) {
                bus_teardown_intr(dev, adapter->res, adapter->tag);
                adapter->tag = NULL;
        }
        if (adapter->res != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
        /* Release MSI-X messages, then the table and register mappings. */
        pci_release_msi(dev);

        if (adapter->msix_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY, memrid,
                    adapter->msix_mem);

        if (adapter->pci_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
                    adapter->pci_mem);

        return;
} /* ixv_free_pci_resources */
1564
1565 /************************************************************************
1566  * ixv_setup_interface
1567  *
1568  *   Setup networking device structure and register an interface.
1569  ************************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
        struct ifnet *ifp;

        INIT_DEBUGOUT("ixv_setup_interface: begin");

        /* Allocate the ifnet and wire up the driver entry points. */
        ifp = adapter->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL)
                panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_baudrate = 1000000000;
        ifp->if_init = ixv_init;
        ifp->if_softc = adapter;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 1100045
        /* TSO parameters */
        ifp->if_hw_tsomax = 65518;
        ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
        ifp->if_hw_tsomaxsegsize = 2048;
#endif
        /* Select the legacy (if_start) or multiqueue (if_transmit) TX path. */
        if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
                ifp->if_start = ixv_legacy_start;
                ixv_start_locked = ixv_legacy_start_locked;
                ixv_ring_empty = ixgbe_legacy_ring_empty;
        } else {
                ifp->if_transmit = ixv_mq_start;
                ifp->if_qflush = ixv_qflush;
                ixv_start_locked = ixv_mq_start_locked;
                ixv_ring_empty = drbr_empty;
        }
        IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);

        ether_ifattach(ifp, adapter->hw.mac.addr);

        adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        /* Set capability flags */
        ifp->if_capabilities |= IFCAP_HWCSUM
                             |  IFCAP_HWCSUM_IPV6
                             |  IFCAP_TSO
                             |  IFCAP_LRO
                             |  IFCAP_VLAN_HWTAGGING
                             |  IFCAP_VLAN_HWTSO
                             |  IFCAP_VLAN_HWCSUM
                             |  IFCAP_JUMBO_MTU
                             |  IFCAP_VLAN_MTU;

        /* Enable the above capabilities by default */
        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Specify the media types supported by this adapter and register
         * callbacks to update media and link information
         */
        ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
            ixv_media_status);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

        return;
} /* ixv_setup_interface */
1638
1639
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 *
 *   Programs the VF TX registers for every queue: write-back threshold,
 *   head/tail indices, descriptor ring base/length, relaxed-ordering
 *   control, and finally the queue enable bit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
        struct tx_ring *txr = adapter->tx_rings;
        struct ixgbe_hw  *hw = &adapter->hw;


        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                /* Bus (DMA) address of this ring's descriptor memory */
                u64 tdba = txr->txdma.dma_paddr;
                u32 txctrl, txdctl;

                /* Set WTHRESH to 8, burst writeback */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

                /* Set the HW Tx Head and Tail indices */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

                /* Set Tx Tail register (software-side handle for later doorbell writes) */
                txr->tail = IXGBE_VFTDT(i);

                /* Set Ring parameters */
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
                    (tdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
                /*
                 * NOTE(review): ring length is computed from the legacy
                 * descriptor size; presumably legacy and advanced TX
                 * descriptors are the same size here — confirm against
                 * ixgbe_type.h before changing.
                 */
                IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
                    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
                /* Clear the descriptor write-back relaxed-ordering bit */
                txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
                txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

                /* Now enable */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
        }

        return;
} /* ixv_initialize_transmit_units */
1684
1685
/************************************************************************
 * ixv_initialize_rss_mapping
 *
 *   Program the VF RSS key (VFRSSRK), redirection table (VFRETA), and
 *   hash-type enable bits (VFMRQC).  With the kernel RSS option active
 *   the key and bucket mapping come from the stack; otherwise a random
 *   key and a round-robin queue mapping are used.
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32             reta = 0, mrqc, rss_key[10];
        int             queue_id;
        int             i, j;
        u32             rss_hash_config;

        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                /* Fetch the configured RSS key */
                rss_getkey((uint8_t *)&rss_key);
        } else {
                /* set up random bits */
                arc4rand(&rss_key, sizeof(rss_key), 0);
        }

        /* Now fill out hash function seeds (10 x 32 bits = 40-byte key) */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

        /* Set up the redirection table (64 entries, 4 per register) */
        for (i = 0, j = 0; i < 64; i++, j++) {
                if (j == adapter->num_queues)
                        j = 0;

                if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * Fetch the RSS bucket id for the given indirection
                         * entry. Cap it at the number of configured buckets
                         * (which is num_queues.)
                         */
                        queue_id = rss_get_indirection_to_bucket(i);
                        queue_id = queue_id % adapter->num_queues;
                } else
                        queue_id = j;

                /*
                 * The low 8 bits are for hash value (n+0);
                 * The next 8 bits are for hash value (n+1), etc.
                 */
                reta >>= 8;
                reta |= ((uint32_t)queue_id) << 24;
                /* Flush the accumulated register every fourth entry */
                if ((i & 3) == 3) {
                        IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
                        reta = 0;
                }
        }

        /* Perform hash on these packet types */
        if (adapter->feat_en & IXGBE_FEATURE_RSS)
                rss_hash_config = rss_gethashconfig();
        else {
                /*
                 * Disable UDP - IP fragments aren't currently being handled
                 * and so we end up with a mix of 2-tuple and 4-tuple
                 * traffic.
                 */
                rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                                | RSS_HASHTYPE_RSS_TCP_IPV4
                                | RSS_HASHTYPE_RSS_IPV6
                                | RSS_HASHTYPE_RSS_TCP_IPV6;
        }

        /*
         * Translate the stack's hash types into MRQC field bits; the
         * *_EX variants have no corresponding MRQC bit here, so they
         * are only reported, not enabled.
         */
        mrqc = IXGBE_MRQC_RSSEN;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
                device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
                    __func__);
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
                device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
                    __func__);
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
                device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
                    __func__);
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
                device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
                    __func__);
        IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1780
1781
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 *
 *   Configures packet-split types, negotiates the max frame size with
 *   the PF, programs each RX descriptor ring (base, length, SRRCTL,
 *   head/tail), enables the queues, sets up RSS, and finally the
 *   checksum-offload register.
 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
        struct rx_ring *rxr = adapter->rx_rings;
        struct ixgbe_hw  *hw = &adapter->hw;
        struct ifnet   *ifp = adapter->ifp;
        u32            bufsz, rxcsum, psrtype;

        /* Receive buffer size: 4K clusters for jumbo MTU, else 2K */
        if (ifp->if_mtu > ETHERMTU)
                bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        else
                bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

        /* Header types eligible for packet split */
        psrtype = IXGBE_PSRTYPE_TCPHDR
                | IXGBE_PSRTYPE_UDPHDR
                | IXGBE_PSRTYPE_IPV4HDR
                | IXGBE_PSRTYPE_IPV6HDR
                | IXGBE_PSRTYPE_L2HDR;

        /*
         * NOTE(review): bit 29 of VFPSRTYPE presumably selects the
         * multi-queue/RSS mode — confirm against the 82599 datasheet.
         */
        if (adapter->num_queues > 1)
                psrtype |= 1 << 29;

        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

        /* Tell PF our max_frame size */
        if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size)) {
                /*
                 * Workaround for hardware that can't support frames with VLAN
                 * headers without turning on jumbo frames in the PF driver.
                 */
                if (adapter->feat_en & IXGBE_FEATURE_FRAME_LIMIT) {
                        device_printf(adapter->dev, "This is a device with a frame size limitation.  The PF driver is forced to deny a change in frame size to allow for VLAN headers while jumbo frames is not enabled.  To work around this, we're telling the stack that the MTU must shrink by sizeof(VLAN header) if VLANs are enabled.  Thus, our maximum frame size is standard MTU + ethernet header/CRC. If you want standard MTU plus VLAN headers, you can also enable jumbo frames in the PF first.\n");
                        adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
                        ifp->if_capabilities &= ~IFCAP_VLAN_MTU;
                        ifp->if_capenable &= ~IFCAP_VLAN_MTU;
                }
                /* Try again... */
                if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size)) {
                        device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
                }
        }

        for (int i = 0; i < adapter->num_queues; i++, rxr++) {
                /* Bus (DMA) address of this ring's descriptor memory */
                u64 rdba = rxr->rxdma.dma_paddr;
                u32 reg, rxdctl;

                /* Disable the queue */
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                rxdctl &= ~IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                /* Poll (up to ~10ms) until the disable takes effect */
                for (int j = 0; j < 10; j++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                msec_delay(1);
                        else
                                break;
                }
                wmb();
                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
                    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

                /* Reset the ring indices */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

                /* Set up the SRRCTL register: buffer size + descriptor type */
                reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
                reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                reg |= bufsz;
                reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
                IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

                /* Capture Rx Tail index (for later doorbell writes) */
                rxr->tail = IXGBE_VFRDT(rxr->me);

                /* Do the queue enabling last */
                rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                /* Poll (up to ~10ms) until the enable takes effect */
                for (int k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                break;
                        msec_delay(1);
                }
                wmb();

                /* Set the Tail Pointer */
                /*
                 * In netmap mode, we must preserve the buffers made
                 * available to userspace before the if_init()
                 * (this is true by default on the TX side, because
                 * init makes all buffers available to userspace).
                 *
                 * netmap_reset() and the device specific routines
                 * (e.g. ixgbe_setup_receive_rings()) map these
                 * buffers at the end of the NIC ring, so here we
                 * must set the RDT (tail) register to make sure
                 * they are not overwritten.
                 *
                 * In this driver the NIC ring starts at RDH = 0,
                 * RDT points to the last slot available for reception (?),
                 * so RDT = num_rx_desc - 1 means the whole ring is available.
                 */
#ifdef DEV_NETMAP
                if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
                    (ifp->if_capenable & IFCAP_NETMAP)) {
                        struct netmap_adapter *na = NA(adapter->ifp);
                        struct netmap_kring *kring = &na->rx_rings[i];
                        int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

                        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
                } else
#endif /* DEV_NETMAP */
                        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                            adapter->num_rx_desc - 1);
        }

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        ixv_initialize_rss_mapping(adapter);

        if (adapter->num_queues > 1) {
                /* RSS and RX IPP Checksum are mutually exclusive */
                rxcsum |= IXGBE_RXCSUM_PCSD;
        }

        if (ifp->if_capenable & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

        return;
} /* ixv_initialize_receive_units */
1926
/************************************************************************
 * ixv_setup_vlan_support
 *
 *   Re-enable VLAN tag stripping on every RX queue and repopulate the
 *   hardware VFTA (via the PF mailbox) from the soft shadow copy after
 *   a soft reset has cleared it.
 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32             ctrl, vid, vfta, retry;

        /*
         * We get here thru init_locked, meaning
         * a soft reset, this has already cleared
         * the VFTA and other state, so if there
         * have been no vlan's registered do nothing.
         */
        if (adapter->num_vlans == 0)
                return;

        /* Enable the queues (VME = VLAN tag stripping) */
        for (int i = 0; i < adapter->num_queues; i++) {
                ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                ctrl |= IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
                /*
                 * Let Rx path know that it needs to store VLAN tag
                 * as part of extra mbuf info.
                 */
                adapter->rx_rings[i].vtag_strip = TRUE;
        }

        /*
         * A soft reset zero's out the VFTA, so
         * we need to repopulate it now.
         */
        for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
                if (ixv_shadow_vfta[i] == 0)
                        continue;
                vfta = ixv_shadow_vfta[i];
                /*
                 * Reconstruct the vlan id's
                 * based on the bits set in each
                 * of the array ints.
                 */
                for (int j = 0; j < 32; j++) {
                        retry = 0;
                        if ((vfta & (1 << j)) == 0)
                                continue;
                        vid = (i * 32) + j;
                        /*
                         * Call the shared code mailbox routine; give up
                         * on a vid after a handful of failed attempts.
                         */
                        while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
                                if (++retry > 5)
                                        break;
                        }
                }
        }
} /* ixv_setup_vlan_support */
1983
1984 /************************************************************************
1985  * ixv_register_vlan
1986  *
1987  *   Run via a vlan config EVENT, it enables us to use the
1988  *   HW Filter table since we can get the vlan id. This just
1989  *   creates the entry in the soft version of the VFTA, init
1990  *   will repopulate the real table.
1991  ************************************************************************/
1992 static void
1993 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1994 {
1995         struct adapter *adapter = ifp->if_softc;
1996         u16            index, bit;
1997
1998         if (ifp->if_softc != arg) /* Not our event */
1999                 return;
2000
2001         if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2002                 return;
2003
2004         IXGBE_CORE_LOCK(adapter);
2005         index = (vtag >> 5) & 0x7F;
2006         bit = vtag & 0x1F;
2007         ixv_shadow_vfta[index] |= (1 << bit);
2008         ++adapter->num_vlans;
2009         /* Re-init to load the changes */
2010         ixv_init_locked(adapter);
2011         IXGBE_CORE_UNLOCK(adapter);
2012 } /* ixv_register_vlan */
2013
2014 /************************************************************************
2015  * ixv_unregister_vlan
2016  *
2017  *   Run via a vlan unconfig EVENT, remove our entry
2018  *   in the soft vfta.
2019  ************************************************************************/
2020 static void
2021 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2022 {
2023         struct adapter *adapter = ifp->if_softc;
2024         u16            index, bit;
2025
2026         if (ifp->if_softc !=  arg)
2027                 return;
2028
2029         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
2030                 return;
2031
2032         IXGBE_CORE_LOCK(adapter);
2033         index = (vtag >> 5) & 0x7F;
2034         bit = vtag & 0x1F;
2035         ixv_shadow_vfta[index] &= ~(1 << bit);
2036         --adapter->num_vlans;
2037         /* Re-init to load the changes */
2038         ixv_init_locked(adapter);
2039         IXGBE_CORE_UNLOCK(adapter);
2040 } /* ixv_unregister_vlan */
2041
2042 /************************************************************************
2043  * ixv_enable_intr
2044  ************************************************************************/
2045 static void
2046 ixv_enable_intr(struct adapter *adapter)
2047 {
2048         struct ixgbe_hw   *hw = &adapter->hw;
2049         struct ix_queue *que = adapter->queues;
2050         u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2051
2052
2053         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
2054
2055         mask = IXGBE_EIMS_ENABLE_MASK;
2056         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
2057         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2058
2059         for (int i = 0; i < adapter->num_queues; i++, que++)
2060                 ixv_enable_queue(adapter, que->msix);
2061
2062         IXGBE_WRITE_FLUSH(hw);
2063
2064         return;
2065 } /* ixv_enable_intr */
2066
2067 /************************************************************************
2068  * ixv_disable_intr
2069  ************************************************************************/
2070 static void
2071 ixv_disable_intr(struct adapter *adapter)
2072 {
2073         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2074         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
2075         IXGBE_WRITE_FLUSH(&adapter->hw);
2076
2077         return;
2078 } /* ixv_disable_intr */
2079
2080 /************************************************************************
2081  * ixv_set_ivar
2082  *
2083  *   Setup the correct IVAR register for a particular MSI-X interrupt
2084  *    - entry is the register array entry
2085  *    - vector is the MSI-X vector for this queue
2086  *    - type is RX/TX/MISC
2087  ************************************************************************/
2088 static void
2089 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2090 {
2091         struct ixgbe_hw *hw = &adapter->hw;
2092         u32             ivar, index;
2093
2094         vector |= IXGBE_IVAR_ALLOC_VAL;
2095
2096         if (type == -1) { /* MISC IVAR */
2097                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2098                 ivar &= ~0xFF;
2099                 ivar |= vector;
2100                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2101         } else {          /* RX/TX IVARS */
2102                 index = (16 * (entry & 1)) + (8 * type);
2103                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2104                 ivar &= ~(0xFF << index);
2105                 ivar |= (vector << index);
2106                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2107         }
2108 } /* ixv_set_ivar */
2109
2110 /************************************************************************
2111  * ixv_configure_ivars
2112  ************************************************************************/
2113 static void
2114 ixv_configure_ivars(struct adapter *adapter)
2115 {
2116         struct ix_queue *que = adapter->queues;
2117
2118         for (int i = 0; i < adapter->num_queues; i++, que++) {
2119                 /* First the RX queue entry */
2120                 ixv_set_ivar(adapter, i, que->msix, 0);
2121                 /* ... and the TX */
2122                 ixv_set_ivar(adapter, i, que->msix, 1);
2123                 /* Set an initial value in EITR */
2124                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
2125                     IXGBE_EITR_DEFAULT);
2126         }
2127
2128         /* For the mailbox interrupt */
2129         ixv_set_ivar(adapter, 1, adapter->vector, -1);
2130 } /* ixv_configure_ivars */
2131
2132
2133 /************************************************************************
2134  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2135  *
2136  *   Done outside of interrupt context since the driver might sleep
2137  ************************************************************************/
2138 static void
2139 ixv_handle_link(void *context, int pending)
2140 {
2141         struct adapter *adapter = context;
2142
2143         adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2144             &adapter->link_up, FALSE);
2145         ixv_update_link_status(adapter);
2146 } /* ixv_handle_link */
2147
2148 /************************************************************************
2149  * ixv_check_link - Used in the local timer to poll for link changes
2150  ************************************************************************/
2151 static void
2152 ixv_check_link(struct adapter *adapter)
2153 {
2154         adapter->hw.mac.get_link_status = TRUE;
2155
2156         adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2157             &adapter->link_up, FALSE);
2158         ixv_update_link_status(adapter);
2159 } /* ixv_check_link */
2160
2161 /************************************************************************
2162  * ixv_save_stats
2163  *
2164  *   The VF stats registers never have a truly virgin
2165  *   starting point, so this routine tries to make an
2166  *   artificial one, marking ground zero on attach as
2167  *   it were.
2168  ************************************************************************/
2169 static void
2170 ixv_save_stats(struct adapter *adapter)
2171 {
2172         if (adapter->stats_vf.vfgprc || adapter->stats_vf.vfgptc) {
2173                 adapter->stats_vf.saved_reset_vfgprc +=
2174                     adapter->stats_vf.vfgprc - adapter->stats_vf.base_vfgprc;
2175                 adapter->stats_vf.saved_reset_vfgptc +=
2176                     adapter->stats_vf.vfgptc - adapter->stats_vf.base_vfgptc;
2177                 adapter->stats_vf.saved_reset_vfgorc +=
2178                     adapter->stats_vf.vfgorc - adapter->stats_vf.base_vfgorc;
2179                 adapter->stats_vf.saved_reset_vfgotc +=
2180                     adapter->stats_vf.vfgotc - adapter->stats_vf.base_vfgotc;
2181                 adapter->stats_vf.saved_reset_vfmprc +=
2182                     adapter->stats_vf.vfmprc - adapter->stats_vf.base_vfmprc;
2183         }
2184 } /* ixv_save_stats */
2185
2186 /************************************************************************
2187  * ixv_init_stats
2188  ************************************************************************/
2189 static void
2190 ixv_init_stats(struct adapter *adapter)
2191 {
2192         struct ixgbe_hw *hw = &adapter->hw;
2193
2194         adapter->stats_vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2195         adapter->stats_vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2196         adapter->stats_vf.last_vfgorc |=
2197             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2198
2199         adapter->stats_vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2200         adapter->stats_vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2201         adapter->stats_vf.last_vfgotc |=
2202             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2203
2204         adapter->stats_vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2205
2206         adapter->stats_vf.base_vfgprc = adapter->stats_vf.last_vfgprc;
2207         adapter->stats_vf.base_vfgorc = adapter->stats_vf.last_vfgorc;
2208         adapter->stats_vf.base_vfgptc = adapter->stats_vf.last_vfgptc;
2209         adapter->stats_vf.base_vfgotc = adapter->stats_vf.last_vfgotc;
2210         adapter->stats_vf.base_vfmprc = adapter->stats_vf.last_vfmprc;
2211 } /* ixv_init_stats */
2212
/*
 * UPDATE_STAT_32(reg, last, count)
 *
 * Fold a 32-bit, free-running HW counter into a 64-bit software total:
 * when the register value drops below the previous reading the counter
 * wrapped, so add 2^32 to the total, then replace the total's low 32
 * bits with the current reading.
 *
 * NOTE: expands 'last' and 'count' multiple times (pass simple lvalues
 * only) and references a local 'hw' (struct ixgbe_hw *) by name.
 */
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
        u32 current = IXGBE_READ_REG(hw, reg);          \
        if (current < last)                             \
                count += 0x100000000LL;                 \
        last = current;                                 \
        count &= 0xFFFFFFFF00000000LL;                  \
        count |= current;                               \
}
2222
/*
 * UPDATE_STAT_36(lsb, msb, last, count)
 *
 * Same wrap-handling as UPDATE_STAT_32, but for a 36-bit HW counter
 * split across two registers (low 32 bits in 'lsb', high 4 bits in
 * 'msb'); a wrap adds 2^36 and the total's low 36 bits are replaced
 * with the current reading.
 *
 * NOTE: expands 'last' and 'count' multiple times (pass simple lvalues
 * only) and references a local 'hw' (struct ixgbe_hw *) by name.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
        u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
        u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
        u64 current = ((cur_msb << 32) | cur_lsb);      \
        if (current < last)                             \
                count += 0x1000000000LL;                \
        last = current;                                 \
        count &= 0xFFFFFFF000000000LL;                  \
        count |= current;                               \
}
2234
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 *
 *   Reads each VF counter register and folds it into the running
 *   totals in adapter->stats_vf.  The UPDATE_STAT_* macros reference
 *   the local 'hw' variable by name — do not rename it.
 ************************************************************************/
void
ixv_update_stats(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* 32-bit packet counters */
        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats_vf.last_vfgprc,
            adapter->stats_vf.vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats_vf.last_vfgptc,
            adapter->stats_vf.vfgptc);
        /* 36-bit octet counters, split across LSB/MSB registers */
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
            adapter->stats_vf.last_vfgorc, adapter->stats_vf.vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
            adapter->stats_vf.last_vfgotc, adapter->stats_vf.vfgotc);
        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats_vf.last_vfmprc,
            adapter->stats_vf.vfmprc);
} /* ixv_update_stats */
2254
2255 /************************************************************************
2256  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2257  ************************************************************************/
2258 static void
2259 ixv_add_stats_sysctls(struct adapter *adapter)
2260 {
2261         device_t                dev = adapter->dev;
2262         struct tx_ring          *txr = adapter->tx_rings;
2263         struct rx_ring          *rxr = adapter->rx_rings;
2264         struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
2265         struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
2266         struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
2267         struct ixgbevf_hw_stats *stats = &adapter->stats_vf;
2268         struct sysctl_oid       *stat_node, *queue_node;
2269         struct sysctl_oid_list  *stat_list, *queue_list;
2270
2271 #define QUEUE_NAME_LEN 32
2272         char                    namebuf[QUEUE_NAME_LEN];
2273
2274         /* Driver Statistics */
2275         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2276             CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
2277         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
2278             CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
2279         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2280             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
2281         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
2282             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
2283
2284         for (int i = 0; i < adapter->num_queues; i++, txr++) {
2285                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
2286                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2287                     CTLFLAG_RD, NULL, "Queue Name");
2288                 queue_list = SYSCTL_CHILDREN(queue_node);
2289
2290                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2291                     CTLFLAG_RD, &(adapter->queues[i].irqs), "IRQs on queue");
2292                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
2293                     CTLFLAG_RD, &(txr->no_tx_dma_setup),
2294                     "Driver Tx DMA failure in Tx");
2295                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
2296                     CTLFLAG_RD, &(txr->no_desc_avail),
2297                     "Not-enough-descriptors count: TX");
2298                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2299                     CTLFLAG_RD, &(txr->total_packets), "TX Packets");
2300                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
2301                     CTLFLAG_RD, &txr->br->br_drops,
2302                     "Not-enough-descriptors count: TX");
2303         }
2304
2305         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2306                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
2307                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2308                     CTLFLAG_RD, NULL, "Queue Name");
2309                 queue_list = SYSCTL_CHILDREN(queue_node);
2310
2311                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2312                     CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
2313                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2314                     CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
2315                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2316                     CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
2317         }
2318
2319         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2320             CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
2321         stat_list = SYSCTL_CHILDREN(stat_node);
2322
2323         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2324             CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
2325         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2326             CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
2327         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2328             CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
2329         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2330             CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
2331         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2332             CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
2333 } /* ixv_add_stats_sysctls */
2334
2335 /************************************************************************
2336  * ixv_set_sysctl_value
2337  ************************************************************************/
2338 static void
2339 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2340         const char *description, int *limit, int value)
2341 {
2342         *limit = value;
2343         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2344             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2345             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
2346 } /* ixv_set_sysctl_value */
2347
2348 /************************************************************************
2349  * ixv_print_debug_info
2350  *
2351  *   Called only when em_display_debug_stats is enabled.
2352  *   Provides a way to take a look at important statistics
2353  *   maintained by the driver and hardware.
2354  ************************************************************************/
2355 static void
2356 ixv_print_debug_info(struct adapter *adapter)
2357 {
2358         device_t        dev = adapter->dev;
2359         struct ixgbe_hw *hw = &adapter->hw;
2360         struct ix_queue *que = adapter->queues;
2361         struct rx_ring  *rxr;
2362         struct tx_ring  *txr;
2363         struct lro_ctrl *lro;
2364
2365         device_printf(dev, "Error Byte Count = %u \n",
2366             IXGBE_READ_REG(hw, IXGBE_ERRBC));
2367
2368         for (int i = 0; i < adapter->num_queues; i++, que++) {
2369                 txr = que->txr;
2370                 rxr = que->rxr;
2371                 lro = &rxr->lro;
2372                 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2373                     que->msix, (long)que->irqs);
2374                 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2375                     rxr->me, (long long)rxr->rx_packets);
2376                 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2377                     rxr->me, (long)rxr->rx_bytes);
2378 #if __FreeBSD_version < 1100000
2379                 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
2380                     rxr->me, (long long)lro->lro_queued);
2381                 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
2382                     rxr->me, (long long)lro->lro_flushed);
2383 #else
2384                 device_printf(dev, "RX(%d) LRO Queued= %lu\n",
2385                     rxr->me, lro->lro_queued);
2386                 device_printf(dev, "RX(%d) LRO Flushed= %lu\n",
2387                     rxr->me, lro->lro_flushed);
2388 #endif
2389                 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2390                     txr->me, (long)txr->total_packets);
2391                 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2392                     txr->me, (long)txr->no_desc_avail);
2393         }
2394
2395         device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
2396 } /* ixv_print_debug_info */
2397
2398 static int
2399 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2400 {
2401         struct adapter *adapter;
2402         int            error, result;
2403
2404         result = -1;
2405         error = sysctl_handle_int(oidp, &result, 0, req);
2406
2407         if (error || !req->newptr)
2408                 return (error);
2409
2410         if (result == 1) {
2411                 adapter = (struct adapter *)arg1;
2412                 ixv_print_debug_info(adapter);
2413         }
2414
2415         return error;
2416 } /* ixv_sysctl_debug */
2417