]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/ixgbe/if_ixv.c
Merge lldb trunk r321017 to contrib/llvm/tools/lldb.
[FreeBSD/FreeBSD.git] / sys / dev / ixgbe / if_ixv.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "1.5.13-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 *
 *   Indexed by the last field of ixv_vendor_info_array entries; all
 *   supported VF devices currently share the single string below.
 ************************************************************************/
static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
74
75 /************************************************************************
76  * Function prototypes
77  ************************************************************************/
78 static int      ixv_probe(device_t);
79 static int      ixv_attach(device_t);
80 static int      ixv_detach(device_t);
81 static int      ixv_shutdown(device_t);
82 static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
83 static void     ixv_init(void *);
84 static void     ixv_init_locked(struct adapter *);
85 static void     ixv_stop(void *);
86 static uint64_t ixv_get_counter(struct ifnet *, ift_counter);
87 static void     ixv_init_device_features(struct adapter *);
88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int      ixv_media_change(struct ifnet *);
90 static int      ixv_allocate_pci_resources(struct adapter *);
91 static int      ixv_allocate_msix(struct adapter *);
92 static int      ixv_configure_interrupts(struct adapter *);
93 static void     ixv_free_pci_resources(struct adapter *);
94 static void     ixv_local_timer(void *);
95 static void     ixv_setup_interface(device_t, struct adapter *);
96 static int      ixv_negotiate_api(struct adapter *);
97
98 static void     ixv_initialize_transmit_units(struct adapter *);
99 static void     ixv_initialize_receive_units(struct adapter *);
100 static void     ixv_initialize_rss_mapping(struct adapter *);
101 static void     ixv_check_link(struct adapter *);
102
103 static void     ixv_enable_intr(struct adapter *);
104 static void     ixv_disable_intr(struct adapter *);
105 static void     ixv_set_multi(struct adapter *);
106 static void     ixv_update_link_status(struct adapter *);
107 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
108 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
109 static void     ixv_configure_ivars(struct adapter *);
110 static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
111
112 static void     ixv_setup_vlan_support(struct adapter *);
113 static void     ixv_register_vlan(void *, struct ifnet *, u16);
114 static void     ixv_unregister_vlan(void *, struct ifnet *, u16);
115
116 static void     ixv_save_stats(struct adapter *);
117 static void     ixv_init_stats(struct adapter *);
118 static void     ixv_update_stats(struct adapter *);
119 static void     ixv_add_stats_sysctls(struct adapter *);
120 static void     ixv_set_sysctl_value(struct adapter *, const char *,
121                                      const char *, int *, int);
122
123 /* The MSI-X Interrupt handlers */
124 static void     ixv_msix_que(void *);
125 static void     ixv_msix_mbx(void *);
126
127 /* Deferred interrupt tasklets */
128 static void     ixv_handle_que(void *, int);
129 static void     ixv_handle_link(void *, int);
130
/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};

/* Driver description: name, newbus method table, per-device softc size */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
/* Register on the PCI bus and declare load-order module dependencies */
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
152
/*
 * TUNEABLE PARAMETERS:
 * (settable from loader.conf via the hw.ixv.* knobs below)
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/* Legacy Transmit (single queue) */
static int ixv_enable_legacy_tx = 0;
TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);

/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];

/*
 * TX dispatch hooks. NOTE(review): presumably selected between the
 * legacy and multiqueue transmit paths (cf. ixv_enable_legacy_tx);
 * the assignment is not visible in this chunk — confirm against
 * ixv_setup_interface().
 */
static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
216
217 /************************************************************************
218  * ixv_probe - Device identification routine
219  *
220  *   Determines if the driver should be loaded on
221  *   adapter based on its PCI vendor/device ID.
222  *
223  *   return BUS_PROBE_DEFAULT on success, positive on failure
224  ************************************************************************/
225 static int
226 ixv_probe(device_t dev)
227 {
228         ixgbe_vendor_info_t *ent;
229         u16                 pci_vendor_id = 0;
230         u16                 pci_device_id = 0;
231         u16                 pci_subvendor_id = 0;
232         u16                 pci_subdevice_id = 0;
233         char                adapter_name[256];
234
235
236         pci_vendor_id = pci_get_vendor(dev);
237         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
238                 return (ENXIO);
239
240         pci_device_id = pci_get_device(dev);
241         pci_subvendor_id = pci_get_subvendor(dev);
242         pci_subdevice_id = pci_get_subdevice(dev);
243
244         ent = ixv_vendor_info_array;
245         while (ent->vendor_id != 0) {
246                 if ((pci_vendor_id == ent->vendor_id) &&
247                     (pci_device_id == ent->device_id) &&
248                     ((pci_subvendor_id == ent->subvendor_id) ||
249                      (ent->subvendor_id == 0)) &&
250                     ((pci_subdevice_id == ent->subdevice_id) ||
251                      (ent->subdevice_id == 0))) {
252                         sprintf(adapter_name, "%s, Version - %s",
253                             ixv_strings[ent->index], ixv_driver_version);
254                         device_set_desc_copy(dev, adapter_name);
255                         return (BUS_PROBE_DEFAULT);
256                 }
257                 ent++;
258         }
259
260         return (ENXIO);
261 } /* ixv_probe */
262
263 /************************************************************************
264  * ixv_attach - Device initialization routine
265  *
266  *   Called when the driver is being loaded.
267  *   Identifies the type of hardware, allocates all resources
268  *   and initializes the hardware.
269  *
270  *   return 0 on success, positive on failure
271  ************************************************************************/
static int
ixv_attach(device_t dev)
{
	struct adapter  *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/*
	 * Make sure BUSMASTER is set, on a VM under
	 * KVM it may not be and will break things.
	 */
	pci_enable_busmaster(dev);

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	adapter->hw.back = adapter;
	hw = &adapter->hw;

	/* Hooks used by shared code to init/stop with the core lock held */
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
	    "Debug Info");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "enable_aim", CTLFLAG_RW, &ixv_enable_aim, 1,
	    "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		/* Shouldn't get here since probe succeeded */
		device_printf(dev, "Unknown device ID!\n");
		error = ENXIO;
		goto err_out;
		break;
	}

	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	/* Reset and initialize hardware; any failure aborts the attach */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE; /* clear the multicast bit */
		addr[0] |= 0x02; /* set the locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
	}

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/*
	 * Do descriptor calc and sanity checks: the ring byte size must
	 * be DBA_ALIGN-aligned and the count within [MIN, MAX]; fall
	 * back to defaults on a bad tunable rather than failing attach.
	 */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Setup MSI-X */
	error = ixv_configure_interrupts(adapter);
	if (error)
		goto err_out;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		device_printf(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	error = ixv_allocate_msix(adapter);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		ixgbe_netmap_attach(adapter);

	INIT_DEBUGOUT("ixv_attach: end");

	return (0);

err_late:
	/* Queues and rings were allocated before MSI-X setup failed */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
err_out:
	ixv_free_pci_resources(adapter);
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (error);
} /* ixv_attach */
478
479 /************************************************************************
480  * ixv_detach - Device removal routine
481  *
482  *   Called when the driver is being removed.
483  *   Stops the adapter and deallocates all the resources
484  *   that were allocated for driver operation.
485  *
486  *   return 0 on success, positive on failure
487  ************************************************************************/
static int
ixv_detach(device_t dev)
{
	struct adapter  *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Detach from the ifnet layer, then stop the hardware */
	ether_ifdetach(adapter->ifp);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring  *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox(link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Stop the watchdog after tasks are quiesced */
	callout_drain(&adapter->timer);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	/* Release bus resources and the ifnet, then free ring state */
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
545
546 /************************************************************************
547  * ixv_init_locked - Init entry point
548  *
549  *   Used in two ways: It is used by the stack as an init entry
550  *   point in network interface structure. It is also used
551  *   by the driver as a hw/sw initialization routine to get
552  *   to a consistent state.
553  *
554  *   return 0 on success, positive on failure
555  ************************************************************************/
556 void
557 ixv_init_locked(struct adapter *adapter)
558 {
559         struct ifnet    *ifp = adapter->ifp;
560         device_t        dev = adapter->dev;
561         struct ixgbe_hw *hw = &adapter->hw;
562         int             error = 0;
563
564         INIT_DEBUGOUT("ixv_init_locked: begin");
565         mtx_assert(&adapter->core_mtx, MA_OWNED);
566         hw->adapter_stopped = FALSE;
567         hw->mac.ops.stop_adapter(hw);
568         callout_stop(&adapter->timer);
569
570         /* reprogram the RAR[0] in case user changed it. */
571         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
572
573         /* Get the latest mac address, User can use a LAA */
574         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
575             IXGBE_ETH_LENGTH_OF_ADDRESS);
576         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
577
578         /* Prepare transmit descriptors and buffers */
579         if (ixgbe_setup_transmit_structures(adapter)) {
580                 device_printf(dev, "Could not setup transmit structures\n");
581                 ixv_stop(adapter);
582                 return;
583         }
584
585         /* Reset VF and renegotiate mailbox API version */
586         hw->mac.ops.reset_hw(hw);
587         error = ixv_negotiate_api(adapter);
588         if (error) {
589                 device_printf(dev,
590                     "Mailbox API negotiation failed in init_locked!\n");
591                 return;
592         }
593
594         ixv_initialize_transmit_units(adapter);
595
596         /* Setup Multicast table */
597         ixv_set_multi(adapter);
598
599         /*
600          * Determine the correct mbuf pool
601          * for doing jumbo/headersplit
602          */
603         if (ifp->if_mtu > ETHERMTU)
604                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
605         else
606                 adapter->rx_mbuf_sz = MCLBYTES;
607
608         /* Prepare receive descriptors and buffers */
609         if (ixgbe_setup_receive_structures(adapter)) {
610                 device_printf(dev, "Could not setup receive structures\n");
611                 ixv_stop(adapter);
612                 return;
613         }
614
615         /* Configure RX settings */
616         ixv_initialize_receive_units(adapter);
617
618         /* Set the various hardware offload abilities */
619         ifp->if_hwassist = 0;
620         if (ifp->if_capenable & IFCAP_TSO4)
621                 ifp->if_hwassist |= CSUM_TSO;
622         if (ifp->if_capenable & IFCAP_TXCSUM) {
623                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
624 #if __FreeBSD_version >= 800000
625                 ifp->if_hwassist |= CSUM_SCTP;
626 #endif
627         }
628
629         /* Set up VLAN offload and filter */
630         ixv_setup_vlan_support(adapter);
631
632         /* Set up MSI-X routing */
633         ixv_configure_ivars(adapter);
634
635         /* Set up auto-mask */
636         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
637
638         /* Set moderation on the Link interrupt */
639         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
640
641         /* Stats init */
642         ixv_init_stats(adapter);
643
644         /* Config/Enable Link */
645         hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
646             FALSE);
647
648         /* Start watchdog */
649         callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
650
651         /* And now turn on interrupts */
652         ixv_enable_intr(adapter);
653
654         /* Now inform the stack we're ready */
655         ifp->if_drv_flags |= IFF_DRV_RUNNING;
656         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
657
658         return;
659 } /* ixv_init_locked */
660
661 /*
662  * MSI-X Interrupt Handlers and Tasklets
663  */
664
665 static inline void
666 ixv_enable_queue(struct adapter *adapter, u32 vector)
667 {
668         struct ixgbe_hw *hw = &adapter->hw;
669         u32             queue = 1 << vector;
670         u32             mask;
671
672         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
673         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
674 } /* ixv_enable_queue */
675
676 static inline void
677 ixv_disable_queue(struct adapter *adapter, u32 vector)
678 {
679         struct ixgbe_hw *hw = &adapter->hw;
680         u64             queue = (u64)(1 << vector);
681         u32             mask;
682
683         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
684         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
685 } /* ixv_disable_queue */
686
687 static inline void
688 ixv_rearm_queues(struct adapter *adapter, u64 queues)
689 {
690         u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
691         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
692 } /* ixv_rearm_queues */
693
694
695 /************************************************************************
696  * ixv_msix_que - MSI Queue Interrupt Service routine
697  ************************************************************************/
698 void
699 ixv_msix_que(void *arg)
700 {
701         struct ix_queue *que = arg;
702         struct adapter  *adapter = que->adapter;
703         struct ifnet    *ifp = adapter->ifp;
704         struct tx_ring  *txr = que->txr;
705         struct rx_ring  *rxr = que->rxr;
706         bool            more;
707         u32             newitr = 0;
708
709         ixv_disable_queue(adapter, que->msix);
710         ++que->irqs;
711
712         more = ixgbe_rxeof(que);
713
714         IXGBE_TX_LOCK(txr);
715         ixgbe_txeof(txr);
716         /*
717          * Make certain that if the stack
718          * has anything queued the task gets
719          * scheduled to handle it.
720          */
721         if (!ixv_ring_empty(adapter->ifp, txr->br))
722                 ixv_start_locked(ifp, txr);
723         IXGBE_TX_UNLOCK(txr);
724
725         /* Do AIM now? */
726
727         if (ixv_enable_aim == FALSE)
728                 goto no_calc;
729         /*
730          * Do Adaptive Interrupt Moderation:
731          *  - Write out last calculated setting
732          *  - Calculate based on average size over
733          *    the last interval.
734          */
735         if (que->eitr_setting)
736                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
737                     que->eitr_setting);
738
739         que->eitr_setting = 0;
740
741         /* Idle, do nothing */
742         if ((txr->bytes == 0) && (rxr->bytes == 0))
743                 goto no_calc;
744
745         if ((txr->bytes) && (txr->packets))
746                 newitr = txr->bytes/txr->packets;
747         if ((rxr->bytes) && (rxr->packets))
748                 newitr = max(newitr, (rxr->bytes / rxr->packets));
749         newitr += 24; /* account for hardware frame, crc */
750
751         /* set an upper boundary */
752         newitr = min(newitr, 3000);
753
754         /* Be nice to the mid range */
755         if ((newitr > 300) && (newitr < 1200))
756                 newitr = (newitr / 3);
757         else
758                 newitr = (newitr / 2);
759
760         newitr |= newitr << 16;
761
762         /* save for next interrupt */
763         que->eitr_setting = newitr;
764
765         /* Reset state */
766         txr->bytes = 0;
767         txr->packets = 0;
768         rxr->bytes = 0;
769         rxr->packets = 0;
770
771 no_calc:
772         if (more)
773                 taskqueue_enqueue(que->tq, &que->que_task);
774         else /* Re-enable this interrupt */
775                 ixv_enable_queue(adapter, que->msix);
776
777         return;
778 } /* ixv_msix_que */
779
/************************************************************************
 * ixv_msix_mbx - Mailbox/link MSI-X interrupt service routine
 *
 *   Reads and clears the VF interrupt cause register; on a link
 *   status change the real work is deferred to the link taskqueue.
 ************************************************************************/
static void
ixv_msix_mbx(void *arg)
{
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reg;

	++adapter->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* Re-enable the "other" (mailbox/link) interrupt cause */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return;
} /* ixv_msix_mbx */
805
806 /************************************************************************
807  * ixv_media_status - Media Ioctl callback
808  *
809  *   Called whenever the user queries the status of
810  *   the interface using ifconfig.
811  ************************************************************************/
812 static void
813 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
814 {
815         struct adapter *adapter = ifp->if_softc;
816
817         INIT_DEBUGOUT("ixv_media_status: begin");
818         IXGBE_CORE_LOCK(adapter);
819         ixv_update_link_status(adapter);
820
821         ifmr->ifm_status = IFM_AVALID;
822         ifmr->ifm_active = IFM_ETHER;
823
824         if (!adapter->link_active) {
825                 IXGBE_CORE_UNLOCK(adapter);
826                 return;
827         }
828
829         ifmr->ifm_status |= IFM_ACTIVE;
830
831         switch (adapter->link_speed) {
832                 case IXGBE_LINK_SPEED_1GB_FULL:
833                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
834                         break;
835                 case IXGBE_LINK_SPEED_10GB_FULL:
836                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
837                         break;
838                 case IXGBE_LINK_SPEED_100_FULL:
839                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
840                         break;
841                 case IXGBE_LINK_SPEED_10_FULL:
842                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
843                         break;
844         }
845
846         IXGBE_CORE_UNLOCK(adapter);
847
848         return;
849 } /* ixv_media_status */
850
851 /************************************************************************
852  * ixv_media_change - Media Ioctl callback
853  *
854  *   Called when the user changes speed/duplex using
855  *   media/mediopt option with ifconfig.
856  ************************************************************************/
857 static int
858 ixv_media_change(struct ifnet *ifp)
859 {
860         struct adapter *adapter = ifp->if_softc;
861         struct ifmedia *ifm = &adapter->media;
862
863         INIT_DEBUGOUT("ixv_media_change: begin");
864
865         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
866                 return (EINVAL);
867
868         switch (IFM_SUBTYPE(ifm->ifm_media)) {
869         case IFM_AUTO:
870                 break;
871         default:
872                 device_printf(adapter->dev, "Only auto media type\n");
873                 return (EINVAL);
874         }
875
876         return (0);
877 } /* ixv_media_change */
878
879
880 /************************************************************************
881  * ixv_negotiate_api
882  *
883  *   Negotiate the Mailbox API with the PF;
884  *   start with the most featured API first.
885  ************************************************************************/
886 static int
887 ixv_negotiate_api(struct adapter *adapter)
888 {
889         struct ixgbe_hw *hw = &adapter->hw;
890         int             mbx_api[] = { ixgbe_mbox_api_11,
891                                       ixgbe_mbox_api_10,
892                                       ixgbe_mbox_api_unknown };
893         int             i = 0;
894
895         while (mbx_api[i] != ixgbe_mbox_api_unknown) {
896                 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
897                         return (0);
898                 i++;
899         }
900
901         return (EINVAL);
902 } /* ixv_negotiate_api */
903
904
905 /************************************************************************
906  * ixv_set_multi - Multicast Update
907  *
908  *   Called whenever multicast address list is updated.
909  ************************************************************************/
910 static void
911 ixv_set_multi(struct adapter *adapter)
912 {
913         u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
914         u8                 *update_ptr;
915         struct ifmultiaddr *ifma;
916         struct ifnet       *ifp = adapter->ifp;
917         int                mcnt = 0;
918
919         IOCTL_DEBUGOUT("ixv_set_multi: begin");
920
921 #if __FreeBSD_version < 800000
922         IF_ADDR_LOCK(ifp);
923 #else
924         if_maddr_rlock(ifp);
925 #endif
926         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
927                 if (ifma->ifma_addr->sa_family != AF_LINK)
928                         continue;
929                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
930                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
931                     IXGBE_ETH_LENGTH_OF_ADDRESS);
932                 mcnt++;
933         }
934 #if __FreeBSD_version < 800000
935         IF_ADDR_UNLOCK(ifp);
936 #else
937         if_maddr_runlock(ifp);
938 #endif
939
940         update_ptr = mta;
941
942         adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
943             ixv_mc_array_itr, TRUE);
944
945         return;
946 } /* ixv_set_multi */
947
948 /************************************************************************
949  * ixv_mc_array_itr
950  *
951  *   An iterator function needed by the multicast shared code.
952  *   It feeds the shared code routine the addresses in the
953  *   array of ixv_set_multi() one by one.
954  ************************************************************************/
955 static u8 *
956 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
957 {
958         u8 *addr = *update_ptr;
959         u8 *newptr;
960         *vmdq = 0;
961
962         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
963         *update_ptr = newptr;
964
965         return addr;
966 } /* ixv_mc_array_itr */
967
/************************************************************************
 * ixv_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.  Reschedules itself every hz ticks
 *   via callout_reset; only escalates to a full watchdog reset when
 *   every queue appears hung.  Must be called with the core lock held.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
        struct adapter  *adapter = arg;
        device_t        dev = adapter->dev;
        struct ix_queue *que = adapter->queues;
        u64             queues = 0;    /* bitmask of queues with pending TX work */
        int             hung = 0;      /* count of queues declared hung */

        mtx_assert(&adapter->core_mtx, MA_OWNED);

        ixv_check_link(adapter);

        /* Stats Update */
        ixv_update_stats(adapter);

        /*
         * Check the TX queues status
         *      - mark hung queues so we don't schedule on them
         *      - watchdog only if all queues show hung
         */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                /* Keep track of queues with work for soft irq */
                if (que->txr->busy)
                        queues |= ((u64)1 << que->me);
                /*
                 * Each time txeof runs without cleaning, but there
                 * are uncleaned descriptors it increments busy. If
                 * we get to the MAX we declare it hung.
                 */
                if (que->busy == IXGBE_QUEUE_HUNG) {
                        ++hung;
                        /* Mark the queue as inactive */
                        adapter->active_queues &= ~((u64)1 << que->me);
                        continue;
                } else {
                        /* Check if we've come back from hung */
                        if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
                }
                /* NOTE(review): threshold check marks txr->busy, while the
                 * hung test above reads que->busy — presumably set elsewhere
                 * from txr state; confirm against the TX cleanup path. */
                if (que->busy >= IXGBE_MAX_TX_BUSY) {
                        device_printf(dev,
                            "Warning queue %d appears to be hung!\n", i);
                        que->txr->busy = IXGBE_QUEUE_HUNG;
                        ++hung;
                }

        }

        /* Only truly watchdog if all queues show hung */
        if (hung == adapter->num_queues)
                goto watchdog;
        else if (queues != 0) { /* Force an IRQ on queues with work */
                ixv_rearm_queues(adapter, queues);
        }

        /* Rearm this timer for one second from now */
        callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

        return;

watchdog:

        /* All queues hung: log, mark the interface down, and reinitialize */
        device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
        adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        adapter->watchdog_events++;
        ixv_init_locked(adapter);
} /* ixv_local_timer */
1041
1042 /************************************************************************
1043  * ixv_update_link_status - Update OS on link state
1044  *
1045  * Note: Only updates the OS on the cached link state.
1046  *       The real check of the hardware only happens with
1047  *       a link interrupt.
1048  ************************************************************************/
1049 static void
1050 ixv_update_link_status(struct adapter *adapter)
1051 {
1052         struct ifnet *ifp = adapter->ifp;
1053         device_t     dev = adapter->dev;
1054
1055         if (adapter->link_up) {
1056                 if (adapter->link_active == FALSE) {
1057                         if (bootverbose)
1058                                 device_printf(dev,"Link is up %d Gbps %s \n",
1059                                     ((adapter->link_speed == 128) ? 10 : 1),
1060                                     "Full Duplex");
1061                         adapter->link_active = TRUE;
1062                         if_link_state_change(ifp, LINK_STATE_UP);
1063                 }
1064         } else { /* Link down */
1065                 if (adapter->link_active == TRUE) {
1066                         if (bootverbose)
1067                                 device_printf(dev,"Link is Down\n");
1068                         if_link_state_change(ifp, LINK_STATE_DOWN);
1069                         adapter->link_active = FALSE;
1070                 }
1071         }
1072
1073         return;
1074 } /* ixv_update_link_status */
1075
1076
/************************************************************************
 * ixv_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 *   Must be called with the core lock held.
 ************************************************************************/
static void
ixv_stop(void *arg)
{
        struct ifnet    *ifp;
        struct adapter  *adapter = arg;
        struct ixgbe_hw *hw = &adapter->hw;

        ifp = adapter->ifp;

        mtx_assert(&adapter->core_mtx, MA_OWNED);

        INIT_DEBUGOUT("ixv_stop: begin\n");
        ixv_disable_intr(adapter);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        hw->mac.ops.reset_hw(hw);
        /* reset_hw leaves the adapter marked stopped; clear the flag so
         * the subsequent stop_adapter call actually executes */
        adapter->hw.adapter_stopped = FALSE;
        hw->mac.ops.stop_adapter(hw);
        /* Stop the watchdog/stats timer */
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

        return;
} /* ixv_stop */
1110
1111
1112 /************************************************************************
1113  * ixv_allocate_pci_resources
1114  ************************************************************************/
1115 static int
1116 ixv_allocate_pci_resources(struct adapter *adapter)
1117 {
1118         device_t dev = adapter->dev;
1119         int      rid;
1120
1121         rid = PCIR_BAR(0);
1122         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1123             RF_ACTIVE);
1124
1125         if (!(adapter->pci_mem)) {
1126                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1127                 return (ENXIO);
1128         }
1129
1130         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1131         adapter->osdep.mem_bus_space_handle =
1132             rman_get_bushandle(adapter->pci_mem);
1133         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1134
1135         /* Pick up the tuneable queues */
1136         adapter->num_queues = ixv_num_queues;
1137
1138         return (0);
1139 } /* ixv_allocate_pci_resources */
1140
/************************************************************************
 * ixv_free_pci_resources
 *
 *   Tear down interrupt handlers and release IRQ, MSI-X and memory
 *   BAR resources.  Safe to call from a partially-failed attach:
 *   the interrupt loops are skipped when adapter->res is NULL.
 ************************************************************************/
static void
ixv_free_pci_resources(struct adapter * adapter)
{
        struct ix_queue *que = adapter->queues;
        device_t        dev = adapter->dev;
        int             rid, memrid;

        memrid = PCIR_BAR(MSIX_82598_BAR);

        /*
         * There is a slight possibility of a failure mode
         * in attach that will result in entering this function
         * before interrupt resources have been initialized, and
         * in that case we do not want to execute the loops below
         * We can detect this reliably by the state of the adapter
         * res pointer.
         */
        if (adapter->res == NULL)
                goto mem;

        /*
         *  Release all msix queue resources:
         */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                /* IRQ rids are offset by one from the MSI-X vector */
                rid = que->msix + 1;
                if (que->tag != NULL) {
                        bus_teardown_intr(dev, que->res, que->tag);
                        que->tag = NULL;
                }
                if (que->res != NULL)
                        bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
        }


        /* Clean the Mailbox interrupt last */
        rid = adapter->vector + 1;

        if (adapter->tag != NULL) {
                bus_teardown_intr(dev, adapter->res, adapter->tag);
                adapter->tag = NULL;
        }
        if (adapter->res != NULL)
                bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
        /* Release the MSI(-X) allocation and BAR mappings */
        pci_release_msi(dev);

        if (adapter->msix_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY, memrid,
                    adapter->msix_mem);

        if (adapter->pci_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
                    adapter->pci_mem);

        return;
} /* ixv_free_pci_resources */
1201
/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 *   Allocates the ifnet, wires up driver entry points (init/ioctl/
 *   transmit), declares capabilities, and registers media callbacks.
 ************************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
        struct ifnet *ifp;

        INIT_DEBUGOUT("ixv_setup_interface: begin");

        ifp = adapter->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL)
                panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_baudrate = 1000000000;
        ifp->if_init = ixv_init;
        ifp->if_softc = adapter;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = ixv_ioctl;
        if_setgetcounterfn(ifp, ixv_get_counter);
        /* TSO parameters */
        ifp->if_hw_tsomax = 65518;
        ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
        ifp->if_hw_tsomaxsegsize = 2048;
        /* Select legacy if_start or multiqueue if_transmit TX paths */
        if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
                ifp->if_start = ixgbe_legacy_start;
                ixv_start_locked = ixgbe_legacy_start_locked;
                ixv_ring_empty = ixgbe_legacy_ring_empty;
        } else {
                ifp->if_transmit = ixgbe_mq_start;
                ifp->if_qflush = ixgbe_qflush;
                ixv_start_locked = ixgbe_mq_start_locked;
                ixv_ring_empty = drbr_empty;
        }
        IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);

        ether_ifattach(ifp, adapter->hw.mac.addr);

        adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        /* Set capability flags */
        ifp->if_capabilities |= IFCAP_HWCSUM
                             |  IFCAP_HWCSUM_IPV6
                             |  IFCAP_TSO
                             |  IFCAP_LRO
                             |  IFCAP_VLAN_HWTAGGING
                             |  IFCAP_VLAN_HWTSO
                             |  IFCAP_VLAN_HWCSUM
                             |  IFCAP_JUMBO_MTU
                             |  IFCAP_VLAN_MTU;

        /* Enable the above capabilities by default */
        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Specify the media types supported by this adapter and register
         * callbacks to update media and link information
         */
        ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
            ixv_media_status);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

        return;
} /* ixv_setup_interface */
1274
1275
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 *
 *   Programs the per-queue TX descriptor ring registers (base address,
 *   length, head/tail) and enables each queue.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
        struct tx_ring  *txr = adapter->tx_rings;
        struct ixgbe_hw *hw = &adapter->hw;


        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                u64 tdba = txr->txdma.dma_paddr;
                u32 txctrl, txdctl;

                /* Set WTHRESH to 8, burst writeback */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

                /* Set the HW Tx Head and Tail indices */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

                /* Set Tx Tail register */
                txr->tail = IXGBE_VFTDT(i);

                /* Set Ring parameters: base (split low/high) and length */
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
                    (tdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
                    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
                /* Disable relaxed-ordering for descriptor writeback */
                txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
                txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

                /* Now enable */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
        }

        return;
} /* ixv_initialize_transmit_units */
1320
1321
/************************************************************************
 * ixv_initialize_rss_mapping
 *
 *   Programs the VF RSS key, redirection table (RETA), and the hash
 *   field selection register (VFMRQC).  When the kernel RSS feature is
 *   enabled the configured key/indirection/hash types are used;
 *   otherwise a random key and round-robin queue mapping are set up.
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32             reta = 0, mrqc, rss_key[10];
        int             queue_id;
        int             i, j;
        u32             rss_hash_config;

        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                /* Fetch the configured RSS key */
                rss_getkey((uint8_t *)&rss_key);
        } else {
                /* set up random bits */
                arc4rand(&rss_key, sizeof(rss_key), 0);
        }

        /* Now fill out hash function seeds */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

        /* Set up the redirection table (64 entries, round-robin j) */
        for (i = 0, j = 0; i < 64; i++, j++) {
                if (j == adapter->num_queues)
                        j = 0;

                if (adapter->feat_en & IXGBE_FEATURE_RSS) {
                        /*
                         * Fetch the RSS bucket id for the given indirection
                         * entry. Cap it at the number of configured buckets
                         * (which is num_queues.)
                         */
                        queue_id = rss_get_indirection_to_bucket(i);
                        queue_id = queue_id % adapter->num_queues;
                } else
                        queue_id = j;

                /*
                 * The low 8 bits are for hash value (n+0);
                 * The next 8 bits are for hash value (n+1), etc.
                 * Pack four 8-bit entries per 32-bit RETA register.
                 */
                reta >>= 8;
                reta |= ((uint32_t)queue_id) << 24;
                if ((i & 3) == 3) {
                        /* Four entries accumulated; flush this RETA word */
                        IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
                        reta = 0;
                }
        }

        /* Perform hash on these packet types */
        if (adapter->feat_en & IXGBE_FEATURE_RSS)
                rss_hash_config = rss_gethashconfig();
        else {
                /*
                 * Disable UDP - IP fragments aren't currently being handled
                 * and so we end up with a mix of 2-tuple and 4-tuple
                 * traffic.
                 */
                rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                                | RSS_HASHTYPE_RSS_TCP_IPV4
                                | RSS_HASHTYPE_RSS_IPV6
                                | RSS_HASHTYPE_RSS_TCP_IPV6;
        }

        /* Translate requested hash types to MRQC field-enable bits;
         * warn about types this VF hardware cannot hash on. */
        mrqc = IXGBE_MRQC_RSSEN;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
                device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
                    __func__);
        if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
                device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
                    __func__);
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
                device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
                    __func__);
        IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1413
1414
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 *
 *   For each RX queue: disables the queue, programs the descriptor
 *   ring (base/length/head/tail) and SRRCTL buffer sizing, re-enables
 *   the queue, and sets the tail pointer (netmap-aware).  Finishes by
 *   configuring RSS and receive checksum offload.
 ************************************************************************/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
        struct rx_ring  *rxr = adapter->rx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ifnet    *ifp = adapter->ifp;
        u32             bufsz, rxcsum, psrtype;

        /* Choose 4K RX buffers for jumbo MTU, 2K otherwise */
        if (ifp->if_mtu > ETHERMTU)
                bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        else
                bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

        psrtype = IXGBE_PSRTYPE_TCPHDR
                | IXGBE_PSRTYPE_UDPHDR
                | IXGBE_PSRTYPE_IPV4HDR
                | IXGBE_PSRTYPE_IPV6HDR
                | IXGBE_PSRTYPE_L2HDR;

        if (adapter->num_queues > 1)
                psrtype |= 1 << 29;

        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

        /* Tell PF our max_frame size */
        if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
                device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
        }

        for (int i = 0; i < adapter->num_queues; i++, rxr++) {
                u64 rdba = rxr->rxdma.dma_paddr;
                u32 reg, rxdctl;

                /* Disable the queue */
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                rxdctl &= ~IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                /* Poll (up to ~10ms) until the hardware reports disabled */
                for (int j = 0; j < 10; j++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                msec_delay(1);
                        else
                                break;
                }
                wmb();
                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
                    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

                /* Reset the ring indices */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

                /* Set up the SRRCTL register: buffer size and
                 * advanced one-buffer descriptor format */
                reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
                reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                reg |= bufsz;
                reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
                IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

                /* Capture Rx Tail index */
                rxr->tail = IXGBE_VFRDT(rxr->me);

                /* Do the queue enabling last */
                rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                /* Poll (up to ~10ms) until the hardware reports enabled */
                for (int k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                break;
                        msec_delay(1);
                }
                wmb();

                /* Set the Tail Pointer */
                /*
                 * In netmap mode, we must preserve the buffers made
                 * available to userspace before the if_init()
                 * (this is true by default on the TX side, because
                 * init makes all buffers available to userspace).
                 *
                 * netmap_reset() and the device specific routines
                 * (e.g. ixgbe_setup_receive_rings()) map these
                 * buffers at the end of the NIC ring, so here we
                 * must set the RDT (tail) register to make sure
                 * they are not overwritten.
                 *
                 * In this driver the NIC ring starts at RDH = 0,
                 * RDT points to the last slot available for reception (?),
                 * so RDT = num_rx_desc - 1 means the whole ring is available.
                 */
#ifdef DEV_NETMAP
                if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
                    (ifp->if_capenable & IFCAP_NETMAP)) {
                        struct netmap_adapter *na = NA(adapter->ifp);
                        struct netmap_kring *kring = &na->rx_rings[i];
                        int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

                        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
                } else
#endif /* DEV_NETMAP */
                        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                            adapter->num_rx_desc - 1);
        }

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        ixv_initialize_rss_mapping(adapter);

        if (adapter->num_queues > 1) {
                /* RSS and RX IPP Checksum are mutually exclusive */
                rxcsum |= IXGBE_RXCSUM_PCSD;
        }

        if (ifp->if_capenable & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

        return;
} /* ixv_initialize_receive_units */
1546
/************************************************************************
 * ixv_setup_vlan_support
 *
 *   Re-enables VLAN tag stripping on all RX queues and repopulates
 *   the hardware VFTA from the driver's shadow copy after a soft
 *   reset.  No-op when no VLANs are registered.
 ************************************************************************/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32             ctrl, vid, vfta, retry;

        /*
         * We get here thru init_locked, meaning
         * a soft reset, this has already cleared
         * the VFTA and other state, so if there
         * have been no vlan's registered do nothing.
         */
        if (adapter->num_vlans == 0)
                return;

        /* Enable the queues */
        for (int i = 0; i < adapter->num_queues; i++) {
                ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                ctrl |= IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
                /*
                 * Let Rx path know that it needs to store VLAN tag
                 * as part of extra mbuf info.
                 */
                adapter->rx_rings[i].vtag_strip = TRUE;
        }

        /*
         * A soft reset zero's out the VFTA, so
         * we need to repopulate it now.
         */
        for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
                if (ixv_shadow_vfta[i] == 0)
                        continue;
                vfta = ixv_shadow_vfta[i];
                /*
                 * Reconstruct the vlan id's
                 * based on the bits set in each
                 * of the array ints.
                 */
                for (int j = 0; j < 32; j++) {
                        retry = 0;
                        if ((vfta & (1 << j)) == 0)
                                continue;
                        vid = (i * 32) + j;
                        /* Call the shared code mailbox routine; retry a
                         * handful of times since the PF mailbox can be
                         * transiently busy, then give up on this vid. */
                        while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
                                if (++retry > 5)
                                        break;
                        }
                }
        }
} /* ixv_setup_vlan_support */
1603
1604 /************************************************************************
1605  * ixv_register_vlan
1606  *
1607  *   Run via a vlan config EVENT, it enables us to use the
1608  *   HW Filter table since we can get the vlan id. This just
1609  *   creates the entry in the soft version of the VFTA, init
1610  *   will repopulate the real table.
1611  ************************************************************************/
1612 static void
1613 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1614 {
1615         struct adapter *adapter = ifp->if_softc;
1616         u16            index, bit;
1617
1618         if (ifp->if_softc != arg) /* Not our event */
1619                 return;
1620
1621         if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1622                 return;
1623
1624         IXGBE_CORE_LOCK(adapter);
1625         index = (vtag >> 5) & 0x7F;
1626         bit = vtag & 0x1F;
1627         ixv_shadow_vfta[index] |= (1 << bit);
1628         ++adapter->num_vlans;
1629         /* Re-init to load the changes */
1630         ixv_init_locked(adapter);
1631         IXGBE_CORE_UNLOCK(adapter);
1632 } /* ixv_register_vlan */
1633
1634 /************************************************************************
1635  * ixv_unregister_vlan
1636  *
1637  *   Run via a vlan unconfig EVENT, remove our entry
1638  *   in the soft vfta.
1639  ************************************************************************/
1640 static void
1641 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1642 {
1643         struct adapter *adapter = ifp->if_softc;
1644         u16            index, bit;
1645
1646         if (ifp->if_softc !=  arg)
1647                 return;
1648
1649         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1650                 return;
1651
1652         IXGBE_CORE_LOCK(adapter);
1653         index = (vtag >> 5) & 0x7F;
1654         bit = vtag & 0x1F;
1655         ixv_shadow_vfta[index] &= ~(1 << bit);
1656         --adapter->num_vlans;
1657         /* Re-init to load the changes */
1658         ixv_init_locked(adapter);
1659         IXGBE_CORE_UNLOCK(adapter);
1660 } /* ixv_unregister_vlan */
1661
1662 /************************************************************************
1663  * ixv_enable_intr
1664  ************************************************************************/
1665 static void
1666 ixv_enable_intr(struct adapter *adapter)
1667 {
1668         struct ixgbe_hw *hw = &adapter->hw;
1669         struct ix_queue *que = adapter->queues;
1670         u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1671
1672
1673         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1674
1675         mask = IXGBE_EIMS_ENABLE_MASK;
1676         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1677         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1678
1679         for (int i = 0; i < adapter->num_queues; i++, que++)
1680                 ixv_enable_queue(adapter, que->msix);
1681
1682         IXGBE_WRITE_FLUSH(hw);
1683
1684         return;
1685 } /* ixv_enable_intr */
1686
1687 /************************************************************************
1688  * ixv_disable_intr
1689  ************************************************************************/
1690 static void
1691 ixv_disable_intr(struct adapter *adapter)
1692 {
1693         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1694         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1695         IXGBE_WRITE_FLUSH(&adapter->hw);
1696
1697         return;
1698 } /* ixv_disable_intr */
1699
1700 /************************************************************************
1701  * ixv_set_ivar
1702  *
1703  *   Setup the correct IVAR register for a particular MSI-X interrupt
1704  *    - entry is the register array entry
1705  *    - vector is the MSI-X vector for this queue
1706  *    - type is RX/TX/MISC
1707  ************************************************************************/
1708 static void
1709 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1710 {
1711         struct ixgbe_hw *hw = &adapter->hw;
1712         u32             ivar, index;
1713
1714         vector |= IXGBE_IVAR_ALLOC_VAL;
1715
1716         if (type == -1) { /* MISC IVAR */
1717                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1718                 ivar &= ~0xFF;
1719                 ivar |= vector;
1720                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1721         } else {          /* RX/TX IVARS */
1722                 index = (16 * (entry & 1)) + (8 * type);
1723                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1724                 ivar &= ~(0xFF << index);
1725                 ivar |= (vector << index);
1726                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1727         }
1728 } /* ixv_set_ivar */
1729
1730 /************************************************************************
1731  * ixv_configure_ivars
1732  ************************************************************************/
1733 static void
1734 ixv_configure_ivars(struct adapter *adapter)
1735 {
1736         struct ix_queue *que = adapter->queues;
1737
1738         for (int i = 0; i < adapter->num_queues; i++, que++) {
1739                 /* First the RX queue entry */
1740                 ixv_set_ivar(adapter, i, que->msix, 0);
1741                 /* ... and the TX */
1742                 ixv_set_ivar(adapter, i, que->msix, 1);
1743                 /* Set an initial value in EITR */
1744                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1745                     IXGBE_EITR_DEFAULT);
1746         }
1747
1748         /* For the mailbox interrupt */
1749         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1750 } /* ixv_configure_ivars */
1751
1752
1753 /************************************************************************
1754  * ixv_get_counter
1755  ************************************************************************/
1756 static uint64_t
1757 ixv_get_counter(struct ifnet *ifp, ift_counter cnt)
1758 {
1759         struct adapter *adapter;
1760
1761         adapter = if_getsoftc(ifp);
1762
1763         switch (cnt) {
1764         case IFCOUNTER_IPACKETS:
1765                 return (adapter->ipackets);
1766         case IFCOUNTER_OPACKETS:
1767                 return (adapter->opackets);
1768         case IFCOUNTER_IBYTES:
1769                 return (adapter->ibytes);
1770         case IFCOUNTER_OBYTES:
1771                 return (adapter->obytes);
1772         case IFCOUNTER_IMCASTS:
1773                 return (adapter->imcasts);
1774         default:
1775                 return (if_get_counter_default(ifp, cnt));
1776         }
1777 } /* ixv_get_counter */
1778
1779 /************************************************************************
1780  * ixv_save_stats
1781  *
1782  *   The VF stats registers never have a truly virgin
1783  *   starting point, so this routine tries to make an
1784  *   artificial one, marking ground zero on attach as
1785  *   it were.
1786  ************************************************************************/
1787 static void
1788 ixv_save_stats(struct adapter *adapter)
1789 {
1790         if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1791                 adapter->stats.vf.saved_reset_vfgprc +=
1792                     adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1793                 adapter->stats.vf.saved_reset_vfgptc +=
1794                     adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1795                 adapter->stats.vf.saved_reset_vfgorc +=
1796                     adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1797                 adapter->stats.vf.saved_reset_vfgotc +=
1798                     adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1799                 adapter->stats.vf.saved_reset_vfmprc +=
1800                     adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1801         }
1802 } /* ixv_save_stats */
1803
1804 /************************************************************************
1805  * ixv_init_stats
1806  ************************************************************************/
1807 static void
1808 ixv_init_stats(struct adapter *adapter)
1809 {
1810         struct ixgbe_hw *hw = &adapter->hw;
1811
1812         adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1813         adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1814         adapter->stats.vf.last_vfgorc |=
1815             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1816
1817         adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1818         adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1819         adapter->stats.vf.last_vfgotc |=
1820             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1821
1822         adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1823
1824         adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1825         adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1826         adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1827         adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1828         adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1829 } /* ixv_init_stats */
1830
/*
 * Roll a 32-bit hardware counter into a 64-bit software counter,
 * detecting wraparound (the new reading being below the last one).
 * Relies on a local 'hw' pointer being in scope at the call site.
 * Wrapped in do/while(0) so it expands safely inside if/else bodies.
 */
#define UPDATE_STAT_32(reg, last, count)                \
do {                                                    \
        u32 current = IXGBE_READ_REG(hw, (reg));        \
        if (current < (last))                           \
                (count) += 0x100000000LL;               \
        (last) = current;                               \
        (count) &= 0xFFFFFFFF00000000LL;                \
        (count) |= current;                             \
} while (0)
1840
/*
 * Roll a 36-bit hardware counter (split across LSB/MSB registers)
 * into a 64-bit software counter, detecting 36-bit wraparound.
 * Relies on a local 'hw' pointer being in scope at the call site.
 * Wrapped in do/while(0) so it expands safely inside if/else bodies.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
do {                                                    \
        u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));        \
        u64 cur_msb = IXGBE_READ_REG(hw, (msb));        \
        u64 current = ((cur_msb << 32) | cur_lsb);      \
        if (current < (last))                           \
                (count) += 0x1000000000LL;              \
        (last) = current;                               \
        (count) &= 0xFFFFFFF000000000LL;                \
        (count) |= current;                             \
} while (0)
1852
1853 /************************************************************************
1854  * ixv_update_stats - Update the board statistics counters.
1855  ************************************************************************/
1856 void
1857 ixv_update_stats(struct adapter *adapter)
1858 {
1859         struct ixgbe_hw *hw = &adapter->hw;
1860         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1861
1862         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1863             adapter->stats.vf.vfgprc);
1864         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1865             adapter->stats.vf.vfgptc);
1866         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1867             adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1868         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1869             adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1870         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1871             adapter->stats.vf.vfmprc);
1872
1873         /* Fill out the OS statistics structure */
1874         IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1875         IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1876         IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1877         IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1878         IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1879 } /* ixv_update_stats */
1880
1881 /************************************************************************
1882  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1883  ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
        device_t                dev = adapter->dev;
        struct tx_ring          *txr = adapter->tx_rings;
        struct rx_ring          *rxr = adapter->rx_rings;
        struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
        struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
        struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
        struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
        struct sysctl_oid       *stat_node, *queue_node;
        struct sysctl_oid_list  *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
        char                    namebuf[QUEUE_NAME_LEN];

        /* Driver Statistics (software counters kept by the driver) */
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
            CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
            CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
            CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
            CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");

        /*
         * Per-queue TX statistics, one "queue%d" node per TX ring.
         * NOTE(review): the RX loop below adds a node with the same
         * "queue%d" name -- verify sysctl(9) merges rather than
         * duplicates these nodes.
         */
        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
                queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
                    CTLFLAG_RD, NULL, "Queue Name");
                queue_list = SYSCTL_CHILDREN(queue_node);

                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
                    CTLFLAG_RD, &(adapter->queues[i].irqs), "IRQs on queue");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
                    CTLFLAG_RD, &(txr->no_tx_dma_setup),
                    "Driver Tx DMA failure in Tx");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
                    CTLFLAG_RD, &(txr->no_desc_avail),
                    "Not-enough-descriptors count: TX");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
                    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
                    CTLFLAG_RD, &(txr->br->br_drops),
                    "Packets dropped in buf_ring");
        }

        /* Per-queue RX statistics under the same "queue%d" names */
        for (int i = 0; i < adapter->num_queues; i++, rxr++) {
                snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
                queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
                    CTLFLAG_RD, NULL, "Queue Name");
                queue_list = SYSCTL_CHILDREN(queue_node);

                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
                    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
                    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
                    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
        }

        /* MAC statistics mirrored from the hardware registers */
        stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
            CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
        stat_list = SYSCTL_CHILDREN(stat_node);

        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
            CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
            CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
            CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
            CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
            CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */
1960
1961 /************************************************************************
1962  * ixv_set_sysctl_value
1963  ************************************************************************/
1964 static void
1965 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
1966         const char *description, int *limit, int value)
1967 {
1968         *limit = value;
1969         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
1970             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
1971             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
1972 } /* ixv_set_sysctl_value */
1973
1974 /************************************************************************
1975  * ixv_print_debug_info
1976  *
1977  *   Called only when em_display_debug_stats is enabled.
1978  *   Provides a way to take a look at important statistics
1979  *   maintained by the driver and hardware.
1980  ************************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
        device_t        dev = adapter->dev;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ix_queue *que = adapter->queues;
        struct rx_ring  *rxr;
        struct tx_ring  *txr;
        struct lro_ctrl *lro;

        /* Hardware error-byte counter */
        device_printf(dev, "Error Byte Count = %u \n",
            IXGBE_READ_REG(hw, IXGBE_ERRBC));

        /* Dump per-queue IRQ, RX, LRO, and TX counters */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                txr = que->txr;
                rxr = que->rxr;
                lro = &rxr->lro;
                device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
                    que->msix, (long)que->irqs);
                device_printf(dev, "RX(%d) Packets Received: %lld\n",
                    rxr->me, (long long)rxr->rx_packets);
                device_printf(dev, "RX(%d) Bytes Received: %lu\n",
                    rxr->me, (long)rxr->rx_bytes);
                device_printf(dev, "RX(%d) LRO Queued= %lld\n",
                    rxr->me, (long long)lro->lro_queued);
                device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
                    rxr->me, (long long)lro->lro_flushed);
                device_printf(dev, "TX(%d) Packets Sent: %lu\n",
                    txr->me, (long)txr->total_packets);
                device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
                    txr->me, (long)txr->no_desc_avail);
        }

        /* Mailbox (link) interrupt count */
        device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
} /* ixv_print_debug_info */
2016
2017 /************************************************************************
2018  * ixv_sysctl_debug
2019  ************************************************************************/
2020 static int
2021 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2022 {
2023         struct adapter *adapter;
2024         int            error, result;
2025
2026         result = -1;
2027         error = sysctl_handle_int(oidp, &result, 0, req);
2028
2029         if (error || !req->newptr)
2030                 return (error);
2031
2032         if (result == 1) {
2033                 adapter = (struct adapter *)arg1;
2034                 ixv_print_debug_info(adapter);
2035         }
2036
2037         return error;
2038 } /* ixv_sysctl_debug */
2039
2040 /************************************************************************
2041  * ixv_init_device_features
2042  ************************************************************************/
2043 static void
2044 ixv_init_device_features(struct adapter *adapter)
2045 {
2046         adapter->feat_cap = IXGBE_FEATURE_NETMAP
2047                           | IXGBE_FEATURE_VF
2048                           | IXGBE_FEATURE_RSS
2049                           | IXGBE_FEATURE_LEGACY_TX;
2050
2051         /* A tad short on feature flags for VFs, atm. */
2052         switch (adapter->hw.mac.type) {
2053         case ixgbe_mac_82599_vf:
2054                 break;
2055         case ixgbe_mac_X540_vf:
2056                 break;
2057         case ixgbe_mac_X550_vf:
2058         case ixgbe_mac_X550EM_x_vf:
2059         case ixgbe_mac_X550EM_a_vf:
2060                 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2061                 break;
2062         default:
2063                 break;
2064         }
2065
2066         /* Enabled by default... */
2067         /* Is a virtual function (VF) */
2068         if (adapter->feat_cap & IXGBE_FEATURE_VF)
2069                 adapter->feat_en |= IXGBE_FEATURE_VF;
2070         /* Netmap */
2071         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2072                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2073         /* Receive-Side Scaling (RSS) */
2074         if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2075                 adapter->feat_en |= IXGBE_FEATURE_RSS;
2076         /* Needs advanced context descriptor regardless of offloads req'd */
2077         if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2078                 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2079
2080         /* Enabled via sysctl... */
2081         /* Legacy (single queue) transmit */
2082         if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2083             ixv_enable_legacy_tx)
2084                 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2085 } /* ixv_init_device_features */
2086
2087 /************************************************************************
2088  * ixv_shutdown - Shutdown entry point
2089  ************************************************************************/
2090 static int
2091 ixv_shutdown(device_t dev)
2092 {
2093         struct adapter *adapter = device_get_softc(dev);
2094         IXGBE_CORE_LOCK(adapter);
2095         ixv_stop(adapter);
2096         IXGBE_CORE_UNLOCK(adapter);
2097
2098         return (0);
2099 } /* ixv_shutdown */
2100
2101
2102 /************************************************************************
2103  * ixv_ioctl - Ioctl entry point
2104  *
2105  *   Called when the user wants to configure the interface.
2106  *
2107  *   return 0 on success, positive on failure
2108  ************************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct adapter *adapter = ifp->if_softc;
        struct ifreq   *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        struct ifaddr  *ifa = (struct ifaddr *)data;
        bool           avoid_reset = FALSE;
#endif
        int            error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                 * Calling init results in link renegotiation,
                 * so we avoid doing it when possible.
                 */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixv_init(adapter);
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
#endif
        /*
         * NOTE(review): the break above is compiled only when INET or
         * INET6 is defined; with neither option set, SIOCSIFADDR falls
         * through into SIOCSIFMTU.  Confirm that is intended for such
         * kernel configurations.
         */
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
                /* Reject MTUs the hardware cannot handle */
                if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
                        error = EINVAL;
                } else {
                        IXGBE_CORE_LOCK(adapter);
                        ifp->if_mtu = ifr->ifr_mtu;
                        /* Recompute max frame and re-init if running */
                        adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixv_init_locked(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
                /* Bring the interface up or down to match IFF_UP */
                IXGBE_CORE_LOCK(adapter);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixv_init_locked(adapter);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixv_stop(adapter);
                adapter->if_flags = ifp->if_flags;
                IXGBE_CORE_UNLOCK(adapter);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
                /* Reprogram the multicast filter with interrupts masked */
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXGBE_CORE_LOCK(adapter);
                        ixv_disable_intr(adapter);
                        ixv_set_multi(adapter);
                        ixv_enable_intr(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
                break;
        case SIOCSIFCAP:
        {
                /* Toggle only the capabilities the caller changed */
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
                if (mask & IFCAP_HWCSUM)
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                if (mask & IFCAP_TSO4)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                /* Re-init so the new capabilities take effect */
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXGBE_CORE_LOCK(adapter);
                        ixv_init_locked(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                VLAN_CAPABILITIES(ifp);
                break;
        }

        default:
                IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
} /* ixv_ioctl */
2216
2217 /************************************************************************
2218  * ixv_init
2219  ************************************************************************/
static void
ixv_init(void *arg)
{
        struct adapter *adapter = arg;

        /* Locked wrapper around the real init routine */
        IXGBE_CORE_LOCK(adapter);
        ixv_init_locked(adapter);
        IXGBE_CORE_UNLOCK(adapter);
} /* ixv_init */
2231
2232
2233 /************************************************************************
2234  * ixv_handle_que
2235  ************************************************************************/
2236 static void
2237 ixv_handle_que(void *context, int pending)
2238 {
2239         struct ix_queue *que = context;
2240         struct adapter  *adapter = que->adapter;
2241         struct tx_ring  *txr = que->txr;
2242         struct ifnet    *ifp = adapter->ifp;
2243         bool            more;
2244
2245         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2246                 more = ixgbe_rxeof(que);
2247                 IXGBE_TX_LOCK(txr);
2248                 ixgbe_txeof(txr);
2249                 if (!ixv_ring_empty(ifp, txr->br))
2250                         ixv_start_locked(ifp, txr);
2251                 IXGBE_TX_UNLOCK(txr);
2252                 if (more) {
2253                         taskqueue_enqueue(que->tq, &que->que_task);
2254                         return;
2255                 }
2256         }
2257
2258         /* Re-enable this interrupt */
2259         ixv_enable_queue(adapter, que->msix);
2260
2261         return;
2262 } /* ixv_handle_que */
2263
2264 /************************************************************************
2265  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2266  ************************************************************************/
2267 static int
2268 ixv_allocate_msix(struct adapter *adapter)
2269 {
2270         device_t        dev = adapter->dev;
2271         struct ix_queue *que = adapter->queues;
2272         struct tx_ring  *txr = adapter->tx_rings;
2273         int             error, msix_ctrl, rid, vector = 0;
2274
2275         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2276                 rid = vector + 1;
2277                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2278                     RF_SHAREABLE | RF_ACTIVE);
2279                 if (que->res == NULL) {
2280                         device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
2281                             vector);
2282                         return (ENXIO);
2283                 }
2284                 /* Set the handler function */
2285                 error = bus_setup_intr(dev, que->res,
2286                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2287                     ixv_msix_que, que, &que->tag);
2288                 if (error) {
2289                         que->res = NULL;
2290                         device_printf(dev, "Failed to register QUE handler");
2291                         return (error);
2292                 }
2293 #if __FreeBSD_version >= 800504
2294                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2295 #endif
2296                 que->msix = vector;
2297                 adapter->active_queues |= (u64)(1 << que->msix);
2298                 /*
2299                  * Bind the MSI-X vector, and thus the
2300                  * ring to the corresponding CPU.
2301                  */
2302                 if (adapter->num_queues > 1)
2303                         bus_bind_intr(dev, que->res, i);
2304                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2305                 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
2306                 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
2307                     taskqueue_thread_enqueue, &que->tq);
2308                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2309                     device_get_nameunit(adapter->dev));
2310         }
2311
2312         /* and Mailbox */
2313         rid = vector + 1;
2314         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2315             RF_SHAREABLE | RF_ACTIVE);
2316         if (!adapter->res) {
2317                 device_printf(dev,
2318                     "Unable to allocate bus resource: MBX interrupt [%d]\n",
2319                     rid);
2320                 return (ENXIO);
2321         }
2322         /* Set the mbx handler function */
2323         error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
2324             NULL, ixv_msix_mbx, adapter, &adapter->tag);
2325         if (error) {
2326                 adapter->res = NULL;
2327                 device_printf(dev, "Failed to register LINK handler");
2328                 return (error);
2329         }
2330 #if __FreeBSD_version >= 800504
2331         bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
2332 #endif
2333         adapter->vector = vector;
2334         /* Tasklets for Mailbox */
2335         TASK_INIT(&adapter->link_task, 0, ixv_handle_link, adapter);
2336         adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
2337             taskqueue_thread_enqueue, &adapter->tq);
2338         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
2339             device_get_nameunit(adapter->dev));
2340         /*
2341          * Due to a broken design QEMU will fail to properly
2342          * enable the guest for MSI-X unless the vectors in
2343          * the table are all set up, so we must rewrite the
2344          * ENABLE in the MSI-X control register again at this
2345          * point to cause it to successfully initialize us.
2346          */
2347         if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
2348                 pci_find_cap(dev, PCIY_MSIX, &rid);
2349                 rid += PCIR_MSIX_CTRL;
2350                 msix_ctrl = pci_read_config(dev, rid, 2);
2351                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2352                 pci_write_config(dev, rid, msix_ctrl, 2);
2353         }
2354
2355         return (0);
2356 } /* ixv_allocate_msix */
2357
2358 /************************************************************************
2359  * ixv_configure_interrupts - Setup MSI-X resources
2360  *
2361  *   Note: The VF device MUST use MSI-X, there is no fallback.
2362  ************************************************************************/
2363 static int
2364 ixv_configure_interrupts(struct adapter *adapter)
2365 {
2366         device_t dev = adapter->dev;
2367         int      rid, want, msgs;
2368
2369         /* Must have at least 2 MSI-X vectors */
2370         msgs = pci_msix_count(dev);
2371         if (msgs < 2)
2372                 goto out;
2373         rid = PCIR_BAR(3);
2374         adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2375             RF_ACTIVE);
2376         if (adapter->msix_mem == NULL) {
2377                 device_printf(adapter->dev, "Unable to map MSI-X table \n");
2378                 goto out;
2379         }
2380
2381         /*
2382          * Want vectors for the queues,
2383          * plus an additional for mailbox.
2384          */
2385         want = adapter->num_queues + 1;
2386         if (want > msgs) {
2387                 want = msgs;
2388                 adapter->num_queues = msgs - 1;
2389         } else
2390                 msgs = want;
2391         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2392                 device_printf(adapter->dev,
2393                     "Using MSI-X interrupts with %d vectors\n", want);
2394                 /* reflect correct sysctl value */
2395                 ixv_num_queues = adapter->num_queues;
2396
2397                 return (0);
2398         }
2399         /* Release in case alloc was insufficient */
2400         pci_release_msi(dev);
2401 out:
2402         if (adapter->msix_mem != NULL) {
2403                 bus_release_resource(dev, SYS_RES_MEMORY, rid,
2404                     adapter->msix_mem);
2405                 adapter->msix_mem = NULL;
2406         }
2407         device_printf(adapter->dev, "MSI-X config error\n");
2408
2409         return (ENXIO);
2410 } /* ixv_configure_interrupts */
2411
2412
2413 /************************************************************************
2414  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2415  *
2416  *   Done outside of interrupt context since the driver might sleep
2417  ************************************************************************/
2418 static void
2419 ixv_handle_link(void *context, int pending)
2420 {
2421         struct adapter *adapter = context;
2422
2423         adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2424             &adapter->link_up, FALSE);
2425         ixv_update_link_status(adapter);
2426 } /* ixv_handle_link */
2427
2428 /************************************************************************
2429  * ixv_check_link - Used in the local timer to poll for link changes
2430  ************************************************************************/
2431 static void
2432 ixv_check_link(struct adapter *adapter)
2433 {
2434         adapter->hw.mac.get_link_status = TRUE;
2435
2436         adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2437             &adapter->link_up, FALSE);
2438         ixv_update_link_status(adapter);
2439 } /* ixv_check_link */
2440