1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /************************************************************************
44  * Driver version
45  ************************************************************************/
46 char ixv_driver_version[] = "1.5.13-k";
47
48 /************************************************************************
49  * PCI Device ID Table
50  *
51  *   Used by probe to select devices to load on
52  *   Last field stores an index into ixv_strings
53  *   Last entry must be all 0s
54  *
55  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56  ************************************************************************/
57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
58 {
59         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
62         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
63         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
64         /* required last entry */
65         {0, 0, 0, 0, 0}
66 };
67
68 /************************************************************************
69  * Table of branding strings
70  ************************************************************************/
71 static char *ixv_strings[] = {
72         "Intel(R) PRO/10GbE Virtual Function Network Driver"
73 };
74
75 /************************************************************************
76  * Function prototypes
77  ************************************************************************/
78 static int      ixv_probe(device_t);
79 static int      ixv_attach(device_t);
80 static int      ixv_detach(device_t);
81 static int      ixv_shutdown(device_t);
82 static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
83 static void     ixv_init(void *);
84 static void     ixv_init_locked(struct adapter *);
85 static void     ixv_stop(void *);
86 static uint64_t ixv_get_counter(struct ifnet *, ift_counter);
87 static void     ixv_init_device_features(struct adapter *);
88 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int      ixv_media_change(struct ifnet *);
90 static int      ixv_allocate_pci_resources(struct adapter *);
91 static int      ixv_allocate_msix(struct adapter *);
92 static int      ixv_configure_interrupts(struct adapter *);
93 static void     ixv_free_pci_resources(struct adapter *);
94 static void     ixv_local_timer(void *);
95 static void     ixv_setup_interface(device_t, struct adapter *);
96
97 static void     ixv_initialize_transmit_units(struct adapter *);
98 static void     ixv_initialize_receive_units(struct adapter *);
99 static void     ixv_initialize_rss_mapping(struct adapter *);
100 static void     ixv_check_link(struct adapter *);
101
102 static void     ixv_enable_intr(struct adapter *);
103 static void     ixv_disable_intr(struct adapter *);
104 static void     ixv_set_multi(struct adapter *);
105 static void     ixv_update_link_status(struct adapter *);
106 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
107 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
108 static void     ixv_configure_ivars(struct adapter *);
109 static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
110
111 static void     ixv_setup_vlan_support(struct adapter *);
112 static void     ixv_register_vlan(void *, struct ifnet *, u16);
113 static void     ixv_unregister_vlan(void *, struct ifnet *, u16);
114
115 static void     ixv_save_stats(struct adapter *);
116 static void     ixv_init_stats(struct adapter *);
117 static void     ixv_update_stats(struct adapter *);
118 static void     ixv_add_stats_sysctls(struct adapter *);
119 static void     ixv_set_sysctl_value(struct adapter *, const char *,
120                                      const char *, int *, int);
121
122 /* The MSI-X Interrupt handlers */
123 static void     ixv_msix_que(void *);
124 static void     ixv_msix_mbx(void *);
125
126 /* Deferred interrupt tasklets */
127 static void     ixv_handle_que(void *, int);
128 static void     ixv_handle_link(void *, int);
129
130 /************************************************************************
131  * FreeBSD Device Interface Entry Points
132  ************************************************************************/
133 static device_method_t ixv_methods[] = {
134         /* Device interface */
135         DEVMETHOD(device_probe, ixv_probe),
136         DEVMETHOD(device_attach, ixv_attach),
137         DEVMETHOD(device_detach, ixv_detach),
138         DEVMETHOD(device_shutdown, ixv_shutdown),
139         DEVMETHOD_END
140 };
141
142 static driver_t ixv_driver = {
143         "ixv", ixv_methods, sizeof(struct adapter),
144 };
145
146 devclass_t ixv_devclass;
147 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
148 MODULE_DEPEND(ixv, pci, 1, 1, 1);
149 MODULE_DEPEND(ixv, ether, 1, 1, 1);
150 MODULE_DEPEND(ixv, netmap, 1, 1, 1);
151
152 /*
153  * TUNEABLE PARAMETERS:
154  */
155
156 /* Number of Queues - do not exceed MSI-X vectors - 1 */
157 static int ixv_num_queues = 1;
158 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
159
160 /*
161  * AIM: Adaptive Interrupt Moderation
162  * which means that the interrupt rate
163  * is varied over time based on the
164  * traffic for that interrupt vector
165  */
166 static int ixv_enable_aim = FALSE;
167 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
168
169 /* How many packets rxeof tries to clean at a time */
170 static int ixv_rx_process_limit = 256;
171 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
172
173 /* How many packets txeof tries to clean at a time */
174 static int ixv_tx_process_limit = 256;
175 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
176
177 /* Flow control setting, default to full */
178 static int ixv_flow_control = ixgbe_fc_full;
179 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
180
181 /*
182  * Header split: this causes the hardware to DMA
183  * the header into a separate mbuf from the payload.
184  * It can be a performance win in some workloads, but
185  * in others it actually hurts; it is off by default.
186  */
187 static int ixv_header_split = FALSE;
188 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
189
190 /*
191  * Number of TX descriptors per ring;
192  * set higher than RX as this seems to be
193  * the better-performing choice.
194  */
195 static int ixv_txd = DEFAULT_TXD;
196 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
197
198 /* Number of RX descriptors per ring */
199 static int ixv_rxd = DEFAULT_RXD;
200 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
201
202 /* Legacy Transmit (single queue) */
203 static int ixv_enable_legacy_tx = 0;
204 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
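
/*
 * Illustrative example: the hw.ixv.* tunables above are read from the
 * kernel environment when the module loads, so they can be set in
 * /boot/loader.conf (values below are examples only), e.g.:
 *
 *   hw.ixv.num_queues=2
 *   hw.ixv.enable_aim=1
 *   hw.ixv.rx_process_limit=512
 */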
205
206 /*
207  * Shadow VFTA table; this is needed because
208  * the real filter table gets cleared during
209  * a soft reset and we need to repopulate it.
210  */
211 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
212
213 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
214 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
215
216 /************************************************************************
217  * ixv_probe - Device identification routine
218  *   Determines if the driver should be loaded on this
219  *   adapter based on its PCI vendor/device ID.
220  *   adapter based on its PCI vendor/device ID.
221  *
222  *   return BUS_PROBE_DEFAULT on success, positive on failure
223  ************************************************************************/
224 static int
225 ixv_probe(device_t dev)
226 {
227         ixgbe_vendor_info_t *ent;
228         u16                 pci_vendor_id = 0;
229         u16                 pci_device_id = 0;
230         u16                 pci_subvendor_id = 0;
231         u16                 pci_subdevice_id = 0;
232         char                adapter_name[256];
233
234
235         pci_vendor_id = pci_get_vendor(dev);
236         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
237                 return (ENXIO);
238
239         pci_device_id = pci_get_device(dev);
240         pci_subvendor_id = pci_get_subvendor(dev);
241         pci_subdevice_id = pci_get_subdevice(dev);
242
243         ent = ixv_vendor_info_array;
244         while (ent->vendor_id != 0) {
245                 if ((pci_vendor_id == ent->vendor_id) &&
246                     (pci_device_id == ent->device_id) &&
247                     ((pci_subvendor_id == ent->subvendor_id) ||
248                      (ent->subvendor_id == 0)) &&
249                     ((pci_subdevice_id == ent->subdevice_id) ||
250                      (ent->subdevice_id == 0))) {
251                         sprintf(adapter_name, "%s, Version - %s",
252                             ixv_strings[ent->index], ixv_driver_version);
253                         device_set_desc_copy(dev, adapter_name);
254                         return (BUS_PROBE_DEFAULT);
255                 }
256                 ent++;
257         }
258
259         return (ENXIO);
260 } /* ixv_probe */
261
262 /************************************************************************
263  * ixv_attach - Device initialization routine
264  *
265  *   Called when the driver is being loaded.
266  *   Identifies the type of hardware, allocates all resources
267  *   and initializes the hardware.
268  *
269  *   return 0 on success, positive on failure
270  ************************************************************************/
271 static int
272 ixv_attach(device_t dev)
273 {
274         struct adapter  *adapter;
275         struct ixgbe_hw *hw;
276         int             error = 0;
277
278         INIT_DEBUGOUT("ixv_attach: begin");
279
280         /*
281          * Make sure BUSMASTER is set; on a VM under
282          * KVM it may not be, and that will break things.
283          */
284         pci_enable_busmaster(dev);
285
286         /* Allocate, clear, and link in our adapter structure */
287         adapter = device_get_softc(dev);
288         adapter->dev = dev;
289         adapter->hw.back = adapter;
290         hw = &adapter->hw;
291
292         adapter->init_locked = ixv_init_locked;
293         adapter->stop_locked = ixv_stop;
294
295         /* Core Lock Init*/
296         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
297
298         /* Do base PCI setup - map BAR0 */
299         if (ixv_allocate_pci_resources(adapter)) {
300                 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
301                 error = ENXIO;
302                 goto err_out;
303         }
304
305         /* SYSCTL APIs */
306         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
307             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
308             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
309             "Debug Info");
310
311         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
312             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
313             "enable_aim", CTLFLAG_RW, &ixv_enable_aim, 1,
314             "Interrupt Moderation");
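
        /*
         * Illustrative usage note: these OIDs live under the device's
         * sysctl tree and can be read or toggled at run time (unit 0 is
         * just an example), e.g.:
         *
         *   # sysctl dev.ixv.0.debug=1
         *   # sysctl dev.ixv.0.enable_aim=1
         */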
315
316         /* Set up the timer callout */
317         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
318
319         /* Save off the information about this board */
320         hw->vendor_id = pci_get_vendor(dev);
321         hw->device_id = pci_get_device(dev);
322         hw->revision_id = pci_get_revid(dev);
323         hw->subsystem_vendor_id = pci_get_subvendor(dev);
324         hw->subsystem_device_id = pci_get_subdevice(dev);
325
326         /* A subset of set_mac_type */
327         switch (hw->device_id) {
328         case IXGBE_DEV_ID_82599_VF:
329                 hw->mac.type = ixgbe_mac_82599_vf;
330                 break;
331         case IXGBE_DEV_ID_X540_VF:
332                 hw->mac.type = ixgbe_mac_X540_vf;
333                 break;
334         case IXGBE_DEV_ID_X550_VF:
335                 hw->mac.type = ixgbe_mac_X550_vf;
336                 break;
337         case IXGBE_DEV_ID_X550EM_X_VF:
338                 hw->mac.type = ixgbe_mac_X550EM_x_vf;
339                 break;
340         case IXGBE_DEV_ID_X550EM_A_VF:
341                 hw->mac.type = ixgbe_mac_X550EM_a_vf;
342                 break;
343         default:
344                 /* Shouldn't get here since probe succeeded */
345                 device_printf(dev, "Unknown device ID!\n");
346                 error = ENXIO;
347                 goto err_out;
348                 break;
349         }
350
351         ixv_init_device_features(adapter);
352
353         /* Initialize the shared code */
354         error = ixgbe_init_ops_vf(hw);
355         if (error) {
356                 device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
357                 error = EIO;
358                 goto err_out;
359         }
360
361         /* Setup the mailbox */
362         ixgbe_init_mbx_params_vf(hw);
363
364         /* Set the right number of segments */
365         adapter->num_segs = IXGBE_82599_SCATTER;
366
367         error = hw->mac.ops.reset_hw(hw);
368         if (error == IXGBE_ERR_RESET_FAILED)
369                 device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
370         else if (error)
371                 device_printf(dev, "...reset_hw() failed with error %d\n",
372                     error);
373         if (error) {
374                 error = EIO;
375                 goto err_out;
376         }
377
378         error = hw->mac.ops.init_hw(hw);
379         if (error) {
380                 device_printf(dev, "...init_hw() failed with error %d\n",
381                     error);
382                 error = EIO;
383                 goto err_out;
384         }
385
386         /* Negotiate mailbox API version */
387         error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_12);
388         if (error) {
389                 device_printf(dev, "MBX API 1.2 negotiation failed! Error %d\n",
390                     error);
391                 error = EIO;
392                 goto err_out;
393         }
394
395         /* If no mac address was assigned, make a random one */
396         if (!ixv_check_ether_addr(hw->mac.addr)) {
397                 u8 addr[ETHER_ADDR_LEN];
398                 arc4rand(&addr, sizeof(addr), 0);
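                /*
                 * Force a unicast, locally-administered address: clear
                 * the multicast bit (bit 0 of the first octet) and set
                 * the locally-administered bit (bit 1).  For example, a
                 * random first octet of 0x3D becomes 0x3E.
                 */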
399                 addr[0] &= 0xFE;
400                 addr[0] |= 0x02;
401                 bcopy(addr, hw->mac.addr, sizeof(addr));
402                 bcopy(addr, hw->mac.perm_addr, sizeof(addr));
403         }
404
405         /* Register for VLAN events */
406         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
407             ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
408         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
409             ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
410
411         /* Sysctls for limiting the amount of work done in the taskqueues */
412         ixv_set_sysctl_value(adapter, "rx_processing_limit",
413             "max number of rx packets to process",
414             &adapter->rx_process_limit, ixv_rx_process_limit);
415
416         ixv_set_sysctl_value(adapter, "tx_processing_limit",
417             "max number of tx packets to process",
418             &adapter->tx_process_limit, ixv_tx_process_limit);
419
420         /* Do descriptor calc and sanity checks */
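        /*
         * Illustrative note: both the advanced TX and RX descriptors are
         * 16 bytes, so assuming DBA_ALIGN is 128 (as defined in ixgbe.h),
         * the requested counts must keep each ring a multiple of 128
         * bytes, i.e. a multiple of 8 descriptors, or the defaults below
         * are used instead.
         */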
421         if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
422             ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
423                 device_printf(dev, "TXD config issue, using default!\n");
424                 adapter->num_tx_desc = DEFAULT_TXD;
425         } else
426                 adapter->num_tx_desc = ixv_txd;
427
428         if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
429             ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
430                 device_printf(dev, "RXD config issue, using default!\n");
431                 adapter->num_rx_desc = DEFAULT_RXD;
432         } else
433                 adapter->num_rx_desc = ixv_rxd;
434
435         /* Setup MSI-X */
436         error = ixv_configure_interrupts(adapter);
437         if (error)
438                 goto err_out;
439
440         /* Allocate our TX/RX Queues */
441         if (ixgbe_allocate_queues(adapter)) {
442                 device_printf(dev, "ixgbe_allocate_queues() failed!\n");
443                 error = ENOMEM;
444                 goto err_out;
445         }
446
447         /* Setup OS specific network interface */
448         ixv_setup_interface(dev, adapter);
449
450         error = ixv_allocate_msix(adapter);
451         if (error) {
452                 device_printf(dev, "ixv_allocate_msix() failed!\n");
453                 goto err_late;
454         }
455
456         /* Do the stats setup */
457         ixv_save_stats(adapter);
458         ixv_init_stats(adapter);
459         ixv_add_stats_sysctls(adapter);
460
461         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
462                 ixgbe_netmap_attach(adapter);
463
464         INIT_DEBUGOUT("ixv_attach: end");
465
466         return (0);
467
468 err_late:
469         ixgbe_free_transmit_structures(adapter);
470         ixgbe_free_receive_structures(adapter);
471         free(adapter->queues, M_DEVBUF);
472 err_out:
473         ixv_free_pci_resources(adapter);
474         IXGBE_CORE_LOCK_DESTROY(adapter);
475
476         return (error);
477 } /* ixv_attach */
478
479 /************************************************************************
480  * ixv_detach - Device removal routine
481  *
482  *   Called when the driver is being removed.
483  *   Stops the adapter and deallocates all the resources
484  *   that were allocated for driver operation.
485  *
486  *   return 0 on success, positive on failure
487  ************************************************************************/
488 static int
489 ixv_detach(device_t dev)
490 {
491         struct adapter  *adapter = device_get_softc(dev);
492         struct ix_queue *que = adapter->queues;
493
494         INIT_DEBUGOUT("ixv_detach: begin");
495
496         /* Make sure VLANs are not using the driver */
497         if (adapter->ifp->if_vlantrunk != NULL) {
498                 device_printf(dev, "Vlan in use, detach first\n");
499                 return (EBUSY);
500         }
501
502         ether_ifdetach(adapter->ifp);
503         IXGBE_CORE_LOCK(adapter);
504         ixv_stop(adapter);
505         IXGBE_CORE_UNLOCK(adapter);
506
507         for (int i = 0; i < adapter->num_queues; i++, que++) {
508                 if (que->tq) {
509                         struct tx_ring  *txr = que->txr;
510                         taskqueue_drain(que->tq, &txr->txq_task);
511                         taskqueue_drain(que->tq, &que->que_task);
512                         taskqueue_free(que->tq);
513                 }
514         }
515
516         /* Drain the Mailbox(link) queue */
517         if (adapter->tq) {
518                 taskqueue_drain(adapter->tq, &adapter->link_task);
519                 taskqueue_free(adapter->tq);
520         }
521
522         /* Unregister VLAN events */
523         if (adapter->vlan_attach != NULL)
524                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
525         if (adapter->vlan_detach != NULL)
526                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
527
528         callout_drain(&adapter->timer);
529
530         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
531                 netmap_detach(adapter->ifp);
532
533         ixv_free_pci_resources(adapter);
534         bus_generic_detach(dev);
535         if_free(adapter->ifp);
536
537         ixgbe_free_transmit_structures(adapter);
538         ixgbe_free_receive_structures(adapter);
539         free(adapter->queues, M_DEVBUF);
540
541         IXGBE_CORE_LOCK_DESTROY(adapter);
542
543         return (0);
544 } /* ixv_detach */
545
546 /************************************************************************
547  * ixv_init_locked - Init entry point
548  *
549  *   Used in two ways: it is used by the stack as the init entry
550  *   point in the network interface structure, and it is used by
551  *   the driver as a hw/sw initialization routine to bring the
552  *   adapter to a consistent state.
553  *
554  *   Note: returns void; failures are reported with device_printf().
555  ************************************************************************/
556 void
557 ixv_init_locked(struct adapter *adapter)
558 {
559         struct ifnet    *ifp = adapter->ifp;
560         device_t        dev = adapter->dev;
561         struct ixgbe_hw *hw = &adapter->hw;
562         int             error = 0;
563
564         INIT_DEBUGOUT("ixv_init_locked: begin");
565         mtx_assert(&adapter->core_mtx, MA_OWNED);
566         hw->adapter_stopped = FALSE;
567         hw->mac.ops.stop_adapter(hw);
568         callout_stop(&adapter->timer);
569
570         /* reprogram the RAR[0] in case user changed it. */
571         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
572
573         /* Get the latest MAC address; the user may have set a LAA */
574         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
575             IXGBE_ETH_LENGTH_OF_ADDRESS);
576         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
577
578         /* Prepare transmit descriptors and buffers */
579         if (ixgbe_setup_transmit_structures(adapter)) {
580                 device_printf(dev, "Could not setup transmit structures\n");
581                 ixv_stop(adapter);
582                 return;
583         }
584
585         /* Reset VF and renegotiate mailbox API version */
586         hw->mac.ops.reset_hw(hw);
587         error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_12);
588         if (error)
589                 device_printf(dev, "MBX API 1.2 negotiation failed! Error %d\n",
590                     error);
591
592         ixv_initialize_transmit_units(adapter);
593
594         /* Setup Multicast table */
595         ixv_set_multi(adapter);
596
597         /*
598          * Determine the correct mbuf pool
599          * for doing jumbo/headersplit
600          */
601         if (ifp->if_mtu > ETHERMTU)
602                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
603         else
604                 adapter->rx_mbuf_sz = MCLBYTES;
605
606         /* Prepare receive descriptors and buffers */
607         if (ixgbe_setup_receive_structures(adapter)) {
608                 device_printf(dev, "Could not setup receive structures\n");
609                 ixv_stop(adapter);
610                 return;
611         }
612
613         /* Configure RX settings */
614         ixv_initialize_receive_units(adapter);
615
616         /* Set the various hardware offload abilities */
617         ifp->if_hwassist = 0;
618         if (ifp->if_capenable & IFCAP_TSO4)
619                 ifp->if_hwassist |= CSUM_TSO;
620         if (ifp->if_capenable & IFCAP_TXCSUM) {
621                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
622 #if __FreeBSD_version >= 800000
623                 ifp->if_hwassist |= CSUM_SCTP;
624 #endif
625         }
626
627         /* Set up VLAN offload and filter */
628         ixv_setup_vlan_support(adapter);
629
630         /* Set up MSI-X routing */
631         ixv_configure_ivars(adapter);
632
633         /* Set up auto-mask */
634         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
635
636         /* Set moderation on the Link interrupt */
637         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
638
639         /* Stats init */
640         ixv_init_stats(adapter);
641
642         /* Config/Enable Link */
643         hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
644             FALSE);
645
646         /* Start watchdog */
647         callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
648
649         /* And now turn on interrupts */
650         ixv_enable_intr(adapter);
651
652         /* Now inform the stack we're ready */
653         ifp->if_drv_flags |= IFF_DRV_RUNNING;
654         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
655
656         return;
657 } /* ixv_init_locked */
658
659 /*
660  * MSI-X Interrupt Handlers and Tasklets
661  */
662
663 static inline void
664 ixv_enable_queue(struct adapter *adapter, u32 vector)
665 {
666         struct ixgbe_hw *hw = &adapter->hw;
667         u32             queue = 1 << vector;
668         u32             mask;
669
670         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
671         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
672 } /* ixv_enable_queue */
673
674 static inline void
675 ixv_disable_queue(struct adapter *adapter, u32 vector)
676 {
677         struct ixgbe_hw *hw = &adapter->hw;
678         u64             queue = (u64)(1 << vector);
679         u32             mask;
680
681         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
682         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
683 } /* ixv_disable_queue */
684
685 static inline void
686 ixv_rearm_queues(struct adapter *adapter, u64 queues)
687 {
688         u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
689         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
690 } /* ixv_rearm_queues */
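/*
 * Illustrative note: each queue's MSI-X vector corresponds to one bit in
 * the VTEIMS/VTEIMC/VTEICS registers (vector 0 -> 0x1, vector 1 -> 0x2,
 * and so on); IXGBE_EIMS_RTX_QUEUE limits the helpers above to the RX/TX
 * queue cause bits.
 */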
691
692
693 /************************************************************************
694  * ixv_msix_que - MSI-X Queue Interrupt Service routine
695  ************************************************************************/
696 void
697 ixv_msix_que(void *arg)
698 {
699         struct ix_queue *que = arg;
700         struct adapter  *adapter = que->adapter;
701         struct ifnet    *ifp = adapter->ifp;
702         struct tx_ring  *txr = que->txr;
703         struct rx_ring  *rxr = que->rxr;
704         bool            more;
705         u32             newitr = 0;
706
707         ixv_disable_queue(adapter, que->msix);
708         ++que->irqs;
709
710         more = ixgbe_rxeof(que);
711
712         IXGBE_TX_LOCK(txr);
713         ixgbe_txeof(txr);
714         /*
715          * Make certain that if the stack
716          * has anything queued the task gets
717          * scheduled to handle it.
718          */
719         if (!ixv_ring_empty(adapter->ifp, txr->br))
720                 ixv_start_locked(ifp, txr);
721         IXGBE_TX_UNLOCK(txr);
722
723         /* Do AIM now? */
724
725         if (ixv_enable_aim == FALSE)
726                 goto no_calc;
727         /*
728          * Do Adaptive Interrupt Moderation:
729          *  - Write out last calculated setting
730          *  - Calculate based on average size over
731          *    the last interval.
732          */
733         if (que->eitr_setting)
734                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
735                     que->eitr_setting);
736
737         que->eitr_setting = 0;
738
739         /* Idle, do nothing */
740         if ((txr->bytes == 0) && (rxr->bytes == 0))
741                 goto no_calc;
742
743         if ((txr->bytes) && (txr->packets))
744                 newitr = txr->bytes/txr->packets;
745         if ((rxr->bytes) && (rxr->packets))
746                 newitr = max(newitr, (rxr->bytes / rxr->packets));
747         newitr += 24; /* account for hardware frame, crc */
748
749         /* set an upper boundary */
750         newitr = min(newitr, 3000);
751
752         /* Be nice to the mid range */
753         if ((newitr > 300) && (newitr < 1200))
754                 newitr = (newitr / 3);
755         else
756                 newitr = (newitr / 2);
757
758         newitr |= newitr << 16;
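        /*
         * Worked example (illustrative): with an average RX frame of 1500
         * bytes and an idle TX ring, newitr = 1500 + 24 = 1524 (under the
         * 3000 cap); 1524 falls outside the 300-1200 mid range, so it is
         * halved to 762 and mirrored into the upper 16 bits before being
         * written to VTEITR at the start of the next interrupt.
         */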
759
760         /* save for next interrupt */
761         que->eitr_setting = newitr;
762
763         /* Reset state */
764         txr->bytes = 0;
765         txr->packets = 0;
766         rxr->bytes = 0;
767         rxr->packets = 0;
768
769 no_calc:
770         if (more)
771                 taskqueue_enqueue(que->tq, &que->que_task);
772         else /* Re-enable this interrupt */
773                 ixv_enable_queue(adapter, que->msix);
774
775         return;
776 } /* ixv_msix_que */
777
778 /************************************************************************
779  * ixv_msix_mbx
780  ************************************************************************/
781 static void
782 ixv_msix_mbx(void *arg)
783 {
784         struct adapter  *adapter = arg;
785         struct ixgbe_hw *hw = &adapter->hw;
786         u32             reg;
787
788         ++adapter->link_irq;
789
790         /* First get the cause */
791         reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
792         /* Clear interrupt with write */
793         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
794
795         /* Link status change */
796         if (reg & IXGBE_EICR_LSC)
797                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
798
799         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
800
801         return;
802 } /* ixv_msix_mbx */
803
804 /************************************************************************
805  * ixv_media_status - Media Ioctl callback
806  *
807  *   Called whenever the user queries the status of
808  *   the interface using ifconfig.
809  ************************************************************************/
810 static void
811 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
812 {
813         struct adapter *adapter = ifp->if_softc;
814
815         INIT_DEBUGOUT("ixv_media_status: begin");
816         IXGBE_CORE_LOCK(adapter);
817         ixv_update_link_status(adapter);
818
819         ifmr->ifm_status = IFM_AVALID;
820         ifmr->ifm_active = IFM_ETHER;
821
822         if (!adapter->link_active) {
823                 IXGBE_CORE_UNLOCK(adapter);
824                 return;
825         }
826
827         ifmr->ifm_status |= IFM_ACTIVE;
828
829         switch (adapter->link_speed) {
830                 case IXGBE_LINK_SPEED_1GB_FULL:
831                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
832                         break;
833                 case IXGBE_LINK_SPEED_10GB_FULL:
834                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
835                         break;
836                 case IXGBE_LINK_SPEED_100_FULL:
837                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
838                         break;
839                 case IXGBE_LINK_SPEED_10_FULL:
840                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
841                         break;
842         }
843
844         IXGBE_CORE_UNLOCK(adapter);
845
846         return;
847 } /* ixv_media_status */
848
849 /************************************************************************
850  * ixv_media_change - Media Ioctl callback
851  *
852  *   Called when the user changes speed/duplex using
853  *   media/mediaopt options with ifconfig.
854  ************************************************************************/
855 static int
856 ixv_media_change(struct ifnet *ifp)
857 {
858         struct adapter *adapter = ifp->if_softc;
859         struct ifmedia *ifm = &adapter->media;
860
861         INIT_DEBUGOUT("ixv_media_change: begin");
862
863         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
864                 return (EINVAL);
865
866         switch (IFM_SUBTYPE(ifm->ifm_media)) {
867         case IFM_AUTO:
868                 break;
869         default:
870                 device_printf(adapter->dev, "Only auto media type\n");
871                 return (EINVAL);
872         }
873
874         return (0);
875 } /* ixv_media_change */
876
877
878 /************************************************************************
879  * ixv_set_multi - Multicast Update
880  *
881  *   Called whenever the multicast address list is updated.
882  ************************************************************************/
883 static void
884 ixv_set_multi(struct adapter *adapter)
885 {
886         u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
887         u8                 *update_ptr;
888         struct ifmultiaddr *ifma;
889         struct ifnet       *ifp = adapter->ifp;
890         int                mcnt = 0;
891
892         IOCTL_DEBUGOUT("ixv_set_multi: begin");
893
894 #if __FreeBSD_version < 800000
895         IF_ADDR_LOCK(ifp);
896 #else
897         if_maddr_rlock(ifp);
898 #endif
899         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
900                 if (ifma->ifma_addr->sa_family != AF_LINK)
901                         continue;
902                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
903                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
904                     IXGBE_ETH_LENGTH_OF_ADDRESS);
905                 mcnt++;
906         }
907 #if __FreeBSD_version < 800000
908         IF_ADDR_UNLOCK(ifp);
909 #else
910         if_maddr_runlock(ifp);
911 #endif
912
913         update_ptr = mta;
914
915         adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
916             ixv_mc_array_itr, TRUE);
917
918         return;
919 } /* ixv_set_multi */
920
921 /************************************************************************
922  * ixv_mc_array_itr
923  *
924  *   An iterator function needed by the multicast shared code.
925  *   It feeds the shared code routine the addresses in the
926  *   array of ixv_set_multi() one by one.
927  ************************************************************************/
928 static u8 *
929 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
930 {
931         u8 *addr = *update_ptr;
932         u8 *newptr;
933         *vmdq = 0;
934
935         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
936         *update_ptr = newptr;
937
938         return addr;
939 } /* ixv_mc_array_itr */
940
941 /************************************************************************
942  * ixv_local_timer - Timer routine
943  *
944  *   Checks for link status, updates statistics,
945  *   and runs the watchdog check.
946  ************************************************************************/
947 static void
948 ixv_local_timer(void *arg)
949 {
950         struct adapter  *adapter = arg;
951         device_t        dev = adapter->dev;
952         struct ix_queue *que = adapter->queues;
953         u64             queues = 0;
954         int             hung = 0;
955
956         mtx_assert(&adapter->core_mtx, MA_OWNED);
957
958         ixv_check_link(adapter);
959
960         /* Stats Update */
961         ixv_update_stats(adapter);
962
963         /*
964          * Check the TX queues status
965          *      - mark hung queues so we don't schedule on them
966          *      - watchdog only if all queues show hung
967          */
968         for (int i = 0; i < adapter->num_queues; i++, que++) {
969                 /* Keep track of queues with work for soft irq */
970                 if (que->txr->busy)
971                         queues |= ((u64)1 << que->me);
972                 /*
973                  * Each time txeof runs without cleaning, while there
974                  * are uncleaned descriptors, it increments busy.  If
975                  * we reach the MAX we declare it hung.
976                  */
977                 if (que->busy == IXGBE_QUEUE_HUNG) {
978                         ++hung;
979                         /* Mark the queue as inactive */
980                         adapter->active_queues &= ~((u64)1 << que->me);
981                         continue;
982                 } else {
983                         /* Check if we've come back from hung */
984                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
985                                 adapter->active_queues |= ((u64)1 << que->me);
986                 }
987                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
988                         device_printf(dev,
989                             "Warning queue %d appears to be hung!\n", i);
990                         que->txr->busy = IXGBE_QUEUE_HUNG;
991                         ++hung;
992                 }
993
994         }
995
996         /* Only truly watchdog if all queues show hung */
997         if (hung == adapter->num_queues)
998                 goto watchdog;
999         else if (queues != 0) { /* Force an IRQ on queues with work */
1000                 ixv_rearm_queues(adapter, queues);
1001         }
1002
1003         callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1004
1005         return;
1006
1007 watchdog:
1008
1009         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1010         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1011         adapter->watchdog_events++;
1012         ixv_init_locked(adapter);
1013 } /* ixv_local_timer */
1014
1015 /************************************************************************
1016  * ixv_update_link_status - Update OS on link state
1017  *
1018  * Note: Only updates the OS on the cached link state.
1019  *       The real check of the hardware only happens with
1020  *       a link interrupt.
1021  ************************************************************************/
1022 static void
1023 ixv_update_link_status(struct adapter *adapter)
1024 {
1025         struct ifnet *ifp = adapter->ifp;
1026         device_t     dev = adapter->dev;
1027
1028         if (adapter->link_up) {
1029                 if (adapter->link_active == FALSE) {
1030                         if (bootverbose)
1031                                 device_printf(dev, "Link is up %d Gbps %s \n",
1032                                     ((adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 10 : 1),
1033                                     "Full Duplex");
1034                         adapter->link_active = TRUE;
1035                         if_link_state_change(ifp, LINK_STATE_UP);
1036                 }
1037         } else { /* Link down */
1038                 if (adapter->link_active == TRUE) {
1039                         if (bootverbose)
1040                                 device_printf(dev,"Link is Down\n");
1041                         if_link_state_change(ifp, LINK_STATE_DOWN);
1042                         adapter->link_active = FALSE;
1043                 }
1044         }
1045
1046         return;
1047 } /* ixv_update_link_status */
1048
1049
1050 /************************************************************************
1051  * ixv_stop - Stop the hardware
1052  *
1053  *   Disables all traffic on the adapter by issuing a
1054  *   global reset on the MAC and deallocates TX/RX buffers.
1055  ************************************************************************/
1056 static void
1057 ixv_stop(void *arg)
1058 {
1059         struct ifnet    *ifp;
1060         struct adapter  *adapter = arg;
1061         struct ixgbe_hw *hw = &adapter->hw;
1062
1063         ifp = adapter->ifp;
1064
1065         mtx_assert(&adapter->core_mtx, MA_OWNED);
1066
1067         INIT_DEBUGOUT("ixv_stop: begin\n");
1068         ixv_disable_intr(adapter);
1069
1070         /* Tell the stack that the interface is no longer active */
1071         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1072
1073         hw->mac.ops.reset_hw(hw);
1074         adapter->hw.adapter_stopped = FALSE;
1075         hw->mac.ops.stop_adapter(hw);
1076         callout_stop(&adapter->timer);
1077
1078         /* reprogram the RAR[0] in case user changed it. */
1079         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1080
1081         return;
1082 } /* ixv_stop */
1083
1084
1085 /************************************************************************
1086  * ixv_allocate_pci_resources
1087  ************************************************************************/
1088 static int
1089 ixv_allocate_pci_resources(struct adapter *adapter)
1090 {
1091         device_t dev = adapter->dev;
1092         int      rid;
1093
1094         rid = PCIR_BAR(0);
1095         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1096             RF_ACTIVE);
1097
1098         if (!(adapter->pci_mem)) {
1099                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1100                 return (ENXIO);
1101         }
1102
1103         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1104         adapter->osdep.mem_bus_space_handle =
1105             rman_get_bushandle(adapter->pci_mem);
1106         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1107
1108         /* Pick up the tuneable queues */
1109         adapter->num_queues = ixv_num_queues;
1110
1111         return (0);
1112 } /* ixv_allocate_pci_resources */
1113
1114 /************************************************************************
1115  * ixv_free_pci_resources
1116  ************************************************************************/
1117 static void
1118 ixv_free_pci_resources(struct adapter * adapter)
1119 {
1120         struct ix_queue *que = adapter->queues;
1121         device_t        dev = adapter->dev;
1122         int             rid, memrid;
1123
1124         memrid = PCIR_BAR(MSIX_82598_BAR);
1125
1126         /*
1127          * There is a slight possibility of a failure mode
1128          * in attach that will result in entering this function
1129          * before interrupt resources have been initialized, and
1130          * in that case we do not want to execute the loops below.
1131          * We can detect this reliably by the state of the adapter's
1132          * res pointer.
1133          */
1134         if (adapter->res == NULL)
1135                 goto mem;
1136
1137         /*
1138          *  Release all msix queue resources:
1139          */
1140         for (int i = 0; i < adapter->num_queues; i++, que++) {
1141                 rid = que->msix + 1;
1142                 if (que->tag != NULL) {
1143                         bus_teardown_intr(dev, que->res, que->tag);
1144                         que->tag = NULL;
1145                 }
1146                 if (que->res != NULL)
1147                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1148         }
1149
1150
1151         /* Clean the Mailbox interrupt last */
1152         rid = adapter->vector + 1;
1153
1154         if (adapter->tag != NULL) {
1155                 bus_teardown_intr(dev, adapter->res, adapter->tag);
1156                 adapter->tag = NULL;
1157         }
1158         if (adapter->res != NULL)
1159                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1160
1161 mem:
1162         pci_release_msi(dev);
1163
1164         if (adapter->msix_mem != NULL)
1165                 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
1166                     adapter->msix_mem);
1167
1168         if (adapter->pci_mem != NULL)
1169                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1170                     adapter->pci_mem);
1171
1172         return;
1173 } /* ixv_free_pci_resources */
1174
1175 /************************************************************************
1176  * ixv_setup_interface
1177  *
1178  *   Setup networking device structure and register an interface.
1179  ************************************************************************/
1180 static void
1181 ixv_setup_interface(device_t dev, struct adapter *adapter)
1182 {
1183         struct ifnet *ifp;
1184
1185         INIT_DEBUGOUT("ixv_setup_interface: begin");
1186
1187         ifp = adapter->ifp = if_alloc(IFT_ETHER);
1188         if (ifp == NULL)
1189                 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1190         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1191         ifp->if_baudrate = 1000000000;
1192         ifp->if_init = ixv_init;
1193         ifp->if_softc = adapter;
1194         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1195         ifp->if_ioctl = ixv_ioctl;
1196         if_setgetcounterfn(ifp, ixv_get_counter);
1197         /* TSO parameters */
1198         ifp->if_hw_tsomax = 65518;
1199         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1200         ifp->if_hw_tsomaxsegsize = 2048;
1201         if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1202                 ifp->if_start = ixgbe_legacy_start;
1203                 ixv_start_locked = ixgbe_legacy_start_locked;
1204                 ixv_ring_empty = ixgbe_legacy_ring_empty;
1205         } else {
1206                 ifp->if_transmit = ixgbe_mq_start;
1207                 ifp->if_qflush = ixgbe_qflush;
1208                 ixv_start_locked = ixgbe_mq_start_locked;
1209                 ixv_ring_empty = drbr_empty;
1210         }
1211         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1212
1213         ether_ifattach(ifp, adapter->hw.mac.addr);
1214
1215         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1216
1217         /*
1218          * Tell the upper layer(s) we support long frames.
1219          */
1220         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1221
1222         /* Set capability flags */
1223         ifp->if_capabilities |= IFCAP_HWCSUM
1224                              |  IFCAP_HWCSUM_IPV6
1225                              |  IFCAP_TSO
1226                              |  IFCAP_LRO
1227                              |  IFCAP_VLAN_HWTAGGING
1228                              |  IFCAP_VLAN_HWTSO
1229                              |  IFCAP_VLAN_HWCSUM
1230                              |  IFCAP_JUMBO_MTU
1231                              |  IFCAP_VLAN_MTU;
1232
1233         /* Enable the above capabilities by default */
1234         ifp->if_capenable = ifp->if_capabilities;
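
        /*
         * Illustrative usage note: these map to the standard ifconfig
         * capability flags, so individual offloads can be toggled at run
         * time (interface name is an example), e.g.:
         *
         *   # ifconfig ixv0 -lro -txcsum
         */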
1235
1236         /*
1237          * Specify the media types supported by this adapter and register
1238          * callbacks to update media and link information
1239          */
1240         ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1241             ixv_media_status);
1242         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1243         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1244
1245         return;
1246 } /* ixv_setup_interface */
1247
1248
1249 /************************************************************************
1250  * ixv_initialize_transmit_units - Enable transmit unit.
1251  ************************************************************************/
1252 static void
1253 ixv_initialize_transmit_units(struct adapter *adapter)
1254 {
1255         struct tx_ring  *txr = adapter->tx_rings;
1256         struct ixgbe_hw *hw = &adapter->hw;
1257
1258
1259         for (int i = 0; i < adapter->num_queues; i++, txr++) {
1260                 u64 tdba = txr->txdma.dma_paddr;
1261                 u32 txctrl, txdctl;
1262
1263                 /* Set WTHRESH to 8, burst writeback */
1264                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1265                 txdctl |= (8 << 16);
1266                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1267
1268                 /* Set the HW Tx Head and Tail indices */
1269                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1270                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1271
1272                 /* Set Tx Tail register */
1273                 txr->tail = IXGBE_VFTDT(i);
1274
1275                 /* Set Ring parameters */
1276                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1277                     (tdba & 0x00000000ffffffffULL));
1278                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
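                /*
                 * Note: the legacy and advanced TX descriptors are both
                 * 16 bytes, so sizing VFTDLEN with the legacy structure
                 * below yields the same ring length in bytes.
                 */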
1279                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1280                     adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1281                 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1282                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1283                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1284
1285                 /* Now enable */
1286                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1287                 txdctl |= IXGBE_TXDCTL_ENABLE;
1288                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1289         }
1290
1291         return;
1292 } /* ixv_initialize_transmit_units */
1293
1294
1295 /************************************************************************
1296  * ixv_initialize_rss_mapping
1297  ************************************************************************/
1298 static void
1299 ixv_initialize_rss_mapping(struct adapter *adapter)
1300 {
1301         struct ixgbe_hw *hw = &adapter->hw;
1302         u32             reta = 0, mrqc, rss_key[10];
1303         int             queue_id;
1304         int             i, j;
1305         u32             rss_hash_config;
1306
1307         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1308                 /* Fetch the configured RSS key */
1309                 rss_getkey((uint8_t *)&rss_key);
1310         } else {
1311                 /* set up random bits */
1312                 arc4rand(&rss_key, sizeof(rss_key), 0);
1313         }
1314
1315         /* Now fill out hash function seeds */
1316         for (i = 0; i < 10; i++)
1317                 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1318
1319         /* Set up the redirection table */
1320         for (i = 0, j = 0; i < 64; i++, j++) {
1321                 if (j == adapter->num_queues)
1322                         j = 0;
1323
1324                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1325                         /*
1326                          * Fetch the RSS bucket id for the given indirection
1327                          * entry. Cap it at the number of configured buckets
1328                          * (which is num_queues.)
1329                          */
1330                         queue_id = rss_get_indirection_to_bucket(i);
1331                         queue_id = queue_id % adapter->num_queues;
1332                 } else
1333                         queue_id = j;
1334
1335                 /*
1336                  * The low 8 bits are for hash value (n+0);
1337                  * The next 8 bits are for hash value (n+1), etc.
1338                  */
1339                 reta >>= 8;
1340                 reta |= ((uint32_t)queue_id) << 24;
1341                 if ((i & 3) == 3) {
1342                         IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1343                         reta = 0;
1344                 }
1345         }
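        /*
         * Worked example (illustrative): with two queues and the RSS
         * feature disabled, the loop packs queue ids 0,1,0,1,... so the
         * first VFRETA register written is 0x01000100 (entry 0 in the
         * lowest byte, entry 3 in the highest).
         */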
1346
1347         /* Perform hash on these packet types */
1348         if (adapter->feat_en & IXGBE_FEATURE_RSS)
1349                 rss_hash_config = rss_gethashconfig();
1350         else {
1351                 /*
1352                  * Disable UDP - IP fragments aren't currently being handled
1353                  * and so we end up with a mix of 2-tuple and 4-tuple
1354                  * traffic.
1355                  */
1356                 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1357                                 | RSS_HASHTYPE_RSS_TCP_IPV4
1358                                 | RSS_HASHTYPE_RSS_IPV6
1359                                 | RSS_HASHTYPE_RSS_TCP_IPV6;
1360         }
1361
1362         mrqc = IXGBE_MRQC_RSSEN;
1363         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1364                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1365         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1366                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1367         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1368                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1369         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1370                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1371         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1372                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1373                     __func__);
1374         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1375                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1376                     __func__);
1377         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1378                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1379         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
1380                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
1381                     __func__);
1382         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1383                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1384         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1385                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1386                     __func__);
1387         IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1388 } /* ixv_initialize_rss_mapping */
1389
1390
1391 /************************************************************************
1392  * ixv_initialize_receive_units - Setup receive registers and features.
1393  ************************************************************************/
1394 static void
1395 ixv_initialize_receive_units(struct adapter *adapter)
1396 {
1397         struct rx_ring  *rxr = adapter->rx_rings;
1398         struct ixgbe_hw *hw = &adapter->hw;
1399         struct ifnet    *ifp = adapter->ifp;
1400         u32             bufsz, rxcsum, psrtype;
1401
1402         if (ifp->if_mtu > ETHERMTU)
1403                 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1404         else
1405                 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
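        /*
         * Note: SRRCTL's BSIZEPKT field is expressed in 1 KB units (the
         * shift is 10 in the shared code), so bufsz evaluates to 4 for
         * 4 KB jumbo clusters and 2 for standard 2 KB clusters.
         */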
1406
1407         psrtype = IXGBE_PSRTYPE_TCPHDR
1408                 | IXGBE_PSRTYPE_UDPHDR
1409                 | IXGBE_PSRTYPE_IPV4HDR
1410                 | IXGBE_PSRTYPE_IPV6HDR
1411                 | IXGBE_PSRTYPE_L2HDR;
1412
1413         if (adapter->num_queues > 1)
1414                 psrtype |= 1 << 29;
1415
1416         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1417
1418         /* Tell PF our max_frame size */
1419         if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1420                 device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
1421         }
1422
1423         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1424                 u64 rdba = rxr->rxdma.dma_paddr;
1425                 u32 reg, rxdctl;
1426
1427                 /* Disable the queue */
1428                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1429                 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1430                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1431                 for (int j = 0; j < 10; j++) {
1432                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1433                             IXGBE_RXDCTL_ENABLE)
1434                                 msec_delay(1);
1435                         else
1436                                 break;
1437                 }
1438                 wmb();
1439                 /* Setup the Base and Length of the Rx Descriptor Ring */
1440                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1441                     (rdba & 0x00000000ffffffffULL));
1442                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
1443                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1444                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1445
1446                 /* Reset the ring indices */
1447                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1448                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1449
1450                 /* Set up the SRRCTL register */
1451                 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1452                 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1453                 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1454                 reg |= bufsz;
1455                 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1456                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1457
1458                 /* Capture Rx Tail index */
1459                 rxr->tail = IXGBE_VFRDT(rxr->me);
1460
1461                 /* Do the queue enabling last */
1462                 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1463                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1464                 for (int k = 0; k < 10; k++) {
1465                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1466                             IXGBE_RXDCTL_ENABLE)
1467                                 break;
1468                         msec_delay(1);
1469                 }
1470                 wmb();
1471
1472                 /* Set the Tail Pointer */
1473                 /*
1474                  * In netmap mode, we must preserve the buffers made
1475                  * available to userspace before the if_init()
1476                  * (this is true by default on the TX side, because
1477                  * init makes all buffers available to userspace).
1478                  *
1479                  * netmap_reset() and the device specific routines
1480                  * (e.g. ixgbe_setup_receive_rings()) map these
1481                  * buffers at the end of the NIC ring, so here we
1482                  * must set the RDT (tail) register to make sure
1483                  * they are not overwritten.
1484                  *
1485                  * In this driver the NIC ring starts at RDH = 0,
1486                  * RDT points to the last slot available for reception (?),
1487                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1488                  */
1489 #ifdef DEV_NETMAP
1490                 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1491                     (ifp->if_capenable & IFCAP_NETMAP)) {
1492                         struct netmap_adapter *na = NA(adapter->ifp);
1493                         struct netmap_kring *kring = &na->rx_rings[i];
1494                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1495
1496                         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1497                 } else
1498 #endif /* DEV_NETMAP */
1499                         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1500                             adapter->num_rx_desc - 1);
1501         }
1502
1503         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1504
1505         ixv_initialize_rss_mapping(adapter);
1506
1507         if (adapter->num_queues > 1) {
1508                 /* RSS and Rx IP payload checksum (IPPCSE) are mutually exclusive */
1509                 rxcsum |= IXGBE_RXCSUM_PCSD;
1510         }
1511
1512         if (ifp->if_capenable & IFCAP_RXCSUM)
1513                 rxcsum |= IXGBE_RXCSUM_PCSD;
1514
1515         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1516                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1517
1518         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1519
1520         return;
1521 } /* ixv_initialize_receive_units */
1522
1523 /************************************************************************
1524  * ixv_setup_vlan_support
1525  ************************************************************************/
1526 static void
1527 ixv_setup_vlan_support(struct adapter *adapter)
1528 {
1529         struct ixgbe_hw *hw = &adapter->hw;
1530         u32             ctrl, vid, vfta, retry;
1531
1532         /*
1533          * We get here through init_locked, which means a soft
1534          * reset has already cleared the VFTA and other state.
1535          * If no VLANs have been registered, there is nothing
1536          * to do.
1537          */
1538         if (adapter->num_vlans == 0)
1539                 return;
1540
1541         /* Enable VLAN tag stripping (VME) on each Rx queue */
1542         for (int i = 0; i < adapter->num_queues; i++) {
1543                 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1544                 ctrl |= IXGBE_RXDCTL_VME;
1545                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1546                 /*
1547                  * Let Rx path know that it needs to store VLAN tag
1548                  * as part of extra mbuf info.
1549                  */
1550                 adapter->rx_rings[i].vtag_strip = TRUE;
1551         }
1552
1553         /*
1554          * A soft reset zeroes out the VFTA, so
1555          * we need to repopulate it now.
1556          */
1557         for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1558                 if (ixv_shadow_vfta[i] == 0)
1559                         continue;
1560                 vfta = ixv_shadow_vfta[i];
1561                 /*
1562                  * Reconstruct the VLAN IDs from the bits
1563                  * set in each 32-bit word of the shadow
1564                  * array.
1565                  */
1566                 for (int j = 0; j < 32; j++) {
1567                         retry = 0;
1568                         if ((vfta & (1 << j)) == 0)
1569                                 continue;
1570                         vid = (i * 32) + j;
1571                         /* Call the shared code mailbox routine */
1572                         while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1573                                 if (++retry > 5)
1574                                         break;
1575                         }
1576                 }
1577         }
1578 } /* ixv_setup_vlan_support */
1579
1580 /************************************************************************
1581  * ixv_register_vlan
1582  *
1583  *   Run via a VLAN config EVENT; it lets us use the HW filter
1584  *   table because we are given the VLAN ID.  This only creates
1585  *   the entry in the soft (shadow) VFTA; init repopulates the
1586  *   real table.
1587  ************************************************************************/
1588 static void
1589 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1590 {
1591         struct adapter *adapter = ifp->if_softc;
1592         u16            index, bit;
1593
1594         if (ifp->if_softc != arg) /* Not our event */
1595                 return;
1596
1597         if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1598                 return;
1599
1600         IXGBE_CORE_LOCK(adapter);
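        /*
         * The shadow VFTA is an array of 32-bit words: the word index
         * is vtag / 32 and the bit within that word is vtag % 32.
         * For example, VLAN 100 lands in word 3, bit 4.
         */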
1601         index = (vtag >> 5) & 0x7F;
1602         bit = vtag & 0x1F;
1603         ixv_shadow_vfta[index] |= (1 << bit);
1604         ++adapter->num_vlans;
1605         /* Re-init to load the changes */
1606         ixv_init_locked(adapter);
1607         IXGBE_CORE_UNLOCK(adapter);
1608 } /* ixv_register_vlan */
1609
1610 /************************************************************************
1611  * ixv_unregister_vlan
1612  *
1613  *   Run via a VLAN unconfig EVENT; removes our entry
1614  *   from the soft (shadow) VFTA.
1615  ************************************************************************/
1616 static void
1617 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1618 {
1619         struct adapter *adapter = ifp->if_softc;
1620         u16            index, bit;
1621
1622         if (ifp->if_softc !=  arg)
1623                 return;
1624
1625         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1626                 return;
1627
1628         IXGBE_CORE_LOCK(adapter);
1629         index = (vtag >> 5) & 0x7F;
1630         bit = vtag & 0x1F;
1631         ixv_shadow_vfta[index] &= ~(1 << bit);
1632         --adapter->num_vlans;
1633         /* Re-init to load the changes */
1634         ixv_init_locked(adapter);
1635         IXGBE_CORE_UNLOCK(adapter);
1636 } /* ixv_unregister_vlan */
1637
1638 /************************************************************************
1639  * ixv_enable_intr
1640  ************************************************************************/
1641 static void
1642 ixv_enable_intr(struct adapter *adapter)
1643 {
1644         struct ixgbe_hw *hw = &adapter->hw;
1645         struct ix_queue *que = adapter->queues;
1646         u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1647
1648
1649         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1650
1651         mask = IXGBE_EIMS_ENABLE_MASK;
1652         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1653         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1654
1655         for (int i = 0; i < adapter->num_queues; i++, que++)
1656                 ixv_enable_queue(adapter, que->msix);
1657
1658         IXGBE_WRITE_FLUSH(hw);
1659
1660         return;
1661 } /* ixv_enable_intr */
1662
1663 /************************************************************************
1664  * ixv_disable_intr
1665  ************************************************************************/
1666 static void
1667 ixv_disable_intr(struct adapter *adapter)
1668 {
1669         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1670         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1671         IXGBE_WRITE_FLUSH(&adapter->hw);
1672
1673         return;
1674 } /* ixv_disable_intr */
1675
1676 /************************************************************************
1677  * ixv_set_ivar
1678  *
1679  *   Setup the correct IVAR register for a particular MSI-X interrupt
1680  *    - entry is the register array entry
1681  *    - vector is the MSI-X vector for this queue
1682  *    - type is RX/TX/MISC
1683  ************************************************************************/
1684 static void
1685 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1686 {
1687         struct ixgbe_hw *hw = &adapter->hw;
1688         u32             ivar, index;
1689
1690         vector |= IXGBE_IVAR_ALLOC_VAL;
1691
1692         if (type == -1) { /* MISC IVAR */
1693                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1694                 ivar &= ~0xFF;
1695                 ivar |= vector;
1696                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1697         } else {          /* RX/TX IVARS */
1698                 index = (16 * (entry & 1)) + (8 * type);
1699                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1700                 ivar &= ~(0xFF << index);
1701                 ivar |= (vector << index);
1702                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1703         }
1704 } /* ixv_set_ivar */
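
/*
 * Layout example, derived from the index math above: each VTIVAR
 * register packs four 8-bit entries, so for the queue pair (2n, 2n+1)
 * register VTIVAR(n) holds
 *
 *   bits  7:0  - Rx vector for entry 2n
 *   bits 15:8  - Tx vector for entry 2n
 *   bits 23:16 - Rx vector for entry 2n+1
 *   bits 31:24 - Tx vector for entry 2n+1
 *
 * each OR'd with IXGBE_IVAR_ALLOC_VAL to mark the entry as valid.
 */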
1705
1706 /************************************************************************
1707  * ixv_configure_ivars
1708  ************************************************************************/
1709 static void
1710 ixv_configure_ivars(struct adapter *adapter)
1711 {
1712         struct ix_queue *que = adapter->queues;
1713
1714         for (int i = 0; i < adapter->num_queues; i++, que++) {
1715                 /* First the RX queue entry */
1716                 ixv_set_ivar(adapter, i, que->msix, 0);
1717                 /* ... and the TX */
1718                 ixv_set_ivar(adapter, i, que->msix, 1);
1719                 /* Set an initial value in EITR */
1720                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1721                     IXGBE_EITR_DEFAULT);
1722         }
1723
1724         /* For the mailbox interrupt */
1725         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1726 } /* ixv_configure_ivars */
1727
1728
1729 /************************************************************************
1730  * ixv_get_counter
1731  ************************************************************************/
1732 static uint64_t
1733 ixv_get_counter(struct ifnet *ifp, ift_counter cnt)
1734 {
1735         struct adapter *adapter;
1736
1737         adapter = if_getsoftc(ifp);
1738
1739         switch (cnt) {
1740         case IFCOUNTER_IPACKETS:
1741                 return (adapter->ipackets);
1742         case IFCOUNTER_OPACKETS:
1743                 return (adapter->opackets);
1744         case IFCOUNTER_IBYTES:
1745                 return (adapter->ibytes);
1746         case IFCOUNTER_OBYTES:
1747                 return (adapter->obytes);
1748         case IFCOUNTER_IMCASTS:
1749                 return (adapter->imcasts);
1750         default:
1751                 return (if_get_counter_default(ifp, cnt));
1752         }
1753 } /* ixv_get_counter */
1754
1755 /************************************************************************
1756  * ixv_save_stats
1757  *
1758  *   The VF stats registers never have a truly virgin
1759  *   starting point, so this routine creates an artificial
1760  *   one, marking "ground zero", as it were, at attach
1761  *   time.
1762  ************************************************************************/
1763 static void
1764 ixv_save_stats(struct adapter *adapter)
1765 {
1766         if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1767                 adapter->stats.vf.saved_reset_vfgprc +=
1768                     adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1769                 adapter->stats.vf.saved_reset_vfgptc +=
1770                     adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1771                 adapter->stats.vf.saved_reset_vfgorc +=
1772                     adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1773                 adapter->stats.vf.saved_reset_vfgotc +=
1774                     adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1775                 adapter->stats.vf.saved_reset_vfmprc +=
1776                     adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1777         }
1778 } /* ixv_save_stats */
1779
1780 /************************************************************************
1781  * ixv_init_stats
1782  ************************************************************************/
1783 static void
1784 ixv_init_stats(struct adapter *adapter)
1785 {
1786         struct ixgbe_hw *hw = &adapter->hw;
1787
1788         adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1789         adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1790         adapter->stats.vf.last_vfgorc |=
1791             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1792
1793         adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1794         adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1795         adapter->stats.vf.last_vfgotc |=
1796             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1797
1798         adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1799
1800         adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1801         adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1802         adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1803         adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1804         adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1805 } /* ixv_init_stats */
1806
1807 #define UPDATE_STAT_32(reg, last, count)                \
1808 {                                                       \
1809         u32 current = IXGBE_READ_REG(hw, reg);          \
1810         if (current < last)                             \
1811                 count += 0x100000000LL;                 \
1812         last = current;                                 \
1813         count &= 0xFFFFFFFF00000000LL;                  \
1814         count |= current;                               \
1815 }
1816
1817 #define UPDATE_STAT_36(lsb, msb, last, count)           \
1818 {                                                       \
1819         u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
1820         u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
1821         u64 current = ((cur_msb << 32) | cur_lsb);      \
1822         if (current < last)                             \
1823                 count += 0x1000000000LL;                \
1824         last = current;                                 \
1825         count &= 0xFFFFFFF000000000LL;                  \
1826         count |= current;                               \
1827 }
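
/*
 * Both macros keep a 64-bit software counter whose low 32 (or 36) bits
 * mirror the hardware register.  When the register wraps around
 * (current < last), one full register span (2^32 or 2^36) is added to
 * the upper bits before the low bits are refreshed, so the software
 * count increases monotonically across rollovers.
 */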
1828
1829 /************************************************************************
1830  * ixv_update_stats - Update the board statistics counters.
1831  ************************************************************************/
1832 void
1833 ixv_update_stats(struct adapter *adapter)
1834 {
1835         struct ixgbe_hw *hw = &adapter->hw;
1836         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1837
1838         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1839             adapter->stats.vf.vfgprc);
1840         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1841             adapter->stats.vf.vfgptc);
1842         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1843             adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1844         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1845             adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1846         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1847             adapter->stats.vf.vfmprc);
1848
1849         /* Fill out the OS statistics structure */
1850         IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1851         IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1852         IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1853         IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1854         IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1855 } /* ixv_update_stats */
1856
1857 /************************************************************************
1858  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1859  ************************************************************************/
1860 static void
1861 ixv_add_stats_sysctls(struct adapter *adapter)
1862 {
1863         device_t                dev = adapter->dev;
1864         struct tx_ring          *txr = adapter->tx_rings;
1865         struct rx_ring          *rxr = adapter->rx_rings;
1866         struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
1867         struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
1868         struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
1869         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1870         struct sysctl_oid       *stat_node, *queue_node;
1871         struct sysctl_oid_list  *stat_list, *queue_list;
1872
1873 #define QUEUE_NAME_LEN 32
1874         char                    namebuf[QUEUE_NAME_LEN];
1875
1876         /* Driver Statistics */
1877         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1878             CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1879         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1880             CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1881         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1882             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1883         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1884             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1885
1886         for (int i = 0; i < adapter->num_queues; i++, txr++) {
1887                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1888                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1889                     CTLFLAG_RD, NULL, "Queue Name");
1890                 queue_list = SYSCTL_CHILDREN(queue_node);
1891
1892                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1893                     CTLFLAG_RD, &(adapter->queues[i].irqs), "IRQs on queue");
1894                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1895                     CTLFLAG_RD, &(txr->no_tx_dma_setup),
1896                     "Driver Tx DMA failure in Tx");
1897                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
1898                     CTLFLAG_RD, &(txr->no_desc_avail),
1899                     "Not-enough-descriptors count: TX");
1900                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1901                     CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1902                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1903                     CTLFLAG_RD, &(txr->br->br_drops),
1904                     "Packets dropped in buf_ring");
1905         }
1906
1907         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1908                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1909                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1910                     CTLFLAG_RD, NULL, "Queue Name");
1911                 queue_list = SYSCTL_CHILDREN(queue_node);
1912
1913                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1914                     CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1915                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1916                     CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1917                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1918                     CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1919         }
1920
1921         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1922             CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1923         stat_list = SYSCTL_CHILDREN(stat_node);
1924
1925         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1926             CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1927         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1928             CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1929         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1930             CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1931         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1932             CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1933         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1934             CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1935 } /* ixv_add_stats_sysctls */
1936
1937 /************************************************************************
1938  * ixv_set_sysctl_value
1939  ************************************************************************/
1940 static void
1941 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
1942         const char *description, int *limit, int value)
1943 {
1944         *limit = value;
1945         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
1946             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
1947             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
1948 } /* ixv_set_sysctl_value */
1949
1950 /************************************************************************
1951  * ixv_print_debug_info
1952  *
1953  *   Called only when the debug sysctl (ixv_sysctl_debug) is
1954  *   triggered.  Provides a way to inspect important statistics
1955  *   maintained by the driver and hardware.
1956  ************************************************************************/
1957 static void
1958 ixv_print_debug_info(struct adapter *adapter)
1959 {
1960         device_t        dev = adapter->dev;
1961         struct ixgbe_hw *hw = &adapter->hw;
1962         struct ix_queue *que = adapter->queues;
1963         struct rx_ring  *rxr;
1964         struct tx_ring  *txr;
1965         struct lro_ctrl *lro;
1966
1967         device_printf(dev, "Error Byte Count = %u \n",
1968             IXGBE_READ_REG(hw, IXGBE_ERRBC));
1969
1970         for (int i = 0; i < adapter->num_queues; i++, que++) {
1971                 txr = que->txr;
1972                 rxr = que->rxr;
1973                 lro = &rxr->lro;
1974                 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
1975                     que->msix, (long)que->irqs);
1976                 device_printf(dev, "RX(%d) Packets Received: %lld\n",
1977                     rxr->me, (long long)rxr->rx_packets);
1978                 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
1979                     rxr->me, (long)rxr->rx_bytes);
1980                 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
1981                     rxr->me, (long long)lro->lro_queued);
1982                 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
1983                     rxr->me, (long long)lro->lro_flushed);
1984                 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
1985                     txr->me, (long)txr->total_packets);
1986                 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
1987                     txr->me, (long)txr->no_desc_avail);
1988         }
1989
1990         device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1991 } /* ixv_print_debug_info */
1992
1993 /************************************************************************
1994  * ixv_sysctl_debug
1995  ************************************************************************/
1996 static int
1997 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1998 {
1999         struct adapter *adapter;
2000         int            error, result;
2001
2002         result = -1;
2003         error = sysctl_handle_int(oidp, &result, 0, req);
2004
2005         if (error || !req->newptr)
2006                 return (error);
2007
2008         if (result == 1) {
2009                 adapter = (struct adapter *)arg1;
2010                 ixv_print_debug_info(adapter);
2011         }
2012
2013         return error;
2014 } /* ixv_sysctl_debug */
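
/*
 * Usage sketch: writing 1 to the debug sysctl registered for this
 * handler (the OID is created elsewhere in this file; the node name
 * below is an assumption) dumps the counters via
 * ixv_print_debug_info(), e.g.:
 *
 *   # sysctl dev.ixv.0.debug=1
 */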
2015
2016 /************************************************************************
2017  * ixv_init_device_features
2018  ************************************************************************/
2019 static void
2020 ixv_init_device_features(struct adapter *adapter)
2021 {
2022         adapter->feat_cap = IXGBE_FEATURE_NETMAP
2023                           | IXGBE_FEATURE_VF
2024                           | IXGBE_FEATURE_RSS
2025                           | IXGBE_FEATURE_LEGACY_TX;
2026
2027         /* A tad short on feature flags for VFs, atm. */
2028         switch (adapter->hw.mac.type) {
2029         case ixgbe_mac_82599_vf:
2030                 break;
2031         case ixgbe_mac_X540_vf:
2032                 break;
2033         case ixgbe_mac_X550_vf:
2034         case ixgbe_mac_X550EM_x_vf:
2035         case ixgbe_mac_X550EM_a_vf:
2036                 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2037                 break;
2038         default:
2039                 break;
2040         }
2041
2042         /* Enabled by default... */
2043         /* Is a virtual function (VF) */
2044         if (adapter->feat_cap & IXGBE_FEATURE_VF)
2045                 adapter->feat_en |= IXGBE_FEATURE_VF;
2046         /* Netmap */
2047         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2048                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2049         /* Receive-Side Scaling (RSS) */
2050         if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2051                 adapter->feat_en |= IXGBE_FEATURE_RSS;
2052         /* Needs advanced context descriptor regardless of offloads req'd */
2053         if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2054                 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2055
2056         /* Enabled via sysctl... */
2057         /* Legacy (single queue) transmit */
2058         if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2059             ixv_enable_legacy_tx)
2060                 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2061 } /* ixv_init_device_features */
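
/*
 * Usage sketch: IXGBE_FEATURE_LEGACY_TX is only enabled when the
 * ixv_enable_legacy_tx tunable (declared earlier in this file) is
 * nonzero, e.g. set before the driver attaches via loader.conf:
 *
 *   hw.ixv.enable_legacy_tx=1
 *
 * The tunable path shown here is an assumption; the authoritative
 * name is in the sysctl/tunable declaration earlier in the file.
 */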
2062
2063 /************************************************************************
2064  * ixv_shutdown - Shutdown entry point
2065  ************************************************************************/
2066 static int
2067 ixv_shutdown(device_t dev)
2068 {
2069         struct adapter *adapter = device_get_softc(dev);
2070         IXGBE_CORE_LOCK(adapter);
2071         ixv_stop(adapter);
2072         IXGBE_CORE_UNLOCK(adapter);
2073
2074         return (0);
2075 } /* ixv_shutdown */
2076
2077
2078 /************************************************************************
2079  * ixv_ioctl - Ioctl entry point
2080  *
2081  *   Called when the user wants to configure the interface.
2082  *
2083  *   return 0 on success, positive on failure
2084  ************************************************************************/
2085 static int
2086 ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2087 {
2088         struct adapter *adapter = ifp->if_softc;
2089         struct ifreq   *ifr = (struct ifreq *)data;
2090 #if defined(INET) || defined(INET6)
2091         struct ifaddr  *ifa = (struct ifaddr *)data;
2092         bool           avoid_reset = FALSE;
2093 #endif
2094         int            error = 0;
2095
2096         switch (command) {
2097
2098         case SIOCSIFADDR:
2099 #ifdef INET
2100                 if (ifa->ifa_addr->sa_family == AF_INET)
2101                         avoid_reset = TRUE;
2102 #endif
2103 #ifdef INET6
2104                 if (ifa->ifa_addr->sa_family == AF_INET6)
2105                         avoid_reset = TRUE;
2106 #endif
2107 #if defined(INET) || defined(INET6)
2108                 /*
2109                  * Calling init results in link renegotiation,
2110                  * so we avoid doing it when possible.
2111                  */
2112                 if (avoid_reset) {
2113                         ifp->if_flags |= IFF_UP;
2114                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2115                                 ixv_init(adapter);
2116                         if (!(ifp->if_flags & IFF_NOARP))
2117                                 arp_ifinit(ifp, ifa);
2118                 } else
2119                         error = ether_ioctl(ifp, command, data);
2120                 break;
2121 #endif
2122         case SIOCSIFMTU:
2123                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
2124                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
2125                         error = EINVAL;
2126                 } else {
2127                         IXGBE_CORE_LOCK(adapter);
2128                         ifp->if_mtu = ifr->ifr_mtu;
2129                         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
2130                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2131                                 ixv_init_locked(adapter);
2132                         IXGBE_CORE_UNLOCK(adapter);
2133                 }
2134                 break;
2135         case SIOCSIFFLAGS:
2136                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2137                 IXGBE_CORE_LOCK(adapter);
2138                 if (ifp->if_flags & IFF_UP) {
2139                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2140                                 ixv_init_locked(adapter);
2141                 } else
2142                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2143                                 ixv_stop(adapter);
2144                 adapter->if_flags = ifp->if_flags;
2145                 IXGBE_CORE_UNLOCK(adapter);
2146                 break;
2147         case SIOCADDMULTI:
2148         case SIOCDELMULTI:
2149                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
2150                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2151                         IXGBE_CORE_LOCK(adapter);
2152                         ixv_disable_intr(adapter);
2153                         ixv_set_multi(adapter);
2154                         ixv_enable_intr(adapter);
2155                         IXGBE_CORE_UNLOCK(adapter);
2156                 }
2157                 break;
2158         case SIOCSIFMEDIA:
2159         case SIOCGIFMEDIA:
2160                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
2161                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2162                 break;
2163         case SIOCSIFCAP:
2164         {
2165                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2166                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
2167                 if (mask & IFCAP_HWCSUM)
2168                         ifp->if_capenable ^= IFCAP_HWCSUM;
2169                 if (mask & IFCAP_TSO4)
2170                         ifp->if_capenable ^= IFCAP_TSO4;
2171                 if (mask & IFCAP_LRO)
2172                         ifp->if_capenable ^= IFCAP_LRO;
2173                 if (mask & IFCAP_VLAN_HWTAGGING)
2174                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2175                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2176                         IXGBE_CORE_LOCK(adapter);
2177                         ixv_init_locked(adapter);
2178                         IXGBE_CORE_UNLOCK(adapter);
2179                 }
2180                 VLAN_CAPABILITIES(ifp);
2181                 break;
2182         }
2183
2184         default:
2185                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
2186                 error = ether_ioctl(ifp, command, data);
2187                 break;
2188         }
2189
2190         return (error);
2191 } /* ixv_ioctl */
2192
2193 /************************************************************************
2194  * ixv_init
2195  ************************************************************************/
2196 static void
2197 ixv_init(void *arg)
2198 {
2199         struct adapter *adapter = arg;
2200
2201         IXGBE_CORE_LOCK(adapter);
2202         ixv_init_locked(adapter);
2203         IXGBE_CORE_UNLOCK(adapter);
2204
2205         return;
2206 } /* ixv_init */
2207
2208
2209 /************************************************************************
2210  * ixv_handle_que
2211  ************************************************************************/
2212 static void
2213 ixv_handle_que(void *context, int pending)
2214 {
2215         struct ix_queue *que = context;
2216         struct adapter  *adapter = que->adapter;
2217         struct tx_ring  *txr = que->txr;
2218         struct ifnet    *ifp = adapter->ifp;
2219         bool            more;
2220
2221         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2222                 more = ixgbe_rxeof(que);
2223                 IXGBE_TX_LOCK(txr);
2224                 ixgbe_txeof(txr);
2225                 if (!ixv_ring_empty(ifp, txr->br))
2226                         ixv_start_locked(ifp, txr);
2227                 IXGBE_TX_UNLOCK(txr);
2228                 if (more) {
2229                         taskqueue_enqueue(que->tq, &que->que_task);
2230                         return;
2231                 }
2232         }
2233
2234         /* Re-enable this interrupt */
2235         ixv_enable_queue(adapter, que->msix);
2236
2237         return;
2238 } /* ixv_handle_que */
2239
2240 /************************************************************************
2241  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2242  ************************************************************************/
2243 static int
2244 ixv_allocate_msix(struct adapter *adapter)
2245 {
2246         device_t        dev = adapter->dev;
2247         struct ix_queue *que = adapter->queues;
2248         struct tx_ring  *txr = adapter->tx_rings;
2249         int             error, msix_ctrl, rid, vector = 0;
2250
2251         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2252                 rid = vector + 1;
2253                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2254                     RF_SHAREABLE | RF_ACTIVE);
2255                 if (que->res == NULL) {
2256                         device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
2257                             vector);
2258                         return (ENXIO);
2259                 }
2260                 /* Set the handler function */
2261                 error = bus_setup_intr(dev, que->res,
2262                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2263                     ixv_msix_que, que, &que->tag);
2264                 if (error) {
2265                         que->res = NULL;
2266                         device_printf(dev, "Failed to register QUE handler");
2267                         return (error);
2268                 }
2269 #if __FreeBSD_version >= 800504
2270                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2271 #endif
2272                 que->msix = vector;
2273                 adapter->active_queues |= (u64)(1 << que->msix);
2274                 /*
2275                  * Bind the MSI-X vector, and thus the
2276                  * ring to the corresponding CPU.
2277                  */
2278                 if (adapter->num_queues > 1)
2279                         bus_bind_intr(dev, que->res, i);
2280                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2281                 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
2282                 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
2283                     taskqueue_thread_enqueue, &que->tq);
2284                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2285                     device_get_nameunit(adapter->dev));
2286         }
2287
2288         /* and Mailbox */
2289         rid = vector + 1;
2290         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2291             RF_SHAREABLE | RF_ACTIVE);
2292         if (!adapter->res) {
2293                 device_printf(dev,
2294                     "Unable to allocate bus resource: MBX interrupt [%d]\n",
2295                     rid);
2296                 return (ENXIO);
2297         }
2298         /* Set the mbx handler function */
2299         error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
2300             NULL, ixv_msix_mbx, adapter, &adapter->tag);
2301         if (error) {
2302                 adapter->res = NULL;
2303                 device_printf(dev, "Failed to register LINK handler");
2304                 return (error);
2305         }
2306 #if __FreeBSD_version >= 800504
2307         bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
2308 #endif
2309         adapter->vector = vector;
2310         /* Tasklets for Mailbox */
2311         TASK_INIT(&adapter->link_task, 0, ixv_handle_link, adapter);
2312         adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
2313             taskqueue_thread_enqueue, &adapter->tq);
2314         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
2315             device_get_nameunit(adapter->dev));
2316         /*
2317          * Due to a broken design, QEMU will fail to properly
2318          * enable the guest for MSI-X unless the vectors in
2319          * the table are all set up, so we must rewrite the
2320          * ENABLE bit in the MSI-X control register again at
2321          * this point to cause it to successfully initialize us.
2322          */
2323         if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
2324                 pci_find_cap(dev, PCIY_MSIX, &rid);
2325                 rid += PCIR_MSIX_CTRL;
2326                 msix_ctrl = pci_read_config(dev, rid, 2);
2327                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2328                 pci_write_config(dev, rid, msix_ctrl, 2);
2329         }
2330
2331         return (0);
2332 } /* ixv_allocate_msix */
2333
2334 /************************************************************************
2335  * ixv_configure_interrupts - Setup MSI-X resources
2336  *
2337  *   Note: The VF device MUST use MSI-X, there is no fallback.
2338  ************************************************************************/
2339 static int
2340 ixv_configure_interrupts(struct adapter *adapter)
2341 {
2342         device_t dev = adapter->dev;
2343         int      rid, want, msgs;
2344
2345         /* Must have at least 2 MSI-X vectors */
2346         msgs = pci_msix_count(dev);
2347         if (msgs < 2)
2348                 goto out;
2349         rid = PCIR_BAR(3);
2350         adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2351             RF_ACTIVE);
2352         if (adapter->msix_mem == NULL) {
2353                 device_printf(adapter->dev, "Unable to map MSI-X table \n");
2354                 goto out;
2355         }
2356
2357         /*
2358          * Want one vector per queue,
2359          * plus an additional one for the mailbox.
2360          */
2361         want = adapter->num_queues + 1;
2362         if (want > msgs) {
2363                 want = msgs;
2364                 adapter->num_queues = msgs - 1;
2365         } else
2366                 msgs = want;
2367         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2368                 device_printf(adapter->dev,
2369                     "Using MSI-X interrupts with %d vectors\n", want);
2370                 /* reflect correct sysctl value */
2371                 ixv_num_queues = adapter->num_queues;
2372
2373                 return (0);
2374         }
2375         /* Release in case alloc was insufficient */
2376         pci_release_msi(dev);
2377 out:
2378         if (adapter->msix_mem != NULL) {
2379                 bus_release_resource(dev, SYS_RES_MEMORY, rid,
2380                     adapter->msix_mem);
2381                 adapter->msix_mem = NULL;
2382         }
2383         device_printf(adapter->dev, "MSI-X config error\n");
2384
2385         return (ENXIO);
2386 } /* ixv_configure_interrupts */
2387
2388
2389 /************************************************************************
2390  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2391  *
2392  *   Done outside of interrupt context since the driver might sleep
2393  ************************************************************************/
2394 static void
2395 ixv_handle_link(void *context, int pending)
2396 {
2397         struct adapter *adapter = context;
2398
2399         adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2400             &adapter->link_up, FALSE);
2401         ixv_update_link_status(adapter);
2402 } /* ixv_handle_link */
2403
2404 /************************************************************************
2405  * ixv_check_link - Used in the local timer to poll for link changes
2406  ************************************************************************/
2407 static void
2408 ixv_check_link(struct adapter *adapter)
2409 {
2410         adapter->hw.mac.get_link_status = TRUE;
2411
2412         adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2413             &adapter->link_up, FALSE);
2414         ixv_update_link_status(adapter);
2415 } /* ixv_check_link */
2416