]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - sys/dev/ixgbe/if_ixv.c
Multiple MFC for ixgbe -- v 3.1.0
[FreeBSD/stable/10.git] / sys / dev / ixgbe / if_ixv.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /*********************************************************************
44  *  Driver version
45  *********************************************************************/
char ixv_driver_version[] = "1.4.0";    /* appended to the probe description string */
47
48 /*********************************************************************
49  *  PCI Device ID Table
50  *
51  *  Used by probe to select devices to load on
52  *  Last field stores an index into ixv_strings
53  *  Last entry must be all 0s
54  *
55  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56  *********************************************************************/
57
/* Supported VF devices; subvendor/subdevice of 0 act as wildcards in probe. */
static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
        /* required last entry: all-zero sentinel terminates the probe loop */
        {0, 0, 0, 0, 0}
};
67
68 /*********************************************************************
69  *  Table of branding strings
70  *********************************************************************/
71
/* Branding strings, indexed by the last field of ixv_vendor_info_array. */
static char    *ixv_strings[] = {
        "Intel(R) PRO/10GbE Virtual Function Network Driver"
};
75
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* newbus device interface entry points */
static int      ixv_probe(device_t);
static int      ixv_attach(device_t);
static int      ixv_detach(device_t);
static int      ixv_shutdown(device_t);
/* ifnet entry points and init/stop paths */
static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixv_init(void *);
static void     ixv_init_locked(struct adapter *);
static void     ixv_stop(void *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
/* resource/hardware setup */
static void     ixv_identify_hardware(struct adapter *);
static int      ixv_allocate_pci_resources(struct adapter *);
static int      ixv_allocate_msix(struct adapter *);
static int      ixv_setup_msix(struct adapter *);
static void     ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_setup_interface(device_t, struct adapter *);
static void     ixv_config_link(struct adapter *);

static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_initialize_receive_units(struct adapter *);

/* interrupt, multicast and link management */
static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static void     ixv_set_multi(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
static void     ixv_configure_ivars(struct adapter *);
static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* VLAN handling */
static void     ixv_setup_vlan_support(struct adapter *);
static void     ixv_register_vlan(void *, struct ifnet *, u16);
static void     ixv_unregister_vlan(void *, struct ifnet *, u16);

/* statistics */
static void     ixv_save_stats(struct adapter *);
static void     ixv_init_stats(struct adapter *);
static void     ixv_update_stats(struct adapter *);
static void     ixv_add_stats_sysctls(struct adapter *);
static void     ixv_set_sysctl_value(struct adapter *, const char *,
                    const char *, int *, int);

/* The MSI/X Interrupt handlers */
static void     ixv_msix_que(void *);
static void     ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void     ixv_handle_que(void *, int);
static void     ixv_handle_mbx(void *, int);
128
129 #ifdef DEV_NETMAP
130 /*
131  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
132  * if_ix.c.
133  */
134 extern void ixgbe_netmap_attach(struct adapter *adapter);
135
136 #include <net/netmap.h>
137 #include <sys/selinfo.h>
138 #include <dev/netmap/netmap_kern.h>
139 #endif /* DEV_NETMAP */
140
141 /*********************************************************************
142  *  FreeBSD Device Interface Entry Points
143  *********************************************************************/
144
/* newbus method table: only the four standard device lifecycle hooks. */
static device_method_t ixv_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixv_probe),
        DEVMETHOD(device_attach, ixv_attach),
        DEVMETHOD(device_detach, ixv_detach),
        DEVMETHOD(device_shutdown, ixv_shutdown),
        DEVMETHOD_END
};
153
154 static driver_t ixv_driver = {
155         "ixv", ixv_methods, sizeof(struct adapter),
156 };
157
158 devclass_t ixv_devclass;
159 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
160 MODULE_DEPEND(ixv, pci, 1, 1, 1);
161 MODULE_DEPEND(ixv, ether, 1, 1, 1);
162 #ifdef DEV_NETMAP
163 MODULE_DEPEND(ix, netmap, 1, 1, 1);
164 #endif /* DEV_NETMAP */
165 /* XXX depend on 'ix' ? */
166
/*
** TUNABLE PARAMETERS:
*/

/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
** NOTE(review): file-scope (not per-adapter), so it appears to be
** shared by all VF instances -- confirm against setup_vlan_support.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
223
224 /*********************************************************************
225  *  Device identification routine
226  *
227  *  ixv_probe determines if the driver should be loaded on
228  *  adapter based on PCI vendor/device id of the adapter.
229  *
230  *  return BUS_PROBE_DEFAULT on success, positive on failure
231  *********************************************************************/
232
233 static int
234 ixv_probe(device_t dev)
235 {
236         ixgbe_vendor_info_t *ent;
237
238         u16     pci_vendor_id = 0;
239         u16     pci_device_id = 0;
240         u16     pci_subvendor_id = 0;
241         u16     pci_subdevice_id = 0;
242         char    adapter_name[256];
243
244
245         pci_vendor_id = pci_get_vendor(dev);
246         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
247                 return (ENXIO);
248
249         pci_device_id = pci_get_device(dev);
250         pci_subvendor_id = pci_get_subvendor(dev);
251         pci_subdevice_id = pci_get_subdevice(dev);
252
253         ent = ixv_vendor_info_array;
254         while (ent->vendor_id != 0) {
255                 if ((pci_vendor_id == ent->vendor_id) &&
256                     (pci_device_id == ent->device_id) &&
257
258                     ((pci_subvendor_id == ent->subvendor_id) ||
259                      (ent->subvendor_id == 0)) &&
260
261                     ((pci_subdevice_id == ent->subdevice_id) ||
262                      (ent->subdevice_id == 0))) {
263                         sprintf(adapter_name, "%s, Version - %s",
264                                 ixv_strings[ent->index],
265                                 ixv_driver_version);
266                         device_set_desc_copy(dev, adapter_name);
267                         return (BUS_PROBE_DEFAULT);
268                 }
269                 ent++;
270         }
271         return (ENXIO);
272 }
273
274 /*********************************************************************
275  *  Device initialization routine
276  *
277  *  The attach entry point is called when the driver is being loaded.
278  *  This routine identifies the type of hardware, allocates all resources
279  *  and initializes the hardware.
280  *
281  *  return 0 on success, positive on failure
282  *********************************************************************/
283
284 static int
285 ixv_attach(device_t dev)
286 {
287         struct adapter *adapter;
288         struct ixgbe_hw *hw;
289         int             error = 0;
290
291         INIT_DEBUGOUT("ixv_attach: begin");
292
293         /* Allocate, clear, and link in our adapter structure */
294         adapter = device_get_softc(dev);
295         adapter->dev = adapter->osdep.dev = dev;
296         hw = &adapter->hw;
297
298 #ifdef DEV_NETMAP
299         adapter->init_locked = ixv_init_locked;
300         adapter->stop_locked = ixv_stop;
301 #endif
302
303         /* Core Lock Init*/
304         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
305
306         /* SYSCTL APIs */
307         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
308                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
309                         OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
310                         adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
311
312         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
313                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
314                         OID_AUTO, "enable_aim", CTLFLAG_RW,
315                         &ixv_enable_aim, 1, "Interrupt Moderation");
316
317         /* Set up the timer callout */
318         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
319
320         /* Determine hardware revision */
321         ixv_identify_hardware(adapter);
322
323         /* Do base PCI setup - map BAR0 */
324         if (ixv_allocate_pci_resources(adapter)) {
325                 device_printf(dev, "Allocation of PCI resources failed\n");
326                 error = ENXIO;
327                 goto err_out;
328         }
329
330         /* Sysctls for limiting the amount of work done in the taskqueues */
331         ixv_set_sysctl_value(adapter, "rx_processing_limit",
332             "max number of rx packets to process",
333             &adapter->rx_process_limit, ixv_rx_process_limit);
334
335         ixv_set_sysctl_value(adapter, "tx_processing_limit",
336             "max number of tx packets to process",
337             &adapter->tx_process_limit, ixv_tx_process_limit);
338
339         /* Sysctls for limiting the amount of work done in the taskqueues */
340         ixv_set_sysctl_value(adapter, "rx_processing_limit",
341             "max number of rx packets to process",
342             &adapter->rx_process_limit, ixv_rx_process_limit);
343
344         ixv_set_sysctl_value(adapter, "tx_processing_limit",
345             "max number of tx packets to process",
346             &adapter->tx_process_limit, ixv_tx_process_limit);
347
348         /* Do descriptor calc and sanity checks */
349         if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
350             ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
351                 device_printf(dev, "TXD config issue, using default!\n");
352                 adapter->num_tx_desc = DEFAULT_TXD;
353         } else
354                 adapter->num_tx_desc = ixv_txd;
355
356         if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
357             ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
358                 device_printf(dev, "RXD config issue, using default!\n");
359                 adapter->num_rx_desc = DEFAULT_RXD;
360         } else
361                 adapter->num_rx_desc = ixv_rxd;
362
363         /* Allocate our TX/RX Queues */
364         if (ixgbe_allocate_queues(adapter)) {
365                 error = ENOMEM;
366                 goto err_out;
367         }
368
369         /*
370         ** Initialize the shared code: its
371         ** at this point the mac type is set.
372         */
373         error = ixgbe_init_shared_code(hw);
374         if (error) {
375                 device_printf(dev,"Shared Code Initialization Failure\n");
376                 error = EIO;
377                 goto err_late;
378         }
379
380         /* Setup the mailbox */
381         ixgbe_init_mbx_params_vf(hw);
382
383         ixgbe_reset_hw(hw);
384
385         /* Get the Mailbox API version */
386         device_printf(dev,"MBX API %d negotiation: %d\n",
387             ixgbe_mbox_api_11,
388             ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));
389
390         error = ixgbe_init_hw(hw);
391         if (error) {
392                 device_printf(dev,"Hardware Initialization Failure\n");
393                 error = EIO;
394                 goto err_late;
395         }
396         
397         error = ixv_allocate_msix(adapter); 
398         if (error) 
399                 goto err_late;
400
401         /* If no mac address was assigned, make a random one */
402         if (!ixv_check_ether_addr(hw->mac.addr)) {
403                 u8 addr[ETHER_ADDR_LEN];
404                 arc4rand(&addr, sizeof(addr), 0);
405                 addr[0] &= 0xFE;
406                 addr[0] |= 0x02;
407                 bcopy(addr, hw->mac.addr, sizeof(addr));
408         }
409
410         /* Setup OS specific network interface */
411         ixv_setup_interface(dev, adapter);
412
413         /* Do the stats setup */
414         ixv_save_stats(adapter);
415         ixv_init_stats(adapter);
416         ixv_add_stats_sysctls(adapter);
417
418         /* Register for VLAN events */
419         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
420             ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
421         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
422             ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
423
424 #ifdef DEV_NETMAP
425         ixgbe_netmap_attach(adapter);
426 #endif /* DEV_NETMAP */
427         INIT_DEBUGOUT("ixv_attach: end");
428         return (0);
429
430 err_late:
431         ixgbe_free_transmit_structures(adapter);
432         ixgbe_free_receive_structures(adapter);
433 err_out:
434         ixv_free_pci_resources(adapter);
435         return (error);
436
437 }
438
439 /*********************************************************************
440  *  Device removal routine
441  *
442  *  The detach entry point is called when the driver is being removed.
443  *  This routine stops the adapter and deallocates all the resources
444  *  that were allocated for driver operation.
445  *
446  *  return 0 on success, positive on failure
447  *********************************************************************/
448
static int
ixv_detach(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);
        struct ix_queue *que = adapter->queues;

        INIT_DEBUGOUT("ixv_detach: begin");

        /* Make sure VLANS are not using driver */
        if (adapter->ifp->if_vlantrunk != NULL) {
                device_printf(dev,"Vlan in use, detach first\n");
                return (EBUSY);
        }

        /* Stop the hardware under the core lock before tearing down */
        IXGBE_CORE_LOCK(adapter);
        ixv_stop(adapter);
        IXGBE_CORE_UNLOCK(adapter);

        /* Drain and free each queue's taskqueue (tx + rx tasks) */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                if (que->tq) {
                        struct tx_ring  *txr = que->txr;
                        taskqueue_drain(que->tq, &txr->txq_task);
                        taskqueue_drain(que->tq, &que->que_task);
                        taskqueue_free(que->tq);
                }
        }

        /* Drain the Mailbox(link) queue */
        if (adapter->tq) {
                taskqueue_drain(adapter->tq, &adapter->link_task);
                taskqueue_free(adapter->tq);
        }

        /* Unregister VLAN events */
        if (adapter->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
        if (adapter->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

        /* Detach from the stack, then stop the timer for good */
        ether_ifdetach(adapter->ifp);
        callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
        netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
        /* Release bus resources, children, ifnet and ring memory */
        ixv_free_pci_resources(adapter);
        bus_generic_detach(dev);
        if_free(adapter->ifp);

        ixgbe_free_transmit_structures(adapter);
        ixgbe_free_receive_structures(adapter);

        IXGBE_CORE_LOCK_DESTROY(adapter);
        return (0);
}
503
504 /*********************************************************************
505  *
506  *  Shutdown entry point
507  *
508  **********************************************************************/
509 static int
510 ixv_shutdown(device_t dev)
511 {
512         struct adapter *adapter = device_get_softc(dev);
513         IXGBE_CORE_LOCK(adapter);
514         ixv_stop(adapter);
515         IXGBE_CORE_UNLOCK(adapter);
516         return (0);
517 }
518
519
520 /*********************************************************************
521  *  Ioctl entry point
522  *
523  *  ixv_ioctl is called when the user wants to configure the
524  *  interface.
525  *
526  *  return 0 on success, positive on failure
527  **********************************************************************/
528
static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
        struct adapter  *adapter = ifp->if_softc;
        struct ifreq    *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
        struct ifaddr   *ifa = (struct ifaddr *) data;
        bool            avoid_reset = FALSE;
#endif
        int             error = 0;

        switch (command) {

        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                                ixv_init(adapter);
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
                } else
                        error = ether_ioctl(ifp, command, data);
                break;
                /*
                 * NOTE(review): this break is compiled out when neither
                 * INET nor INET6 is defined, so SIOCSIFADDR then falls
                 * through into SIOCSIFMTU -- confirm that is intended.
                 */
#endif
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
                if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
                        error = EINVAL;
                } else {
                        /* apply the new MTU and reinit under the core lock */
                        IXGBE_CORE_LOCK(adapter);
                        ifp->if_mtu = ifr->ifr_mtu;
                        adapter->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
                        ixv_init_locked(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
                /* bring the interface up/down to match IFF_UP */
                IXGBE_CORE_LOCK(adapter);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                                ixv_init_locked(adapter);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixv_stop(adapter);
                adapter->if_flags = ifp->if_flags;
                IXGBE_CORE_UNLOCK(adapter);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
                /* reload the multicast filter with interrupts masked */
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXGBE_CORE_LOCK(adapter);
                        ixv_disable_intr(adapter);
                        ixv_set_multi(adapter);
                        ixv_enable_intr(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
                break;
        case SIOCSIFCAP:
        {
                /* toggle only the capability bits the caller changed */
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
                if (mask & IFCAP_HWCSUM)
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                if (mask & IFCAP_TSO4)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                /* reinit so the hardware picks up the new offload set */
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXGBE_CORE_LOCK(adapter);
                        ixv_init_locked(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                VLAN_CAPABILITIES(ifp);
                break;
        }

        default:
                IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}
636
637 /*********************************************************************
638  *  Init entry point
639  *
640  *  This routine is used in two ways. It is used by the stack as
641  *  init entry point in network interface structure. It is also used
642  *  by the driver as a hw/sw initialization routine to get to a
643  *  consistent state.
644  *
645  *  return 0 on success, positive on failure
646  **********************************************************************/
647 #define IXGBE_MHADD_MFS_SHIFT 16
648
/*
 * Bring the adapter to a consistent running state.  Used both as the
 * ifnet init path and internally (MTU change, capability change).
 * Caller must hold the core mutex (asserted below).
 */
static void
ixv_init_locked(struct adapter *adapter)
{
        struct ifnet    *ifp = adapter->ifp;
        device_t        dev = adapter->dev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32             mhadd, gpie;

        INIT_DEBUGOUT("ixv_init: begin");
        mtx_assert(&adapter->core_mtx, MA_OWNED);
        hw->adapter_stopped = FALSE;
        ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

        /* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
             IXGBE_ETH_LENGTH_OF_ADDRESS);
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
        hw->addr_ctrl.rar_used_count = 1;

        /* Prepare transmit descriptors and buffers */
        if (ixgbe_setup_transmit_structures(adapter)) {
                device_printf(dev,"Could not setup transmit structures\n");
                ixv_stop(adapter);
                return;
        }

        /* Reset hardware, then program the TX side */
        ixgbe_reset_hw(hw);
        ixv_initialize_transmit_units(adapter);

        /* Setup Multicast table */
        ixv_set_multi(adapter);

        /*
        ** Determine the correct mbuf pool
        ** for doing jumbo/headersplit
        */
        if (ifp->if_mtu > ETHERMTU)
                adapter->rx_mbuf_sz = MJUMPAGESIZE;
        else
                adapter->rx_mbuf_sz = MCLBYTES;

        /* Prepare receive descriptors and buffers */
        if (ixgbe_setup_receive_structures(adapter)) {
                device_printf(dev,"Could not setup receive structures\n");
                ixv_stop(adapter);
                return;
        }

        /* Configure RX settings */
        ixv_initialize_receive_units(adapter);

        /* Enable Enhanced MSIX mode */
        gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
        gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
        gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

        /* Set the various hardware offload abilities from if_capenable */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_TSO;
        if (ifp->if_capenable & IFCAP_TXCSUM) {
                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
                ifp->if_hwassist |= CSUM_SCTP;
#endif
        }

        /* Set MTU size (only needs programming for jumbo frames) */
        if (ifp->if_mtu > ETHERMTU) {
                mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
                mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
        }

        /* Set up VLAN offload and filter */
        ixv_setup_vlan_support(adapter);

        /* Start the watchdog/stats timer (1 Hz) */
        callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

        /* Set up MSI/X routing */
        ixv_configure_ivars(adapter);

        /* Set up auto-mask */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

        /* Set moderation on the Link interrupt */
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

        /* Stats init */
        ixv_init_stats(adapter);

        /* Config/Enable Link */
        ixv_config_link(adapter);

        /* And now turn on interrupts */
        ixv_enable_intr(adapter);

        /* Now inform the stack we're ready */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        return;
}
758
/*
 * ifnet init entry point: take the core lock and run the real
 * initialization in ixv_init_locked().
 */
static void
ixv_init(void *arg)
{
        struct adapter *sc = arg;

        IXGBE_CORE_LOCK(sc);
        ixv_init_locked(sc);
        IXGBE_CORE_UNLOCK(sc);
}
769
770
771 /*
772 **
773 ** MSIX Interrupt Handlers and Tasklets
774 **
775 */
776
777 static inline void
778 ixv_enable_queue(struct adapter *adapter, u32 vector)
779 {
780         struct ixgbe_hw *hw = &adapter->hw;
781         u32     queue = 1 << vector;
782         u32     mask;
783
784         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
785         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
786 }
787
788 static inline void
789 ixv_disable_queue(struct adapter *adapter, u32 vector)
790 {
791         struct ixgbe_hw *hw = &adapter->hw;
792         u64     queue = (u64)(1 << vector);
793         u32     mask;
794
795         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
796         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
797 }
798
799 static inline void
800 ixv_rearm_queues(struct adapter *adapter, u64 queues)
801 {
802         u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
803         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
804 }
805
806
/*
 * Deferred (taskqueue) per-queue service routine: cleans RX, cleans
 * TX, restarts transmission if the stack has queued packets, and
 * either reschedules itself (more RX work pending) or re-enables the
 * queue's interrupt.
 */
static void
ixv_handle_que(void *context, int pending)
{
        struct ix_queue *que = context;
        struct adapter  *adapter = que->adapter;
        struct tx_ring  *txr = que->txr;
        struct ifnet    *ifp = adapter->ifp;
        bool            more;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                /* more == TRUE means rxeof hit its processing limit */
                more = ixgbe_rxeof(que);
                IXGBE_TX_LOCK(txr);
                ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
                if (!drbr_empty(ifp, txr->br))
                        ixgbe_mq_start_locked(ifp, txr);
#else
                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        ixgbe_start_locked(txr, ifp);
#endif
                IXGBE_TX_UNLOCK(txr);
                if (more) {
                        /* still work to do: requeue without unmasking */
                        taskqueue_enqueue(que->tq, &que->que_task);
                        return;
                }
        }

        /* Reenable this interrupt */
        ixv_enable_queue(adapter, que->msix);
        return;
}
838
/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 *  Masks this queue's vector, cleans RX then TX (TX under the TX
 *  lock), optionally computes an adaptive interrupt moderation
 *  (AIM) value for the next interval, and finally either enqueues
 *  the deferred task (more RX pending) or re-enables the vector.
 *
 **********************************************************************/
void
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = que->txr;
	struct rx_ring  *rxr = que->rxr;
	bool            more;
	u32             newitr = 0;

	/* Mask this vector until processing completes */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* New setting is the larger of the TX/RX average bytes/packet */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Mirror the value into the upper half of the register word */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
931
932 static void
933 ixv_msix_mbx(void *arg)
934 {
935         struct adapter  *adapter = arg;
936         struct ixgbe_hw *hw = &adapter->hw;
937         u32             reg;
938
939         ++adapter->link_irq;
940
941         /* First get the cause */
942         reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
943         /* Clear interrupt with write */
944         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
945
946         /* Link status change */
947         if (reg & IXGBE_EICR_LSC)
948                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
949
950         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
951         return;
952 }
953
954 /*********************************************************************
955  *
956  *  Media Ioctl callback
957  *
958  *  This routine is called whenever the user queries the status of
959  *  the interface using ifconfig.
960  *
961  **********************************************************************/
962 static void
963 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
964 {
965         struct adapter *adapter = ifp->if_softc;
966
967         INIT_DEBUGOUT("ixv_media_status: begin");
968         IXGBE_CORE_LOCK(adapter);
969         ixv_update_link_status(adapter);
970
971         ifmr->ifm_status = IFM_AVALID;
972         ifmr->ifm_active = IFM_ETHER;
973
974         if (!adapter->link_active) {
975                 IXGBE_CORE_UNLOCK(adapter);
976                 return;
977         }
978
979         ifmr->ifm_status |= IFM_ACTIVE;
980
981         switch (adapter->link_speed) {
982                 case IXGBE_LINK_SPEED_1GB_FULL:
983                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
984                         break;
985                 case IXGBE_LINK_SPEED_10GB_FULL:
986                         ifmr->ifm_active |= IFM_FDX;
987                         break;
988         }
989
990         IXGBE_CORE_UNLOCK(adapter);
991
992         return;
993 }
994
995 /*********************************************************************
996  *
997  *  Media Ioctl callback
998  *
999  *  This routine is called when the user changes speed/duplex using
1000  *  media/mediopt option with ifconfig.
1001  *
1002  **********************************************************************/
1003 static int
1004 ixv_media_change(struct ifnet * ifp)
1005 {
1006         struct adapter *adapter = ifp->if_softc;
1007         struct ifmedia *ifm = &adapter->media;
1008
1009         INIT_DEBUGOUT("ixv_media_change: begin");
1010
1011         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1012                 return (EINVAL);
1013
1014         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1015         case IFM_AUTO:
1016                 break;
1017         default:
1018                 device_printf(adapter->dev, "Only auto media type\n");
1019                 return (EINVAL);
1020         }
1021
1022         return (0);
1023 }
1024
1025
1026 /*********************************************************************
1027  *  Multicast Update
1028  *
1029  *  This routine is called whenever multicast address list is updated.
1030  *
1031  **********************************************************************/
1032 #define IXGBE_RAR_ENTRIES 16
1033
1034 static void
1035 ixv_set_multi(struct adapter *adapter)
1036 {
1037         u8      mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1038         u8      *update_ptr;
1039         struct  ifmultiaddr *ifma;
1040         int     mcnt = 0;
1041         struct ifnet   *ifp = adapter->ifp;
1042
1043         IOCTL_DEBUGOUT("ixv_set_multi: begin");
1044
1045 #if __FreeBSD_version < 800000
1046         IF_ADDR_LOCK(ifp);
1047 #else
1048         if_maddr_rlock(ifp);
1049 #endif
1050         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1051                 if (ifma->ifma_addr->sa_family != AF_LINK)
1052                         continue;
1053                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1054                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1055                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1056                 mcnt++;
1057         }
1058 #if __FreeBSD_version < 800000
1059         IF_ADDR_UNLOCK(ifp);
1060 #else
1061         if_maddr_runlock(ifp);
1062 #endif
1063
1064         update_ptr = mta;
1065
1066         ixgbe_update_mc_addr_list(&adapter->hw,
1067             update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1068
1069         return;
1070 }
1071
1072 /*
1073  * This is an iterator function now needed by the multicast
1074  * shared code. It simply feeds the shared code routine the
1075  * addresses in the array of ixv_set_multi() one by one.
1076  */
1077 static u8 *
1078 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1079 {
1080         u8 *addr = *update_ptr;
1081         u8 *newptr;
1082         *vmdq = 0;
1083
1084         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1085         *update_ptr = newptr;
1086         return addr;
1087 }
1088
/*********************************************************************
 *  Timer routine
 *
 *  Runs once per second with the core lock held: refreshes link
 *  status and statistics, then checks every TX queue for progress
 *  so a fully stalled device can be detected and reinitialized
 *  (watchdog).
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter  *adapter = arg;
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64             queues = 0;	/* bitmap of queues with pending work */
	int             hung = 0;	/* count of queues declared hung */

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		/*
		** NOTE(review): the threshold test reads que->busy but
		** marks que->txr->busy as hung -- two different counters;
		** confirm this asymmetry is intentional.
		*/
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm for another pass one second from now */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	/* Every queue is hung: mark the interface down and reinit */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
}
1162
1163 /*
1164 ** Note: this routine updates the OS on the link state
1165 **      the real check of the hardware only happens with
1166 **      a link interrupt.
1167 */
1168 static void
1169 ixv_update_link_status(struct adapter *adapter)
1170 {
1171         struct ifnet    *ifp = adapter->ifp;
1172         device_t dev = adapter->dev;
1173
1174         if (adapter->link_up){ 
1175                 if (adapter->link_active == FALSE) {
1176                         if (bootverbose)
1177                                 device_printf(dev,"Link is up %d Gbps %s \n",
1178                                     ((adapter->link_speed == 128)? 10:1),
1179                                     "Full Duplex");
1180                         adapter->link_active = TRUE;
1181                         if_link_state_change(ifp, LINK_STATE_UP);
1182                 }
1183         } else { /* Link down */
1184                 if (adapter->link_active == TRUE) {
1185                         if (bootverbose)
1186                                 device_printf(dev,"Link is Down\n");
1187                         if_link_state_change(ifp, LINK_STATE_DOWN);
1188                         adapter->link_active = FALSE;
1189                 }
1190         }
1191
1192         return;
1193 }
1194
1195
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 *  Must be called with the core lock held.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped before stopping again -- presumably the
	 * shared code short-circuits ixgbe_stop_adapter() when the flag
	 * is already set. NOTE(review): confirm against shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1229
1230
1231 /*********************************************************************
1232  *
1233  *  Determine hardware revision.
1234  *
1235  **********************************************************************/
1236 static void
1237 ixv_identify_hardware(struct adapter *adapter)
1238 {
1239         device_t        dev = adapter->dev;
1240         struct ixgbe_hw *hw = &adapter->hw;
1241
1242         /*
1243         ** Make sure BUSMASTER is set, on a VM under
1244         ** KVM it may not be and will break things.
1245         */
1246         pci_enable_busmaster(dev);
1247
1248         /* Save off the information about this board */
1249         hw->vendor_id = pci_get_vendor(dev);
1250         hw->device_id = pci_get_device(dev);
1251         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1252         hw->subsystem_vendor_id =
1253             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1254         hw->subsystem_device_id =
1255             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1256
1257         /* We need this to determine device-specific things */
1258         ixgbe_set_mac_type(hw);
1259
1260         /* Set the right number of segments */
1261         adapter->num_segs = IXGBE_82599_SCATTER;
1262
1263         return;
1264 }
1265
1266 /*********************************************************************
1267  *
1268  *  Setup MSIX Interrupt resources and handlers 
1269  *
1270  **********************************************************************/
1271 static int
1272 ixv_allocate_msix(struct adapter *adapter)
1273 {
1274         device_t        dev = adapter->dev;
1275         struct          ix_queue *que = adapter->queues;
1276         struct          tx_ring *txr = adapter->tx_rings;
1277         int             error, rid, vector = 0;
1278
1279         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1280                 rid = vector + 1;
1281                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1282                     RF_SHAREABLE | RF_ACTIVE);
1283                 if (que->res == NULL) {
1284                         device_printf(dev,"Unable to allocate"
1285                             " bus resource: que interrupt [%d]\n", vector);
1286                         return (ENXIO);
1287                 }
1288                 /* Set the handler function */
1289                 error = bus_setup_intr(dev, que->res,
1290                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1291                     ixv_msix_que, que, &que->tag);
1292                 if (error) {
1293                         que->res = NULL;
1294                         device_printf(dev, "Failed to register QUE handler");
1295                         return (error);
1296                 }
1297 #if __FreeBSD_version >= 800504
1298                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1299 #endif
1300                 que->msix = vector;
1301                 adapter->active_queues |= (u64)(1 << que->msix);
1302                 /*
1303                 ** Bind the msix vector, and thus the
1304                 ** ring to the corresponding cpu.
1305                 */
1306                 if (adapter->num_queues > 1)
1307                         bus_bind_intr(dev, que->res, i);
1308                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1309                 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1310                 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1311                     taskqueue_thread_enqueue, &que->tq);
1312                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1313                     device_get_nameunit(adapter->dev));
1314         }
1315
1316         /* and Mailbox */
1317         rid = vector + 1;
1318         adapter->res = bus_alloc_resource_any(dev,
1319             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1320         if (!adapter->res) {
1321                 device_printf(dev,"Unable to allocate"
1322             " bus resource: MBX interrupt [%d]\n", rid);
1323                 return (ENXIO);
1324         }
1325         /* Set the mbx handler function */
1326         error = bus_setup_intr(dev, adapter->res,
1327             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1328             ixv_msix_mbx, adapter, &adapter->tag);
1329         if (error) {
1330                 adapter->res = NULL;
1331                 device_printf(dev, "Failed to register LINK handler");
1332                 return (error);
1333         }
1334 #if __FreeBSD_version >= 800504
1335         bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1336 #endif
1337         adapter->vector = vector;
1338         /* Tasklets for Mailbox */
1339         TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1340         adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1341             taskqueue_thread_enqueue, &adapter->tq);
1342         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1343             device_get_nameunit(adapter->dev));
1344         /*
1345         ** Due to a broken design QEMU will fail to properly
1346         ** enable the guest for MSIX unless the vectors in
1347         ** the table are all set up, so we must rewrite the
1348         ** ENABLE in the MSIX control register again at this
1349         ** point to cause it to successfully initialize us.
1350         */
1351         if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1352                 int msix_ctrl;
1353                 pci_find_cap(dev, PCIY_MSIX, &rid);
1354                 rid += PCIR_MSIX_CTRL;
1355                 msix_ctrl = pci_read_config(dev, rid, 2);
1356                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1357                 pci_write_config(dev, rid, msix_ctrl, 2);
1358         }
1359
1360         return (0);
1361 }
1362
1363 /*
1364  * Setup MSIX resources, note that the VF
1365  * device MUST use MSIX, there is no fallback.
1366  */
1367 static int
1368 ixv_setup_msix(struct adapter *adapter)
1369 {
1370         device_t dev = adapter->dev;
1371         int rid, want, msgs;
1372
1373
1374         /* Must have at least 2 MSIX vectors */
1375         msgs = pci_msix_count(dev);
1376         if (msgs < 2)
1377                 goto out;
1378         rid = PCIR_BAR(3);
1379         adapter->msix_mem = bus_alloc_resource_any(dev,
1380             SYS_RES_MEMORY, &rid, RF_ACTIVE);
1381         if (adapter->msix_mem == NULL) {
1382                 device_printf(adapter->dev,
1383                     "Unable to map MSIX table \n");
1384                 goto out;
1385         }
1386
1387         /*
1388         ** Want vectors for the queues,
1389         ** plus an additional for mailbox.
1390         */
1391         want = adapter->num_queues + 1;
1392         if (want > msgs) {
1393                 want = msgs;
1394                 adapter->num_queues = msgs - 1;
1395         } else
1396                 msgs = want;
1397         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
1398                 device_printf(adapter->dev,
1399                     "Using MSIX interrupts with %d vectors\n", want);
1400                 return (want);
1401         }
1402         /* Release in case alloc was insufficient */
1403         pci_release_msi(dev);
1404 out:
1405         if (adapter->msix_mem != NULL) {
1406                 bus_release_resource(dev, SYS_RES_MEMORY,
1407                     rid, adapter->msix_mem);
1408                 adapter->msix_mem = NULL;
1409         }
1410         device_printf(adapter->dev,"MSIX config error\n");
1411         return (ENXIO);
1412 }
1413
1414
/*
 * Map the device register BAR and configure MSIX. Returns 0 on
 * success or ENXIO on failure.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int             rid;
	device_t        dev = adapter->dev;

	/* Map BAR(0): the device register space */
	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev,"Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Save the bus tag/handle so register access macros can use them */
	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	**
	** NOTE(review): ixv_setup_msix() returns either a vector count
	** or ENXIO -- a legitimate vector count equal to the value of
	** ENXIO would be misread as failure here; confirm the intended
	** convention.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
1452
/*
 * Tear down interrupt handlers and release every PCI resource
 * acquired in attach: per-queue IRQs, the mailbox IRQ, MSIX
 * vectors, the MSIX table BAR, and the register BAR.
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct          ix_queue *que = adapter->queues;
	device_t        dev = adapter->dev;
	int             rid, memrid;

	/*
	** NOTE(review): releases the MSIX table with the 82598 BAR
	** index while ixv_setup_msix() allocated it at PCIR_BAR(3);
	** confirm these resolve to the same rid.
	*/
	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Release the MSIX vectors, table BAR, and register BAR */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
1514
1515 /*********************************************************************
1516  *
1517  *  Setup networking device structure and register an interface.
1518  *
1519  **********************************************************************/
1520 static void
1521 ixv_setup_interface(device_t dev, struct adapter *adapter)
1522 {
1523         struct ifnet   *ifp;
1524
1525         INIT_DEBUGOUT("ixv_setup_interface: begin");
1526
1527         ifp = adapter->ifp = if_alloc(IFT_ETHER);
1528         if (ifp == NULL)
1529                 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1530         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1531         ifp->if_baudrate = 1000000000;
1532         ifp->if_init = ixv_init;
1533         ifp->if_softc = adapter;
1534         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1535         ifp->if_ioctl = ixv_ioctl;
1536 #if __FreeBSD_version >= 800000
1537         ifp->if_transmit = ixgbe_mq_start;
1538         ifp->if_qflush = ixgbe_qflush;
1539 #else
1540         ifp->if_start = ixgbe_start;
1541 #endif
1542         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1543
1544         ether_ifattach(ifp, adapter->hw.mac.addr);
1545
1546         adapter->max_frame_size =
1547             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1548
1549         /*
1550          * Tell the upper layer(s) we support long frames.
1551          */
1552         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1553
1554         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1555         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1556         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1557                              |  IFCAP_VLAN_HWTSO
1558                              |  IFCAP_VLAN_MTU;
1559         ifp->if_capabilities |= IFCAP_LRO;
1560         ifp->if_capenable = ifp->if_capabilities;
1561
1562         /*
1563          * Specify the media types supported by this adapter and register
1564          * callbacks to update media and link information
1565          */
1566         ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1567                      ixv_media_status);
1568         ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1569         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1570         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1571
1572         return;
1573 }
1574         
1575 static void
1576 ixv_config_link(struct adapter *adapter)
1577 {
1578         struct ixgbe_hw *hw = &adapter->hw;
1579         u32     autoneg, err = 0;
1580
1581         if (hw->mac.ops.check_link)
1582                 err = hw->mac.ops.check_link(hw, &autoneg,
1583                     &adapter->link_up, FALSE);
1584         if (err)
1585                 goto out;
1586
1587         if (hw->mac.ops.setup_link)
1588                 err = hw->mac.ops.setup_link(hw,
1589                     autoneg, adapter->link_up);
1590 out:
1591         return;
1592 }
1593
1594
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs each TX ring's write-back threshold, head/tail
 *  indices, descriptor base/length, and relaxed-ordering control,
 *  then enables the ring last so the hardware never sees a
 *  partially-configured queue.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64     tdba = txr->txdma.dma_paddr;
		u32     txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters: descriptor base (split 32/32) and length */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		       (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed write-back ordering for descriptors */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1642
1643
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

/*
** Program the VF receive path: per-queue descriptor ring base/length,
** receive buffer size (SRRCTL), head/tail pointers and the RXDCTL
** enable bit, plus the packet-split type and RX checksum registers.
** Each queue is disabled before it is reprogrammed and enabled last.
*/
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
        struct  rx_ring *rxr = adapter->rx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ifnet    *ifp = adapter->ifp;
        u32             bufsz, rxcsum, psrtype;
        int             max_frame;

        /* Pick the RX buffer size from the MTU: 4K clusters for jumbo
         * frames, standard 2K otherwise.  The value is pre-shifted into
         * SRRCTL BSIZEPKT units (1KB granularity). */
        if (ifp->if_mtu > ETHERMTU)
                bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        else
                bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

        /* Header types used for packet-split replenishment */
        psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
            IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
            IXGBE_PSRTYPE_L2HDR;

        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

        /* Tell PF our expected packet-size */
        max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
        ixgbevf_rlpml_set_vf(hw, max_frame);

        for (int i = 0; i < adapter->num_queues; i++, rxr++) {
                u64 rdba = rxr->rxdma.dma_paddr;
                u32 reg, rxdctl;

                /* Disable the queue */
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                /* Poll (up to ~10ms) for the disable to take effect */
                for (int j = 0; j < 10; j++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                msec_delay(1);
                        else
                                break;
                }
                wmb();
                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
                    (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
                    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

                /* Reset the ring indices */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

                /* Set up the SRRCTL register: one-buffer advanced
                 * descriptors with the buffer size chosen above */
                reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
                reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                reg |= bufsz;
                reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
                IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

                /* Setup the HW Rx Head and Tail Descriptor Pointers.
                 * NOTE(review): the tail is written again below once the
                 * queue is enabled; this early write looks redundant —
                 * confirm against the 82599/x540 VF programming flow. */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                    adapter->num_rx_desc - 1);

                /* Set Rx Tail register */
                rxr->tail = IXGBE_VFRDT(rxr->me);

                /* Do the queue enabling last */
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                /* Poll (up to ~10ms) for the enable to latch */
                for (int k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                break;
                        else
                                msec_delay(1);
                }
                wmb();

                /* Set the Tail Pointer */
#ifdef DEV_NETMAP
                /*
                 * In netmap mode, we must preserve the buffers made
                 * available to userspace before the if_init()
                 * (this is true by default on the TX side, because
                 * init makes all buffers available to userspace).
                 *
                 * netmap_reset() and the device specific routines
                 * (e.g. ixgbe_setup_receive_rings()) map these
                 * buffers at the end of the NIC ring, so here we
                 * must set the RDT (tail) register to make sure
                 * they are not overwritten.
                 *
                 * In this driver the NIC ring starts at RDH = 0,
                 * RDT points to the last slot available for reception (?),
                 * so RDT = num_rx_desc - 1 means the whole ring is available.
                 */
                if (ifp->if_capenable & IFCAP_NETMAP) {
                        struct netmap_adapter *na = NA(adapter->ifp);
                        struct netmap_kring *kring = &na->rx_rings[i];
                        int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

                        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
                } else
#endif /* DEV_NETMAP */
                        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                            adapter->num_rx_desc - 1);
        }

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        if (ifp->if_capenable & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        /* If RSS hash writeback (PCSD) is off, enable IP csum instead */
        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

        return;
}
1774
/*
** Re-enable VLAN tag stripping on all RX queues and repopulate the
** hardware VFTA from the driver's shadow copy after a soft reset.
*/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32             ctrl, vid, vfta, retry;


        /*
        ** We get here thru init_locked, meaning
        ** a soft reset, this has already cleared
        ** the VFTA and other state, so if there
        ** have been no vlan's registered do nothing.
        */
        if (adapter->num_vlans == 0)
                return;

        /* Enable VLAN-tag stripping (VME) on each RX queue */
        for (int i = 0; i < adapter->num_queues; i++) {
                ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                ctrl |= IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
        }

        /*
        ** A soft reset zero's out the VFTA, so
        ** we need to repopulate it now.
        */
        for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
                if (ixv_shadow_vfta[i] == 0)
                        continue;
                vfta = ixv_shadow_vfta[i];
                /*
                ** Reconstruct the vlan id's
                ** based on the bits set in each
                ** of the array ints (32 vids per word).
                */
                for ( int j = 0; j < 32; j++) {
                        retry = 0;
                        if ((vfta & (1 << j)) == 0)
                                continue;
                        vid = (i * 32) + j;
                        /* Call the shared code mailbox routine;
                        ** the PF may fail the request transiently,
                        ** so retry up to 5 times before giving up. */
                        while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
                                if (++retry > 5)
                                        break;
                        }
                }
        }
}
1824
1825 /*
1826 ** This routine is run via an vlan config EVENT,
1827 ** it enables us to use the HW Filter table since
1828 ** we can get the vlan id. This just creates the
1829 ** entry in the soft version of the VFTA, init will
1830 ** repopulate the real table.
1831 */
1832 static void
1833 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1834 {
1835         struct adapter  *adapter = ifp->if_softc;
1836         u16             index, bit;
1837
1838         if (ifp->if_softc !=  arg)   /* Not our event */
1839                 return;
1840
1841         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1842                 return;
1843
1844         IXGBE_CORE_LOCK(adapter);
1845         index = (vtag >> 5) & 0x7F;
1846         bit = vtag & 0x1F;
1847         ixv_shadow_vfta[index] |= (1 << bit);
1848         ++adapter->num_vlans;
1849         /* Re-init to load the changes */
1850         ixv_init_locked(adapter);
1851         IXGBE_CORE_UNLOCK(adapter);
1852 }
1853
1854 /*
1855 ** This routine is run via an vlan
1856 ** unconfig EVENT, remove our entry
1857 ** in the soft vfta.
1858 */
1859 static void
1860 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1861 {
1862         struct adapter  *adapter = ifp->if_softc;
1863         u16             index, bit;
1864
1865         if (ifp->if_softc !=  arg)
1866                 return;
1867
1868         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1869                 return;
1870
1871         IXGBE_CORE_LOCK(adapter);
1872         index = (vtag >> 5) & 0x7F;
1873         bit = vtag & 0x1F;
1874         ixv_shadow_vfta[index] &= ~(1 << bit);
1875         --adapter->num_vlans;
1876         /* Re-init to load the changes */
1877         ixv_init_locked(adapter);
1878         IXGBE_CORE_UNLOCK(adapter);
1879 }
1880
1881 static void
1882 ixv_enable_intr(struct adapter *adapter)
1883 {
1884         struct ixgbe_hw *hw = &adapter->hw;
1885         struct ix_queue *que = adapter->queues;
1886         u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1887
1888
1889         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1890
1891         mask = IXGBE_EIMS_ENABLE_MASK;
1892         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1893         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1894
1895         for (int i = 0; i < adapter->num_queues; i++, que++)
1896                 ixv_enable_queue(adapter, que->msix);
1897
1898         IXGBE_WRITE_FLUSH(hw);
1899
1900         return;
1901 }
1902
1903 static void
1904 ixv_disable_intr(struct adapter *adapter)
1905 {
1906         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1907         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1908         IXGBE_WRITE_FLUSH(&adapter->hw);
1909         return;
1910 }
1911
1912 /*
1913 ** Setup the correct IVAR register for a particular MSIX interrupt
1914 **  - entry is the register array entry
1915 **  - vector is the MSIX vector for this queue
1916 **  - type is RX/TX/MISC
1917 */
1918 static void
1919 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1920 {
1921         struct ixgbe_hw *hw = &adapter->hw;
1922         u32 ivar, index;
1923
1924         vector |= IXGBE_IVAR_ALLOC_VAL;
1925
1926         if (type == -1) { /* MISC IVAR */
1927                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1928                 ivar &= ~0xFF;
1929                 ivar |= vector;
1930                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1931         } else {        /* RX/TX IVARS */
1932                 index = (16 * (entry & 1)) + (8 * type);
1933                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1934                 ivar &= ~(0xFF << index);
1935                 ivar |= (vector << index);
1936                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1937         }
1938 }
1939
1940 static void
1941 ixv_configure_ivars(struct adapter *adapter)
1942 {
1943         struct  ix_queue *que = adapter->queues;
1944
1945         for (int i = 0; i < adapter->num_queues; i++, que++) {
1946                 /* First the RX queue entry */
1947                 ixv_set_ivar(adapter, i, que->msix, 0);
1948                 /* ... and the TX */
1949                 ixv_set_ivar(adapter, i, que->msix, 1);
1950                 /* Set an initial value in EITR */
1951                 IXGBE_WRITE_REG(&adapter->hw,
1952                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1953         }
1954
1955         /* For the mailbox interrupt */
1956         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1957 }
1958
1959
1960 /*
1961 ** Tasklet handler for MSIX MBX interrupts
1962 **  - do outside interrupt since it might sleep
1963 */
1964 static void
1965 ixv_handle_mbx(void *context, int pending)
1966 {
1967         struct adapter  *adapter = context;
1968
1969         ixgbe_check_link(&adapter->hw,
1970             &adapter->link_speed, &adapter->link_up, 0);
1971         ixv_update_link_status(adapter);
1972 }
1973
1974 /*
1975 ** The VF stats registers never have a truely virgin
1976 ** starting point, so this routine tries to make an
1977 ** artificial one, marking ground zero on attach as
1978 ** it were.
1979 */
1980 static void
1981 ixv_save_stats(struct adapter *adapter)
1982 {
1983         if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1984                 adapter->stats.vf.saved_reset_vfgprc +=
1985                     adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1986                 adapter->stats.vf.saved_reset_vfgptc +=
1987                     adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1988                 adapter->stats.vf.saved_reset_vfgorc +=
1989                     adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1990                 adapter->stats.vf.saved_reset_vfgotc +=
1991                     adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1992                 adapter->stats.vf.saved_reset_vfmprc +=
1993                     adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1994         }
1995 }
1996  
1997 static void
1998 ixv_init_stats(struct adapter *adapter)
1999 {
2000         struct ixgbe_hw *hw = &adapter->hw;
2001  
2002         adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2003         adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2004         adapter->stats.vf.last_vfgorc |=
2005             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2006
2007         adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2008         adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2009         adapter->stats.vf.last_vfgotc |=
2010             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2011
2012         adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2013
2014         adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2015         adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2016         adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2017         adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2018         adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2019 }
2020
/*
** UPDATE_STAT_32: fold a 32-bit read-clear-less HW counter into a
** 64-bit software accumulator.  A fresh reading below the previous
** one means the counter wrapped, so 2^32 is added to the high half
** before the low 32 bits are replaced with the new reading.
** Expects a local 'hw' pointer in the caller's scope.
*/
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
        u32 current = IXGBE_READ_REG(hw, reg);          \
        if (current < last)                             \
                count += 0x100000000LL;                 \
        last = current;                                 \
        count &= 0xFFFFFFFF00000000LL;                  \
        count |= current;                               \
}

/*
** UPDATE_STAT_36: same wrap handling for the 36-bit octet counters,
** whose value is split across LSB/MSB registers (only 4 valid bits
** in the MSB), hence the 2^36 carry and 36-bit replacement mask.
*/
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
        u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
        u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
        u64 current = ((cur_msb << 32) | cur_lsb);      \
        if (current < last)                             \
                count += 0x1000000000LL;                \
        last = current;                                 \
        count &= 0xFFFFFFF000000000LL;                  \
        count |= current;                               \
}
2042
/*
** ixv_update_stats - Update the board statistics counters.
** Reads each VF hardware counter and folds it into the 64-bit
** software accumulators, handling counter wrap (see the
** UPDATE_STAT_* macros).
*/
void
ixv_update_stats(struct adapter *adapter)
{
        /* 'hw' is referenced by the UPDATE_STAT_* macros below */
        struct ixgbe_hw *hw = &adapter->hw;

        /* 32-bit packet counters */
        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
            adapter->stats.vf.vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
            adapter->stats.vf.vfgptc);
        /* 36-bit octet counters, split across LSB/MSB registers */
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
            adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
            adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
            adapter->stats.vf.vfmprc);
}
2062
/*
 * Add statistic sysctls for the VF: driver-level counters, the
 * hardware "mac" counters, and per-queue software counters.  Only
 * queue 0 is exposed under "que" here.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        struct ix_queue *que = &adapter->queues[0];
        struct tx_ring *txr = que->txr;
        struct rx_ring *rxr = que->rxr;

        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(dev);
        struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
        struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

        struct sysctl_oid *stat_node, *queue_node;
        struct sysctl_oid_list *stat_list, *queue_list;

        /* Driver Statistics */
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
                        CTLFLAG_RD, &adapter->dropped_pkts,
                        "Driver dropped packets");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
                        CTLFLAG_RD, &adapter->mbuf_defrag_failed,
                        "m_defrag() failed");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
                        CTLFLAG_RD, &adapter->watchdog_events,
                        "Watchdog timeouts");

        /* Hardware counters maintained by ixv_update_stats() */
        stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
                                    CTLFLAG_RD, NULL,
                                    "VF Statistics (read from HW registers)");
        stat_list = SYSCTL_CHILDREN(stat_node);

        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
                        CTLFLAG_RD, &stats->vfgprc,
                        "Good Packets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
                        CTLFLAG_RD, &stats->vfgorc, 
                        "Good Octets Received"); 
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
                        CTLFLAG_RD, &stats->vfmprc,
                        "Multicast Packets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
                        CTLFLAG_RD, &stats->vfgptc,
                        "Good Packets Transmitted");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
                        CTLFLAG_RD, &stats->vfgotc, 
                        "Good Octets Transmitted"); 

        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
                                    CTLFLAG_RD, NULL,
                                    "Queue Statistics (collected by SW)");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
                        CTLFLAG_RD, &(que->irqs),
                        "IRQs on queue");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
                        CTLFLAG_RD, &(rxr->rx_irq),
                        "RX irqs on queue");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
                        CTLFLAG_RD, &(rxr->rx_packets),
                        "RX packets");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
                        CTLFLAG_RD, &(rxr->rx_bytes),
                        "RX bytes");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
                        CTLFLAG_RD, &(rxr->rx_discarded),
                        "Discarded RX packets");

        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
                        CTLFLAG_RD, &(txr->total_packets),
                        "TX Packets");
        /* NOTE(review): tx_bytes uses SYSCTL_ADD_UINT while the other
         * queue counters are UQUAD — presumably txr->bytes is a u32
         * AIM counter; confirm against struct tx_ring in ixgbe.h. */
        SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
                        CTLFLAG_RD, &(txr->bytes), 0,
                        "TX Bytes");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
                        CTLFLAG_RD, &(txr->no_desc_avail),
                        "# of times not enough descriptors were available during TX");
}
2145
2146 static void
2147 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2148         const char *description, int *limit, int value)
2149 {
2150         *limit = value;
2151         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2152             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2153             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
2154 }
2155
/**********************************************************************
 *
 *  This routine is called from the ixv_sysctl_debug handler (when a
 *  '1' is written to the debug sysctl).  It provides a way to take
 *  a look at important statistics maintained by the driver and
 *  hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        struct ixgbe_hw         *hw = &adapter->hw;
        struct ix_queue         *que = adapter->queues;
        struct rx_ring          *rxr;
        struct tx_ring          *txr;
        struct lro_ctrl         *lro;

        device_printf(dev,"Error Byte Count = %u \n",
            IXGBE_READ_REG(hw, IXGBE_ERRBC));

        /* Per-queue software counters */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                txr = que->txr;
                rxr = que->rxr;
                lro = &rxr->lro;
                device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
                    que->msix, (long)que->irqs);
                device_printf(dev,"RX(%d) Packets Received: %lld\n",
                    rxr->me, (long long)rxr->rx_packets);
                device_printf(dev,"RX(%d) Bytes Received: %lu\n",
                    rxr->me, (long)rxr->rx_bytes);
                device_printf(dev,"RX(%d) LRO Queued= %d\n",
                    rxr->me, lro->lro_queued);
                device_printf(dev,"RX(%d) LRO Flushed= %d\n",
                    rxr->me, lro->lro_flushed);
                device_printf(dev,"TX(%d) Packets Sent: %lu\n",
                    txr->me, (long)txr->total_packets);
                device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
                    txr->me, (long)txr->no_desc_avail);
        }

        device_printf(dev,"MBX IRQ Handled: %lu\n",
            (long)adapter->link_irq);
        return;
}
2200
2201 static int
2202 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2203 {
2204         int error, result;
2205         struct adapter *adapter;
2206
2207         result = -1;
2208         error = sysctl_handle_int(oidp, &result, 0, req);
2209
2210         if (error || !req->newptr)
2211                 return (error);
2212
2213         if (result == 1) {
2214                 adapter = (struct adapter *) arg1;
2215                 ixv_print_debug_info(adapter);
2216         }
2217         return error;
2218 }
2219