]> CyberLeo.Net >> Repos - FreeBSD/releng/10.3.git/blob - sys/dev/ixgbe/if_ixv.c
- Copy stable/10@296371 to releng/10.3 in preparation for 10.3-RC1
[FreeBSD/releng/10.3.git] / sys / dev / ixgbe / if_ixv.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /*********************************************************************
44  *  Driver version
45  *********************************************************************/
46 char ixv_driver_version[] = "1.4.6-k";
47
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on.
 *  The last field stores an index into ixv_strings.
 *  The last entry must be all 0s (loop sentinel in ixv_probe()).
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *  (indexed by the String Index field of ixv_vendor_info_array)
 *********************************************************************/

static char    *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
75
76 /*********************************************************************
77  *  Function prototypes
78  *********************************************************************/
79 static int      ixv_probe(device_t);
80 static int      ixv_attach(device_t);
81 static int      ixv_detach(device_t);
82 static int      ixv_shutdown(device_t);
83 static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
84 static void     ixv_init(void *);
85 static void     ixv_init_locked(struct adapter *);
86 static void     ixv_stop(void *);
87 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
88 static int      ixv_media_change(struct ifnet *);
89 static void     ixv_identify_hardware(struct adapter *);
90 static int      ixv_allocate_pci_resources(struct adapter *);
91 static int      ixv_allocate_msix(struct adapter *);
92 static int      ixv_setup_msix(struct adapter *);
93 static void     ixv_free_pci_resources(struct adapter *);
94 static void     ixv_local_timer(void *);
95 static void     ixv_setup_interface(device_t, struct adapter *);
96 static void     ixv_config_link(struct adapter *);
97
98 static void     ixv_initialize_transmit_units(struct adapter *);
99 static void     ixv_initialize_receive_units(struct adapter *);
100
101 static void     ixv_enable_intr(struct adapter *);
102 static void     ixv_disable_intr(struct adapter *);
103 static void     ixv_set_multi(struct adapter *);
104 static void     ixv_update_link_status(struct adapter *);
105 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
106 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
107 static void     ixv_configure_ivars(struct adapter *);
108 static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
109
110 static void     ixv_setup_vlan_support(struct adapter *);
111 static void     ixv_register_vlan(void *, struct ifnet *, u16);
112 static void     ixv_unregister_vlan(void *, struct ifnet *, u16);
113
114 static void     ixv_save_stats(struct adapter *);
115 static void     ixv_init_stats(struct adapter *);
116 static void     ixv_update_stats(struct adapter *);
117 static void     ixv_add_stats_sysctls(struct adapter *);
118 static void     ixv_set_sysctl_value(struct adapter *, const char *,
119                     const char *, int *, int);
120
121 /* The MSI/X Interrupt handlers */
122 static void     ixv_msix_que(void *);
123 static void     ixv_msix_mbx(void *);
124
125 /* Deferred interrupt tasklets */
126 static void     ixv_handle_que(void *, int);
127 static void     ixv_handle_mbx(void *, int);
128
129 #ifdef DEV_NETMAP
130 /*
131  * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
132  * if_ix.c.
133  */
134 extern void ixgbe_netmap_attach(struct adapter *adapter);
135
136 #include <net/netmap.h>
137 #include <sys/selinfo.h>
138 #include <dev/netmap/netmap_kern.h>
139 #endif /* DEV_NETMAP */
140
141 /*********************************************************************
142  *  FreeBSD Device Interface Entry Points
143  *********************************************************************/
144
145 static device_method_t ixv_methods[] = {
146         /* Device interface */
147         DEVMETHOD(device_probe, ixv_probe),
148         DEVMETHOD(device_attach, ixv_attach),
149         DEVMETHOD(device_detach, ixv_detach),
150         DEVMETHOD(device_shutdown, ixv_shutdown),
151         DEVMETHOD_END
152 };
153
154 static driver_t ixv_driver = {
155         "ixv", ixv_methods, sizeof(struct adapter),
156 };
157
158 devclass_t ixv_devclass;
159 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
160 MODULE_DEPEND(ixv, pci, 1, 1, 1);
161 MODULE_DEPEND(ixv, ether, 1, 1, 1);
162 #ifdef DEV_NETMAP
163 MODULE_DEPEND(ix, netmap, 1, 1, 1);
164 #endif /* DEV_NETMAP */
165 /* XXX depend on 'ix' ? */
166
167 /*
168 ** TUNEABLE PARAMETERS:
169 */
170
171 /* Number of Queues - do not exceed MSIX vectors - 1 */
172 static int ixv_num_queues = 1;
173 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
174
175 /*
176 ** AIM: Adaptive Interrupt Moderation
177 ** which means that the interrupt rate
178 ** is varied over time based on the
179 ** traffic for that interrupt vector
180 */
181 static int ixv_enable_aim = FALSE;
182 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
183
184 /* How many packets rxeof tries to clean at a time */
185 static int ixv_rx_process_limit = 256;
186 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
187
188 /* How many packets txeof tries to clean at a time */
189 static int ixv_tx_process_limit = 256;
190 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
191
192 /* Flow control setting, default to full */
193 static int ixv_flow_control = ixgbe_fc_full;
194 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
195
196 /*
197  * Header split: this causes the hardware to DMA
198  * the header into a separate mbuf from the payload.
199  * It can be a performance win in some workloads, but
200  * in others it actually hurts; it's off by default.
201  */
202 static int ixv_header_split = FALSE;
203 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
204
205 /*
206 ** Number of TX descriptors per ring,
207 ** setting higher than RX as this seems
208 ** the better performing choice.
209 */
210 static int ixv_txd = DEFAULT_TXD;
211 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
212
213 /* Number of RX descriptors per ring */
214 static int ixv_rxd = DEFAULT_RXD;
215 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
216
217 /*
218 ** Shadow VFTA table, this is needed because
219 ** the real filter table gets cleared during
220 ** a soft reset and we need to repopulate it.
221 */
222 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
223
224 /*********************************************************************
225  *  Device identification routine
226  *
227  *  ixv_probe determines if the driver should be loaded on
228  *  adapter based on PCI vendor/device id of the adapter.
229  *
230  *  return BUS_PROBE_DEFAULT on success, positive on failure
231  *********************************************************************/
232
233 static int
234 ixv_probe(device_t dev)
235 {
236         ixgbe_vendor_info_t *ent;
237
238         u16     pci_vendor_id = 0;
239         u16     pci_device_id = 0;
240         u16     pci_subvendor_id = 0;
241         u16     pci_subdevice_id = 0;
242         char    adapter_name[256];
243
244
245         pci_vendor_id = pci_get_vendor(dev);
246         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
247                 return (ENXIO);
248
249         pci_device_id = pci_get_device(dev);
250         pci_subvendor_id = pci_get_subvendor(dev);
251         pci_subdevice_id = pci_get_subdevice(dev);
252
253         ent = ixv_vendor_info_array;
254         while (ent->vendor_id != 0) {
255                 if ((pci_vendor_id == ent->vendor_id) &&
256                     (pci_device_id == ent->device_id) &&
257
258                     ((pci_subvendor_id == ent->subvendor_id) ||
259                      (ent->subvendor_id == 0)) &&
260
261                     ((pci_subdevice_id == ent->subdevice_id) ||
262                      (ent->subdevice_id == 0))) {
263                         sprintf(adapter_name, "%s, Version - %s",
264                                 ixv_strings[ent->index],
265                                 ixv_driver_version);
266                         device_set_desc_copy(dev, adapter_name);
267                         return (BUS_PROBE_DEFAULT);
268                 }
269                 ent++;
270         }
271         return (ENXIO);
272 }
273
274 /*********************************************************************
275  *  Device initialization routine
276  *
277  *  The attach entry point is called when the driver is being loaded.
278  *  This routine identifies the type of hardware, allocates all resources
279  *  and initializes the hardware.
280  *
281  *  return 0 on success, positive on failure
282  *********************************************************************/
283
284 static int
285 ixv_attach(device_t dev)
286 {
287         struct adapter *adapter;
288         struct ixgbe_hw *hw;
289         int             error = 0;
290
291         INIT_DEBUGOUT("ixv_attach: begin");
292
293         /* Allocate, clear, and link in our adapter structure */
294         adapter = device_get_softc(dev);
295         adapter->dev = dev;
296         hw = &adapter->hw;
297
298 #ifdef DEV_NETMAP
299         adapter->init_locked = ixv_init_locked;
300         adapter->stop_locked = ixv_stop;
301 #endif
302
303         /* Core Lock Init*/
304         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
305
306         /* SYSCTL APIs */
307         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
308                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
309                         OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
310                         adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
311
312         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
313                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
314                         OID_AUTO, "enable_aim", CTLFLAG_RW,
315                         &ixv_enable_aim, 1, "Interrupt Moderation");
316
317         /* Set up the timer callout */
318         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
319
320         /* Determine hardware revision */
321         ixv_identify_hardware(adapter);
322
323         /* Do base PCI setup - map BAR0 */
324         if (ixv_allocate_pci_resources(adapter)) {
325                 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
326                 error = ENXIO;
327                 goto err_out;
328         }
329
330         /* Sysctls for limiting the amount of work done in the taskqueues */
331         ixv_set_sysctl_value(adapter, "rx_processing_limit",
332             "max number of rx packets to process",
333             &adapter->rx_process_limit, ixv_rx_process_limit);
334
335         ixv_set_sysctl_value(adapter, "tx_processing_limit",
336             "max number of tx packets to process",
337             &adapter->tx_process_limit, ixv_tx_process_limit);
338
339         /* Sysctls for limiting the amount of work done in the taskqueues */
340         ixv_set_sysctl_value(adapter, "rx_processing_limit",
341             "max number of rx packets to process",
342             &adapter->rx_process_limit, ixv_rx_process_limit);
343
344         ixv_set_sysctl_value(adapter, "tx_processing_limit",
345             "max number of tx packets to process",
346             &adapter->tx_process_limit, ixv_tx_process_limit);
347
348         /* Do descriptor calc and sanity checks */
349         if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
350             ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
351                 device_printf(dev, "TXD config issue, using default!\n");
352                 adapter->num_tx_desc = DEFAULT_TXD;
353         } else
354                 adapter->num_tx_desc = ixv_txd;
355
356         if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
357             ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
358                 device_printf(dev, "RXD config issue, using default!\n");
359                 adapter->num_rx_desc = DEFAULT_RXD;
360         } else
361                 adapter->num_rx_desc = ixv_rxd;
362
363         /* Allocate our TX/RX Queues */
364         if (ixgbe_allocate_queues(adapter)) {
365                 device_printf(dev, "ixgbe_allocate_queues() failed!\n");
366                 error = ENOMEM;
367                 goto err_out;
368         }
369
370         /*
371         ** Initialize the shared code: its
372         ** at this point the mac type is set.
373         */
374         error = ixgbe_init_shared_code(hw);
375         if (error) {
376                 device_printf(dev, "ixgbe_init_shared_code() failed!\n");
377                 error = EIO;
378                 goto err_late;
379         }
380
381         /* Setup the mailbox */
382         ixgbe_init_mbx_params_vf(hw);
383
384         /* Reset mbox api to 1.0 */
385         error = ixgbe_reset_hw(hw);
386         if (error == IXGBE_ERR_RESET_FAILED)
387                 device_printf(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
388         else if (error)
389                 device_printf(dev, "ixgbe_reset_hw() failed with error %d\n", error);
390         if (error) {
391                 error = EIO;
392                 goto err_late;
393         }
394
395         /* Negotiate mailbox API version */
396         error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
397         if (error) {
398                 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
399                 error = EIO;
400                 goto err_late;
401         }
402
403         error = ixgbe_init_hw(hw);
404         if (error) {
405                 device_printf(dev, "ixgbe_init_hw() failed!\n");
406                 error = EIO;
407                 goto err_late;
408         }
409         
410         error = ixv_allocate_msix(adapter); 
411         if (error) {
412                 device_printf(dev, "ixv_allocate_msix() failed!\n");
413                 goto err_late;
414         }
415
416         /* If no mac address was assigned, make a random one */
417         if (!ixv_check_ether_addr(hw->mac.addr)) {
418                 u8 addr[ETHER_ADDR_LEN];
419                 arc4rand(&addr, sizeof(addr), 0);
420                 addr[0] &= 0xFE;
421                 addr[0] |= 0x02;
422                 bcopy(addr, hw->mac.addr, sizeof(addr));
423         }
424
425         /* Setup OS specific network interface */
426         ixv_setup_interface(dev, adapter);
427
428         /* Do the stats setup */
429         ixv_save_stats(adapter);
430         ixv_init_stats(adapter);
431         ixv_add_stats_sysctls(adapter);
432
433         /* Register for VLAN events */
434         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
435             ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
436         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
437             ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
438
439 #ifdef DEV_NETMAP
440         ixgbe_netmap_attach(adapter);
441 #endif /* DEV_NETMAP */
442         INIT_DEBUGOUT("ixv_attach: end");
443         return (0);
444
445 err_late:
446         ixgbe_free_transmit_structures(adapter);
447         ixgbe_free_receive_structures(adapter);
448 err_out:
449         ixv_free_pci_resources(adapter);
450         return (error);
451
452 }
453
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Stop the hardware under the core lock before tearing anything down */
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free each per-queue taskqueue (tx restart + rx refill work) */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring  *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox(link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack, then stop the watchdog callout */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	/* Release bus resources and the ifnet, then free ring memory */
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
518
519 /*********************************************************************
520  *
521  *  Shutdown entry point
522  *
523  **********************************************************************/
524 static int
525 ixv_shutdown(device_t dev)
526 {
527         struct adapter *adapter = device_get_softc(dev);
528         IXGBE_CORE_LOCK(adapter);
529         ixv_stop(adapter);
530         IXGBE_CORE_UNLOCK(adapter);
531         return (0);
532 }
533
534
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter  *adapter = ifp->if_softc;
	struct ifreq    *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr   *ifa = (struct ifaddr *) data;
	bool            avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
		/* NOTE(review): if neither INET nor INET6 is defined, the
		 * break above is compiled out and SIOCSIFADDR falls through
		 * into SIOCSIFMTU -- presumably this configuration is never
		 * built; worth confirming. */
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		/* Reject MTUs that would exceed the max frame size */
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + IXGBE_MTU_HDR;
			/* Re-init so rings/buffers match the new frame size */
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		/* Bring the interface up or down to track IFF_UP */
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		/* Reload the multicast filter with interrupts masked */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* Toggle only the capability bits that changed */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		/* Re-init so the hardware picks up the new offload config */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
651
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  Must be called with the core lock held (asserted below).
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	/* Quiesce the adapter and stop the watchdog before reconfiguring */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	/* NOTE(review): RAR[0] is written twice -- once above with
	 * IXGBE_RAH_AV and again here after refreshing the MAC from the
	 * ifnet; looks redundant but is preserved as-is -- confirm intent. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	ixgbe_reset_hw(hw);
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error)
		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
765
/* if_init entry point: take the core lock and run the locked init path. */
static void
ixv_init(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixv_init_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
776
777
778 /*
779 **
780 ** MSIX Interrupt Handlers and Tasklets
781 **
782 */
783
784 static inline void
785 ixv_enable_queue(struct adapter *adapter, u32 vector)
786 {
787         struct ixgbe_hw *hw = &adapter->hw;
788         u32     queue = 1 << vector;
789         u32     mask;
790
791         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
792         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
793 }
794
795 static inline void
796 ixv_disable_queue(struct adapter *adapter, u32 vector)
797 {
798         struct ixgbe_hw *hw = &adapter->hw;
799         u64     queue = (u64)(1 << vector);
800         u32     mask;
801
802         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
803         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
804 }
805
806 static inline void
807 ixv_rearm_queues(struct adapter *adapter, u64 queues)
808 {
809         u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
810         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
811 }
812
813
/*
** Deferred (taskqueue) handler for a queue interrupt: clean the rx and
** tx rings, restart transmission if the stack has frames queued, and
** re-enable the queue interrupt when no rx work remains.
*/
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool            more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);	/* TRUE if rx work remains */
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		/* More rx work pending: requeue ourselves instead of
		 * re-enabling the interrupt, so we keep polling under load */
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
845
/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 *  Masks the queue's interrupt, cleans RX and TX, restarts any
 *  pending transmit work, optionally recomputes the adaptive
 *  interrupt moderation (AIM) setting, and finally either defers
 *  remaining RX work to the taskqueue or unmasks the interrupt.
 *
 **********************************************************************/
void
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = que->txr;
	struct rx_ring  *rxr = que->rxr;
	bool            more;
	u32             newitr = 0;

	/* Mask this vector until processing is done */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);
 
	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;
				
	/* Average bytes/packet over the last interval, TX and RX */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Duplicate into the high half of the EITR layout */
	newitr |= newitr << 16;
		 
	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
938
/*
** MSIX interrupt handler for the mailbox vector: reads the
** interrupt cause, clears it, defers link-state changes to
** the mailbox task, and re-arms the 'other' cause.
*/
static void
ixv_msix_mbx(void *arg)
{
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reg;

	++adapter->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* Unmask the mailbox/other interrupt again */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}
960
961 /*********************************************************************
962  *
963  *  Media Ioctl callback
964  *
965  *  This routine is called whenever the user queries the status of
966  *  the interface using ifconfig.
967  *
968  **********************************************************************/
969 static void
970 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
971 {
972         struct adapter *adapter = ifp->if_softc;
973
974         INIT_DEBUGOUT("ixv_media_status: begin");
975         IXGBE_CORE_LOCK(adapter);
976         ixv_update_link_status(adapter);
977
978         ifmr->ifm_status = IFM_AVALID;
979         ifmr->ifm_active = IFM_ETHER;
980
981         if (!adapter->link_active) {
982                 IXGBE_CORE_UNLOCK(adapter);
983                 return;
984         }
985
986         ifmr->ifm_status |= IFM_ACTIVE;
987
988         switch (adapter->link_speed) {
989                 case IXGBE_LINK_SPEED_1GB_FULL:
990                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
991                         break;
992                 case IXGBE_LINK_SPEED_10GB_FULL:
993                         ifmr->ifm_active |= IFM_FDX;
994                         break;
995         }
996
997         IXGBE_CORE_UNLOCK(adapter);
998
999         return;
1000 }
1001
1002 /*********************************************************************
1003  *
1004  *  Media Ioctl callback
1005  *
1006  *  This routine is called when the user changes speed/duplex using
1007  *  media/mediopt option with ifconfig.
1008  *
1009  **********************************************************************/
1010 static int
1011 ixv_media_change(struct ifnet * ifp)
1012 {
1013         struct adapter *adapter = ifp->if_softc;
1014         struct ifmedia *ifm = &adapter->media;
1015
1016         INIT_DEBUGOUT("ixv_media_change: begin");
1017
1018         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1019                 return (EINVAL);
1020
1021         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1022         case IFM_AUTO:
1023                 break;
1024         default:
1025                 device_printf(adapter->dev, "Only auto media type\n");
1026                 return (EINVAL);
1027         }
1028
1029         return (0);
1030 }
1031
1032
1033 /*********************************************************************
1034  *  Multicast Update
1035  *
1036  *  This routine is called whenever multicast address list is updated.
1037  *
1038  **********************************************************************/
1039 #define IXGBE_RAR_ENTRIES 16
1040
1041 static void
1042 ixv_set_multi(struct adapter *adapter)
1043 {
1044         u8      mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1045         u8      *update_ptr;
1046         struct  ifmultiaddr *ifma;
1047         int     mcnt = 0;
1048         struct ifnet   *ifp = adapter->ifp;
1049
1050         IOCTL_DEBUGOUT("ixv_set_multi: begin");
1051
1052 #if __FreeBSD_version < 800000
1053         IF_ADDR_LOCK(ifp);
1054 #else
1055         if_maddr_rlock(ifp);
1056 #endif
1057         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1058                 if (ifma->ifma_addr->sa_family != AF_LINK)
1059                         continue;
1060                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1061                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1062                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1063                 mcnt++;
1064         }
1065 #if __FreeBSD_version < 800000
1066         IF_ADDR_UNLOCK(ifp);
1067 #else
1068         if_maddr_runlock(ifp);
1069 #endif
1070
1071         update_ptr = mta;
1072
1073         ixgbe_update_mc_addr_list(&adapter->hw,
1074             update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1075
1076         return;
1077 }
1078
1079 /*
1080  * This is an iterator function now needed by the multicast
1081  * shared code. It simply feeds the shared code routine the
1082  * addresses in the array of ixv_set_multi() one by one.
1083  */
1084 static u8 *
1085 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1086 {
1087         u8 *addr = *update_ptr;
1088         u8 *newptr;
1089         *vmdq = 0;
1090
1091         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1092         *update_ptr = newptr;
1093         return addr;
1094 }
1095
1096 /*********************************************************************
1097  *  Timer routine
1098  *
1099  *  This routine checks for link status,updates statistics,
1100  *  and runs the watchdog check.
1101  *
1102  **********************************************************************/
1103
1104 static void
1105 ixv_local_timer(void *arg)
1106 {
1107         struct adapter  *adapter = arg;
1108         device_t        dev = adapter->dev;
1109         struct ix_queue *que = adapter->queues;
1110         u64             queues = 0;
1111         int             hung = 0;
1112
1113         mtx_assert(&adapter->core_mtx, MA_OWNED);
1114
1115         ixv_update_link_status(adapter);
1116
1117         /* Stats Update */
1118         ixv_update_stats(adapter);
1119
1120         /*
1121         ** Check the TX queues status
1122         **      - mark hung queues so we don't schedule on them
1123         **      - watchdog only if all queues show hung
1124         */
1125         for (int i = 0; i < adapter->num_queues; i++, que++) {
1126                 /* Keep track of queues with work for soft irq */
1127                 if (que->txr->busy)
1128                         queues |= ((u64)1 << que->me);
1129                 /*
1130                 ** Each time txeof runs without cleaning, but there
1131                 ** are uncleaned descriptors it increments busy. If
1132                 ** we get to the MAX we declare it hung.
1133                 */
1134                 if (que->busy == IXGBE_QUEUE_HUNG) {
1135                         ++hung;
1136                         /* Mark the queue as inactive */
1137                         adapter->active_queues &= ~((u64)1 << que->me);
1138                         continue;
1139                 } else {
1140                         /* Check if we've come back from hung */
1141                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1142                                 adapter->active_queues |= ((u64)1 << que->me);
1143                 }
1144                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1145                         device_printf(dev,"Warning queue %d "
1146                             "appears to be hung!\n", i);
1147                         que->txr->busy = IXGBE_QUEUE_HUNG;
1148                         ++hung;
1149                 }
1150
1151         }
1152
1153         /* Only truely watchdog if all queues show hung */
1154         if (hung == adapter->num_queues)
1155                 goto watchdog;
1156         else if (queues != 0) { /* Force an IRQ on queues with work */
1157                 ixv_rearm_queues(adapter, queues);
1158         }
1159
1160         callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1161         return;
1162
1163 watchdog:
1164         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1165         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1166         adapter->watchdog_events++;
1167         ixv_init_locked(adapter);
1168 }
1169
1170 /*
1171 ** Note: this routine updates the OS on the link state
1172 **      the real check of the hardware only happens with
1173 **      a link interrupt.
1174 */
1175 static void
1176 ixv_update_link_status(struct adapter *adapter)
1177 {
1178         struct ifnet    *ifp = adapter->ifp;
1179         device_t dev = adapter->dev;
1180
1181         if (adapter->link_up){ 
1182                 if (adapter->link_active == FALSE) {
1183                         if (bootverbose)
1184                                 device_printf(dev,"Link is up %d Gbps %s \n",
1185                                     ((adapter->link_speed == 128)? 10:1),
1186                                     "Full Duplex");
1187                         adapter->link_active = TRUE;
1188                         if_link_state_change(ifp, LINK_STATE_UP);
1189                 }
1190         } else { /* Link down */
1191                 if (adapter->link_active == TRUE) {
1192                         if (bootverbose)
1193                                 device_printf(dev,"Link is Down\n");
1194                         if_link_state_change(ifp, LINK_STATE_DOWN);
1195                         adapter->link_active = FALSE;
1196                 }
1197         }
1198
1199         return;
1200 }
1201
1202
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	/* Caller must hold the core lock */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	/*
	** NOTE(review): clearing adapter_stopped between reset and
	** stop looks deliberate — presumably so ixgbe_stop_adapter()
	** runs its full stop sequence instead of short-circuiting on
	** an already-stopped flag set by the shared reset code;
	** confirm against the shared-code implementation.
	*/
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1236
1237
1238 /*********************************************************************
1239  *
1240  *  Determine hardware revision.
1241  *
1242  **********************************************************************/
1243 static void
1244 ixv_identify_hardware(struct adapter *adapter)
1245 {
1246         device_t        dev = adapter->dev;
1247         struct ixgbe_hw *hw = &adapter->hw;
1248
1249         /*
1250         ** Make sure BUSMASTER is set, on a VM under
1251         ** KVM it may not be and will break things.
1252         */
1253         pci_enable_busmaster(dev);
1254
1255         /* Save off the information about this board */
1256         hw->vendor_id = pci_get_vendor(dev);
1257         hw->device_id = pci_get_device(dev);
1258         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1259         hw->subsystem_vendor_id =
1260             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1261         hw->subsystem_device_id =
1262             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1263
1264         /* We need this to determine device-specific things */
1265         ixgbe_set_mac_type(hw);
1266
1267         /* Set the right number of segments */
1268         adapter->num_segs = IXGBE_82599_SCATTER;
1269
1270         return;
1271 }
1272
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers 
 *
 *  Allocates and wires one IRQ per queue plus one for the
 *  mailbox, creates the per-queue and mailbox taskqueues, and
 *  applies a QEMU MSIX-enable workaround for 82599 VFs.
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct          ix_queue *que = adapter->queues;
	struct          tx_ring *txr = adapter->tx_rings;
	int             error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		/* IRQ resource IDs are 1-based relative to the vector */
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->active_queues |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);
		/* Deferred-start and queue-service tasks for this ring */
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev,"Unable to allocate"
	    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->vector = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}
1369
/*
 * Setup MSIX resources, note that the VF
 * device MUST use MSIX, there is no fallback.
 *
 * Returns the number of vectors allocated on success, or
 * ENXIO on failure; the caller distinguishes the two by
 * comparing the result against ENXIO.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, msgs;


	/* Must have at least 2 MSIX vectors */
	msgs = pci_msix_count(dev);
	if (msgs < 2)
		goto out;
	/* The MSIX table is mapped through BAR 3 */
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	/*
	** Want vectors for the queues,
	** plus an additional for mailbox.
	*/
	want = adapter->num_queues + 1;
	if (want > msgs) {
		/* Scale the queue count down to what the device offers */
		want = msgs;
		adapter->num_queues = msgs - 1;
	} else
		msgs = want;
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
	/* Release in case alloc was insufficient */
	pci_release_msi(dev);
out:
	/*
	** 'rid' is only assigned on the path that allocated
	** msix_mem, which is also the only path on which msix_mem
	** is non-NULL here, so this release never sees it unset.
	*/
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	device_printf(adapter->dev,"MSIX config error\n");
	return (ENXIO);
}
1420
1421
/*
** Map the BAR 0 register space, pick up the queue-count
** tuneable, and configure MSIX; returns 0 or ENXIO.
*/
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int             rid;
	device_t        dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Save bus tag/handle for the register-access macros */
	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;
	adapter->hw.back = adapter;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	**
	** NOTE(review): a successful vector count numerically equal
	** to ENXIO would be misread as failure here; harmless for
	** small vector counts, but worth confirming against
	** ixv_setup_msix()'s return contract.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
1458
/*
** Tear down interrupt handlers, IRQ resources, MSIX state and
** BAR mappings; safe to call from failed-attach paths (detected
** via a NULL adapter->res).
*/
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct          ix_queue *que = adapter->queues;
	device_t        dev = adapter->dev;
	int             rid, memrid;

	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Give back MSIX vectors and the table/BAR mappings */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
1520
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Allocates the ifnet, fills in entry points and capabilities,
 *  attaches the Ethernet layer, and registers the (autoselect
 *  only) media callbacks.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	/* Multiqueue transmit entry points */
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Checksum offload, TSO, jumbo frames, VLAN and LRO support */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	/* Everything advertised is enabled by default */
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
		     ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1579         
1580 static void
1581 ixv_config_link(struct adapter *adapter)
1582 {
1583         struct ixgbe_hw *hw = &adapter->hw;
1584         u32     autoneg;
1585
1586         if (hw->mac.ops.check_link)
1587                 hw->mac.ops.check_link(hw, &autoneg,
1588                     &adapter->link_up, FALSE);
1589 }
1590
1591
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs each TX ring's descriptor base/length, head/tail
 *  indices and write-back behavior, then enables the queue.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64     tdba = txr->txdma.dma_paddr;
		u32     txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		       (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-order writeback of descriptors */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1639
1640
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

/*
 * Program the VF receive path: per-queue descriptor ring base/length,
 * buffer sizes (SRRCTL), head/tail pointers, and the RXCSUM offload
 * register.  Each queue is disabled before its ring is programmed and
 * re-enabled afterwards, polling RXDCTL for the state change.
 */
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, rxcsum, psrtype;

	/*
	 * Receive buffer size in SRRCTL BSIZEPKT units (1KB each):
	 * 4KB buffers for jumbo frames, 2KB otherwise.
	 */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Header types the hardware may recognize for packet split */
	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to ~10ms for the disable to take effect */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		/* One buffer per packet, advanced descriptor format */
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture  Rx Tail register */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to ~10ms for the enable to take effect */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/*
	 * NOTE(review): when PCSD is off, IPPCSE is set instead —
	 * presumably to keep IP payload checksumming active; confirm
	 * against the 82599 datasheet RXCSUM description.
	 */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
1763
/*
 * Re-enable VLAN tag stripping on every RX queue and repopulate the
 * hardware VFTA from the driver's soft shadow copy.  Needed because a
 * soft reset (via init_locked) clears the hardware table.
 */
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;
	struct rx_ring	*rxr;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		/* Turn on hardware VLAN tag stripping (VME) per queue */
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr = &adapter->rx_rings[i];
		rxr->vtag_strip = TRUE;
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Retry a few times; the PF mailbox can be busy */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
1819
1820 /*
1821 ** This routine is run via an vlan config EVENT,
1822 ** it enables us to use the HW Filter table since
1823 ** we can get the vlan id. This just creates the
1824 ** entry in the soft version of the VFTA, init will
1825 ** repopulate the real table.
1826 */
1827 static void
1828 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1829 {
1830         struct adapter  *adapter = ifp->if_softc;
1831         u16             index, bit;
1832
1833         if (ifp->if_softc != arg) /* Not our event */
1834                 return;
1835
1836         if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1837                 return;
1838
1839         IXGBE_CORE_LOCK(adapter);
1840         index = (vtag >> 5) & 0x7F;
1841         bit = vtag & 0x1F;
1842         ixv_shadow_vfta[index] |= (1 << bit);
1843         ++adapter->num_vlans;
1844         /* Re-init to load the changes */
1845         ixv_init_locked(adapter);
1846         IXGBE_CORE_UNLOCK(adapter);
1847 }
1848
1849 /*
1850 ** This routine is run via an vlan
1851 ** unconfig EVENT, remove our entry
1852 ** in the soft vfta.
1853 */
1854 static void
1855 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1856 {
1857         struct adapter  *adapter = ifp->if_softc;
1858         u16             index, bit;
1859
1860         if (ifp->if_softc !=  arg)
1861                 return;
1862
1863         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1864                 return;
1865
1866         IXGBE_CORE_LOCK(adapter);
1867         index = (vtag >> 5) & 0x7F;
1868         bit = vtag & 0x1F;
1869         ixv_shadow_vfta[index] &= ~(1 << bit);
1870         --adapter->num_vlans;
1871         /* Re-init to load the changes */
1872         ixv_init_locked(adapter);
1873         IXGBE_CORE_UNLOCK(adapter);
1874 }
1875
1876 static void
1877 ixv_enable_intr(struct adapter *adapter)
1878 {
1879         struct ixgbe_hw *hw = &adapter->hw;
1880         struct ix_queue *que = adapter->queues;
1881         u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1882
1883
1884         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1885
1886         mask = IXGBE_EIMS_ENABLE_MASK;
1887         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1888         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1889
1890         for (int i = 0; i < adapter->num_queues; i++, que++)
1891                 ixv_enable_queue(adapter, que->msix);
1892
1893         IXGBE_WRITE_FLUSH(hw);
1894
1895         return;
1896 }
1897
1898 static void
1899 ixv_disable_intr(struct adapter *adapter)
1900 {
1901         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1902         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1903         IXGBE_WRITE_FLUSH(&adapter->hw);
1904         return;
1905 }
1906
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the entry valid so hardware honors the mapping */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;	/* misc vector lives in the low byte */
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		/*
		 * Each 32-bit VTIVAR register packs four 8-bit entries:
		 * two queue pairs (selected by entry & 1, 16 bits apart),
		 * each with an RX (type 0) and TX (type 1) byte.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
1934
1935 static void
1936 ixv_configure_ivars(struct adapter *adapter)
1937 {
1938         struct  ix_queue *que = adapter->queues;
1939
1940         for (int i = 0; i < adapter->num_queues; i++, que++) {
1941                 /* First the RX queue entry */
1942                 ixv_set_ivar(adapter, i, que->msix, 0);
1943                 /* ... and the TX */
1944                 ixv_set_ivar(adapter, i, que->msix, 1);
1945                 /* Set an initial value in EITR */
1946                 IXGBE_WRITE_REG(&adapter->hw,
1947                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1948         }
1949
1950         /* For the mailbox interrupt */
1951         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1952 }
1953
1954
1955 /*
1956 ** Tasklet handler for MSIX MBX interrupts
1957 **  - do outside interrupt since it might sleep
1958 */
1959 static void
1960 ixv_handle_mbx(void *context, int pending)
1961 {
1962         struct adapter  *adapter = context;
1963
1964         ixgbe_check_link(&adapter->hw,
1965             &adapter->link_speed, &adapter->link_up, 0);
1966         ixv_update_link_status(adapter);
1967 }
1968
/*
** The VF stats registers never have a truely virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	/* Only accumulate if we have seen traffic since the last base */
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
	}
}
1991  
/*
 * Snapshot the current hardware counter values.  The snapshots become
 * both the "last seen" values used for rollover detection and the base
 * values that ixv_save_stats() subtracts from.  The 36-bit octet
 * counters are assembled from their LSB/MSB register halves.
 */
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
 
	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
}
2015
/*
 * Accumulate a 32-bit wrapping hardware counter into a 64-bit total.
 * `reg` is the register offset, `last` the previously read raw value,
 * `count` the running total.  On wrap (current < last) the high 32
 * bits of `count` are bumped; the low 32 bits then track `current`.
 * NOTE: both macros expect a local `struct ixgbe_hw *hw` in scope.
 */
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
        u32 current = IXGBE_READ_REG(hw, reg);          \
        if (current < last)                             \
                count += 0x100000000LL;                 \
        last = current;                                 \
        count &= 0xFFFFFFFF00000000LL;                  \
        count |= current;                               \
}

/*
 * Same rollover scheme for a 36-bit counter split across LSB/MSB
 * registers: the wrap increment and mask operate on bit 36 upward.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
        u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
        u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
        u64 current = ((cur_msb << 32) | cur_lsb);      \
        if (current < last)                             \
                count += 0x1000000000LL;                \
        last = current;                                 \
        count &= 0xFFFFFFF000000000LL;                  \
        count |= current;                               \
}
2037
/*
** ixv_update_stats - Update the board statistics counters.
**
** Folds the current VF hardware counters into the 64-bit software
** totals via the UPDATE_STAT_* macros, which require the local `hw`.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
}
2057
/*
 * Add statistic sysctls for the VF.
 *
 * Exposes driver counters, the MAC counters under "mac", and queue
 * counters under "que".  NOTE(review): only queue 0 is exported —
 * que/txr/rxr all point at adapter->queues[0]; multi-queue stats are
 * not visible via sysctl.  Confirm whether that is intentional.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
			CTLFLAG_RD, &adapter->dropped_pkts,
			"Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
			"m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &adapter->watchdog_events,
			"Watchdog timeouts");

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
				    CTLFLAG_RD, NULL,
				    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
			CTLFLAG_RD, &stats->vfgprc,
			"Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
			CTLFLAG_RD, &stats->vfgorc, 
			"Good Octets Received"); 
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
			CTLFLAG_RD, &stats->vfmprc,
			"Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
			CTLFLAG_RD, &stats->vfgptc,
			"Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
			CTLFLAG_RD, &stats->vfgotc, 
			"Good Octets Transmitted"); 

	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
				    CTLFLAG_RD, NULL,
				    "Queue Statistics (collected by SW)");
	queue_list = SYSCTL_CHILDREN(queue_node);

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
			CTLFLAG_RD, &(que->irqs),
			"IRQs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
			CTLFLAG_RD, &(rxr->rx_irq),
			"RX irqs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
			CTLFLAG_RD, &(rxr->rx_packets),
			"RX packets");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
			CTLFLAG_RD, &(rxr->rx_bytes),
			"RX bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
			CTLFLAG_RD, &(rxr->rx_discarded),
			"Discarded RX packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
			CTLFLAG_RD, &(txr->total_packets),
			"TX Packets");
	/*
	 * NOTE(review): tx_bytes uses SYSCTL_ADD_UINT while the sibling
	 * counters use UQUAD — presumably txr->bytes is a u32; confirm
	 * against the tx_ring declaration before changing it.
	 */
	SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
			CTLFLAG_RD, &(txr->bytes), 0,
			"TX Bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
			CTLFLAG_RD, &(txr->no_desc_avail),
			"# of times not enough descriptors were available during TX");
}
2140
2141 static void
2142 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2143         const char *description, int *limit, int value)
2144 {
2145         *limit = value;
2146         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2147             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2148             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
2149 }
2150
/**********************************************************************
 *
 *  This routine is called from the ixv_sysctl_debug handler (when a
 *  user writes 1 to the debug sysctl).  It provides a way to take a
 *  look at important statistics maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw		*hw = &adapter->hw;
	struct ix_queue		*que = adapter->queues;
	struct rx_ring		*rxr;
	struct tx_ring		*txr;
	struct lro_ctrl		*lro;

	device_printf(dev,"Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	/* Per-queue software counters and LRO stats */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev,"RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev,"RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev,"RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev,"RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev,"TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev,"MBX IRQ Handled: %lu\n",
	    (long)adapter->link_irq);
	return;
}
2195
2196 static int
2197 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2198 {
2199         int error, result;
2200         struct adapter *adapter;
2201
2202         result = -1;
2203         error = sysctl_handle_int(oidp, &result, 0, req);
2204
2205         if (error || !req->newptr)
2206                 return (error);
2207
2208         if (result == 1) {
2209                 adapter = (struct adapter *) arg1;
2210                 ixv_print_debug_info(adapter);
2211         }
2212         return error;
2213 }
2214