]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - sys/dev/ixgbe/if_ixv.c
MFC 308664,308742,308743
[FreeBSD/stable/10.git] / sys / dev / ixgbe / if_ixv.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /*********************************************************************
44  *  Driver version
45  *********************************************************************/
46 char ixv_driver_version[] = "1.4.6-k";
47
48 /*********************************************************************
49  *  PCI Device ID Table
50  *
51  *  Used by probe to select devices to load on
52  *  Last field stores an index into ixv_strings
53  *  Last entry must be all 0s
54  *
55  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56  *********************************************************************/
57
58 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
59 {
60         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
61         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
62         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
63         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
64         /* required last entry */
65         {0, 0, 0, 0, 0}
66 };
67
68 /*********************************************************************
69  *  Table of branding strings
70  *********************************************************************/
71
72 static char    *ixv_strings[] = {
73         "Intel(R) PRO/10GbE Virtual Function Network Driver"
74 };
75
76 /*********************************************************************
77  *  Function prototypes
78  *********************************************************************/
79 static int      ixv_probe(device_t);
80 static int      ixv_attach(device_t);
81 static int      ixv_detach(device_t);
82 static int      ixv_shutdown(device_t);
83 static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
84 static void     ixv_init(void *);
85 static void     ixv_init_locked(struct adapter *);
86 static void     ixv_stop(void *);
87 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
88 static int      ixv_media_change(struct ifnet *);
89 static void     ixv_identify_hardware(struct adapter *);
90 static int      ixv_allocate_pci_resources(struct adapter *);
91 static int      ixv_allocate_msix(struct adapter *);
92 static int      ixv_setup_msix(struct adapter *);
93 static void     ixv_free_pci_resources(struct adapter *);
94 static void     ixv_local_timer(void *);
95 static void     ixv_setup_interface(device_t, struct adapter *);
96 static void     ixv_config_link(struct adapter *);
97
98 static void     ixv_initialize_transmit_units(struct adapter *);
99 static void     ixv_initialize_receive_units(struct adapter *);
100
101 static void     ixv_enable_intr(struct adapter *);
102 static void     ixv_disable_intr(struct adapter *);
103 static void     ixv_set_multi(struct adapter *);
104 static void     ixv_update_link_status(struct adapter *);
105 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
106 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
107 static void     ixv_configure_ivars(struct adapter *);
108 static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
109
110 static void     ixv_setup_vlan_support(struct adapter *);
111 static void     ixv_register_vlan(void *, struct ifnet *, u16);
112 static void     ixv_unregister_vlan(void *, struct ifnet *, u16);
113
114 static void     ixv_save_stats(struct adapter *);
115 static void     ixv_init_stats(struct adapter *);
116 static void     ixv_update_stats(struct adapter *);
117 static void     ixv_add_stats_sysctls(struct adapter *);
118 static void     ixv_set_sysctl_value(struct adapter *, const char *,
119                     const char *, int *, int);
120
121 /* The MSI/X Interrupt handlers */
122 static void     ixv_msix_que(void *);
123 static void     ixv_msix_mbx(void *);
124
125 /* Deferred interrupt tasklets */
126 static void     ixv_handle_que(void *, int);
127 static void     ixv_handle_mbx(void *, int);
128
#ifdef DEV_NETMAP
/*
 * ixgbe_netmap_attach() is defined in <dev/netmap/ixgbe_netmap.h>,
 * which is included by if_ix.c; the VF driver reuses it.
 */
extern void ixgbe_netmap_attach(struct adapter *adapter);

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */
140
141 /*********************************************************************
142  *  FreeBSD Device Interface Entry Points
143  *********************************************************************/
144
145 static device_method_t ixv_methods[] = {
146         /* Device interface */
147         DEVMETHOD(device_probe, ixv_probe),
148         DEVMETHOD(device_attach, ixv_attach),
149         DEVMETHOD(device_detach, ixv_detach),
150         DEVMETHOD(device_shutdown, ixv_shutdown),
151         DEVMETHOD_END
152 };
153
154 static driver_t ixv_driver = {
155         "ixv", ixv_methods, sizeof(struct adapter),
156 };
157
158 devclass_t ixv_devclass;
159 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
160 MODULE_DEPEND(ixv, pci, 1, 1, 1);
161 MODULE_DEPEND(ixv, ether, 1, 1, 1);
162 #ifdef DEV_NETMAP
163 MODULE_DEPEND(ix, netmap, 1, 1, 1);
164 #endif /* DEV_NETMAP */
165 /* XXX depend on 'ix' ? */
166
167 /*
168 ** TUNEABLE PARAMETERS:
169 */
170
171 /* Number of Queues - do not exceed MSIX vectors - 1 */
172 static int ixv_num_queues = 1;
173 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
174
175 /*
176 ** AIM: Adaptive Interrupt Moderation
177 ** which means that the interrupt rate
178 ** is varied over time based on the
179 ** traffic for that interrupt vector
180 */
181 static int ixv_enable_aim = FALSE;
182 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
183
184 /* How many packets rxeof tries to clean at a time */
185 static int ixv_rx_process_limit = 256;
186 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
187
188 /* How many packets txeof tries to clean at a time */
189 static int ixv_tx_process_limit = 256;
190 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
191
192 /* Flow control setting, default to full */
193 static int ixv_flow_control = ixgbe_fc_full;
194 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
195
196 /*
197  * Header split: this causes the hardware to DMA
198  * the header into a seperate mbuf from the payload,
199  * it can be a performance win in some workloads, but
200  * in others it actually hurts, its off by default.
201  */
202 static int ixv_header_split = FALSE;
203 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
204
205 /*
206 ** Number of TX descriptors per ring,
207 ** setting higher than RX as this seems
208 ** the better performing choice.
209 */
210 static int ixv_txd = DEFAULT_TXD;
211 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
212
213 /* Number of RX descriptors per ring */
214 static int ixv_rxd = DEFAULT_RXD;
215 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
216
217 /*
218 ** Shadow VFTA table, this is needed because
219 ** the real filter table gets cleared during
220 ** a soft reset and we need to repopulate it.
221 */
222 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
223
224 /*********************************************************************
225  *  Device identification routine
226  *
227  *  ixv_probe determines if the driver should be loaded on
228  *  adapter based on PCI vendor/device id of the adapter.
229  *
230  *  return BUS_PROBE_DEFAULT on success, positive on failure
231  *********************************************************************/
232
233 static int
234 ixv_probe(device_t dev)
235 {
236         ixgbe_vendor_info_t *ent;
237
238         u16     pci_vendor_id = 0;
239         u16     pci_device_id = 0;
240         u16     pci_subvendor_id = 0;
241         u16     pci_subdevice_id = 0;
242         char    adapter_name[256];
243
244
245         pci_vendor_id = pci_get_vendor(dev);
246         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
247                 return (ENXIO);
248
249         pci_device_id = pci_get_device(dev);
250         pci_subvendor_id = pci_get_subvendor(dev);
251         pci_subdevice_id = pci_get_subdevice(dev);
252
253         ent = ixv_vendor_info_array;
254         while (ent->vendor_id != 0) {
255                 if ((pci_vendor_id == ent->vendor_id) &&
256                     (pci_device_id == ent->device_id) &&
257
258                     ((pci_subvendor_id == ent->subvendor_id) ||
259                      (ent->subvendor_id == 0)) &&
260
261                     ((pci_subdevice_id == ent->subdevice_id) ||
262                      (ent->subdevice_id == 0))) {
263                         sprintf(adapter_name, "%s, Version - %s",
264                                 ixv_strings[ent->index],
265                                 ixv_driver_version);
266                         device_set_desc_copy(dev, adapter_name);
267                         return (BUS_PROBE_DEFAULT);
268                 }
269                 ent++;
270         }
271         return (ENXIO);
272 }
273
274 /*********************************************************************
275  *  Device initialization routine
276  *
277  *  The attach entry point is called when the driver is being loaded.
278  *  This routine identifies the type of hardware, allocates all resources
279  *  and initializes the hardware.
280  *
281  *  return 0 on success, positive on failure
282  *********************************************************************/
283
284 static int
285 ixv_attach(device_t dev)
286 {
287         struct adapter *adapter;
288         struct ixgbe_hw *hw;
289         int             error = 0;
290
291         INIT_DEBUGOUT("ixv_attach: begin");
292
293         /* Allocate, clear, and link in our adapter structure */
294         adapter = device_get_softc(dev);
295         adapter->dev = dev;
296         hw = &adapter->hw;
297
298 #ifdef DEV_NETMAP
299         adapter->init_locked = ixv_init_locked;
300         adapter->stop_locked = ixv_stop;
301 #endif
302
303         /* Core Lock Init*/
304         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
305
306         /* SYSCTL APIs */
307         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
308                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
309                         OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
310                         adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
311
312         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
313                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
314                         OID_AUTO, "enable_aim", CTLFLAG_RW,
315                         &ixv_enable_aim, 1, "Interrupt Moderation");
316
317         /* Set up the timer callout */
318         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
319
320         /* Determine hardware revision */
321         ixv_identify_hardware(adapter);
322
323         /* Do base PCI setup - map BAR0 */
324         if (ixv_allocate_pci_resources(adapter)) {
325                 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
326                 error = ENXIO;
327                 goto err_out;
328         }
329
330         /* Sysctls for limiting the amount of work done in the taskqueues */
331         ixv_set_sysctl_value(adapter, "rx_processing_limit",
332             "max number of rx packets to process",
333             &adapter->rx_process_limit, ixv_rx_process_limit);
334
335         ixv_set_sysctl_value(adapter, "tx_processing_limit",
336             "max number of tx packets to process",
337             &adapter->tx_process_limit, ixv_tx_process_limit);
338
339         /* Sysctls for limiting the amount of work done in the taskqueues */
340         ixv_set_sysctl_value(adapter, "rx_processing_limit",
341             "max number of rx packets to process",
342             &adapter->rx_process_limit, ixv_rx_process_limit);
343
344         ixv_set_sysctl_value(adapter, "tx_processing_limit",
345             "max number of tx packets to process",
346             &adapter->tx_process_limit, ixv_tx_process_limit);
347
348         /* Do descriptor calc and sanity checks */
349         if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
350             ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
351                 device_printf(dev, "TXD config issue, using default!\n");
352                 adapter->num_tx_desc = DEFAULT_TXD;
353         } else
354                 adapter->num_tx_desc = ixv_txd;
355
356         if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
357             ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
358                 device_printf(dev, "RXD config issue, using default!\n");
359                 adapter->num_rx_desc = DEFAULT_RXD;
360         } else
361                 adapter->num_rx_desc = ixv_rxd;
362
363         /* Allocate our TX/RX Queues */
364         if (ixgbe_allocate_queues(adapter)) {
365                 device_printf(dev, "ixgbe_allocate_queues() failed!\n");
366                 error = ENOMEM;
367                 goto err_out;
368         }
369
370         /*
371         ** Initialize the shared code: its
372         ** at this point the mac type is set.
373         */
374         error = ixgbe_init_shared_code(hw);
375         if (error) {
376                 device_printf(dev, "ixgbe_init_shared_code() failed!\n");
377                 error = EIO;
378                 goto err_late;
379         }
380
381         /* Setup the mailbox */
382         ixgbe_init_mbx_params_vf(hw);
383
384         /* Reset mbox api to 1.0 */
385         error = ixgbe_reset_hw(hw);
386         if (error == IXGBE_ERR_RESET_FAILED)
387                 device_printf(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
388         else if (error)
389                 device_printf(dev, "ixgbe_reset_hw() failed with error %d\n", error);
390         if (error) {
391                 error = EIO;
392                 goto err_late;
393         }
394
395         /* Negotiate mailbox API version */
396         error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
397         if (error) {
398                 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
399                 error = EIO;
400                 goto err_late;
401         }
402
403         error = ixgbe_init_hw(hw);
404         if (error) {
405                 device_printf(dev, "ixgbe_init_hw() failed!\n");
406                 error = EIO;
407                 goto err_late;
408         }
409         
410         error = ixv_allocate_msix(adapter); 
411         if (error) {
412                 device_printf(dev, "ixv_allocate_msix() failed!\n");
413                 goto err_late;
414         }
415
416         /* If no mac address was assigned, make a random one */
417         if (!ixv_check_ether_addr(hw->mac.addr)) {
418                 u8 addr[ETHER_ADDR_LEN];
419                 arc4rand(&addr, sizeof(addr), 0);
420                 addr[0] &= 0xFE;
421                 addr[0] |= 0x02;
422                 bcopy(addr, hw->mac.addr, sizeof(addr));
423         }
424
425         /* Setup OS specific network interface */
426         ixv_setup_interface(dev, adapter);
427
428         /* Do the stats setup */
429         ixv_save_stats(adapter);
430         ixv_init_stats(adapter);
431         ixv_add_stats_sysctls(adapter);
432
433         /* Register for VLAN events */
434         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
435             ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
436         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
437             ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
438
439 #ifdef DEV_NETMAP
440         ixgbe_netmap_attach(adapter);
441 #endif /* DEV_NETMAP */
442         INIT_DEBUGOUT("ixv_attach: end");
443         return (0);
444
445 err_late:
446         ixgbe_free_transmit_structures(adapter);
447         ixgbe_free_receive_structures(adapter);
448 err_out:
449         ixv_free_pci_resources(adapter);
450         return (error);
451
452 }
453
454 /*********************************************************************
455  *  Device removal routine
456  *
457  *  The detach entry point is called when the driver is being removed.
458  *  This routine stops the adapter and deallocates all the resources
459  *  that were allocated for driver operation.
460  *
461  *  return 0 on success, positive on failure
462  *********************************************************************/
463
464 static int
465 ixv_detach(device_t dev)
466 {
467         struct adapter *adapter = device_get_softc(dev);
468         struct ix_queue *que = adapter->queues;
469
470         INIT_DEBUGOUT("ixv_detach: begin");
471
472         /* Make sure VLANS are not using driver */
473         if (adapter->ifp->if_vlantrunk != NULL) {
474                 device_printf(dev, "Vlan in use, detach first\n");
475                 return (EBUSY);
476         }
477
478         IXGBE_CORE_LOCK(adapter);
479         ixv_stop(adapter);
480         IXGBE_CORE_UNLOCK(adapter);
481
482         for (int i = 0; i < adapter->num_queues; i++, que++) {
483                 if (que->tq) {
484                         struct tx_ring  *txr = que->txr;
485                         taskqueue_drain(que->tq, &txr->txq_task);
486                         taskqueue_drain(que->tq, &que->que_task);
487                         taskqueue_free(que->tq);
488                 }
489         }
490
491         /* Drain the Mailbox(link) queue */
492         if (adapter->tq) {
493                 taskqueue_drain(adapter->tq, &adapter->link_task);
494                 taskqueue_free(adapter->tq);
495         }
496
497         /* Unregister VLAN events */
498         if (adapter->vlan_attach != NULL)
499                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
500         if (adapter->vlan_detach != NULL)
501                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
502
503         ether_ifdetach(adapter->ifp);
504         callout_drain(&adapter->timer);
505 #ifdef DEV_NETMAP
506         netmap_detach(adapter->ifp);
507 #endif /* DEV_NETMAP */
508         ixv_free_pci_resources(adapter);
509         bus_generic_detach(dev);
510         if_free(adapter->ifp);
511
512         ixgbe_free_transmit_structures(adapter);
513         ixgbe_free_receive_structures(adapter);
514
515         IXGBE_CORE_LOCK_DESTROY(adapter);
516         return (0);
517 }
518
519 /*********************************************************************
520  *
521  *  Shutdown entry point
522  *
523  **********************************************************************/
524 static int
525 ixv_shutdown(device_t dev)
526 {
527         struct adapter *adapter = device_get_softc(dev);
528         IXGBE_CORE_LOCK(adapter);
529         ixv_stop(adapter);
530         IXGBE_CORE_UNLOCK(adapter);
531         return (0);
532 }
533
534
535 /*********************************************************************
536  *  Ioctl entry point
537  *
538  *  ixv_ioctl is called when the user wants to configure the
539  *  interface.
540  *
541  *  return 0 on success, positive on failure
542  **********************************************************************/
543
544 static int
545 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
546 {
547         struct adapter  *adapter = ifp->if_softc;
548         struct ifreq    *ifr = (struct ifreq *) data;
549 #if defined(INET) || defined(INET6)
550         struct ifaddr   *ifa = (struct ifaddr *) data;
551         bool            avoid_reset = FALSE;
552 #endif
553         int             error = 0;
554
555         switch (command) {
556
557         case SIOCSIFADDR:
558 #ifdef INET
559                 if (ifa->ifa_addr->sa_family == AF_INET)
560                         avoid_reset = TRUE;
561 #endif
562 #ifdef INET6
563                 if (ifa->ifa_addr->sa_family == AF_INET6)
564                         avoid_reset = TRUE;
565 #endif
566 #if defined(INET) || defined(INET6)
567                 /*
568                 ** Calling init results in link renegotiation,
569                 ** so we avoid doing it when possible.
570                 */
571                 if (avoid_reset) {
572                         ifp->if_flags |= IFF_UP;
573                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
574                                 ixv_init(adapter);
575                         if (!(ifp->if_flags & IFF_NOARP))
576                                 arp_ifinit(ifp, ifa);
577                 } else
578                         error = ether_ioctl(ifp, command, data);
579                 break;
580 #endif
581         case SIOCSIFMTU:
582                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
583                 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
584                         error = EINVAL;
585                 } else {
586                         IXGBE_CORE_LOCK(adapter);
587                         ifp->if_mtu = ifr->ifr_mtu;
588                         adapter->max_frame_size =
589                                 ifp->if_mtu + IXGBE_MTU_HDR;
590                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
591                                 ixv_init_locked(adapter);
592                         IXGBE_CORE_UNLOCK(adapter);
593                 }
594                 break;
595         case SIOCSIFFLAGS:
596                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
597                 IXGBE_CORE_LOCK(adapter);
598                 if (ifp->if_flags & IFF_UP) {
599                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
600                                 ixv_init_locked(adapter);
601                 } else
602                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
603                                 ixv_stop(adapter);
604                 adapter->if_flags = ifp->if_flags;
605                 IXGBE_CORE_UNLOCK(adapter);
606                 break;
607         case SIOCADDMULTI:
608         case SIOCDELMULTI:
609                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
610                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
611                         IXGBE_CORE_LOCK(adapter);
612                         ixv_disable_intr(adapter);
613                         ixv_set_multi(adapter);
614                         ixv_enable_intr(adapter);
615                         IXGBE_CORE_UNLOCK(adapter);
616                 }
617                 break;
618         case SIOCSIFMEDIA:
619         case SIOCGIFMEDIA:
620                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
621                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
622                 break;
623         case SIOCSIFCAP:
624         {
625                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
626                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
627                 if (mask & IFCAP_HWCSUM)
628                         ifp->if_capenable ^= IFCAP_HWCSUM;
629                 if (mask & IFCAP_TSO4)
630                         ifp->if_capenable ^= IFCAP_TSO4;
631                 if (mask & IFCAP_LRO)
632                         ifp->if_capenable ^= IFCAP_LRO;
633                 if (mask & IFCAP_VLAN_HWTAGGING)
634                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
635                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
636                         IXGBE_CORE_LOCK(adapter);
637                         ixv_init_locked(adapter);
638                         IXGBE_CORE_UNLOCK(adapter);
639                 }
640                 VLAN_CAPABILITIES(ifp);
641                 break;
642         }
643
644         default:
645                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
646                 error = ether_ioctl(ifp, command, data);
647                 break;
648         }
649
650         return (error);
651 }
652
653 /*********************************************************************
654  *  Init entry point
655  *
656  *  This routine is used in two ways. It is used by the stack as
657  *  init entry point in network interface structure. It is also used
658  *  by the driver as a hw/sw initialization routine to get to a
659  *  consistent state.
660  *
661  *  return 0 on success, positive on failure
662  **********************************************************************/
663 #define IXGBE_MHADD_MFS_SHIFT 16
664
665 static void
666 ixv_init_locked(struct adapter *adapter)
667 {
668         struct ifnet    *ifp = adapter->ifp;
669         device_t        dev = adapter->dev;
670         struct ixgbe_hw *hw = &adapter->hw;
671         int error = 0;
672
673         INIT_DEBUGOUT("ixv_init_locked: begin");
674         mtx_assert(&adapter->core_mtx, MA_OWNED);
675         hw->adapter_stopped = FALSE;
676         ixgbe_stop_adapter(hw);
677         callout_stop(&adapter->timer);
678
679         /* reprogram the RAR[0] in case user changed it. */
680         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
681
682         /* Get the latest mac address, User can use a LAA */
683         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
684              IXGBE_ETH_LENGTH_OF_ADDRESS);
685         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
686         hw->addr_ctrl.rar_used_count = 1;
687
688         /* Prepare transmit descriptors and buffers */
689         if (ixgbe_setup_transmit_structures(adapter)) {
690                 device_printf(dev, "Could not setup transmit structures\n");
691                 ixv_stop(adapter);
692                 return;
693         }
694
695         /* Reset VF and renegotiate mailbox API version */
696         ixgbe_reset_hw(hw);
697         error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
698         if (error)
699                 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
700
701         ixv_initialize_transmit_units(adapter);
702
703         /* Setup Multicast table */
704         ixv_set_multi(adapter);
705
706         /*
707         ** Determine the correct mbuf pool
708         ** for doing jumbo/headersplit
709         */
710         if (ifp->if_mtu > ETHERMTU)
711                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
712         else
713                 adapter->rx_mbuf_sz = MCLBYTES;
714
715         /* Prepare receive descriptors and buffers */
716         if (ixgbe_setup_receive_structures(adapter)) {
717                 device_printf(dev, "Could not setup receive structures\n");
718                 ixv_stop(adapter);
719                 return;
720         }
721
722         /* Configure RX settings */
723         ixv_initialize_receive_units(adapter);
724
725         /* Set the various hardware offload abilities */
726         ifp->if_hwassist = 0;
727         if (ifp->if_capenable & IFCAP_TSO4)
728                 ifp->if_hwassist |= CSUM_TSO;
729         if (ifp->if_capenable & IFCAP_TXCSUM) {
730                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
731 #if __FreeBSD_version >= 800000
732                 ifp->if_hwassist |= CSUM_SCTP;
733 #endif
734         }
735         
736         /* Set up VLAN offload and filter */
737         ixv_setup_vlan_support(adapter);
738
739         /* Set up MSI/X routing */
740         ixv_configure_ivars(adapter);
741
742         /* Set up auto-mask */
743         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
744
745         /* Set moderation on the Link interrupt */
746         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
747
748         /* Stats init */
749         ixv_init_stats(adapter);
750
751         /* Config/Enable Link */
752         ixv_config_link(adapter);
753
754         /* Start watchdog */
755         callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
756
757         /* And now turn on interrupts */
758         ixv_enable_intr(adapter);
759
760         /* Now inform the stack we're ready */
761         ifp->if_drv_flags |= IFF_DRV_RUNNING;
762         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
763
764         return;
765 }
766
/* Locked wrapper around ixv_init_locked() for the stack's if_init hook */
static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
777
778
779 /*
780 **
781 ** MSIX Interrupt Handlers and Tasklets
782 **
783 */
784
785 static inline void
786 ixv_enable_queue(struct adapter *adapter, u32 vector)
787 {
788         struct ixgbe_hw *hw = &adapter->hw;
789         u32     queue = 1 << vector;
790         u32     mask;
791
792         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
793         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
794 }
795
796 static inline void
797 ixv_disable_queue(struct adapter *adapter, u32 vector)
798 {
799         struct ixgbe_hw *hw = &adapter->hw;
800         u64     queue = (u64)(1 << vector);
801         u32     mask;
802
803         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
804         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
805 }
806
807 static inline void
808 ixv_rearm_queues(struct adapter *adapter, u64 queues)
809 {
810         u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
811         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
812 }
813
814
/*
 * Deferred (taskqueue) handler for a queue interrupt.
 *
 * Runs RX and TX cleanup outside interrupt context.  If RX still has
 * work pending it re-enqueues itself and leaves the vector masked;
 * otherwise it re-enables the queue's MSIX interrupt.
 */
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool            more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
		/* Restart transmit if the stack has frames queued */
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* More RX work: reschedule, keep the IRQ masked */
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
846
847 /*********************************************************************
848  *
849  *  MSI Queue Interrupt Service routine
850  *
851  **********************************************************************/
/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 *  Masks the queue vector, cleans RX and TX, restarts any pending
 *  transmits, then (optionally) runs adaptive interrupt moderation
 *  before deferring to the taskqueue or re-enabling the vector.
 *
 **********************************************************************/
void
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = que->txr;
	struct rx_ring  *rxr = que->rxr;
	bool            more;
	u32             newitr = 0;

	/* Mask this vector while we process it */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? (adaptive interrupt moderation, sysctl-gated) */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet over the last interval */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Replicate the value into both 16-bit halves of the EITR word */
	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
939
940 static void
941 ixv_msix_mbx(void *arg)
942 {
943         struct adapter  *adapter = arg;
944         struct ixgbe_hw *hw = &adapter->hw;
945         u32             reg;
946
947         ++adapter->link_irq;
948
949         /* First get the cause */
950         reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
951         /* Clear interrupt with write */
952         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
953
954         /* Link status change */
955         if (reg & IXGBE_EICR_LSC)
956                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
957
958         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
959         return;
960 }
961
962 /*********************************************************************
963  *
964  *  Media Ioctl callback
965  *
966  *  This routine is called whenever the user queries the status of
967  *  the interface using ifconfig.
968  *
969  **********************************************************************/
970 static void
971 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
972 {
973         struct adapter *adapter = ifp->if_softc;
974
975         INIT_DEBUGOUT("ixv_media_status: begin");
976         IXGBE_CORE_LOCK(adapter);
977         ixv_update_link_status(adapter);
978
979         ifmr->ifm_status = IFM_AVALID;
980         ifmr->ifm_active = IFM_ETHER;
981
982         if (!adapter->link_active) {
983                 IXGBE_CORE_UNLOCK(adapter);
984                 return;
985         }
986
987         ifmr->ifm_status |= IFM_ACTIVE;
988
989         switch (adapter->link_speed) {
990                 case IXGBE_LINK_SPEED_1GB_FULL:
991                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
992                         break;
993                 case IXGBE_LINK_SPEED_10GB_FULL:
994                         ifmr->ifm_active |= IFM_FDX;
995                         break;
996         }
997
998         IXGBE_CORE_UNLOCK(adapter);
999
1000         return;
1001 }
1002
1003 /*********************************************************************
1004  *
1005  *  Media Ioctl callback
1006  *
1007  *  This routine is called when the user changes speed/duplex using
1008  *  media/mediopt option with ifconfig.
1009  *
1010  **********************************************************************/
1011 static int
1012 ixv_media_change(struct ifnet * ifp)
1013 {
1014         struct adapter *adapter = ifp->if_softc;
1015         struct ifmedia *ifm = &adapter->media;
1016
1017         INIT_DEBUGOUT("ixv_media_change: begin");
1018
1019         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1020                 return (EINVAL);
1021
1022         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1023         case IFM_AUTO:
1024                 break;
1025         default:
1026                 device_printf(adapter->dev, "Only auto media type\n");
1027                 return (EINVAL);
1028         }
1029
1030         return (0);
1031 }
1032
1033
1034 /*********************************************************************
1035  *  Multicast Update
1036  *
1037  *  This routine is called whenever multicast address list is updated.
1038  *
1039  **********************************************************************/
1040 #define IXGBE_RAR_ENTRIES 16
1041
/*
 * Gather every AF_LINK multicast address on the interface into a flat
 * byte array and hand it to the shared code, which walks it with the
 * ixv_mc_array_itr() iterator.
 */
static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	/*
	 * NOTE(review): mcnt is not bounded against
	 * MAX_NUM_MULTICAST_ADDRESSES; a longer list would overflow
	 * mta[] -- confirm the stack/caller limits the list size.
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}
1079
1080 /*
1081  * This is an iterator function now needed by the multicast
1082  * shared code. It simply feeds the shared code routine the
1083  * addresses in the array of ixv_set_multi() one by one.
1084  */
1085 static u8 *
1086 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1087 {
1088         u8 *addr = *update_ptr;
1089         u8 *newptr;
1090         *vmdq = 0;
1091
1092         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1093         *update_ptr = newptr;
1094         return addr;
1095 }
1096
1097 /*********************************************************************
1098  *  Timer routine
1099  *
1100  *  This routine checks for link status,updates statistics,
1101  *  and runs the watchdog check.
1102  *
1103  **********************************************************************/
1104
/*********************************************************************
 *  Timer routine
 *
 *  Runs once per second (core lock held): refreshes link state and
 *  statistics, checks each TX queue for progress, rearms queues that
 *  still have work, and fires the watchdog reset only when EVERY
 *  queue reports hung.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;	/* bitmask of queues needing a kick */
	int		hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**	- mark hung queues so we don't schedule on them
	**	- watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Reschedule ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, mark interface down, reinitialize */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
}
1170
1171 /*
1172 ** Note: this routine updates the OS on the link state
1173 **      the real check of the hardware only happens with
1174 **      a link interrupt.
1175 */
1176 static void
1177 ixv_update_link_status(struct adapter *adapter)
1178 {
1179         struct ifnet    *ifp = adapter->ifp;
1180         device_t dev = adapter->dev;
1181
1182         if (adapter->link_up){ 
1183                 if (adapter->link_active == FALSE) {
1184                         if (bootverbose)
1185                                 device_printf(dev,"Link is up %d Gbps %s \n",
1186                                     ((adapter->link_speed == 128)? 10:1),
1187                                     "Full Duplex");
1188                         adapter->link_active = TRUE;
1189                         if_link_state_change(ifp, LINK_STATE_UP);
1190                 }
1191         } else { /* Link down */
1192                 if (adapter->link_active == TRUE) {
1193                         if (bootverbose)
1194                                 device_printf(dev,"Link is Down\n");
1195                         if_link_state_change(ifp, LINK_STATE_DOWN);
1196                         adapter->link_active = FALSE;
1197                 }
1198         }
1199
1200         return;
1201 }
1202
1203
1204 /*********************************************************************
1205  *
1206  *  This routine disables all traffic on the adapter by issuing a
1207  *  global reset on the MAC and deallocates TX/RX buffers.
1208  *
1209  **********************************************************************/
1210
static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	/*
	 * Clear the stopped flag before calling ixgbe_stop_adapter() --
	 * NOTE(review): presumably so the shared-code stop isn't
	 * short-circuited by an "already stopped" check; confirm against
	 * the shared code.
	 */
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1237
1238
1239 /*********************************************************************
1240  *
1241  *  Determine hardware revision.
1242  *
1243  **********************************************************************/
1244 static void
1245 ixv_identify_hardware(struct adapter *adapter)
1246 {
1247         device_t        dev = adapter->dev;
1248         struct ixgbe_hw *hw = &adapter->hw;
1249
1250         /*
1251         ** Make sure BUSMASTER is set, on a VM under
1252         ** KVM it may not be and will break things.
1253         */
1254         pci_enable_busmaster(dev);
1255
1256         /* Save off the information about this board */
1257         hw->vendor_id = pci_get_vendor(dev);
1258         hw->device_id = pci_get_device(dev);
1259         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1260         hw->subsystem_vendor_id =
1261             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1262         hw->subsystem_device_id =
1263             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1264
1265         /* We need this to determine device-specific things */
1266         ixgbe_set_mac_type(hw);
1267
1268         /* Set the right number of segments */
1269         adapter->num_segs = IXGBE_82599_SCATTER;
1270
1271         return;
1272 }
1273
1274 /*********************************************************************
1275  *
1276  *  Setup MSIX Interrupt resources and handlers 
1277  *
1278  **********************************************************************/
1279 static int
1280 ixv_allocate_msix(struct adapter *adapter)
1281 {
1282         device_t        dev = adapter->dev;
1283         struct          ix_queue *que = adapter->queues;
1284         struct          tx_ring *txr = adapter->tx_rings;
1285         int             error, rid, vector = 0;
1286
1287         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1288                 rid = vector + 1;
1289                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1290                     RF_SHAREABLE | RF_ACTIVE);
1291                 if (que->res == NULL) {
1292                         device_printf(dev,"Unable to allocate"
1293                             " bus resource: que interrupt [%d]\n", vector);
1294                         return (ENXIO);
1295                 }
1296                 /* Set the handler function */
1297                 error = bus_setup_intr(dev, que->res,
1298                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
1299                     ixv_msix_que, que, &que->tag);
1300                 if (error) {
1301                         que->res = NULL;
1302                         device_printf(dev, "Failed to register QUE handler");
1303                         return (error);
1304                 }
1305 #if __FreeBSD_version >= 800504
1306                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1307 #endif
1308                 que->msix = vector;
1309                 adapter->active_queues |= (u64)(1 << que->msix);
1310                 /*
1311                 ** Bind the msix vector, and thus the
1312                 ** ring to the corresponding cpu.
1313                 */
1314                 if (adapter->num_queues > 1)
1315                         bus_bind_intr(dev, que->res, i);
1316                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1317                 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1318                 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1319                     taskqueue_thread_enqueue, &que->tq);
1320                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1321                     device_get_nameunit(adapter->dev));
1322         }
1323
1324         /* and Mailbox */
1325         rid = vector + 1;
1326         adapter->res = bus_alloc_resource_any(dev,
1327             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1328         if (!adapter->res) {
1329                 device_printf(dev,"Unable to allocate"
1330             " bus resource: MBX interrupt [%d]\n", rid);
1331                 return (ENXIO);
1332         }
1333         /* Set the mbx handler function */
1334         error = bus_setup_intr(dev, adapter->res,
1335             INTR_TYPE_NET | INTR_MPSAFE, NULL,
1336             ixv_msix_mbx, adapter, &adapter->tag);
1337         if (error) {
1338                 adapter->res = NULL;
1339                 device_printf(dev, "Failed to register LINK handler");
1340                 return (error);
1341         }
1342 #if __FreeBSD_version >= 800504
1343         bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1344 #endif
1345         adapter->vector = vector;
1346         /* Tasklets for Mailbox */
1347         TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1348         adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1349             taskqueue_thread_enqueue, &adapter->tq);
1350         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1351             device_get_nameunit(adapter->dev));
1352         /*
1353         ** Due to a broken design QEMU will fail to properly
1354         ** enable the guest for MSIX unless the vectors in
1355         ** the table are all set up, so we must rewrite the
1356         ** ENABLE in the MSIX control register again at this
1357         ** point to cause it to successfully initialize us.
1358         */
1359         if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1360                 int msix_ctrl;
1361                 pci_find_cap(dev, PCIY_MSIX, &rid);
1362                 rid += PCIR_MSIX_CTRL;
1363                 msix_ctrl = pci_read_config(dev, rid, 2);
1364                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1365                 pci_write_config(dev, rid, msix_ctrl, 2);
1366         }
1367
1368         return (0);
1369 }
1370
1371 /*
1372  * Setup MSIX resources, note that the VF
1373  * device MUST use MSIX, there is no fallback.
1374  */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, msgs;


	/* Must have at least 2 MSIX vectors (one queue + mailbox) */
	msgs = pci_msix_count(dev);
	if (msgs < 2)
		goto out;
	/* The MSIX table lives in BAR 3 */
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	/*
	** Want vectors for the queues,
	** plus an additional for mailbox.
	*/
	want = adapter->num_queues + 1;
	if (want > msgs) {
		/* Fewer vectors than desired: scale queue count down */
		want = msgs;
		adapter->num_queues = msgs - 1;
	} else
		msgs = want;
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
	/* Release in case alloc was insufficient */
	pci_release_msi(dev);
out:
	/*
	 * NOTE(review): if the first "goto out" fires, rid is still
	 * uninitialized; safe today only because msix_mem is NULL at
	 * that point so the release is skipped -- confirm.
	 */
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	device_printf(adapter->dev,"MSIX config error\n");
	/*
	 * NOTE(review): ENXIO doubles as an error sentinel while success
	 * returns a vector count; the caller compares against ENXIO.
	 */
	return (ENXIO);
}
1421
1422
/*
 * Map the device registers (BAR 0) and configure MSIX.
 * Returns 0 on success, ENXIO on any allocation failure.
 */
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int		rid;
	device_t	dev = adapter->dev;

	/* Map the register space in BAR 0 */
	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;
	adapter->hw.back = adapter;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	/*
	 * NOTE(review): ixv_setup_msix() uses ENXIO as its failure
	 * sentinel, so a legitimate vector count equal to ENXIO (6)
	 * would be misread as failure -- confirm counts stay below it.
	 */
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
1459
/*
 * Tear down interrupt handlers, IRQ resources, MSIX state and the
 * register mapping.  Tolerates partially-completed attach (see the
 * adapter->res check below).
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	/*
	 * NOTE(review): assumes MSIX_82598_BAR names the same BAR that
	 * ixv_setup_msix() allocated (PCIR_BAR(3)) -- confirm.
	 */
	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
1521
1522 /*********************************************************************
1523  *
1524  *  Setup networking device structure and register an interface.
1525  *
1526  **********************************************************************/
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Allocates the ifnet, wires up the driver entry points, declares
 *  capabilities (all enabled by default), attaches the ethernet
 *  layer, and registers the (autoselect-only) media callbacks.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit on 8.0+, legacy if_start before that */
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Capabilities: checksum offload, TSO, jumbo, VLAN offloads, LRO */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
		     ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1580         
1581 static void
1582 ixv_config_link(struct adapter *adapter)
1583 {
1584         struct ixgbe_hw *hw = &adapter->hw;
1585         u32     autoneg;
1586
1587         if (hw->mac.ops.check_link)
1588                 hw->mac.ops.check_link(hw, &autoneg,
1589                     &adapter->link_up, FALSE);
1590 }
1591
1592
1593 /*********************************************************************
1594  *
1595  *  Enable transmit unit.
1596  *
1597  **********************************************************************/
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs each VF TX queue: writeback threshold, head/tail
 *  indices, descriptor ring base/length, relaxed-ordering control,
 *  then enables the queue.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters: base address (split low/high), length */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		       (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed ordering for descriptor writes */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1640
1641
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

/*
 * Program the VF receive path: per-queue descriptor ring base/length,
 * receive buffer size (SRRCTL), packet-split type register, RDT/RDH
 * reset, queue enable, and RX checksum offload.  Runs during
 * (re)initialization, before traffic is enabled.
 */
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
        struct  rx_ring *rxr = adapter->rx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ifnet    *ifp = adapter->ifp;
        u32             bufsz, rxcsum, psrtype;

        /* RX buffer size: 4K clusters for jumbo MTU, 2K otherwise
         * (value is encoded in SRRCTL units via BSIZEPKT_SHIFT) */
        if (ifp->if_mtu > ETHERMTU)
                bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        else
                bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

        /* Header types the hardware may use for packet-split */
        psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
            IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
            IXGBE_PSRTYPE_L2HDR;

        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

        /* Tell PF our max_frame size */
        ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);

        /* NOTE(review): loop uses `i` for VFRXDCTL/VFSRRCTL/VFRDBA* but
         * `rxr->me` for VFRDH/VFRDT — assumes i == rxr->me; confirm. */
        for (int i = 0; i < adapter->num_queues; i++, rxr++) {
                u64 rdba = rxr->rxdma.dma_paddr;
                u32 reg, rxdctl;

                /* Disable the queue */
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                rxdctl &= ~IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                /* Poll up to ~10ms for the disable to take effect */
                for (int j = 0; j < 10; j++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                msec_delay(1);
                        else
                                break;
                }
                wmb();
                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
                    (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
                    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

                /* Reset the ring indices */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

                /* Set up the SRRCTL register: buffer size + advanced
                 * one-buffer descriptor format */
                reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
                reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                reg |= bufsz;
                reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
                IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

                /* Capture  Rx Tail register */
                rxr->tail = IXGBE_VFRDT(rxr->me);

                /* Do the queue enabling last */
                rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                /* Poll up to ~10ms for the enable to latch */
                for (int k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                break;
                        else
                                msec_delay(1);
                }
                wmb();

                /* Set the Tail Pointer */
#ifdef DEV_NETMAP
                /*
                 * In netmap mode, we must preserve the buffers made
                 * available to userspace before the if_init()
                 * (this is true by default on the TX side, because
                 * init makes all buffers available to userspace).
                 *
                 * netmap_reset() and the device specific routines
                 * (e.g. ixgbe_setup_receive_rings()) map these
                 * buffers at the end of the NIC ring, so here we
                 * must set the RDT (tail) register to make sure
                 * they are not overwritten.
                 *
                 * In this driver the NIC ring starts at RDH = 0,
                 * RDT points to the last slot available for reception (?),
                 * so RDT = num_rx_desc - 1 means the whole ring is available.
                 */
                if (ifp->if_capenable & IFCAP_NETMAP) {
                        struct netmap_adapter *na = NA(adapter->ifp);
                        struct netmap_kring *kring = &na->rx_rings[i];
                        int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

                        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
                } else
#endif /* DEV_NETMAP */
                        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                            adapter->num_rx_desc - 1);
        }

        /* Enable RSS/partial-checksum bit when RX csum offload is on;
         * otherwise fall back to IP payload checksum evaluation */
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        if (ifp->if_capenable & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

        return;
}
1764
1765 static void
1766 ixv_setup_vlan_support(struct adapter *adapter)
1767 {
1768         struct ixgbe_hw *hw = &adapter->hw;
1769         u32             ctrl, vid, vfta, retry;
1770         struct rx_ring  *rxr;
1771
1772         /*
1773         ** We get here thru init_locked, meaning
1774         ** a soft reset, this has already cleared
1775         ** the VFTA and other state, so if there
1776         ** have been no vlan's registered do nothing.
1777         */
1778         if (adapter->num_vlans == 0)
1779                 return;
1780
1781         /* Enable the queues */
1782         for (int i = 0; i < adapter->num_queues; i++) {
1783                 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1784                 ctrl |= IXGBE_RXDCTL_VME;
1785                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1786                 /*
1787                  * Let Rx path know that it needs to store VLAN tag
1788                  * as part of extra mbuf info.
1789                  */
1790                 rxr = &adapter->rx_rings[i];
1791                 rxr->vtag_strip = TRUE;
1792         }
1793
1794         /*
1795         ** A soft reset zero's out the VFTA, so
1796         ** we need to repopulate it now.
1797         */
1798         for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1799                 if (ixv_shadow_vfta[i] == 0)
1800                         continue;
1801                 vfta = ixv_shadow_vfta[i];
1802                 /*
1803                 ** Reconstruct the vlan id's
1804                 ** based on the bits set in each
1805                 ** of the array ints.
1806                 */
1807                 for (int j = 0; j < 32; j++) {
1808                         retry = 0;
1809                         if ((vfta & (1 << j)) == 0)
1810                                 continue;
1811                         vid = (i * 32) + j;
1812                         /* Call the shared code mailbox routine */
1813                         while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
1814                                 if (++retry > 5)
1815                                         break;
1816                         }
1817                 }
1818         }
1819 }
1820
1821 /*
1822 ** This routine is run via an vlan config EVENT,
1823 ** it enables us to use the HW Filter table since
1824 ** we can get the vlan id. This just creates the
1825 ** entry in the soft version of the VFTA, init will
1826 ** repopulate the real table.
1827 */
1828 static void
1829 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1830 {
1831         struct adapter  *adapter = ifp->if_softc;
1832         u16             index, bit;
1833
1834         if (ifp->if_softc != arg) /* Not our event */
1835                 return;
1836
1837         if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1838                 return;
1839
1840         IXGBE_CORE_LOCK(adapter);
1841         index = (vtag >> 5) & 0x7F;
1842         bit = vtag & 0x1F;
1843         ixv_shadow_vfta[index] |= (1 << bit);
1844         ++adapter->num_vlans;
1845         /* Re-init to load the changes */
1846         ixv_init_locked(adapter);
1847         IXGBE_CORE_UNLOCK(adapter);
1848 }
1849
1850 /*
1851 ** This routine is run via an vlan
1852 ** unconfig EVENT, remove our entry
1853 ** in the soft vfta.
1854 */
1855 static void
1856 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1857 {
1858         struct adapter  *adapter = ifp->if_softc;
1859         u16             index, bit;
1860
1861         if (ifp->if_softc !=  arg)
1862                 return;
1863
1864         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
1865                 return;
1866
1867         IXGBE_CORE_LOCK(adapter);
1868         index = (vtag >> 5) & 0x7F;
1869         bit = vtag & 0x1F;
1870         ixv_shadow_vfta[index] &= ~(1 << bit);
1871         --adapter->num_vlans;
1872         /* Re-init to load the changes */
1873         ixv_init_locked(adapter);
1874         IXGBE_CORE_UNLOCK(adapter);
1875 }
1876
1877 static void
1878 ixv_enable_intr(struct adapter *adapter)
1879 {
1880         struct ixgbe_hw *hw = &adapter->hw;
1881         struct ix_queue *que = adapter->queues;
1882         u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1883
1884
1885         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1886
1887         mask = IXGBE_EIMS_ENABLE_MASK;
1888         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1889         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1890
1891         for (int i = 0; i < adapter->num_queues; i++, que++)
1892                 ixv_enable_queue(adapter, que->msix);
1893
1894         IXGBE_WRITE_FLUSH(hw);
1895
1896         return;
1897 }
1898
1899 static void
1900 ixv_disable_intr(struct adapter *adapter)
1901 {
1902         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1903         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1904         IXGBE_WRITE_FLUSH(&adapter->hw);
1905         return;
1906 }
1907
1908 /*
1909 ** Setup the correct IVAR register for a particular MSIX interrupt
1910 **  - entry is the register array entry
1911 **  - vector is the MSIX vector for this queue
1912 **  - type is RX/TX/MISC
1913 */
1914 static void
1915 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1916 {
1917         struct ixgbe_hw *hw = &adapter->hw;
1918         u32 ivar, index;
1919
1920         vector |= IXGBE_IVAR_ALLOC_VAL;
1921
1922         if (type == -1) { /* MISC IVAR */
1923                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1924                 ivar &= ~0xFF;
1925                 ivar |= vector;
1926                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1927         } else {        /* RX/TX IVARS */
1928                 index = (16 * (entry & 1)) + (8 * type);
1929                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1930                 ivar &= ~(0xFF << index);
1931                 ivar |= (vector << index);
1932                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1933         }
1934 }
1935
1936 static void
1937 ixv_configure_ivars(struct adapter *adapter)
1938 {
1939         struct  ix_queue *que = adapter->queues;
1940
1941         for (int i = 0; i < adapter->num_queues; i++, que++) {
1942                 /* First the RX queue entry */
1943                 ixv_set_ivar(adapter, i, que->msix, 0);
1944                 /* ... and the TX */
1945                 ixv_set_ivar(adapter, i, que->msix, 1);
1946                 /* Set an initial value in EITR */
1947                 IXGBE_WRITE_REG(&adapter->hw,
1948                     IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1949         }
1950
1951         /* For the mailbox interrupt */
1952         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1953 }
1954
1955
1956 /*
1957 ** Tasklet handler for MSIX MBX interrupts
1958 **  - do outside interrupt since it might sleep
1959 */
1960 static void
1961 ixv_handle_mbx(void *context, int pending)
1962 {
1963         struct adapter  *adapter = context;
1964
1965         ixgbe_check_link(&adapter->hw,
1966             &adapter->link_speed, &adapter->link_up, 0);
1967         ixv_update_link_status(adapter);
1968 }
1969
1970 /*
1971 ** The VF stats registers never have a truely virgin
1972 ** starting point, so this routine tries to make an
1973 ** artificial one, marking ground zero on attach as
1974 ** it were.
1975 */
1976 static void
1977 ixv_save_stats(struct adapter *adapter)
1978 {
1979         if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1980                 adapter->stats.vf.saved_reset_vfgprc +=
1981                     adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1982                 adapter->stats.vf.saved_reset_vfgptc +=
1983                     adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1984                 adapter->stats.vf.saved_reset_vfgorc +=
1985                     adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1986                 adapter->stats.vf.saved_reset_vfgotc +=
1987                     adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1988                 adapter->stats.vf.saved_reset_vfmprc +=
1989                     adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1990         }
1991 }
1992  
1993 static void
1994 ixv_init_stats(struct adapter *adapter)
1995 {
1996         struct ixgbe_hw *hw = &adapter->hw;
1997  
1998         adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1999         adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2000         adapter->stats.vf.last_vfgorc |=
2001             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2002
2003         adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2004         adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2005         adapter->stats.vf.last_vfgotc |=
2006             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2007
2008         adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2009
2010         adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2011         adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2012         adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2013         adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2014         adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2015 }
2016
/*
 * Fold a 32-bit rollover-prone hardware counter (read via
 * IXGBE_READ_REG with a local `hw` in scope) into a wider software
 * count.  `last` tracks the previous raw reading; when the new
 * reading is smaller the counter wrapped, so bump the high bits.
 * Wrapped in do { } while (0) so an invocation followed by `;`
 * behaves as a single statement even in an unbraced if/else
 * (the old bare `{ ... }` form mis-parsed there).
 */
#define UPDATE_STAT_32(reg, last, count)                \
do {                                                    \
        u32 current = IXGBE_READ_REG(hw, reg);          \
        if (current < last)                             \
                count += 0x100000000LL;                 \
        last = current;                                 \
        count &= 0xFFFFFFFF00000000LL;                  \
        count |= current;                               \
} while (0)
2026
/*
 * Same as UPDATE_STAT_32 but for the 36-bit octet counters split
 * across LSB/MSB registers (read via IXGBE_READ_REG with a local
 * `hw` in scope).  Rollover adds 2^36 and the mask keeps only the
 * software-extended high bits.
 * Wrapped in do { } while (0) so an invocation followed by `;`
 * behaves as a single statement even in an unbraced if/else
 * (the old bare `{ ... }` form mis-parsed there).
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
do {                                                    \
        u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
        u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
        u64 current = ((cur_msb << 32) | cur_lsb);      \
        if (current < last)                             \
                count += 0x1000000000LL;                \
        last = current;                                 \
        count &= 0xFFFFFFF000000000LL;                  \
        count |= current;                               \
} while (0)
2038
2039 /*
2040 ** ixv_update_stats - Update the board statistics counters.
2041 */
2042 void
2043 ixv_update_stats(struct adapter *adapter)
2044 {
2045         struct ixgbe_hw *hw = &adapter->hw;
2046
2047         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2048             adapter->stats.vf.vfgprc);
2049         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2050             adapter->stats.vf.vfgptc);
2051         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2052             adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2053         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2054             adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2055         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2056             adapter->stats.vf.vfmprc);
2057 }
2058
2059 /*
2060  * Add statistic sysctls for the VF.
2061  */
2062 static void
2063 ixv_add_stats_sysctls(struct adapter *adapter)
2064 {
2065         device_t dev = adapter->dev;
2066         struct ix_queue *que = &adapter->queues[0];
2067         struct tx_ring *txr = que->txr;
2068         struct rx_ring *rxr = que->rxr;
2069
2070         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2071         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2072         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2073         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2074
2075         struct sysctl_oid *stat_node, *queue_node;
2076         struct sysctl_oid_list *stat_list, *queue_list;
2077
2078         /* Driver Statistics */
2079         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2080                         CTLFLAG_RD, &adapter->dropped_pkts,
2081                         "Driver dropped packets");
2082         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
2083                         CTLFLAG_RD, &adapter->mbuf_defrag_failed,
2084                         "m_defrag() failed");
2085         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2086                         CTLFLAG_RD, &adapter->watchdog_events,
2087                         "Watchdog timeouts");
2088
2089         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2090                                     CTLFLAG_RD, NULL,
2091                                     "VF Statistics (read from HW registers)");
2092         stat_list = SYSCTL_CHILDREN(stat_node);
2093
2094         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2095                         CTLFLAG_RD, &stats->vfgprc,
2096                         "Good Packets Received");
2097         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2098                         CTLFLAG_RD, &stats->vfgorc, 
2099                         "Good Octets Received"); 
2100         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2101                         CTLFLAG_RD, &stats->vfmprc,
2102                         "Multicast Packets Received");
2103         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2104                         CTLFLAG_RD, &stats->vfgptc,
2105                         "Good Packets Transmitted");
2106         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2107                         CTLFLAG_RD, &stats->vfgotc, 
2108                         "Good Octets Transmitted"); 
2109
2110         queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
2111                                     CTLFLAG_RD, NULL,
2112                                     "Queue Statistics (collected by SW)");
2113         queue_list = SYSCTL_CHILDREN(queue_node);
2114
2115         SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2116                         CTLFLAG_RD, &(que->irqs),
2117                         "IRQs on queue");
2118         SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
2119                         CTLFLAG_RD, &(rxr->rx_irq),
2120                         "RX irqs on queue");
2121         SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2122                         CTLFLAG_RD, &(rxr->rx_packets),
2123                         "RX packets");
2124         SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2125                         CTLFLAG_RD, &(rxr->rx_bytes),
2126                         "RX bytes");
2127         SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2128                         CTLFLAG_RD, &(rxr->rx_discarded),
2129                         "Discarded RX packets");
2130
2131         SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2132                         CTLFLAG_RD, &(txr->total_packets),
2133                         "TX Packets");
2134         SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
2135                         CTLFLAG_RD, &(txr->bytes), 0,
2136                         "TX Bytes");
2137         SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
2138                         CTLFLAG_RD, &(txr->no_desc_avail),
2139                         "# of times not enough descriptors were available during TX");
2140 }
2141
2142 static void
2143 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2144         const char *description, int *limit, int value)
2145 {
2146         *limit = value;
2147         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2148             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2149             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
2150 }
2151
2152 /**********************************************************************
2153  *
2154  *  This routine is called only when em_display_debug_stats is enabled.
2155  *  This routine provides a way to take a look at important statistics
2156  *  maintained by the driver and hardware.
2157  *
2158  **********************************************************************/
2159 static void
2160 ixv_print_debug_info(struct adapter *adapter)
2161 {
2162         device_t dev = adapter->dev;
2163         struct ixgbe_hw         *hw = &adapter->hw;
2164         struct ix_queue         *que = adapter->queues;
2165         struct rx_ring          *rxr;
2166         struct tx_ring          *txr;
2167         struct lro_ctrl         *lro;
2168
2169         device_printf(dev,"Error Byte Count = %u \n",
2170             IXGBE_READ_REG(hw, IXGBE_ERRBC));
2171
2172         for (int i = 0; i < adapter->num_queues; i++, que++) {
2173                 txr = que->txr;
2174                 rxr = que->rxr;
2175                 lro = &rxr->lro;
2176                 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2177                     que->msix, (long)que->irqs);
2178                 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2179                     rxr->me, (long long)rxr->rx_packets);
2180                 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2181                     rxr->me, (long)rxr->rx_bytes);
2182                 device_printf(dev,"RX(%d) LRO Queued= %d\n",
2183                     rxr->me, lro->lro_queued);
2184                 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2185                     rxr->me, lro->lro_flushed);
2186                 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2187                     txr->me, (long)txr->total_packets);
2188                 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2189                     txr->me, (long)txr->no_desc_avail);
2190         }
2191
2192         device_printf(dev,"MBX IRQ Handled: %lu\n",
2193             (long)adapter->link_irq);
2194         return;
2195 }
2196
2197 static int
2198 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2199 {
2200         int error, result;
2201         struct adapter *adapter;
2202
2203         result = -1;
2204         error = sysctl_handle_int(oidp, &result, 0, req);
2205
2206         if (error || !req->newptr)
2207                 return (error);
2208
2209         if (result == 1) {
2210                 adapter = (struct adapter *) arg1;
2211                 ixv_print_debug_info(adapter);
2212         }
2213         return error;
2214 }
2215