1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /************************************************************************
45 ************************************************************************/
/* Driver version string; appended to the device description in ixv_probe(). */
46 char ixv_driver_version[] = "1.5.9-k";
48 /************************************************************************
51 * Used by probe to select devices to load on
52 * Last field stores an index into ixv_strings
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 ************************************************************************/
57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
64 /* required last entry */
/* NOTE(review): the terminating all-zero sentinel entry and closing brace are
 * elided in this excerpt; ixv_probe() stops scanning when vendor_id == 0. */
68 /************************************************************************
69 * Table of branding strings
70 ************************************************************************/
/* Indexed by the "String Index" column of ixv_vendor_info_array. */
71 static char *ixv_strings[] = {
72 "Intel(R) PRO/10GbE Virtual Function Network Driver"
75 /************************************************************************
77 ************************************************************************/
/* Forward declarations: newbus device methods (probe/attach/detach/shutdown),
 * ifnet entry points (ioctl/init/media), and internal helpers. */
78 static int ixv_probe(device_t);
79 static int ixv_attach(device_t);
80 static int ixv_detach(device_t);
81 static int ixv_shutdown(device_t);
82 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
83 static void ixv_init(void *);
84 static void ixv_stop(void *);
85 static void ixv_init_device_features(struct adapter *);
86 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
87 static int ixv_media_change(struct ifnet *);
88 static int ixv_allocate_pci_resources(struct adapter *);
89 static int ixv_allocate_msix(struct adapter *);
90 static int ixv_configure_interrupts(struct adapter *);
91 static void ixv_free_pci_resources(struct adapter *);
92 static void ixv_local_timer(void *);
93 static void ixv_setup_interface(device_t, struct adapter *);
95 static void ixv_initialize_transmit_units(struct adapter *);
96 static void ixv_initialize_receive_units(struct adapter *);
97 static void ixv_initialize_rss_mapping(struct adapter *);
98 static void ixv_check_link(struct adapter *);
100 static void ixv_enable_intr(struct adapter *);
101 static void ixv_disable_intr(struct adapter *);
102 static void ixv_set_multi(struct adapter *);
103 static void ixv_update_link_status(struct adapter *);
104 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
105 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
106 static void ixv_configure_ivars(struct adapter *);
107 static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware-filter support (shadow VFTA kept in ixv_shadow_vfta). */
109 static void ixv_setup_vlan_support(struct adapter *);
110 static void ixv_register_vlan(void *, struct ifnet *, u16);
111 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
/* Statistics save/restore across VF resets plus sysctl export. */
113 static void ixv_save_stats(struct adapter *);
114 static void ixv_init_stats(struct adapter *);
115 static void ixv_update_stats(struct adapter *);
116 static void ixv_add_stats_sysctls(struct adapter *);
117 static void ixv_set_sysctl_value(struct adapter *, const char *,
118 const char *, int *, int);
120 /* The MSI-X Interrupt handlers */
121 static void ixv_msix_que(void *);
122 static void ixv_msix_mbx(void *);
124 /* Deferred interrupt tasklets */
125 static void ixv_handle_que(void *, int);
126 static void ixv_handle_link(void *, int);
128 /************************************************************************
129 * FreeBSD Device Interface Entry Points
130 ************************************************************************/
131 static device_method_t ixv_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, ixv_probe),
134 DEVMETHOD(device_attach, ixv_attach),
135 DEVMETHOD(device_detach, ixv_detach),
136 DEVMETHOD(device_shutdown, ixv_shutdown),
/* NOTE(review): DEVMETHOD_END and the closing brace are elided in this excerpt. */
140 static driver_t ixv_driver = {
141 "ixv", ixv_methods, sizeof(struct adapter),
/* Module registration: attach "ixv" to the pci bus and declare dependencies. */
144 devclass_t ixv_devclass;
145 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
146 MODULE_DEPEND(ixv, pci, 1, 1, 1);
147 MODULE_DEPEND(ixv, ether, 1, 1, 1);
148 #if __FreeBSD_version >= 1100000
149 MODULE_DEPEND(ixv, netmap, 1, 1, 1);
153 * TUNEABLE PARAMETERS:
156 static SYSCTL_NODE(_hw, OID_AUTO, ixv, CTLFLAG_RD, 0, "IXV driver parameters");
158 /* Number of Queues - do not exceed MSI-X vectors - 1 */
159 static int ixv_num_queues = 1;
160 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
161 SYSCTL_INT(_hw_ixv, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixv_num_queues, 0,
162 "Number of queues to configure, 0 indicates autoconfigure");
165 * AIM: Adaptive Interrupt Moderation
166 * which means that the interrupt rate
167 * is varied over time based on the
168 * traffic for that interrupt vector
170 static int ixv_enable_aim = FALSE;
171 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
172 SYSCTL_INT(_hw_ixv, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixv_enable_aim, 0,
173 "Adaptive Interrupt Moderation");
175 /* How many packets rxeof tries to clean at a time */
176 static int ixv_rx_process_limit = 256;
177 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
178 SYSCTL_INT(_hw_ixv, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
179 &ixv_rx_process_limit, 0, "Limit to RX packet processing");
181 /* How many packets txeof tries to clean at a time */
182 static int ixv_tx_process_limit = 256;
183 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
184 SYSCTL_INT(_hw_ixv, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
185 &ixv_tx_process_limit, 0, "Limit to TX packet processing");
187 /* Flow control setting, default to full */
188 static int ixv_flow_control = ixgbe_fc_full;
189 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
190 SYSCTL_INT(_hw_ixv, OID_AUTO, flow_control, CTLFLAG_RDTUN, &ixv_flow_control, 0,
194 * Header split: this causes the hardware to DMA
195 * the header into a separate mbuf from the payload,
196 * it can be a performance win in some workloads, but
197 * in others it actually hurts, its off by default.
199 static int ixv_header_split = FALSE;
200 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
201 SYSCTL_INT(_hw_ixv, OID_AUTO, header_split, CTLFLAG_RDTUN, &ixv_header_split, 0,
202 "Header Split: DMA header into separate mbuf");
205 * Number of TX descriptors per ring,
206 * setting higher than RX as this seems
207 * the better performing choice.
209 static int ixv_txd = DEFAULT_TXD;
210 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
211 SYSCTL_INT(_hw_ixv, OID_AUTO, txd, CTLFLAG_RDTUN, &ixv_txd, 0,
212 "Number of Transmit descriptors");
214 /* Number of RX descriptors per ring */
215 static int ixv_rxd = DEFAULT_RXD;
216 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
217 SYSCTL_INT(_hw_ixv, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixv_rxd, 0,
218 "Number of Receive descriptors");
220 /* Legacy Transmit (single queue) */
221 static int ixv_enable_legacy_tx = 0;
222 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
223 SYSCTL_INT(_hw_ixv, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
224 &ixv_enable_legacy_tx, 0, "Enable Legacy TX flow");
227 * Shadow VFTA table, this is needed because
228 * the real filter table gets cleared during
229 * a soft reset and we need to repopulate it.
231 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
/* Indirection for the TX path; selected at attach time depending on whether
 * the legacy (single-queue if_start) or multiqueue (if_transmit) flow is used. */
233 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
234 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
236 MALLOC_DEFINE(M_IXV, "ixv", "ixv driver allocations");
238 /************************************************************************
239 * ixv_probe - Device identification routine
241 * Determines if the driver should be loaded on
242 * adapter based on its PCI vendor/device ID.
244 * return BUS_PROBE_DEFAULT on success, positive on failure
245 ************************************************************************/
/* NOTE(review): the storage-class/return-type line, braces, and the
 * not-found (ENXIO) exit are elided in this excerpt. */
247 ixv_probe(device_t dev)
249 ixgbe_vendor_info_t *ent;
250 u16 pci_vendor_id = 0;
251 u16 pci_device_id = 0;
252 u16 pci_subvendor_id = 0;
253 u16 pci_subdevice_id = 0;
254 char adapter_name[256];
/* Reject non-Intel devices before reading the remaining IDs. */
257 pci_vendor_id = pci_get_vendor(dev);
258 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
261 pci_device_id = pci_get_device(dev);
262 pci_subvendor_id = pci_get_subvendor(dev);
263 pci_subdevice_id = pci_get_subdevice(dev);
/* Scan the ID table; subvendor/subdevice of 0 in the table act as wildcards. */
265 ent = ixv_vendor_info_array;
266 while (ent->vendor_id != 0) {
267 if ((pci_vendor_id == ent->vendor_id) &&
268 (pci_device_id == ent->device_id) &&
269 ((pci_subvendor_id == ent->subvendor_id) ||
270 (ent->subvendor_id == 0)) &&
271 ((pci_subdevice_id == ent->subdevice_id) ||
272 (ent->subdevice_id == 0))) {
273 sprintf(adapter_name, "%s, Version - %s",
274 ixv_strings[ent->index], ixv_driver_version);
275 device_set_desc_copy(dev, adapter_name);
276 return (BUS_PROBE_DEFAULT);
284 /************************************************************************
285 * ixv_attach - Device initialization routine
287 * Called when the driver is being loaded.
288 * Identifies the type of hardware, allocates all resources
289 * and initializes the hardware.
291 * return 0 on success, positive on failure
292 ************************************************************************/
/* NOTE(review): this excerpt elides braces, switch `break`s, the `hw` local
 * declaration, error returns, and the `err_late:`/`err_out:` unwind labels. */
294 ixv_attach(device_t dev)
296 struct adapter *adapter;
300 INIT_DEBUGOUT("ixv_attach: begin");
303 * Make sure BUSMASTER is set, on a VM under
304 * KVM it may not be and will break things.
306 pci_enable_busmaster(dev);
308 /* Allocate, clear, and link in our adapter structure */
309 adapter = device_get_softc(dev);
311 adapter->hw.back = adapter;
/* Hooks used by shared/ixgbe code to re-init or stop this VF instance. */
314 adapter->init_locked = ixv_init_locked;
315 adapter->stop_locked = ixv_stop;
318 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
320 /* Do base PCI setup - map BAR0 */
321 if (ixv_allocate_pci_resources(adapter)) {
322 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
/* Per-device sysctls: debug dump hook and AIM enable. */
328 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
329 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
330 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
333 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
334 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
335 "enable_aim", CTLFLAG_RW, &ixv_enable_aim, 1,
336 "Interrupt Moderation");
338 /* Set up the timer callout */
339 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
341 /* Save off the information about this board */
342 hw->vendor_id = pci_get_vendor(dev);
343 hw->device_id = pci_get_device(dev);
344 hw->revision_id = pci_get_revid(dev);
345 hw->subsystem_vendor_id = pci_get_subvendor(dev);
346 hw->subsystem_device_id = pci_get_subdevice(dev);
348 /* A subset of set_mac_type */
349 switch (hw->device_id) {
350 case IXGBE_DEV_ID_82599_VF:
351 hw->mac.type = ixgbe_mac_82599_vf;
353 case IXGBE_DEV_ID_X540_VF:
354 hw->mac.type = ixgbe_mac_X540_vf;
356 case IXGBE_DEV_ID_X550_VF:
357 hw->mac.type = ixgbe_mac_X550_vf;
359 case IXGBE_DEV_ID_X550EM_X_VF:
360 hw->mac.type = ixgbe_mac_X550EM_x_vf;
362 case IXGBE_DEV_ID_X550EM_A_VF:
363 hw->mac.type = ixgbe_mac_X550EM_a_vf;
366 /* Shouldn't get here since probe succeeded */
367 device_printf(dev, "Unknown device ID!\n");
/* Populate feat_cap/feat_en based on the mac type chosen above. */
373 ixv_init_device_features(adapter);
375 /* Initialize the shared code */
376 error = ixgbe_init_ops_vf(hw);
378 device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
383 /* Setup the mailbox */
384 ixv_init_mbx_params_vf(hw);
386 //hw->mac.max_tx_queues = 2;
387 //hw->mac.max_rx_queues = 2;
389 /* Set the right number of segments */
390 adapter->num_segs = IXGBE_82599_SCATTER;
/* Reset the VF via the PF mailbox; distinguish hard reset failure from
 * other mailbox errors. */
392 error = hw->mac.ops.reset_hw(hw);
393 if (error == IXGBE_ERR_RESET_FAILED)
394 device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
396 device_printf(dev, "...reset_hw() failed with error %d\n",
403 error = hw->mac.ops.init_hw(hw);
405 device_printf(dev, "...init_hw() failed with error %d\n",
411 /* Negotiate mailbox API version */
412 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_12);
414 device_printf(dev, "MBX API 1.2 negotiation failed! Error %d\n",
420 /* If no mac address was assigned, make a random one */
421 if (!ixv_check_ether_addr(hw->mac.addr)) {
422 u8 addr[ETHER_ADDR_LEN];
423 arc4rand(&addr, sizeof(addr), 0);
426 bcopy(addr, hw->mac.addr, sizeof(addr));
427 bcopy(addr, hw->mac.perm_addr, sizeof(addr));
430 /* Register for VLAN events */
431 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
432 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
433 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
434 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
436 /* Sysctls for limiting the amount of work done in the taskqueues */
437 ixv_set_sysctl_value(adapter, "rx_processing_limit",
438 "max number of rx packets to process",
439 &adapter->rx_process_limit, ixv_rx_process_limit);
441 ixv_set_sysctl_value(adapter, "tx_processing_limit",
442 "max number of tx packets to process",
443 &adapter->tx_process_limit, ixv_tx_process_limit);
445 /* Do descriptor calc and sanity checks */
446 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
447 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
448 device_printf(dev, "TXD config issue, using default!\n");
449 adapter->num_tx_desc = DEFAULT_TXD;
451 adapter->num_tx_desc = ixv_txd;
453 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
454 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
455 device_printf(dev, "RXD config issue, using default!\n");
456 adapter->num_rx_desc = DEFAULT_RXD;
458 adapter->num_rx_desc = ixv_rxd;
/* Decide MSI-X vector layout before allocating queue structures. */
461 error = ixv_configure_interrupts(adapter);
465 /* Allocate our TX/RX Queues */
466 if (ixv_allocate_queues(adapter)) {
467 device_printf(dev, "ixv_allocate_queues() failed!\n");
472 /* Setup OS specific network interface */
473 ixv_setup_interface(dev, adapter);
475 error = ixv_allocate_msix(adapter);
477 device_printf(dev, "ixv_allocate_msix() failed!\n");
481 /* Do the stats setup */
482 ixv_save_stats(adapter);
483 ixv_init_stats(adapter);
484 ixv_add_stats_sysctls(adapter);
486 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
487 ixv_netmap_attach(adapter);
489 INIT_DEBUGOUT("ixv_attach: end");
/* Error unwind (labels elided in this excerpt): release queues, PCI
 * resources, and the core lock in reverse order of acquisition. */
494 ixv_free_transmit_structures(adapter);
495 ixv_free_receive_structures(adapter);
496 free(adapter->queues, M_IXV);
498 ixv_free_pci_resources(adapter);
499 IXGBE_CORE_LOCK_DESTROY(adapter);
504 /************************************************************************
505 * ixv_detach - Device removal routine
507 * Called when the driver is being removed.
508 * Stops the adapter and deallocates all the resources
509 * that were allocated for driver operation.
511 * return 0 on success, positive on failure
512 ************************************************************************/
514 ixv_detach(device_t dev)
516 struct adapter *adapter = device_get_softc(dev);
517 struct ix_queue *que = adapter->queues;
519 INIT_DEBUGOUT("ixv_detach: begin");
521 /* Make sure VLANS are not using driver */
522 if (adapter->ifp->if_vlantrunk != NULL) {
523 device_printf(dev, "Vlan in use, detach first\n");
/* Detach from the network stack, then stop the hardware under the core
 * lock (the stop call between lock/unlock is elided in this excerpt). */
527 ether_ifdetach(adapter->ifp);
528 IXGBE_CORE_LOCK(adapter);
530 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues before tearing down rings. */
532 for (int i = 0; i < adapter->num_queues; i++, que++) {
534 struct tx_ring *txr = que->txr;
535 taskqueue_drain(que->tq, &txr->txq_task);
536 taskqueue_drain(que->tq, &que->que_task);
537 taskqueue_free(que->tq);
541 /* Drain the Mailbox(link) queue */
543 taskqueue_drain(adapter->tq, &adapter->link_task);
544 taskqueue_free(adapter->tq);
547 /* Unregister VLAN events */
548 if (adapter->vlan_attach != NULL)
549 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
550 if (adapter->vlan_detach != NULL)
551 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
553 callout_drain(&adapter->timer);
555 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
556 netmap_detach(adapter->ifp);
558 ixv_free_pci_resources(adapter);
559 bus_generic_detach(dev);
560 if_free(adapter->ifp);
562 ixv_free_transmit_structures(adapter);
563 ixv_free_receive_structures(adapter);
564 free(adapter->queues, M_IXV);
566 IXGBE_CORE_LOCK_DESTROY(adapter);
571 /************************************************************************
572 * ixv_shutdown - Shutdown entry point
573 ************************************************************************/
575 ixv_shutdown(device_t dev)
577 struct adapter *adapter = device_get_softc(dev);
/* Quiesce the adapter under the core lock; the stop call between
 * lock/unlock is elided in this excerpt. */
578 IXGBE_CORE_LOCK(adapter);
580 IXGBE_CORE_UNLOCK(adapter);
586 /************************************************************************
587 * ixv_ioctl - Ioctl entry point
589 * Called when the user wants to configure the interface.
591 * return 0 on success, positive on failure
592 ************************************************************************/
/* NOTE(review): the `switch (command)` statement and its `case SIOCxxx:`
 * labels are elided in this excerpt; each IOCTL_DEBUGOUT below marks the
 * start of the corresponding case. */
594 ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
596 struct adapter *adapter = ifp->if_softc;
597 struct ifreq *ifr = (struct ifreq *)data;
598 #if defined(INET) || defined(INET6)
599 struct ifaddr *ifa = (struct ifaddr *)data;
600 bool avoid_reset = FALSE;
/* SIOCSIFADDR: only INET/INET6 addresses qualify for the no-reset path. */
608 if (ifa->ifa_addr->sa_family == AF_INET)
612 if (ifa->ifa_addr->sa_family == AF_INET6)
615 #if defined(INET) || defined(INET6)
617 * Calling init results in link renegotiation,
618 * so we avoid doing it when possible.
621 ifp->if_flags |= IFF_UP;
622 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
624 if (!(ifp->if_flags & IFF_NOARP))
625 arp_ifinit(ifp, ifa);
627 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate, store, and re-init so rings match the new size. */
631 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
632 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
635 IXGBE_CORE_LOCK(adapter);
636 ifp->if_mtu = ifr->ifr_mtu;
637 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
638 ixv_init_locked(adapter);
639 IXGBE_CORE_UNLOCK(adapter);
643 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
644 IXGBE_CORE_LOCK(adapter);
645 if (ifp->if_flags & IFF_UP) {
646 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
647 ixv_init_locked(adapter);
649 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
651 adapter->if_flags = ifp->if_flags;
652 IXGBE_CORE_UNLOCK(adapter);
/* SIOCADDMULTI/SIOCDELMULTI: reprogram the MC filter with intrs masked. */
656 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
657 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
658 IXGBE_CORE_LOCK(adapter);
659 ixv_disable_intr(adapter);
660 ixv_set_multi(adapter);
661 ixv_enable_intr(adapter);
662 IXGBE_CORE_UNLOCK(adapter);
667 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
668 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle only the capability bits that changed, then re-init
 * if the interface is running so hardware offloads match if_capenable. */
672 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
673 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
674 if (mask & IFCAP_HWCSUM)
675 ifp->if_capenable ^= IFCAP_HWCSUM;
676 if (mask & IFCAP_TSO4)
677 ifp->if_capenable ^= IFCAP_TSO4;
678 if (mask & IFCAP_LRO)
679 ifp->if_capenable ^= IFCAP_LRO;
680 if (mask & IFCAP_VLAN_HWTAGGING)
681 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
682 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
683 IXGBE_CORE_LOCK(adapter);
684 ixv_init_locked(adapter);
685 IXGBE_CORE_UNLOCK(adapter);
687 VLAN_CAPABILITIES(ifp);
/* default: hand anything unrecognized to the generic ethernet handler. */
692 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
693 error = ether_ioctl(ifp, command, data);
700 /************************************************************************
701 * ixv_init_device_features
702 ************************************************************************/
/* Populate adapter->feat_cap (what the hardware supports) and
 * adapter->feat_en (what this driver instance actually enables). */
704 ixv_init_device_features(struct adapter *adapter)
706 adapter->feat_cap = IXGBE_FEATURE_NETMAP
708 | IXGBE_FEATURE_LEGACY_TX;
710 /* A tad short on feature flags for VFs, atm. */
711 switch (adapter->hw.mac.type) {
712 case ixgbe_mac_82599_vf:
713 adapter->feat_cap |= IXGBE_FEATURE_FRAME_LIMIT;
715 case ixgbe_mac_X540_vf:
716 case ixgbe_mac_X550_vf:
717 case ixgbe_mac_X550EM_x_vf:
718 case ixgbe_mac_X550EM_a_vf:
723 /* Enabled by default... */
725 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
726 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
727 /* Receive-Side Scaling (RSS) */
728 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
729 adapter->feat_en |= IXGBE_FEATURE_RSS;
730 /* Frame size limitation */
731 if (adapter->feat_cap & IXGBE_FEATURE_FRAME_LIMIT)
732 adapter->feat_en |= IXGBE_FEATURE_FRAME_LIMIT;
734 /* Enabled via sysctl... */
735 /* Legacy (single queue) transmit */
736 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
737 ixv_enable_legacy_tx)
738 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
739 } /* ixv_init_device_features */
741 /************************************************************************
742 * ixv_init_locked - Init entry point
744 * Used in two ways: It is used by the stack as init entry
745 * point in network interface structure. It is also used
746 * by the driver as a hw/sw initialization routine to get
747 * to a consistent state.
749 * return 0 on success, positive on failure
750 ************************************************************************/
751 #define IXGBE_MHADD_MFS_SHIFT 16
/* Caller must hold the core mutex (asserted below). */
754 ixv_init_locked(struct adapter *adapter)
756 struct ifnet *ifp = adapter->ifp;
757 device_t dev = adapter->dev;
758 struct ixgbe_hw *hw = &adapter->hw;
761 INIT_DEBUGOUT("ixv_init_locked: begin");
762 mtx_assert(&adapter->core_mtx, MA_OWNED);
/* Quiesce the hardware and the watchdog timer before reprogramming. */
763 hw->adapter_stopped = FALSE;
764 hw->mac.ops.stop_adapter(hw);
765 callout_stop(&adapter->timer);
767 /* reprogram the RAR[0] in case user changed it. */
768 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
770 /* Get the latest mac address, User can use a LAA */
771 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
772 IXGBE_ETH_LENGTH_OF_ADDRESS);
773 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
775 /* Prepare transmit descriptors and buffers */
776 if (ixv_setup_transmit_structures(adapter)) {
777 device_printf(dev, "Could not setup transmit structures\n");
782 /* Reset VF and renegotiate mailbox API version */
783 hw->mac.ops.reset_hw(hw);
784 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_12);
/* NOTE(review): message says "1.1" but ixgbe_mbox_api_12 (1.2) is what is
 * negotiated above — the text looks stale; confirm against upstream. */
786 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n",
789 ixv_initialize_transmit_units(adapter);
791 /* Setup Multicast table */
792 ixv_set_multi(adapter);
795 * Determine the correct mbuf pool
796 * for doing jumbo/headersplit
798 if (ifp->if_mtu > ETHERMTU)
799 adapter->rx_mbuf_sz = MJUMPAGESIZE;
801 adapter->rx_mbuf_sz = MCLBYTES;
803 /* Prepare receive descriptors and buffers */
804 if (ixv_setup_receive_structures(adapter)) {
805 device_printf(dev, "Could not setup receive structures\n");
810 /* Configure RX settings */
811 ixv_initialize_receive_units(adapter);
813 /* Set the various hardware offload abilities */
814 ifp->if_hwassist = 0;
815 if (ifp->if_capenable & IFCAP_TSO4)
816 ifp->if_hwassist |= CSUM_TSO;
817 if (ifp->if_capenable & IFCAP_TXCSUM) {
818 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
819 #if __FreeBSD_version >= 800000
820 ifp->if_hwassist |= CSUM_SCTP;
824 /* Set up VLAN offload and filter */
825 ixv_setup_vlan_support(adapter);
827 /* Set up MSI-X routing */
828 ixv_configure_ivars(adapter);
830 /* Set up auto-mask */
831 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
833 /* Set moderation on the Link interrupt */
834 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
837 ixv_init_stats(adapter);
839 /* Config/Enable Link */
840 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
/* Start the watchdog/stats timer (1 second period). */
844 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
846 /* And now turn on interrupts */
847 ixv_enable_intr(adapter);
849 /* Now inform the stack we're ready */
850 ifp->if_drv_flags |= IFF_DRV_RUNNING;
851 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
854 } /* ixv_init_locked */
856 /************************************************************************
858 ************************************************************************/
/* ifnet if_init hook (signature line elided in this excerpt): wraps
 * ixv_init_locked() with the core lock. */
862 struct adapter *adapter = arg;
864 IXGBE_CORE_LOCK(adapter);
865 ixv_init_locked(adapter);
866 IXGBE_CORE_UNLOCK(adapter);
873 * MSI-X Interrupt Handlers and Tasklets
/* Unmask the interrupt for one queue vector via VTEIMS. */
877 ixv_enable_queue(struct adapter *adapter, u32 vector)
879 struct ixgbe_hw *hw = &adapter->hw;
880 u32 queue = 1 << vector;
883 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
884 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
885 } /* ixv_enable_queue */
/* Mask the interrupt for one queue vector via VTEIMC. */
888 ixv_disable_queue(struct adapter *adapter, u32 vector)
890 struct ixgbe_hw *hw = &adapter->hw;
/* NOTE(review): (u64)(1 << vector) shifts a 32-bit int before widening —
 * undefined for vector >= 31; (u64)1 << vector would be safe. Harmless in
 * practice only because VF vector counts are small. */
891 u64 queue = (u64)(1 << vector);
894 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
895 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
896 } /* ixv_disable_queue */
/* Trigger software interrupts for the given queue bitmap via VTEICS. */
899 ixv_rearm_queues(struct adapter *adapter, u64 queues)
901 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
902 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
903 } /* ixv_rearm_queues */
/* Deferred (taskqueue) per-queue handler: drains RX, restarts TX, and
 * re-queues itself while work remains; unmasks the vector when done. */
907 ixv_handle_que(void *context, int pending)
909 struct ix_queue *que = context;
910 struct adapter *adapter = que->adapter;
911 struct tx_ring *txr = que->txr;
912 struct ifnet *ifp = adapter->ifp;
915 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
916 more = ixv_rxeof(que);
/* TX path under the ring lock (lock acquisition elided in this excerpt). */
919 if (!ixv_ring_empty(ifp, txr->br))
920 ixv_start_locked(ifp, txr);
921 IXGBE_TX_UNLOCK(txr);
/* More RX work pending: reschedule instead of re-enabling the interrupt. */
923 taskqueue_enqueue(que->tq, &que->que_task);
928 /* Reenable this interrupt */
929 ixv_enable_queue(adapter, que->msix);
932 } /* ixv_handle_que */
934 /************************************************************************
935 * ixv_msix_que - MSI Queue Interrupt Service routine
936 ************************************************************************/
938 ixv_msix_que(void *arg)
940 struct ix_queue *que = arg;
941 struct adapter *adapter = que->adapter;
942 struct ifnet *ifp = adapter->ifp;
943 struct tx_ring *txr = que->txr;
944 struct rx_ring *rxr = que->rxr;
/* Mask this vector for the duration of the handler. */
948 ixv_disable_queue(adapter, que->msix);
951 more = ixv_rxeof(que);
956 * Make certain that if the stack
957 * has anything queued the task gets
958 * scheduled to handle it.
960 if (!ixv_ring_empty(adapter->ifp, txr->br))
961 ixv_start_locked(ifp, txr);
962 IXGBE_TX_UNLOCK(txr);
/* AIM disabled: skip straight to the reschedule/re-enable decision. */
966 if (ixv_enable_aim == FALSE)
969 * Do Adaptive Interrupt Moderation:
970 * - Write out last calculated setting
971 * - Calculate based on average size over
974 if (que->eitr_setting)
975 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
978 que->eitr_setting = 0;
980 /* Idle, do nothing */
981 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* Derive a new ITR value from the average packet size on both rings. */
984 if ((txr->bytes) && (txr->packets))
985 newitr = txr->bytes/txr->packets;
986 if ((rxr->bytes) && (rxr->packets))
987 newitr = max(newitr, (rxr->bytes / rxr->packets));
988 newitr += 24; /* account for hardware frame, crc */
990 /* set an upper boundary */
991 newitr = min(newitr, 3000);
993 /* Be nice to the mid range */
994 if ((newitr > 300) && (newitr < 1200))
995 newitr = (newitr / 3);
997 newitr = (newitr / 2);
/* VTEITR expects the interval replicated in the upper half-word. */
999 newitr |= newitr << 16;
1001 /* save for next interrupt */
1002 que->eitr_setting = newitr;
/* More work: defer to the taskqueue; otherwise unmask the vector. */
1012 taskqueue_enqueue(que->tq, &que->que_task);
1013 else /* Reenable this interrupt */
1014 ixv_enable_queue(adapter, que->msix);
1017 } /* ixv_msix_que */
1019 /************************************************************************
1021 ************************************************************************/
/* MSI-X mailbox/link vector handler: acknowledges the cause and defers
 * link-status work to the link taskqueue. */
1023 ixv_msix_mbx(void *arg)
1025 struct adapter *adapter = arg;
1026 struct ixgbe_hw *hw = &adapter->hw;
1029 ++adapter->link_irq;
1031 /* First get the cause */
1032 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1033 /* Clear interrupt with write */
1034 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1036 /* Link status change */
1037 if (reg & IXGBE_EICR_LSC)
1038 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* Re-enable the "other" (mailbox) interrupt source. */
1040 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1043 } /* ixv_msix_mbx */
1045 /************************************************************************
1046 * ixv_media_status - Media Ioctl callback
1048 * Called whenever the user queries the status of
1049 * the interface using ifconfig.
1050 ************************************************************************/
1052 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1054 struct adapter *adapter = ifp->if_softc;
1056 INIT_DEBUGOUT("ixv_media_status: begin");
1057 IXGBE_CORE_LOCK(adapter);
1058 ixv_update_link_status(adapter);
1060 ifmr->ifm_status = IFM_AVALID;
1061 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and bail (return elided here). */
1063 if (!adapter->link_active) {
1064 IXGBE_CORE_UNLOCK(adapter);
1068 ifmr->ifm_status |= IFM_ACTIVE;
/* Map the negotiated link speed to an ifmedia subtype (always full duplex). */
1070 switch (adapter->link_speed) {
1071 case IXGBE_LINK_SPEED_1GB_FULL:
1072 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1074 case IXGBE_LINK_SPEED_10GB_FULL:
1075 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1077 case IXGBE_LINK_SPEED_100_FULL:
1078 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1080 case IXGBE_LINK_SPEED_10_FULL:
1081 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1085 IXGBE_CORE_UNLOCK(adapter);
1088 } /* ixv_media_status */
1090 /************************************************************************
1091 * ixv_media_change - Media Ioctl callback
1093 * Called when the user changes speed/duplex using
1094 * media/mediaopt option with ifconfig.
1095 ************************************************************************/
/* A VF cannot set its own media; only IFM_AUTO is accepted. */
1097 ixv_media_change(struct ifnet *ifp)
1099 struct adapter *adapter = ifp->if_softc;
1100 struct ifmedia *ifm = &adapter->media;
1102 INIT_DEBUGOUT("ixv_media_change: begin");
1104 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1107 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1111 device_printf(adapter->dev, "Only auto media type\n");
1116 } /* ixv_media_change */
1119 /************************************************************************
1120 * ixv_set_multi - Multicast Update
1122 * Called whenever multicast address list is updated.
1123 ************************************************************************/
1125 ixv_set_multi(struct adapter *adapter)
/* Flat array of 6-byte MAC addresses handed to the shared mailbox code. */
1127 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1129 struct ifmultiaddr *ifma;
1130 struct ifnet *ifp = adapter->ifp;
1133 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1135 #if __FreeBSD_version < 800000
1138 if_maddr_rlock(ifp);
/* Copy each link-layer multicast address into mta[].
 * NOTE(review): no visible bound check of mcnt against
 * MAX_NUM_MULTICAST_ADDRESSES before the copy — confirm upstream whether
 * overflow is prevented elsewhere. */
1140 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1141 if (ifma->ifma_addr->sa_family != AF_LINK)
1143 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1144 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1145 IXGBE_ETH_LENGTH_OF_ADDRESS);
1148 #if __FreeBSD_version < 800000
1149 IF_ADDR_UNLOCK(ifp);
1151 if_maddr_runlock(ifp);
/* Push the list to the PF via the mailbox, one address at a time through
 * the ixv_mc_array_itr iterator. */
1156 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1157 ixv_mc_array_itr, TRUE);
1160 } /* ixv_set_multi */
1162 /************************************************************************
1165 * An iterator function needed by the multicast shared code.
1166 * It feeds the shared code routine the addresses in the
1167 * array of ixv_set_multi() one by one.
1168 ************************************************************************/
/* Returns the current address and advances *update_ptr by one 6-byte entry;
 * the return statement and vmdq handling are elided in this excerpt. */
1170 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1172 u8 *addr = *update_ptr;
1176 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1177 *update_ptr = newptr;
1180 } /* ixv_mc_array_itr */
1182 /************************************************************************
1183 * ixv_local_timer - Timer routine
1185 * Checks for link status, updates statistics,
1186 * and runs the watchdog check.
1187 ************************************************************************/
1189 ixv_local_timer(void *arg)
1191 struct adapter *adapter = arg;
1192 device_t dev = adapter->dev;
1193 struct ix_queue *que = adapter->queues;
/* Must be called with the core lock held (asserted below) */
1197 mtx_assert(&adapter->core_mtx, MA_OWNED);
1199 ixv_check_link(adapter);
1202 ixv_update_stats(adapter);
1205 * Check the TX queues status
1206 * - mark hung queues so we don't schedule on them
1207 * - watchdog only if all queues show hung
1209 for (int i = 0; i < adapter->num_queues; i++, que++) {
1210 /* Keep track of queues with work for soft irq */
1212 queues |= ((u64)1 << que->me);
1214 * Each time txeof runs without cleaning, but there
1215 * are uncleaned descriptors it increments busy. If
1216 * we get to the MAX we declare it hung.
1218 if (que->busy == IXGBE_QUEUE_HUNG) {
1220 /* Mark the queue as inactive */
1221 adapter->active_queues &= ~((u64)1 << que->me);
1224 /* Check if we've come back from hung */
1225 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1226 adapter->active_queues |= ((u64)1 << que->me);
1228 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1230 "Warning queue %d appears to be hung!\n", i);
1231 que->txr->busy = IXGBE_QUEUE_HUNG;
1237 /* Only truly watchdog if all queues show hung */
1238 if (hung == adapter->num_queues)
1240 else if (queues != 0) { /* Force an IRQ on queues with work */
1241 ixv_rearm_queues(adapter, queues);
/* Re-arm ourselves to fire again in one second */
1244 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* Watchdog tail: mark the interface down and reinitialize the adapter
 * (NOTE(review): the watchdog label itself is elided from this listing) */
1250 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1251 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1252 adapter->watchdog_events++;
1253 ixv_init_locked(adapter);
1254 } /* ixv_local_timer */
1256 /************************************************************************
1257 * ixv_update_link_status - Update OS on link state
1259 * Note: Only updates the OS on the cached link state.
1260 * The real check of the hardware only happens with
1262 ************************************************************************/
1264 ixv_update_link_status(struct adapter *adapter)
1266 struct ifnet *ifp = adapter->ifp;
1267 device_t dev = adapter->dev;
1269 if (adapter->link_up) {
/* Transition down -> up: report speed and notify the stack once */
1270 if (adapter->link_active == FALSE) {
1272 device_printf(dev,"Link is up %d Gbps %s \n",
/* NOTE(review): 128 appears to be the shared-code flag for 10G — confirm
 * against IXGBE_LINK_SPEED_10GB_FULL */
1273 ((adapter->link_speed == 128) ? 10 : 1),
1275 adapter->link_active = TRUE;
1276 if_link_state_change(ifp, LINK_STATE_UP);
1278 } else { /* Link down */
/* Transition up -> down: notify the stack once */
1279 if (adapter->link_active == TRUE) {
1281 device_printf(dev,"Link is Down\n");
1282 if_link_state_change(ifp, LINK_STATE_DOWN);
1283 adapter->link_active = FALSE;
1288 } /* ixv_update_link_status */
1291 /************************************************************************
1292 * ixv_stop - Stop the hardware
1294 * Disables all traffic on the adapter by issuing a
1295 * global reset on the MAC and deallocates TX/RX buffers.
1296 ************************************************************************/
1301 struct adapter *adapter = arg;
1302 struct ixgbe_hw *hw = &adapter->hw;
/* Caller must hold the core lock */
1306 mtx_assert(&adapter->core_mtx, MA_OWNED);
1308 INIT_DEBUGOUT("ixv_stop: begin\n");
1309 ixv_disable_intr(adapter);
1311 /* Tell the stack that the interface is no longer active */
1312 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1314 hw->mac.ops.reset_hw(hw);
/* Clear adapter_stopped so the shared stop_adapter() below actually runs */
1315 adapter->hw.adapter_stopped = FALSE;
1316 hw->mac.ops.stop_adapter(hw);
1317 callout_stop(&adapter->timer);
1319 /* reprogram the RAR[0] in case user changed it. */
1320 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1326 /************************************************************************
1327 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
1328 ************************************************************************/
1330 ixv_allocate_msix(struct adapter *adapter)
1332 device_t dev = adapter->dev;
1333 struct ix_queue *que = adapter->queues;
1334 struct tx_ring *txr = adapter->tx_rings;
1335 int error, msix_ctrl, rid, vector = 0;
/* One MSI-X vector per queue: IRQ resource, handler, taskqueue, CPU binding */
1337 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1339 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1340 RF_SHAREABLE | RF_ACTIVE);
1341 if (que->res == NULL) {
1342 device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
1346 /* Set the handler function */
1347 error = bus_setup_intr(dev, que->res,
1348 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1349 ixv_msix_que, que, &que->tag);
1352 device_printf(dev, "Failed to register QUE handler");
1355 #if __FreeBSD_version >= 800504
1356 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1359 adapter->active_queues |= (u64)(1 << que->msix);
1361 * Bind the MSI-X vector, and thus the
1362 * ring to the corresponding CPU.
1364 if (adapter->num_queues > 1)
1365 bus_bind_intr(dev, que->res, i);
1366 TASK_INIT(&txr->txq_task, 0, ixv_deferred_mq_start, txr);
1367 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1368 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1369 taskqueue_thread_enqueue, &que->tq);
1370 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1371 device_get_nameunit(adapter->dev));
/* One extra vector for the PF mailbox / link interrupt */
1376 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1377 RF_SHAREABLE | RF_ACTIVE);
1378 if (!adapter->res) {
1380 "Unable to allocate bus resource: MBX interrupt [%d]\n",
1384 /* Set the mbx handler function */
1385 error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
1386 NULL, ixv_msix_mbx, adapter, &adapter->tag);
1388 adapter->res = NULL;
1389 device_printf(dev, "Failed to register LINK handler");
1392 #if __FreeBSD_version >= 800504
1393 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1395 adapter->vector = vector;
1396 /* Tasklets for Mailbox */
1397 TASK_INIT(&adapter->link_task, 0, ixv_handle_link, adapter);
1398 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1399 taskqueue_thread_enqueue, &adapter->tq);
1400 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1401 device_get_nameunit(adapter->dev));
1403 * Due to a broken design QEMU will fail to properly
1404 * enable the guest for MSI-X unless the vectors in
1405 * the table are all set up, so we must rewrite the
1406 * ENABLE in the MSI-X control register again at this
1407 * point to cause it to successfully initialize us.
1409 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1410 pci_find_cap(dev, PCIY_MSIX, &rid);
1411 rid += PCIR_MSIX_CTRL;
1412 msix_ctrl = pci_read_config(dev, rid, 2);
1413 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1414 pci_write_config(dev, rid, msix_ctrl, 2);
1418 } /* ixv_allocate_msix */
1420 /************************************************************************
1421 * ixv_configure_interrupts - Setup MSI-X resources
1423 * Note: The VF device MUST use MSI-X, there is no fallback.
1424 ************************************************************************/
1426 ixv_configure_interrupts(struct adapter *adapter)
1428 device_t dev = adapter->dev;
1429 int rid, want, msgs;
1431 /* Must have at least 2 MSI-X vectors */
1432 msgs = pci_msix_count(dev);
1436 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1438 if (adapter->msix_mem == NULL) {
1439 device_printf(adapter->dev, "Unable to map MSI-X table \n");
1444 * Want vectors for the queues,
1445 * plus an additional for mailbox.
1447 want = adapter->num_queues + 1;
/* Not enough vectors: shrink the queue count to fit (one vector reserved
 * for the mailbox) */
1450 adapter->num_queues = msgs - 1;
1453 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
1454 device_printf(adapter->dev,
1455 "Using MSI-X interrupts with %d vectors\n", want);
1456 /* reflect correct sysctl value */
1457 ixv_num_queues = adapter->num_queues;
1461 /* Release in case alloc was insufficient */
1462 pci_release_msi(dev);
1464 if (adapter->msix_mem != NULL) {
1465 bus_release_resource(dev, SYS_RES_MEMORY, rid,
1467 adapter->msix_mem = NULL;
1469 device_printf(adapter->dev, "MSI-X config error\n");
1472 } /* ixv_configure_interrupts */
1475 /************************************************************************
1476 * ixv_allocate_pci_resources
1477 ************************************************************************/
1479 ixv_allocate_pci_resources(struct adapter *adapter)
1481 device_t dev = adapter->dev;
/* Map BAR0 so register accesses work via the osdep bus-space handles */
1485 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1488 if (!(adapter->pci_mem)) {
1489 device_printf(dev, "Unable to allocate bus resource: memory\n");
1493 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1494 adapter->osdep.mem_bus_space_handle =
1495 rman_get_bushandle(adapter->pci_mem);
/* Shared code reads registers through hw_addr; point it at the handle */
1496 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1498 /* Pick up the tuneable queues */
1499 adapter->num_queues = ixv_num_queues;
1502 } /* ixv_allocate_pci_resources */
1504 /************************************************************************
1505 * ixv_free_pci_resources
1506 ************************************************************************/
1508 ixv_free_pci_resources(struct adapter * adapter)
1510 struct ix_queue *que = adapter->queues;
1511 device_t dev = adapter->dev;
1514 memrid = PCIR_BAR(MSIX_82598_BAR);
1517 * There is a slight possibility of a failure mode
1518 * in attach that will result in entering this function
1519 * before interrupt resources have been initialized, and
1520 * in that case we do not want to execute the loops below
1521 * We can detect this reliably by the state of the adapter
1524 if (adapter->res == NULL)
1528 * Release all msix queue resources:
1530 for (int i = 0; i < adapter->num_queues; i++, que++) {
1531 rid = que->msix + 1;
1532 if (que->tag != NULL) {
1533 bus_teardown_intr(dev, que->res, que->tag);
1536 if (que->res != NULL)
1537 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1541 /* Clean the Mailbox interrupt last */
1542 rid = adapter->vector + 1;
1544 if (adapter->tag != NULL) {
1545 bus_teardown_intr(dev, adapter->res, adapter->tag);
1546 adapter->tag = NULL;
1548 if (adapter->res != NULL)
1549 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
/* Finally give back the MSI vectors, the MSI-X table BAR, and BAR0 */
1552 pci_release_msi(dev);
1554 if (adapter->msix_mem != NULL)
1555 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
1558 if (adapter->pci_mem != NULL)
1559 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1563 } /* ixv_free_pci_resources */
1565 /************************************************************************
1566 * ixv_setup_interface
1568 * Setup networking device structure and register an interface.
1569 ************************************************************************/
1571 ixv_setup_interface(device_t dev, struct adapter *adapter)
1575 INIT_DEBUGOUT("ixv_setup_interface: begin");
1577 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1579 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1580 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1581 ifp->if_baudrate = 1000000000;
1582 ifp->if_init = ixv_init;
1583 ifp->if_softc = adapter;
1584 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1585 ifp->if_ioctl = ixv_ioctl;
1586 #if __FreeBSD_version >= 1100045
1587 /* TSO parameters */
1588 ifp->if_hw_tsomax = 65518;
1589 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1590 ifp->if_hw_tsomaxsegsize = 2048;
/* Choose legacy if_start vs multiqueue if_transmit paths at attach time */
1592 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1593 ifp->if_start = ixv_legacy_start;
1594 ixv_start_locked = ixv_legacy_start_locked;
1595 ixv_ring_empty = ixgbe_legacy_ring_empty;
1597 ifp->if_transmit = ixv_mq_start;
1598 ifp->if_qflush = ixv_qflush;
1599 ixv_start_locked = ixv_mq_start_locked;
1600 ixv_ring_empty = drbr_empty;
1602 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1604 ether_ifattach(ifp, adapter->hw.mac.addr);
1606 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1609 * Tell the upper layer(s) we support long frames.
1611 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1613 /* Set capability flags */
1614 ifp->if_capabilities |= IFCAP_HWCSUM
1618 | IFCAP_VLAN_HWTAGGING
1624 /* Enable the above capabilities by default */
1625 ifp->if_capenable = ifp->if_capabilities;
1628 * Specify the media types supported by this adapter and register
1629 * callbacks to update media and link information
1631 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1633 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1634 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1637 } /* ixv_setup_interface */
1640 /************************************************************************
1641 * ixv_initialize_transmit_units - Enable transmit unit.
1642 ************************************************************************/
1644 ixv_initialize_transmit_units(struct adapter *adapter)
1646 struct tx_ring *txr = adapter->tx_rings;
1647 struct ixgbe_hw *hw = &adapter->hw;
/* Program each TX ring: write-back threshold, head/tail, DMA base/len,
 * relaxed-ordering control, then enable the queue */
1650 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1651 u64 tdba = txr->txdma.dma_paddr;
1654 /* Set WTHRESH to 8, burst writeback */
1655 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
/* WTHRESH lives in bits 16+ of TXDCTL */
1656 txdctl |= (8 << 16);
1657 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1659 /* Set the HW Tx Head and Tail indices */
1660 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1663 /* Set Tx Tail register */
1664 txr->tail = IXGBE_VFTDT(i);
1666 /* Set Ring parameters */
1667 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1668 (tdba & 0x00000000ffffffffULL));
1669 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1670 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1671 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
/* Disable write-relaxed-ordering for descriptor writebacks */
1672 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1673 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1674 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1677 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1678 txdctl |= IXGBE_TXDCTL_ENABLE;
1679 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1683 } /* ixv_initialize_transmit_units */
1686 /************************************************************************
1687 * ixv_initialize_rss_mapping
1688 ************************************************************************/
1690 ixv_initialize_rss_mapping(struct adapter *adapter)
1692 struct ixgbe_hw *hw = &adapter->hw;
1693 u32 reta = 0, mrqc, rss_key[10];
1696 u32 rss_hash_config;
/* Use the kernel RSS key when the feature is enabled, else a random key */
1698 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1699 /* Fetch the configured RSS key */
1700 rss_getkey((uint8_t *)&rss_key);
1702 /* set up random bits */
1703 arc4rand(&rss_key, sizeof(rss_key), 0);
1706 /* Now fill out hash function seeds */
1707 for (i = 0; i < 10; i++)
1708 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1710 /* Set up the redirection table */
1711 for (i = 0, j = 0; i < 64; i++, j++) {
/* Wrap the queue index so entries cycle over num_queues */
1712 if (j == adapter->num_queues)
1715 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1717 * Fetch the RSS bucket id for the given indirection
1718 * entry. Cap it at the number of configured buckets
1719 * (which is num_queues.)
1721 queue_id = rss_get_indirection_to_bucket(i);
1722 queue_id = queue_id % adapter->num_queues;
1727 * The low 8 bits are for hash value (n+0);
1728 * The next 8 bits are for hash value (n+1), etc.
1731 reta |= ((uint32_t)queue_id) << 24;
/* Four 8-bit entries per VFRETA register, flushed every 4th iteration */
1733 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1738 /* Perform hash on these packet types */
1739 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1740 rss_hash_config = rss_gethashconfig();
1743 * Disable UDP - IP fragments aren't currently being handled
1744 * and so we end up with a mix of 2-tuple and 4-tuple
1747 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1748 | RSS_HASHTYPE_RSS_TCP_IPV4
1749 | RSS_HASHTYPE_RSS_IPV6
1750 | RSS_HASHTYPE_RSS_TCP_IPV6;
/* Translate the kernel hash-type mask into MRQC field enables; the VF
 * hardware has no EX variants, so those are only reported, not set */
1753 mrqc = IXGBE_MRQC_RSSEN;
1754 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1755 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1756 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1757 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1758 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1759 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1760 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1761 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1762 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1763 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1765 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1766 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1768 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1769 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1770 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
1771 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
1773 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1774 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1775 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1776 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1778 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1779 } /* ixv_initialize_rss_mapping */
1782 /************************************************************************
1783 * ixv_initialize_receive_units - Setup receive registers and features.
1784 ************************************************************************/
1786 ixv_initialize_receive_units(struct adapter *adapter)
1788 struct rx_ring *rxr = adapter->rx_rings;
1789 struct ixgbe_hw *hw = &adapter->hw;
1790 struct ifnet *ifp = adapter->ifp;
1791 u32 bufsz, rxcsum, psrtype;
/* Buffer size selection: 4K buffers for jumbo MTU, 2K otherwise */
1793 if (ifp->if_mtu > ETHERMTU)
1794 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1796 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1798 psrtype = IXGBE_PSRTYPE_TCPHDR
1799 | IXGBE_PSRTYPE_UDPHDR
1800 | IXGBE_PSRTYPE_IPV4HDR
1801 | IXGBE_PSRTYPE_IPV6HDR
1802 | IXGBE_PSRTYPE_L2HDR;
1804 if (adapter->num_queues > 1)
1807 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1809 /* Tell PF our max_frame size */
1810 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size)) {
1812 * Workaround for hardware that can't support frames with VLAN
1813 * headers without turning on jumbo frames in the PF driver.
1815 if (adapter->feat_en & IXGBE_FEATURE_FRAME_LIMIT) {
1816 device_printf(adapter->dev, "This is a device with a frame size limitation. The PF driver is forced to deny a change in frame size to allow for VLAN headers while jumbo frames is not enabled. To work around this, we're telling the stack that the MTU must shrink by sizeof(VLAN header) if VLANs are enabled. Thus, our maximum frame size is standard MTU + ethernet header/CRC. If you want standard MTU plus VLAN headers, you can also enable jumbo frames in the PF first.\n");
1817 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1818 ifp->if_capabilities &= ~IFCAP_VLAN_MTU;
1819 ifp->if_capenable &= ~IFCAP_VLAN_MTU;
/* Retry with the reduced frame size; failure here means the PF setup is bad */
1822 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size)) {
1823 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
/* Program each RX ring: disable, set DMA base/len, reset indices, SRRCTL,
 * then re-enable and set the tail pointer */
1827 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1828 u64 rdba = rxr->rxdma.dma_paddr;
1831 /* Disable the queue */
1832 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1833 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1834 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Poll (bounded) until the hardware reports the queue disabled */
1835 for (int j = 0; j < 10; j++) {
1836 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1837 IXGBE_RXDCTL_ENABLE)
1843 /* Setup the Base and Length of the Rx Descriptor Ring */
1844 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1845 (rdba & 0x00000000ffffffffULL));
1846 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
1847 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1848 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1850 /* Reset the ring indices */
1851 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1852 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1854 /* Set up the SRRCTL register */
1855 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1856 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1857 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1859 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1860 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1862 /* Capture Rx Tail index */
1863 rxr->tail = IXGBE_VFRDT(rxr->me);
1865 /* Do the queue enabling last */
1866 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1867 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1868 for (int k = 0; k < 10; k++) {
1869 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1870 IXGBE_RXDCTL_ENABLE)
1876 /* Set the Tail Pointer */
1878 * In netmap mode, we must preserve the buffers made
1879 * available to userspace before the if_init()
1880 * (this is true by default on the TX side, because
1881 * init makes all buffers available to userspace).
1883 * netmap_reset() and the device specific routines
1884 * (e.g. ixgbe_setup_receive_rings()) map these
1885 * buffers at the end of the NIC ring, so here we
1886 * must set the RDT (tail) register to make sure
1887 * they are not overwritten.
1889 * In this driver the NIC ring starts at RDH = 0,
1890 * RDT points to the last slot available for reception (?),
1891 * so RDT = num_rx_desc - 1 means the whole ring is available.
1894 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1895 (ifp->if_capenable & IFCAP_NETMAP)) {
1896 struct netmap_adapter *na = NA(adapter->ifp);
1897 struct netmap_kring *kring = &na->rx_rings[i];
1898 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1900 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1902 #endif /* DEV_NETMAP */
1903 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1904 adapter->num_rx_desc - 1);
1907 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1909 ixv_initialize_rss_mapping(adapter);
1911 if (adapter->num_queues > 1) {
1912 /* RSS and RX IPP Checksum are mutually exclusive */
1913 rxcsum |= IXGBE_RXCSUM_PCSD;
1916 if (ifp->if_capenable & IFCAP_RXCSUM)
1917 rxcsum |= IXGBE_RXCSUM_PCSD;
1919 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1920 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1922 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1925 } /* ixv_initialize_receive_units */
1927 /************************************************************************
1928 * ixv_setup_vlan_support
1929 ************************************************************************/
1931 ixv_setup_vlan_support(struct adapter *adapter)
1933 struct ixgbe_hw *hw = &adapter->hw;
1934 u32 ctrl, vid, vfta, retry;
1937 * We get here thru init_locked, meaning
1938 * a soft reset, this has already cleared
1939 * the VFTA and other state, so if there
1940 * have been no vlan's registered do nothing.
1942 if (adapter->num_vlans == 0)
1945 /* Enable the queues */
1946 for (int i = 0; i < adapter->num_queues; i++) {
/* Turn on VLAN tag stripping (VME) per RX queue */
1947 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1948 ctrl |= IXGBE_RXDCTL_VME;
1949 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1951 * Let Rx path know that it needs to store VLAN tag
1952 * as part of extra mbuf info.
1954 adapter->rx_rings[i].vtag_strip = TRUE;
1958 * A soft reset zero's out the VFTA, so
1959 * we need to repopulate it now.
1961 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1962 if (ixv_shadow_vfta[i] == 0)
1964 vfta = ixv_shadow_vfta[i];
1966 * Reconstruct the vlan id's
1967 * based on the bits set in each
1968 * of the array ints.
1970 for (int j = 0; j < 32; j++) {
1972 if ((vfta & (1 << j)) == 0)
1975 /* Call the shared code mailbox routine */
/* Retry the mailbox call; the PF may be busy (retry bound elided here) */
1976 while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1982 } /* ixv_setup_vlan_support */
1984 /************************************************************************
1987 * Run via a vlan config EVENT, it enables us to use the
1988 * HW Filter table since we can get the vlan id. This just
1989 * creates the entry in the soft version of the VFTA, init
1990 * will repopulate the real table.
1991 ************************************************************************/
1993 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1995 struct adapter *adapter = ifp->if_softc;
1998 if (ifp->if_softc != arg) /* Not our event */
2001 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2004 IXGBE_CORE_LOCK(adapter);
/* vtag >> 5 selects the 32-bit word in the shadow VFTA; the bit within
 * the word is vtag & 0x1F (its computation is elided in this listing) */
2005 index = (vtag >> 5) & 0x7F;
2007 ixv_shadow_vfta[index] |= (1 << bit);
2008 ++adapter->num_vlans;
2009 /* Re-init to load the changes */
2010 ixv_init_locked(adapter);
2011 IXGBE_CORE_UNLOCK(adapter);
2012 } /* ixv_register_vlan */
2014 /************************************************************************
2015 * ixv_unregister_vlan
2017 * Run via a vlan unconfig EVENT, remove our entry
2019 ************************************************************************/
2021 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2023 struct adapter *adapter = ifp->if_softc;
/* Not our event */
2026 if (ifp->if_softc != arg)
2029 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2032 IXGBE_CORE_LOCK(adapter);
/* Clear the bit in the shadow VFTA; the real table is rewritten on re-init */
2033 index = (vtag >> 5) & 0x7F;
2035 ixv_shadow_vfta[index] &= ~(1 << bit);
2036 --adapter->num_vlans;
2037 /* Re-init to load the changes */
2038 ixv_init_locked(adapter);
2039 IXGBE_CORE_UNLOCK(adapter);
2040 } /* ixv_unregister_vlan */
2042 /************************************************************************
2044 ************************************************************************/
2046 ixv_enable_intr(struct adapter *adapter)
2048 struct ixgbe_hw *hw = &adapter->hw;
2049 struct ix_queue *que = adapter->queues;
/* Enable all causes except the RTX queue bits, which are enabled per queue */
2050 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2053 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Auto-clear everything but OTHER and LSC */
2055 mask = IXGBE_EIMS_ENABLE_MASK;
2056 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
2057 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2059 for (int i = 0; i < adapter->num_queues; i++, que++)
2060 ixv_enable_queue(adapter, que->msix);
2062 IXGBE_WRITE_FLUSH(hw);
2065 } /* ixv_enable_intr */
2067 /************************************************************************
2069 ************************************************************************/
2071 ixv_disable_intr(struct adapter *adapter)
/* Clear auto-clear, mask every interrupt cause, and flush the writes */
2073 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2074 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
2075 IXGBE_WRITE_FLUSH(&adapter->hw);
2078 } /* ixv_disable_intr */
2080 /************************************************************************
2083 * Setup the correct IVAR register for a particular MSI-X interrupt
2084 * - entry is the register array entry
2085 * - vector is the MSI-X vector for this queue
2086 * - type is RX/TX/MISC
2087 ************************************************************************/
2089 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2091 struct ixgbe_hw *hw = &adapter->hw;
/* The hardware requires the valid bit set alongside the vector number */
2094 vector |= IXGBE_IVAR_ALLOC_VAL;
2096 if (type == -1) { /* MISC IVAR */
2097 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2100 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2101 } else { /* RX/TX IVARS */
/* Each 32-bit IVAR holds two queue pairs; pick the 8-bit slot by
 * entry parity (16-bit half) and RX/TX type (8-bit offset) */
2102 index = (16 * (entry & 1)) + (8 * type);
2103 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2104 ivar &= ~(0xFF << index);
2105 ivar |= (vector << index);
2106 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2108 } /* ixv_set_ivar */
2110 /************************************************************************
2111 * ixv_configure_ivars
2112 ************************************************************************/
2114 ixv_configure_ivars(struct adapter *adapter)
2116 struct ix_queue *que = adapter->queues;
/* Map each queue's RX (type 0) and TX (type 1) causes to its MSI-X vector */
2118 for (int i = 0; i < adapter->num_queues; i++, que++) {
2119 /* First the RX queue entry */
2120 ixv_set_ivar(adapter, i, que->msix, 0);
2121 /* ... and the TX */
2122 ixv_set_ivar(adapter, i, que->msix, 1);
2123 /* Set an initial value in EITR */
2124 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
2125 IXGBE_EITR_DEFAULT);
2128 /* For the mailbox interrupt */
2129 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2130 } /* ixv_configure_ivars */
2133 /************************************************************************
2134 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2136 * Done outside of interrupt context since the driver might sleep
2137 ************************************************************************/
2139 ixv_handle_link(void *context, int pending)
2141 struct adapter *adapter = context;
/* Query the shared code for current link state, then push it to the OS */
2143 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2144 &adapter->link_up, FALSE);
2145 ixv_update_link_status(adapter);
2146 } /* ixv_handle_link */
2148 /************************************************************************
2149 * ixv_check_link - Used in the local timer to poll for link changes
2150 ************************************************************************/
2152 ixv_check_link(struct adapter *adapter)
/* Force the shared code to re-read link status rather than use its cache */
2154 adapter->hw.mac.get_link_status = TRUE;
2156 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2157 &adapter->link_up, FALSE);
2158 ixv_update_link_status(adapter);
2159 } /* ixv_check_link */
2161 /************************************************************************
2164 * The VF stats registers never have a truly virgin
2165 * starting point, so this routine tries to make an
2166 * artificial one, marking ground zero on attach as
2168 ************************************************************************/
2170 ixv_save_stats(struct adapter *adapter)
/* Only accumulate if counters have advanced past the recorded baseline */
2172 if (adapter->stats_vf.vfgprc || adapter->stats_vf.vfgptc) {
2173 adapter->stats_vf.saved_reset_vfgprc +=
2174 adapter->stats_vf.vfgprc - adapter->stats_vf.base_vfgprc;
2175 adapter->stats_vf.saved_reset_vfgptc +=
2176 adapter->stats_vf.vfgptc - adapter->stats_vf.base_vfgptc;
2177 adapter->stats_vf.saved_reset_vfgorc +=
2178 adapter->stats_vf.vfgorc - adapter->stats_vf.base_vfgorc;
2179 adapter->stats_vf.saved_reset_vfgotc +=
2180 adapter->stats_vf.vfgotc - adapter->stats_vf.base_vfgotc;
2181 adapter->stats_vf.saved_reset_vfmprc +=
2182 adapter->stats_vf.vfmprc - adapter->stats_vf.base_vfmprc;
2184 } /* ixv_save_stats */
2186 /************************************************************************
2188 ************************************************************************/
2190 ixv_init_stats(struct adapter *adapter)
2192 struct ixgbe_hw *hw = &adapter->hw;
/* Snapshot the hardware counters; the 36-bit octet counters are split
 * across LSB/MSB register pairs */
2194 adapter->stats_vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2195 adapter->stats_vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2196 adapter->stats_vf.last_vfgorc |=
2197 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2199 adapter->stats_vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2200 adapter->stats_vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2201 adapter->stats_vf.last_vfgotc |=
2202 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2204 adapter->stats_vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
/* The snapshot also becomes the baseline used by ixv_save_stats() */
2206 adapter->stats_vf.base_vfgprc = adapter->stats_vf.last_vfgprc;
2207 adapter->stats_vf.base_vfgorc = adapter->stats_vf.last_vfgorc;
2208 adapter->stats_vf.base_vfgptc = adapter->stats_vf.last_vfgptc;
2209 adapter->stats_vf.base_vfgotc = adapter->stats_vf.last_vfgotc;
2210 adapter->stats_vf.base_vfmprc = adapter->stats_vf.last_vfmprc;
2211 } /* ixv_init_stats */
/*
 * UPDATE_STAT_32: fold a 32-bit hardware counter into a wider software
 * accumulator, adding 2^32 when the register wrapped since the last read.
 * UPDATE_STAT_36: same idea for the 36-bit LSB/MSB octet counters, with a
 * 2^36 wrap adjustment. Both read via the local 'hw' in the caller.
 */
2213 #define UPDATE_STAT_32(reg, last, count) \
2215 u32 current = IXGBE_READ_REG(hw, reg); \
2216 if (current < last) \
2217 count += 0x100000000LL; \
2219 count &= 0xFFFFFFFF00000000LL; \
2223 #define UPDATE_STAT_36(lsb, msb, last, count) \
2225 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
2226 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
2227 u64 current = ((cur_msb << 32) | cur_lsb); \
2228 if (current < last) \
2229 count += 0x1000000000LL; \
2231 count &= 0xFFFFFFF000000000LL; \
2235 /************************************************************************
2236 * ixv_update_stats - Update the board statistics counters.
2237 ************************************************************************/
2239 ixv_update_stats(struct adapter *adapter)
2241 struct ixgbe_hw *hw = &adapter->hw;
/* Good packets received/transmitted (32-bit counters) */
2243 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats_vf.last_vfgprc,
2244 adapter->stats_vf.vfgprc);
2245 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats_vf.last_vfgptc,
2246 adapter->stats_vf.vfgptc);
/* Good octets received/transmitted (36-bit split counters) */
2247 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2248 adapter->stats_vf.last_vfgorc, adapter->stats_vf.vfgorc);
2249 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2250 adapter->stats_vf.last_vfgotc, adapter->stats_vf.vfgotc);
/* Multicast packets received */
2251 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats_vf.last_vfmprc,
2252 adapter->stats_vf.vfmprc);
2253 } /* ixv_update_stats */
2255 /************************************************************************
2256 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2257 ************************************************************************/
2259 ixv_add_stats_sysctls(struct adapter *adapter)
2261 device_t dev = adapter->dev;
2262 struct tx_ring *txr = adapter->tx_rings;
2263 struct rx_ring *rxr = adapter->rx_rings;
2264 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2265 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2266 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2267 struct ixgbevf_hw_stats *stats = &adapter->stats_vf;
2268 struct sysctl_oid *stat_node, *queue_node;
2269 struct sysctl_oid_list *stat_list, *queue_list;
2271 #define QUEUE_NAME_LEN 32
2272 char namebuf[QUEUE_NAME_LEN];
2274 /* Driver Statistics */
2275 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2276 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
2277 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
2278 CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
2279 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2280 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
2281 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
2282 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
2284 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2285 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
2286 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2287 CTLFLAG_RD, NULL, "Queue Name");
2288 queue_list = SYSCTL_CHILDREN(queue_node);
2290 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2291 CTLFLAG_RD, &(adapter->queues[i].irqs), "IRQs on queue");
2292 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
2293 CTLFLAG_RD, &(txr->no_tx_dma_setup),
2294 "Driver Tx DMA failure in Tx");
2295 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
2296 CTLFLAG_RD, &(txr->no_desc_avail),
2297 "Not-enough-descriptors count: TX");
2298 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2299 CTLFLAG_RD, &(txr->total_packets), "TX Packets");
2300 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
2301 CTLFLAG_RD, &txr->br->br_drops,
2302 "Not-enough-descriptors count: TX");
2305 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2306 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
2307 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
2308 CTLFLAG_RD, NULL, "Queue Name");
2309 queue_list = SYSCTL_CHILDREN(queue_node);
2311 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2312 CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
2313 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2314 CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
2315 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2316 CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
2319 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2320 CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
2321 stat_list = SYSCTL_CHILDREN(stat_node);
2323 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2324 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
2325 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2326 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
2327 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2328 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
2329 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2330 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
2331 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2332 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
2333 } /* ixv_add_stats_sysctls */
2335 /************************************************************************
2336 * ixv_set_sysctl_value
2337 ************************************************************************/
2339 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2340 const char *description, int *limit, int value)
2343 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2344 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2345 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
2346 } /* ixv_set_sysctl_value */
/************************************************************************
 * ixv_print_debug_info
 *
 *   Called from the "debug" sysctl handler (ixv_sysctl_debug).
 *   Provides a way to take a look at important statistics
 *   maintained by the driver and hardware.
 ************************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring  *rxr;
	struct tx_ring  *txr;
	struct lro_ctrl *lro;

	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	/* Walk every queue and dump its interrupt, RX and TX counters */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;

		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		/* lro_ctrl counter fields changed type in FreeBSD 11 */
#if __FreeBSD_version < 1100000
		device_printf(dev, "RX(%d) LRO Queued= %lld\n",
		    rxr->me, (long long)lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
		    rxr->me, (long long)lro->lro_flushed);
#else
		device_printf(dev, "RX(%d) LRO Queued= %lu\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %lu\n",
		    rxr->me, lro->lro_flushed);
#endif
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	/* Mailbox/link interrupt count (shared with ixv_msix_mbx) */
	device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
} /* ixv_print_debug_info */
/*
 * Sysctl handler backing the driver's debug node.  Writing 1 dumps the
 * driver/hardware statistics to the console via ixv_print_debug_info();
 * other values are accepted and ignored.
 */
static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int            error, result;

	result = 0;
	error = sysctl_handle_int(oidp, &result, 0, req);

	/* Bail on error or on a plain read (no new value supplied) */
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *)arg1;
		ixv_print_debug_info(adapter);
	}

	return (error);
} /* ixv_sysctl_debug */