1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /************************************************************************
45 ************************************************************************/
/* Driver version string; appended to the device description in ixv_probe(). */
46 char ixv_driver_version[] = "1.5.13-k";
48 /************************************************************************
51 * Used by probe to select devices to load on
52 * Last field stores an index into ixv_strings
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 ************************************************************************/
57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
/*
 * Supported VF PCI IDs.  Fields: vendor, device, subvendor, subdevice,
 * index into ixv_strings[].  A subvendor/subdevice of 0 acts as a
 * wildcard in the match loop of ixv_probe().
 */
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
64 /* required last entry */
68 /************************************************************************
69  * Table of branding strings
70  ************************************************************************/
71 static char *ixv_strings[] = {
/* Indexed by the last field of each ixv_vendor_info_array entry. */
72 "Intel(R) PRO/10GbE Virtual Function Network Driver"
75 /************************************************************************
77  ************************************************************************/
/* newbus device interface entry points */
78 static int ixv_probe(device_t);
79 static int ixv_attach(device_t);
80 static int ixv_detach(device_t);
81 static int ixv_shutdown(device_t);
/* ifnet / ifmedia entry points */
82 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
83 static void ixv_init(void *);
84 static void ixv_init_locked(struct adapter *);
85 static void ixv_stop(void *);
86 static uint64_t ixv_get_counter(struct ifnet *, ift_counter);
87 static void ixv_init_device_features(struct adapter *);
88 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int ixv_media_change(struct ifnet *);
/* Resource allocation and hardware bring-up helpers */
90 static int ixv_allocate_pci_resources(struct adapter *);
91 static int ixv_allocate_msix(struct adapter *);
92 static int ixv_configure_interrupts(struct adapter *);
93 static void ixv_free_pci_resources(struct adapter *);
94 static void ixv_local_timer(void *);
95 static void ixv_setup_interface(device_t, struct adapter *);
97 static void ixv_initialize_transmit_units(struct adapter *);
98 static void ixv_initialize_receive_units(struct adapter *);
99 static void ixv_initialize_rss_mapping(struct adapter *);
100 static void ixv_check_link(struct adapter *);
/* Interrupt control, multicast, link, sysctl, IVAR routing */
102 static void ixv_enable_intr(struct adapter *);
103 static void ixv_disable_intr(struct adapter *);
104 static void ixv_set_multi(struct adapter *);
105 static void ixv_update_link_status(struct adapter *);
106 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
107 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
108 static void ixv_configure_ivars(struct adapter *);
109 static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN registration and hardware filter support */
111 static void ixv_setup_vlan_support(struct adapter *);
112 static void ixv_register_vlan(void *, struct ifnet *, u16);
113 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
/* Statistics bookkeeping and sysctl exposure */
115 static void ixv_save_stats(struct adapter *);
116 static void ixv_init_stats(struct adapter *);
117 static void ixv_update_stats(struct adapter *);
118 static void ixv_add_stats_sysctls(struct adapter *);
119 static void ixv_set_sysctl_value(struct adapter *, const char *,
120 const char *, int *, int);
122 /* The MSI-X Interrupt handlers */
123 static void ixv_msix_que(void *);
124 static void ixv_msix_mbx(void *);
126 /* Deferred interrupt tasklets */
127 static void ixv_handle_que(void *, int);
128 static void ixv_handle_link(void *, int);
130 /************************************************************************
131  * FreeBSD Device Interface Entry Points
132  ************************************************************************/
133 static device_method_t ixv_methods[] = {
134 /* Device interface */
135 DEVMETHOD(device_probe, ixv_probe),
136 DEVMETHOD(device_attach, ixv_attach),
137 DEVMETHOD(device_detach, ixv_detach),
138 DEVMETHOD(device_shutdown, ixv_shutdown),
142 static driver_t ixv_driver = {
143 "ixv", ixv_methods, sizeof(struct adapter),
/* Register on the PCI bus; the module requires pci, ether and netmap. */
146 devclass_t ixv_devclass;
147 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
148 MODULE_DEPEND(ixv, pci, 1, 1, 1);
149 MODULE_DEPEND(ixv, ether, 1, 1, 1);
150 MODULE_DEPEND(ixv, netmap, 1, 1, 1);
153 * TUNEABLE PARAMETERS:
156 /* Number of Queues - do not exceed MSI-X vectors - 1 */
157 static int ixv_num_queues = 1;
158 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
161 * AIM: Adaptive Interrupt Moderation
162 * which means that the interrupt rate
163 * is varied over time based on the
164 * traffic for that interrupt vector
/* Consumed in ixv_msix_que(); also exposed read-write via sysctl in attach. */
166 static int ixv_enable_aim = FALSE;
167 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
169 /* How many packets rxeof tries to clean at a time */
170 static int ixv_rx_process_limit = 256;
171 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
173 /* How many packets txeof tries to clean at a time */
174 static int ixv_tx_process_limit = 256;
175 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
177 /* Flow control setting, default to full */
178 static int ixv_flow_control = ixgbe_fc_full;
179 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
182 * Header split: this causes the hardware to DMA
183 * the header into a separate mbuf from the payload,
184 * it can be a performance win in some workloads, but
185 * in others it actually hurts, its off by default.
187 static int ixv_header_split = FALSE;
188 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
191 * Number of TX descriptors per ring,
192 * setting higher than RX as this seems
193 * the better performing choice.
195 static int ixv_txd = DEFAULT_TXD;
196 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
198 /* Number of RX descriptors per ring */
199 static int ixv_rxd = DEFAULT_RXD;
200 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
202 /* Legacy Transmit (single queue) */
203 static int ixv_enable_legacy_tx = 0;
204 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
207 * Shadow VFTA table, this is needed because
208 * the real filter table gets cleared during
209 * a soft reset and we need to repopulate it.
211 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
/*
 * Transmit dispatch pointers: bound in ixv_setup_interface() to either the
 * legacy if_start path or the multiqueue (buf_ring) path.
 */
213 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
214 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
216 /************************************************************************
217 * ixv_probe - Device identification routine
219 * Determines if the driver should be loaded on
220 * adapter based on its PCI vendor/device ID.
222 * return BUS_PROBE_DEFAULT on success, positive on failure
223 ************************************************************************/
225 ixv_probe(device_t dev)
227 ixgbe_vendor_info_t *ent;
228 u16 pci_vendor_id = 0;
229 u16 pci_device_id = 0;
230 u16 pci_subvendor_id = 0;
231 u16 pci_subdevice_id = 0;
232 char adapter_name[256];
235 pci_vendor_id = pci_get_vendor(dev);
/* Cheap early reject for non-Intel devices before reading the other IDs. */
236 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
239 pci_device_id = pci_get_device(dev);
240 pci_subvendor_id = pci_get_subvendor(dev);
241 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the table until the all-zero sentinel; 0 subIDs match anything. */
243 ent = ixv_vendor_info_array;
244 while (ent->vendor_id != 0) {
245 if ((pci_vendor_id == ent->vendor_id) &&
246 (pci_device_id == ent->device_id) &&
247 ((pci_subvendor_id == ent->subvendor_id) ||
248 (ent->subvendor_id == 0)) &&
249 ((pci_subdevice_id == ent->subdevice_id) ||
250 (ent->subdevice_id == 0))) {
/* Advertise "<branding string>, Version - <driver version>". */
251 sprintf(adapter_name, "%s, Version - %s",
252 ixv_strings[ent->index], ixv_driver_version);
253 device_set_desc_copy(dev, adapter_name);
254 return (BUS_PROBE_DEFAULT);
262 /************************************************************************
263 * ixv_attach - Device initialization routine
265 * Called when the driver is being loaded.
266 * Identifies the type of hardware, allocates all resources
267 * and initializes the hardware.
269 * return 0 on success, positive on failure
270 ************************************************************************/
272 ixv_attach(device_t dev)
274 struct adapter *adapter;
278 INIT_DEBUGOUT("ixv_attach: begin");
281 * Make sure BUSMASTER is set, on a VM under
282 * KVM it may not be and will break things.
284 pci_enable_busmaster(dev);
286 /* Allocate, clear, and link in our adapter structure */
287 adapter = device_get_softc(dev);
289 adapter->hw.back = adapter;
/* Shared-code callbacks so common ixgbe code can init/stop this VF. */
292 adapter->init_locked = ixv_init_locked;
293 adapter->stop_locked = ixv_stop;
296 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
298 /* Do base PCI setup - map BAR0 */
299 if (ixv_allocate_pci_resources(adapter)) {
300 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
/* Per-device sysctls: "debug" handler and read-write AIM toggle. */
306 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
307 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
308 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
311 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
312 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
313 "enable_aim", CTLFLAG_RW, &ixv_enable_aim, 1,
314 "Interrupt Moderation");
316 /* Set up the timer callout */
317 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
319 /* Save off the information about this board */
320 hw->vendor_id = pci_get_vendor(dev);
321 hw->device_id = pci_get_device(dev);
322 hw->revision_id = pci_get_revid(dev);
323 hw->subsystem_vendor_id = pci_get_subvendor(dev);
324 hw->subsystem_device_id = pci_get_subdevice(dev);
326 /* A subset of set_mac_type */
327 switch (hw->device_id) {
328 case IXGBE_DEV_ID_82599_VF:
329 hw->mac.type = ixgbe_mac_82599_vf;
331 case IXGBE_DEV_ID_X540_VF:
332 hw->mac.type = ixgbe_mac_X540_vf;
334 case IXGBE_DEV_ID_X550_VF:
335 hw->mac.type = ixgbe_mac_X550_vf;
337 case IXGBE_DEV_ID_X550EM_X_VF:
338 hw->mac.type = ixgbe_mac_X550EM_x_vf;
340 case IXGBE_DEV_ID_X550EM_A_VF:
341 hw->mac.type = ixgbe_mac_X550EM_a_vf;
344 /* Shouldn't get here since probe succeeded */
345 device_printf(dev, "Unknown device ID!\n");
351 ixv_init_device_features(adapter);
353 /* Initialize the shared code */
354 error = ixgbe_init_ops_vf(hw);
356 device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
361 /* Setup the mailbox */
362 ixgbe_init_mbx_params_vf(hw);
364 /* Set the right number of segments */
365 adapter->num_segs = IXGBE_82599_SCATTER;
/* Reset failures other than IXGBE_ERR_RESET_FAILED are logged too. */
367 error = hw->mac.ops.reset_hw(hw);
368 if (error == IXGBE_ERR_RESET_FAILED)
369 device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
371 device_printf(dev, "...reset_hw() failed with error %d\n",
378 error = hw->mac.ops.init_hw(hw);
380 device_printf(dev, "...init_hw() failed with error %d\n",
386 /* Negotiate mailbox API version */
387 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_12);
389 device_printf(dev, "MBX API 1.2 negotiation failed! Error %d\n",
395 /* If no mac address was assigned, make a random one */
396 if (!ixv_check_ether_addr(hw->mac.addr)) {
397 u8 addr[ETHER_ADDR_LEN];
398 arc4rand(&addr, sizeof(addr), 0);
401 bcopy(addr, hw->mac.addr, sizeof(addr));
402 bcopy(addr, hw->mac.perm_addr, sizeof(addr));
405 /* Register for VLAN events */
406 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
407 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
408 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
409 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
411 /* Sysctls for limiting the amount of work done in the taskqueues */
412 ixv_set_sysctl_value(adapter, "rx_processing_limit",
413 "max number of rx packets to process",
414 &adapter->rx_process_limit, ixv_rx_process_limit);
416 ixv_set_sysctl_value(adapter, "tx_processing_limit",
417 "max number of tx packets to process",
418 &adapter->tx_process_limit, ixv_tx_process_limit);
/* Descriptor counts must be DBA_ALIGN-aligned and within MIN/MAX bounds. */
420 /* Do descriptor calc and sanity checks */
421 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
422 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
423 device_printf(dev, "TXD config issue, using default!\n");
424 adapter->num_tx_desc = DEFAULT_TXD;
426 adapter->num_tx_desc = ixv_txd;
428 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
429 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
430 device_printf(dev, "RXD config issue, using default!\n");
431 adapter->num_rx_desc = DEFAULT_RXD;
433 adapter->num_rx_desc = ixv_rxd;
436 error = ixv_configure_interrupts(adapter);
440 /* Allocate our TX/RX Queues */
441 if (ixgbe_allocate_queues(adapter)) {
442 device_printf(dev, "ixgbe_allocate_queues() failed!\n");
447 /* Setup OS specific network interface */
448 ixv_setup_interface(dev, adapter);
450 error = ixv_allocate_msix(adapter);
452 device_printf(dev, "ixv_allocate_msix() failed!\n");
456 /* Do the stats setup */
457 ixv_save_stats(adapter);
458 ixv_init_stats(adapter);
459 ixv_add_stats_sysctls(adapter);
461 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
462 ixgbe_netmap_attach(adapter);
464 INIT_DEBUGOUT("ixv_attach: end");
/* Error unwind: free queues, then PCI resources, then the core lock.
 * NOTE(review): the goto labels themselves are not visible in this listing. */
469 ixgbe_free_transmit_structures(adapter);
470 ixgbe_free_receive_structures(adapter);
471 free(adapter->queues, M_DEVBUF);
473 ixv_free_pci_resources(adapter);
474 IXGBE_CORE_LOCK_DESTROY(adapter);
479 /************************************************************************
480 * ixv_detach - Device removal routine
482 * Called when the driver is being removed.
483 * Stops the adapter and deallocates all the resources
484 * that were allocated for driver operation.
486 * return 0 on success, positive on failure
487 ************************************************************************/
489 ixv_detach(device_t dev)
491 struct adapter *adapter = device_get_softc(dev);
492 struct ix_queue *que = adapter->queues;
494 INIT_DEBUGOUT("ixv_detach: begin");
496 /* Make sure VLANS are not using driver */
497 if (adapter->ifp->if_vlantrunk != NULL) {
498 device_printf(dev, "Vlan in use, detach first\n");
502 ether_ifdetach(adapter->ifp);
503 IXGBE_CORE_LOCK(adapter);
505 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free each per-queue taskqueue (TX task + RX task). */
507 for (int i = 0; i < adapter->num_queues; i++, que++) {
509 struct tx_ring *txr = que->txr;
510 taskqueue_drain(que->tq, &txr->txq_task);
511 taskqueue_drain(que->tq, &que->que_task);
512 taskqueue_free(que->tq);
516 /* Drain the Mailbox(link) queue */
518 taskqueue_drain(adapter->tq, &adapter->link_task);
519 taskqueue_free(adapter->tq);
522 /* Unregister VLAN events */
523 if (adapter->vlan_attach != NULL)
524 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
525 if (adapter->vlan_detach != NULL)
526 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* callout_drain() waits for any in-flight ixv_local_timer to finish. */
528 callout_drain(&adapter->timer);
530 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
531 netmap_detach(adapter->ifp);
533 ixv_free_pci_resources(adapter);
534 bus_generic_detach(dev);
535 if_free(adapter->ifp);
537 ixgbe_free_transmit_structures(adapter);
538 ixgbe_free_receive_structures(adapter);
539 free(adapter->queues, M_DEVBUF);
541 IXGBE_CORE_LOCK_DESTROY(adapter);
546 /************************************************************************
547 * ixv_init_locked - Init entry point
549 * Used in two ways: It is used by the stack as an init entry
550 * point in network interface structure. It is also used
551 * by the driver as a hw/sw initialization routine to get
552 * to a consistent state.
554 * return 0 on success, positive on failure
555 ************************************************************************/
557 ixv_init_locked(struct adapter *adapter)
559 struct ifnet *ifp = adapter->ifp;
560 device_t dev = adapter->dev;
561 struct ixgbe_hw *hw = &adapter->hw;
564 INIT_DEBUGOUT("ixv_init_locked: begin");
/* Caller must hold the core mutex; quiesce HW and timer before reinit. */
565 mtx_assert(&adapter->core_mtx, MA_OWNED);
566 hw->adapter_stopped = FALSE;
567 hw->mac.ops.stop_adapter(hw);
568 callout_stop(&adapter->timer);
570 /* reprogram the RAR[0] in case user changed it. */
571 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
573 /* Get the latest mac address, User can use a LAA */
574 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
575 IXGBE_ETH_LENGTH_OF_ADDRESS);
576 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
578 /* Prepare transmit descriptors and buffers */
579 if (ixgbe_setup_transmit_structures(adapter)) {
580 device_printf(dev, "Could not setup transmit structures\n");
585 /* Reset VF and renegotiate mailbox API version */
586 hw->mac.ops.reset_hw(hw);
587 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_12);
589 device_printf(dev, "MBX API 1.2 negotiation failed! Error %d\n",
592 ixv_initialize_transmit_units(adapter);
594 /* Setup Multicast table */
595 ixv_set_multi(adapter);
598 * Determine the correct mbuf pool
599 * for doing jumbo/headersplit
601 if (ifp->if_mtu > ETHERMTU)
602 adapter->rx_mbuf_sz = MJUMPAGESIZE;
604 adapter->rx_mbuf_sz = MCLBYTES;
606 /* Prepare receive descriptors and buffers */
607 if (ixgbe_setup_receive_structures(adapter)) {
608 device_printf(dev, "Could not setup receive structures\n");
613 /* Configure RX settings */
614 ixv_initialize_receive_units(adapter);
/* Rebuild if_hwassist from the currently enabled capabilities. */
616 /* Set the various hardware offload abilities */
617 ifp->if_hwassist = 0;
618 if (ifp->if_capenable & IFCAP_TSO4)
619 ifp->if_hwassist |= CSUM_TSO;
620 if (ifp->if_capenable & IFCAP_TXCSUM) {
621 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
622 #if __FreeBSD_version >= 800000
623 ifp->if_hwassist |= CSUM_SCTP;
627 /* Set up VLAN offload and filter */
628 ixv_setup_vlan_support(adapter);
630 /* Set up MSI-X routing */
631 ixv_configure_ivars(adapter);
633 /* Set up auto-mask */
634 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
636 /* Set moderation on the Link interrupt */
637 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
640 ixv_init_stats(adapter);
642 /* Config/Enable Link */
643 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
647 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
649 /* And now turn on interrupts */
650 ixv_enable_intr(adapter);
652 /* Now inform the stack we're ready */
653 ifp->if_drv_flags |= IFF_DRV_RUNNING;
654 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
657 } /* ixv_init_locked */
660 * MSI-X Interrupt Handlers and Tasklets
/* Unmask the MSI-X interrupt for one queue vector via VTEIMS. */
664 ixv_enable_queue(struct adapter *adapter, u32 vector)
666 struct ixgbe_hw *hw = &adapter->hw;
667 u32 queue = 1 << vector;
670 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
671 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
672 } /* ixv_enable_queue */
/* Mask the MSI-X interrupt for one queue vector via VTEIMC. */
675 ixv_disable_queue(struct adapter *adapter, u32 vector)
677 struct ixgbe_hw *hw = &adapter->hw;
/* NOTE(review): "(u64)(1 << vector)" shifts in int before widening; for
 * vector >= 31 this is undefined behavior.  "(u64)1 << vector" would be
 * safe — confirm the valid vector range before changing. */
678 u64 queue = (u64)(1 << vector);
681 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
682 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
683 } /* ixv_disable_queue */
/* Software-trigger interrupts (VTEICS) on every queue in the bitmask. */
686 ixv_rearm_queues(struct adapter *adapter, u64 queues)
688 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
689 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
690 } /* ixv_rearm_queues */
693 /************************************************************************
694 * ixv_msix_que - MSI Queue Interrupt Service routine
695 ************************************************************************/
697 ixv_msix_que(void *arg)
699 struct ix_queue *que = arg;
700 struct adapter *adapter = que->adapter;
701 struct ifnet *ifp = adapter->ifp;
702 struct tx_ring *txr = que->txr;
703 struct rx_ring *rxr = que->rxr;
/* Mask this queue's interrupt while we service it; re-enabled at exit. */
707 ixv_disable_queue(adapter, que->msix);
710 more = ixgbe_rxeof(que);
715 * Make certain that if the stack
716 * has anything queued the task gets
717 * scheduled to handle it.
/* Dispatch through the legacy/multiqueue pointers set at attach time. */
719 if (!ixv_ring_empty(adapter->ifp, txr->br))
720 ixv_start_locked(ifp, txr);
721 IXGBE_TX_UNLOCK(txr);
/* AIM disabled: skip the moderation recalculation entirely. */
725 if (ixv_enable_aim == FALSE)
728 * Do Adaptive Interrupt Moderation:
729 * - Write out last calculated setting
730 * - Calculate based on average size over
733 if (que->eitr_setting)
734 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
737 que->eitr_setting = 0;
739 /* Idle, do nothing */
740 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* New ITR = max of TX/RX average packet size, plus frame/CRC overhead. */
743 if ((txr->bytes) && (txr->packets))
744 newitr = txr->bytes/txr->packets;
745 if ((rxr->bytes) && (rxr->packets))
746 newitr = max(newitr, (rxr->bytes / rxr->packets));
747 newitr += 24; /* account for hardware frame, crc */
749 /* set an upper boundary */
750 newitr = min(newitr, 3000);
752 /* Be nice to the mid range */
753 if ((newitr > 300) && (newitr < 1200))
754 newitr = (newitr / 3);
756 newitr = (newitr / 2);
/* VTEITR interval field lives in the upper half as well; mirror it. */
758 newitr |= newitr << 16;
760 /* save for next interrupt */
761 que->eitr_setting = newitr;
/* More RX work pending: defer to the taskqueue; otherwise unmask now. */
771 taskqueue_enqueue(que->tq, &que->que_task);
772 else /* Re-enable this interrupt */
773 ixv_enable_queue(adapter, que->msix);
778 /************************************************************************
780  ************************************************************************/
/* MSI-X handler for the mailbox/other vector: defers link handling. */
782 ixv_msix_mbx(void *arg)
784 struct adapter *adapter = arg;
785 struct ixgbe_hw *hw = &adapter->hw;
790 /* First get the cause */
791 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
792 /* Clear interrupt with write */
793 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
795 /* Link status change */
796 if (reg & IXGBE_EICR_LSC)
797 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* Re-arm the mailbox ("other") interrupt before returning. */
799 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
804 /************************************************************************
805 * ixv_media_status - Media Ioctl callback
807 * Called whenever the user queries the status of
808 * the interface using ifconfig.
809 ************************************************************************/
811 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
813 struct adapter *adapter = ifp->if_softc;
815 INIT_DEBUGOUT("ixv_media_status: begin");
816 IXGBE_CORE_LOCK(adapter);
/* Refresh the cached link state before reporting it to the stack. */
817 ixv_update_link_status(adapter);
819 ifmr->ifm_status = IFM_AVALID;
820 ifmr->ifm_active = IFM_ETHER;
/* No link: report "valid but not active" and bail out. */
822 if (!adapter->link_active) {
823 IXGBE_CORE_UNLOCK(adapter);
827 ifmr->ifm_status |= IFM_ACTIVE;
/* Map the negotiated link speed to an ifmedia subtype (always FDX). */
829 switch (adapter->link_speed) {
830 case IXGBE_LINK_SPEED_1GB_FULL:
831 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
833 case IXGBE_LINK_SPEED_10GB_FULL:
834 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
836 case IXGBE_LINK_SPEED_100_FULL:
837 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
839 case IXGBE_LINK_SPEED_10_FULL:
840 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
844 IXGBE_CORE_UNLOCK(adapter);
847 } /* ixv_media_status */
849 /************************************************************************
850 * ixv_media_change - Media Ioctl callback
852 * Called when the user changes speed/duplex using
853 * media/mediaopt option with ifconfig.
854 ************************************************************************/
856 ixv_media_change(struct ifnet *ifp)
858 struct adapter *adapter = ifp->if_softc;
859 struct ifmedia *ifm = &adapter->media;
861 INIT_DEBUGOUT("ixv_media_change: begin");
/* Only Ethernet media is valid for this VF interface. */
863 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Only IFM_AUTO is accepted; anything else is rejected with a message. */
866 switch (IFM_SUBTYPE(ifm->ifm_media)) {
870 device_printf(adapter->dev, "Only auto media type\n");
875 } /* ixv_media_change */
878 /************************************************************************
879 * ixv_set_multi - Multicast Update
881 * Called whenever multicast address list is updated.
882 ************************************************************************/
884 ixv_set_multi(struct adapter *adapter)
886 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
888 struct ifmultiaddr *ifma;
889 struct ifnet *ifp = adapter->ifp;
892 IOCTL_DEBUGOUT("ixv_set_multi: begin");
894 #if __FreeBSD_version < 800000
/* Flatten the ifnet's link-layer multicast list into the mta[] array.
 * NOTE(review): no visible bound check against MAX_NUM_MULTICAST_ADDRESSES
 * before the copy — confirm mcnt is limited elsewhere or mta can overflow. */
899 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
900 if (ifma->ifma_addr->sa_family != AF_LINK)
902 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
903 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
904 IXGBE_ETH_LENGTH_OF_ADDRESS);
907 #if __FreeBSD_version < 800000
910 if_maddr_runlock(ifp);
/* Hand the flat array to shared code via the ixv_mc_array_itr iterator. */
915 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
916 ixv_mc_array_itr, TRUE);
919 } /* ixv_set_multi */
921 /************************************************************************
924 * An iterator function needed by the multicast shared code.
925 * It feeds the shared code routine the addresses in the
926 * array of ixv_set_multi() one by one.
927 ************************************************************************/
929 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
931 u8 *addr = *update_ptr;
/* Advance the cursor by one MAC address (6 bytes) for the next call. */
935 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
936 *update_ptr = newptr;
939 } /* ixv_mc_array_itr */
941 /************************************************************************
942 * ixv_local_timer - Timer routine
944 * Checks for link status, updates statistics,
945 * and runs the watchdog check.
946 ************************************************************************/
948 ixv_local_timer(void *arg)
950 struct adapter *adapter = arg;
951 device_t dev = adapter->dev;
952 struct ix_queue *que = adapter->queues;
/* Runs from the callout with the core mutex held (callout_init_mtx). */
956 mtx_assert(&adapter->core_mtx, MA_OWNED);
958 ixv_check_link(adapter);
961 ixv_update_stats(adapter);
964 * Check the TX queues status
965 * - mark hung queues so we don't schedule on them
966 * - watchdog only if all queues show hung
968 for (int i = 0; i < adapter->num_queues; i++, que++) {
969 /* Keep track of queues with work for soft irq */
971 queues |= ((u64)1 << que->me);
973 * Each time txeof runs without cleaning, but there
974 * are uncleaned descriptors it increments busy. If
975 * we get to the MAX we declare it hung.
977 if (que->busy == IXGBE_QUEUE_HUNG) {
979 /* Mark the queue as inactive */
980 adapter->active_queues &= ~((u64)1 << que->me);
983 /* Check if we've come back from hung */
984 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
985 adapter->active_queues |= ((u64)1 << que->me);
987 if (que->busy >= IXGBE_MAX_TX_BUSY) {
989 "Warning queue %d appears to be hung!\n", i);
990 que->txr->busy = IXGBE_QUEUE_HUNG;
996 /* Only truly watchdog if all queues show hung */
997 if (hung == adapter->num_queues)
999 else if (queues != 0) { /* Force an IRQ on queues with work */
1000 ixv_rearm_queues(adapter, queues);
/* Reschedule ourselves to run again in one second. */
1003 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* Watchdog path: mark the interface down and reinitialize the adapter.
 * NOTE(review): the label introducing this tail is not visible here. */
1009 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1010 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1011 adapter->watchdog_events++;
1012 ixv_init_locked(adapter);
1013 } /* ixv_local_timer */
1015 /************************************************************************
1016 * ixv_update_link_status - Update OS on link state
1018 * Note: Only updates the OS on the cached link state.
1019 * The real check of the hardware only happens with
1021 ************************************************************************/
1023 ixv_update_link_status(struct adapter *adapter)
1025 struct ifnet *ifp = adapter->ifp;
1026 device_t dev = adapter->dev;
/* Only notify the stack on a state transition, not on every call. */
1028 if (adapter->link_up) {
1029 if (adapter->link_active == FALSE) {
1031 device_printf(dev,"Link is up %d Gbps %s \n",
/* NOTE(review): 128 is presumably IXGBE_LINK_SPEED_10GB_FULL's raw value;
 * using the named constant would be clearer — confirm before changing. */
1032 ((adapter->link_speed == 128) ? 10 : 1),
1034 adapter->link_active = TRUE;
1035 if_link_state_change(ifp, LINK_STATE_UP);
1037 } else { /* Link down */
1038 if (adapter->link_active == TRUE) {
1040 device_printf(dev,"Link is Down\n");
1041 if_link_state_change(ifp, LINK_STATE_DOWN);
1042 adapter->link_active = FALSE;
1047 } /* ixv_update_link_status */
1050 /************************************************************************
1051 * ixv_stop - Stop the hardware
1053 * Disables all traffic on the adapter by issuing a
1054 * global reset on the MAC and deallocates TX/RX buffers.
1055 ************************************************************************/
1060 struct adapter *adapter = arg;
1061 struct ixgbe_hw *hw = &adapter->hw;
/* Caller must hold the core mutex (void * arg is the adapter softc). */
1065 mtx_assert(&adapter->core_mtx, MA_OWNED);
1067 INIT_DEBUGOUT("ixv_stop: begin\n");
1068 ixv_disable_intr(adapter);
1070 /* Tell the stack that the interface is no longer active */
1071 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* Reset, then stop: clearing adapter_stopped lets stop_adapter() run. */
1073 hw->mac.ops.reset_hw(hw);
1074 adapter->hw.adapter_stopped = FALSE;
1075 hw->mac.ops.stop_adapter(hw);
1076 callout_stop(&adapter->timer);
1078 /* reprogram the RAR[0] in case user changed it. */
1079 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1085 /************************************************************************
1086 * ixv_allocate_pci_resources
1087 ************************************************************************/
/* Map BAR0 and record bus-space tag/handle; also latches the queue count. */
1089 ixv_allocate_pci_resources(struct adapter *adapter)
1091 device_t dev = adapter->dev;
1095 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1098 if (!(adapter->pci_mem)) {
1099 device_printf(dev, "Unable to allocate bus resource: memory\n");
1103 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1104 adapter->osdep.mem_bus_space_handle =
1105 rman_get_bushandle(adapter->pci_mem);
/* Shared code treats hw_addr as the register base; here it points at the
 * stored bus-space handle, which the osdep register macros dereference. */
1106 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1108 /* Pick up the tuneable queues */
1109 adapter->num_queues = ixv_num_queues;
1112 } /* ixv_allocate_pci_resources */
1114 /************************************************************************
1115 * ixv_free_pci_resources
1116 ************************************************************************/
1118 ixv_free_pci_resources(struct adapter * adapter)
1120 struct ix_queue *que = adapter->queues;
1121 device_t dev = adapter->dev;
1124 memrid = PCIR_BAR(MSIX_82598_BAR);
1127 * There is a slight possibility of a failure mode
1128 * in attach that will result in entering this function
1129 * before interrupt resources have been initialized, and
1130 * in that case we do not want to execute the loops below
1131 * We can detect this reliably by the state of the adapter
1134 if (adapter->res == NULL)
1138 * Release all msix queue resources:
/* Per-queue IRQs: tear down the handler, then release the SYS_RES_IRQ. */
1140 for (int i = 0; i < adapter->num_queues; i++, que++) {
1141 rid = que->msix + 1;
1142 if (que->tag != NULL) {
1143 bus_teardown_intr(dev, que->res, que->tag);
1146 if (que->res != NULL)
1147 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1151 /* Clean the Mailbox interrupt last */
1152 rid = adapter->vector + 1;
1154 if (adapter->tag != NULL) {
1155 bus_teardown_intr(dev, adapter->res, adapter->tag);
1156 adapter->tag = NULL;
1158 if (adapter->res != NULL)
1159 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
/* Finally give back MSI vectors, the MSI-X BAR mapping, and BAR0. */
1162 pci_release_msi(dev);
1164 if (adapter->msix_mem != NULL)
1165 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
1168 if (adapter->pci_mem != NULL)
1169 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1173 } /* ixv_free_pci_resources */
1175 /************************************************************************
1176 * ixv_setup_interface
1178 * Setup networking device structure and register an interface.
1179 ************************************************************************/
1181 ixv_setup_interface(device_t dev, struct adapter *adapter)
1185 INIT_DEBUGOUT("ixv_setup_interface: begin");
1187 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1189 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1190 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1191 ifp->if_baudrate = 1000000000;
1192 ifp->if_init = ixv_init;
1193 ifp->if_softc = adapter;
1194 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1195 ifp->if_ioctl = ixv_ioctl;
1196 if_setgetcounterfn(ifp, ixv_get_counter);
1197 /* TSO parameters */
1198 ifp->if_hw_tsomax = 65518;
1199 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1200 ifp->if_hw_tsomaxsegsize = 2048;
1201 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1202 ifp->if_start = ixgbe_legacy_start;
1203 ixv_start_locked = ixgbe_legacy_start_locked;
1204 ixv_ring_empty = ixgbe_legacy_ring_empty;
1206 ifp->if_transmit = ixgbe_mq_start;
1207 ifp->if_qflush = ixgbe_qflush;
1208 ixv_start_locked = ixgbe_mq_start_locked;
1209 ixv_ring_empty = drbr_empty;
1211 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1213 ether_ifattach(ifp, adapter->hw.mac.addr);
1215 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1218 * Tell the upper layer(s) we support long frames.
1220 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1222 /* Set capability flags */
1223 ifp->if_capabilities |= IFCAP_HWCSUM
1227 | IFCAP_VLAN_HWTAGGING
1233 /* Enable the above capabilities by default */
1234 ifp->if_capenable = ifp->if_capabilities;
1237 * Specify the media types supported by this adapter and register
1238 * callbacks to update media and link information
1240 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1242 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1243 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1246 } /* ixv_setup_interface */
1249 /************************************************************************
1250 * ixv_initialize_transmit_units - Enable transmit unit.
1251 ************************************************************************/
1253 ixv_initialize_transmit_units(struct adapter *adapter)
1255 struct tx_ring *txr = adapter->tx_rings;
1256 struct ixgbe_hw *hw = &adapter->hw;
1259 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1260 u64 tdba = txr->txdma.dma_paddr;
1263 /* Set WTHRESH to 8, burst writeback */
1264 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1265 txdctl |= (8 << 16);
1266 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1268 /* Set the HW Tx Head and Tail indices */
1269 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1270 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1272 /* Set Tx Tail register */
1273 txr->tail = IXGBE_VFTDT(i);
1275 /* Set Ring parameters */
1276 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1277 (tdba & 0x00000000ffffffffULL));
1278 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1279 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1280 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1281 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1282 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1283 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1286 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1287 txdctl |= IXGBE_TXDCTL_ENABLE;
1288 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1292 } /* ixv_initialize_transmit_units */
1295 /************************************************************************
1296 * ixv_initialize_rss_mapping
1297 ************************************************************************/
1299 ixv_initialize_rss_mapping(struct adapter *adapter)
1301 struct ixgbe_hw *hw = &adapter->hw;
1302 u32 reta = 0, mrqc, rss_key[10];
1305 u32 rss_hash_config;
1307 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1308 /* Fetch the configured RSS key */
1309 rss_getkey((uint8_t *)&rss_key);
1311 /* set up random bits */
1312 arc4rand(&rss_key, sizeof(rss_key), 0);
1315 /* Now fill out hash function seeds */
1316 for (i = 0; i < 10; i++)
1317 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1319 /* Set up the redirection table */
1320 for (i = 0, j = 0; i < 64; i++, j++) {
1321 if (j == adapter->num_queues)
1324 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1326 * Fetch the RSS bucket id for the given indirection
1327 * entry. Cap it at the number of configured buckets
1328 * (which is num_queues.)
1330 queue_id = rss_get_indirection_to_bucket(i);
1331 queue_id = queue_id % adapter->num_queues;
1336 * The low 8 bits are for hash value (n+0);
1337 * The next 8 bits are for hash value (n+1), etc.
1340 reta |= ((uint32_t)queue_id) << 24;
1342 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1347 /* Perform hash on these packet types */
1348 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1349 rss_hash_config = rss_gethashconfig();
1352 * Disable UDP - IP fragments aren't currently being handled
1353 * and so we end up with a mix of 2-tuple and 4-tuple
1356 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1357 | RSS_HASHTYPE_RSS_TCP_IPV4
1358 | RSS_HASHTYPE_RSS_IPV6
1359 | RSS_HASHTYPE_RSS_TCP_IPV6;
1362 mrqc = IXGBE_MRQC_RSSEN;
1363 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1364 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1365 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1366 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1367 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1368 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1369 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1370 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1371 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1372 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1374 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1375 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1377 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1378 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1379 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
1380 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
1382 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1383 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1384 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1385 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1387 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1388 } /* ixv_initialize_rss_mapping */
1391 /************************************************************************
1392 * ixv_initialize_receive_units - Setup receive registers and features.
1393 ************************************************************************/
1395 ixv_initialize_receive_units(struct adapter *adapter)
1397 struct rx_ring *rxr = adapter->rx_rings;
1398 struct ixgbe_hw *hw = &adapter->hw;
1399 struct ifnet *ifp = adapter->ifp;
1400 u32 bufsz, rxcsum, psrtype;
1402 if (ifp->if_mtu > ETHERMTU)
1403 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1405 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1407 psrtype = IXGBE_PSRTYPE_TCPHDR
1408 | IXGBE_PSRTYPE_UDPHDR
1409 | IXGBE_PSRTYPE_IPV4HDR
1410 | IXGBE_PSRTYPE_IPV6HDR
1411 | IXGBE_PSRTYPE_L2HDR;
1413 if (adapter->num_queues > 1)
1416 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1418 /* Tell PF our max_frame size */
1419 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1420 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1423 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1424 u64 rdba = rxr->rxdma.dma_paddr;
1427 /* Disable the queue */
1428 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1429 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1430 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1431 for (int j = 0; j < 10; j++) {
1432 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1433 IXGBE_RXDCTL_ENABLE)
1439 /* Setup the Base and Length of the Rx Descriptor Ring */
1440 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1441 (rdba & 0x00000000ffffffffULL));
1442 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
1443 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1444 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1446 /* Reset the ring indices */
1447 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1448 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1450 /* Set up the SRRCTL register */
1451 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1452 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1453 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1455 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1456 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1458 /* Capture Rx Tail index */
1459 rxr->tail = IXGBE_VFRDT(rxr->me);
1461 /* Do the queue enabling last */
1462 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1463 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1464 for (int k = 0; k < 10; k++) {
1465 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1466 IXGBE_RXDCTL_ENABLE)
1472 /* Set the Tail Pointer */
1474 * In netmap mode, we must preserve the buffers made
1475 * available to userspace before the if_init()
1476 * (this is true by default on the TX side, because
1477 * init makes all buffers available to userspace).
1479 * netmap_reset() and the device specific routines
1480 * (e.g. ixgbe_setup_receive_rings()) map these
1481 * buffers at the end of the NIC ring, so here we
1482 * must set the RDT (tail) register to make sure
1483 * they are not overwritten.
1485 * In this driver the NIC ring starts at RDH = 0,
1486 * RDT points to the last slot available for reception (?),
1487 * so RDT = num_rx_desc - 1 means the whole ring is available.
1490 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1491 (ifp->if_capenable & IFCAP_NETMAP)) {
1492 struct netmap_adapter *na = NA(adapter->ifp);
1493 struct netmap_kring *kring = &na->rx_rings[i];
1494 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1496 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1498 #endif /* DEV_NETMAP */
1499 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1500 adapter->num_rx_desc - 1);
1503 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1505 ixv_initialize_rss_mapping(adapter);
1507 if (adapter->num_queues > 1) {
1508 /* RSS and RX IPP Checksum are mutually exclusive */
1509 rxcsum |= IXGBE_RXCSUM_PCSD;
1512 if (ifp->if_capenable & IFCAP_RXCSUM)
1513 rxcsum |= IXGBE_RXCSUM_PCSD;
1515 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1516 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1518 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1521 } /* ixv_initialize_receive_units */
1523 /************************************************************************
1524 * ixv_setup_vlan_support
1525 ************************************************************************/
1527 ixv_setup_vlan_support(struct adapter *adapter)
1529 struct ixgbe_hw *hw = &adapter->hw;
1530 u32 ctrl, vid, vfta, retry;
1533 * We get here thru init_locked, meaning
1534 * a soft reset, this has already cleared
1535 * the VFTA and other state, so if there
1536 * have been no vlan's registered do nothing.
1538 if (adapter->num_vlans == 0)
1541 /* Enable the queues */
1542 for (int i = 0; i < adapter->num_queues; i++) {
1543 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1544 ctrl |= IXGBE_RXDCTL_VME;
1545 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1547 * Let Rx path know that it needs to store VLAN tag
1548 * as part of extra mbuf info.
1550 adapter->rx_rings[i].vtag_strip = TRUE;
1554 * A soft reset zero's out the VFTA, so
1555 * we need to repopulate it now.
1557 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1558 if (ixv_shadow_vfta[i] == 0)
1560 vfta = ixv_shadow_vfta[i];
1562 * Reconstruct the vlan id's
1563 * based on the bits set in each
1564 * of the array ints.
1566 for (int j = 0; j < 32; j++) {
1568 if ((vfta & (1 << j)) == 0)
1571 /* Call the shared code mailbox routine */
1572 while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1578 } /* ixv_setup_vlan_support */
1580 /************************************************************************
1583 * Run via a vlan config EVENT, it enables us to use the
1584 * HW Filter table since we can get the vlan id. This just
1585 * creates the entry in the soft version of the VFTA, init
1586 * will repopulate the real table.
1587 ************************************************************************/
1589 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1591 struct adapter *adapter = ifp->if_softc;
1594 if (ifp->if_softc != arg) /* Not our event */
1597 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1600 IXGBE_CORE_LOCK(adapter);
1601 index = (vtag >> 5) & 0x7F;
1603 ixv_shadow_vfta[index] |= (1 << bit);
1604 ++adapter->num_vlans;
1605 /* Re-init to load the changes */
1606 ixv_init_locked(adapter);
1607 IXGBE_CORE_UNLOCK(adapter);
1608 } /* ixv_register_vlan */
1610 /************************************************************************
1611 * ixv_unregister_vlan
1613 * Run via a vlan unconfig EVENT, remove our entry
1615 ************************************************************************/
1617 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1619 struct adapter *adapter = ifp->if_softc;
1622 if (ifp->if_softc != arg)
1625 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1628 IXGBE_CORE_LOCK(adapter);
1629 index = (vtag >> 5) & 0x7F;
1631 ixv_shadow_vfta[index] &= ~(1 << bit);
1632 --adapter->num_vlans;
1633 /* Re-init to load the changes */
1634 ixv_init_locked(adapter);
1635 IXGBE_CORE_UNLOCK(adapter);
1636 } /* ixv_unregister_vlan */
1638 /************************************************************************
1640 ************************************************************************/
1642 ixv_enable_intr(struct adapter *adapter)
1644 struct ixgbe_hw *hw = &adapter->hw;
1645 struct ix_queue *que = adapter->queues;
1646 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1649 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1651 mask = IXGBE_EIMS_ENABLE_MASK;
1652 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1653 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1655 for (int i = 0; i < adapter->num_queues; i++, que++)
1656 ixv_enable_queue(adapter, que->msix);
1658 IXGBE_WRITE_FLUSH(hw);
1661 } /* ixv_enable_intr */
1663 /************************************************************************
1665 ************************************************************************/
1667 ixv_disable_intr(struct adapter *adapter)
1669 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1670 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1671 IXGBE_WRITE_FLUSH(&adapter->hw);
1674 } /* ixv_disable_intr */
1676 /************************************************************************
1679 * Setup the correct IVAR register for a particular MSI-X interrupt
1680 * - entry is the register array entry
1681 * - vector is the MSI-X vector for this queue
1682 * - type is RX/TX/MISC
1683 ************************************************************************/
1685 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1687 struct ixgbe_hw *hw = &adapter->hw;
1690 vector |= IXGBE_IVAR_ALLOC_VAL;
1692 if (type == -1) { /* MISC IVAR */
1693 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1696 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1697 } else { /* RX/TX IVARS */
1698 index = (16 * (entry & 1)) + (8 * type);
1699 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1700 ivar &= ~(0xFF << index);
1701 ivar |= (vector << index);
1702 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1704 } /* ixv_set_ivar */
1706 /************************************************************************
1707 * ixv_configure_ivars
1708 ************************************************************************/
1710 ixv_configure_ivars(struct adapter *adapter)
1712 struct ix_queue *que = adapter->queues;
1714 for (int i = 0; i < adapter->num_queues; i++, que++) {
1715 /* First the RX queue entry */
1716 ixv_set_ivar(adapter, i, que->msix, 0);
1717 /* ... and the TX */
1718 ixv_set_ivar(adapter, i, que->msix, 1);
1719 /* Set an initial value in EITR */
1720 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1721 IXGBE_EITR_DEFAULT);
1724 /* For the mailbox interrupt */
1725 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1726 } /* ixv_configure_ivars */
1729 /************************************************************************
1731 ************************************************************************/
1733 ixv_get_counter(struct ifnet *ifp, ift_counter cnt)
1735 struct adapter *adapter;
1737 adapter = if_getsoftc(ifp);
1740 case IFCOUNTER_IPACKETS:
1741 return (adapter->ipackets);
1742 case IFCOUNTER_OPACKETS:
1743 return (adapter->opackets);
1744 case IFCOUNTER_IBYTES:
1745 return (adapter->ibytes);
1746 case IFCOUNTER_OBYTES:
1747 return (adapter->obytes);
1748 case IFCOUNTER_IMCASTS:
1749 return (adapter->imcasts);
1751 return (if_get_counter_default(ifp, cnt));
1753 } /* ixv_get_counter */
1755 /************************************************************************
1758 * The VF stats registers never have a truly virgin
1759 * starting point, so this routine tries to make an
1760 * artificial one, marking ground zero on attach as
1762 ************************************************************************/
1764 ixv_save_stats(struct adapter *adapter)
1766 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1767 adapter->stats.vf.saved_reset_vfgprc +=
1768 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1769 adapter->stats.vf.saved_reset_vfgptc +=
1770 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1771 adapter->stats.vf.saved_reset_vfgorc +=
1772 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1773 adapter->stats.vf.saved_reset_vfgotc +=
1774 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1775 adapter->stats.vf.saved_reset_vfmprc +=
1776 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1778 } /* ixv_save_stats */
1780 /************************************************************************
1782 ************************************************************************/
1784 ixv_init_stats(struct adapter *adapter)
1786 struct ixgbe_hw *hw = &adapter->hw;
1788 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1789 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1790 adapter->stats.vf.last_vfgorc |=
1791 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1793 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1794 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1795 adapter->stats.vf.last_vfgotc |=
1796 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1798 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1800 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1801 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1802 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1803 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1804 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1805 } /* ixv_init_stats */
1807 #define UPDATE_STAT_32(reg, last, count) \
1809 u32 current = IXGBE_READ_REG(hw, reg); \
1810 if (current < last) \
1811 count += 0x100000000LL; \
1813 count &= 0xFFFFFFFF00000000LL; \
1817 #define UPDATE_STAT_36(lsb, msb, last, count) \
1819 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
1820 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
1821 u64 current = ((cur_msb << 32) | cur_lsb); \
1822 if (current < last) \
1823 count += 0x1000000000LL; \
1825 count &= 0xFFFFFFF000000000LL; \
1829 /************************************************************************
1830 * ixv_update_stats - Update the board statistics counters.
1831 ************************************************************************/
1833 ixv_update_stats(struct adapter *adapter)
1835 struct ixgbe_hw *hw = &adapter->hw;
1836 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1838 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1839 adapter->stats.vf.vfgprc);
1840 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1841 adapter->stats.vf.vfgptc);
1842 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1843 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1844 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1845 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1846 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1847 adapter->stats.vf.vfmprc);
1849 /* Fill out the OS statistics structure */
1850 IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1851 IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1852 IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1853 IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1854 IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1855 } /* ixv_update_stats */
1857 /************************************************************************
1858 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1859 ************************************************************************/
1861 ixv_add_stats_sysctls(struct adapter *adapter)
1863 device_t dev = adapter->dev;
1864 struct tx_ring *txr = adapter->tx_rings;
1865 struct rx_ring *rxr = adapter->rx_rings;
1866 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1867 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1868 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1869 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1870 struct sysctl_oid *stat_node, *queue_node;
1871 struct sysctl_oid_list *stat_list, *queue_list;
1873 #define QUEUE_NAME_LEN 32
1874 char namebuf[QUEUE_NAME_LEN];
1876 /* Driver Statistics */
1877 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1878 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1879 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1880 CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1881 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1882 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1883 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1884 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1886 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1887 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1888 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1889 CTLFLAG_RD, NULL, "Queue Name");
1890 queue_list = SYSCTL_CHILDREN(queue_node);
1892 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1893 CTLFLAG_RD, &(adapter->queues[i].irqs), "IRQs on queue");
1894 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1895 CTLFLAG_RD, &(txr->no_tx_dma_setup),
1896 "Driver Tx DMA failure in Tx");
1897 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
1898 CTLFLAG_RD, &(txr->no_desc_avail),
1899 "Not-enough-descriptors count: TX");
1900 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1901 CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1902 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1903 CTLFLAG_RD, &(txr->br->br_drops),
1904 "Packets dropped in buf_ring");
1907 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1908 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1909 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1910 CTLFLAG_RD, NULL, "Queue Name");
1911 queue_list = SYSCTL_CHILDREN(queue_node);
1913 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1914 CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1915 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1916 CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1917 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1918 CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1921 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1922 CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1923 stat_list = SYSCTL_CHILDREN(stat_node);
1925 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1926 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1927 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1928 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1929 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1930 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1931 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1932 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1933 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1934 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1935 } /* ixv_add_stats_sysctls */
1937 /************************************************************************
1938 * ixv_set_sysctl_value
1939 ************************************************************************/
1941 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
1942 const char *description, int *limit, int value)
1945 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
1946 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
1947 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
1948 } /* ixv_set_sysctl_value */
1950 /************************************************************************
1951 * ixv_print_debug_info
1953 * Called only when em_display_debug_stats is enabled.
1954 * Provides a way to take a look at important statistics
1955 * maintained by the driver and hardware.
1956 ************************************************************************/
1958 ixv_print_debug_info(struct adapter *adapter)
1960 device_t dev = adapter->dev;
1961 struct ixgbe_hw *hw = &adapter->hw;
1962 struct ix_queue *que = adapter->queues;
1963 struct rx_ring *rxr;
1964 struct tx_ring *txr;
1965 struct lro_ctrl *lro;
1967 device_printf(dev, "Error Byte Count = %u \n",
1968 IXGBE_READ_REG(hw, IXGBE_ERRBC));
1970 for (int i = 0; i < adapter->num_queues; i++, que++) {
1974 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
1975 que->msix, (long)que->irqs);
1976 device_printf(dev, "RX(%d) Packets Received: %lld\n",
1977 rxr->me, (long long)rxr->rx_packets);
1978 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
1979 rxr->me, (long)rxr->rx_bytes);
1980 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
1981 rxr->me, (long long)lro->lro_queued);
1982 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
1983 rxr->me, (long long)lro->lro_flushed);
1984 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
1985 txr->me, (long)txr->total_packets);
1986 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
1987 txr->me, (long)txr->no_desc_avail);
1990 device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1991 } /* ixv_print_debug_info */
1993 /************************************************************************
1995 ************************************************************************/
1997 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1999 struct adapter *adapter;
2003 error = sysctl_handle_int(oidp, &result, 0, req);
2005 if (error || !req->newptr)
2009 adapter = (struct adapter *)arg1;
2010 ixv_print_debug_info(adapter);
2014 } /* ixv_sysctl_debug */
2016 /************************************************************************
2017 * ixv_init_device_features
2018 ************************************************************************/
2020 ixv_init_device_features(struct adapter *adapter)
2022 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2025 | IXGBE_FEATURE_LEGACY_TX;
2027 /* A tad short on feature flags for VFs, atm. */
2028 switch (adapter->hw.mac.type) {
2029 case ixgbe_mac_82599_vf:
2031 case ixgbe_mac_X540_vf:
2033 case ixgbe_mac_X550_vf:
2034 case ixgbe_mac_X550EM_x_vf:
2035 case ixgbe_mac_X550EM_a_vf:
2036 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2042 /* Enabled by default... */
2043 /* Is a virtual function (VF) */
2044 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2045 adapter->feat_en |= IXGBE_FEATURE_VF;
2047 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2048 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2049 /* Receive-Side Scaling (RSS) */
2050 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2051 adapter->feat_en |= IXGBE_FEATURE_RSS;
2052 /* Needs advanced context descriptor regardless of offloads req'd */
2053 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2054 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2056 /* Enabled via sysctl... */
2057 /* Legacy (single queue) transmit */
2058 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2059 ixv_enable_legacy_tx)
2060 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2061 } /* ixv_init_device_features */
2063 /************************************************************************
2064 * ixv_shutdown - Shutdown entry point
2065 ************************************************************************/
2067 ixv_shutdown(device_t dev)
2069 struct adapter *adapter = device_get_softc(dev);
2070 IXGBE_CORE_LOCK(adapter);
2072 IXGBE_CORE_UNLOCK(adapter);
2075 } /* ixv_shutdown */
2078 /************************************************************************
2079 * ixv_ioctl - Ioctl entry point
2081 * Called when the user wants to configure the interface.
2083 * return 0 on success, positive on failure
2084 ************************************************************************/
2086 ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2088 struct adapter *adapter = ifp->if_softc;
2089 struct ifreq *ifr = (struct ifreq *)data;
2090 #if defined(INET) || defined(INET6)
2091 struct ifaddr *ifa = (struct ifaddr *)data;
2092 bool avoid_reset = FALSE;
2100 if (ifa->ifa_addr->sa_family == AF_INET)
2104 if (ifa->ifa_addr->sa_family == AF_INET6)
2107 #if defined(INET) || defined(INET6)
2109 * Calling init results in link renegotiation,
2110 * so we avoid doing it when possible.
2113 ifp->if_flags |= IFF_UP;
2114 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2116 if (!(ifp->if_flags & IFF_NOARP))
2117 arp_ifinit(ifp, ifa);
2119 error = ether_ioctl(ifp, command, data);
2123 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
2124 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
2127 IXGBE_CORE_LOCK(adapter);
2128 ifp->if_mtu = ifr->ifr_mtu;
2129 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
2130 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2131 ixv_init_locked(adapter);
2132 IXGBE_CORE_UNLOCK(adapter);
2136 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2137 IXGBE_CORE_LOCK(adapter);
2138 if (ifp->if_flags & IFF_UP) {
2139 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2140 ixv_init_locked(adapter);
2142 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2144 adapter->if_flags = ifp->if_flags;
2145 IXGBE_CORE_UNLOCK(adapter);
2149 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
2150 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2151 IXGBE_CORE_LOCK(adapter);
2152 ixv_disable_intr(adapter);
2153 ixv_set_multi(adapter);
2154 ixv_enable_intr(adapter);
2155 IXGBE_CORE_UNLOCK(adapter);
2160 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
2161 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2165 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2166 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
2167 if (mask & IFCAP_HWCSUM)
2168 ifp->if_capenable ^= IFCAP_HWCSUM;
2169 if (mask & IFCAP_TSO4)
2170 ifp->if_capenable ^= IFCAP_TSO4;
2171 if (mask & IFCAP_LRO)
2172 ifp->if_capenable ^= IFCAP_LRO;
2173 if (mask & IFCAP_VLAN_HWTAGGING)
2174 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2175 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2176 IXGBE_CORE_LOCK(adapter);
2177 ixv_init_locked(adapter);
2178 IXGBE_CORE_UNLOCK(adapter);
2180 VLAN_CAPABILITIES(ifp);
2185 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
2186 error = ether_ioctl(ifp, command, data);
2193 /************************************************************************
2195 ************************************************************************/
2199 struct adapter *adapter = arg;
2201 IXGBE_CORE_LOCK(adapter);
2202 ixv_init_locked(adapter);
2203 IXGBE_CORE_UNLOCK(adapter);
2209 /************************************************************************
2211 ************************************************************************/
2213 ixv_handle_que(void *context, int pending)
2215 struct ix_queue *que = context;
2216 struct adapter *adapter = que->adapter;
2217 struct tx_ring *txr = que->txr;
2218 struct ifnet *ifp = adapter->ifp;
2221 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2222 more = ixgbe_rxeof(que);
2225 if (!ixv_ring_empty(ifp, txr->br))
2226 ixv_start_locked(ifp, txr);
2227 IXGBE_TX_UNLOCK(txr);
2229 taskqueue_enqueue(que->tq, &que->que_task);
2234 /* Re-enable this interrupt */
2235 ixv_enable_queue(adapter, que->msix);
2238 } /* ixv_handle_que */
2240 /************************************************************************
2241 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2242 ************************************************************************/
2244 ixv_allocate_msix(struct adapter *adapter)
2246 device_t dev = adapter->dev;
2247 struct ix_queue *que = adapter->queues;
2248 struct tx_ring *txr = adapter->tx_rings;
2249 int error, msix_ctrl, rid, vector = 0;
2251 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2253 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2254 RF_SHAREABLE | RF_ACTIVE);
2255 if (que->res == NULL) {
2256 device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
2260 /* Set the handler function */
2261 error = bus_setup_intr(dev, que->res,
2262 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2263 ixv_msix_que, que, &que->tag);
2266 device_printf(dev, "Failed to register QUE handler");
2269 #if __FreeBSD_version >= 800504
2270 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2273 adapter->active_queues |= (u64)(1 << que->msix);
2275 * Bind the MSI-X vector, and thus the
2276 * ring to the corresponding CPU.
2278 if (adapter->num_queues > 1)
2279 bus_bind_intr(dev, que->res, i);
2280 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2281 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
2282 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
2283 taskqueue_thread_enqueue, &que->tq);
2284 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2285 device_get_nameunit(adapter->dev));
2290 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2291 RF_SHAREABLE | RF_ACTIVE);
2292 if (!adapter->res) {
2294 "Unable to allocate bus resource: MBX interrupt [%d]\n",
2298 /* Set the mbx handler function */
2299 error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
2300 NULL, ixv_msix_mbx, adapter, &adapter->tag);
2302 adapter->res = NULL;
2303 device_printf(dev, "Failed to register LINK handler");
2306 #if __FreeBSD_version >= 800504
2307 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
2309 adapter->vector = vector;
2310 /* Tasklets for Mailbox */
2311 TASK_INIT(&adapter->link_task, 0, ixv_handle_link, adapter);
2312 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
2313 taskqueue_thread_enqueue, &adapter->tq);
2314 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
2315 device_get_nameunit(adapter->dev));
2317 * Due to a broken design QEMU will fail to properly
2318 * enable the guest for MSI-X unless the vectors in
2319 * the table are all set up, so we must rewrite the
2320 * ENABLE in the MSI-X control register again at this
2321 * point to cause it to successfully initialize us.
2323 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
2324 pci_find_cap(dev, PCIY_MSIX, &rid);
2325 rid += PCIR_MSIX_CTRL;
2326 msix_ctrl = pci_read_config(dev, rid, 2);
2327 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2328 pci_write_config(dev, rid, msix_ctrl, 2);
2332 } /* ixv_allocate_msix */
2334 /************************************************************************
2335 * ixv_configure_interrupts - Setup MSI-X resources
2337 * Note: The VF device MUST use MSI-X, there is no fallback.
2338 ************************************************************************/
2340 ixv_configure_interrupts(struct adapter *adapter)
2342 device_t dev = adapter->dev;
2343 int rid, want, msgs;
2345 /* Must have at least 2 MSI-X vectors */
2346 msgs = pci_msix_count(dev);
2350 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2352 if (adapter->msix_mem == NULL) {
2353 device_printf(adapter->dev, "Unable to map MSI-X table \n");
2358 * Want vectors for the queues,
2359 * plus an additional for mailbox.
2361 want = adapter->num_queues + 1;
2364 adapter->num_queues = msgs - 1;
2367 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2368 device_printf(adapter->dev,
2369 "Using MSI-X interrupts with %d vectors\n", want);
2370 /* reflect correct sysctl value */
2371 ixv_num_queues = adapter->num_queues;
2375 /* Release in case alloc was insufficient */
2376 pci_release_msi(dev);
2378 if (adapter->msix_mem != NULL) {
2379 bus_release_resource(dev, SYS_RES_MEMORY, rid,
2381 adapter->msix_mem = NULL;
2383 device_printf(adapter->dev, "MSI-X config error\n");
2386 } /* ixv_configure_interrupts */
2389 /************************************************************************
2390 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2392 * Done outside of interrupt context since the driver might sleep
2393 ************************************************************************/
2395 ixv_handle_link(void *context, int pending)
2397 struct adapter *adapter = context;
2399 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2400 &adapter->link_up, FALSE);
2401 ixv_update_link_status(adapter);
2402 } /* ixv_handle_link */
2404 /************************************************************************
2405 * ixv_check_link - Used in the local timer to poll for link changes
2406 ************************************************************************/
2408 ixv_check_link(struct adapter *adapter)
2410 adapter->hw.mac.get_link_status = TRUE;
2412 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2413 &adapter->link_up, FALSE);
2414 ixv_update_link_status(adapter);
2415 } /* ixv_check_link */