1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /************************************************************************
 * Driver version string, appended to the probe description string.
45 ************************************************************************/
46 char ixv_driver_version[] = "1.5.13-k";
48 /************************************************************************
51 * Used by probe to select devices to load on
52 * Last field stores an index into ixv_strings
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 ************************************************************************/
57 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
64 /* required last entry */
/*
 * NOTE(review): the all-zeros terminator entry and the closing brace are
 * not visible in this excerpt — confirm they are present; ixv_probe's
 * scan loop relies on vendor_id == 0 to terminate.
 */
68 /************************************************************************
69 * Table of branding strings
 *
 * Indexed by the final (String Index) field of ixv_vendor_info_array
 * entries; used by ixv_probe to build the device description.
70 ************************************************************************/
71 static char *ixv_strings[] = {
72 "Intel(R) PRO/10GbE Virtual Function Network Driver"
75 /************************************************************************
 * Function prototypes for device methods, ifnet callbacks, and the
 * driver's internal helpers (all static — file-local linkage).
77 ************************************************************************/
78 static int ixv_probe(device_t);
79 static int ixv_attach(device_t);
80 static int ixv_detach(device_t);
81 static int ixv_shutdown(device_t);
82 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
83 static void ixv_init(void *);
84 static void ixv_init_locked(struct adapter *);
85 static void ixv_stop(void *);
86 static uint64_t ixv_get_counter(struct ifnet *, ift_counter);
87 static void ixv_init_device_features(struct adapter *);
88 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
89 static int ixv_media_change(struct ifnet *);
90 static int ixv_allocate_pci_resources(struct adapter *);
91 static int ixv_allocate_msix(struct adapter *);
92 static int ixv_configure_interrupts(struct adapter *);
93 static void ixv_free_pci_resources(struct adapter *);
94 static void ixv_local_timer(void *);
95 static void ixv_setup_interface(device_t, struct adapter *);
96 static int ixv_negotiate_api(struct adapter *);
98 static void ixv_initialize_transmit_units(struct adapter *);
99 static void ixv_initialize_receive_units(struct adapter *);
100 static void ixv_initialize_rss_mapping(struct adapter *);
101 static void ixv_check_link(struct adapter *);
103 static void ixv_enable_intr(struct adapter *);
104 static void ixv_disable_intr(struct adapter *);
105 static void ixv_set_multi(struct adapter *);
106 static void ixv_update_link_status(struct adapter *);
107 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
108 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
109 static void ixv_configure_ivars(struct adapter *);
110 static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
112 static void ixv_setup_vlan_support(struct adapter *);
113 static void ixv_register_vlan(void *, struct ifnet *, u16);
114 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
116 static void ixv_save_stats(struct adapter *);
117 static void ixv_init_stats(struct adapter *);
118 static void ixv_update_stats(struct adapter *);
119 static void ixv_add_stats_sysctls(struct adapter *);
120 static void ixv_set_sysctl_value(struct adapter *, const char *,
121 const char *, int *, int);
123 /* The MSI-X Interrupt handlers */
124 static void ixv_msix_que(void *);
125 static void ixv_msix_mbx(void *);
127 /* Deferred interrupt tasklets */
128 static void ixv_handle_que(void *, int);
129 static void ixv_handle_link(void *, int);
131 /************************************************************************
132 * FreeBSD Device Interface Entry Points
133 ************************************************************************/
134 static device_method_t ixv_methods[] = {
135 /* Device interface */
136 DEVMETHOD(device_probe, ixv_probe),
137 DEVMETHOD(device_attach, ixv_attach),
138 DEVMETHOD(device_detach, ixv_detach),
139 DEVMETHOD(device_shutdown, ixv_shutdown),
/* NOTE(review): DEVMETHOD_END and the closing brace are not visible in
 * this excerpt — confirm the method table is properly terminated. */
143 static driver_t ixv_driver = {
144 "ixv", ixv_methods, sizeof(struct adapter),
/* Register the driver on the PCI bus and declare module dependencies. */
147 devclass_t ixv_devclass;
148 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
149 MODULE_DEPEND(ixv, pci, 1, 1, 1);
150 MODULE_DEPEND(ixv, ether, 1, 1, 1);
151 MODULE_DEPEND(ixv, netmap, 1, 1, 1);
154 * TUNEABLE PARAMETERS:
157 /* Number of Queues - do not exceed MSI-X vectors - 1 */
158 static int ixv_num_queues = 1;
159 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
162 * AIM: Adaptive Interrupt Moderation
163 * which means that the interrupt rate
164 * is varied over time based on the
165 * traffic for that interrupt vector
167 static int ixv_enable_aim = FALSE;
168 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
170 /* How many packets rxeof tries to clean at a time */
171 static int ixv_rx_process_limit = 256;
172 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
174 /* How many packets txeof tries to clean at a time */
175 static int ixv_tx_process_limit = 256;
176 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
178 /* Flow control setting, default to full */
179 static int ixv_flow_control = ixgbe_fc_full;
180 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
183 * Header split: this causes the hardware to DMA
184 * the header into a separate mbuf from the payload,
185 * it can be a performance win in some workloads, but
186 * in others it actually hurts, its off by default.
188 static int ixv_header_split = FALSE;
189 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
192 * Number of TX descriptors per ring,
193 * setting higher than RX as this seems
194 * the better performing choice.
196 static int ixv_txd = DEFAULT_TXD;
197 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
199 /* Number of RX descriptors per ring */
200 static int ixv_rxd = DEFAULT_RXD;
201 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
203 /* Legacy Transmit (single queue) */
204 static int ixv_enable_legacy_tx = 0;
205 TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
208 * Shadow VFTA table, this is needed because
209 * the real filter table gets cleared during
210 * a soft reset and we need to repopulate it.
212 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
/*
 * Transmit-path dispatch: bound at attach time to either the legacy
 * (if_start) or multiqueue (if_transmit) implementations, depending on
 * the IXGBE_FEATURE_LEGACY_TX feature flag.
 */
214 static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
215 static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
217 /************************************************************************
218 * ixv_probe - Device identification routine
220 * Determines if the driver should be loaded on
221 * adapter based on its PCI vendor/device ID.
223 * return BUS_PROBE_DEFAULT on success, positive on failure
224 ************************************************************************/
226 ixv_probe(device_t dev)
228 	ixgbe_vendor_info_t *ent;
229 	u16 pci_vendor_id = 0;
230 	u16 pci_device_id = 0;
231 	u16 pci_subvendor_id = 0;
232 	u16 pci_subdevice_id = 0;
233 	char adapter_name[256];
/* Fast reject: only Intel devices can match the table. */
236 	pci_vendor_id = pci_get_vendor(dev);
237 	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
240 	pci_device_id = pci_get_device(dev);
241 	pci_subvendor_id = pci_get_subvendor(dev);
242 	pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the table until the all-zeros terminator entry. */
244 	ent = ixv_vendor_info_array;
245 	while (ent->vendor_id != 0) {
246 		if ((pci_vendor_id == ent->vendor_id) &&
247 		    (pci_device_id == ent->device_id) &&
248 		    ((pci_subvendor_id == ent->subvendor_id) ||
249 		    (ent->subvendor_id == 0)) &&
250 		    ((pci_subdevice_id == ent->subdevice_id) ||
251 		    (ent->subdevice_id == 0))) {
/* NOTE(review): sprintf into a fixed 256-byte buffer; the branding
 * strings are short today, but snprintf would be the safer idiom. */
252 			sprintf(adapter_name, "%s, Version - %s",
253 			    ixv_strings[ent->index], ixv_driver_version);
254 			device_set_desc_copy(dev, adapter_name);
255 			return (BUS_PROBE_DEFAULT);
263 /************************************************************************
264 * ixv_attach - Device initialization routine
266 * Called when the driver is being loaded.
267 * Identifies the type of hardware, allocates all resources
268 * and initializes the hardware.
270 * return 0 on success, positive on failure
271 ************************************************************************/
273 ixv_attach(device_t dev)
275 	struct adapter *adapter;
279 	INIT_DEBUGOUT("ixv_attach: begin");
282 	 * Make sure BUSMASTER is set, on a VM under
283 	 * KVM it may not be and will break things.
285 	pci_enable_busmaster(dev);
287 	/* Allocate, clear, and link in our adapter structure */
288 	adapter = device_get_softc(dev);
290 	adapter->hw.back = adapter;
/* Hook the shared init/stop entry points used by common ixgbe code. */
293 	adapter->init_locked = ixv_init_locked;
294 	adapter->stop_locked = ixv_stop;
297 	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
299 	/* Do base PCI setup - map BAR0 */
300 	if (ixv_allocate_pci_resources(adapter)) {
301 		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
/* Attach per-device sysctl knobs (debug dump, AIM toggle). */
307 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
308 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
309 	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
312 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
313 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
314 	    "enable_aim", CTLFLAG_RW, &ixv_enable_aim, 1,
315 	    "Interrupt Moderation");
317 	/* Set up the timer callout */
318 	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
320 	/* Save off the information about this board */
321 	hw->vendor_id = pci_get_vendor(dev);
322 	hw->device_id = pci_get_device(dev);
323 	hw->revision_id = pci_get_revid(dev);
324 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
325 	hw->subsystem_device_id = pci_get_subdevice(dev);
327 	/* A subset of set_mac_type */
328 	switch (hw->device_id) {
329 	case IXGBE_DEV_ID_82599_VF:
330 		hw->mac.type = ixgbe_mac_82599_vf;
332 	case IXGBE_DEV_ID_X540_VF:
333 		hw->mac.type = ixgbe_mac_X540_vf;
335 	case IXGBE_DEV_ID_X550_VF:
336 		hw->mac.type = ixgbe_mac_X550_vf;
338 	case IXGBE_DEV_ID_X550EM_X_VF:
339 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
341 	case IXGBE_DEV_ID_X550EM_A_VF:
342 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
345 		/* Shouldn't get here since probe succeeded */
346 		device_printf(dev, "Unknown device ID!\n");
352 	ixv_init_device_features(adapter);
354 	/* Initialize the shared code */
355 	error = ixgbe_init_ops_vf(hw);
357 		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
362 	/* Setup the mailbox */
363 	ixgbe_init_mbx_params_vf(hw);
365 	/* Set the right number of segments */
366 	adapter->num_segs = IXGBE_82599_SCATTER;
/* Reset then init the VF hardware through the shared-code ops table. */
368 	error = hw->mac.ops.reset_hw(hw);
369 	if (error == IXGBE_ERR_RESET_FAILED)
370 		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
372 		device_printf(dev, "...reset_hw() failed with error %d\n",
379 	error = hw->mac.ops.init_hw(hw);
381 		device_printf(dev, "...init_hw() failed with error %d\n",
387 	/* Negotiate mailbox API version */
388 	error = ixv_negotiate_api(adapter);
391 		    "Mailbox API negotiation failed during attach!\n");
395 	/* If no mac address was assigned, make a random one */
396 	if (!ixv_check_ether_addr(hw->mac.addr)) {
397 		u8 addr[ETHER_ADDR_LEN];
398 		arc4rand(&addr, sizeof(addr), 0);
401 		bcopy(addr, hw->mac.addr, sizeof(addr));
402 		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
405 	/* Register for VLAN events */
406 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
407 	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
408 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
409 	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
411 	/* Sysctls for limiting the amount of work done in the taskqueues */
412 	ixv_set_sysctl_value(adapter, "rx_processing_limit",
413 	    "max number of rx packets to process",
414 	    &adapter->rx_process_limit, ixv_rx_process_limit);
416 	ixv_set_sysctl_value(adapter, "tx_processing_limit",
417 	    "max number of tx packets to process",
418 	    &adapter->tx_process_limit, ixv_tx_process_limit);
420 	/* Do descriptor calc and sanity checks */
421 	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
422 	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
423 		device_printf(dev, "TXD config issue, using default!\n");
424 		adapter->num_tx_desc = DEFAULT_TXD;
426 		adapter->num_tx_desc = ixv_txd;
428 	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
429 	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
430 		device_printf(dev, "RXD config issue, using default!\n");
431 		adapter->num_rx_desc = DEFAULT_RXD;
433 		adapter->num_rx_desc = ixv_rxd;
436 	error = ixv_configure_interrupts(adapter);
440 	/* Allocate our TX/RX Queues */
441 	if (ixgbe_allocate_queues(adapter)) {
442 		device_printf(dev, "ixgbe_allocate_queues() failed!\n");
447 	/* Setup OS specific network interface */
448 	ixv_setup_interface(dev, adapter);
450 	error = ixv_allocate_msix(adapter);
452 		device_printf(dev, "ixv_allocate_msix() failed!\n");
456 	/* Do the stats setup */
457 	ixv_save_stats(adapter);
458 	ixv_init_stats(adapter);
459 	ixv_add_stats_sysctls(adapter);
461 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
462 		ixgbe_netmap_attach(adapter);
464 	INIT_DEBUGOUT("ixv_attach: end");
/* Error unwind: free in reverse order of allocation. */
469 	ixgbe_free_transmit_structures(adapter);
470 	ixgbe_free_receive_structures(adapter);
471 	free(adapter->queues, M_DEVBUF);
473 	ixv_free_pci_resources(adapter);
474 	IXGBE_CORE_LOCK_DESTROY(adapter);
479 /************************************************************************
480 * ixv_detach - Device removal routine
482 * Called when the driver is being removed.
483 * Stops the adapter and deallocates all the resources
484 * that were allocated for driver operation.
486 * return 0 on success, positive on failure
487 ************************************************************************/
489 ixv_detach(device_t dev)
491 	struct adapter *adapter = device_get_softc(dev);
492 	struct ix_queue *que = adapter->queues;
494 	INIT_DEBUGOUT("ixv_detach: begin");
496 	/* Make sure VLANS are not using driver */
497 	if (adapter->ifp->if_vlantrunk != NULL) {
498 		device_printf(dev, "Vlan in use, detach first\n");
/* Detach from the stack, then stop the hardware under the core lock. */
502 	ether_ifdetach(adapter->ifp);
503 	IXGBE_CORE_LOCK(adapter);
505 	IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues before freeing resources. */
507 	for (int i = 0; i < adapter->num_queues; i++, que++) {
509 		struct tx_ring *txr = que->txr;
510 		taskqueue_drain(que->tq, &txr->txq_task);
511 		taskqueue_drain(que->tq, &que->que_task);
512 		taskqueue_free(que->tq);
516 	/* Drain the Mailbox(link) queue */
518 	taskqueue_drain(adapter->tq, &adapter->link_task);
519 	taskqueue_free(adapter->tq);
522 	/* Unregister VLAN events */
523 	if (adapter->vlan_attach != NULL)
524 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
525 	if (adapter->vlan_detach != NULL)
526 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
528 	callout_drain(&adapter->timer);
530 	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
531 		netmap_detach(adapter->ifp);
533 	ixv_free_pci_resources(adapter);
534 	bus_generic_detach(dev);
535 	if_free(adapter->ifp);
537 	ixgbe_free_transmit_structures(adapter);
538 	ixgbe_free_receive_structures(adapter);
539 	free(adapter->queues, M_DEVBUF);
541 	IXGBE_CORE_LOCK_DESTROY(adapter);
546 /************************************************************************
547 * ixv_init_locked - Init entry point
549 * Used in two ways: It is used by the stack as an init entry
550 * point in network interface structure. It is also used
551 * by the driver as a hw/sw initialization routine to get
552 * to a consistent state.
 *
 * Caller must hold the core mutex (asserted below).
554 * return 0 on success, positive on failure
555 ************************************************************************/
557 ixv_init_locked(struct adapter *adapter)
559 	struct ifnet *ifp = adapter->ifp;
560 	device_t dev = adapter->dev;
561 	struct ixgbe_hw *hw = &adapter->hw;
564 	INIT_DEBUGOUT("ixv_init_locked: begin");
565 	mtx_assert(&adapter->core_mtx, MA_OWNED);
566 	hw->adapter_stopped = FALSE;
567 	hw->mac.ops.stop_adapter(hw);
568 	callout_stop(&adapter->timer);
570 	/* reprogram the RAR[0] in case user changed it. */
571 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
573 	/* Get the latest mac address, User can use a LAA */
574 	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
575 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
576 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
578 	/* Prepare transmit descriptors and buffers */
579 	if (ixgbe_setup_transmit_structures(adapter)) {
580 		device_printf(dev, "Could not setup transmit structures\n");
585 	/* Reset VF and renegotiate mailbox API version */
586 	hw->mac.ops.reset_hw(hw);
587 	error = ixv_negotiate_api(adapter);
590 		    "Mailbox API negotiation failed in init_locked!\n");
594 	ixv_initialize_transmit_units(adapter);
596 	/* Setup Multicast table */
597 	ixv_set_multi(adapter);
600 	 * Determine the correct mbuf pool
601 	 * for doing jumbo/headersplit
603 	if (ifp->if_mtu > ETHERMTU)
604 		adapter->rx_mbuf_sz = MJUMPAGESIZE;
606 		adapter->rx_mbuf_sz = MCLBYTES;
608 	/* Prepare receive descriptors and buffers */
609 	if (ixgbe_setup_receive_structures(adapter)) {
610 		device_printf(dev, "Could not setup receive structures\n");
615 	/* Configure RX settings */
616 	ixv_initialize_receive_units(adapter);
618 	/* Set the various hardware offload abilities */
619 	ifp->if_hwassist = 0;
620 	if (ifp->if_capenable & IFCAP_TSO4)
621 		ifp->if_hwassist |= CSUM_TSO;
622 	if (ifp->if_capenable & IFCAP_TXCSUM) {
623 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
624 #if __FreeBSD_version >= 800000
625 		ifp->if_hwassist |= CSUM_SCTP;
629 	/* Set up VLAN offload and filter */
630 	ixv_setup_vlan_support(adapter);
632 	/* Set up MSI-X routing */
633 	ixv_configure_ivars(adapter);
635 	/* Set up auto-mask */
636 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
638 	/* Set moderation on the Link interrupt */
639 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
/* Re-baseline statistics after the reset above. */
642 	ixv_init_stats(adapter);
644 	/* Config/Enable Link */
645 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
/* Start the 1 Hz watchdog/stats timer. */
649 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
651 	/* And now turn on interrupts */
652 	ixv_enable_intr(adapter);
654 	/* Now inform the stack we're ready */
655 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
656 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
659 } /* ixv_init_locked */
662 * MSI-X Interrupt Handlers and Tasklets
/************************************************************************
 * ixv_enable_queue - Unmask the interrupt for one MSI-X vector
 *
 * Sets the vector's bit in VTEIMS so the queue interrupt can fire again.
 ************************************************************************/
666 ixv_enable_queue(struct adapter *adapter, u32 vector)
668 	struct ixgbe_hw *hw = &adapter->hw;
	/*
	 * Shift in unsigned arithmetic: `1 << vector` shifts a signed int,
	 * which is undefined behavior for vector == 31.
	 */
669 	u32 queue = (u32)1 << vector;
672 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
673 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
674 } /* ixv_enable_queue */
/************************************************************************
 * ixv_disable_queue - Mask the interrupt for one MSI-X vector
 *
 * Sets the vector's bit in VTEIMC to silence the queue interrupt until
 * ixv_enable_queue() re-enables it.
 ************************************************************************/
677 ixv_disable_queue(struct adapter *adapter, u32 vector)
679 	struct ixgbe_hw *hw = &adapter->hw;
	/*
	 * Perform the shift in 64-bit unsigned arithmetic. The previous
	 * form, (u64)(1 << vector), shifted a signed 32-bit int first:
	 * undefined behavior for vector == 31, and the subsequent widening
	 * sign-extends the negative intermediate into the upper 32 bits.
	 */
680 	u64 queue = (u64)1 << vector;
683 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
684 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
685 } /* ixv_disable_queue */
/* Trigger a software interrupt (VTEICS) on every queue in `queues`. */
688 ixv_rearm_queues(struct adapter *adapter, u64 queues)
690 	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
691 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
692 } /* ixv_rearm_queues */
695 /************************************************************************
696 * ixv_msix_que - MSI Queue Interrupt Service routine
 *
 * Masks the vector, cleans RX (and the TX ring under its lock), then
 * either reschedules the que task if more work remains or re-enables
 * the interrupt. Optionally computes an AIM moderation value.
697 ************************************************************************/
699 ixv_msix_que(void *arg)
701 	struct ix_queue *que = arg;
702 	struct adapter *adapter = que->adapter;
703 	struct ifnet *ifp = adapter->ifp;
704 	struct tx_ring *txr = que->txr;
705 	struct rx_ring *rxr = que->rxr;
/* Mask this vector until the work below is done. */
709 	ixv_disable_queue(adapter, que->msix);
712 	more = ixgbe_rxeof(que);
717 	 * Make certain that if the stack
718 	 * has anything queued the task gets
719 	 * scheduled to handle it.
721 	if (!ixv_ring_empty(adapter->ifp, txr->br))
722 		ixv_start_locked(ifp, txr);
723 	IXGBE_TX_UNLOCK(txr);
727 	if (ixv_enable_aim == FALSE)
730 	 * Do Adaptive Interrupt Moderation:
731 	 * - Write out last calculated setting
732 	 * - Calculate based on average size over
735 	if (que->eitr_setting)
736 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
739 	que->eitr_setting = 0;
741 	/* Idle, do nothing */
742 	if ((txr->bytes == 0) && (rxr->bytes == 0))
/* newitr = average packet size on the busier of TX/RX. */
745 	if ((txr->bytes) && (txr->packets))
746 		newitr = txr->bytes/txr->packets;
747 	if ((rxr->bytes) && (rxr->packets))
748 		newitr = max(newitr, (rxr->bytes / rxr->packets));
749 	newitr += 24; /* account for hardware frame, crc */
751 	/* set an upper boundary */
752 	newitr = min(newitr, 3000);
754 	/* Be nice to the mid range */
755 	if ((newitr > 300) && (newitr < 1200))
756 		newitr = (newitr / 3);
758 		newitr = (newitr / 2);
760 	newitr |= newitr << 16;
762 	/* save for next interrupt */
763 	que->eitr_setting = newitr;
/* More RX work pending: defer to the taskqueue instead of re-enabling. */
773 	taskqueue_enqueue(que->tq, &que->que_task);
774 	else /* Re-enable this interrupt */
775 		ixv_enable_queue(adapter, que->msix);
780 /************************************************************************
 * ixv_msix_mbx - Mailbox (link) interrupt service routine
 *
 * Reads and clears the cause register, schedules the link task on a
 * link-status change, and re-enables the OTHER interrupt.
782 ************************************************************************/
784 ixv_msix_mbx(void *arg)
786 	struct adapter *adapter = arg;
787 	struct ixgbe_hw *hw = &adapter->hw;
792 	/* First get the cause */
793 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
794 	/* Clear interrupt with write */
795 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
797 	/* Link status change */
798 	if (reg & IXGBE_EICR_LSC)
799 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
801 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
806 /************************************************************************
807 * ixv_media_status - Media Ioctl callback
809 * Called whenever the user queries the status of
810 * the interface using ifconfig.
 *
 * Refreshes the cached link state under the core lock and reports
 * speed/duplex back to the stack.
811 ************************************************************************/
813 ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
815 	struct adapter *adapter = ifp->if_softc;
817 	INIT_DEBUGOUT("ixv_media_status: begin");
818 	IXGBE_CORE_LOCK(adapter);
819 	ixv_update_link_status(adapter);
821 	ifmr->ifm_status = IFM_AVALID;
822 	ifmr->ifm_active = IFM_ETHER;
824 	if (!adapter->link_active) {
825 		IXGBE_CORE_UNLOCK(adapter);
829 	ifmr->ifm_status |= IFM_ACTIVE;
831 	switch (adapter->link_speed) {
832 	case IXGBE_LINK_SPEED_1GB_FULL:
833 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
835 	case IXGBE_LINK_SPEED_10GB_FULL:
836 		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
838 	case IXGBE_LINK_SPEED_100_FULL:
839 		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
841 	case IXGBE_LINK_SPEED_10_FULL:
842 		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
846 	IXGBE_CORE_UNLOCK(adapter);
849 } /* ixv_media_status */
851 /************************************************************************
852 * ixv_media_change - Media Ioctl callback
854 * Called when the user changes speed/duplex using
855 * media/mediopt option with ifconfig.
 *
 * The VF cannot set link parameters; only the auto media type is
 * accepted — anything else is rejected with a console message.
856 ************************************************************************/
858 ixv_media_change(struct ifnet *ifp)
860 	struct adapter *adapter = ifp->if_softc;
861 	struct ifmedia *ifm = &adapter->media;
863 	INIT_DEBUGOUT("ixv_media_change: begin");
865 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
868 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
872 		device_printf(adapter->dev, "Only auto media type\n");
877 } /* ixv_media_change */
880 /************************************************************************
883 * Negotiate the Mailbox API with the PF;
884 * start with the most featured API first.
 *
 * Walks the mbx_api list (terminated by ixgbe_mbox_api_unknown) until
 * the PF accepts a version.
885 ************************************************************************/
887 ixv_negotiate_api(struct adapter *adapter)
889 	struct ixgbe_hw *hw = &adapter->hw;
890 	int mbx_api[] = { ixgbe_mbox_api_11,
892 	    ixgbe_mbox_api_unknown };
895 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
896 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
902 } /* ixv_negotiate_api */
905 /************************************************************************
906 * ixv_set_multi - Multicast Update
908 * Called whenever multicast address list is updated.
 *
 * Flattens the ifnet multicast list into a byte array and hands it to
 * the shared code via the ixv_mc_array_itr iterator.
909 ************************************************************************/
911 ixv_set_multi(struct adapter *adapter)
913 	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
915 	struct ifmultiaddr *ifma;
916 	struct ifnet *ifp = adapter->ifp;
919 	IOCTL_DEBUGOUT("ixv_set_multi: begin");
921 #if __FreeBSD_version < 800000
/* Copy each link-layer multicast address into the flat mta buffer. */
926 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
927 		if (ifma->ifma_addr->sa_family != AF_LINK)
929 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
930 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
931 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
934 #if __FreeBSD_version < 800000
937 	if_maddr_runlock(ifp);
942 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
943 	    ixv_mc_array_itr, TRUE);
946 } /* ixv_set_multi */
948 /************************************************************************
951 * An iterator function needed by the multicast shared code.
952 * It feeds the shared code routine the addresses in the
953 * array of ixv_set_multi() one by one.
 *
 * Advances *update_ptr by one MAC address length per call.
954 ************************************************************************/
956 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
958 	u8 *addr = *update_ptr;
962 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
963 	*update_ptr = newptr;
966 } /* ixv_mc_array_itr */
968 /************************************************************************
969 * ixv_local_timer - Timer routine
971 * Checks for link status, updates statistics,
972 * and runs the watchdog check.
 *
 * Runs once per second via callout; caller (the callout subsystem)
 * holds the core mutex (asserted below).
973 ************************************************************************/
975 ixv_local_timer(void *arg)
977 	struct adapter *adapter = arg;
978 	device_t dev = adapter->dev;
979 	struct ix_queue *que = adapter->queues;
983 	mtx_assert(&adapter->core_mtx, MA_OWNED);
985 	ixv_check_link(adapter);
988 	ixv_update_stats(adapter);
991 	 * Check the TX queues status
992 	 * - mark hung queues so we don't schedule on them
993 	 * - watchdog only if all queues show hung
995 	for (int i = 0; i < adapter->num_queues; i++, que++) {
996 		/* Keep track of queues with work for soft irq */
998 			queues |= ((u64)1 << que->me);
1000 		 * Each time txeof runs without cleaning, but there
1001 		 * are uncleaned descriptors it increments busy. If
1002 		 * we get to the MAX we declare it hung.
1004 		if (que->busy == IXGBE_QUEUE_HUNG) {
1006 			/* Mark the queue as inactive */
1007 			adapter->active_queues &= ~((u64)1 << que->me);
1010 			/* Check if we've come back from hung */
1011 			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1012 				adapter->active_queues |= ((u64)1 << que->me);
1014 		if (que->busy >= IXGBE_MAX_TX_BUSY) {
1016 			    "Warning queue %d appears to be hung!\n", i);
1017 			que->txr->busy = IXGBE_QUEUE_HUNG;
1023 	/* Only truly watchdog if all queues show hung */
1024 	if (hung == adapter->num_queues)
1026 	else if (queues != 0) { /* Force an IRQ on queues with work */
1027 		ixv_rearm_queues(adapter, queues);
/* Re-arm for the next one-second tick. */
1030 	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* Watchdog path: mark the interface down and reinitialize. */
1036 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1037 	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1038 	adapter->watchdog_events++;
1039 	ixv_init_locked(adapter);
1040 } /* ixv_local_timer */
1042 /************************************************************************
1043 * ixv_update_link_status - Update OS on link state
1045 * Note: Only updates the OS on the cached link state.
1046 * The real check of the hardware only happens with
 * a link interrupt or the periodic ixv_check_link() call.
1048 ************************************************************************/
1050 ixv_update_link_status(struct adapter *adapter)
1052 	struct ifnet *ifp = adapter->ifp;
1053 	device_t dev = adapter->dev;
1055 	if (adapter->link_up) {
1056 		if (adapter->link_active == FALSE) {
/* link_speed == 128 is the shared-code encoding for 10 Gbps. */
1058 			device_printf(dev,"Link is up %d Gbps %s \n",
1059 			    ((adapter->link_speed == 128) ? 10 : 1),
1061 			adapter->link_active = TRUE;
1062 			if_link_state_change(ifp, LINK_STATE_UP);
1064 	} else { /* Link down */
1065 		if (adapter->link_active == TRUE) {
1067 			device_printf(dev,"Link is Down\n");
1068 			if_link_state_change(ifp, LINK_STATE_DOWN);
1069 			adapter->link_active = FALSE;
1074 } /* ixv_update_link_status */
1077 /************************************************************************
1078 * ixv_stop - Stop the hardware
1080 * Disables all traffic on the adapter by issuing a
1081 * global reset on the MAC and deallocates TX/RX buffers.
 *
 * Caller must hold the core mutex (asserted below).
1082 ************************************************************************/
1087 	struct adapter *adapter = arg;
1088 	struct ixgbe_hw *hw = &adapter->hw;
1092 	mtx_assert(&adapter->core_mtx, MA_OWNED);
1094 	INIT_DEBUGOUT("ixv_stop: begin\n");
1095 	ixv_disable_intr(adapter);
1097 	/* Tell the stack that the interface is no longer active */
1098 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* Reset, then stop the adapter; clear the stopped flag first so
 * stop_adapter actually runs. */
1100 	hw->mac.ops.reset_hw(hw);
1101 	adapter->hw.adapter_stopped = FALSE;
1102 	hw->mac.ops.stop_adapter(hw);
1103 	callout_stop(&adapter->timer);
1105 	/* reprogram the RAR[0] in case user changed it. */
1106 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1112 /************************************************************************
1113 * ixv_allocate_pci_resources
 *
 * Maps BAR0 and records the bus tag/handle for register access; also
 * picks up the tunable queue count.
1114 ************************************************************************/
1116 ixv_allocate_pci_resources(struct adapter *adapter)
1118 	device_t dev = adapter->dev;
1122 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1125 	if (!(adapter->pci_mem)) {
1126 		device_printf(dev, "Unable to allocate bus resource: memory\n");
1130 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1131 	adapter->osdep.mem_bus_space_handle =
1132 	    rman_get_bushandle(adapter->pci_mem);
1133 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1135 	/* Pick up the tuneable queues */
1136 	adapter->num_queues = ixv_num_queues;
1139 } /* ixv_allocate_pci_resources */
1141 /************************************************************************
1142 * ixv_free_pci_resources
 *
 * Tears down MSI-X interrupt handlers and releases IRQ, MSI-X table,
 * and BAR0 memory resources. Safe to call from a partially-failed
 * attach (guarded by the adapter->res check below).
1143 ************************************************************************/
1145 ixv_free_pci_resources(struct adapter * adapter)
1147 	struct ix_queue *que = adapter->queues;
1148 	device_t dev = adapter->dev;
1151 	memrid = PCIR_BAR(MSIX_82598_BAR);
1154 	 * There is a slight possibility of a failure mode
1155 	 * in attach that will result in entering this function
1156 	 * before interrupt resources have been initialized, and
1157 	 * in that case we do not want to execute the loops below
1158 	 * We can detect this reliably by the state of the adapter
1161 	if (adapter->res == NULL)
1165 	 * Release all msix queue resources:
1167 	for (int i = 0; i < adapter->num_queues; i++, que++) {
1168 		rid = que->msix + 1;
1169 		if (que->tag != NULL) {
1170 			bus_teardown_intr(dev, que->res, que->tag);
1173 		if (que->res != NULL)
1174 			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1178 	/* Clean the Mailbox interrupt last */
1179 	rid = adapter->vector + 1;
1181 	if (adapter->tag != NULL) {
1182 		bus_teardown_intr(dev, adapter->res, adapter->tag);
1183 		adapter->tag = NULL;
1185 	if (adapter->res != NULL)
1186 		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1189 	pci_release_msi(dev);
1191 	if (adapter->msix_mem != NULL)
1192 		bus_release_resource(dev, SYS_RES_MEMORY, memrid,
1195 	if (adapter->pci_mem != NULL)
1196 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1200 } /* ixv_free_pci_resources */
1202 /************************************************************************
1203 * ixv_setup_interface
1205 * Setup networking device structure and register an interface.
1206 ************************************************************************/
/* Allocates the ifnet, wires up init/ioctl/transmit entry points
 * (legacy if_start vs. multiqueue if_transmit depending on the
 * LEGACY_TX feature flag), attaches ethernet, and registers media. */
1208 ixv_setup_interface(device_t dev, struct adapter *adapter)
1212 INIT_DEBUGOUT("ixv_setup_interface: begin");
1214 ifp = adapter->ifp = if_alloc(IFT_ETHER);
/* if_alloc failure at attach time is unrecoverable for this device */
1216 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1217 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1218 ifp->if_baudrate = 1000000000;
1219 ifp->if_init = ixv_init;
1220 ifp->if_softc = adapter;
1221 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1222 ifp->if_ioctl = ixv_ioctl;
1223 if_setgetcounterfn(ifp, ixv_get_counter);
1224 /* TSO parameters */
/* 65518 = 65535 minus room for the ethernet/VLAN header -- TODO confirm */
1225 ifp->if_hw_tsomax = 65518;
1226 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1227 ifp->if_hw_tsomaxsegsize = 2048;
/* Legacy single-queue transmit vs. multiqueue (drbr) transmit paths;
 * the ixv_start_locked/ixv_ring_empty function pointers are switched
 * so the rest of the driver is path-agnostic. */
1228 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1229 ifp->if_start = ixgbe_legacy_start;
1230 ixv_start_locked = ixgbe_legacy_start_locked;
1231 ixv_ring_empty = ixgbe_legacy_ring_empty;
1233 ifp->if_transmit = ixgbe_mq_start;
1234 ifp->if_qflush = ixgbe_qflush;
1235 ixv_start_locked = ixgbe_mq_start_locked;
1236 ixv_ring_empty = drbr_empty;
1238 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1240 ether_ifattach(ifp, adapter->hw.mac.addr);
1242 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1245 * Tell the upper layer(s) we support long frames.
1247 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1249 /* Set capability flags */
1250 ifp->if_capabilities |= IFCAP_HWCSUM
1254 | IFCAP_VLAN_HWTAGGING
1260 /* Enable the above capabilities by default */
1261 ifp->if_capenable = ifp->if_capabilities;
1264 * Specify the media types supported by this adapter and register
1265 * callbacks to update media and link information
1267 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
/* VFs cannot control the physical link, so only autoselect is offered */
1269 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1270 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1273 } /* ixv_setup_interface */
1276 /************************************************************************
1277 * ixv_initialize_transmit_units - Enable transmit unit.
1278 ************************************************************************/
/* Programs each TX ring's descriptor base/length/head/tail registers
 * and enables the ring via VFTXDCTL. */
1280 ixv_initialize_transmit_units(struct adapter *adapter)
1282 struct tx_ring *txr = adapter->tx_rings;
1283 struct ixgbe_hw *hw = &adapter->hw;
1286 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1287 u64 tdba = txr->txdma.dma_paddr;
1290 /* Set WTHRESH to 8, burst writeback */
/* WTHRESH lives in bits 16-22 of TXDCTL, hence the << 16 */
1291 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1292 txdctl |= (8 << 16);
1293 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1295 /* Set the HW Tx Head and Tail indices */
1296 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1297 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1299 /* Set Tx Tail register */
1300 txr->tail = IXGBE_VFTDT(i);
1302 /* Set Ring parameters */
/* 64-bit DMA base address is split across the BAL/BAH register pair */
1303 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1304 (tdba & 0x00000000ffffffffULL));
1305 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1306 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1307 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
/* Disable relaxed-ordering on descriptor writeback */
1308 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1309 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1310 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
/* Enable the queue last, after all ring parameters are in place */
1313 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1314 txdctl |= IXGBE_TXDCTL_ENABLE;
1315 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1319 } /* ixv_initialize_transmit_units */
1322 /************************************************************************
1323 * ixv_initialize_rss_mapping
1324 ************************************************************************/
/* Seeds the VF RSS key registers, fills the 64-entry redirection table,
 * and programs VFMRQC with the enabled hash types.  With the RSS
 * feature the key/buckets come from the kernel RSS framework;
 * otherwise a random key and a default 4-type hash config are used. */
1326 ixv_initialize_rss_mapping(struct adapter *adapter)
1328 struct ixgbe_hw *hw = &adapter->hw;
1329 u32 reta = 0, mrqc, rss_key[10];
1332 u32 rss_hash_config;
1334 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1335 /* Fetch the configured RSS key */
1336 rss_getkey((uint8_t *)&rss_key);
1338 /* set up random bits */
1339 arc4rand(&rss_key, sizeof(rss_key), 0);
1342 /* Now fill out hash function seeds */
/* The VF exposes 10 32-bit RSS key registers (40 bytes total) */
1343 for (i = 0; i < 10; i++)
1344 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1346 /* Set up the redirection table */
/* j cycles 0..num_queues-1 so entries round-robin over the queues */
1347 for (i = 0, j = 0; i < 64; i++, j++) {
1348 if (j == adapter->num_queues)
1351 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1353 * Fetch the RSS bucket id for the given indirection
1354 * entry. Cap it at the number of configured buckets
1355 * (which is num_queues.)
1357 queue_id = rss_get_indirection_to_bucket(i);
1358 queue_id = queue_id % adapter->num_queues;
1363 * The low 8 bits are for hash value (n+0);
1364 * The next 8 bits are for hash value (n+1), etc.
/* Four 8-bit entries are packed per VFRETA register; written when
 * the fourth slot (bits 24-31) has been filled */
1367 reta |= ((uint32_t)queue_id) << 24;
1369 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1374 /* Perform hash on these packet types */
1375 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1376 rss_hash_config = rss_gethashconfig();
1379 * Disable UDP - IP fragments aren't currently being handled
1380 * and so we end up with a mix of 2-tuple and 4-tuple
1383 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1384 | RSS_HASHTYPE_RSS_TCP_IPV4
1385 | RSS_HASHTYPE_RSS_IPV6
1386 | RSS_HASHTYPE_RSS_TCP_IPV6;
1389 mrqc = IXGBE_MRQC_RSSEN;
1390 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1391 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1392 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1393 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1394 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1395 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1396 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1397 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
/* The *_EX hash types have no VF register support; warn instead */
1398 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1399 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1401 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1402 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1404 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1405 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1406 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1407 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1408 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1409 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1411 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1412 } /* ixv_initialize_rss_mapping */
1415 /************************************************************************
1416 * ixv_initialize_receive_units - Setup receive registers and features.
1417 ************************************************************************/
/* Programs packet-split type, negotiates max frame size with the PF,
 * configures every RX ring (base/len/head/tail/SRRCTL, enable with
 * poll-for-ready), honours netmap tail placement, then sets up RSS
 * and the RXCSUM policy. */
1419 ixv_initialize_receive_units(struct adapter *adapter)
1421 struct rx_ring *rxr = adapter->rx_rings;
1422 struct ixgbe_hw *hw = &adapter->hw;
1423 struct ifnet *ifp = adapter->ifp;
1424 u32 bufsz, rxcsum, psrtype;
/* SRRCTL encodes buffer size in 1KB units; pick 4K for jumbo MTU */
1426 if (ifp->if_mtu > ETHERMTU)
1427 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1429 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1431 psrtype = IXGBE_PSRTYPE_TCPHDR
1432 | IXGBE_PSRTYPE_UDPHDR
1433 | IXGBE_PSRTYPE_IPV4HDR
1434 | IXGBE_PSRTYPE_IPV6HDR
1435 | IXGBE_PSRTYPE_L2HDR;
1437 if (adapter->num_queues > 1)
1440 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1442 /* Tell PF our max_frame size */
/* Mailbox call; a failure here usually means PF/VF API mismatch */
1443 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1444 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1447 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1448 u64 rdba = rxr->rxdma.dma_paddr;
1451 /* Disable the queue */
1452 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1453 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1454 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Bounded poll (10 iterations) waiting for the disable to latch */
1455 for (int j = 0; j < 10; j++) {
1456 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1457 IXGBE_RXDCTL_ENABLE)
1463 /* Setup the Base and Length of the Rx Descriptor Ring */
1464 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1465 (rdba & 0x00000000ffffffffULL));
1466 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
1467 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1468 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1470 /* Reset the ring indices */
1471 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1472 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1474 /* Set up the SRRCTL register */
1475 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1476 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1477 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
/* Advanced one-buffer descriptor format (no header split) */
1479 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1480 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1482 /* Capture Rx Tail index */
1483 rxr->tail = IXGBE_VFRDT(rxr->me);
1485 /* Do the queue enabling last */
1486 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1487 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1488 for (int k = 0; k < 10; k++) {
1489 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1490 IXGBE_RXDCTL_ENABLE)
1496 /* Set the Tail Pointer */
1498 * In netmap mode, we must preserve the buffers made
1499 * available to userspace before the if_init()
1500 * (this is true by default on the TX side, because
1501 * init makes all buffers available to userspace).
1503 * netmap_reset() and the device specific routines
1504 * (e.g. ixgbe_setup_receive_rings()) map these
1505 * buffers at the end of the NIC ring, so here we
1506 * must set the RDT (tail) register to make sure
1507 * they are not overwritten.
1509 * In this driver the NIC ring starts at RDH = 0,
1510 * RDT points to the last slot available for reception (?),
1511 * so RDT = num_rx_desc - 1 means the whole ring is available.
1514 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1515 (ifp->if_capenable & IFCAP_NETMAP)) {
1516 struct netmap_adapter *na = NA(adapter->ifp);
1517 struct netmap_kring *kring = &na->rx_rings[i];
1518 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1520 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1522 #endif /* DEV_NETMAP */
1523 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1524 adapter->num_rx_desc - 1);
1527 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1529 ixv_initialize_rss_mapping(adapter);
1531 if (adapter->num_queues > 1) {
1532 /* RSS and RX IPP Checksum are mutually exclusive */
1533 rxcsum |= IXGBE_RXCSUM_PCSD;
1536 if (ifp->if_capenable & IFCAP_RXCSUM)
1537 rxcsum |= IXGBE_RXCSUM_PCSD;
/* Fall back to IP payload checksum when PCSD is not selected */
1539 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1540 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1542 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1545 } /* ixv_initialize_receive_units */
1547 /************************************************************************
1548 * ixv_setup_vlan_support
1549 ************************************************************************/
/* After a soft reset, re-enables VLAN tag stripping on every RX queue
 * and replays the shadow VFTA into hardware via the PF mailbox. */
1551 ixv_setup_vlan_support(struct adapter *adapter)
1553 struct ixgbe_hw *hw = &adapter->hw;
1554 u32 ctrl, vid, vfta, retry;
1557 * We get here thru init_locked, meaning
1558 * a soft reset, this has already cleared
1559 * the VFTA and other state, so if there
1560 * have been no vlan's registered do nothing.
1562 if (adapter->num_vlans == 0)
1565 /* Enable the queues */
1566 for (int i = 0; i < adapter->num_queues; i++) {
1567 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1568 ctrl |= IXGBE_RXDCTL_VME;
1569 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1571 * Let Rx path know that it needs to store VLAN tag
1572 * as part of extra mbuf info.
1574 adapter->rx_rings[i].vtag_strip = TRUE;
1578 * A soft reset zero's out the VFTA, so
1579 * we need to repopulate it now.
1581 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1582 if (ixv_shadow_vfta[i] == 0)
1584 vfta = ixv_shadow_vfta[i];
1586 * Reconstruct the vlan id's
1587 * based on the bits set in each
1588 * of the array ints.
/* Each 32-bit shadow word covers 32 consecutive VLAN IDs */
1590 for (int j = 0; j < 32; j++) {
1592 if ((vfta & (1 << j)) == 0)
1595 /* Call the shared code mailbox routine */
/* Retries on mailbox failure; 'retry' bound is in elided lines --
 * TODO confirm the retry/exit condition against the full source */
1596 while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1602 } /* ixv_setup_vlan_support */
1604 /************************************************************************
1607 * Run via a vlan config EVENT, it enables us to use the
1608 * HW Filter table since we can get the vlan id. This just
1609 * creates the entry in the soft version of the VFTA, init
1610 * will repopulate the real table.
1611 ************************************************************************/
1613 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1615 struct adapter *adapter = ifp->if_softc;
/* Event is broadcast to all interfaces; only act on our own */
1618 if (ifp->if_softc != arg) /* Not our event */
1621 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1624 IXGBE_CORE_LOCK(adapter);
/* vtag >> 5 selects the 32-bit shadow word; low 5 bits pick the bit */
1625 index = (vtag >> 5) & 0x7F;
1627 ixv_shadow_vfta[index] |= (1 << bit);
1628 ++adapter->num_vlans;
1629 /* Re-init to load the changes */
1630 ixv_init_locked(adapter);
1631 IXGBE_CORE_UNLOCK(adapter);
1632 } /* ixv_register_vlan */
1634 /************************************************************************
1635 * ixv_unregister_vlan
1637 * Run via a vlan unconfig EVENT, remove our entry
1639 ************************************************************************/
/* Mirror of ixv_register_vlan: clears the shadow-VFTA bit and
 * re-runs init so hardware state matches the shadow table. */
1641 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1643 struct adapter *adapter = ifp->if_softc;
1646 if (ifp->if_softc != arg)
1649 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1652 IXGBE_CORE_LOCK(adapter);
1653 index = (vtag >> 5) & 0x7F;
1655 ixv_shadow_vfta[index] &= ~(1 << bit);
1656 --adapter->num_vlans;
1657 /* Re-init to load the changes */
1658 ixv_init_locked(adapter);
1659 IXGBE_CORE_UNLOCK(adapter);
1660 } /* ixv_unregister_vlan */
1662 /************************************************************************
1664 ************************************************************************/
/* Unmasks VF interrupts: sets VTEIMS, programs auto-clear (VTEIAC)
 * for everything except OTHER/LSC, then enables each queue vector. */
1666 ixv_enable_intr(struct adapter *adapter)
1668 struct ixgbe_hw *hw = &adapter->hw;
1669 struct ix_queue *que = adapter->queues;
1670 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1673 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Mailbox/link causes must not auto-clear; the handler re-arms them */
1675 mask = IXGBE_EIMS_ENABLE_MASK;
1676 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1677 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1679 for (int i = 0; i < adapter->num_queues; i++, que++)
1680 ixv_enable_queue(adapter, que->msix);
1682 IXGBE_WRITE_FLUSH(hw);
1685 } /* ixv_enable_intr */
1687 /************************************************************************
1689 ************************************************************************/
/* Masks all VF interrupts: clears auto-clear, sets every bit in the
 * interrupt-mask-clear register, and flushes the posted writes. */
1691 ixv_disable_intr(struct adapter *adapter)
1693 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1694 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1695 IXGBE_WRITE_FLUSH(&adapter->hw);
1698 } /* ixv_disable_intr */
1700 /************************************************************************
1703 * Setup the correct IVAR register for a particular MSI-X interrupt
1704 * - entry is the register array entry
1705 * - vector is the MSI-X vector for this queue
1706 * - type is RX/TX/MISC
1707 ************************************************************************/
1709 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1711 struct ixgbe_hw *hw = &adapter->hw;
/* Hardware requires the "allocated" bit set alongside the vector */
1714 vector |= IXGBE_IVAR_ALLOC_VAL;
1716 if (type == -1) { /* MISC IVAR */
1717 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1720 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1721 } else { /* RX/TX IVARS */
/* Each VTIVAR register packs two queues x two types, 8 bits each:
 * bit offset = 16*(entry odd?) + 8*(type: 0=RX, 1=TX) */
1722 index = (16 * (entry & 1)) + (8 * type);
1723 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1724 ivar &= ~(0xFF << index);
1725 ivar |= (vector << index);
1726 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1728 } /* ixv_set_ivar */
1730 /************************************************************************
1731 * ixv_configure_ivars
1732 ************************************************************************/
/* Maps every queue's RX and TX causes onto its MSI-X vector, seeds
 * EITR with the default interrupt-throttle value, and finally maps
 * the mailbox/misc cause onto the dedicated mailbox vector. */
1734 ixv_configure_ivars(struct adapter *adapter)
1736 struct ix_queue *que = adapter->queues;
1738 for (int i = 0; i < adapter->num_queues; i++, que++) {
1739 /* First the RX queue entry */
1740 ixv_set_ivar(adapter, i, que->msix, 0);
1741 /* ... and the TX */
1742 ixv_set_ivar(adapter, i, que->msix, 1);
1743 /* Set an initial value in EITR */
1744 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1745 IXGBE_EITR_DEFAULT);
1748 /* For the mailbox interrupt */
/* type -1 selects the MISC IVAR path in ixv_set_ivar() */
1749 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1750 } /* ixv_configure_ivars */
1753 /************************************************************************
1755 ************************************************************************/
/* if_get_counter callback: serves packet/byte/mcast counters from the
 * softc's cached statistics, deferring everything else to the stack. */
1757 ixv_get_counter(struct ifnet *ifp, ift_counter cnt)
1759 struct adapter *adapter;
1761 adapter = if_getsoftc(ifp);
1764 case IFCOUNTER_IPACKETS:
1765 return (adapter->ipackets);
1766 case IFCOUNTER_OPACKETS:
1767 return (adapter->opackets);
1768 case IFCOUNTER_IBYTES:
1769 return (adapter->ibytes);
1770 case IFCOUNTER_OBYTES:
1771 return (adapter->obytes);
1772 case IFCOUNTER_IMCASTS:
1773 return (adapter->imcasts);
/* Unhandled counters fall back to the generic ifnet accounting */
1775 return (if_get_counter_default(ifp, cnt));
1777 } /* ixv_get_counter */
1779 /************************************************************************
1782 * The VF stats registers never have a truly virgin
1783 * starting point, so this routine tries to make an
1784 * artificial one, marking ground zero on attach as
1786 ************************************************************************/
/* Accumulates "counts since last reset" (current minus base) into the
 * saved_reset_* fields so totals survive a device reset. */
1788 ixv_save_stats(struct adapter *adapter)
/* Only meaningful once some traffic has been counted */
1790 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1791 adapter->stats.vf.saved_reset_vfgprc +=
1792 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1793 adapter->stats.vf.saved_reset_vfgptc +=
1794 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1795 adapter->stats.vf.saved_reset_vfgorc +=
1796 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1797 adapter->stats.vf.saved_reset_vfgotc +=
1798 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1799 adapter->stats.vf.saved_reset_vfmprc +=
1800 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1802 } /* ixv_save_stats */
1804 /************************************************************************
1806 ************************************************************************/
/* Snapshots the current hardware counter values into last_* and base_*
 * so subsequent delta computations (ixv_update_stats) start from here.
 * The octet counters are 36-bit, split across LSB/MSB registers. */
1808 ixv_init_stats(struct adapter *adapter)
1810 struct ixgbe_hw *hw = &adapter->hw;
1812 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1813 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1814 adapter->stats.vf.last_vfgorc |=
1815 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1817 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1818 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1819 adapter->stats.vf.last_vfgotc |=
1820 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1822 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
/* base_* anchors the artificial "ground zero" (see ixv_save_stats) */
1824 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1825 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1826 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1827 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1828 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1829 } /* ixv_init_stats */
/* Counter-accumulation helpers used by ixv_update_stats():
 * UPDATE_STAT_32 handles 32-bit rollover (adds 2^32 when the register
 * wrapped past 'last'); UPDATE_STAT_36 does the same for the 36-bit
 * LSB/MSB octet counters (adds 2^36 on wrap).
 * NOTE(review): the macro bodies are elided in this extract (the
 * do/while wrapper and the lines updating 'last'/merging 'current'
 * into 'count' are missing) -- consult the full source. */
1831 #define UPDATE_STAT_32(reg, last, count) \
1833 u32 current = IXGBE_READ_REG(hw, reg); \
1834 if (current < last) \
1835 count += 0x100000000LL; \
1837 count &= 0xFFFFFFFF00000000LL; \
1841 #define UPDATE_STAT_36(lsb, msb, last, count) \
1843 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
1844 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
1845 u64 current = ((cur_msb << 32) | cur_lsb); \
1846 if (current < last) \
1847 count += 0x1000000000LL; \
1849 count &= 0xFFFFFFF000000000LL; \
1853 /************************************************************************
1854 * ixv_update_stats - Update the board statistics counters.
1855 ************************************************************************/
/* Reads the VF hardware counters (with rollover handling via the
 * UPDATE_STAT_* macros) and mirrors them into the OS-visible fields
 * consumed by ixv_get_counter(). */
1857 ixv_update_stats(struct adapter *adapter)
1859 struct ixgbe_hw *hw = &adapter->hw;
1860 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1862 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1863 adapter->stats.vf.vfgprc);
1864 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1865 adapter->stats.vf.vfgptc);
/* Octet counters are 36-bit LSB/MSB pairs */
1866 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1867 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1868 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1869 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1870 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1871 adapter->stats.vf.vfmprc);
1873 /* Fill out the OS statistics structure */
1874 IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1875 IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1876 IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1877 IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1878 IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1879 } /* ixv_update_stats */
1881 /************************************************************************
1882 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1883 ************************************************************************/
/* Builds the sysctl tree under the device node: driver-level counters,
 * a "queueN" subtree per TX and per RX ring, and a "mac" subtree for
 * the hardware-register statistics. */
1885 ixv_add_stats_sysctls(struct adapter *adapter)
1887 device_t dev = adapter->dev;
1888 struct tx_ring *txr = adapter->tx_rings;
1889 struct rx_ring *rxr = adapter->rx_rings;
1890 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1891 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1892 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1893 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1894 struct sysctl_oid *stat_node, *queue_node;
1895 struct sysctl_oid_list *stat_list, *queue_list;
1897 #define QUEUE_NAME_LEN 32
1898 char namebuf[QUEUE_NAME_LEN];
1900 /* Driver Statistics */
1901 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1902 CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1903 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1904 CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1905 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1906 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1907 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1908 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
/* Per-queue TX statistics (both loops reuse the "queue%d" node name) */
1910 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1911 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1912 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1913 CTLFLAG_RD, NULL, "Queue Name");
1914 queue_list = SYSCTL_CHILDREN(queue_node);
1916 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1917 CTLFLAG_RD, &(adapter->queues[i].irqs), "IRQs on queue");
1918 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1919 CTLFLAG_RD, &(txr->no_tx_dma_setup),
1920 "Driver Tx DMA failure in Tx");
1921 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
1922 CTLFLAG_RD, &(txr->no_desc_avail),
1923 "Not-enough-descriptors count: TX");
1924 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1925 CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1926 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1927 CTLFLAG_RD, &(txr->br->br_drops),
1928 "Packets dropped in buf_ring");
/* Per-queue RX statistics */
1931 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1932 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1933 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1934 CTLFLAG_RD, NULL, "Queue Name");
1935 queue_list = SYSCTL_CHILDREN(queue_node);
1937 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1938 CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1939 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1940 CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1941 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1942 CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
/* Hardware-register (MAC) statistics subtree */
1945 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1946 CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1947 stat_list = SYSCTL_CHILDREN(stat_node);
1949 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1950 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1951 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1952 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1953 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1954 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1955 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1956 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1957 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1958 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1959 } /* ixv_add_stats_sysctls */
1961 /************************************************************************
1962 * ixv_set_sysctl_value
1963 ************************************************************************/
/* Convenience wrapper: registers a read-write integer sysctl under the
 * device tree and initializes *limit to 'value'. */
1965 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
1966 const char *description, int *limit, int value)
1969 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
1970 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
1971 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
1972 } /* ixv_set_sysctl_value */
1974 /************************************************************************
1975 * ixv_print_debug_info
1977 * Called only when em_display_debug_stats is enabled.
1978 * Provides a way to take a look at important statistics
1979 * maintained by the driver and hardware.
1980 ************************************************************************/
/* Dumps per-queue IRQ/RX/TX/LRO counters and the mailbox IRQ count to
 * the console.  NOTE(review): the lines assigning rxr/txr/lro inside
 * the loop are elided in this extract -- confirm in the full source. */
1982 ixv_print_debug_info(struct adapter *adapter)
1984 device_t dev = adapter->dev;
1985 struct ixgbe_hw *hw = &adapter->hw;
1986 struct ix_queue *que = adapter->queues;
1987 struct rx_ring *rxr;
1988 struct tx_ring *txr;
1989 struct lro_ctrl *lro;
1991 device_printf(dev, "Error Byte Count = %u \n",
1992 IXGBE_READ_REG(hw, IXGBE_ERRBC));
1994 for (int i = 0; i < adapter->num_queues; i++, que++) {
1998 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
1999 que->msix, (long)que->irqs);
2000 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2001 rxr->me, (long long)rxr->rx_packets);
2002 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2003 rxr->me, (long)rxr->rx_bytes);
2004 device_printf(dev, "RX(%d) LRO Queued= %lld\n",
2005 rxr->me, (long long)lro->lro_queued);
2006 device_printf(dev, "RX(%d) LRO Flushed= %lld\n",
2007 rxr->me, (long long)lro->lro_flushed);
2008 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2009 txr->me, (long)txr->total_packets);
2010 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2011 txr->me, (long)txr->no_desc_avail);
2014 device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
2015 } /* ixv_print_debug_info */
2017 /************************************************************************
2019 ************************************************************************/
/* Sysctl handler: writing a value triggers ixv_print_debug_info();
 * plain reads (no newptr) return without dumping anything. */
2021 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2023 struct adapter *adapter;
2027 error = sysctl_handle_int(oidp, &result, 0, req);
2029 if (error || !req->newptr)
2033 adapter = (struct adapter *)arg1;
2034 ixv_print_debug_info(adapter);
2038 } /* ixv_sysctl_debug */
2040 /************************************************************************
2041 * ixv_init_device_features
2042 ************************************************************************/
/* Computes feat_cap (what this MAC supports) and feat_en (what is
 * actually enabled): defaults on, per-MAC additions, then sysctl-gated
 * options such as legacy single-queue TX. */
2044 ixv_init_device_features(struct adapter *adapter)
2046 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2049 | IXGBE_FEATURE_LEGACY_TX;
2051 /* A tad short on feature flags for VFs, atm. */
2052 switch (adapter->hw.mac.type) {
2053 case ixgbe_mac_82599_vf:
2055 case ixgbe_mac_X540_vf:
/* X550-family VFs require an advanced context descriptor per packet */
2057 case ixgbe_mac_X550_vf:
2058 case ixgbe_mac_X550EM_x_vf:
2059 case ixgbe_mac_X550EM_a_vf:
2060 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2066 /* Enabled by default... */
2067 /* Is a virtual function (VF) */
2068 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2069 adapter->feat_en |= IXGBE_FEATURE_VF;
2071 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2072 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2073 /* Receive-Side Scaling (RSS) */
2074 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2075 adapter->feat_en |= IXGBE_FEATURE_RSS;
2076 /* Needs advanced context descriptor regardless of offloads req'd */
2077 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2078 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2080 /* Enabled via sysctl... */
2081 /* Legacy (single queue) transmit */
2082 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2083 ixv_enable_legacy_tx)
2084 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2085 } /* ixv_init_device_features */
2087 /************************************************************************
2088 * ixv_shutdown - Shutdown entry point
2089 ************************************************************************/
/* Device-method shutdown hook: quiesces the adapter under the core lock
 * (the ixv_stop call is in an elided line -- TODO confirm). */
2091 ixv_shutdown(device_t dev)
2093 struct adapter *adapter = device_get_softc(dev);
2094 IXGBE_CORE_LOCK(adapter);
2096 IXGBE_CORE_UNLOCK(adapter);
2099 } /* ixv_shutdown */
2102 /************************************************************************
2103 * ixv_ioctl - Ioctl entry point
2105 * Called when the user wants to configure the interface.
2107 * return 0 on success, positive on failure
2108 ************************************************************************/
/* Handles SIOCSIFADDR (avoiding a full reinit when possible),
 * SIOCSIFMTU, SIOCSIFFLAGS, SIOC(ADD|DEL)MULTI, media ioctls, and
 * SIOCSIFCAP; everything else falls through to ether_ioctl(). */
2110 ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2112 struct adapter *adapter = ifp->if_softc;
2113 struct ifreq *ifr = (struct ifreq *)data;
2114 #if defined(INET) || defined(INET6)
2115 struct ifaddr *ifa = (struct ifaddr *)data;
2116 bool avoid_reset = FALSE;
/* SIOCSIFADDR: only INET/INET6 address changes can skip the reset */
2124 if (ifa->ifa_addr->sa_family == AF_INET)
2128 if (ifa->ifa_addr->sa_family == AF_INET6)
2131 #if defined(INET) || defined(INET6)
2133 * Calling init results in link renegotiation,
2134 * so we avoid doing it when possible.
2137 ifp->if_flags |= IFF_UP;
2138 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2140 if (!(ifp->if_flags & IFF_NOARP))
2141 arp_ifinit(ifp, ifa);
2143 error = ether_ioctl(ifp, command, data);
2147 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
2148 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
/* MTU change requires a reinit when the interface is running */
2151 IXGBE_CORE_LOCK(adapter);
2152 ifp->if_mtu = ifr->ifr_mtu;
2153 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
2154 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2155 ixv_init_locked(adapter);
2156 IXGBE_CORE_UNLOCK(adapter);
2160 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2161 IXGBE_CORE_LOCK(adapter);
2162 if (ifp->if_flags & IFF_UP) {
2163 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2164 ixv_init_locked(adapter);
2166 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2168 adapter->if_flags = ifp->if_flags;
2169 IXGBE_CORE_UNLOCK(adapter);
2173 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
/* Multicast filter reprogram is done with interrupts disabled */
2174 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2175 IXGBE_CORE_LOCK(adapter);
2176 ixv_disable_intr(adapter);
2177 ixv_set_multi(adapter);
2178 ixv_enable_intr(adapter);
2179 IXGBE_CORE_UNLOCK(adapter);
2184 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
2185 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle only the capabilities whose bits changed */
2189 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2190 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
2191 if (mask & IFCAP_HWCSUM)
2192 ifp->if_capenable ^= IFCAP_HWCSUM;
2193 if (mask & IFCAP_TSO4)
2194 ifp->if_capenable ^= IFCAP_TSO4;
2195 if (mask & IFCAP_LRO)
2196 ifp->if_capenable ^= IFCAP_LRO;
2197 if (mask & IFCAP_VLAN_HWTAGGING)
2198 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2199 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2200 IXGBE_CORE_LOCK(adapter);
2201 ixv_init_locked(adapter);
2202 IXGBE_CORE_UNLOCK(adapter);
2204 VLAN_CAPABILITIES(ifp);
2209 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
2210 error = ether_ioctl(ifp, command, data);
2217 /************************************************************************
2219  ************************************************************************/
/*
 * ixv_init - if_init entry point for the VF interface.
 *
 * Serializes (re)initialization by taking the core lock and delegating
 * all real work to ixv_init_locked().
 *
 * NOTE(review): the signature and closing-brace lines are elided from this
 * excerpt; "arg" is presumably the softc (void *) handed in by the ifnet
 * framework — confirm against the full source file.
 */
2223 struct adapter *adapter = arg;
2225 IXGBE_CORE_LOCK(adapter);
2226 ixv_init_locked(adapter);
2227 IXGBE_CORE_UNLOCK(adapter);
2233 /************************************************************************
2235  ************************************************************************/
/*
 * ixv_handle_que - Deferred (taskqueue) service routine for one RX/TX
 * queue pair, scheduled from the queue's MSI-X interrupt handler.
 *
 * While the interface is running: drains completed RX descriptors via
 * ixgbe_rxeof(), restarts transmission if the buf_ring holds pending
 * packets, and re-enqueues itself when more RX work remains; finally it
 * re-enables this queue's interrupt vector.
 *
 * NOTE(review): several lines are elided from this excerpt — the
 * declaration of "more", the IXGBE_TX_LOCK() matching the unlock below,
 * and (presumably) an "if (more)" guard around the re-enqueue plus an
 * early return before the interrupt re-enable. Confirm against the full
 * source file.
 */
2237 ixv_handle_que(void *context, int pending)
2239 struct ix_queue *que = context;
2240 struct adapter *adapter = que->adapter;
2241 struct tx_ring *txr = que->txr;
2242 struct ifnet *ifp = adapter->ifp;
2245 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Harvest completed receive descriptors for this queue. */
2246 more = ixgbe_rxeof(que);
/* Kick TX if packets are waiting on the buf_ring. */
2249 if (!ixv_ring_empty(ifp, txr->br))
2250 ixv_start_locked(ifp, txr);
2251 IXGBE_TX_UNLOCK(txr);
/* Reschedule ourselves to continue RX processing. */
2253 taskqueue_enqueue(que->tq, &que->que_task);
2258 /* Re-enable this interrupt */
2259 ixv_enable_queue(adapter, que->msix);
2262 } /* ixv_handle_que */
2264 /************************************************************************
2265  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2266  ************************************************************************/
/*
 * Wires up one MSI-X vector per RX/TX queue plus one final vector for the
 * PF<->VF mailbox:
 *   - per queue: IRQ bus resource, ixv_msix_que handler, optional CPU
 *     binding, and a dedicated fast taskqueue with one thread;
 *   - mailbox: IRQ resource, ixv_msix_mbx handler, and a taskqueue that
 *     runs ixv_handle_link outside interrupt context.
 * Ends with a QEMU workaround (see in-line comment near the bottom).
 *
 * NOTE(review): error-return paths, the per-iteration "rid" computation,
 * several "if (error)" guards, and the final return are elided from this
 * excerpt — confirm against the full source file.
 */
2268 ixv_allocate_msix(struct adapter *adapter)
2270 device_t dev = adapter->dev;
2271 struct ix_queue *que = adapter->queues;
2272 struct tx_ring *txr = adapter->tx_rings;
2273 int error, msix_ctrl, rid, vector = 0;
/* One IRQ resource + handler + taskqueue per queue. */
2275 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2277 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2278 RF_SHAREABLE | RF_ACTIVE);
2279 if (que->res == NULL) {
2280 device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
2284 /* Set the handler function */
2285 error = bus_setup_intr(dev, que->res,
2286 INTR_TYPE_NET | INTR_MPSAFE, NULL,
2287 ixv_msix_que, que, &que->tag);
2290 device_printf(dev, "Failed to register QUE handler");
2293 #if __FreeBSD_version >= 800504
2294 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* Record this vector in the active-queue bitmask. */
2297 adapter->active_queues |= (u64)(1 << que->msix);
2299 * Bind the MSI-X vector, and thus the
2300 * ring to the corresponding CPU.
2302 if (adapter->num_queues > 1)
2303 bus_bind_intr(dev, que->res, i);
2304 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2305 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
2306 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
2307 taskqueue_thread_enqueue, &que->tq);
2308 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2309 device_get_nameunit(adapter->dev));
/* The remaining (last) vector services PF<->VF mailbox events. */
2314 adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2315 RF_SHAREABLE | RF_ACTIVE);
2316 if (!adapter->res) {
2318 "Unable to allocate bus resource: MBX interrupt [%d]\n",
2322 /* Set the mbx handler function */
2323 error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
2324 NULL, ixv_msix_mbx, adapter, &adapter->tag);
2326 adapter->res = NULL;
2327 device_printf(dev, "Failed to register LINK handler");
2330 #if __FreeBSD_version >= 800504
2331 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
2333 adapter->vector = vector;
2334 /* Tasklets for Mailbox */
2335 TASK_INIT(&adapter->link_task, 0, ixv_handle_link, adapter);
2336 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
2337 taskqueue_thread_enqueue, &adapter->tq);
2338 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
2339 device_get_nameunit(adapter->dev));
2341 * Due to a broken design QEMU will fail to properly
2342 * enable the guest for MSI-X unless the vectors in
2343 * the table are all set up, so we must rewrite the
2344 * ENABLE in the MSI-X control register again at this
2345 * point to cause it to successfully initialize us.
2347 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
/* Re-set ENABLE in the MSI-X control word of the MSI-X capability. */
2348 pci_find_cap(dev, PCIY_MSIX, &rid);
2349 rid += PCIR_MSIX_CTRL;
2350 msix_ctrl = pci_read_config(dev, rid, 2);
2351 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2352 pci_write_config(dev, rid, msix_ctrl, 2);
2356 } /* ixv_allocate_msix */
2358 /************************************************************************
2359  * ixv_configure_interrupts - Setup MSI-X resources
2361  * Note: The VF device MUST use MSI-X, there is no fallback.
2362  ************************************************************************/
/*
 * Maps the MSI-X table BAR, sizes the vector request to num_queues + 1
 * (one per queue plus one for the mailbox), and allocates the vectors via
 * pci_alloc_msix(). On any failure it releases the MSI state and the
 * mapped table and reports "MSI-X config error".
 *
 * NOTE(review): the success/failure control flow (return statements, the
 * "rid" initialization for the table BAR, and the clamp branch around
 * line 2388) is partially elided from this excerpt — confirm against the
 * full source file.
 */
2364 ixv_configure_interrupts(struct adapter *adapter)
2366 device_t dev = adapter->dev;
2367 int rid, want, msgs;
2369 /* Must have at least 2 MSI-X vectors */
2370 msgs = pci_msix_count(dev);
/* Map the MSI-X table so vectors can be programmed. */
2374 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2376 if (adapter->msix_mem == NULL) {
2377 device_printf(adapter->dev, "Unable to map MSI-X table \n");
2382 * Want vectors for the queues,
2383 * plus an additional for mailbox.
2385 want = adapter->num_queues + 1;
/* Not enough vectors: shrink the queue count to fit (one reserved for mbx). */
2388 adapter->num_queues = msgs - 1;
2391 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2392 device_printf(adapter->dev,
2393 "Using MSI-X interrupts with %d vectors\n", want);
2394 /* reflect correct sysctl value */
2395 ixv_num_queues = adapter->num_queues;
2399 /* Release in case alloc was insufficient */
2400 pci_release_msi(dev);
2402 if (adapter->msix_mem != NULL) {
2403 bus_release_resource(dev, SYS_RES_MEMORY, rid,
2405 adapter->msix_mem = NULL;
2407 device_printf(adapter->dev, "MSI-X config error\n");
2410 } /* ixv_configure_interrupts */
2413 /************************************************************************
2414  * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
2416  * Done outside of interrupt context since the driver might sleep
2417  ************************************************************************/
/*
 * Taskqueue handler scheduled from the mailbox interrupt: queries the
 * current link state through the MAC ops check_link callback (filling
 * adapter->link_speed / adapter->link_up) and propagates the result via
 * ixv_update_link_status(). "pending" is the standard taskqueue argument
 * and is unused here.
 */
2419 ixv_handle_link(void *context, int pending)
2421 struct adapter *adapter = context;
/* FALSE = do not wait for link to come up; just report current state. */
2423 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2424 &adapter->link_up, FALSE);
2425 ixv_update_link_status(adapter);
2426 } /* ixv_handle_link */
2428 /************************************************************************
2429  * ixv_check_link - Used in the local timer to poll for link changes
2430  ************************************************************************/
/*
 * Periodic link poll: forces get_link_status TRUE so check_link performs
 * a fresh query (rather than returning cached state), then pushes the
 * refreshed state to the stack via ixv_update_link_status().
 */
2432 ixv_check_link(struct adapter *adapter)
2434 adapter->hw.mac.get_link_status = TRUE;
/* FALSE = non-blocking query of current link state. */
2436 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
2437 &adapter->link_up, FALSE);
2438 ixv_update_link_status(adapter);
2439 } /* ixv_check_link */