/******************************************************************************

  Copyright (c) 2001-2010, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include "ixv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.0.0";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void	ixv_start(struct ifnet *);
static void	ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int	ixv_mq_start(struct ifnet *, struct mbuf *);
static int	ixv_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void	ixv_qflush(struct ifnet *);
#endif
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_allocate_queues(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static int	ixv_allocate_transmit_buffers(struct tx_ring *);
static int	ixv_setup_transmit_structures(struct adapter *);
static void	ixv_setup_transmit_ring(struct tx_ring *);
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_free_transmit_structures(struct adapter *);
static void	ixv_free_transmit_buffers(struct tx_ring *);

static int	ixv_allocate_receive_buffers(struct rx_ring *);
static int	ixv_setup_receive_structures(struct adapter *);
static int	ixv_setup_receive_ring(struct rx_ring *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_free_receive_structures(struct adapter *);
static void	ixv_free_receive_buffers(struct rx_ring *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static bool	ixv_txeof(struct tx_ring *);
static bool	ixv_rxeof(struct ix_queue *, int);
static void	ixv_rx_checksum(u32, struct mbuf *, u32);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static void	ixv_refresh_mbufs(struct rx_ring *, int);
static int	ixv_xmit(struct tx_ring *, struct mbuf **);
static int	ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int	ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixv_dma_malloc(struct adapter *, bus_size_t,
		    struct ixv_dma_alloc *, int);
static void	ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void	ixv_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static bool	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);
/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	{0, 0}
};

static driver_t ixv_driver = {
	"ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
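
/*
 * Usage sketch (added commentary, not in the original sources): the
 * TUNABLE_INT() registrations in this file create boot-time loader
 * tunables, so these knobs would normally be set in /boot/loader.conf,
 * for example:
 *
 *	hw.ixv.enable_aim="1"
 *	hw.ixv.rx_process_limit="256"
 *
 * The names match the registrations below; the values shown are
 * illustrative only.
 */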
/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, it's off by default.
 */
static bool ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_probe(device_t dev)
{
	ixv_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];


	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
				ixv_strings[ent->index],
				ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int		error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init*/
	IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}
	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;
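	/*
	 * Worked example (added commentary, not in the original sources):
	 * each advanced descriptor is 16 bytes, so the modulo test above
	 * verifies the ring's total byte size is a multiple of DBA_ALIGN
	 * (128 in this driver's header), the alignment the hardware needs
	 * for the descriptor base and length registers.  E.g. 256
	 * descriptors * 16 bytes = 4096 bytes, which divides evenly.
	 */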
	/* Allocate our TX/RX Queues */
	if (ixv_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: it's
	** at this point that the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXV_FC_PAUSE;
	hw->fc.low_water = IXV_FC_LO;
	hw->fc.high_water = IXV_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev,"Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;
	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixv_rx_process_limit);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);

}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);
	return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
{
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXV_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixv_xmit(txr, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;

	}
	return;
}
/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXV_TX_LOCK(txr);
		ixv_start_locked(txr, ifp);
		IXV_TX_UNLOCK(txr);
	}
	return;
}

#else

/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXV_TX_TRYLOCK(txr)) {
		err = ixv_mq_start_locked(ifp, txr, m);
		IXV_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		taskqueue_enqueue(que->tq, &que->que_task);
	}

	return (err);
}
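
/*
 * Commentary (added, not in the original sources): ixv_mq_start() maps
 * packets to a ring via m_pkthdr.flowid when the stack provides one
 * (M_FLOWID), so all packets of a flow land on the same TX ring and
 * keep their ordering.  When the ring lock is contended, the mbuf is
 * parked in the ring's buf_ring and drained later by the queue task
 * through ixv_mq_start_locked() below.
 */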
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued, err = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixv_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}

	return (err);
}
/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXV_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}

#endif
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
	int		error = 0;

	switch (command) {

	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXV_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXV_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXV_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixv_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixv_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXV_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
	return;
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = 1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1 << vector);
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
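
/*
 * Commentary (added, not in the original sources): on the VF each queue
 * MSIX vector owns one bit in the VTEIMS/VTEIMC/VTEICS registers, so
 * "1 << vector" selects that queue's bit and the IXGBE_EIMS_RTX_QUEUE
 * mask clamps the write to the bits that belong to RX/TX queues.
 * Writing VTEIMS unmasks, VTEIMC masks, and VTEICS raises a software
 * interrupt, which is how ixv_rearm_queues() kicks an idle queue.
 */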
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixv_rxeof(que, adapter->rx_process_limit);
		IXV_TX_LOCK(txr);
		ixv_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixv_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixv_start_locked(txr, ifp);
#endif
		IXV_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more_tx, more_rx;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	IXV_TX_LOCK(txr);
	more_tx = ixv_txeof(txr);
	IXV_TX_UNLOCK(txr);

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;

	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;
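
	/*
	 * Commentary (added, not in the original sources): newitr starts
	 * as the average frame size seen on this vector since the last
	 * interrupt (bytes/packets, plus 24 for preamble/CRC overhead),
	 * is clamped to 3000, and is scaled down so mid-sized traffic
	 * gets a gentler interrupt rate.  The value is duplicated into
	 * the upper 16 bits (the EITR layout differs across MAC
	 * generations) and only written to VTEITR on the next interrupt,
	 * so the register always reflects the most recent interval.
	 */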
	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->mbx_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXV_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXV_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXV_CORE_UNLOCK(adapter);

	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *  	- return 0 on success, positive on failure
 *
 **********************************************************************/
static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter	*adapter = txr->adapter;
	u32		olinfo_status = 0, cmd_type_len;
	u32		paylen = 0;
	int		i, j, error, nsegs;
	int		first, last = 0;
	struct mbuf	*m_head;
	bus_dma_segment_t segs[32];
	bus_dmamap_t	map;
	struct ixv_tx_buf *txbuf, *txbuf_mapped;
	union ixgbe_adv_tx_desc *txd = NULL;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	txbuf_mapped = txbuf;
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			adapter->no_tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		adapter->no_tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}
	/* Make certain there are enough descriptors */
	if (nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail++;
		error = ENOBUFS;
		goto xmit_fail;
	}

	/*
	** Set up the appropriate offload context
	** this becomes the first descriptor of
	** a packet.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ixv_tso_setup(txr, m_head, &paylen)) {
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
			++adapter->tso_tx;
		} else
			return (ENXIO);
	} else if (ixv_tx_ctx_setup(txr, m_head))
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	/* Record payload length */
	if (paylen == 0)
		olinfo_status |= m_head->m_pkthdr.len <<
		    IXGBE_ADVTXD_PAYLEN_SHIFT;
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len |seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i; /* descriptor that will get completion IRQ */

		if (++i == adapter->num_tx_desc)
			i = 0;

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop_index = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

	return (0);

xmit_fail:
	bus_dmamap_unload(txr->txtag, txbuf->map);
	return (error);

}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16
static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet	*ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr);

	return;
}
/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct tx_ring	*txr = adapter->tx_rings;
	int		i;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * If the interface has been paused
	 * then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
		goto out;
	/*
	** Check for time since any descriptor was cleaned
	*/
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		if (txr->watchdog_check == FALSE) {
			IXV_TX_UNLOCK(txr);
			continue;
		}
		if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
			goto hung;
		IXV_TX_UNLOCK(txr);
	}
out:
	ixv_rearm_queues(adapter, adapter->que_mask);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

hung:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
	device_printf(dev,"TX(%d) desc avail = %d,"
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	IXV_TX_UNLOCK(txr);
	ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	device_t	dev = adapter->dev;


	if (adapter->link_up){
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev,"Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128)? 10:1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			for (int i = 0; i < adapter->num_queues;
			    i++, txr++)
				txr->watchdog_check = FALSE;
		}
	}

	return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	u16		pci_cmd_word;

	/*
	** Make sure BUSMASTER is set, on a VM under
	** KVM it may not be and will break things.
	*/
	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (pci_cmd_word & PCIM_CMD_MEMEN))) {
		INIT_DEBUGOUT("Memory Access and/or Bus Master "
		    "bits were not set!\n");
		pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct		ix_queue *que = adapter->queues;
	int		error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->que_mask |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);

		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->mbxvec = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_extcap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}
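
/*
 * Commentary (added, not in the original sources): in the QEMU
 * workaround above, PCIR_MSIX_CTRL is the offset of the Message
 * Control word within the MSI-X capability located by
 * pci_find_extcap(), so the read-modify-write re-asserts the MSI-X
 * Enable bit after all vector table entries have been programmed.
 */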
/*
 * Setup MSIX resources, note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, vectors, want = 2;


	/* First try MSI/X */
	rid = PCIR_BAR(MSIX_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!adapter->msix_mem) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		return (ENXIO);
	}

	vectors = pci_msix_count(dev);
	if (vectors < 2) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
		return (ENXIO);
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	if (pci_alloc_msix(dev, &want) == 0) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}

	device_printf(adapter->dev,"MSIX config error\n");
	return (ENXIO);
}
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int		rid;
	device_t	dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev,"Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->mbxvec) /* we are doing MSIX */
		rid = adapter->mbxvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet	*ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixv_mq_start;
	ifp->if_qflush = ixv_qflush;
#else
	ifp->if_start = ixv_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg, err = 0;
	bool	negotiate = TRUE;

	if (hw->mac.ops.check_link)
		err = hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
	if (err)
		goto out;

	if (hw->mac.ops.setup_link)
		err = hw->mac.ops.setup_link(hw, autoneg,
		    negotiate, adapter->link_up);
out:
	return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}
static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
		struct ixv_dma_alloc *dma, int mapflags)
{
	device_t dev = adapter->dev;
	int		r;

	r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       DBA_ALIGN, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
		       "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
		       "error %u\n", r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixv_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
		       "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
	return;
}
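
/*
 * Usage note (added commentary, not in the original sources): every
 * ixv_dma_malloc() is paired with an ixv_dma_free() on teardown; the
 * descriptor rings built in ixv_allocate_queues() below are the users.
 * The single-segment callback ixv_dmamap_cb() stores the ring's bus
 * address into dma_paddr, which is later programmed into the
 * VFTDBAL/VFTDBAH (and the RX equivalent) base-address registers.
 */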
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each, called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int rsize, tsize, error = 0;
	int txconf = 0, rxconf = 0;

	/* First allocate the top level queue structs */
	if (!(adapter->queues =
	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* First allocate the TX ring struct memory */
	if (!(adapter->tx_rings =
	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		error = ENOMEM;
		goto tx_fail;
	}

	/* Next allocate the RX */
	if (!(adapter->rx_rings =
	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto rx_fail;
	}

	/* For the ring itself */
	tsize = roundup2(adapter->num_tx_desc *
	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

	/*
	 * Now set up the TX queues, txconf is needed to handle the
	 * possibility that things fail midcourse and we need to
	 * undo memory gracefully
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->me = i;

		/* Initialize the TX side lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), txr->me);
		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, tsize,
		    &txr->txdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

		/* Now allocate transmit buffers for the ring */
		if (ixv_allocate_transmit_buffers(txr)) {
			device_printf(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#if __FreeBSD_version >= 800000
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
		    M_WAITOK, &txr->tx_mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up buf ring\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#endif
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
		rxr->me = i;

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), rxr->me);
		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, rsize,
		    &rxr->rxdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate RxDescriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring*/
		if (ixv_allocate_receive_buffers(rxr)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	** Finally set up the queue holding structs
	*/
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];
	}

	return (0);

err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixv_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
}
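
/*
 * Commentary (added, not in the original sources): the txconf/rxconf
 * counters above record how many rings were fully set up before any
 * failure, so the err_rx_desc/err_tx_desc unwind paths free exactly
 * the descriptor areas that were allocated, walking the ring arrays
 * forward while counting the confs back down to zero.
 */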
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixv_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(NULL,		/* parent */
			       1, 0,		/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       IXV_TSO_SIZE,		/* maxsize */
			       32,			/* nsegments */
			       PAGE_SIZE,		/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txr->txtag))) {
		device_printf(dev,"Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
	ixv_free_transmit_structures(adapter);
	return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *txbuf;
	int i;

	/* Clear the old ring contents */
	IXV_TX_LOCK(txr);
	bzero((void *)txr->tx_base,
	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		/* Clear the EOP index */
		txbuf->eop_index = -1;
	}

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixv_setup_transmit_ring(txr);

	return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
2216 ixv_initialize_transmit_units(struct adapter *adapter)
2218 struct tx_ring *txr = adapter->tx_rings;
2219 struct ixgbe_hw *hw = &adapter->hw;
2222 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2223 u64 tdba = txr->txdma.dma_paddr;
2226 /* Set WTHRESH to 8, burst writeback */
2227 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2228 txdctl |= (8 << 16);
2229 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2231 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2232 txdctl |= IXGBE_TXDCTL_ENABLE;
2233 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2235 /* Set the HW Tx Head and Tail indices */
2236 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2237 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2239 /* Setup Transmit Descriptor Cmd Settings */
2240 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2241 txr->watchdog_check = FALSE;
2243 /* Set Ring parameters */
2244 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2245 (tdba & 0x00000000ffffffffULL));
2246 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
2247 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2248 adapter->num_tx_desc *
2249 sizeof(struct ixgbe_legacy_tx_desc));
2250 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2251 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2252 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
2253 }
2255 return;
2256 }
2259 /*********************************************************************
2261 * Free all transmit rings.
2263 **********************************************************************/
2264 static void
2265 ixv_free_transmit_structures(struct adapter *adapter)
2266 {
2267 struct tx_ring *txr = adapter->tx_rings;
2269 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2270 IXV_TX_LOCK(txr);
2271 ixv_free_transmit_buffers(txr);
2272 ixv_dma_free(adapter, &txr->txdma);
2273 IXV_TX_UNLOCK(txr);
2274 IXV_TX_LOCK_DESTROY(txr);
2275 }
2276 free(adapter->tx_rings, M_DEVBUF);
2277 }
2279 /*********************************************************************
2281 * Free transmit ring related data structures.
2283 **********************************************************************/
2284 static void
2285 ixv_free_transmit_buffers(struct tx_ring *txr)
2286 {
2287 struct adapter *adapter = txr->adapter;
2288 struct ixv_tx_buf *tx_buffer;
2289 int i;
2291 INIT_DEBUGOUT("free_transmit_ring: begin");
2293 if (txr->tx_buffers == NULL)
2294 return;
2296 tx_buffer = txr->tx_buffers;
2297 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2298 if (tx_buffer->m_head != NULL) {
2299 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2300 BUS_DMASYNC_POSTWRITE);
2301 bus_dmamap_unload(txr->txtag,
2302 tx_buffer->map);
2303 m_freem(tx_buffer->m_head);
2304 tx_buffer->m_head = NULL;
2305 if (tx_buffer->map != NULL) {
2306 bus_dmamap_destroy(txr->txtag,
2307 tx_buffer->map);
2308 tx_buffer->map = NULL;
2309 }
2310 } else if (tx_buffer->map != NULL) {
2311 bus_dmamap_unload(txr->txtag,
2312 tx_buffer->map);
2313 bus_dmamap_destroy(txr->txtag,
2314 tx_buffer->map);
2315 tx_buffer->map = NULL;
2316 }
2317 }
2318 #if __FreeBSD_version >= 800000
2319 if (txr->br != NULL)
2320 buf_ring_free(txr->br, M_DEVBUF);
2321 #endif
2322 if (txr->tx_buffers != NULL) {
2323 free(txr->tx_buffers, M_DEVBUF);
2324 txr->tx_buffers = NULL;
2326 if (txr->txtag != NULL) {
2327 bus_dma_tag_destroy(txr->txtag);
2328 txr->txtag = NULL;
2329 }
2330 return;
2331 }
2333 /*********************************************************************
2335 * Advanced Context Descriptor setup for VLAN or CSUM
2337 **********************************************************************/
2339 static bool
2340 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2341 {
2342 struct adapter *adapter = txr->adapter;
2343 struct ixgbe_adv_tx_context_desc *TXD;
2344 struct ixv_tx_buf *tx_buffer;
2345 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2346 struct ether_vlan_header *eh;
2347 struct ip *ip;
2348 struct ip6_hdr *ip6;
2349 int ehdrlen, ip_hlen = 0;
2350 u16 etype;
2351 u8 ipproto = 0;
2352 bool offload = TRUE;
2353 int ctxd = txr->next_avail_desc;
2354 u16 vtag = 0;
2357 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2358 offload = FALSE;
2361 tx_buffer = &txr->tx_buffers[ctxd];
2362 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2364 /*
2365 ** In advanced descriptors the vlan tag must
2366 ** be placed into the descriptor itself.
2367 */
2368 if (mp->m_flags & M_VLANTAG) {
2369 vtag = htole16(mp->m_pkthdr.ether_vtag);
2370 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2371 } else if (offload == FALSE)
2372 return FALSE;
2374 /*
2375 * Determine where frame payload starts.
2376 * Jump over vlan headers if already present,
2377 * helpful for QinQ too.
2378 */
2379 eh = mtod(mp, struct ether_vlan_header *);
2380 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2381 etype = ntohs(eh->evl_proto);
2382 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2383 } else {
2384 etype = ntohs(eh->evl_encap_proto);
2385 ehdrlen = ETHER_HDR_LEN;
2386 }
2388 /* Set the ether header length */
2389 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
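2390 /* (In vlan_macip_lens: the IP header length fills bits 8:0, MACLEN bits 15:9, and the VLAN tag bits 31:16.) */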
2391 switch (etype) {
2392 case ETHERTYPE_IP:
2393 ip = (struct ip *)(mp->m_data + ehdrlen);
2394 ip_hlen = ip->ip_hl << 2;
2395 if (mp->m_len < ehdrlen + ip_hlen)
2396 return FALSE; /* failure */
2397 ipproto = ip->ip_p;
2398 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2399 break;
2400 case ETHERTYPE_IPV6:
2401 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2402 ip_hlen = sizeof(struct ip6_hdr);
2403 if (mp->m_len < ehdrlen + ip_hlen)
2404 return FALSE; /* failure */
2405 ipproto = ip6->ip6_nxt;
2406 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2407 break;
2408 default:
2409 offload = FALSE;
2410 break;
2411 }
2413 vlan_macip_lens |= ip_hlen;
2414 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2416 switch (ipproto) {
2417 case IPPROTO_TCP:
2418 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2419 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2420 break;
2422 case IPPROTO_UDP:
2423 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2424 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2425 break;
2427 #if __FreeBSD_version >= 800000
2428 case IPPROTO_SCTP:
2429 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
2430 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2431 break;
2432 #endif
2433 default:
2434 offload = FALSE;
2435 break;
2436 }
2438 /* Now copy bits into descriptor */
2439 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2440 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2441 TXD->seqnum_seed = htole32(0);
2442 TXD->mss_l4len_idx = htole32(0);
2444 tx_buffer->m_head = NULL;
2445 tx_buffer->eop_index = -1;
2447 /* We've consumed the first desc, adjust counters */
2448 if (++ctxd == adapter->num_tx_desc)
2449 ctxd = 0;
2450 txr->next_avail_desc = ctxd;
2451 --txr->tx_avail;
2453 return (offload);
2454 }
2456 /**********************************************************************
2458 * Setup work for hardware segmentation offload (TSO) on
2459 * adapters using advanced tx descriptors
2461 **********************************************************************/
2462 static bool
2463 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2464 {
2465 struct adapter *adapter = txr->adapter;
2466 struct ixgbe_adv_tx_context_desc *TXD;
2467 struct ixv_tx_buf *tx_buffer;
2468 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2469 u32 mss_l4len_idx = 0;
2470 u16 vtag = 0;
2471 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2472 struct ether_vlan_header *eh;
2473 struct ip *ip;
2474 struct tcphdr *th;
2477 /*
2478 * Determine where frame payload starts.
2479 * Jump over vlan headers if already present
2480 */
2481 eh = mtod(mp, struct ether_vlan_header *);
2482 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2483 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2484 else
2485 ehdrlen = ETHER_HDR_LEN;
2487 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2488 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2489 return FALSE;
2491 ctxd = txr->next_avail_desc;
2492 tx_buffer = &txr->tx_buffers[ctxd];
2493 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2495 ip = (struct ip *)(mp->m_data + ehdrlen);
2496 if (ip->ip_p != IPPROTO_TCP)
2497 return FALSE; /* 0 */
2499 ip_hlen = ip->ip_hl << 2;
2500 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2501 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2502 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2503 tcp_hlen = th->th_off << 2;
2504 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
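2505 /* (th_sum was just seeded with the pseudo-header sum, length excluded, so the HW can finish the checksum for each TSO segment.) */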
2506 /* This is used in the transmit desc in encap */
2507 *paylen = mp->m_pkthdr.len - hdrlen;
2509 /* VLAN MACLEN IPLEN */
2510 if (mp->m_flags & M_VLANTAG) {
2511 vtag = htole16(mp->m_pkthdr.ether_vtag);
2512 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2513 }
2515 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2516 vlan_macip_lens |= ip_hlen;
2517 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2519 /* ADV DTYPE TUCMD */
2520 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2521 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2522 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2523 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2527 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2528 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2529 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
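2530 /* (MSS occupies bits 31:16 and the TCP header length bits 15:8 of mss_l4len_idx.) */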
2531 TXD->seqnum_seed = htole32(0);
2532 tx_buffer->m_head = NULL;
2533 tx_buffer->eop_index = -1;
2535 if (++ctxd == adapter->num_tx_desc)
2536 ctxd = 0;
2538 txr->tx_avail--;
2539 txr->next_avail_desc = ctxd;
2540 return TRUE;
2541 }
2544 /**********************************************************************
2546 * Examine each tx_buffer in the used queue. If the hardware is done
2547 * processing the packet then free associated resources. The
2548 * tx_buffer is put back on the free queue.
2550 **********************************************************************/
2551 static bool
2552 ixv_txeof(struct tx_ring *txr)
2553 {
2554 struct adapter *adapter = txr->adapter;
2555 struct ifnet *ifp = adapter->ifp;
2556 u32 first, last, done;
2557 struct ixv_tx_buf *tx_buffer;
2558 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2560 mtx_assert(&txr->tx_mtx, MA_OWNED);
2562 if (txr->tx_avail == adapter->num_tx_desc)
2563 return FALSE;
2565 first = txr->next_to_clean;
2566 tx_buffer = &txr->tx_buffers[first];
2567 /* For cleanup we just use legacy struct */
2568 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2569 last = tx_buffer->eop_index;
2570 if (last == -1)
2571 return FALSE;
2572 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2574 /*
2575 ** Get the index of the first descriptor
2576 ** BEYOND the EOP and call that 'done'.
2577 ** I do this so the comparison in the
2578 ** inner while loop below can be simple
2579 */
2580 if (++last == adapter->num_tx_desc) last = 0;
2581 done = last;
2583 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2584 BUS_DMASYNC_POSTREAD);
2585 /*
2586 ** Only the EOP descriptor of a packet now has the DD
2587 ** bit set, this is what we look for...
2588 */
2589 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2590 /* We clean the range of the packet */
2591 while (first != done) {
2592 tx_desc->upper.data = 0;
2593 tx_desc->lower.data = 0;
2594 tx_desc->buffer_addr = 0;
2595 ++txr->tx_avail;
2597 if (tx_buffer->m_head) {
2598 bus_dmamap_sync(txr->txtag,
2599 tx_buffer->map,
2600 BUS_DMASYNC_POSTWRITE);
2601 bus_dmamap_unload(txr->txtag,
2602 tx_buffer->map);
2603 m_freem(tx_buffer->m_head);
2604 tx_buffer->m_head = NULL;
2605 tx_buffer->map = NULL;
2606 }
2607 tx_buffer->eop_index = -1;
2608 txr->watchdog_time = ticks;
2610 if (++first == adapter->num_tx_desc)
2611 first = 0;
2613 tx_buffer = &txr->tx_buffers[first];
2614 tx_desc =
2615 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2616 }
2618 /* See if there is more work now */
2619 last = tx_buffer->eop_index;
2620 if (last != -1) {
2621 eop_desc =
2622 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2623 /* Get next done point */
2624 if (++last == adapter->num_tx_desc) last = 0;
2625 done = last;
2626 } else
2627 break;
2628 }
2629 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2630 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2632 txr->next_to_clean = first;
2634 /*
2635 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2636 * it is OK to send packets. If there are no pending descriptors,
2637 * clear the timeout. Otherwise, if some descriptors have been freed,
2638 * restart the timeout.
2639 */
2640 if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2641 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2642 if (txr->tx_avail == adapter->num_tx_desc) {
2643 txr->watchdog_check = FALSE;
2644 return FALSE;
2645 }
2646 }
2648 return TRUE;
2649 }
2651 /*********************************************************************
2653 * Refresh mbuf buffers for RX descriptor rings
2654 * - now keeps its own state so discards due to resource
2655 * exhaustion are unnecessary, if an mbuf cannot be obtained
2656 * it just returns, keeping its placeholder, thus it can simply
2657 * be recalled to try again.
2659 **********************************************************************/
2660 static void
2661 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2662 {
2663 struct adapter *adapter = rxr->adapter;
2664 bus_dma_segment_t hseg[1];
2665 bus_dma_segment_t pseg[1];
2666 struct ixv_rx_buf *rxbuf;
2667 struct mbuf *mh, *mp;
2668 int i, nsegs, error, cleaned;
2670 i = rxr->next_to_refresh;
2671 cleaned = -1; /* Signify no completions */
2672 while (i != limit) {
2673 rxbuf = &rxr->rx_buffers[i];
2674 if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2675 mh = m_gethdr(M_DONTWAIT, MT_DATA);
2676 if (mh == NULL)
2677 goto update;
2678 mh->m_pkthdr.len = mh->m_len = MHLEN;
2680 mh->m_flags |= M_PKTHDR;
2681 m_adj(mh, ETHER_ALIGN);
2682 /* Get the memory mapping */
2683 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2684 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
2686 printf("GET BUF: dmamap load"
2687 " failure - %d\n", error);
2692 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2693 BUS_DMASYNC_PREREAD);
2694 rxr->rx_base[i].read.hdr_addr =
2695 htole64(hseg[0].ds_addr);
2696 }
2698 if (rxbuf->m_pack == NULL) {
2699 mp = m_getjcl(M_DONTWAIT, MT_DATA,
2700 M_PKTHDR, adapter->rx_mbuf_sz);
2701 if (mp == NULL)
2702 goto update;
2703 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2704 /* Get the memory mapping */
2705 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2706 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
2708 printf("GET BUF: dmamap load"
2709 " failure - %d\n", error);
2714 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2715 BUS_DMASYNC_PREREAD);
2716 rxr->rx_base[i].read.pkt_addr =
2717 htole64(pseg[0].ds_addr);
2718 }
2720 cleaned = i;
2721 /* Calculate next index */
2722 if (++i == adapter->num_rx_desc)
2723 i = 0;
2724 /* This is the work marker for refresh */
2725 rxr->next_to_refresh = i;
2726 }
2727 update:
2728 if (cleaned != -1) /* If we refreshed some, bump tail */
2729 IXGBE_WRITE_REG(&adapter->hw,
2730 IXGBE_VFRDT(rxr->me), cleaned);
2731 return;
2732 }
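2733 /* (Writing RDT hands all refreshed descriptors up to 'cleaned' back to the hardware.) */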
2734 /*********************************************************************
2736 * Allocate memory for rx_buffer structures. Since we use one
2737 * rx_buffer per received packet, the maximum number of rx_buffer's
2738 * that we'll need is equal to the number of receive descriptors
2739 * that we've allocated.
2741 **********************************************************************/
2742 static int
2743 ixv_allocate_receive_buffers(struct rx_ring *rxr)
2744 {
2745 struct adapter *adapter = rxr->adapter;
2746 device_t dev = adapter->dev;
2747 struct ixv_rx_buf *rxbuf;
2748 int i, bsize, error;
2750 bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2751 if (!(rxr->rx_buffers =
2752 (struct ixv_rx_buf *) malloc(bsize,
2753 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2754 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2759 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2760 1, 0, /* alignment, bounds */
2761 BUS_SPACE_MAXADDR, /* lowaddr */
2762 BUS_SPACE_MAXADDR, /* highaddr */
2763 NULL, NULL, /* filter, filterarg */
2764 MSIZE, /* maxsize */
2765 1, /* nsegments */
2766 MSIZE, /* maxsegsize */
2767 0, /* flags */
2768 NULL, /* lockfunc */
2769 NULL, /* lockfuncarg */
2770 &rxr->htag))) {
2771 device_printf(dev, "Unable to create RX head DMA tag\n");
2772 goto fail;
2773 }
2775 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2776 1, 0, /* alignment, bounds */
2777 BUS_SPACE_MAXADDR, /* lowaddr */
2778 BUS_SPACE_MAXADDR, /* highaddr */
2779 NULL, NULL, /* filter, filterarg */
2780 MJUMPAGESIZE, /* maxsize */
2781 1, /* nsegments */
2782 MJUMPAGESIZE, /* maxsegsize */
2783 0, /* flags */
2784 NULL, /* lockfunc */
2785 NULL, /* lockfuncarg */
2786 &rxr->ptag))) {
2787 device_printf(dev, "Unable to create RX packet DMA tag\n");
2788 goto fail;
2789 }
2791 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2792 rxbuf = &rxr->rx_buffers[i];
2793 error = bus_dmamap_create(rxr->htag,
2794 BUS_DMA_NOWAIT, &rxbuf->hmap);
2796 device_printf(dev, "Unable to create RX head map\n");
2799 error = bus_dmamap_create(rxr->ptag,
2800 BUS_DMA_NOWAIT, &rxbuf->pmap);
2802 device_printf(dev, "Unable to create RX pkt map\n");
2810 /* Frees all, but can handle partial completion */
2811 ixv_free_receive_structures(adapter);
2812 return (error);
2813 }
2815 static void
2816 ixv_free_receive_ring(struct rx_ring *rxr)
2817 {
2818 struct adapter *adapter;
2819 struct ixv_rx_buf *rxbuf;
2820 int i;
2822 adapter = rxr->adapter;
2823 for (i = 0; i < adapter->num_rx_desc; i++) {
2824 rxbuf = &rxr->rx_buffers[i];
2825 if (rxbuf->m_head != NULL) {
2826 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2827 BUS_DMASYNC_POSTREAD);
2828 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
2829 rxbuf->m_head->m_flags |= M_PKTHDR;
2830 m_freem(rxbuf->m_head);
2831 }
2832 if (rxbuf->m_pack != NULL) {
2833 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2834 BUS_DMASYNC_POSTREAD);
2835 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
2836 rxbuf->m_pack->m_flags |= M_PKTHDR;
2837 m_freem(rxbuf->m_pack);
2838 }
2839 rxbuf->m_head = NULL;
2840 rxbuf->m_pack = NULL;
2841 }
2842 }
2845 /*********************************************************************
2847 * Initialize a receive ring and its buffers.
2849 **********************************************************************/
2850 static int
2851 ixv_setup_receive_ring(struct rx_ring *rxr)
2852 {
2853 struct adapter *adapter;
2854 device_t dev;
2855 struct ifnet *ifp;
2856 struct ixv_rx_buf *rxbuf;
2857 bus_dma_segment_t pseg[1], hseg[1];
2858 struct lro_ctrl *lro = &rxr->lro;
2859 int rsize, nsegs, error = 0;
2861 adapter = rxr->adapter;
2862 ifp = adapter->ifp;
2863 dev = adapter->dev;
2865 /* Clear the ring contents */
2867 rsize = roundup2(adapter->num_rx_desc *
2868 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2869 bzero((void *)rxr->rx_base, rsize);
2871 /* Free current RX buffer structs and their mbufs */
2872 ixv_free_receive_ring(rxr);
2874 /* Configure header split? */
2875 if (ixv_header_split)
2876 rxr->hdr_split = TRUE;
2878 /* Now replenish the mbufs */
2879 for (int j = 0; j != adapter->num_rx_desc; ++j) {
2880 struct mbuf *mh, *mp;
2882 rxbuf = &rxr->rx_buffers[j];
2883 /*
2884 ** Don't allocate header mbufs if we're not
2885 ** doing header split; it would be wasteful.
2886 */
2887 if (rxr->hdr_split == FALSE)
2888 goto skip_head;
2890 /* First the header */
2891 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
2892 if (rxbuf->m_head == NULL) {
2893 error = ENOMEM;
2894 goto fail;
2895 }
2896 m_adj(rxbuf->m_head, ETHER_ALIGN);
2897 mh = rxbuf->m_head;
2898 mh->m_len = mh->m_pkthdr.len = MHLEN;
2899 mh->m_flags |= M_PKTHDR;
2900 /* Get the memory mapping */
2901 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2902 rxbuf->hmap, rxbuf->m_head, hseg,
2903 &nsegs, BUS_DMA_NOWAIT);
2904 if (error != 0) /* Nothing elegant to do here */
2905 goto fail;
2906 bus_dmamap_sync(rxr->htag,
2907 rxbuf->hmap, BUS_DMASYNC_PREREAD);
2908 /* Update descriptor */
2909 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
2911 skip_head:
2912 /* Now the payload cluster */
2913 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
2914 M_PKTHDR, adapter->rx_mbuf_sz);
2915 if (rxbuf->m_pack == NULL) {
2916 error = ENOMEM;
2917 goto fail;
2918 }
2919 mp = rxbuf->m_pack;
2920 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2921 /* Get the memory mapping */
2922 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2923 rxbuf->pmap, mp, pseg,
2924 &nsegs, BUS_DMA_NOWAIT);
2927 bus_dmamap_sync(rxr->ptag,
2928 rxbuf->pmap, BUS_DMASYNC_PREREAD);
2929 /* Update descriptor */
2930 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
2931 }
2934 /* Setup our descriptor indices */
2935 rxr->next_to_check = 0;
2936 rxr->next_to_refresh = 0;
2937 rxr->lro_enabled = FALSE;
2938 rxr->rx_split_packets = 0;
2939 rxr->rx_bytes = 0;
2940 rxr->discard = FALSE;
2941 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2942 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2944 /*
2945 ** Now set up the LRO interface:
2946 */
2947 if (ifp->if_capenable & IFCAP_LRO) {
2948 int err = tcp_lro_init(lro);
2950 device_printf(dev, "LRO Initialization failed!\n");
2953 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
2954 rxr->lro_enabled = TRUE;
2955 lro->ifp = adapter->ifp;
2956 }
2958 return (0);
2960 fail:
2962 ixv_free_receive_ring(rxr);
2963 return (error);
2964 }
2967 /*********************************************************************
2969 * Initialize all receive rings.
2971 **********************************************************************/
2972 static int
2973 ixv_setup_receive_structures(struct adapter *adapter)
2974 {
2975 struct rx_ring *rxr = adapter->rx_rings;
2976 int j;
2978 for (j = 0; j < adapter->num_queues; j++, rxr++)
2979 if (ixv_setup_receive_ring(rxr))
2980 goto fail;
2982 return (0);
2983 fail:
2984 /*
2985 * Free RX buffers allocated so far, we will only handle
2986 * the rings that completed, the failing case will have
2987 * cleaned up for itself. 'j' failed, so it's the terminus.
2988 */
2989 for (int i = 0; i < j; ++i) {
2990 rxr = &adapter->rx_rings[i];
2991 ixv_free_receive_ring(rxr);
2992 }
2994 return (ENOBUFS);
2995 }
2997 /*********************************************************************
2999 * Setup receive registers and features.
3001 **********************************************************************/
3002 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3004 static void
3005 ixv_initialize_receive_units(struct adapter *adapter)
3006 {
3007 struct rx_ring *rxr = adapter->rx_rings;
3008 struct ixgbe_hw *hw = &adapter->hw;
3009 struct ifnet *ifp = adapter->ifp;
3010 u32 bufsz, fctrl, rxcsum, hlreg;
3013 /* Enable broadcasts */
3014 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3015 fctrl |= IXGBE_FCTRL_BAM;
3016 fctrl |= IXGBE_FCTRL_DPF;
3017 fctrl |= IXGBE_FCTRL_PMCF;
3018 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3020 /* Set for Jumbo Frames? */
3021 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3022 if (ifp->if_mtu > ETHERMTU) {
3023 hlreg |= IXGBE_HLREG0_JUMBOEN;
3024 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3026 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3027 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3029 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3031 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3032 u64 rdba = rxr->rxdma.dma_paddr;
3033 u32 reg, rxdctl;
3035 /* Do the queue enabling first */
3036 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3037 rxdctl |= IXGBE_RXDCTL_ENABLE;
3038 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
3039 for (int k = 0; k < 10; k++) {
3040 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
3041 IXGBE_RXDCTL_ENABLE)
3042 break;
3043 else
3044 msec_delay(1);
3045 }
3048 /* Setup the Base and Length of the Rx Descriptor Ring */
3049 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3050 (rdba & 0x00000000ffffffffULL));
3051 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3052 (rdba >> 32));
3053 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3054 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3056 /* Set up the SRRCTL register */
3057 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
3058 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3059 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3061 if (rxr->hdr_split) {
3062 /* Use a standard mbuf for the header */
3063 reg |= ((IXV_RX_HDR <<
3064 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3065 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3066 reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3067 } else
3068 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3069 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
3071 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3072 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
3073 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
3074 adapter->num_rx_desc - 1);
3075 }
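3076 /* (Head 0 and tail num_rx_desc - 1 mark the entire ring as hardware-owned at start.) */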
3077 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3079 if (ifp->if_capenable & IFCAP_RXCSUM)
3080 rxcsum |= IXGBE_RXCSUM_PCSD;
3082 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3083 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3085 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3087 return;
3088 }
3090 /*********************************************************************
3092 * Free all receive rings.
3094 **********************************************************************/
3095 static void
3096 ixv_free_receive_structures(struct adapter *adapter)
3097 {
3098 struct rx_ring *rxr = adapter->rx_rings;
3100 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3101 struct lro_ctrl *lro = &rxr->lro;
3102 ixv_free_receive_buffers(rxr);
3103 /* Free LRO memory */
3104 tcp_lro_free(lro);
3105 /* Free the ring memory as well */
3106 ixv_dma_free(adapter, &rxr->rxdma);
3107 }
3109 free(adapter->rx_rings, M_DEVBUF);
3110 }
3113 /*********************************************************************
3115 * Free receive ring data structures
3117 **********************************************************************/
3118 static void
3119 ixv_free_receive_buffers(struct rx_ring *rxr)
3120 {
3121 struct adapter *adapter = rxr->adapter;
3122 struct ixv_rx_buf *rxbuf;
3124 INIT_DEBUGOUT("free_receive_structures: begin");
3126 /* Cleanup any existing buffers */
3127 if (rxr->rx_buffers != NULL) {
3128 for (int i = 0; i < adapter->num_rx_desc; i++) {
3129 rxbuf = &rxr->rx_buffers[i];
3130 if (rxbuf->m_head != NULL) {
3131 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3132 BUS_DMASYNC_POSTREAD);
3133 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3134 rxbuf->m_head->m_flags |= M_PKTHDR;
3135 m_freem(rxbuf->m_head);
3136 }
3137 if (rxbuf->m_pack != NULL) {
3138 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3139 BUS_DMASYNC_POSTREAD);
3140 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3141 rxbuf->m_pack->m_flags |= M_PKTHDR;
3142 m_freem(rxbuf->m_pack);
3143 }
3144 rxbuf->m_head = NULL;
3145 rxbuf->m_pack = NULL;
3146 if (rxbuf->hmap != NULL) {
3147 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3148 rxbuf->hmap = NULL;
3149 }
3150 if (rxbuf->pmap != NULL) {
3151 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3152 rxbuf->pmap = NULL;
3153 }
3154 }
3155 if (rxr->rx_buffers != NULL) {
3156 free(rxr->rx_buffers, M_DEVBUF);
3157 rxr->rx_buffers = NULL;
3158 }
3159 }
3161 if (rxr->htag != NULL) {
3162 bus_dma_tag_destroy(rxr->htag);
3163 rxr->htag = NULL;
3164 }
3165 if (rxr->ptag != NULL) {
3166 bus_dma_tag_destroy(rxr->ptag);
3167 rxr->ptag = NULL;
3168 }
3170 return;
3171 }
3173 static __inline void
3174 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3175 {
3177 /*
3178 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
3179 * should be computed by hardware. Also it should not have VLAN tag in
3180 * ethernet header.
3181 */
3182 if (rxr->lro_enabled &&
3183 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3184 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3185 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3186 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3187 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3188 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3189 /*
3190 * Send to the stack if:
3191 ** - LRO not enabled, or
3192 ** - no LRO resources, or
3193 ** - lro enqueue fails
3194 */
3195 if (rxr->lro.lro_cnt != 0)
3196 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3197 return;
3198 }
3199 (*ifp->if_input)(ifp, m);
3200 }
3202 static __inline void
3203 ixv_rx_discard(struct rx_ring *rxr, int i)
3204 {
3205 struct adapter *adapter = rxr->adapter;
3206 struct ixv_rx_buf *rbuf;
3207 struct mbuf *mh, *mp;
3209 rbuf = &rxr->rx_buffers[i];
3210 if (rbuf->fmp != NULL) /* Partial chain ? */
3211 m_freem(rbuf->fmp);
3213 mh = rbuf->m_head;
3214 mp = rbuf->m_pack;
3216 /* Reuse loaded DMA map and just update mbuf chain */
3217 mh->m_len = MHLEN;
3218 mh->m_flags |= M_PKTHDR;
3219 mh->m_next = NULL;
3221 mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
3222 mp->m_data = mp->m_ext.ext_buf;
3223 mp->m_next = NULL;
3224 return;
3225 }
3228 /*********************************************************************
3230 * This routine executes in interrupt context. It replenishes
3231 * the mbufs in the descriptor and sends data which has been
3232 * dma'ed into host memory to upper layer.
3234 * We loop at most count times if count is > 0, or until done if
3235 * count < 0.
3237 * Return TRUE for more work, FALSE for all clean.
3238 *********************************************************************/
3239 static bool
3240 ixv_rxeof(struct ix_queue *que, int count)
3241 {
3242 struct adapter *adapter = que->adapter;
3243 struct rx_ring *rxr = que->rxr;
3244 struct ifnet *ifp = adapter->ifp;
3245 struct lro_ctrl *lro = &rxr->lro;
3246 struct lro_entry *queued;
3247 int i, nextp, processed = 0;
3248 u32 staterr = 0;
3249 union ixgbe_adv_rx_desc *cur;
3250 struct ixv_rx_buf *rbuf, *nbuf;
3252 IXV_RX_LOCK(rxr);
3254 for (i = rxr->next_to_check; count != 0;) {
3255 struct mbuf *sendmp, *mh, *mp;
3256 u32 ptype;
3257 u16 hlen, plen, hdr, vtag;
3258 bool eop;
3260 /* Sync the ring. */
3261 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3262 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3264 cur = &rxr->rx_base[i];
3265 staterr = le32toh(cur->wb.upper.status_error);
3267 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
3269 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3276 cur->wb.upper.status_error = 0;
3277 rbuf = &rxr->rx_buffers[i];
3281 plen = le16toh(cur->wb.upper.length);
3282 ptype = le32toh(cur->wb.lower.lo_dword.data) &
3283 IXGBE_RXDADV_PKTTYPE_MASK;
3284 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
3285 vtag = le16toh(cur->wb.upper.vlan);
3286 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3288 /* Make sure all parts of a bad packet are discarded */
3289 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
3292 rxr->rx_discarded++;
3294 rxr->discard = TRUE;
3296 rxr->discard = FALSE;
3297 ixv_rx_discard(rxr, i);
3303 if (nextp == adapter->num_rx_desc)
3305 nbuf = &rxr->rx_buffers[nextp];
3308 /*
3309 ** The header mbuf is ONLY used when header
3310 ** split is enabled, otherwise we get normal
3311 ** behavior, ie, both header and payload
3312 ** are DMA'd into the payload buffer.
3313 **
3314 ** Rather than using the fmp/lmp global pointers
3315 ** we now keep the head of a packet chain in the
3316 ** buffer struct and pass this along from one
3317 ** descriptor to the next, until we get EOP.
3318 */
3319 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
3320 /* This must be an initial descriptor */
3321 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3322 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3323 if (hlen > IXV_RX_HDR)
3324 hlen = MHLEN;
3325 mh->m_len = hlen;
3326 mh->m_flags |= M_PKTHDR;
3327 mh->m_next = NULL;
3328 mh->m_pkthdr.len = mh->m_len;
3329 /* Null buf pointer so it is refreshed */
3330 rbuf->m_head = NULL;
3331 /*
3332 ** Check the payload length, this
3333 ** could be zero if its a small
3334 ** packet.
3335 */
3336 if (plen > 0) {
3337 mp->m_len = plen;
3338 mp->m_next = NULL;
3339 mp->m_flags &= ~M_PKTHDR;
3340 mh->m_next = mp;
3341 mh->m_pkthdr.len += mp->m_len;
3342 /* Null buf pointer so it is refreshed */
3343 rbuf->m_pack = NULL;
3344 rxr->rx_split_packets++;
3345 }
3346 /*
3347 ** Now create the forward
3348 ** chain so when complete
3349 ** we won't have to.
3350 */
3351 if (eop == 0) {
3352 /* stash the chain head */
3353 nbuf->fmp = mh;
3354 /* Make forward chain */
3355 if (plen)
3356 mp->m_next = nbuf->m_pack;
3357 else
3358 mh->m_next = nbuf->m_pack;
3359 } else {
3360 /* Singlet, prepare to send */
3361 sendmp = mh;
3362 if (staterr & IXGBE_RXD_STAT_VP) {
3363 sendmp->m_pkthdr.ether_vtag = vtag;
3364 sendmp->m_flags |= M_VLANTAG;
3365 }
3366 }
3368 } else {
3369 /*
3370 ** Either no header split, or a
3371 ** secondary piece of a fragmented
3372 ** packet.
3373 */
3374 mp->m_len = plen;
3375 /*
3376 ** See if there is a stored head
3377 ** that determines what we are
3378 */
3379 sendmp = rbuf->fmp;
3380 rbuf->m_pack = rbuf->fmp = NULL;
3381 if (sendmp != NULL) /* secondary frag */
3382 sendmp->m_pkthdr.len += mp->m_len;
3383 else {
3384 /* first desc of a non-ps chain */
3385 sendmp = mp;
3386 sendmp->m_flags |= M_PKTHDR;
3387 sendmp->m_pkthdr.len = mp->m_len;
3388 if (staterr & IXGBE_RXD_STAT_VP) {
3389 sendmp->m_pkthdr.ether_vtag = vtag;
3390 sendmp->m_flags |= M_VLANTAG;
3391 }
3392 }
3393 /* Pass the head pointer on */
3394 if (eop == 0) {
3395 nbuf->fmp = sendmp;
3396 sendmp = NULL;
3397 mp->m_next = nbuf->m_pack;
3398 }
3399 }
3401 /* Sending this frame? */
3402 if (eop) {
3403 sendmp->m_pkthdr.rcvif = ifp;
3404 ifp->if_ipackets++;
3405 rxr->rx_packets++;
3406 /* capture data for AIM */
3407 rxr->bytes += sendmp->m_pkthdr.len;
3408 rxr->rx_bytes += sendmp->m_pkthdr.len;
3409 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3410 ixv_rx_checksum(staterr, sendmp, ptype);
3411 #if __FreeBSD_version >= 800000
3412 sendmp->m_pkthdr.flowid = que->msix;
3413 sendmp->m_flags |= M_FLOWID;
3414 #endif
3415 }
3416 next_desc:
3417 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3418 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3420 /* Advance our pointers to the next descriptor. */
3421 if (++i == adapter->num_rx_desc)
3422 i = 0;
3423 processed++;
3424 /* Now send to the stack or do LRO */
3425 if (sendmp != NULL)
3426 ixv_rx_input(rxr, ifp, sendmp, ptype);
3428 /* Every 8 descriptors we go to refresh mbufs */
3429 if (processed == 8) {
3430 ixv_refresh_mbufs(rxr, i);
3431 processed = 0;
3432 }
3433 }
3435 /* Refresh any remaining buf structs */
3436 if (processed != 0) {
3437 ixv_refresh_mbufs(rxr, i);
3438 processed = 0;
3439 }
3441 rxr->next_to_check = i;
3443 /*
3444 * Flush any outstanding LRO work
3445 */
3446 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
3447 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3448 tcp_lro_flush(lro, queued);
3449 }
3451 IXV_RX_UNLOCK(rxr);
3453 /*
3454 ** We still have cleaning to do?
3455 ** Schedule another interrupt if so.
3456 */
3457 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
3458 ixv_rearm_queues(adapter, (u64)(1 << que->msix));
3459 return TRUE;
3460 }
3462 return FALSE;
3463 }
3466 /*********************************************************************
3468 * Verify that the hardware indicated that the checksum is valid.
3469 * Inform the stack about the status of checksum so that stack
3470 * doesn't spend time verifying the checksum.
3472 *********************************************************************/
3473 static void
3474 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
3475 {
3476 u16 status = (u16) staterr;
3477 u8 errors = (u8) (staterr >> 24);
3478 bool sctp = FALSE;
3480 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3481 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3482 sctp = TRUE;
3484 if (status & IXGBE_RXD_STAT_IPCS) {
3485 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3486 /* IP Checksum Good */
3487 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3488 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3489 } else
3491 mp->m_pkthdr.csum_flags = 0;
3492 }
3493 if (status & IXGBE_RXD_STAT_L4CS) {
3494 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3495 #if __FreeBSD_version >= 800000
3496 if (sctp)
3497 type = CSUM_SCTP_VALID;
3498 #endif
3499 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3500 mp->m_pkthdr.csum_flags |= type;
3501 if (!sctp)
3502 mp->m_pkthdr.csum_data = htons(0xffff);
3503 }
3504 }
3505 return;
3506 }
3508 static void
3509 ixv_setup_vlan_support(struct adapter *adapter)
3510 {
3511 struct ixgbe_hw *hw = &adapter->hw;
3512 u32 ctrl, vid, vfta, retry;
3515 /*
3516 ** We get here thru init_locked, meaning
3517 ** a soft reset, this has already cleared
3518 ** the VFTA and other state, so if there
3519 ** have been no vlans registered, do nothing.
3520 */
3521 if (adapter->num_vlans == 0)
3522 return;
3524 /* Enable the queues */
3525 for (int i = 0; i < adapter->num_queues; i++) {
3526 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3527 ctrl |= IXGBE_RXDCTL_VME;
3528 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3529 }
3531 /*
3532 ** A soft reset zeroes out the VFTA, so
3533 ** we need to repopulate it now.
3534 */
3535 for (int i = 0; i < VFTA_SIZE; i++) {
3536 if (ixv_shadow_vfta[i] == 0)
3537 continue;
3538 vfta = ixv_shadow_vfta[i];
3539 /*
3540 ** Reconstruct the vlan ids
3541 ** based on the bits set in each
3542 ** of the array ints.
3543 */
3544 for ( int j = 0; j < 32; j++) {
3546 if ((vfta & (1 << j)) == 0)
3549 /* Call the shared code mailbox routine */
3550 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3558 /*
3559 ** This routine is run via a vlan config EVENT,
3560 ** it enables us to use the HW Filter table since
3561 ** we can get the vlan id. This just creates the
3562 ** entry in the soft version of the VFTA, init will
3563 ** repopulate the real table.
3564 */
3565 static void
3566 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3567 {
3568 struct adapter *adapter = ifp->if_softc;
3569 u16 index, bit;
3571 if (ifp->if_softc != arg) /* Not our event */
3572 return;
3574 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3575 return;
3577 index = (vtag >> 5) & 0x7F;
3578 bit = vtag & 0x1F;
3579 ixv_shadow_vfta[index] |= (1 << bit);
3580 ++adapter->num_vlans;
3581 /* Re-init to load the changes */
3582 ixv_init(adapter);
3583 }
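3584 /* (Shadow VFTA layout: tag bits 11:5 select one of 128 32-bit words, bits 4:0 the bit within it; e.g. vtag 100 lands in word 3, bit 4.) */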
3585 /*
3586 ** This routine is run via a vlan
3587 ** unconfig EVENT, remove our entry
3588 ** in the soft vfta.
3589 */
3590 static void
3591 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3592 {
3593 struct adapter *adapter = ifp->if_softc;
3594 u16 index, bit;
3596 if (ifp->if_softc != arg)
3597 return;
3599 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3600 return;
3602 index = (vtag >> 5) & 0x7F;
3603 bit = vtag & 0x1F;
3604 ixv_shadow_vfta[index] &= ~(1 << bit);
3605 --adapter->num_vlans;
3606 /* Re-init to load the changes */
3607 ixv_init(adapter);
3608 }
3610 static void
3611 ixv_enable_intr(struct adapter *adapter)
3612 {
3613 struct ixgbe_hw *hw = &adapter->hw;
3614 struct ix_queue *que = adapter->queues;
3615 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3618 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3620 mask = IXGBE_EIMS_ENABLE_MASK;
3621 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3622 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3624 for (int i = 0; i < adapter->num_queues; i++, que++)
3625 ixv_enable_queue(adapter, que->msix);
3627 IXGBE_WRITE_FLUSH(hw);
3629 return;
3630 }
3632 static void
3633 ixv_disable_intr(struct adapter *adapter)
3634 {
3635 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3636 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3637 IXGBE_WRITE_FLUSH(&adapter->hw);
3641 /*
3642 ** Setup the correct IVAR register for a particular MSIX interrupt
3643 ** - entry is the register array entry
3644 ** - vector is the MSIX vector for this queue
3645 ** - type is RX/TX/MISC
3646 */
3647 static void
3648 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3649 {
3650 struct ixgbe_hw *hw = &adapter->hw;
3653 vector |= IXGBE_IVAR_ALLOC_VAL;
3655 if (type == -1) { /* MISC IVAR */
3656 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3657 ivar &= ~0xFF;
3658 ivar |= vector;
3659 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3660 } else { /* RX/TX IVARS */
3661 index = (16 * (entry & 1)) + (8 * type);
3662 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3663 ivar &= ~(0xFF << index);
3664 ivar |= (vector << index);
3665 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
3666 }
3667 }
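3668 /* (Each VTIVAR register packs four 8-bit entries, two queues x RX/TX: entry >> 1 picks the register, 16*(entry & 1) + 8*type the byte lane.) */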
3669 static void
3670 ixv_configure_ivars(struct adapter *adapter)
3671 {
3672 struct ix_queue *que = adapter->queues;
3674 for (int i = 0; i < adapter->num_queues; i++, que++) {
3675 /* First the RX queue entry */
3676 ixv_set_ivar(adapter, i, que->msix, 0);
3677 /* ... and the TX */
3678 ixv_set_ivar(adapter, i, que->msix, 1);
3679 /* Set an initial value in EITR */
3680 IXGBE_WRITE_REG(&adapter->hw,
3681 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3682 }
3684 /* For the Link interrupt */
3685 ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3686 }
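3687 /* (The mailbox/link vector is routed through the MISC IVAR, hence type -1 above.) */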
3689 /*
3690 ** Tasklet handler for MSIX MBX interrupts
3691 ** - do outside interrupt since it might sleep
3692 */
3693 static void
3694 ixv_handle_mbx(void *context, int pending)
3695 {
3696 struct adapter *adapter = context;
3698 ixgbe_check_link(&adapter->hw,
3699 &adapter->link_speed, &adapter->link_up, 0);
3700 ixv_update_link_status(adapter);
3701 }
3703 /*
3704 ** The VF stats registers never have a truly virgin
3705 ** starting point, so this routine tries to make an
3706 ** artificial one, marking ground zero on attach as
3707 ** it stands.
3708 */
3709 static void
3710 ixv_save_stats(struct adapter *adapter)
3711 {
3712 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3713 adapter->stats.saved_reset_vfgprc +=
3714 adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3715 adapter->stats.saved_reset_vfgptc +=
3716 adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3717 adapter->stats.saved_reset_vfgorc +=
3718 adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3719 adapter->stats.saved_reset_vfgotc +=
3720 adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3721 adapter->stats.saved_reset_vfmprc +=
3722 adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3723 }
3724 }
3726 static void
3727 ixv_init_stats(struct adapter *adapter)
3728 {
3729 struct ixgbe_hw *hw = &adapter->hw;
3731 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3732 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3733 adapter->stats.last_vfgorc |=
3734 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3736 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3737 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3738 adapter->stats.last_vfgotc |=
3739 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3741 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3743 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3744 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3745 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3746 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3747 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
3748 }
3750 #define UPDATE_STAT_32(reg, last, count) \
3751 { \
3752 u32 current = IXGBE_READ_REG(hw, reg); \
3753 if (current < last) \
3754 count += 0x100000000LL; \
3755 last = current; \
3756 count &= 0xFFFFFFFF00000000LL; \
3757 count |= current; \
3758 }
3760 #define UPDATE_STAT_36(lsb, msb, last, count) \
3761 { \
3762 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
3763 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
3764 u64 current = ((cur_msb << 32) | cur_lsb); \
3765 if (current < last) \
3766 count += 0x1000000000LL; \
3767 last = current; \
3768 count &= 0xFFFFFFF000000000LL; \
3769 count |= current; \
3770 }
3772 /*
3773 ** ixv_update_stats - Update the board statistics counters.
3774 */
3775 void
3776 ixv_update_stats(struct adapter *adapter)
3777 {
3778 struct ixgbe_hw *hw = &adapter->hw;
3780 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3781 adapter->stats.vfgprc);
3782 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3783 adapter->stats.vfgptc);
3784 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3785 adapter->stats.last_vfgorc, adapter->stats.vfgorc);
3786 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3787 adapter->stats.last_vfgotc, adapter->stats.vfgotc);
3788 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3789 adapter->stats.vfmprc);
3790 }
3792 /**********************************************************************
3794 * This routine is called via the stats sysctl handler.
3795 * This routine provides a way to take a look at important statistics
3796 * maintained by the driver and hardware.
3798 **********************************************************************/
3799 static void
3800 ixv_print_hw_stats(struct adapter * adapter)
3801 {
3802 device_t dev = adapter->dev;
3804 device_printf(dev,"Std Mbuf Failed = %lu\n",
3805 adapter->mbuf_defrag_failed);
3806 device_printf(dev,"Driver dropped packets = %lu\n",
3807 adapter->dropped_pkts);
3808 device_printf(dev, "watchdog timeouts = %ld\n",
3809 adapter->watchdog_events);
3811 device_printf(dev,"Good Packets Rcvd = %llu\n",
3812 (long long)adapter->stats.vfgprc);
3813 device_printf(dev,"Good Packets Xmtd = %llu\n",
3814 (long long)adapter->stats.vfgptc);
3815 device_printf(dev,"TSO Transmissions = %lu\n",
3820 /**********************************************************************
3822 * This routine is called via the debug sysctl handler.
3823 * This routine provides a way to take a look at important statistics
3824 * maintained by the driver and hardware.
3826 **********************************************************************/
3827 static void
3828 ixv_print_debug_info(struct adapter *adapter)
3829 {
3830 device_t dev = adapter->dev;
3831 struct ixgbe_hw *hw = &adapter->hw;
3832 struct ix_queue *que = adapter->queues;
3833 struct rx_ring *rxr;
3834 struct tx_ring *txr;
3835 struct lro_ctrl *lro;
3837 device_printf(dev,"Error Byte Count = %u \n",
3838 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3840 for (int i = 0; i < adapter->num_queues; i++, que++) {
3844 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
3845 que->msix, (long)que->irqs);
3846 device_printf(dev,"RX(%d) Packets Received: %lld\n",
3847 rxr->me, (long long)rxr->rx_packets);
3848 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
3849 rxr->me, (long long)rxr->rx_split_packets);
3850 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3851 rxr->me, (long)rxr->rx_bytes);
3852 device_printf(dev,"RX(%d) LRO Queued= %d\n",
3853 rxr->me, lro->lro_queued);
3854 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3855 rxr->me, lro->lro_flushed);
3856 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3857 txr->me, (long)txr->total_packets);
3858 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3859 txr->me, (long)txr->no_desc_avail);
3860 }
3862 device_printf(dev,"MBX IRQ Handled: %lu\n",
3863 (long)adapter->mbx_irq);
3864 return;
3865 }
3867 static int
3868 ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
3869 {
3870 int error;
3871 int result;
3872 struct adapter *adapter;
3874 result = -1;
3875 error = sysctl_handle_int(oidp, &result, 0, req);
3877 if (error || !req->newptr)
3878 return (error);
3880 if (result == 1) {
3881 adapter = (struct adapter *) arg1;
3882 ixv_print_hw_stats(adapter);
3883 }
3884 return error;
3885 }
3887 static int
3888 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
3889 {
3890 int error, result;
3891 struct adapter *adapter;
3893 result = -1;
3894 error = sysctl_handle_int(oidp, &result, 0, req);
3896 if (error || !req->newptr)
3897 return (error);
3899 if (result == 1) {
3900 adapter = (struct adapter *) arg1;
3901 ixv_print_debug_info(adapter);
3902 }
3903 return error;
3904 }
3906 /*
3907 ** Set flow control using sysctl:
3908 ** Flow control values:
3909 ** 0 - off
3910 ** 1 - rx pause
3911 ** 2 - tx pause
3912 ** 3 - full
3913 */
3914 static int
3915 ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
3916 {
3917 int error;
3918 struct adapter *adapter;
3920 error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
3922 if (error)
3923 return (error);
3925 adapter = (struct adapter *) arg1;
3926 switch (ixv_flow_control) {
3927 case ixgbe_fc_rx_pause:
3928 case ixgbe_fc_tx_pause:
3929 case ixgbe_fc_full:
3930 adapter->hw.fc.requested_mode = ixv_flow_control;
3931 break;
3932 case ixgbe_fc_none:
3933 default:
3934 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3935 }
3937 ixgbe_fc_enable(&adapter->hw, 0);
3941 static void
3942 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
3943 const char *description, int *limit, int value)
3944 {
3945 *limit = value;
3946 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3947 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3948 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3949 }