/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.4";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void	ixv_start(struct ifnet *);
static void	ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int	ixv_mq_start(struct ifnet *, struct mbuf *);
static int	ixv_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void	ixv_qflush(struct ifnet *);
#endif
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_allocate_queues(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static int	ixv_allocate_transmit_buffers(struct tx_ring *);
static int	ixv_setup_transmit_structures(struct adapter *);
static void	ixv_setup_transmit_ring(struct tx_ring *);
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_free_transmit_structures(struct adapter *);
static void	ixv_free_transmit_buffers(struct tx_ring *);

static int	ixv_allocate_receive_buffers(struct rx_ring *);
static int	ixv_setup_receive_structures(struct adapter *);
static int	ixv_setup_receive_ring(struct rx_ring *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_free_receive_structures(struct adapter *);
static void	ixv_free_receive_buffers(struct rx_ring *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static bool	ixv_txeof(struct tx_ring *);
static bool	ixv_rxeof(struct ix_queue *, int);
static void	ixv_rx_checksum(u32, struct mbuf *, u32);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static void	ixv_refresh_mbufs(struct rx_ring *, int);
static int	ixv_xmit(struct tx_ring *, struct mbuf **);
static int	ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int	ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixv_dma_malloc(struct adapter *, bus_size_t,
		    struct ixv_dma_alloc *, int);
static void	ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void	ixv_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static bool	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation,
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector.
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload;
 * it can be a performance win in some workloads, but
 * in others it actually hurts. It is off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
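
/*
** Usage example (added; the values shown are illustrative only):
** TUNABLE_INT values are read when the module loads, so on a stock
** FreeBSD system they would typically be set in /boot/loader.conf:
**
**	hw.ixv.enable_aim="1"
**	hw.ixv.rx_process_limit="256"
**	hw.ixv.txd="2048"
*/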

/*
** Shadow VFTA table: this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
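
/*
** Sketch (added for clarity; indexing assumed from the standard VFTA
** layout): each VLAN id selects one bit in the 32-bit words above, so
** repopulating an entry after a reset looks like:
**
**	u16 index = (vtag >> 5) & 0x7F;	/- which 32-bit word -/
**	u16 bit = vtag & 0x1F;		/- which bit in that word -/
**	ixv_shadow_vfta[index] |= (1 << bit);
*/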

/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on the
 *  adapter based on the PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
	ixv_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index],
			    ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init */
	IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixv_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: it is
	** at this point that the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	ixgbe_reset_hw(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXV_FC_PAUSE;
	hw->fc.low_water[0] = IXV_FC_LO;
	hw->fc.high_water[0] = IXV_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev, "Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixv_rx_process_limit);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);
	return (0);
}

#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	struct mbuf *m_head;
	struct adapter *adapter = txr->adapter;

	IXV_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixv_xmit(txr, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}
	return;
}

/*
 * Legacy TX start - called by the stack; this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXV_TX_LOCK(txr);
		ixv_start_locked(txr, ifp);
		IXV_TX_UNLOCK(txr);
	}
	return;
}

#else

/*
** Multiqueue Transmit driver
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXV_TX_TRYLOCK(txr)) {
		err = ixv_mq_start_locked(ifp, txr, m);
		IXV_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		taskqueue_enqueue(que->tq, &que->que_task);
	}

	return (err);
}

static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued, err = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m != NULL) {
		err = drbr_enqueue(ifp, txr->br, m);
		if (err)
			return (err);
	}
	/* Process the queue */
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = ixv_xmit(txr, &next)) != 0) {
			if (next == NULL)
				drbr_advance(ifp, txr->br);
			else
				drbr_putback(ifp, txr->br, next);
			break;
		}
		drbr_advance(ifp, txr->br);
		enqueued++;
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}

	return (err);
}
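
/*
** Note (added): drbr_peek() above returns the head of the buf_ring
** without removing it. On success ixv_xmit() consumes the mbuf and
** drbr_advance() drops it from the ring; on failure drbr_putback()
** reinstates the (possibly defragmented) mbuf at the head, while
** drbr_advance() discards it if ixv_xmit() already freed it. Keeping
** the frame at the head rather than re-enqueueing at the tail
** preserves frame ordering.
*/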

/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;
	struct mbuf *m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXV_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}

#endif

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXV_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXV_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXV_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, user can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixv_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixv_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXV_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
	return;
}
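
/*
** Locking note (added): ixv_init() is the unlocked wrapper installed
** as ifp->if_init; paths that already hold the core mutex (the ioctl
** handlers, the watchdog) call ixv_init_locked() directly, which is
** why that routine begins with mtx_assert(&adapter->core_mtx, MA_OWNED).
*/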

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static __inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = 1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static __inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static __inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
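
/*
** Register note (added): the three helpers above drive the VF
** interrupt mask registers: a write to VTEIMS unmasks (enables) a
** vector, VTEIMC masks it, and VTEICS sets the cause bit in software,
** which is how ixv_local_timer() rearms the queues.
*/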

static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixv_rxeof(que, adapter->rx_process_limit);
		IXV_TX_LOCK(txr);
		ixv_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixv_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixv_start_locked(txr, ifp);
#endif
		IXV_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}

/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more_tx, more_rx;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	IXV_TX_LOCK(txr);
	more_tx = ixv_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#if __FreeBSD_version < 800000
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
#else
	if (!drbr_empty(adapter->ifp, txr->br))
#endif
		more_tx = 1;
	IXV_TX_UNLOCK(txr);

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
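
/*
** AIM arithmetic, worked example (added): with an average frame of
** 1500 bytes, newitr = 1500 + 24 = 1524; min(1524, 3000) = 1524;
** 1524 falls outside (300, 1200), so it is halved to 762, and 762 is
** then duplicated into both halves of the value written to VTEITR
** (newitr |= newitr << 16).
*/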

static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->mbx_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXV_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXV_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXV_CORE_UNLOCK(adapter);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *  	- return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter  *adapter = txr->adapter;
	u32		olinfo_status = 0, cmd_type_len;
	u32		paylen = 0;
	int             i, j, error, nsegs;
	int		first, last = 0;
	struct mbuf	*m_head;
	bus_dma_segment_t segs[32];
	bus_dmamap_t	map;
	struct ixv_tx_buf *txbuf, *txbuf_mapped;
	union ixgbe_adv_tx_desc *txd = NULL;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	txbuf_mapped = txbuf;
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			adapter->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			adapter->no_tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		adapter->no_tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/*
	** Set up the appropriate offload context:
	** this becomes the first descriptor of
	** a packet.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ixv_tso_setup(txr, m_head, &paylen)) {
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
			++adapter->tso_tx;
		} else
			return (ENXIO);
	} else if (ixv_tx_ctx_setup(txr, m_head))
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	/* Record payload length */
	if (paylen == 0)
		olinfo_status |= m_head->m_pkthdr.len <<
		    IXGBE_ADVTXD_PAYLEN_SHIFT;

	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i; /* descriptor that will get completion IRQ */

		if (++i == adapter->num_tx_desc)
			i = 0;

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop_index = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

	return (0);

xmit_fail:
	bus_dmamap_unload(txr->txtag, txbuf->map);
	return (error);
}
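
/*
** Note (added): the map swap near the end of ixv_xmit()
** ("txr->tx_buffers[first].map = txbuf->map; txbuf->map = map;")
** parks the spare map in the first slot and leaves the loaded map
** with the buffer slot that owns m_head, so the completion path
** unloads the map that actually carries the DMA mapping.
*/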

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}

/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct tx_ring	*txr = adapter->tx_rings;
	int		i;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * If the interface has been paused
	 * then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
		goto out;
	/*
	** Check for time since any descriptor was cleaned
	*/
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		if (txr->watchdog_check == FALSE) {
			IXV_TX_UNLOCK(txr);
			continue;
		}
		if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
			goto hung;
		IXV_TX_UNLOCK(txr);
	}
out:
	ixv_rearm_queues(adapter, adapter->que_mask);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

hung:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
	device_printf(dev, "TX(%d) desc avail = %d, "
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	IXV_TX_UNLOCK(txr);
	ixv_init_locked(adapter);
}

/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	device_t	dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			for (int i = 0; i < adapter->num_queues;
			    i++, txr++)
				txr->watchdog_check = FALSE;
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	u16		pci_cmd_word;

	/*
	** Make sure BUSMASTER is set; on a VM under
	** KVM it may not be, and that will break things.
	*/
	pci_enable_busmaster(dev);
	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	return;
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct		ix_queue *que = adapter->queues;
	int		error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->que_mask |= (u64)1 << que->msix;
		/*
		** Bind the msix vector, and thus the
		** ring, to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);

		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->mbxvec = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}

/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want;

	/* First try MSI/X */
	rid = PCIR_BAR(MSIX_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	want = 2;
	if (pci_alloc_msix(dev, &want) == 0) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
out:
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	device_printf(adapter->dev, "MSIX config error\n");
	return (ENXIO);
}

static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int		rid;
	device_t	dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, which should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}

static void
ixv_free_pci_resources(struct adapter *adapter)
{
	struct		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below.
	** We can detect this reliably by the state of the adapter's
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->mbxvec) /* we are doing MSIX */
		rid = adapter->mbxvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1) : (rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixv_mq_start;
	ifp->if_qflush = ixv_qflush;
#else
	ifp->if_start = ixv_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}

static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg, err = 0;

	if (hw->mac.ops.check_link)
		err = hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
	if (err)
		goto out;

	if (hw->mac.ops.setup_link)
		err = hw->mac.ops.setup_link(hw,
		    autoneg, adapter->link_up);
out:
	return;
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}

static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
	       struct ixv_dma_alloc *dma, int mapflags)
{
	device_t dev = adapter->dev;
	int	 r;

	r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       DBA_ALIGN, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dma_tag_create failed; "
		    "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dmamem_alloc failed; "
		    "error %u\n", r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixv_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dmamap_load failed; "
		    "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}

static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
	return;
}
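
/*
** Note (added): the fail_0/fail_1/fail_2 labels in ixv_dma_malloc()
** unwind the three-step bus_dma setup (tag create -> memory alloc ->
** map load) in reverse order, so each failure point releases exactly
** what was already allocated.
*/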

/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each, called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int rsize, tsize, error = 0;
	int txconf = 0, rxconf = 0;

	/* First allocate the top level queue structs */
	if (!(adapter->queues =
	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Next allocate the TX ring struct memory */
	if (!(adapter->tx_rings =
	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		error = ENOMEM;
		goto tx_fail;
	}

	/* Next allocate the RX */
	if (!(adapter->rx_rings =
	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto rx_fail;
	}

	/* For the ring itself */
	tsize = roundup2(adapter->num_tx_desc *
	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

	/*
	 * Now set up the TX queues; txconf is needed to handle the
	 * possibility that things fail midcourse and we need to
	 * undo memory gracefully
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->me = i;

		/* Initialize the TX side lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), txr->me);
		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, tsize,
		    &txr->txdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

		/* Now allocate transmit buffers for the ring */
		if (ixv_allocate_transmit_buffers(txr)) {
			device_printf(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#if __FreeBSD_version >= 800000
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
		    M_WAITOK, &txr->tx_mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up buf ring\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#endif
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
		rxr->me = i;

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), rxr->me);
		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, rsize,
		    &rxr->rxdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate RxDescriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring */
		if (ixv_allocate_receive_buffers(rxr)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	** Finally set up the queue holding structs
	*/
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];
	}

	return (0);

err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixv_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixv_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(
			       bus_get_dma_tag(adapter->dev),	/* parent */
			       1, 0,		/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       IXV_TSO_SIZE,	/* maxsize */
			       32,		/* nsegments */
			       PAGE_SIZE,	/* maxsegsize */
			       0,		/* flags */
			       NULL,		/* lockfunc */
			       NULL,		/* lockfuncarg */
			       &txr->txtag))) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
	ixv_free_transmit_structures(adapter);
	return (error);
}
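
/*
** Note (added; the nsegments value of 32 is inferred from the
** segs[32] array in ixv_xmit()): the tag above lets one packet map
** to at most 32 segments of up to PAGE_SIZE each, with IXV_TSO_SIZE
** as the total mapping limit for TSO frames.
*/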

/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *txbuf;
	int i;

	/* Clear the old ring contents */
	IXV_TX_LOCK(txr);
	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		/* Clear the EOP index */
		txbuf->eop_index = -1;
	}

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXV_TX_UNLOCK(txr);
}

/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixv_setup_transmit_ring(txr);

	return (0);
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Setup Transmit Descriptor Cmd Settings */
		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
		txr->watchdog_check = FALSE;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
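		/*
		 * Note: both legacy and advanced TX descriptors are 16
		 * bytes, so sizeof(struct ixgbe_legacy_tx_desc) yields
		 * the correct ring length in bytes above.
		 */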
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
	}

	return;
}
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		ixv_free_transmit_buffers(txr);
		ixv_dma_free(adapter, &txr->txdma);
		IXV_TX_UNLOCK(txr);
		IXV_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#if __FreeBSD_version >= 800000
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static bool
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int ehdrlen, ip_hlen = 0;
	u16 etype;
	u8 ipproto = 0;
	bool offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u16 vtag = 0;

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the descriptor itself.
	*/
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		return FALSE;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen)
			return (FALSE);
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		if (mp->m_len < ehdrlen + ip_hlen)
			return (FALSE);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
	default:
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		break;
#if __FreeBSD_version >= 800000
	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		break;
#endif
	default:
		offload = FALSE;
		break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (offload);
}
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static bool
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;	/* 0 */
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
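	/*
	 * Note: the TCP checksum field was just re-seeded with the
	 * length-less IPv4 pseudo-header sum, which is what the
	 * hardware needs so it can finish the checksum for each
	 * segment it generates.
	 */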
	/* This is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static bool
ixv_txeof(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ifnet *ifp = adapter->ifp;
	u32 first, last, done;
	struct ixv_tx_buf *tx_buffer;
	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

	if (txr->tx_avail == adapter->num_tx_desc)
		return FALSE;

	first = txr->next_to_clean;
	tx_buffer = &txr->tx_buffers[first];
	/* For cleanup we just use legacy struct */
	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
	last = tx_buffer->eop_index;
	if (last == -1)
		return FALSE;
	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple
	*/
	if (++last == adapter->num_tx_desc) last = 0;
	done = last;
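	/*
	 * E.g. with num_tx_desc = 256 and an EOP in slot 255, 'done'
	 * wraps to 0, so the cleanup loop can run while (first != done)
	 * with no extra range checks.
	 */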
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** Only the EOP descriptor of a packet now has the DD
	** bit set, this is what we look for...
	*/
	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;

			if (tx_buffer->m_head) {
				bus_dmamap_sync(txr->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
				tx_buffer->map = NULL;
			}
			tx_buffer->eop_index = -1;
			txr->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
		}
		++ifp->if_opackets;
		/* See if there is more work now */
		last = tx_buffer->eop_index;
		if (last != -1) {
			eop_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
			/* Get next done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->watchdog_check = FALSE;
			return FALSE;
		}
	}

	return TRUE;
}
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary; if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
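/*
 * Note: ixv_rxeof() below passes its current clean position as
 * 'limit', so the refresh loop never runs past descriptors that
 * have not yet been processed.
 */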
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter		*adapter = rxr->adapter;
	bus_dma_segment_t	hseg[1];
	bus_dma_segment_t	pseg[1];
	struct ixv_rx_buf	*rxbuf;
	struct mbuf		*mh, *mp;
	int			i, j, nsegs, error;
	bool			refreshed = FALSE;

	i = j = rxr->next_to_refresh;
	/* Get the control variable, one beyond refresh point */
	if (++j == adapter->num_rx_desc)
		j = 0;
	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL)
				goto update;
			mh->m_pkthdr.len = mh->m_len = MHLEN;
			mh->m_flags |= M_PKTHDR;
			m_adj(mh, ETHER_ALIGN);
			/* Get the memory mapping */
			error = bus_dmamap_load_mbuf_sg(rxr->htag,
			    rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("GET BUF: dmamap load"
				    " failure - %d\n", error);
				m_free(mh);
				goto update;
			}
			rxbuf->m_head = mh;
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_PREREAD);
			rxr->rx_base[i].read.hdr_addr =
			    htole64(hseg[0].ds_addr);
		}

		if (rxbuf->m_pack == NULL) {
			mp = m_getjcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR, adapter->rx_mbuf_sz);
			if (mp == NULL)
				goto update;
		} else
			mp = rxbuf->m_pack;

		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("GET BUF: dmamap load"
			    " failure - %d\n", error);
			m_free(mp);
			rxbuf->m_pack = NULL;
			goto update;
		}
		rxbuf->m_pack = mp;
		bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
		    BUS_DMASYNC_PREREAD);
		rxr->rx_base[i].read.pkt_addr =
		    htole64(pseg[0].ds_addr);

		refreshed = TRUE;
		rxr->next_to_refresh = i = j;
		/* Calculate next index */
		if (++j == adapter->num_rx_desc)
			j = 0;
	}
update:
	if (refreshed) /* update tail index */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
	return;
}
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	device_t		dev = adapter->dev;
	struct ixv_rx_buf	*rxbuf;
	int			i, bsize, error;

	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
	if (!(rxr->rx_buffers =
	    (struct ixv_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MSIZE,		/* maxsize */
	    1,			/* nsegments */
	    MSIZE,		/* maxsegsize */
	    0,			/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockfuncarg */
	    &rxr->htag))) {
		device_printf(dev, "Unable to create RX head DMA tag\n");
		goto fail;
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MJUMPAGESIZE,	/* maxsize */
	    1,			/* nsegments */
	    MJUMPAGESIZE,	/* maxsegsize */
	    0,			/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockfuncarg */
	    &rxr->ptag))) {
		device_printf(dev, "Unable to create RX packet DMA tag\n");
		goto fail;
	}

	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &rxbuf->hmap);
		if (error) {
			device_printf(dev, "Unable to create RX head map\n");
			goto fail;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &rxbuf->pmap);
		if (error) {
			device_printf(dev, "Unable to create RX pkt map\n");
			goto fail;
		}
	}

	return (0);

fail:
	/* Frees all, but can handle partial completion */
	ixv_free_receive_structures(adapter);
	return (error);
}
static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ixv_rx_buf	*rxbuf;
	int			i;

	adapter = rxr->adapter;
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
			rxbuf->m_head->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_head);
		}
		if (rxbuf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			rxbuf->m_pack->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_pack);
		}
		rxbuf->m_head = NULL;
		rxbuf->m_pack = NULL;
	}
}
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ifnet		*ifp;
	device_t		dev;
	struct ixv_rx_buf	*rxbuf;
	bus_dma_segment_t	pseg[1], hseg[1];
	struct lro_ctrl		*lro = &rxr->lro;
	int			rsize, nsegs, error = 0;

	adapter = rxr->adapter;
	ifp = adapter->ifp;
	dev = adapter->dev;

	/* Clear the ring contents */
	IXV_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/* Free current RX buffer structs and their mbufs */
	ixv_free_receive_ring(rxr);

	/* Configure header split? */
	if (ixv_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the mbufs */
	for (int j = 0; j != adapter->num_rx_desc; ++j) {
		struct mbuf *mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		/*
		** Dont allocate mbufs if not
		** doing header split, its wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;
	rxr->discard = FALSE;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IXV_RX_UNLOCK(rxr);
	return (0);

fail:
	ixv_free_receive_ring(rxr);
	IXV_RX_UNLOCK(rxr);
	return (error);
}
/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int j;

	for (j = 0; j < adapter->num_queues; j++, rxr++)
		if (ixv_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'j' failed, so it's the terminus.
	 */
	for (int i = 0; i < j; ++i) {
		rxr = &adapter->rx_rings[i];
		ixv_free_receive_ring(rxr);
	}

	return (ENOBUFS);
}
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
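	/*
	 * SRRCTL.BSIZEPKT is expressed in 1 KB units; assuming the
	 * usual IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10, e.g.
	 * 4096 >> 10 = 4, i.e. a 4 KB packet buffer per descriptor.
	 */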
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Do the queue enabling first */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		if (rxr->hdr_split) {
			/* Use a standard mbuf for the header */
			reg |= ((IXV_RX_HDR <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl *lro = &rxr->lro;
		ixv_free_receive_buffers(rxr);
		/* Free LRO memory */
		tcp_lro_free(lro);
		/* Free the ring memory as well */
		ixv_dma_free(adapter, &rxr->rxdma);
	}

	free(adapter->rx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	struct ixv_rx_buf	*rxbuf;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	return;
}
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
	/*
	 * ATM LRO is only for IPv4/TCP packets and the TCP checksum of
	 * the packet should be computed by hardware. Also it should not
	 * have a VLAN tag in the ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * Send to the stack if:
		 *  - LRO not enabled, or
		 *  - no LRO resources, or
		 *  - lro enqueue fails
		 */
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
	IXV_RX_UNLOCK(rxr);
	(*ifp->if_input)(ifp, m);
	IXV_RX_LOCK(rxr);
}
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
	struct ixv_rx_buf *rbuf;

	rbuf = &rxr->rx_buffers[i];

	if (rbuf->fmp != NULL) { /* Partial chain ? */
		rbuf->fmp->m_flags |= M_PKTHDR;
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;
	}

	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so its easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}

	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}

	return;
}
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
	int			i, nextp, processed = 0;
	u32			staterr = 0;
	union ixgbe_adv_rx_desc	*cur;
	struct ixv_rx_buf	*rbuf, *nbuf;

	IXV_RX_LOCK(rxr);

	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		ptype;
		u16		hlen, plen, hdr, vtag;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		vtag = le16toh(cur->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Make sure all parts of a bad packet are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded++;
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			ixv_rx_discard(rxr, i);
			goto next_desc;
		}

		if (!eop) {
			nextp = i + 1;
			if (nextp == adapter->num_rx_desc)
				nextp = 0;
			nbuf = &rxr->rx_buffers[nextp];
			prefetch(nbuf);
		}
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
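		/*
		 * E.g. a packet spanning three descriptors: the first
		 * stores the chain head in the next buffer's fmp, the
		 * middle descriptor passes that head along the same
		 * way, and only at the EOP descriptor is the completed
		 * chain handed to the stack.
		 */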
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IXV_RX_HDR)
				hlen = IXV_RX_HDR;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if its a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we wont have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				if ((adapter->num_vlans) &&
				    (staterr & IXGBE_RXD_STAT_VP)) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL)
			ixv_rx_input(rxr, ifp, sendmp, ptype);

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixv_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixv_rx_unrefreshed(rxr))
		ixv_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IXV_RX_UNLOCK(rxr);

	/*
	** We still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixv_rearm_queues(adapter, (u64)(1 << que->msix));
		return (TRUE);
	}

	return (FALSE);
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that the stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
{
	u16	status = (u16) staterr;
	u8	errors = (u8) (staterr >> 24);
	bool	sctp = FALSE;
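	/*
	 * The descriptor's status/error dword packs the STAT bits in
	 * the low 16 bits and the ERR bits in the top byte; e.g. with
	 * staterr = 0x20000003, status = 0x0003 and errors = 0x20.
	 */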
	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}
	if (status & IXGBE_RXD_STAT_L4CS) {
		u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
			if (!sctp)
				mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlans registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zeros out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan ids
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
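	/*
	 * The shadow VFTA is an array of 32-bit words; e.g. vtag 100
	 * maps to word 100 >> 5 = 3, bit 100 & 0x1F = 4.
	 */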
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}

static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	return;
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
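/*
 * Each 32-bit VTIVAR register holds four 8-bit entries, two per
 * queue (RX in the low byte, TX in the next); e.g. entry 3, type 1
 * (TX) lands at bit offset 16*(3 & 1) + 8*1 = 24 of VTIVAR(3 >> 1).
 */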
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the Link interrupt */
	ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc +=
		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc +=
		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc +=
		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc +=
		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc +=
		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
	}
}
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
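/*
 * E.g. for a 32-bit counter: if last = 0xFFFFFFF0 and the register
 * now reads 0x00000010, the hardware counter wrapped, so 2^32 is
 * added to the accumulated count before its low 32 bits are replaced
 * with the current reading.  The 36-bit variant works the same way
 * with 2^36.
 */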
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
	    adapter->stats.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
	    adapter->stats.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
	    adapter->stats.vfmprc);
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Std Mbuf Failed = %lu\n",
	    adapter->mbuf_defrag_failed);
	device_printf(dev, "Driver dropped packets = %lu\n",
	    adapter->dropped_pkts);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);

	device_printf(dev, "Good Packets Rcvd = %llu\n",
	    (long long)adapter->stats.vfgprc);
	device_printf(dev, "Good Packets Xmtd = %llu\n",
	    (long long)adapter->stats.vfgptc);
	device_printf(dev, "TSO Transmissions = %lu\n",
	    adapter->tso_tx);
}
/**********************************************************************
 *
 *  This routine is called only when em_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	struct rx_ring	*rxr;
	struct tx_ring	*txr;
	struct lro_ctrl	*lro;

	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev, "RX(%d) Split RX Packets: %lld\n",
		    rxr->me, (long long)rxr->rx_split_packets);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev, "RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->mbx_irq);
	return;
}
static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int		error;
	int		result;
	struct adapter	*adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_hw_stats(adapter);
	}
	return error;
}
static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	int		error, result;
	struct adapter	*adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_debug_info(adapter);
	}
	return error;
}
/*
** Set flow control using sysctl:
** Flow control values:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct adapter *adapter;

	error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);

	if (error)
		return (error);

	adapter = (struct adapter *) arg1;
	switch (ixv_flow_control) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = ixv_flow_control;
		break;
	case ixgbe_fc_none:
	default:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
	}

	ixgbe_fc_enable(&adapter->hw);
	return error;
}
static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}