/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.4";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
    /* required last entry */
    {0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
    "Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixv_probe(device_t);
static int      ixv_attach(device_t);
static int      ixv_detach(device_t);
static int      ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void     ixv_start(struct ifnet *);
static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int      ixv_mq_start(struct ifnet *, struct mbuf *);
static int      ixv_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void     ixv_qflush(struct ifnet *);
#endif
static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixv_init(void *);
static void     ixv_init_locked(struct adapter *);
static void     ixv_stop(void *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
static void     ixv_identify_hardware(struct adapter *);
static int      ixv_allocate_pci_resources(struct adapter *);
static int      ixv_allocate_msix(struct adapter *);
static int      ixv_allocate_queues(struct adapter *);
static int      ixv_setup_msix(struct adapter *);
static void     ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_setup_interface(device_t, struct adapter *);
static void     ixv_config_link(struct adapter *);

static int      ixv_allocate_transmit_buffers(struct tx_ring *);
static int      ixv_setup_transmit_structures(struct adapter *);
static void     ixv_setup_transmit_ring(struct tx_ring *);
static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_free_transmit_structures(struct adapter *);
static void     ixv_free_transmit_buffers(struct tx_ring *);

static int      ixv_allocate_receive_buffers(struct rx_ring *);
static int      ixv_setup_receive_structures(struct adapter *);
static int      ixv_setup_receive_ring(struct rx_ring *);
static void     ixv_initialize_receive_units(struct adapter *);
static void     ixv_free_receive_structures(struct adapter *);
static void     ixv_free_receive_buffers(struct rx_ring *);

static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static bool     ixv_txeof(struct tx_ring *);
static bool     ixv_rxeof(struct ix_queue *, int);
static void     ixv_rx_checksum(u32, struct mbuf *, u32);
static void     ixv_set_multi(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static void     ixv_refresh_mbufs(struct rx_ring *, int);
static int      ixv_xmit(struct tx_ring *, struct mbuf **);
static int      ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int      ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixv_dma_malloc(struct adapter *, bus_size_t,
                    struct ixv_dma_alloc *, int);
static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void     ixv_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static bool     ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool     ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
static void     ixv_configure_ivars(struct adapter *);
static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void     ixv_setup_vlan_support(struct adapter *);
static void     ixv_register_vlan(void *, struct ifnet *, u16);
static void     ixv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixv_save_stats(struct adapter *);
static void     ixv_init_stats(struct adapter *);
static void     ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
                    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void     ixv_msix_que(void *);
static void     ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void     ixv_handle_que(void *, int);
static void     ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/
static device_method_t ixv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, ixv_probe),
    DEVMETHOD(device_attach, ixv_attach),
    DEVMETHOD(device_detach, ixv_detach),
    DEVMETHOD(device_shutdown, ixv_shutdown),
    DEVMETHOD_END
};

static driver_t ixv_driver = {
    "ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation,
** meaning the interrupt rate is varied
** over time based on the traffic seen
** on that interrupt vector.
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts, so it is off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
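/*
 * Illustrative example (not part of the original source): the tunables
 * above are normally set from /boot/loader.conf before the module loads,
 * e.g.:
 *
 *   hw.ixv.enable_aim=1
 *   hw.ixv.rx_process_limit=256
 *   hw.ixv.txd=2048
 *   hw.ixv.rxd=2048
 *
 * Descriptor counts outside the MIN_TXD/MAX_TXD (and RXD) bounds, or not
 * a multiple of DBA_ALIGN, are rejected in ixv_attach() and replaced by
 * the defaults.
 */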
/*
** Shadow VFTA table; this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
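/*
 * Sketch of how the shadow table is indexed, assuming the standard ixgbe
 * VFTA layout (an array of 32-bit words with one bit per 12-bit VLAN ID);
 * the VLAN registration paths use this scheme to repopulate the hardware
 * VFTA after a reset:
 *
 *   u16 vtag  = vlan_id & 0xFFF;      // 12-bit VLAN ID
 *   u16 index = (vtag >> 5) & 0x7F;   // which 32-bit word
 *   u16 bit   = vtag & 0x1F;          // which bit within that word
 *   ixv_shadow_vfta[index] |= (1 << bit);
 */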
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  an adapter, based on the PCI vendor/device ID of that adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/
static int
ixv_probe(device_t dev)
{
    ixv_vendor_info_t *ent;

    u16 pci_vendor_id = 0;
    u16 pci_device_id = 0;
    u16 pci_subvendor_id = 0;
    u16 pci_subdevice_id = 0;
    char adapter_name[256];

    pci_vendor_id = pci_get_vendor(dev);
    if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
        return (ENXIO);

    pci_device_id = pci_get_device(dev);
    pci_subvendor_id = pci_get_subvendor(dev);
    pci_subdevice_id = pci_get_subdevice(dev);

    ent = ixv_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id) &&

            ((pci_subvendor_id == ent->subvendor_id) ||
             (ent->subvendor_id == 0)) &&

            ((pci_subdevice_id == ent->subdevice_id) ||
             (ent->subdevice_id == 0))) {
            sprintf(adapter_name, "%s, Version - %s",
                ixv_strings[ent->index],
                ixv_driver_version);
            device_set_desc_copy(dev, adapter_name);
            return (BUS_PROBE_DEFAULT);
        }
        ent++;
    }
    return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_attach(device_t dev)
{
    struct adapter *adapter;
    struct ixgbe_hw *hw;
    int error = 0;

    INIT_DEBUGOUT("ixv_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    adapter = device_get_softc(dev);
    adapter->dev = adapter->osdep.dev = dev;
    hw = &adapter->hw;

    /* Core Lock Init */
    IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

    /* SYSCTL APIs */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_stats, "I", "Statistics");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
        &ixv_enable_aim, 1, "Interrupt Moderation");
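    /*
     * Illustrative usage (not part of the original source): once
     * attached, the handlers registered above appear under the device's
     * sysctl tree and can be read or set with sysctl(8), e.g. for unit 0:
     *
     *   sysctl dev.ix.0.flow_control=3
     *   sysctl dev.ix.0.stats=1
     *
     * The node name follows the driver_t name ("ix") plus the unit number.
     */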
    /* Set up the timer callout */
    callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

    /* Determine hardware revision */
    ixv_identify_hardware(adapter);

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(adapter)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        error = ENXIO;
        goto err_out;
    }

    /* Do descriptor calc and sanity checks */
    if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
        ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
        device_printf(dev, "TXD config issue, using default!\n");
        adapter->num_tx_desc = DEFAULT_TXD;
    } else
        adapter->num_tx_desc = ixv_txd;

    if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
        ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
        device_printf(dev, "RXD config issue, using default!\n");
        adapter->num_rx_desc = DEFAULT_RXD;
    } else
        adapter->num_rx_desc = ixv_rxd;

    /* Allocate our TX/RX Queues */
    if (ixv_allocate_queues(adapter)) {
        error = ENOMEM;
        goto err_out;
    }

    /*
    ** Initialize the shared code; at this
    ** point the mac type is set.
    */
    error = ixgbe_init_shared_code(hw);
    if (error) {
        device_printf(dev, "Shared Code Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    /* Get Hardware Flow Control setting */
    hw->fc.requested_mode = ixgbe_fc_full;
    hw->fc.pause_time = IXV_FC_PAUSE;
    hw->fc.low_water[0] = IXV_FC_LO;
    hw->fc.high_water[0] = IXV_FC_HI;
    hw->fc.send_xon = TRUE;

    error = ixgbe_init_hw(hw);
    if (error) {
        device_printf(dev, "Hardware Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    error = ixv_allocate_msix(adapter);
    if (error)
        goto err_late;

    /* Setup OS specific network interface */
    ixv_setup_interface(dev, adapter);

    /* Sysctl for limiting the amount of work done in the taskqueue */
    ixv_add_rx_process_limit(adapter, "rx_processing_limit",
        "max number of rx packets to process", &adapter->rx_process_limit,
        ixv_rx_process_limit);

    /* Do the stats setup */
    ixv_save_stats(adapter);
    ixv_init_stats(adapter);

    /* Register for VLAN events */
    adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

    INIT_DEBUGOUT("ixv_attach: end");
    return (0);

err_late:
    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);
err_out:
    ixv_free_pci_resources(adapter);
    return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_detach(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    struct ix_queue *que = adapter->queues;

    INIT_DEBUGOUT("ixv_detach: begin");

    /* Make sure VLANs are not using the driver */
    if (adapter->ifp->if_vlantrunk != NULL) {
        device_printf(dev, "Vlan in use, detach first\n");
        return (EBUSY);
    }

    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        if (que->tq) {
            taskqueue_drain(que->tq, &que->que_task);
            taskqueue_free(que->tq);
        }
    }

    /* Drain the Link queue */
    if (adapter->tq) {
        taskqueue_drain(adapter->tq, &adapter->mbx_task);
        taskqueue_free(adapter->tq);
    }

    /* Unregister VLAN events */
    if (adapter->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    if (adapter->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

    ether_ifdetach(adapter->ifp);
    callout_drain(&adapter->timer);
    ixv_free_pci_resources(adapter);
    bus_generic_detach(dev);
    if_free(adapter->ifp);

    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);

    IXV_CORE_LOCK_DESTROY(adapter);
    return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);

    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);
    return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
    struct mbuf *m_head;
    struct adapter *adapter = txr->adapter;

    IXV_TX_LOCK_ASSERT(txr);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;
    if (!adapter->link_active)
        return;

    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (ixv_xmit(txr, &m_head)) {
            if (m_head == NULL)
                break;
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            break;
        }
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, m_head);

        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }
    return;
}

/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        IXV_TX_LOCK(txr);
        ixv_start_locked(txr, ifp);
        IXV_TX_UNLOCK(txr);
    }
    return;
}
#else

/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
    struct adapter *adapter = ifp->if_softc;
    struct ix_queue *que;
    struct tx_ring *txr;
    int i = 0, err = 0;

    /* Which queue to use */
    if ((m->m_flags & M_FLOWID) != 0)
        i = m->m_pkthdr.flowid % adapter->num_queues;

    txr = &adapter->tx_rings[i];
    que = &adapter->queues[i];

    if (IXV_TX_TRYLOCK(txr)) {
        err = ixv_mq_start_locked(ifp, txr, m);
        IXV_TX_UNLOCK(txr);
    } else {
        err = drbr_enqueue(ifp, txr->br, m);
        taskqueue_enqueue(que->tq, &que->que_task);
    }

    return (err);
}
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
    struct adapter *adapter = txr->adapter;
    struct mbuf *next;
    int enqueued, err = 0;

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || adapter->link_active == 0) {
        if (m != NULL)
            err = drbr_enqueue(ifp, txr->br, m);
        return (err);
    }

    /* Do a clean if descriptors are low */
    if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
        ixv_txeof(txr);

    enqueued = 0;
    if (m != NULL) {
        err = drbr_enqueue(ifp, txr->br, m);
        if (err)
            return (err);
    }

    /* Process the queue */
    while ((next = drbr_peek(ifp, txr->br)) != NULL) {
        if ((err = ixv_xmit(txr, &next)) != 0) {
            if (next == NULL)
                drbr_advance(ifp, txr->br);
            else
                drbr_putback(ifp, txr->br, next);
            break;
        }
        drbr_advance(ifp, txr->br);
        enqueued++;
        ifp->if_obytes += next->m_pkthdr.len;
        if (next->m_flags & M_MCAST)
            ifp->if_omcasts++;
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, next);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
        if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
    }

    if (enqueued > 0) {
        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }

    return (err);
}
/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;
    struct mbuf *m;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
            m_freem(m);
        IXV_TX_UNLOCK(txr);
    }
    if_qflush(ifp);
}

#endif
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
    struct ifaddr *ifa = (struct ifaddr *) data;
    bool avoid_reset = FALSE;
#endif
    int error = 0;

    switch (command) {

    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET)
            avoid_reset = TRUE;
#endif
#ifdef INET6
        if (ifa->ifa_addr->sa_family == AF_INET6)
            avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
        /*
        ** Calling init results in link renegotiation,
        ** so we avoid doing it when possible.
        */
        if (avoid_reset) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                ixv_init(adapter);
            if (!(ifp->if_flags & IFF_NOARP))
                arp_ifinit(ifp, ifa);
        } else
            error = ether_ioctl(ifp, command, data);
        break;
#endif
    case SIOCSIFMTU:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
        if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
            error = EINVAL;
        } else {
            IXV_CORE_LOCK(adapter);
            ifp->if_mtu = ifr->ifr_mtu;
            adapter->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFFLAGS:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
        IXV_CORE_LOCK(adapter);
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                ixv_init_locked(adapter);
        } else
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixv_stop(adapter);
        adapter->if_flags = ifp->if_flags;
        IXV_CORE_UNLOCK(adapter);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_disable_intr(adapter);
            ixv_set_multi(adapter);
            ixv_enable_intr(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
        break;
    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
        if (mask & IFCAP_HWCSUM)
            ifp->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            ifp->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        VLAN_CAPABILITIES(ifp);
        break;
    }
    default:
        IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}
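/*
 * Illustrative usage (not part of the original source): the SIOCSIFCAP
 * path above is what runs when capabilities are toggled from userland,
 * e.g.:
 *
 *   ifconfig ix0 -txcsum    # flips the IFCAP_HWCSUM bits via the mask
 *   ifconfig ix0 tso4       # toggles IFCAP_TSO4
 *
 * Any toggle that lands while the interface is running triggers a full
 * ixv_init_locked() reinitialization.
 */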
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16
static void
ixv_init_locked(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    device_t dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 mhadd, gpie;

    INIT_DEBUGOUT("ixv_init: begin");
    mtx_assert(&adapter->core_mtx, MA_OWNED);
    hw->adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest mac address, user can use a LAA */
    bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
        IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    hw->addr_ctrl.rar_used_count = 1;

    /* Prepare transmit descriptors and buffers */
    if (ixv_setup_transmit_structures(adapter)) {
        device_printf(dev, "Could not setup transmit structures\n");
        ixv_stop(adapter);
        return;
    }

    ixv_initialize_transmit_units(adapter);

    /* Setup Multicast table */
    ixv_set_multi(adapter);

    /*
    ** Determine the correct mbuf pool
    ** for doing jumbo/headersplit
    */
    if (ifp->if_mtu > ETHERMTU)
        adapter->rx_mbuf_sz = MJUMPAGESIZE;
    else
        adapter->rx_mbuf_sz = MCLBYTES;

    /* Prepare receive descriptors and buffers */
    if (ixv_setup_receive_structures(adapter)) {
        device_printf(dev, "Could not setup receive structures\n");
        ixv_stop(adapter);
        return;
    }

    /* Configure RX settings */
    ixv_initialize_receive_units(adapter);

    /* Enable Enhanced MSIX mode */
    gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* Set the various hardware offload abilities */
    ifp->if_hwassist = 0;
    if (ifp->if_capenable & IFCAP_TSO4)
        ifp->if_hwassist |= CSUM_TSO;
    if (ifp->if_capenable & IFCAP_TXCSUM) {
        ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
        ifp->if_hwassist |= CSUM_SCTP;
#endif
    }

    /* Set MTU size */
    if (ifp->if_mtu > ETHERMTU) {
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(adapter);

    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

    /* Set up MSI/X routing */
    ixv_configure_ivars(adapter);

    /* Set up auto-mask */
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

    /* Stats init */
    ixv_init_stats(adapter);

    /* Config/Enable Link */
    ixv_config_link(adapter);

    /* And now turn on interrupts */
    ixv_enable_intr(adapter);

    /* Now inform the stack we're ready */
    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    return;
}

static void
ixv_init(void *arg)
{
    struct adapter *adapter = arg;

    IXV_CORE_LOCK(adapter);
    ixv_init_locked(adapter);
    IXV_CORE_UNLOCK(adapter);
    return;
}
/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 queue = 1 << vector;
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u64 queue = (u64)(1 << vector);
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
    u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
static void
ixv_handle_que(void *context, int pending)
{
    struct ix_queue *que = context;
    struct adapter *adapter = que->adapter;
    struct tx_ring *txr = que->txr;
    struct ifnet *ifp = adapter->ifp;
    bool more;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        more = ixv_rxeof(que, adapter->rx_process_limit);
        IXV_TX_LOCK(txr);
        ixv_txeof(txr);
#if __FreeBSD_version >= 800000
        if (!drbr_empty(ifp, txr->br))
            ixv_mq_start_locked(ifp, txr, NULL);
#else
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            ixv_start_locked(txr, ifp);
#endif
        IXV_TX_UNLOCK(txr);
        if (more) {
            taskqueue_enqueue(que->tq, &que->que_task);
            return;
        }
    }

    /* Reenable this interrupt */
    ixv_enable_queue(adapter, que->msix);
    return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
    struct ix_queue *que = arg;
    struct adapter *adapter = que->adapter;
    struct tx_ring *txr = que->txr;
    struct rx_ring *rxr = que->rxr;
    bool more_tx, more_rx;
    u32 newitr = 0;

    ixv_disable_queue(adapter, que->msix);
    ++que->irqs;

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    IXV_TX_LOCK(txr);
    more_tx = ixv_txeof(txr);
    /*
    ** Make certain that if the stack
    ** has anything queued the task gets
    ** scheduled to handle it.
    */
#if __FreeBSD_version < 800000
    if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
#else
    if (!drbr_empty(adapter->ifp, txr->br))
#endif
        more_tx = 1;
    IXV_TX_UNLOCK(txr);

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    /* Do AIM now? */

    if (ixv_enable_aim == FALSE)
        goto no_calc;
    /*
    ** Do Adaptive Interrupt Moderation:
    **  - Write out last calculated setting
    **  - Calculate based on average size over
    **    the last interval.
    */
    if (que->eitr_setting)
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VTEITR(que->msix),
            que->eitr_setting);

    que->eitr_setting = 0;

    /* Idle, do nothing */
    if ((txr->bytes == 0) && (rxr->bytes == 0))
        goto no_calc;

    if ((txr->bytes) && (txr->packets))
        newitr = txr->bytes/txr->packets;
    if ((rxr->bytes) && (rxr->packets))
        newitr = max(newitr,
            (rxr->bytes / rxr->packets));
    newitr += 24; /* account for hardware frame, crc */

    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200))
        newitr = (newitr / 3);
    else
        newitr = (newitr / 2);

    newitr |= newitr << 16;

    /* save for next interrupt */
    que->eitr_setting = newitr;
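    /*
     * Worked example (illustrative): with 45,000 bytes and 30 packets
     * seen on RX since the last interrupt, the average frame is 1500
     * bytes; newitr = 1500 + 24 = 1524, capped at 3000, then halved
     * (it falls outside the 300-1200 midrange) to 762, and mirrored
     * into the high 16 bits for the next VTEITR write.
     */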
    /* Reset state */
    txr->bytes = 0;
    txr->packets = 0;
    rxr->bytes = 0;
    rxr->packets = 0;

no_calc:
    if (more_tx || more_rx)
        taskqueue_enqueue(que->tq, &que->que_task);
    else /* Reenable this interrupt */
        ixv_enable_queue(adapter, que->msix);
    return;
}
static void
ixv_msix_mbx(void *arg)
{
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 reg;

    ++adapter->mbx_irq;

    /* First get the cause */
    reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

    /* Link status change */
    if (reg & IXGBE_EICR_LSC)
        taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct adapter *adapter = ifp->if_softc;

    INIT_DEBUGOUT("ixv_media_status: begin");
    IXV_CORE_LOCK(adapter);
    ixv_update_link_status(adapter);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!adapter->link_active) {
        IXV_CORE_UNLOCK(adapter);
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (adapter->link_speed) {
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_FDX;
        break;
    }

    IXV_CORE_UNLOCK(adapter);
    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifmedia *ifm = &adapter->media;

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(adapter->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *  - return 0 on success, positive on failure
 *
 **********************************************************************/
static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
    struct adapter *adapter = txr->adapter;
    u32 olinfo_status = 0, cmd_type_len;
    u32 paylen = 0;
    int i, j, error, nsegs;
    int first, last = 0;
    struct mbuf *m_head;
    bus_dma_segment_t segs[32];
    bus_dmamap_t map;
    struct ixv_tx_buf *txbuf, *txbuf_mapped;
    union ixgbe_adv_tx_desc *txd = NULL;

    m_head = *m_headp;

    /* Basic descriptor defines */
    cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
        IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

    if (m_head->m_flags & M_VLANTAG)
        cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

    /*
     * Important to capture the first descriptor
     * used because it will contain the index of
     * the one we tell the hardware to report back
     */
    first = txr->next_avail_desc;
    txbuf = &txr->tx_buffers[first];
    txbuf_mapped = txbuf;
    map = txbuf->map;

    /*
     * Map the packet for DMA.
     */
    error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
        *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

    if (error == EFBIG) {
        struct mbuf *m;

        m = m_defrag(*m_headp, M_NOWAIT);
        if (m == NULL) {
            adapter->mbuf_defrag_failed++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (ENOBUFS);
        }
        *m_headp = m;

        /* Try it again */
        error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
            *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

        if (error == ENOMEM) {
            adapter->no_tx_dma_setup++;
            return (error);
        } else if (error != 0) {
            adapter->no_tx_dma_setup++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (error);
        }
    } else if (error == ENOMEM) {
        adapter->no_tx_dma_setup++;
        return (error);
    } else if (error != 0) {
        adapter->no_tx_dma_setup++;
        m_freem(*m_headp);
        *m_headp = NULL;
        return (error);
    }

    /* Make certain there are enough descriptors */
    if (nsegs > txr->tx_avail - 2) {
        txr->no_desc_avail++;
        error = ENOBUFS;
        goto xmit_fail;
    }
    m_head = *m_headp;

    /*
    ** Set up the appropriate offload context;
    ** this becomes the first descriptor of
    ** a packet.
    */
    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        if (ixv_tso_setup(txr, m_head, &paylen)) {
            cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
            olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
            olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
            olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
        } else
            return (ENXIO);
    } else if (ixv_tx_ctx_setup(txr, m_head))
        olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

    /* Record payload length */
    if (paylen == 0)
        olinfo_status |= m_head->m_pkthdr.len <<
            IXGBE_ADVTXD_PAYLEN_SHIFT;

    i = txr->next_avail_desc;
    for (j = 0; j < nsegs; j++) {
        bus_size_t seglen;
        bus_addr_t segaddr;

        txbuf = &txr->tx_buffers[i];
        txd = &txr->tx_base[i];
        seglen = segs[j].ds_len;
        segaddr = htole64(segs[j].ds_addr);

        txd->read.buffer_addr = segaddr;
        txd->read.cmd_type_len = htole32(txr->txd_cmd |
            cmd_type_len | seglen);
        txd->read.olinfo_status = htole32(olinfo_status);
        last = i; /* descriptor that will get completion IRQ */

        if (++i == adapter->num_tx_desc)
            i = 0;

        txbuf->m_head = NULL;
        txbuf->eop_index = -1;
    }

    txd->read.cmd_type_len |=
        htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
    txr->tx_avail -= nsegs;
    txr->next_avail_desc = i;

    txbuf->m_head = m_head;
    txr->tx_buffers[first].map = txbuf->map;
    txbuf->map = map;
    bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

    /* Set the index of the descriptor that will be marked done */
    txbuf = &txr->tx_buffers[first];
    txbuf->eop_index = last;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /*
     * Advance the Transmit Descriptor Tail (Tdt); this tells the
     * hardware that this frame is available to transmit.
     */
    ++txr->total_packets;
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

    return (0);

xmit_fail:
    bus_dmamap_unload(txr->txtag, txbuf->map);
    return (error);
}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is
 *  updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
    u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    u8 *update_ptr;
    struct ifmultiaddr *ifma;
    int mcnt = 0;
    struct ifnet *ifp = adapter->ifp;

    IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_maddr_rlock(ifp);
#endif
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
            &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
            IXGBE_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
    }
#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_maddr_runlock(ifp);
#endif

    update_ptr = mta;

    ixgbe_update_mc_addr_list(&adapter->hw,
        update_ptr, mcnt, ixv_mc_array_itr, TRUE);

    return;
}
/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    u8 *addr = *update_ptr;
    u8 *newptr;

    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;
    return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
    struct adapter *adapter = arg;
    device_t dev = adapter->dev;
    struct tx_ring *txr = adapter->tx_rings;
    int i;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    ixv_update_link_status(adapter);

    /* Stats Update */
    ixv_update_stats(adapter);

    /*
     * If the interface has been paused
     * then don't do the watchdog check
     */
    if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
        goto out;
    /*
    ** Check for time since any descriptor was cleaned
    */
    for (i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        if (txr->watchdog_check == FALSE) {
            IXV_TX_UNLOCK(txr);
            continue;
        }
        if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
            goto hung;
        IXV_TX_UNLOCK(txr);
    }
out:
    ixv_rearm_queues(adapter, adapter->que_mask);
    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    return;

hung:
    device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
    device_printf(dev, "TX(%d) desc avail = %d,"
        " Next TX to Clean = %d\n",
        txr->me, txr->tx_avail, txr->next_to_clean);
    adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    adapter->watchdog_events++;
    IXV_TX_UNLOCK(txr);
    ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    struct tx_ring *txr = adapter->tx_rings;
    device_t dev = adapter->dev;

    if (adapter->link_up) {
        if (adapter->link_active == FALSE) {
            if (bootverbose)
                device_printf(dev, "Link is up %d Gbps %s \n",
                    ((adapter->link_speed == 128) ? 10 : 1),
                    "Full Duplex");
            adapter->link_active = TRUE;
            if_link_state_change(ifp, LINK_STATE_UP);
        }
    } else { /* Link down */
        if (adapter->link_active == TRUE) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            if_link_state_change(ifp, LINK_STATE_DOWN);
            adapter->link_active = FALSE;
            for (int i = 0; i < adapter->num_queues;
                i++, txr++)
                txr->watchdog_check = FALSE;
        }
    }

    return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
    struct ifnet *ifp;
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;

    ifp = adapter->ifp;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    INIT_DEBUGOUT("ixv_stop: begin\n");
    ixv_disable_intr(adapter);

    /* Tell the stack that the interface is no longer active */
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    ixgbe_reset_hw(hw);
    adapter->hw.adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    u16 pci_cmd_word;

    /*
    ** Make sure BUSMASTER is set; on a VM under
    ** KVM it may not be, and that will break things.
    */
    pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
    if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
        (pci_cmd_word & PCIM_CMD_MEMEN))) {
        INIT_DEBUGOUT("Memory Access and/or Bus Master "
            "bits were not set!\n");
        pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
        pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
    }

    /* Save off the information about this board */
    adapter->hw.vendor_id = pci_get_vendor(dev);
    adapter->hw.device_id = pci_get_device(dev);
    adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
    adapter->hw.subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    adapter->hw.subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    int error, rid, vector = 0;

    for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
        rid = vector + 1;
        que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (que->res == NULL) {
            device_printf(dev, "Unable to allocate"
                " bus resource: que interrupt [%d]\n", vector);
            return (ENXIO);
        }
        /* Set the handler function */
        error = bus_setup_intr(dev, que->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixv_msix_que, que, &que->tag);
        if (error) {
            que->res = NULL;
            device_printf(dev, "Failed to register QUE handler");
            return (error);
        }
#if __FreeBSD_version >= 800504
        bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
        que->msix = vector;
        adapter->que_mask |= (u64)(1 << que->msix);
        /*
        ** Bind the msix vector, and thus the
        ** ring, to the corresponding cpu.
        */
        if (adapter->num_queues > 1)
            bus_bind_intr(dev, que->res, i);

        TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
        que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(adapter->dev));
    }

    /* and Mailbox */
    rid = vector + 1;
    adapter->res = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (!adapter->res) {
        device_printf(dev, "Unable to allocate"
            " bus resource: MBX interrupt [%d]\n", rid);
        return (ENXIO);
    }
    /* Set the mbx handler function */
    error = bus_setup_intr(dev, adapter->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL,
        ixv_msix_mbx, adapter, &adapter->tag);
    if (error) {
        adapter->res = NULL;
        device_printf(dev, "Failed to register LINK handler");
        return (error);
    }
#if __FreeBSD_version >= 800504
    bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
    adapter->mbxvec = vector;
    /* Tasklets for Mailbox */
    TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
    adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
        device_get_nameunit(adapter->dev));

    /*
    ** Due to a broken design QEMU will fail to properly
    ** enable the guest for MSIX unless the vectors in
    ** the table are all set up, so we must rewrite the
    ** ENABLE in the MSIX control register again at this
    ** point to cause it to successfully initialize us.
    */
    if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;
        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    return (0);
}
/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int rid, vectors, want = 2;

    /* First try MSI/X */
    rid = PCIR_BAR(MSIX_BAR);
    adapter->msix_mem = bus_alloc_resource_any(dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (!adapter->msix_mem) {
        device_printf(adapter->dev,
            "Unable to map MSIX table \n");
        goto out;
    }

    vectors = pci_msix_count(dev);
    if (vectors < want) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rid, adapter->msix_mem);
        adapter->msix_mem = NULL;
        goto out;
    }

    /*
    ** Want two vectors: one for a queue,
    ** plus an additional for mailbox.
    */
    if (pci_alloc_msix(dev, &want) == 0) {
        device_printf(adapter->dev,
            "Using MSIX interrupts with %d vectors\n", want);
        return (want);
    }
out:
    device_printf(adapter->dev, "MSIX config error\n");
    return (ENXIO);
}
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
    int rid;
    device_t dev = adapter->dev;

    rid = PCIR_BAR(0);
    adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(adapter->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    adapter->osdep.mem_bus_space_tag =
        rman_get_bustag(adapter->pci_mem);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->pci_mem);
    adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

    adapter->num_queues = 1;
    adapter->hw.back = &adapter->osdep;

    /*
    ** Now setup MSI/X, which should
    ** return us the number of
    ** configured vectors.
    */
    adapter->msix = ixv_setup_msix(adapter);
    if (adapter->msix == ENXIO)
        return (ENXIO);
    return (0);
}
static void
ixv_free_pci_resources(struct adapter *adapter)
{
    struct ix_queue *que = adapter->queues;
    device_t dev = adapter->dev;
    int rid, memrid;

    memrid = PCIR_BAR(MSIX_BAR);

    /*
    ** There is a slight possibility of a failure mode
    ** in attach that will result in entering this function
    ** before interrupt resources have been initialized, and
    ** in that case we do not want to execute the loops below;
    ** we can detect this reliably by the state of the adapter
    ** res pointer.
    */
    if (adapter->res == NULL)
        goto mem;

    /*
    **  Release all msix queue resources:
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        rid = que->msix + 1;
        if (que->tag != NULL) {
            bus_teardown_intr(dev, que->res, que->tag);
            que->tag = NULL;
        }
        if (que->res != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
    }

    /* Clean the Legacy or Link interrupt last */
    if (adapter->mbxvec) /* we are doing MSIX */
        rid = adapter->mbxvec + 1;
    else
        (adapter->msix != 0) ? (rid = 1) : (rid = 0);

    if (adapter->tag != NULL) {
        bus_teardown_intr(dev, adapter->res, adapter->tag);
        adapter->tag = NULL;
    }
    if (adapter->res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
    if (adapter->msix)
        pci_release_msi(dev);

    if (adapter->msix_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            memrid, adapter->msix_mem);

    if (adapter->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), adapter->pci_mem);

    return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
    struct ifnet *ifp;

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    ifp = adapter->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL)
        panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_baudrate = 1000000000;
    ifp->if_init = ixv_init;
    ifp->if_softc = adapter;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
    ifp->if_transmit = ixv_mq_start;
    ifp->if_qflush = ixv_qflush;
#else
    ifp->if_start = ixv_start;
#endif
    ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

    ether_ifattach(ifp, adapter->hw.mac.addr);

    adapter->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
                         |  IFCAP_VLAN_HWTSO
                         |  IFCAP_VLAN_MTU;
    ifp->if_capenable = ifp->if_capabilities;

    /* Don't enable LRO by default */
    ifp->if_capabilities |= IFCAP_LRO;

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
        ixv_media_status);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

    return;
}
static void
ixv_config_link(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 autoneg, err = 0;

    if (hw->mac.ops.check_link)
        err = hw->mac.ops.check_link(hw, &autoneg,
            &adapter->link_up, FALSE);
    if (err)
        return;

    if (hw->mac.ops.setup_link)
        err = hw->mac.ops.setup_link(hw,
            autoneg, adapter->link_up);
    return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    if (error)
        return;
    *(bus_addr_t *) arg = segs->ds_addr;
    return;
}
static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
        struct ixv_dma_alloc *dma, int mapflags)
{
    device_t dev = adapter->dev;
    int r;

    r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
                DBA_ALIGN, 0,           /* alignment, bounds */
                BUS_SPACE_MAXADDR,      /* lowaddr */
                BUS_SPACE_MAXADDR,      /* highaddr */
                NULL, NULL,             /* filter, filterarg */
                size,                   /* maxsize */
                1,                      /* nsegments */
                size,                   /* maxsegsize */
                BUS_DMA_ALLOCNOW,       /* flags */
                NULL,                   /* lockfunc */
                NULL,                   /* lockfuncarg */
                &dma->dma_tag);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dma_tag_create failed; "
            "error %u\n", r);
        goto fail_0;
    }
    r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
            BUS_DMA_NOWAIT, &dma->dma_map);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dmamem_alloc failed; "
            "error %u\n", r);
        goto fail_1;
    }
    r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
            size,
            ixv_dmamap_cb,
            &dma->dma_paddr,
            mapflags | BUS_DMA_NOWAIT);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dmamap_load failed; "
            "error %u\n", r);
        goto fail_2;
    }
    dma->dma_size = size;
    return (0);
fail_2:
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
    bus_dma_tag_destroy(dma->dma_tag);
fail_0:
    dma->dma_map = NULL;
    dma->dma_tag = NULL;
    return (r);
}
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
    bus_dmamap_sync(dma->dma_tag, dma->dma_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dma->dma_tag, dma->dma_map);
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
    bus_dma_tag_destroy(dma->dma_tag);
    return;
}
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each; called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_queue *que;
    struct tx_ring *txr;
    struct rx_ring *rxr;
    int rsize, tsize, error = 0;
    int txconf = 0, rxconf = 0;

    /* First allocate the top level queue structs */
    if (!(adapter->queues =
        (struct ix_queue *) malloc(sizeof(struct ix_queue) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate queue memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* First allocate the TX ring struct memory */
    if (!(adapter->tx_rings =
        (struct tx_ring *) malloc(sizeof(struct tx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate TX ring memory\n");
        error = ENOMEM;
        goto tx_fail;
    }

    /* Next allocate the RX */
    if (!(adapter->rx_rings =
        (struct rx_ring *) malloc(sizeof(struct rx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate RX ring memory\n");
        error = ENOMEM;
        goto rx_fail;
    }

    /* For the ring itself */
    tsize = roundup2(adapter->num_tx_desc *
        sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

    /*
     * Now set up the TX queues; txconf is needed to handle the
     * possibility that things fail midcourse and we need to
     * undo memory gracefully
     */
    for (int i = 0; i < adapter->num_queues; i++, txconf++) {
        /* Set up some basics */
        txr = &adapter->tx_rings[i];
        txr->adapter = adapter;
        txr->me = i;

        /* Initialize the TX side lock */
        snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(dev), txr->me);
        mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, tsize,
            &txr->txdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate TX Descriptor memory\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
        txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
        bzero((void *)txr->tx_base, tsize);

        /* Now allocate transmit buffers for the ring */
        if (ixv_allocate_transmit_buffers(txr)) {
            device_printf(dev,
                "Critical Failure setting up transmit buffers\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#if __FreeBSD_version >= 800000
        /* Allocate a buf ring */
        txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
            M_WAITOK, &txr->tx_mtx);
        if (txr->br == NULL) {
            device_printf(dev,
                "Critical Failure setting up buf ring\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#endif
    }

    /*
     * Next the RX queues...
     */
    rsize = roundup2(adapter->num_rx_desc *
        sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
    for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
        rxr = &adapter->rx_rings[i];
        /* Set up some basics */
        rxr->adapter = adapter;
        rxr->me = i;

        /* Initialize the RX side lock */
        snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(dev), rxr->me);
        mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, rsize,
            &rxr->rxdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate Rx Descriptor memory\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
        rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
        bzero((void *)rxr->rx_base, rsize);

        /* Allocate receive buffers for the ring */
        if (ixv_allocate_receive_buffers(rxr)) {
            device_printf(dev,
                "Critical Failure setting up receive buffers\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
    }

    /*
    ** Finally set up the queue holding structs
    */
    for (int i = 0; i < adapter->num_queues; i++) {
        que = &adapter->queues[i];
        que->adapter = adapter;
        que->txr = &adapter->tx_rings[i];
        que->rxr = &adapter->rx_rings[i];
    }

    return (0);

err_rx_desc:
    for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
        ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
    for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
        ixv_dma_free(adapter, &txr->txdma);
    free(adapter->rx_rings, M_DEVBUF);
rx_fail:
    free(adapter->tx_rings, M_DEVBUF);
tx_fail:
    free(adapter->queues, M_DEVBUF);
fail:
    return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach; setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    device_t dev = adapter->dev;
    struct ixv_tx_buf *txbuf;
    int error, i;

    /*
     * Setup DMA descriptor areas.
     */
    if ((error = bus_dma_tag_create(
                bus_get_dma_tag(adapter->dev),  /* parent */
                1, 0,                   /* alignment, bounds */
                BUS_SPACE_MAXADDR,      /* lowaddr */
                BUS_SPACE_MAXADDR,      /* highaddr */
                NULL, NULL,             /* filter, filterarg */
                IXV_TSO_SIZE,           /* maxsize */
                32,                     /* nsegments */
                PAGE_SIZE,              /* maxsegsize */
                0,                      /* flags */
                NULL,                   /* lockfunc */
                NULL,                   /* lockfuncarg */
                &txr->txtag))) {
        device_printf(dev, "Unable to allocate TX DMA tag\n");
        goto fail;
    }

    if (!(txr->tx_buffers =
        (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
        adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate tx_buffer memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* Create the descriptor buffer dma maps */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
        if (error != 0) {
            device_printf(dev, "Unable to create TX DMA map\n");
            goto fail;
        }
    }

    return 0;
fail:
    /* We free all; it handles the case where we are in the middle */
    ixv_free_transmit_structures(adapter);
    return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    struct ixv_tx_buf *txbuf;
    int i;

    /* Clear the old ring contents */
    IXV_TX_LOCK(txr);
    bzero((void *)txr->tx_base,
        (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
    /* Reset indices */
    txr->next_avail_desc = 0;
    txr->next_to_clean = 0;

    /* Free any existing tx buffers. */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        if (txbuf->m_head != NULL) {
            bus_dmamap_sync(txr->txtag, txbuf->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txtag, txbuf->map);
            m_freem(txbuf->m_head);
            txbuf->m_head = NULL;
        }
        /* Clear the EOP index */
        txbuf->eop_index = -1;
    }

    /* Set number of descriptors available */
    txr->tx_avail = adapter->num_tx_desc;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;

    for (int i = 0; i < adapter->num_queues; i++, txr++)
        ixv_setup_transmit_ring(txr);

    return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
		/* Now enable the queue */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Setup Transmit Descriptor Cmd Settings */
		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
		txr->watchdog_check = FALSE;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
	}

	return;
}
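/*
 * [Illustrative example, not driver code] A minimal, standalone sketch of
 * how the 64-bit ring base address is split across the 32-bit
 * VFTDBAL/VFTDBAH register pair written above. Plain C99; the helper and
 * its names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void
split_ring_base(uint64_t tdba, uint32_t *bal, uint32_t *bah)
{
	*bal = (uint32_t)(tdba & 0x00000000ffffffffULL); /* low 32 -> TDBAL */
	*bah = (uint32_t)(tdba >> 32);			 /* high 32 -> TDBAH */
}

int
main(void)
{
	uint32_t bal, bah;

	split_ring_base(0x0000000123456000ULL, &bal, &bah);
	assert(bal == 0x23456000 && bah == 0x1);
	/* Recombining the halves must yield the original physical address */
	assert((((uint64_t)bah << 32) | bal) == 0x0000000123456000ULL);
	return (0);
}
#endif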
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		ixv_free_transmit_buffers(txr);
		ixv_dma_free(adapter, &txr->txdma);
		IXV_TX_UNLOCK(txr);
		IXV_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#if __FreeBSD_version >= 800000
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static bool
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int ehdrlen, ip_hlen = 0;
	u16 etype;
	u8 ipproto = 0;
	bool offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u16 vtag = 0;

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the descriptor itself.
	*/
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		return FALSE;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen)
			return FALSE;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		if (mp->m_len < ehdrlen + ip_hlen)
			return FALSE;
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
	default:
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		break;
#if __FreeBSD_version >= 800000
	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		break;
#endif
	default:
		offload = FALSE;
		break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (offload);
}
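/*
 * [Illustrative example, not driver code] The consume-and-wrap pattern used
 * above for next_avail_desc, shown standalone: advancing an index around a
 * fixed-size descriptor ring while tracking the free count. The toy struct
 * and names are hypothetical; C99, userspace.
 */
#if 0
#include <assert.h>

#define RING_SIZE 4

struct toy_ring {
	int next_avail;
	int avail;
};

static void
consume_one(struct toy_ring *r)
{
	if (++r->next_avail == RING_SIZE)
		r->next_avail = 0;	/* wrap to the start of the ring */
	--r->avail;
}

int
main(void)
{
	struct toy_ring r = { 3, RING_SIZE };

	consume_one(&r);		/* index 3 wraps back to 0 */
	assert(r.next_avail == 0 && r.avail == RING_SIZE - 1);
	return (0);
}
#endif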
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static bool
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;	/* 0 */
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* This is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
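/*
 * [Illustrative example, not driver code] The header/payload arithmetic
 * done by the TSO path above, standalone: the descriptor's payload length
 * is the packet length minus the combined L2+L3+L4 header length.
 */
#if 0
#include <assert.h>

int
main(void)
{
	int ehdrlen = 14;		/* untagged ethernet header */
	int ip_hlen = 5 << 2;		/* ip_hl = 5 words -> 20 bytes */
	int tcp_hlen = 8 << 2;		/* th_off = 8 words -> 32 bytes */
	int pktlen = 1514;
	int hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	assert(hdrlen == 66);
	assert(pktlen - hdrlen == 1448);	/* TSO payload in this frame */
	return (0);
}
#endif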
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static bool
ixv_txeof(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ifnet *ifp = adapter->ifp;
	u32 first, last, done;
	struct ixv_tx_buf *tx_buffer;
	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

	if (txr->tx_avail == adapter->num_tx_desc)
		return FALSE;

	first = txr->next_to_clean;
	tx_buffer = &txr->tx_buffers[first];
	/* For cleanup we just use legacy struct */
	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
	last = tx_buffer->eop_index;
	if (last == -1)
		return FALSE;
	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple
	*/
	if (++last == adapter->num_tx_desc) last = 0;
	done = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** Only the EOP descriptor of a packet now has the DD
	** bit set, this is what we look for...
	*/
	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;

			if (tx_buffer->m_head) {
				bus_dmamap_sync(txr->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
				tx_buffer->map = NULL;
			}
			tx_buffer->eop_index = -1;
			txr->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
		}
		++ifp->if_opackets;
		/* See if there is more work now */
		last = tx_buffer->eop_index;
		if (last != -1) {
			eop_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
			/* Get next done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->watchdog_check = FALSE;
			return FALSE;
		}
	}

	return TRUE;
}
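/*
 * [Illustrative example, not driver code] The 'done' sentinel used by
 * ixv_txeof() above: 'done' is the index one BEYOND the EOP descriptor, so
 * the cleanup loop is a simple inequality that also handles wrap-around.
 */
#if 0
#include <assert.h>

#define RING_SIZE 8

int
main(void)
{
	int first = 6;			/* next_to_clean */
	int last = 1;			/* EOP of a packet that wrapped */
	int done, cleaned = 0;

	if (++last == RING_SIZE)
		last = 0;
	done = last;			/* one past EOP: index 2 */

	while (first != done) {		/* cleans slots 6, 7, 0, 1 */
		cleaned++;
		if (++first == RING_SIZE)
			first = 0;
	}
	assert(cleaned == 4 && first == 2);
	return (0);
}
#endif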
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state, so discards due to resource
 *     exhaustion are unnecessary; if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, and can simply
 *     be called again later to retry.
 *
 **********************************************************************/
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter *adapter = rxr->adapter;
	bus_dma_segment_t hseg[1];
	bus_dma_segment_t pseg[1];
	struct ixv_rx_buf *rxbuf;
	struct mbuf *mh, *mp;
	int i, j, nsegs, error;
	bool refreshed = FALSE;

	i = j = rxr->next_to_refresh;
	/* Get the control variable, one beyond refresh point */
	if (++j == adapter->num_rx_desc)
		j = 0;
	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL)
				goto update;
			mh->m_pkthdr.len = mh->m_len = MHLEN;
			mh->m_flags |= M_PKTHDR;
			m_adj(mh, ETHER_ALIGN);
			/* Get the memory mapping */
			error = bus_dmamap_load_mbuf_sg(rxr->htag,
			    rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("GET BUF: dmamap load"
				    " failure - %d\n", error);
				m_free(mh);
				goto update;
			}
			rxbuf->m_head = mh;
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_PREREAD);
			rxr->rx_base[i].read.hdr_addr =
			    htole64(hseg[0].ds_addr);
		}

		if (rxbuf->m_pack == NULL) {
			mp = m_getjcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR, adapter->rx_mbuf_sz);
			if (mp == NULL)
				goto update;
		} else
			mp = rxbuf->m_pack;

		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("GET BUF: dmamap load"
			    " failure - %d\n", error);
			m_free(mp);
			rxbuf->m_pack = NULL;
			goto update;
		}
		rxbuf->m_pack = mp;
		bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
		    BUS_DMASYNC_PREREAD);
		rxr->rx_base[i].read.pkt_addr =
		    htole64(pseg[0].ds_addr);

		refreshed = TRUE;
		rxr->next_to_refresh = i = j;
		/* Calculate next index */
		if (++j == adapter->num_rx_desc)
			j = 0;
	}
update:
	if (refreshed) /* update tail index */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
	return;
}
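/*
 * [Illustrative example, not driver code] The two-index scheme used by
 * ixv_refresh_mbufs() above: 'i' is the slot being refreshed and 'j' runs
 * one position ahead as the loop-control variable, so the walk stops one
 * short of 'limit' and never overruns descriptors owned by hardware.
 */
#if 0
#include <assert.h>

#define RING_SIZE 8

int
main(void)
{
	int i, j, limit = 5, refreshed = 0;

	i = j = 6;			/* next_to_refresh */
	if (++j == RING_SIZE)
		j = 0;
	while (j != limit) {		/* refreshes slots 6, 7, 0, 1, 2, 3 */
		refreshed++;
		i = j;
		if (++j == RING_SIZE)
			j = 0;
	}
	assert(refreshed == 6 && i == 4);
	return (0);
}
#endif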
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct adapter *adapter = rxr->adapter;
	device_t dev = adapter->dev;
	struct ixv_rx_buf *rxbuf;
	int i, bsize, error;

	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
	if (!(rxr->rx_buffers =
	    (struct ixv_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Tag for the small header buffers */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MSIZE,		/* maxsize */
	    1,			/* nsegments */
	    MSIZE,		/* maxsegsize */
	    0,			/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockfuncarg */
	    &rxr->htag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	/* Tag for the payload (cluster) buffers */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MJUMPAGESIZE,	/* maxsize */
	    1,			/* nsegments */
	    MJUMPAGESIZE,	/* maxsegsize */
	    0,			/* flags */
	    NULL,		/* lockfunc */
	    NULL,		/* lockfuncarg */
	    &rxr->ptag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &rxbuf->hmap);
		if (error) {
			device_printf(dev, "Unable to create RX head map\n");
			goto fail;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &rxbuf->pmap);
		if (error) {
			device_printf(dev, "Unable to create RX pkt map\n");
			goto fail;
		}
	}

	return (0);

fail:
	/* Frees all, but can handle partial completion */
	ixv_free_receive_structures(adapter);
	return (error);
}
static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
	struct adapter *adapter;
	struct ixv_rx_buf *rxbuf;
	int i;

	adapter = rxr->adapter;
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
			rxbuf->m_head->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_head);
		}
		if (rxbuf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			rxbuf->m_pack->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_pack);
		}
		rxbuf->m_head = NULL;
		rxbuf->m_pack = NULL;
	}
}
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
	struct adapter *adapter;
	struct ifnet *ifp;
	device_t dev;
	struct ixv_rx_buf *rxbuf;
	bus_dma_segment_t pseg[1], hseg[1];
	struct lro_ctrl *lro = &rxr->lro;
	int rsize, nsegs, error = 0;

	adapter = rxr->adapter;
	ifp = adapter->ifp;
	dev = adapter->dev;

	/* Clear the ring contents */
	IXV_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/* Free current RX buffer structs and their mbufs */
	ixv_free_receive_ring(rxr);

	/* Configure header split? */
	if (ixv_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the mbufs */
	for (int j = 0; j != adapter->num_rx_desc; ++j) {
		struct mbuf *mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		/*
		** Don't allocate header mbufs if not
		** doing header split, it's wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;
	rxr->discard = FALSE;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IXV_RX_UNLOCK(rxr);
	return (0);

fail:
	ixv_free_receive_ring(rxr);
	IXV_RX_UNLOCK(rxr);
	return (error);
}
/*********************************************************************
 *  Initialize all receive rings.
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int j;

	for (j = 0; j < adapter->num_queues; j++, rxr++)
		if (ixv_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'j' failed, so it's the terminus.
	 */
	for (int i = 0; i < j; ++i) {
		rxr = &adapter->rx_rings[i];
		ixv_free_receive_ring(rxr);
	}
	return (ENOBUFS);
}
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	u32 bufsz, fctrl, rxcsum, hlreg;

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Do the queue enabling first */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		if (rxr->hdr_split) {
			/* Use a standard mbuf for the header */
			reg |= ((IXV_RX_HDR <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
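/*
 * [Illustrative example, not driver code] Why the buffer size is shifted
 * by IXGBE_SRRCTL_BSIZEPKT_SHIFT above: SRRCTL encodes the packet buffer
 * size in 1KB units, so (assuming the shift is 10) a 2KB or 4KB buffer
 * becomes the field value 2 or 4.
 */
#if 0
#include <assert.h>

#define BSIZEPKT_SHIFT 10	/* assumption: 1KB granularity */

int
main(void)
{
	assert((2048 >> BSIZEPKT_SHIFT) == 2);	/* standard mbuf cluster */
	assert((4096 >> BSIZEPKT_SHIFT) == 4);	/* jumbo-frame buffer */
	return (0);
}
#endif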
/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl *lro = &rxr->lro;
		ixv_free_receive_buffers(rxr);
		/* Free LRO memory */
		tcp_lro_free(lro);
		/* Free the ring memory as well */
		ixv_dma_free(adapter, &rxr->rxdma);
	}

	free(adapter->rx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter *adapter = rxr->adapter;
	struct ixv_rx_buf *rxbuf;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	return;
}
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
	/*
	 * ATM LRO is only for IPv4/TCP packets and the TCP checksum of the
	 * packet should be computed by hardware. Also it should not have
	 * a VLAN tag in its ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * Send to the stack if:
		 **  - LRO not enabled, or
		 **  - no LRO resources, or
		 **  - lro enqueue fails
		 */
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
	(*ifp->if_input)(ifp, m);
}
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
	struct ixv_rx_buf *rbuf;

	rbuf = &rxr->rx_buffers[i];
	if (rbuf->fmp != NULL) {	/* Partial chain ? */
		rbuf->fmp->m_flags |= M_PKTHDR;
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;
	}
	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so it's easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}
	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}
	return;
}
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
	struct adapter *adapter = que->adapter;
	struct rx_ring *rxr = que->rxr;
	struct ifnet *ifp = adapter->ifp;
	struct lro_ctrl *lro = &rxr->lro;
	struct lro_entry *queued;
	int i, nextp, processed = 0;
	u32 staterr = 0;
	union ixgbe_adv_rx_desc *cur;
	struct ixv_rx_buf *rbuf, *nbuf;

	IXV_RX_LOCK(rxr);

	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf *sendmp, *mh, *mp;
		u32 ptype;
		u16 hlen, plen, hdr, vtag;
		bool eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		vtag = le16toh(cur->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Make sure all parts of a bad packet are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded++;
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			ixv_rx_discard(rxr, i);
			goto next_desc;
		}

		if (!eop) {
			nextp = i + 1;
			if (nextp == adapter->num_rx_desc)
				nextp = 0;
			nbuf = &rxr->rx_buffers[nextp];
		}

		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IXV_RX_HDR)
				hlen = IXV_RX_HDR;
			mh->m_flags |= M_PKTHDR;
			mh->m_len = hlen;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if it's a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we won't have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				if ((adapter->num_vlans) &&
				    (staterr & IXGBE_RXD_STAT_VP)) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL)
			ixv_rx_input(rxr, ifp, sendmp, ptype);

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixv_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixv_rx_unrefreshed(rxr))
		ixv_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IXV_RX_UNLOCK(rxr);

	/*
	** We still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixv_rearm_queues(adapter, (u64)(1 << que->msix));
		return TRUE;
	}

	return FALSE;
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that the stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
{
	u16 status = (u16) staterr;
	u8 errors = (u8) (staterr >> 24);
	bool sctp = FALSE;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}
	if (status & IXGBE_RXD_STAT_L4CS) {
		u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
			if (!sctp)
				mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}
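/*
 * [Illustrative example, not driver code] How ixv_rx_checksum() above
 * slices the 32-bit write-back word: the low 16 bits carry the status
 * flags and bits 24-31 carry the error flags.
 */
#if 0
#include <stdint.h>
#include <assert.h>

int
main(void)
{
	uint32_t staterr = 0xA5000003;	/* hypothetical write-back value */
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	assert(status == 0x0003);	/* e.g. DD | EOP in the low bits */
	assert(errors == 0xA5);		/* top byte isolated for checks */
	return (0);
}
#endif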
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, vid, vfta, retry;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
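/*
 * [Illustrative example, not driver code] The shadow-VFTA bit math used by
 * ixv_register_vlan() above: each 32-bit table entry covers 32 VLAN ids,
 * so bits 5..11 of the tag select the word and bits 0..4 select the bit.
 */
#if 0
#include <stdint.h>
#include <assert.h>

int
main(void)
{
	uint16_t vtag = 100;
	uint16_t index = (vtag >> 5) & 0x7F;	/* word 3 */
	uint16_t bit = vtag & 0x1F;		/* bit 4 */
	uint32_t vfta[128] = { 0 };		/* 128 * 32 = 4096 ids */

	vfta[index] |= (1 << bit);
	assert(index == 3 && bit == 4);
	assert(vfta[3] == (1u << 4));
	/* Recover the VLAN id from (index, bit), as the re-init loop does */
	assert((index * 32) + bit == vtag);
	return (0);
}
#endif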
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u16 index, bit;

	if (ifp->if_softc != arg)
		return;
	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	return;
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
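/*
 * [Illustrative example, not driver code] The IVAR byte-lane arithmetic in
 * ixv_set_ivar() above: each 32-bit VTIVAR register holds four 8-bit
 * entries, two queues per register, with RX in the even bytes (type 0)
 * and TX in the odd bytes (type 1).
 */
#if 0
#include <assert.h>

int
main(void)
{
	/* index = (16 * (entry & 1)) + (8 * type) */
	assert((16 * (0 & 1)) + (8 * 0) == 0);	/* queue 0 RX -> bits 0-7 */
	assert((16 * (0 & 1)) + (8 * 1) == 8);	/* queue 0 TX -> bits 8-15 */
	assert((16 * (1 & 1)) + (8 * 0) == 16);	/* queue 1 RX -> bits 16-23 */
	assert((16 * (1 & 1)) + (8 * 1) == 24);	/* queue 1 TX -> bits 24-31 */
	return (0);
}
#endif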
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the Link interrupt */
	ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc +=
		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc +=
		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc +=
		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc +=
		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc +=
		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
	}
}
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
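/*
 * [Illustrative example, not driver code] What UPDATE_STAT_32 above is
 * compensating for: the VF counters are 32-bit and wrap, so when the raw
 * reading drops below the previous one, a carry of 2^32 is added before
 * the low word is replaced.
 */
#if 0
#include <stdint.h>
#include <assert.h>

int
main(void)
{
	uint64_t count = 0xFFFFFFF0ULL;	/* running 64-bit total */
	uint64_t last = 0xFFFFFFF0ULL;	/* previous raw 32-bit reading */
	uint32_t current = 0x10;	/* new raw reading: wrapped */

	if (current < last)
		count += 0x100000000LL;	/* account for one 32-bit rollover */
	last = current;
	count &= 0xFFFFFFFF00000000LL;	/* keep accumulated upper bits */
	count |= current;		/* splice in the new low word */

	assert(count == 0x100000010ULL);
	return (0);
}
#endif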
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
	    adapter->stats.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
	    adapter->stats.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
	    adapter->stats.vfmprc);
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Std Mbuf Failed = %lu\n",
	    adapter->mbuf_defrag_failed);
	device_printf(dev, "Driver dropped packets = %lu\n",
	    adapter->dropped_pkts);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);

	device_printf(dev, "Good Packets Rcvd = %llu\n",
	    (long long)adapter->stats.vfgprc);
	device_printf(dev, "Good Packets Xmtd = %llu\n",
	    (long long)adapter->stats.vfgptc);
	device_printf(dev, "TSO Transmissions = %lu\n",
	    adapter->tso_tx);
}
/**********************************************************************
 *
 *  This routine is called only when em_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	struct rx_ring *rxr;
	struct tx_ring *txr;
	struct lro_ctrl *lro;

	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev, "RX(%d) Split RX Packets: %lld\n",
		    rxr->me, (long long)rxr->rx_split_packets);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev, "RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->mbx_irq);
	return;
}
static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *) arg1;
	int error, result = -1;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);
	if (result == 1)
		ixv_print_hw_stats(adapter);
	return (error);
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *) arg1;
	int error, result = -1;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);
	if (result == 1)
		ixv_print_debug_info(adapter);
	return (error);
}
/*
** Set flow control using sysctl:
** Flow control values:
**	0 - off,  1 - rx pause,  2 - tx pause,  3 - full
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	int error;

	error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
	if (error)
		return (error);

	adapter = (struct adapter *) arg1;
	switch (ixv_flow_control) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = ixv_flow_control;
		break;
	case ixgbe_fc_none:
	default:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
	}

	ixgbe_fc_enable(&adapter->hw);
	return (error);
}
static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}