/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.4";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
    /* required last entry */
    {0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
    "Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixv_probe(device_t);
static int      ixv_attach(device_t);
static int      ixv_detach(device_t);
static int      ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void     ixv_start(struct ifnet *);
static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int      ixv_mq_start(struct ifnet *, struct mbuf *);
static int      ixv_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void     ixv_qflush(struct ifnet *);
#endif
static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixv_init(void *);
static void     ixv_init_locked(struct adapter *);
static void     ixv_stop(void *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
static void     ixv_identify_hardware(struct adapter *);
static int      ixv_allocate_pci_resources(struct adapter *);
static int      ixv_allocate_msix(struct adapter *);
static int      ixv_allocate_queues(struct adapter *);
static int      ixv_setup_msix(struct adapter *);
static void     ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_setup_interface(device_t, struct adapter *);
static void     ixv_config_link(struct adapter *);

static int      ixv_allocate_transmit_buffers(struct tx_ring *);
static int      ixv_setup_transmit_structures(struct adapter *);
static void     ixv_setup_transmit_ring(struct tx_ring *);
static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_free_transmit_structures(struct adapter *);
static void     ixv_free_transmit_buffers(struct tx_ring *);

static int      ixv_allocate_receive_buffers(struct rx_ring *);
static int      ixv_setup_receive_structures(struct adapter *);
static int      ixv_setup_receive_ring(struct rx_ring *);
static void     ixv_initialize_receive_units(struct adapter *);
static void     ixv_free_receive_structures(struct adapter *);
static void     ixv_free_receive_buffers(struct rx_ring *);

static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static bool     ixv_txeof(struct tx_ring *);
static bool     ixv_rxeof(struct ix_queue *, int);
static void     ixv_rx_checksum(u32, struct mbuf *, u32);
static void     ixv_set_multi(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static void     ixv_refresh_mbufs(struct rx_ring *, int);
static int      ixv_xmit(struct tx_ring *, struct mbuf **);
static int      ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int      ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixv_dma_malloc(struct adapter *, bus_size_t,
                    struct ixv_dma_alloc *, int);
static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void     ixv_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static bool     ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool     ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
static void     ixv_configure_ivars(struct adapter *);
static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void     ixv_setup_vlan_support(struct adapter *);
static void     ixv_register_vlan(void *, struct ifnet *, u16);
static void     ixv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixv_save_stats(struct adapter *);
static void     ixv_init_stats(struct adapter *);
static void     ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
                    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void     ixv_msix_que(void *);
static void     ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void     ixv_handle_que(void *, int);
static void     ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, ixv_probe),
    DEVMETHOD(device_attach, ixv_attach),
    DEVMETHOD(device_detach, ixv_detach),
    DEVMETHOD(device_shutdown, ixv_shutdown),
    DEVMETHOD_END
};

static driver_t ixv_driver = {
    "ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
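/*
 * All of the hw.ixv.* TUNABLE_INT knobs below are read at module load
 * time, so they can be set from /boot/loader.conf, for example:
 *
 *   hw.ixv.enable_aim=1
 *   hw.ixv.rx_process_limit=256
 */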
/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
/*
** Header split: this causes the hardware to DMA
** the header into a separate mbuf from the payload;
** it can be a performance win in some workloads, but
** in others it actually hurts, so it is off by default.
*/
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
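/*
 * Sanity math used in ixv_attach(): the ring byte size
 * (descriptor count * 16-byte descriptor) must be a multiple of
 * DBA_ALIGN. For example, assuming DBA_ALIGN is 128 as defined in
 * ixv.h, a ring of 2048 descriptors is 2048 * 16 = 32768 bytes,
 * which divides evenly and passes the check.
 */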
/*
** Shadow VFTA table, this is needed because
** the real VLAN filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
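/*
 * The VFTA follows the standard ixgbe layout of 32-bit words where
 * VLAN id 'v' maps to word (v >> 5) and bit (v & 0x1F) within it.
 */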
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
    ixv_vendor_info_t *ent;

    u16 pci_vendor_id = 0;
    u16 pci_device_id = 0;
    u16 pci_subvendor_id = 0;
    u16 pci_subdevice_id = 0;
    char adapter_name[256];

    pci_vendor_id = pci_get_vendor(dev);
    if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
        return (ENXIO);

    pci_device_id = pci_get_device(dev);
    pci_subvendor_id = pci_get_subvendor(dev);
    pci_subdevice_id = pci_get_subdevice(dev);

    ent = ixv_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id) &&
            ((pci_subvendor_id == ent->subvendor_id) ||
             (ent->subvendor_id == 0)) &&
            ((pci_subdevice_id == ent->subdevice_id) ||
             (ent->subdevice_id == 0))) {
            sprintf(adapter_name, "%s, Version - %s",
                ixv_strings[ent->index],
                ixv_driver_version);
            device_set_desc_copy(dev, adapter_name);
            return (BUS_PROBE_DEFAULT);
        }
        ent++;
    }
    return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
    struct adapter *adapter;
    struct ixgbe_hw *hw;
    int error = 0;

    INIT_DEBUGOUT("ixv_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    adapter = device_get_softc(dev);
    adapter->dev = adapter->osdep.dev = dev;
    hw = &adapter->hw;

    /* Core Lock Init */
    IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

    /* SYSCTL APIs */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_stats, "I", "Statistics");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
        &ixv_enable_aim, 1, "Interrupt Moderation");

    /* Set up the timer callout */
    callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

    /* Determine hardware revision */
    ixv_identify_hardware(adapter);

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(adapter)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        error = ENXIO;
        goto err_out;
    }

    /* Do descriptor calc and sanity checks */
    if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
        ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
        device_printf(dev, "TXD config issue, using default!\n");
        adapter->num_tx_desc = DEFAULT_TXD;
    } else
        adapter->num_tx_desc = ixv_txd;

    if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
        ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
        device_printf(dev, "RXD config issue, using default!\n");
        adapter->num_rx_desc = DEFAULT_RXD;
    } else
        adapter->num_rx_desc = ixv_rxd;

    /* Allocate our TX/RX Queues */
    if (ixv_allocate_queues(adapter)) {
        error = ENOMEM;
        goto err_out;
    }

    /*
    ** Initialize the shared code: at this
    ** point the mac type is set.
    */
    error = ixgbe_init_shared_code(hw);
    if (error) {
        device_printf(dev, "Shared Code Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    /* Get Hardware Flow Control setting */
    hw->fc.requested_mode = ixgbe_fc_full;
    hw->fc.pause_time = IXV_FC_PAUSE;
    hw->fc.low_water[0] = IXV_FC_LO;
    hw->fc.high_water[0] = IXV_FC_HI;
    hw->fc.send_xon = TRUE;

    error = ixgbe_init_hw(hw);
    if (error) {
        device_printf(dev, "Hardware Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    error = ixv_allocate_msix(adapter);
    if (error)
        goto err_late;

    /* Setup OS specific network interface */
    ixv_setup_interface(dev, adapter);

    /* Sysctl for limiting the amount of work done in the taskqueue */
    ixv_add_rx_process_limit(adapter, "rx_processing_limit",
        "max number of rx packets to process", &adapter->rx_process_limit,
        ixv_rx_process_limit);

    /* Do the stats setup */
    ixv_save_stats(adapter);
    ixv_init_stats(adapter);

    /* Register for VLAN events */
    adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

    INIT_DEBUGOUT("ixv_attach: end");
    return (0);

err_late:
    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);
err_out:
    ixv_free_pci_resources(adapter);
    return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    struct ix_queue *que = adapter->queues;

    INIT_DEBUGOUT("ixv_detach: begin");

    /* Make sure VLANs are not using the driver */
    if (adapter->ifp->if_vlantrunk != NULL) {
        device_printf(dev, "Vlan in use, detach first\n");
        return (EBUSY);
    }

    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        if (que->tq) {
            taskqueue_drain(que->tq, &que->que_task);
            taskqueue_free(que->tq);
        }
    }

    /* Drain the Link queue */
    if (adapter->tq) {
        taskqueue_drain(adapter->tq, &adapter->mbx_task);
        taskqueue_free(adapter->tq);
    }

    /* Unregister VLAN events */
    if (adapter->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    if (adapter->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

    ether_ifdetach(adapter->ifp);
    callout_drain(&adapter->timer);
    ixv_free_pci_resources(adapter);
    bus_generic_detach(dev);
    if_free(adapter->ifp);

    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);

    IXV_CORE_LOCK_DESTROY(adapter);
    return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);
    return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available the stack is notified and
 *  the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
    struct mbuf *m_head;
    struct adapter *adapter = txr->adapter;

    IXV_TX_LOCK_ASSERT(txr);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;
    if (!adapter->link_active)
        return;

    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (ixv_xmit(txr, &m_head)) {
            if (m_head == NULL)
                break;
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            break;
        }
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, m_head);

        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }
    return;
}

/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        IXV_TX_LOCK(txr);
        ixv_start_locked(txr, ifp);
        IXV_TX_UNLOCK(txr);
    }
    return;
}

#else
/*
** Multiqueue Transmit driver
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
    struct adapter *adapter = ifp->if_softc;
    struct ix_queue *que;
    struct tx_ring *txr;
    int i = 0, err = 0;

    /* Which queue to use */
    if ((m->m_flags & M_FLOWID) != 0)
        i = m->m_pkthdr.flowid % adapter->num_queues;

    txr = &adapter->tx_rings[i];
    que = &adapter->queues[i];
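    /*
     * Note: this VF driver currently comes up with num_queues == 1
     * (see ixv_allocate_pci_resources()), so the flowid hash above
     * always selects queue 0; it only spreads load if more queue
     * pairs are ever enabled.
     */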
    if (IXV_TX_TRYLOCK(txr)) {
        err = ixv_mq_start_locked(ifp, txr, m);
        IXV_TX_UNLOCK(txr);
    } else {
        err = drbr_enqueue(ifp, txr->br, m);
        taskqueue_enqueue(que->tq, &que->que_task);
    }

    return (err);
}

static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
    struct adapter *adapter = txr->adapter;
    struct mbuf *next;
    int enqueued, err = 0;

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || adapter->link_active == 0) {
        if (m != NULL)
            err = drbr_enqueue(ifp, txr->br, m);
        return (err);
    }

    /* Do a clean if descriptors are low */
    if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
        ixv_txeof(txr);

    enqueued = 0;
    if (m != NULL) {
        err = drbr_enqueue(ifp, txr->br, m);
        if (err)
            return (err);
    }
    /* Process the queue */
    while ((next = drbr_peek(ifp, txr->br)) != NULL) {
        if ((err = ixv_xmit(txr, &next)) != 0) {
            if (next == NULL)
                drbr_advance(ifp, txr->br);
            else
                drbr_putback(ifp, txr->br, next);
            break;
        }
        drbr_advance(ifp, txr->br);
        enqueued++;
        ifp->if_obytes += next->m_pkthdr.len;
        if (next->m_flags & M_MCAST)
            ifp->if_omcasts++;
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, next);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
        if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
    }

    if (enqueued > 0) {
        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }

    return (err);
}

/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;
    struct mbuf *m;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
            m_freem(m);
        IXV_TX_UNLOCK(txr);
    }
    if_qflush(ifp);
}
#endif
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
    struct ifaddr *ifa = (struct ifaddr *) data;
    bool avoid_reset = FALSE;
#endif
    int error = 0;

    switch (command) {

    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET)
            avoid_reset = TRUE;
#endif
#ifdef INET6
        if (ifa->ifa_addr->sa_family == AF_INET6)
            avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
        /*
        ** Calling init results in link renegotiation,
        ** so we avoid doing it when possible.
        */
        if (avoid_reset) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                ixv_init(adapter);
            if (!(ifp->if_flags & IFF_NOARP))
                arp_ifinit(ifp, ifa);
        } else
            error = ether_ioctl(ifp, command, data);
        break;
#endif
    case SIOCSIFMTU:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
        if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
            error = EINVAL;
        } else {
            IXV_CORE_LOCK(adapter);
            ifp->if_mtu = ifr->ifr_mtu;
            adapter->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFFLAGS:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
        IXV_CORE_LOCK(adapter);
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                ixv_init_locked(adapter);
        } else
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixv_stop(adapter);
        adapter->if_flags = ifp->if_flags;
        IXV_CORE_UNLOCK(adapter);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_disable_intr(adapter);
            ixv_set_multi(adapter);
            ixv_enable_intr(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
        break;
    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
        if (mask & IFCAP_HWCSUM)
            ifp->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            ifp->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        VLAN_CAPABILITIES(ifp);
        break;
    }

    default:
        IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    device_t dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 mhadd, gpie;

    INIT_DEBUGOUT("ixv_init: begin");
    mtx_assert(&adapter->core_mtx, MA_OWNED);
    hw->adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest mac address, user can use a LAA */
    bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
        IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    hw->addr_ctrl.rar_used_count = 1;

    /* Prepare transmit descriptors and buffers */
    if (ixv_setup_transmit_structures(adapter)) {
        device_printf(dev, "Could not setup transmit structures\n");
        ixv_stop(adapter);
        return;
    }

    ixgbe_reset_hw(hw);
    ixv_initialize_transmit_units(adapter);

    /* Setup Multicast table */
    ixv_set_multi(adapter);

    /*
    ** Determine the correct mbuf pool
    ** for doing jumbo/headersplit
    */
    if (ifp->if_mtu > ETHERMTU)
        adapter->rx_mbuf_sz = MJUMPAGESIZE;
    else
        adapter->rx_mbuf_sz = MCLBYTES;

    /* Prepare receive descriptors and buffers */
    if (ixv_setup_receive_structures(adapter)) {
        device_printf(dev, "Could not setup receive structures\n");
        ixv_stop(adapter);
        return;
    }

    /* Configure RX settings */
    ixv_initialize_receive_units(adapter);

    /* Enable Enhanced MSIX mode */
    gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* Set the various hardware offload abilities */
    ifp->if_hwassist = 0;
    if (ifp->if_capenable & IFCAP_TSO4)
        ifp->if_hwassist |= CSUM_TSO;
    if (ifp->if_capenable & IFCAP_TXCSUM) {
        ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
        ifp->if_hwassist |= CSUM_SCTP;
#endif
    }

    /* Set MTU size */
    if (ifp->if_mtu > ETHERMTU) {
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(adapter);

    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

    /* Set up MSI/X routing */
    ixv_configure_ivars(adapter);

    /* Set up auto-mask */
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

    /* Stats init */
    ixv_init_stats(adapter);

    /* Config/Enable Link */
    ixv_config_link(adapter);

    /* And now turn on interrupts */
    ixv_enable_intr(adapter);

    /* Now inform the stack we're ready */
    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    return;
}

static void
ixv_init(void *arg)
{
    struct adapter *adapter = arg;

    IXV_CORE_LOCK(adapter);
    ixv_init_locked(adapter);
    IXV_CORE_UNLOCK(adapter);
    return;
}
/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 queue = 1 << vector;
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
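/*
 * VTEIMS and VTEIMC are write-1 mask set/clear registers: writing a
 * vector's bit to VTEIMS unmasks (enables) it, writing the same bit
 * to VTEIMC masks it, and VTEICS (used by ixv_rearm_queues() below)
 * software-triggers the interrupt cause. IXGBE_EIMS_RTX_QUEUE limits
 * the bits to the RX/TX queue causes.
 */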
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u64 queue = (u64)(1 << vector);
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
    u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
static void
ixv_handle_que(void *context, int pending)
{
    struct ix_queue *que = context;
    struct adapter *adapter = que->adapter;
    struct tx_ring *txr = que->txr;
    struct ifnet *ifp = adapter->ifp;
    bool more;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        more = ixv_rxeof(que, adapter->rx_process_limit);
        IXV_TX_LOCK(txr);
        ixv_txeof(txr);
#if __FreeBSD_version >= 800000
        if (!drbr_empty(ifp, txr->br))
            ixv_mq_start_locked(ifp, txr, NULL);
#else
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            ixv_start_locked(txr, ifp);
#endif
        IXV_TX_UNLOCK(txr);
        if (more) {
            taskqueue_enqueue(que->tq, &que->que_task);
            return;
        }
    }

    /* Reenable this interrupt */
    ixv_enable_queue(adapter, que->msix);
    return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
    struct ix_queue *que = arg;
    struct adapter *adapter = que->adapter;
    struct tx_ring *txr = que->txr;
    struct rx_ring *rxr = que->rxr;
    bool more_tx, more_rx;
    u32 newitr = 0;

    ixv_disable_queue(adapter, que->msix);
    ++que->irqs;

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    IXV_TX_LOCK(txr);
    more_tx = ixv_txeof(txr);
    /*
    ** Make certain that if the stack
    ** has anything queued the task gets
    ** scheduled to handle it.
    */
#if __FreeBSD_version < 800000
    if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
#else
    if (!drbr_empty(adapter->ifp, txr->br))
#endif
        more_tx = 1;
    IXV_TX_UNLOCK(txr);

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    /* Do AIM now? */

    if (ixv_enable_aim == FALSE)
        goto no_calc;

    /*
    ** Do Adaptive Interrupt Moderation:
    ** - Write out last calculated setting
    ** - Calculate based on average size over
    **   the last interval.
    */
    if (que->eitr_setting)
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VTEITR(que->msix),
            que->eitr_setting);

    que->eitr_setting = 0;

    /* Idle, do nothing */
    if ((txr->bytes == 0) && (rxr->bytes == 0))
        goto no_calc;

    if ((txr->bytes) && (txr->packets))
        newitr = txr->bytes/txr->packets;
    if ((rxr->bytes) && (rxr->packets))
        newitr = max(newitr,
            (rxr->bytes / rxr->packets));
    newitr += 24; /* account for hardware frame, crc */

    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200))
        newitr = (newitr / 3);
    else
        newitr = (newitr / 2);

    newitr |= newitr << 16;

    /* save for next interrupt */
    que->eitr_setting = newitr;
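    /*
     * Worked example of the math above: with an average frame of
     * ~1500 bytes, newitr = 1500 + 24 = 1524, capped at 3000; that
     * falls outside the (300,1200) mid range, so it is halved to
     * 762. The value is written to VTEITR on the next interrupt,
     * throttling this vector accordingly.
     */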
    /* Reset state */
    txr->bytes = 0;
    txr->packets = 0;
    rxr->bytes = 0;
    rxr->packets = 0;

no_calc:
    if (more_tx || more_rx)
        taskqueue_enqueue(que->tq, &que->que_task);
    else /* Reenable this interrupt */
        ixv_enable_queue(adapter, que->msix);
    return;
}
static void
ixv_msix_mbx(void *arg)
{
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 reg;

    ++adapter->mbx_irq;

    /* First get the cause */
    reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

    /* Link status change */
    if (reg & IXGBE_EICR_LSC)
        taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct adapter *adapter = ifp->if_softc;

    INIT_DEBUGOUT("ixv_media_status: begin");
    IXV_CORE_LOCK(adapter);
    ixv_update_link_status(adapter);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!adapter->link_active) {
        IXV_CORE_UNLOCK(adapter);
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (adapter->link_speed) {
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_FDX;
        break;
    }

    IXV_CORE_UNLOCK(adapter);

    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifmedia *ifm = &adapter->media;

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(adapter->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *  - return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
    struct adapter *adapter = txr->adapter;
    u32 olinfo_status = 0, cmd_type_len;
    u32 paylen = 0;
    int i, j, error, nsegs;
    int first, last = 0;
    struct mbuf *m_head;
    bus_dma_segment_t segs[32];
    bus_dmamap_t map;
    struct ixv_tx_buf *txbuf, *txbuf_mapped;
    union ixgbe_adv_tx_desc *txd = NULL;

    m_head = *m_headp;

    /* Basic descriptor defines */
    cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
        IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

    if (m_head->m_flags & M_VLANTAG)
        cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

    /*
     * Important to capture the first descriptor
     * used because it will contain the index of
     * the one we tell the hardware to report back
     */
    first = txr->next_avail_desc;
    txbuf = &txr->tx_buffers[first];
    txbuf_mapped = txbuf;
    map = txbuf->map;

    /*
     * Map the packet for DMA.
     */
    error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
        *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

    if (error == EFBIG) {
        struct mbuf *m;

        m = m_defrag(*m_headp, M_NOWAIT);
        if (m == NULL) {
            adapter->mbuf_defrag_failed++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (ENOBUFS);
        }
        *m_headp = m;

        /* Try it again */
        error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
            *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

        if (error == ENOMEM) {
            adapter->no_tx_dma_setup++;
            return (error);
        } else if (error != 0) {
            adapter->no_tx_dma_setup++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (error);
        }
    } else if (error == ENOMEM) {
        adapter->no_tx_dma_setup++;
        return (error);
    } else if (error != 0) {
        adapter->no_tx_dma_setup++;
        m_freem(*m_headp);
        *m_headp = NULL;
        return (error);
    }

    /* Make certain there are enough descriptors */
    if (nsegs > txr->tx_avail - 2) {
        txr->no_desc_avail++;
        error = ENOBUFS;
        goto xmit_fail;
    }
    m_head = *m_headp;

    /*
    ** Set up the appropriate offload context;
    ** this becomes the first descriptor of
    ** the packet.
    */
    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        if (ixv_tso_setup(txr, m_head, &paylen)) {
            cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
            olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
            olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
            olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
            ++adapter->tso_tx;
        } else
            return (ENXIO);
    } else if (ixv_tx_ctx_setup(txr, m_head))
        olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

    /* Record payload length */
    if (paylen == 0)
        olinfo_status |= m_head->m_pkthdr.len <<
            IXGBE_ADVTXD_PAYLEN_SHIFT;

    i = txr->next_avail_desc;
    for (j = 0; j < nsegs; j++) {
        bus_size_t seglen;
        bus_addr_t segaddr;

        txbuf = &txr->tx_buffers[i];
        txd = &txr->tx_base[i];
        seglen = segs[j].ds_len;
        segaddr = htole64(segs[j].ds_addr);

        txd->read.buffer_addr = segaddr;
        txd->read.cmd_type_len = htole32(txr->txd_cmd |
            cmd_type_len | seglen);
        txd->read.olinfo_status = htole32(olinfo_status);
        last = i; /* descriptor that will get completion IRQ */

        if (++i == adapter->num_tx_desc)
            i = 0;

        txbuf->m_head = NULL;
        txbuf->eop_index = -1;
    }

    txd->read.cmd_type_len |=
        htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
    txr->tx_avail -= nsegs;
    txr->next_avail_desc = i;

    txbuf->m_head = m_head;
    txr->tx_buffers[first].map = txbuf->map;
    txbuf->map = map;
    bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

    /* Set the index of the descriptor that will be marked done */
    txbuf = &txr->tx_buffers[first];
    txbuf->eop_index = last;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /*
     * Advance the Transmit Descriptor Tail (Tdt), this tells the
     * hardware that this frame is available to transmit.
     */
    ++txr->total_packets;
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

    return (0);

xmit_fail:
    bus_dmamap_unload(txr->txtag, txbuf->map);
    return (error);
}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
    u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    u8 *update_ptr;
    struct ifmultiaddr *ifma;
    int mcnt = 0;
    struct ifnet *ifp = adapter->ifp;

    IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_maddr_rlock(ifp);
#endif
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
            &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
            IXGBE_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
    }
#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_maddr_runlock(ifp);
#endif

    update_ptr = mta;

    ixgbe_update_mc_addr_list(&adapter->hw,
        update_ptr, mcnt, ixv_mc_array_itr, TRUE);

    return;
}

/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    u8 *addr = *update_ptr;
    u8 *newptr;

    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;
    return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
    struct adapter *adapter = arg;
    device_t dev = adapter->dev;
    struct tx_ring *txr = adapter->tx_rings;
    int i;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    ixv_update_link_status(adapter);

    /* Stats Update */
    ixv_update_stats(adapter);

    /*
     * If the interface has been paused
     * then don't do the watchdog check
     */
    if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
        goto out;
    /*
    ** Check for time since any descriptor was cleaned
    */
    for (i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        if (txr->watchdog_check == FALSE) {
            IXV_TX_UNLOCK(txr);
            continue;
        }
        if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
            goto hung;
        IXV_TX_UNLOCK(txr);
    }
out:
    ixv_rearm_queues(adapter, adapter->que_mask);
    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    return;

hung:
    device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
    device_printf(dev, "TX(%d) desc avail = %d, "
        "Next TX to Clean = %d\n",
        txr->me, txr->tx_avail, txr->next_to_clean);
    adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    adapter->watchdog_events++;
    IXV_TX_UNLOCK(txr);
    ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    struct tx_ring *txr = adapter->tx_rings;
    device_t dev = adapter->dev;

    if (adapter->link_up) {
        if (adapter->link_active == FALSE) {
            if (bootverbose)
                device_printf(dev, "Link is up %d Gbps %s \n",
                    ((adapter->link_speed == 128) ? 10 : 1),
                    "Full Duplex");
            adapter->link_active = TRUE;
            if_link_state_change(ifp, LINK_STATE_UP);
        }
    } else { /* Link down */
        if (adapter->link_active == TRUE) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            if_link_state_change(ifp, LINK_STATE_DOWN);
            adapter->link_active = FALSE;
            for (int i = 0; i < adapter->num_queues;
                i++, txr++)
                txr->watchdog_check = FALSE;
        }
    }

    return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
    struct ifnet *ifp;
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    ifp = adapter->ifp;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    INIT_DEBUGOUT("ixv_stop: begin\n");
    ixv_disable_intr(adapter);

    /* Tell the stack that the interface is no longer active */
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    adapter->hw.adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    u16 pci_cmd_word;

    /*
    ** Make sure BUSMASTER is set, on a VM under
    ** KVM it may not be and will break things.
    */
    pci_enable_busmaster(dev);
    pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

    /* Save off the information about this board */
    adapter->hw.vendor_id = pci_get_vendor(dev);
    adapter->hw.device_id = pci_get_device(dev);
    adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
    adapter->hw.subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    adapter->hw.subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    int error, rid, vector = 0;
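    /*
     * MSI-X vectors are zero-based, but the SYS_RES_IRQ resource ids
     * FreeBSD hands out for them start at 1, hence the rid = vector + 1
     * mapping used for each allocation below.
     */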
    for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
        rid = vector + 1;
        que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (que->res == NULL) {
            device_printf(dev, "Unable to allocate"
                " bus resource: que interrupt [%d]\n", vector);
            return (ENXIO);
        }
        /* Set the handler function */
        error = bus_setup_intr(dev, que->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixv_msix_que, que, &que->tag);
        if (error) {
            que->res = NULL;
            device_printf(dev, "Failed to register QUE handler");
            return (error);
        }
#if __FreeBSD_version >= 800504
        bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
        que->msix = vector;
        adapter->que_mask |= (u64)(1 << que->msix);
        /*
        ** Bind the msix vector, and thus the
        ** ring to the corresponding cpu.
        */
        if (adapter->num_queues > 1)
            bus_bind_intr(dev, que->res, i);

        TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
        que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(adapter->dev));
    }

    /* and Mailbox */
    rid = vector + 1;
    adapter->res = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (!adapter->res) {
        device_printf(dev, "Unable to allocate"
            " bus resource: MBX interrupt [%d]\n", rid);
        return (ENXIO);
    }
    /* Set the mbx handler function */
    error = bus_setup_intr(dev, adapter->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL,
        ixv_msix_mbx, adapter, &adapter->tag);
    if (error) {
        adapter->res = NULL;
        device_printf(dev, "Failed to register LINK handler");
        return (error);
    }
#if __FreeBSD_version >= 800504
    bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
    adapter->mbxvec = vector;
    /* Tasklets for Mailbox */
    TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
    adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
        device_get_nameunit(adapter->dev));
    /*
    ** Due to a broken design QEMU will fail to properly
    ** enable the guest for MSIX unless the vectors in
    ** the table are all set up, so we must rewrite the
    ** ENABLE bit in the MSIX control register again at this
    ** point to cause it to successfully initialize us.
    */
    if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;
        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    return (0);
}
/*
 * Setup MSIX resources, note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int rid, want;

    /* First try MSI/X */
    rid = PCIR_BAR(MSIX_BAR);
    adapter->msix_mem = bus_alloc_resource_any(dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (adapter->msix_mem == NULL) {
        device_printf(adapter->dev,
            "Unable to map MSIX table\n");
        goto out;
    }

    /*
    ** Want two vectors: one for a queue,
    ** plus an additional for mailbox.
    */
    want = 2;
    if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
        device_printf(adapter->dev,
            "Using MSIX interrupts with %d vectors\n", want);
        return (want);
    }
out:
    /* Release in case alloc was insufficient */
    pci_release_msi(dev);
    if (adapter->msix_mem != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rid, adapter->msix_mem);
        adapter->msix_mem = NULL;
    }
    device_printf(adapter->dev, "MSIX config error\n");
    return (ENXIO);
}
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
    int rid;
    device_t dev = adapter->dev;

    rid = PCIR_BAR(0);
    adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(adapter->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    adapter->osdep.mem_bus_space_tag =
        rman_get_bustag(adapter->pci_mem);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->pci_mem);
    adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

    adapter->num_queues = 1;
    adapter->hw.back = &adapter->osdep;

    /*
    ** Now setup MSI/X, should
    ** return us the number of
    ** configured vectors.
    */
    adapter->msix = ixv_setup_msix(adapter);
    if (adapter->msix == ENXIO)
        return (ENXIO);
    else
        return (0);
}
static void
ixv_free_pci_resources(struct adapter *adapter)
{
    struct ix_queue *que = adapter->queues;
    device_t dev = adapter->dev;
    int rid, memrid;

    memrid = PCIR_BAR(MSIX_BAR);

    /*
    ** There is a slight possibility of a failure mode
    ** in attach that will result in entering this function
    ** before interrupt resources have been initialized, and
    ** in that case we do not want to execute the loops below.
    ** We can detect this reliably by the state of the adapter
    ** res pointer.
    */
    if (adapter->res == NULL)
        goto mem;

    /*
    **  Release all msix queue resources:
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        rid = que->msix + 1;
        if (que->tag != NULL) {
            bus_teardown_intr(dev, que->res, que->tag);
            que->tag = NULL;
        }
        if (que->res != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
    }

    /* Clean the Legacy or Link interrupt last */
    if (adapter->mbxvec) /* we are doing MSIX */
        rid = adapter->mbxvec + 1;
    else
        (adapter->msix != 0) ? (rid = 1) : (rid = 0);

    if (adapter->tag != NULL) {
        bus_teardown_intr(dev, adapter->res, adapter->tag);
        adapter->tag = NULL;
    }
    if (adapter->res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
    if (adapter->msix)
        pci_release_msi(dev);

    if (adapter->msix_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            memrid, adapter->msix_mem);

    if (adapter->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), adapter->pci_mem);

    return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
    struct ifnet *ifp;

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    ifp = adapter->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL)
        panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_baudrate = 1000000000;
    ifp->if_init = ixv_init;
    ifp->if_softc = adapter;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
    ifp->if_transmit = ixv_mq_start;
    ifp->if_qflush = ixv_qflush;
#else
    ifp->if_start = ixv_start;
#endif
    ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

    ether_ifattach(ifp, adapter->hw.mac.addr);

    adapter->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
                         |  IFCAP_VLAN_HWTSO
                         |  IFCAP_VLAN_MTU;
    ifp->if_capenable = ifp->if_capabilities;

    /* Don't enable LRO by default */
    ifp->if_capabilities |= IFCAP_LRO;
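    /*
     * Because if_capenable was latched before IFCAP_LRO was added
     * above, LRO is advertised as a supported capability but stays
     * disabled until the administrator enables it with ifconfig(8).
     */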
    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
        ixv_media_status);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

    return;
}
static void
ixv_config_link(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 autoneg, err = 0;

    if (hw->mac.ops.check_link)
        err = hw->mac.ops.check_link(hw, &autoneg,
            &adapter->link_up, FALSE);
    if (err)
        goto out;

    if (hw->mac.ops.setup_link)
        err = hw->mac.ops.setup_link(hw,
            autoneg, adapter->link_up);
out:
    return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
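/*
 * ixv_dmamap_cb() below just records the bus address of the one and
 * only DMA segment; the tag created in ixv_dma_malloc() specifies
 * nsegments == 1, so looking at segs[0] alone is safe.
 */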
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    if (error)
        return;
    *(bus_addr_t *) arg = segs->ds_addr;
    return;
}

static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
        struct ixv_dma_alloc *dma, int mapflags)
{
    device_t dev = adapter->dev;
    int r;

    r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
            DBA_ALIGN, 0,        /* alignment, bounds */
            BUS_SPACE_MAXADDR,   /* lowaddr */
            BUS_SPACE_MAXADDR,   /* highaddr */
            NULL, NULL,          /* filter, filterarg */
            size,                /* maxsize */
            1,                   /* nsegments */
            size,                /* maxsegsize */
            BUS_DMA_ALLOCNOW,    /* flags */
            NULL,                /* lockfunc */
            NULL,                /* lockfuncarg */
            &dma->dma_tag);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dma_tag_create failed; "
            "error %u\n", r);
        goto fail_0;
    }
    r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
        BUS_DMA_NOWAIT, &dma->dma_map);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dmamem_alloc failed; "
            "error %u\n", r);
        goto fail_1;
    }
    r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
            size,
            ixv_dmamap_cb,
            &dma->dma_paddr,
            mapflags | BUS_DMA_NOWAIT);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dmamap_load failed; "
            "error %u\n", r);
        goto fail_2;
    }
    dma->dma_size = size;
    return (0);
fail_2:
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
    bus_dma_tag_destroy(dma->dma_tag);
fail_0:
    dma->dma_map = NULL;
    dma->dma_tag = NULL;
    return (r);
}
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
    bus_dmamap_sync(dma->dma_tag, dma->dma_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dma->dma_tag, dma->dma_map);
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
    bus_dma_tag_destroy(dma->dma_tag);
}
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each, called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_queue *que;
    struct tx_ring *txr;
    struct rx_ring *rxr;
    int rsize, tsize, error = 0;
    int txconf = 0, rxconf = 0;

    /* First allocate the top level queue structs */
    if (!(adapter->queues =
        (struct ix_queue *) malloc(sizeof(struct ix_queue) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate queue memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* First allocate the TX ring struct memory */
    if (!(adapter->tx_rings =
        (struct tx_ring *) malloc(sizeof(struct tx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate TX ring memory\n");
        error = ENOMEM;
        goto tx_fail;
    }

    /* Next allocate the RX */
    if (!(adapter->rx_rings =
        (struct rx_ring *) malloc(sizeof(struct rx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate RX ring memory\n");
        error = ENOMEM;
        goto rx_fail;
    }

    /* For the ring itself */
    tsize = roundup2(adapter->num_tx_desc *
        sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

    /*
     * Now set up the TX queues, txconf is needed to handle the
     * possibility that things fail midcourse and we need to
     * undo memory gracefully
     */
    for (int i = 0; i < adapter->num_queues; i++, txconf++) {
        /* Set up some basics */
        txr = &adapter->tx_rings[i];
        txr->adapter = adapter;
        txr->me = i;

        /* Initialize the TX side lock */
        snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(dev), txr->me);
        mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, tsize,
            &txr->txdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate TX Descriptor memory\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
        txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
        bzero((void *)txr->tx_base, tsize);

        /* Now allocate transmit buffers for the ring */
        if (ixv_allocate_transmit_buffers(txr)) {
            device_printf(dev,
                "Critical Failure setting up transmit buffers\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#if __FreeBSD_version >= 800000
        /* Allocate a buf ring */
        txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
            M_WAITOK, &txr->tx_mtx);
        if (txr->br == NULL) {
            device_printf(dev,
                "Critical Failure setting up buf ring\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#endif
    }

    /*
     * Next the RX queues...
     */
    rsize = roundup2(adapter->num_rx_desc *
        sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
    for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
        rxr = &adapter->rx_rings[i];
        /* Set up some basics */
        rxr->adapter = adapter;
        rxr->me = i;

        /* Initialize the RX side lock */
        snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(dev), rxr->me);
        mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, rsize,
            &rxr->rxdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate RX Descriptor memory\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
        rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
        bzero((void *)rxr->rx_base, rsize);

        /* Allocate receive buffers for the ring */
        if (ixv_allocate_receive_buffers(rxr)) {
            device_printf(dev,
                "Critical Failure setting up receive buffers\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
    }

    /*
    ** Finally set up the queue holding structs
    */
    for (int i = 0; i < adapter->num_queues; i++) {
        que = &adapter->queues[i];
        que->adapter = adapter;
        que->txr = &adapter->tx_rings[i];
        que->rxr = &adapter->rx_rings[i];
    }

    return (0);

err_rx_desc:
    for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
        ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
    for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
        ixv_dma_free(adapter, &txr->txdma);
    free(adapter->rx_rings, M_DEVBUF);
rx_fail:
    free(adapter->tx_rings, M_DEVBUF);
tx_fail:
    free(adapter->queues, M_DEVBUF);
fail:
    return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    device_t dev = adapter->dev;
    struct ixv_tx_buf *txbuf;
    int error, i;

    /*
     * Setup DMA descriptor areas.
     */
    if ((error = bus_dma_tag_create(
            bus_get_dma_tag(adapter->dev), /* parent */
            1, 0,                /* alignment, bounds */
            BUS_SPACE_MAXADDR,   /* lowaddr */
            BUS_SPACE_MAXADDR,   /* highaddr */
            NULL, NULL,          /* filter, filterarg */
            IXV_TSO_SIZE,        /* maxsize */
            32,                  /* nsegments */
            PAGE_SIZE,           /* maxsegsize */
            0,                   /* flags */
            NULL,                /* lockfunc */
            NULL,                /* lockfuncarg */
            &txr->txtag))) {
        device_printf(dev, "Unable to allocate TX DMA tag\n");
        goto fail;
    }

    if (!(txr->tx_buffers =
        (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
        adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate tx_buffer memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* Create the descriptor buffer dma maps */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
        if (error != 0) {
            device_printf(dev, "Unable to create TX DMA map\n");
            goto fail;
        }
    }

    return 0;
fail:
    /* We free all, it handles case where we are in the middle */
    ixv_free_transmit_structures(adapter);
    return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    struct ixv_tx_buf *txbuf;
    int i;

    /* Clear the old ring contents */
    IXV_TX_LOCK(txr);
    bzero((void *)txr->tx_base,
        (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
    txr->next_avail_desc = 0;
    txr->next_to_clean = 0;

    /* Free any existing tx buffers. */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        if (txbuf->m_head != NULL) {
            bus_dmamap_sync(txr->txtag, txbuf->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txtag, txbuf->map);
            m_freem(txbuf->m_head);
            txbuf->m_head = NULL;
        }
        /* Clear the EOP index */
        txbuf->eop_index = -1;
    }

    /* Set number of descriptors available */
    txr->tx_avail = adapter->num_tx_desc;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;

    for (int i = 0; i < adapter->num_queues; i++, txr++)
        ixv_setup_transmit_ring(txr);

    return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;
    struct ixgbe_hw *hw = &adapter->hw;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        u64 tdba = txr->txdma.dma_paddr;
        u32 txctrl, txdctl;

        /* Set WTHRESH to 8, burst writeback */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        txdctl |= (8 << 16);
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

        /* Set the HW Tx Head and Tail indices */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

        /* Setup Transmit Descriptor Cmd Settings */
        txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
        txr->watchdog_check = FALSE;

        /* Set Ring parameters */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
            adapter->num_tx_desc *
            sizeof(struct ixgbe_legacy_tx_desc));
2288 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2289 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2290 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
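
/*
 * Note (illustrative arithmetic): a ring of, say, 1024 descriptors
 * programs VFTDLEN to 1024 * 16 = 16384 bytes.  The legacy and
 * advanced Tx descriptor formats are both 16 bytes wide, which is why
 * sizeof(struct ixgbe_legacy_tx_desc) above yields the correct length.
 */
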
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		ixv_free_transmit_buffers(txr);
		ixv_dma_free(adapter, &txr->txdma);
		IXV_TX_UNLOCK(txr);
		IXV_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}

/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#if __FreeBSD_version >= 800000
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/

static bool
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int  ehdrlen, ip_hlen = 0;
	u16 etype;
	u8 ipproto = 0;
	bool offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u16 vtag = 0;

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the descriptor itself.
	*/
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		return FALSE;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen)
			return FALSE;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		if (mp->m_len < ehdrlen + ip_hlen)
			return FALSE;
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
	default:
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		break;
#if __FreeBSD_version >= 800000
	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		break;
#endif
	default:
		offload = FALSE;
		break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (offload);
}
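
/*
 * Worked example (illustrative): for an untagged IPv4/TCP frame,
 * ehdrlen = 14 and ip_hlen = 20, so vlan_macip_lens packs to
 * (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20 -- the MAC header length
 * above the shift, the IP header length in the low bits, and any
 * VLAN tag in the top 16 bits.
 */
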
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static bool
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;	/* 0 */
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* This is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
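
/*
 * Note (illustrative): th_sum is pre-seeded with only the pseudo-header
 * sum of the addresses and protocol -- no length term -- because the
 * hardware recomputes the TCP checksum for every segment it carves
 * from the TSO payload and folds in each segment's own length itself.
 */
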
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static bool
ixv_txeof(struct tx_ring *txr)
{
	struct adapter	*adapter = txr->adapter;
	struct ifnet	*ifp = adapter->ifp;
	u32	first, last, done;
	struct ixv_tx_buf *tx_buffer;
	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

	if (txr->tx_avail == adapter->num_tx_desc)
		return FALSE;

	first = txr->next_to_clean;
	tx_buffer = &txr->tx_buffers[first];
	/* For cleanup we just use legacy struct */
	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
	last = tx_buffer->eop_index;
	if (last == -1)
		return FALSE;
	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple
	*/
	if (++last == adapter->num_tx_desc) last = 0;
	done = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** Only the EOP descriptor of a packet now has the DD
	** bit set, this is what we look for...
	*/
	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;

			if (tx_buffer->m_head) {
				bus_dmamap_sync(txr->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
				tx_buffer->map = NULL;
			}
			tx_buffer->eop_index = -1;
			txr->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
		}
		++ifp->if_opackets;
		/* See if there is more work now */
		last = tx_buffer->eop_index;
		if (last != -1) {
			eop_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
			/* Get next done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->watchdog_check = FALSE;
			return FALSE;
		}
	}

	return TRUE;
}
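
/*
 * Note (illustrative): the TRUE/FALSE result feeds the queue handler --
 * TRUE means descriptors may still be outstanding so cleanup should be
 * rescheduled, FALSE means the ring is completely clean and the
 * watchdog can stay disarmed.
 */
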
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter		*adapter = rxr->adapter;
	bus_dma_segment_t	hseg[1];
	bus_dma_segment_t	pseg[1];
	struct ixv_rx_buf	*rxbuf;
	struct mbuf		*mh, *mp;
	int			i, j, nsegs, error;
	bool			refreshed = FALSE;

	i = j = rxr->next_to_refresh;
	/* Get the control variable, one beyond refresh point */
	if (++j == adapter->num_rx_desc)
		j = 0;
	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL)
				goto update;
			mh->m_pkthdr.len = mh->m_len = MHLEN;
			mh->m_flags |= M_PKTHDR;
			m_adj(mh, ETHER_ALIGN);
			/* Get the memory mapping */
			error = bus_dmamap_load_mbuf_sg(rxr->htag,
			    rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("GET BUF: dmamap load"
				    " failure - %d\n", error);
				m_free(mh);
				goto update;
			}
			rxbuf->m_head = mh;
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_PREREAD);
			rxr->rx_base[i].read.hdr_addr =
			    htole64(hseg[0].ds_addr);
		}

		if (rxbuf->m_pack == NULL) {
			mp = m_getjcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR, adapter->rx_mbuf_sz);
			if (mp == NULL)
				goto update;
		} else
			mp = rxbuf->m_pack;

		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("GET BUF: dmamap load"
			    " failure - %d\n", error);
			m_free(mp);
			rxbuf->m_pack = NULL;
			goto update;
		}
		rxbuf->m_pack = mp;
		bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
		    BUS_DMASYNC_PREREAD);
		rxr->rx_base[i].read.pkt_addr =
		    htole64(pseg[0].ds_addr);

		refreshed = TRUE;
		rxr->next_to_refresh = i = j;
		/* Calculate next index */
		if (++j == adapter->num_rx_desc)
			j = 0;
	}
update:
	if (refreshed) /* update tail index */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
	return;
}
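
/*
 * Note (illustrative): next_to_refresh is the producer index published
 * to the hardware via VFRDT; 'j' walks one slot ahead so the loop stops
 * at 'limit' before overtaking descriptors the hardware has not written
 * back yet, and a failed allocation simply leaves the slot in place to
 * be retried on the next call.
 */
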
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	device_t		dev = adapter->dev;
	struct ixv_rx_buf	*rxbuf;
	int			i, bsize, error;

	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
	if (!(rxr->rx_buffers =
	    (struct ixv_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				   1, 0,	/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,	/* filter, filterarg */
				   MSIZE,	/* maxsize */
				   1,		/* nsegments */
				   MSIZE,	/* maxsegsize */
				   0,		/* flags */
				   NULL,	/* lockfunc */
				   NULL,	/* lockfuncarg */
				   &rxr->htag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				   1, 0,	/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,	/* filter, filterarg */
				   MJUMPAGESIZE,	/* maxsize */
				   1,		/* nsegments */
				   MJUMPAGESIZE,	/* maxsegsize */
				   0,		/* flags */
				   NULL,	/* lockfunc */
				   NULL,	/* lockfuncarg */
				   &rxr->ptag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &rxbuf->hmap);
		if (error) {
			device_printf(dev, "Unable to create RX head map\n");
			goto fail;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &rxbuf->pmap);
		if (error) {
			device_printf(dev, "Unable to create RX pkt map\n");
			goto fail;
		}
	}

	return (0);

fail:
	/* Frees all, but can handle partial completion */
	ixv_free_receive_structures(adapter);
	return (error);
}

static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ixv_rx_buf	*rxbuf;
	int i;

	adapter = rxr->adapter;
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
			rxbuf->m_head->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_head);
		}
		if (rxbuf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			rxbuf->m_pack->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_pack);
		}
		rxbuf->m_head = NULL;
		rxbuf->m_pack = NULL;
	}
}

/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ifnet		*ifp;
	device_t		dev;
	struct ixv_rx_buf	*rxbuf;
	bus_dma_segment_t	pseg[1], hseg[1];
	struct lro_ctrl		*lro = &rxr->lro;
	int			rsize, nsegs, error = 0;

	adapter = rxr->adapter;
	ifp = adapter->ifp;
	dev = adapter->dev;

	/* Clear the ring contents */
	IXV_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/* Free current RX buffer structs and their mbufs */
	ixv_free_receive_ring(rxr);

	/* Configure header split? */
	if (ixv_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the mbufs */
	for (int j = 0; j != adapter->num_rx_desc; ++j) {
		struct mbuf	*mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		/*
		** Don't allocate mbufs if not
		** doing header split, it's wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;
	rxr->discard = FALSE;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IXV_RX_UNLOCK(rxr);
	return (0);

fail:
	ixv_free_receive_ring(rxr);
	IXV_RX_UNLOCK(rxr);
	return (error);
}

/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int j;

	for (j = 0; j < adapter->num_queues; j++, rxr++)
		if (ixv_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'j' failed, so it's the terminus.
	 */
	for (int i = 0; i < j; ++i) {
		rxr = &adapter->rx_rings[i];
		ixv_free_receive_ring(rxr);
	}

	return (ENOBUFS);
}

/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Do the queue enabling first */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		if (rxr->hdr_split) {
			/* Use a standard mbuf for the header */
			reg |= ((IXV_RX_HDR <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
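
/*
 * Note (illustrative arithmetic): SRRCTL sizes packet buffers in 1 KB
 * units, so the shift by IXGBE_SRRCTL_BSIZEPKT_SHIFT (10) turns a
 * 2048-byte cluster into BSIZEPKT = 2 and the 4096-byte jumbo case
 * into 4.
 */
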
/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl	*lro = &rxr->lro;
		ixv_free_receive_buffers(rxr);
		/* Free LRO memory */
		tcp_lro_free(lro);
		/* Free the ring memory as well */
		ixv_dma_free(adapter, &rxr->rxdma);
	}

	free(adapter->rx_rings, M_DEVBUF);
}

/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	struct ixv_rx_buf	*rxbuf;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	return;
}

static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
	/*
	 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
	 * should be computed by hardware. Also it should not have VLAN tag in
	 * ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		** Send to the stack if:
		**  - LRO not enabled, or
		**  - no LRO resources, or
		**  - lro enqueue fails
		*/
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
	(*ifp->if_input)(ifp, m);
}

static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
	struct ixv_rx_buf	*rbuf;

	rbuf = &rxr->rx_buffers[i];

	if (rbuf->fmp != NULL) {/* Partial chain ? */
		rbuf->fmp->m_flags |= M_PKTHDR;
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;
	}

	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so it's easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}

	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}

	return;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
	int			i, nextp, processed = 0;
	u32			staterr = 0;
	union ixgbe_adv_rx_desc	*cur;
	struct ixv_rx_buf	*rbuf, *nbuf;

	IXV_RX_LOCK(rxr);

	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		ptype;
		u16		hlen, plen, hdr, vtag;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		vtag = le16toh(cur->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Make sure all parts of a bad packet are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded++;
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			ixv_rx_discard(rxr, i);
			goto next_desc;
		}

		if (!eop) {
			nextp = i + 1;
			if (nextp == adapter->num_rx_desc)
				nextp = 0;
			nbuf = &rxr->rx_buffers[nextp];
			prefetch(nbuf);
		}
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IXV_RX_HDR)
				hlen = MHLEN;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if it's a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we won't have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				if ((adapter->num_vlans) &&
				    (staterr & IXGBE_RXD_STAT_VP)) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (sendmp != NULL) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL)
			ixv_rx_input(rxr, ifp, sendmp, ptype);

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixv_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixv_rx_unrefreshed(rxr))
		ixv_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IXV_RX_UNLOCK(rxr);

	/*
	** We still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixv_rearm_queues(adapter, (u64)(1 << que->msix));
		return (TRUE);
	}

	return (FALSE);
}
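
/*
 * Note (illustrative): refreshing in batches of 8 amortizes the cost
 * of ixv_refresh_mbufs(), whose tail update is an MMIO write to VFRDT,
 * instead of touching the register once per received frame.
 */
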
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
{
	u16	status = (u16) staterr;
	u8	errors = (u8) (staterr >> 24);
	bool	sctp = FALSE;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}
	if (status & IXGBE_RXD_STAT_L4CS) {
		u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
			if (!sctp)
				mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}
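
/*
 * Note (illustrative): the 32-bit status_error word keeps status bits
 * (DD, EOP, IPCS, L4CS, ...) in its low half and the error bits in its
 * top byte, hence the (u16) truncation for 'status' and the
 * (staterr >> 24) extraction for 'errors' above.
 */
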
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}

/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
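
/*
 * Worked example (illustrative): vtag 100 gives
 * index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, so VLAN 100
 * is bit 4 of shadow word 3; 128 32-bit words cover all 4096 vlan ids.
 */
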
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}

static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}

static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	return;
}

/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
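
/*
 * Worked example (illustrative): queue entry 3 with type 1 (TX) lands
 * in VTIVAR(3 >> 1) = VTIVAR(1) at bit offset (16 * (3 & 1)) +
 * (8 * 1) = 24; each byte of an IVAR register holds one vector,
 * flagged valid by IXGBE_IVAR_ALLOC_VAL.
 */
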
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the Link interrupt */
	ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}

/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}

/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc +=
		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc +=
		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc +=
		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc +=
		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc +=
		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
	}
}

static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
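
/*
 * Note (illustrative): the 36-bit counters wrap at 2^36, so when a
 * fresh read is numerically below the previous one the macro adds the
 * 0x1000000000 (2^36) carry, keeping the accumulated count monotonic
 * across hardware rollovers; UPDATE_STAT_32 does the same with a 2^32
 * carry.
 */
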
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
	    adapter->stats.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
	    adapter->stats.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
	    adapter->stats.vfmprc);
}

/**********************************************************************
 *
 *  This routine is called only when the stats sysctl handler
 *  (ixv_sysctl_stats below) is triggered. It provides a way to take
 *  a look at important statistics maintained by the driver and
 *  hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Std Mbuf Failed = %lu\n",
	    adapter->mbuf_defrag_failed);
	device_printf(dev, "Driver dropped packets = %lu\n",
	    adapter->dropped_pkts);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);

	device_printf(dev, "Good Packets Rcvd = %llu\n",
	    (long long)adapter->stats.vfgprc);
	device_printf(dev, "Good Packets Xmtd = %llu\n",
	    (long long)adapter->stats.vfgptc);
	device_printf(dev, "TSO Transmissions = %lu\n",
	    adapter->tso_tx);
}

/**********************************************************************
 *
 *  This routine is called only when the debug sysctl handler
 *  (ixv_sysctl_debug below) is triggered. It provides a way to take
 *  a look at important statistics maintained by the driver and
 *  hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	struct rx_ring	*rxr;
	struct tx_ring	*txr;
	struct lro_ctrl	*lro;

	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev, "RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev, "RX(%d) Split RX Packets: %lld\n",
		    rxr->me, (long long)rxr->rx_split_packets);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev, "RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (long)adapter->mbx_irq);
	return;
}

static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int		error, result;
	struct adapter	*adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_hw_stats(adapter);
	}
	return (error);
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	int		error, result;
	struct adapter	*adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_debug_info(adapter);
	}
	return (error);
}

/*
** Set flow control using sysctl:
** Flow control values:
** 	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int		error;
	struct adapter	*adapter;

	error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);

	if (error)
		return (error);

	adapter = (struct adapter *) arg1;
	switch (ixv_flow_control) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = ixv_flow_control;
		break;
	case ixgbe_fc_none:
	default:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
	}

	ixgbe_fc_enable(&adapter->hw);
	return (error);
}

static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}