/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.4";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
    /* required last entry */
    {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
    "Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixv_probe(device_t);
static int      ixv_attach(device_t);
static int      ixv_detach(device_t);
static int      ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void     ixv_start(struct ifnet *);
static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int      ixv_mq_start(struct ifnet *, struct mbuf *);
static int      ixv_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void     ixv_qflush(struct ifnet *);
#endif
static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixv_init(void *);
static void     ixv_init_locked(struct adapter *);
static void     ixv_stop(void *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
static void     ixv_identify_hardware(struct adapter *);
static int      ixv_allocate_pci_resources(struct adapter *);
static int      ixv_allocate_msix(struct adapter *);
static int      ixv_allocate_queues(struct adapter *);
static int      ixv_setup_msix(struct adapter *);
static void     ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_setup_interface(device_t, struct adapter *);
static void     ixv_config_link(struct adapter *);

static int      ixv_allocate_transmit_buffers(struct tx_ring *);
static int      ixv_setup_transmit_structures(struct adapter *);
static void     ixv_setup_transmit_ring(struct tx_ring *);
static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_free_transmit_structures(struct adapter *);
static void     ixv_free_transmit_buffers(struct tx_ring *);

static int      ixv_allocate_receive_buffers(struct rx_ring *);
static int      ixv_setup_receive_structures(struct adapter *);
static int      ixv_setup_receive_ring(struct rx_ring *);
static void     ixv_initialize_receive_units(struct adapter *);
static void     ixv_free_receive_structures(struct adapter *);
static void     ixv_free_receive_buffers(struct rx_ring *);

static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static bool     ixv_txeof(struct tx_ring *);
static bool     ixv_rxeof(struct ix_queue *, int);
static void     ixv_rx_checksum(u32, struct mbuf *, u32);
static void     ixv_set_multi(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static void     ixv_refresh_mbufs(struct rx_ring *, int);
static int      ixv_xmit(struct tx_ring *, struct mbuf **);
static int      ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int      ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixv_dma_malloc(struct adapter *, bus_size_t,
                    struct ixv_dma_alloc *, int);
static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void     ixv_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static bool     ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool     ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
static void     ixv_configure_ivars(struct adapter *);
static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void     ixv_setup_vlan_support(struct adapter *);
static void     ixv_register_vlan(void *, struct ifnet *, u16);
static void     ixv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixv_save_stats(struct adapter *);
static void     ixv_init_stats(struct adapter *);
static void     ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
                    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void     ixv_msix_que(void *);
static void     ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void     ixv_handle_que(void *, int);
static void     ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, ixv_probe),
    DEVMETHOD(device_attach, ixv_attach),
    DEVMETHOD(device_detach, ixv_detach),
    DEVMETHOD(device_shutdown, ixv_shutdown),
    DEVMETHOD_END
};

static driver_t ixv_driver = {
    "ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it's off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
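/*
** Example (hypothetical values): the tunables above can be set from
** /boot/loader.conf at boot time using the names registered with
** TUNABLE_INT(), e.g.:
**
**    hw.ixv.enable_aim="1"
**    hw.ixv.rx_process_limit="256"
**    hw.ixv.flow_control="3"
**    hw.ixv.hdr_split="0"
**    hw.ixv.txd="2048"
**    hw.ixv.rxd="2048"
*/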
/*
** Shadow VFTA table: this is needed because
** the real filter table gets cleared during
** a soft reset, and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
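/*
** A minimal sketch of how the shadow table is replayed into the
** hardware after a reset (ixv_setup_vlan_support() performs the
** equivalent work; register macros as used elsewhere in this file):
**
**    for (int i = 0; i < VFTA_SIZE; i++)
**        if (ixv_shadow_vfta[i] != 0)
**            IXGBE_WRITE_REG(&adapter->hw,
**                IXGBE_VFTA(i), ixv_shadow_vfta[i]);
*/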
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  an adapter, based on the PCI vendor/device ID of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
    ixv_vendor_info_t *ent;

    u16 pci_vendor_id = 0;
    u16 pci_device_id = 0;
    u16 pci_subvendor_id = 0;
    u16 pci_subdevice_id = 0;
    char adapter_name[256];

    pci_vendor_id = pci_get_vendor(dev);
    if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
        return (ENXIO);

    pci_device_id = pci_get_device(dev);
    pci_subvendor_id = pci_get_subvendor(dev);
    pci_subdevice_id = pci_get_subdevice(dev);

    ent = ixv_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id) &&
            ((pci_subvendor_id == ent->subvendor_id) ||
             (ent->subvendor_id == 0)) &&
            ((pci_subdevice_id == ent->subdevice_id) ||
             (ent->subdevice_id == 0))) {
            sprintf(adapter_name, "%s, Version - %s",
                ixv_strings[ent->index],
                ixv_driver_version);
            device_set_desc_copy(dev, adapter_name);
            return (BUS_PROBE_DEFAULT);
        }
        ent++;
    }
    return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
    struct adapter *adapter;
    struct ixgbe_hw *hw;
    int error = 0;

    INIT_DEBUGOUT("ixv_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    adapter = device_get_softc(dev);
    adapter->dev = adapter->osdep.dev = dev;
    hw = &adapter->hw;

    /* Core Lock Init */
    IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

    /* SYSCTL APIs */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_stats, "I", "Statistics");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
        &ixv_enable_aim, 1, "Interrupt Moderation");
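    /*
    ** Runtime example (unit number assumed): the handlers registered
    ** above appear under this device's sysctl tree, e.g. from a shell:
    **
    **    sysctl dev.ix.0.stats=1
    **    sysctl dev.ix.0.debug=1
    **    sysctl dev.ix.0.flow_control=3
    **    sysctl dev.ix.0.enable_aim=1
    */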
    /* Set up the timer callout */
    callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

    /* Determine hardware revision */
    ixv_identify_hardware(adapter);

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(adapter)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        error = ENXIO;
        goto err_out;
    }

    /* Do descriptor calc and sanity checks */
    if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
        ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
        device_printf(dev, "TXD config issue, using default!\n");
        adapter->num_tx_desc = DEFAULT_TXD;
    } else
        adapter->num_tx_desc = ixv_txd;

    if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
        ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
        device_printf(dev, "RXD config issue, using default!\n");
        adapter->num_rx_desc = DEFAULT_RXD;
    } else
        adapter->num_rx_desc = ixv_rxd;

    /* Allocate our TX/RX Queues */
    if (ixv_allocate_queues(adapter)) {
        error = ENOMEM;
        goto err_out;
    }

    /*
    ** Initialize the shared code: it is at this
    ** point that the mac type is set.
    */
    error = ixgbe_init_shared_code(hw);
    if (error) {
        device_printf(dev, "Shared Code Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    /* Get Hardware Flow Control setting */
    hw->fc.requested_mode = ixgbe_fc_full;
    hw->fc.pause_time = IXV_FC_PAUSE;
    hw->fc.low_water[0] = IXV_FC_LO;
    hw->fc.high_water[0] = IXV_FC_HI;
    hw->fc.send_xon = TRUE;

    error = ixgbe_init_hw(hw);
    if (error) {
        device_printf(dev, "Hardware Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    error = ixv_allocate_msix(adapter);
    if (error)
        goto err_late;

    /* Setup OS specific network interface */
    ixv_setup_interface(dev, adapter);

    /* Sysctl for limiting the amount of work done in the taskqueue */
    ixv_add_rx_process_limit(adapter, "rx_processing_limit",
        "max number of rx packets to process", &adapter->rx_process_limit,
        ixv_rx_process_limit);

    /* Do the stats setup */
    ixv_save_stats(adapter);
    ixv_init_stats(adapter);

    /* Register for VLAN events */
    adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

    INIT_DEBUGOUT("ixv_attach: end");
    return (0);

err_late:
    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);
err_out:
    ixv_free_pci_resources(adapter);
    return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    struct ix_queue *que = adapter->queues;

    INIT_DEBUGOUT("ixv_detach: begin");

    /* Make sure VLANs are not using the driver */
    if (adapter->ifp->if_vlantrunk != NULL) {
        device_printf(dev, "Vlan in use, detach first\n");
        return (EBUSY);
    }

    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        if (que->tq != NULL) {
            taskqueue_drain(que->tq, &que->que_task);
            taskqueue_free(que->tq);
        }
    }

    /* Drain the Link queue */
    if (adapter->tq != NULL) {
        taskqueue_drain(adapter->tq, &adapter->mbx_task);
        taskqueue_free(adapter->tq);
    }

    /* Unregister VLAN events */
    if (adapter->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    if (adapter->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

    ether_ifdetach(adapter->ifp);
    callout_drain(&adapter->timer);
    ixv_free_pci_resources(adapter);
    bus_generic_detach(dev);
    if_free(adapter->ifp);

    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);

    IXV_CORE_LOCK_DESTROY(adapter);
    return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixv_shutdown(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);
    return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
    struct mbuf *m_head;
    struct adapter *adapter = txr->adapter;

    IXV_TX_LOCK_ASSERT(txr);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;
    if (!adapter->link_active)
        return;

    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (ixv_xmit(txr, &m_head)) {
            if (m_head == NULL)
                break;
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            break;
        }
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, m_head);

        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }
    return;
}

/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        IXV_TX_LOCK(txr);
        ixv_start_locked(txr, ifp);
        IXV_TX_UNLOCK(txr);
    }
    return;
}
#else

/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
    struct adapter *adapter = ifp->if_softc;
    struct ix_queue *que;
    struct tx_ring *txr;
    int i = 0, err = 0;

    /* Which queue to use */
    if ((m->m_flags & M_FLOWID) != 0)
        i = m->m_pkthdr.flowid % adapter->num_queues;

    txr = &adapter->tx_rings[i];
    que = &adapter->queues[i];

    if (IXV_TX_TRYLOCK(txr)) {
        err = ixv_mq_start_locked(ifp, txr, m);
        IXV_TX_UNLOCK(txr);
    } else {
        err = drbr_enqueue(ifp, txr->br, m);
        taskqueue_enqueue(que->tq, &que->que_task);
    }

    return (err);
}

static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
    struct adapter *adapter = txr->adapter;
    struct mbuf *next;
    int enqueued = 0, err = 0;

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || adapter->link_active == 0) {
        if (m != NULL)
            err = drbr_enqueue(ifp, txr->br, m);
        return (err);
    }

    /* Do a clean if descriptors are low */
    if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
        ixv_txeof(txr);

    if (m != NULL) {
        err = drbr_enqueue(ifp, txr->br, m);
        if (err)
            return (err);
    }

    /* Process the queue */
    while ((next = drbr_peek(ifp, txr->br)) != NULL) {
        if ((err = ixv_xmit(txr, &next)) != 0) {
            if (next == NULL)
                drbr_advance(ifp, txr->br);
            else
                drbr_putback(ifp, txr->br, next);
            break;
        }
        drbr_advance(ifp, txr->br);
        enqueued++;
        if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
        if (next->m_flags & M_MCAST)
            if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, next);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
        if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
    }

    if (enqueued > 0) {
        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }

    return (err);
}

/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;
    struct mbuf *m;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
            m_freem(m);
        IXV_TX_UNLOCK(txr);
    }
    if_qflush(ifp);
}
#endif /* __FreeBSD_version < 800000 */
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
    struct ifaddr *ifa = (struct ifaddr *) data;
    bool avoid_reset = FALSE;
#endif
    int error = 0;

    switch (command) {

    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET)
            avoid_reset = TRUE;
#endif
#ifdef INET6
        if (ifa->ifa_addr->sa_family == AF_INET6)
            avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
        /*
        ** Calling init results in link renegotiation,
        ** so we avoid doing it when possible.
        */
        if (avoid_reset) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                ixv_init(adapter);
            if (!(ifp->if_flags & IFF_NOARP))
                arp_ifinit(ifp, ifa);
        } else
            error = ether_ioctl(ifp, command, data);
        break;
#endif
    case SIOCSIFMTU:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
        if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
            error = EINVAL;
        } else {
            IXV_CORE_LOCK(adapter);
            ifp->if_mtu = ifr->ifr_mtu;
            adapter->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFFLAGS:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
        IXV_CORE_LOCK(adapter);
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                ixv_init_locked(adapter);
        } else
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixv_stop(adapter);
        adapter->if_flags = ifp->if_flags;
        IXV_CORE_UNLOCK(adapter);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_disable_intr(adapter);
            ixv_set_multi(adapter);
            ixv_enable_intr(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
        break;
    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
        if (mask & IFCAP_HWCSUM)
            ifp->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            ifp->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        VLAN_CAPABILITIES(ifp);
        break;
    }
    default:
        IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    device_t dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 mhadd, gpie;

    INIT_DEBUGOUT("ixv_init: begin");
    mtx_assert(&adapter->core_mtx, MA_OWNED);
    hw->adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest mac address, User can use a LAA */
    bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
        IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    hw->addr_ctrl.rar_used_count = 1;

    /* Prepare transmit descriptors and buffers */
    if (ixv_setup_transmit_structures(adapter)) {
        device_printf(dev, "Could not setup transmit structures\n");
        ixv_stop(adapter);
        return;
    }

    ixv_initialize_transmit_units(adapter);

    /* Setup Multicast table */
    ixv_set_multi(adapter);

    /*
    ** Determine the correct mbuf pool
    ** for doing jumbo/headersplit
    */
    if (ifp->if_mtu > ETHERMTU)
        adapter->rx_mbuf_sz = MJUMPAGESIZE;
    else
        adapter->rx_mbuf_sz = MCLBYTES;

    /* Prepare receive descriptors and buffers */
    if (ixv_setup_receive_structures(adapter)) {
        device_printf(dev, "Could not setup receive structures\n");
        ixv_stop(adapter);
        return;
    }

    /* Configure RX settings */
    ixv_initialize_receive_units(adapter);

    /* Enable Enhanced MSIX mode */
    gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* Set the various hardware offload abilities */
    ifp->if_hwassist = 0;
    if (ifp->if_capenable & IFCAP_TSO4)
        ifp->if_hwassist |= CSUM_TSO;
    if (ifp->if_capenable & IFCAP_TXCSUM) {
        ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
        ifp->if_hwassist |= CSUM_SCTP;
#endif
    }

    /* Set MTU size */
    if (ifp->if_mtu > ETHERMTU) {
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(adapter);

    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

    /* Set up MSI/X routing */
    ixv_configure_ivars(adapter);

    /* Set up auto-mask */
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

    /* Stats init */
    ixv_init_stats(adapter);

    /* Config/Enable Link */
    ixv_config_link(adapter);

    /* And now turn on interrupts */
    ixv_enable_intr(adapter);

    /* Now inform the stack we're ready */
    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    return;
}

static void
ixv_init(void *arg)
{
    struct adapter *adapter = arg;

    IXV_CORE_LOCK(adapter);
    ixv_init_locked(adapter);
    IXV_CORE_UNLOCK(adapter);
    return;
}
/*
** MSIX Interrupt Handlers and Tasklets
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 queue = 1 << vector;
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u64 queue = (u64)(1 << vector);
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
    u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
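/*
** Worked example: for MSIX vector 2 the helpers above compute
** queue = 1 << 2 = 0x4, so only bit 2 is written to VTEIMS (enable),
** VTEIMC (disable) or VTEICS (rearm), after masking with
** IXGBE_EIMS_RTX_QUEUE so that only valid RX/TX queue bits are set.
*/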
static void
ixv_handle_que(void *context, int pending)
{
    struct ix_queue *que = context;
    struct adapter *adapter = que->adapter;
    struct tx_ring *txr = que->txr;
    struct ifnet *ifp = adapter->ifp;
    bool more;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        more = ixv_rxeof(que, adapter->rx_process_limit);
        IXV_TX_LOCK(txr);
        ixv_txeof(txr);
#if __FreeBSD_version >= 800000
        if (!drbr_empty(ifp, txr->br))
            ixv_mq_start_locked(ifp, txr, NULL);
#else
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            ixv_start_locked(txr, ifp);
#endif
        IXV_TX_UNLOCK(txr);
        if (more) {
            taskqueue_enqueue(que->tq, &que->que_task);
            return;
        }
    }

    /* Reenable this interrupt */
    ixv_enable_queue(adapter, que->msix);
    return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
    struct ix_queue *que = arg;
    struct adapter *adapter = que->adapter;
    struct tx_ring *txr = que->txr;
    struct rx_ring *rxr = que->rxr;
    bool more_tx, more_rx;
    u32 newitr = 0;

    ixv_disable_queue(adapter, que->msix);
    ++que->irqs;

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    IXV_TX_LOCK(txr);
    more_tx = ixv_txeof(txr);
    /*
    ** Make certain that if the stack
    ** has anything queued the task gets
    ** scheduled to handle it.
    */
#if __FreeBSD_version < 800000
    if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
#else
    if (!drbr_empty(adapter->ifp, txr->br))
#endif
        more_tx = 1;
    IXV_TX_UNLOCK(txr);

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    /* Do AIM now? */

    if (ixv_enable_aim == FALSE)
        goto no_calc;
    /*
    ** Do Adaptive Interrupt Moderation:
    **  - Write out last calculated setting
    **  - Calculate based on average size over
    **    the last interval.
    */
    if (que->eitr_setting)
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VTEITR(que->msix),
            que->eitr_setting);

    que->eitr_setting = 0;

    /* Idle, do nothing */
    if ((txr->bytes == 0) && (rxr->bytes == 0))
        goto no_calc;

    if ((txr->bytes) && (txr->packets))
        newitr = txr->bytes/txr->packets;
    if ((rxr->bytes) && (rxr->packets))
        newitr = max(newitr,
            (rxr->bytes / rxr->packets));
    newitr += 24; /* account for hardware frame, crc */

    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200))
        newitr = (newitr / 3);
    else
        newitr = (newitr / 2);

    newitr |= newitr << 16;

    /* save for next interrupt */
    que->eitr_setting = newitr;
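    /*
    ** Worked example (hypothetical traffic): with an average frame of
    ** 1476 bytes, newitr = 1476 + 24 = 1500; min(1500, 3000) leaves
    ** 1500, which falls outside the 300..1200 mid range, so the final
    ** value is 1500 / 2 = 750, mirrored into the high 16 bits for the
    ** next VTEITR write above.
    */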
    /* Reset state */
    txr->bytes = 0;
    txr->packets = 0;
    rxr->bytes = 0;
    rxr->packets = 0;

no_calc:
    if (more_tx || more_rx)
        taskqueue_enqueue(que->tq, &que->que_task);
    else /* Reenable this interrupt */
        ixv_enable_queue(adapter, que->msix);
    return;
}
static void
ixv_msix_mbx(void *arg)
{
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    u32 reg;

    ++adapter->mbx_irq;

    /* First get the cause */
    reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

    /* Link status change */
    if (reg & IXGBE_EICR_LSC)
        taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct adapter *adapter = ifp->if_softc;

    INIT_DEBUGOUT("ixv_media_status: begin");
    IXV_CORE_LOCK(adapter);
    ixv_update_link_status(adapter);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!adapter->link_active) {
        IXV_CORE_UNLOCK(adapter);
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (adapter->link_speed) {
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_FDX;
        break;
    }

    IXV_CORE_UNLOCK(adapter);
    return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifmedia *ifm = &adapter->media;

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(adapter->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *   - return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
    struct adapter *adapter = txr->adapter;
    u32 olinfo_status = 0, cmd_type_len;
    u32 paylen = 0;
    int i, j, error, nsegs;
    int first, last = 0;
    struct mbuf *m_head;
    bus_dma_segment_t segs[32];
    bus_dmamap_t map;
    struct ixv_tx_buf *txbuf, *txbuf_mapped;
    union ixgbe_adv_tx_desc *txd = NULL;

    m_head = *m_headp;

    /* Basic descriptor defines */
    cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
        IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

    if (m_head->m_flags & M_VLANTAG)
        cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

    /*
     * Important to capture the first descriptor
     * used because it will contain the index of
     * the one we tell the hardware to report back
     */
    first = txr->next_avail_desc;
    txbuf = &txr->tx_buffers[first];
    txbuf_mapped = txbuf;
    map = txbuf->map;

    /*
     * Map the packet for DMA.
     */
    error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
        *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

    if (error == EFBIG) {
        struct mbuf *m;

        m = m_defrag(*m_headp, M_NOWAIT);
        if (m == NULL) {
            adapter->mbuf_defrag_failed++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (ENOBUFS);
        }
        *m_headp = m;

        /* Try it again */
        error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
            *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

        if (error == ENOMEM) {
            adapter->no_tx_dma_setup++;
            return (error);
        } else if (error != 0) {
            adapter->no_tx_dma_setup++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (error);
        }
    } else if (error == ENOMEM) {
        adapter->no_tx_dma_setup++;
        return (error);
    } else if (error != 0) {
        adapter->no_tx_dma_setup++;
        m_freem(*m_headp);
        *m_headp = NULL;
        return (error);
    }

    /* Make certain there are enough descriptors */
    if (nsegs > txr->tx_avail - 2) {
        txr->no_desc_avail++;
        error = ENOBUFS;
        goto xmit_fail;
    }

    /*
    ** Set up the appropriate offload context;
    ** this becomes the first descriptor of
    ** the packet.
    */
    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        if (ixv_tso_setup(txr, m_head, &paylen)) {
            cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
            olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
            olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
            olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
        } else
            return (ENXIO);
    } else if (ixv_tx_ctx_setup(txr, m_head))
        olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

    /* Record payload length */
    if (paylen == 0)
        olinfo_status |= m_head->m_pkthdr.len <<
            IXGBE_ADVTXD_PAYLEN_SHIFT;

    i = txr->next_avail_desc;
    for (j = 0; j < nsegs; j++) {
        bus_size_t seglen;
        bus_addr_t segaddr;

        txbuf = &txr->tx_buffers[i];
        txd = &txr->tx_base[i];
        seglen = segs[j].ds_len;
        segaddr = htole64(segs[j].ds_addr);

        txd->read.buffer_addr = segaddr;
        txd->read.cmd_type_len = htole32(txr->txd_cmd |
            cmd_type_len | seglen);
        txd->read.olinfo_status = htole32(olinfo_status);
        last = i; /* descriptor that will get completion IRQ */

        if (++i == adapter->num_tx_desc)
            i = 0;

        txbuf->m_head = NULL;
        txbuf->eop_index = -1;
    }

    txd->read.cmd_type_len |=
        htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
    txr->tx_avail -= nsegs;
    txr->next_avail_desc = i;

    txbuf->m_head = m_head;
    txr->tx_buffers[first].map = txbuf->map;
    txbuf->map = map;
    bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

    /* Set the index of the descriptor that will be marked done */
    txbuf = &txr->tx_buffers[first];
    txbuf->eop_index = last;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /*
     * Advance the Transmit Descriptor Tail (Tdt); this tells the
     * hardware that this frame is available to transmit.
     */
    ++txr->total_packets;
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

    return (0);

xmit_fail:
    bus_dmamap_unload(txr->txtag, txbuf->map);
    return (error);
}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
    u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    u8 *update_ptr;
    struct ifmultiaddr *ifma;
    int mcnt = 0;
    struct ifnet *ifp = adapter->ifp;

    IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_maddr_rlock(ifp);
#endif
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
            &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
            IXGBE_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
    }
#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_maddr_runlock(ifp);
#endif

    update_ptr = mta;

    ixgbe_update_mc_addr_list(&adapter->hw,
        update_ptr, mcnt, ixv_mc_array_itr, TRUE);

    return;
}
/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    u8 *addr = *update_ptr;
    u8 *newptr;

    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;
    return addr;
}
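/*
** Usage note: ixgbe_update_mc_addr_list() invokes this iterator once
** per address, e.g. addr = ixv_mc_array_itr(hw, &update_ptr, &vmdq);
** each call returns the current address and advances update_ptr by
** one 6-byte entry, so the shared code never needs to know the
** layout of the mta array above.
*/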
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
    struct adapter *adapter = arg;
    device_t dev = adapter->dev;
    struct tx_ring *txr = adapter->tx_rings;
    int i;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    ixv_update_link_status(adapter);

    /* Stats Update */
    ixv_update_stats(adapter);

    /*
     * If the interface has been paused
     * then don't do the watchdog check
     */
    if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
        goto out;
    /*
    ** Check for time since any descriptor was cleaned
    */
    for (i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        if (txr->watchdog_check == FALSE) {
            IXV_TX_UNLOCK(txr);
            continue;
        }
        if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
            goto hung;
        IXV_TX_UNLOCK(txr);
    }
out:
    ixv_rearm_queues(adapter, adapter->que_mask);
    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    return;

hung:
    device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
    device_printf(dev, "TX(%d) desc avail = %d,"
        " Next TX to Clean = %d\n",
        txr->me, txr->tx_avail, txr->next_to_clean);
    adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    adapter->watchdog_events++;
    IXV_TX_UNLOCK(txr);
    ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
    struct ifnet *ifp = adapter->ifp;
    struct tx_ring *txr = adapter->tx_rings;
    device_t dev = adapter->dev;

    if (adapter->link_up) {
        if (adapter->link_active == FALSE) {
            if (bootverbose)
                device_printf(dev, "Link is up %d Gbps %s\n",
                    ((adapter->link_speed == 128) ? 10 : 1),
                    "Full Duplex");
            adapter->link_active = TRUE;
            if_link_state_change(ifp, LINK_STATE_UP);
        }
    } else { /* Link down */
        if (adapter->link_active == TRUE) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            if_link_state_change(ifp, LINK_STATE_DOWN);
            adapter->link_active = FALSE;
            for (int i = 0; i < adapter->num_queues;
                i++, txr++)
                txr->watchdog_check = FALSE;
        }
    }

    return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
    struct ifnet *ifp;
    struct adapter *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    ifp = adapter->ifp;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    INIT_DEBUGOUT("ixv_stop: begin\n");
    ixv_disable_intr(adapter);

    /* Tell the stack that the interface is no longer active */
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    ixgbe_reset_hw(hw);
    adapter->hw.adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    u16 pci_cmd_word;

    /*
    ** Make sure BUSMASTER is set, on a VM under
    ** KVM it may not be and will break things.
    */
    pci_enable_busmaster(dev);
    pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

    /* Save off the information about this board */
    adapter->hw.vendor_id = pci_get_vendor(dev);
    adapter->hw.device_id = pci_get_device(dev);
    adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
    adapter->hw.subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    adapter->hw.subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    int error, rid, vector = 0;

    for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
        rid = vector + 1;
        que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (que->res == NULL) {
            device_printf(dev, "Unable to allocate"
                " bus resource: que interrupt [%d]\n", vector);
            return (ENXIO);
        }
        /* Set the handler function */
        error = bus_setup_intr(dev, que->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixv_msix_que, que, &que->tag);
        if (error) {
            que->res = NULL;
            device_printf(dev, "Failed to register QUE handler");
            return (error);
        }
#if __FreeBSD_version >= 800504
        bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
        que->msix = vector;
        adapter->que_mask |= (u64)(1 << que->msix);
        /*
        ** Bind the msix vector, and thus the
        ** ring to the corresponding cpu.
        */
        if (adapter->num_queues > 1)
            bus_bind_intr(dev, que->res, i);

        TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
        que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(adapter->dev));
    }

    /* and Mailbox */
    rid = vector + 1;
    adapter->res = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (!adapter->res) {
        device_printf(dev, "Unable to allocate"
            " bus resource: MBX interrupt [%d]\n", rid);
        return (ENXIO);
    }
    /* Set the mbx handler function */
    error = bus_setup_intr(dev, adapter->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL,
        ixv_msix_mbx, adapter, &adapter->tag);
    if (error) {
        adapter->res = NULL;
        device_printf(dev, "Failed to register LINK handler");
        return (error);
    }
#if __FreeBSD_version >= 800504
    bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
    adapter->mbxvec = vector;
    /* Tasklets for Mailbox */
    TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
    adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
        device_get_nameunit(adapter->dev));

    /*
    ** Due to a broken design, QEMU will fail to properly
    ** enable the guest for MSIX unless the vectors in
    ** the table are all set up, so we must rewrite the
    ** ENABLE in the MSIX control register again at this
    ** point to cause it to successfully initialize us.
    */
    if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;
        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    return (0);
}
/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int rid, want;

    /* First try MSI/X */
    rid = PCIR_BAR(MSIX_BAR);
    adapter->msix_mem = bus_alloc_resource_any(dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (adapter->msix_mem == NULL) {
        device_printf(adapter->dev,
            "Unable to map MSIX table\n");
        return (ENXIO);
    }

    /*
    ** Want two vectors: one for a queue,
    ** plus an additional for mailbox.
    */
    want = 2;
    if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
        device_printf(adapter->dev,
            "Using MSIX interrupts with %d vectors\n", want);
        return (want);
    }

    /* Release in case alloc was insufficient */
    pci_release_msi(dev);
    if (adapter->msix_mem != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rid, adapter->msix_mem);
        adapter->msix_mem = NULL;
    }
    device_printf(adapter->dev, "MSIX config error\n");
    return (ENXIO);
}
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
    int rid;
    device_t dev = adapter->dev;

    rid = PCIR_BAR(0);
    adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(adapter->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    adapter->osdep.mem_bus_space_tag =
        rman_get_bustag(adapter->pci_mem);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->pci_mem);
    adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

    adapter->num_queues = 1;
    adapter->hw.back = &adapter->osdep;

    /*
    ** Now setup MSI/X, it should
    ** return us the number of
    ** configured vectors.
    */
    adapter->msix = ixv_setup_msix(adapter);
    if (adapter->msix == ENXIO)
        return (ENXIO);
    else
        return (0);
}
static void
ixv_free_pci_resources(struct adapter *adapter)
{
    struct ix_queue *que = adapter->queues;
    device_t dev = adapter->dev;
    int rid, memrid;

    memrid = PCIR_BAR(MSIX_BAR);

    /*
    ** There is a slight possibility of a failure mode
    ** in attach that will result in entering this function
    ** before interrupt resources have been initialized, and
    ** in that case we do not want to execute the loops below.
    ** We can detect this reliably by the state of the adapter
    ** res pointer.
    */
    if (adapter->res == NULL)
        goto mem;

    /*
    **  Release all msix queue resources:
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        rid = que->msix + 1;
        if (que->tag != NULL) {
            bus_teardown_intr(dev, que->res, que->tag);
            que->tag = NULL;
        }
        if (que->res != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
    }

    /* Clean the Legacy or Link interrupt last */
    if (adapter->mbxvec) /* we are doing MSIX */
        rid = adapter->mbxvec + 1;
    else
        (adapter->msix != 0) ? (rid = 1) : (rid = 0);

    if (adapter->tag != NULL) {
        bus_teardown_intr(dev, adapter->res, adapter->tag);
        adapter->tag = NULL;
    }
    if (adapter->res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
    if (adapter->msix)
        pci_release_msi(dev);

    if (adapter->msix_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            memrid, adapter->msix_mem);

    if (adapter->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), adapter->pci_mem);

    return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
    struct ifnet *ifp;

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    ifp = adapter->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL)
        panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_baudrate = 1000000000;
    ifp->if_init = ixv_init;
    ifp->if_softc = adapter;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
    ifp->if_transmit = ixv_mq_start;
    ifp->if_qflush = ixv_qflush;
#else
    ifp->if_start = ixv_start;
#endif
    ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

    ether_ifattach(ifp, adapter->hw.mac.addr);

    adapter->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
                         |  IFCAP_VLAN_HWTSO
                         |  IFCAP_VLAN_MTU;
    ifp->if_capenable = ifp->if_capabilities;

    /* Don't enable LRO by default */
    ifp->if_capabilities |= IFCAP_LRO;

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
        ixv_media_status);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

    return;
}
static void
ixv_config_link(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 autoneg, err = 0;

    if (hw->mac.ops.check_link)
        err = hw->mac.ops.check_link(hw, &autoneg,
            &adapter->link_up, FALSE);
    if (err)
        goto out;

    if (hw->mac.ops.setup_link)
        err = hw->mac.ops.setup_link(hw,
            autoneg, adapter->link_up);
out:
    return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    if (error)
        return;
    *(bus_addr_t *) arg = segs->ds_addr;
    return;
}

static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
    struct ixv_dma_alloc *dma, int mapflags)
{
    device_t dev = adapter->dev;
    int r;

    r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
            DBA_ALIGN, 0,           /* alignment, bounds */
            BUS_SPACE_MAXADDR,      /* lowaddr */
            BUS_SPACE_MAXADDR,      /* highaddr */
            NULL, NULL,             /* filter, filterarg */
            size,                   /* maxsize */
            1,                      /* nsegments */
            size,                   /* maxsegsize */
            BUS_DMA_ALLOCNOW,       /* flags */
            NULL,                   /* lockfunc */
            NULL,                   /* lockfuncarg */
            &dma->dma_tag);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dma_tag_create failed; "
            "error %u\n", r);
        goto fail_0;
    }
    r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
        BUS_DMA_NOWAIT, &dma->dma_map);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dmamem_alloc failed; "
            "error %u\n", r);
        goto fail_1;
    }
    r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
        size,
        ixv_dmamap_cb,
        &dma->dma_paddr,
        mapflags | BUS_DMA_NOWAIT);
    if (r != 0) {
        device_printf(dev, "ixv_dma_malloc: bus_dmamap_load failed; "
            "error %u\n", r);
        goto fail_2;
    }
    dma->dma_size = size;
    return (0);
fail_2:
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
    bus_dma_tag_destroy(dma->dma_tag);
fail_0:
    dma->dma_tag = NULL;
    return (r);
}

static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
    bus_dmamap_sync(dma->dma_tag, dma->dma_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dma->dma_tag, dma->dma_map);
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
    bus_dma_tag_destroy(dma->dma_tag);
    return;
}
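/*
** Typical use of the pair above (as done in ixv_allocate_queues()):
** a descriptor ring is allocated once at attach with
**    ixv_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT);
** and released on teardown with
**    ixv_dma_free(adapter, &txr->txdma);
** afterwards dma_vaddr holds the kernel virtual address of the ring
** and dma_paddr the bus address that is programmed into the hardware.
*/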
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each, called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_queue *que;
    struct tx_ring *txr;
    struct rx_ring *rxr;
    int rsize, tsize, error = 0;
    int txconf = 0, rxconf = 0;

    /* First allocate the top level queue structs */
    if (!(adapter->queues =
        (struct ix_queue *) malloc(sizeof(struct ix_queue) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate queue memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* First allocate the TX ring struct memory */
    if (!(adapter->tx_rings =
        (struct tx_ring *) malloc(sizeof(struct tx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate TX ring memory\n");
        error = ENOMEM;
        goto tx_fail;
    }

    /* Next allocate the RX */
    if (!(adapter->rx_rings =
        (struct rx_ring *) malloc(sizeof(struct rx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate RX ring memory\n");
        error = ENOMEM;
        goto rx_fail;
    }

    /* For the ring itself */
    tsize = roundup2(adapter->num_tx_desc *
        sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

    /*
     * Now set up the TX queues; txconf is needed to handle the
     * possibility that things fail midcourse and we need to
     * undo memory gracefully
     */
    for (int i = 0; i < adapter->num_queues; i++, txconf++) {
        /* Set up some basics */
        txr = &adapter->tx_rings[i];
        txr->adapter = adapter;
        txr->me = i;

        /* Initialize the TX side lock */
        snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(dev), txr->me);
        mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, tsize,
            &txr->txdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate TX Descriptor memory\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
        txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
        bzero((void *)txr->tx_base, tsize);

        /* Now allocate transmit buffers for the ring */
        if (ixv_allocate_transmit_buffers(txr)) {
            device_printf(dev,
                "Critical Failure setting up transmit buffers\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#if __FreeBSD_version >= 800000
        /* Allocate a buf ring */
        txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
            M_WAITOK, &txr->tx_mtx);
        if (txr->br == NULL) {
            device_printf(dev,
                "Critical Failure setting up buf ring\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#endif
    }

    /*
     * Next the RX queues...
     */
    rsize = roundup2(adapter->num_rx_desc *
        sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
    for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
        rxr = &adapter->rx_rings[i];
        /* Set up some basics */
        rxr->adapter = adapter;
        rxr->me = i;

        /* Initialize the RX side lock */
        snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(dev), rxr->me);
        mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, rsize,
            &rxr->rxdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate RX Descriptor memory\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
        rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
        bzero((void *)rxr->rx_base, rsize);

        /* Allocate receive buffers for the ring */
        if (ixv_allocate_receive_buffers(rxr)) {
            device_printf(dev,
                "Critical Failure setting up receive buffers\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
    }

    /*
    ** Finally set up the queue holding structs
    */
    for (int i = 0; i < adapter->num_queues; i++) {
        que = &adapter->queues[i];
        que->adapter = adapter;
        que->txr = &adapter->tx_rings[i];
        que->rxr = &adapter->rx_rings[i];
    }

    return (0);

err_rx_desc:
    for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
        ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
    for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
        ixv_dma_free(adapter, &txr->txdma);
    free(adapter->rx_rings, M_DEVBUF);
rx_fail:
    free(adapter->tx_rings, M_DEVBUF);
tx_fail:
    free(adapter->queues, M_DEVBUF);
fail:
    return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach; setup is done on every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    device_t dev = adapter->dev;
    struct ixv_tx_buf *txbuf;
    int error, i;

    /*
     * Setup DMA descriptor areas.
     */
    if ((error = bus_dma_tag_create(
            bus_get_dma_tag(adapter->dev),  /* parent */
            1, 0,                   /* alignment, bounds */
            BUS_SPACE_MAXADDR,      /* lowaddr */
            BUS_SPACE_MAXADDR,      /* highaddr */
            NULL, NULL,             /* filter, filterarg */
            IXV_TSO_SIZE,           /* maxsize */
            32,                     /* nsegments */
            PAGE_SIZE,              /* maxsegsize */
            0,                      /* flags */
            NULL,                   /* lockfunc */
            NULL,                   /* lockfuncarg */
            &txr->txtag))) {
        device_printf(dev, "Unable to allocate TX DMA tag\n");
        goto fail;
    }

    if (!(txr->tx_buffers =
        (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
        adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate tx_buffer memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* Create the descriptor buffer dma maps */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
        if (error != 0) {
            device_printf(dev, "Unable to create TX DMA map\n");
            goto fail;
        }
    }

    return 0;
fail:
    /* We free all; this handles the case where we are in the middle */
    ixv_free_transmit_structures(adapter);
    return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    struct ixv_tx_buf *txbuf;
    int i;

    /* Clear the old ring contents */
    IXV_TX_LOCK(txr);
    bzero((void *)txr->tx_base,
        (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
    txr->next_avail_desc = 0;
    txr->next_to_clean = 0;

    /* Free any existing tx buffers. */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        if (txbuf->m_head != NULL) {
            bus_dmamap_sync(txr->txtag, txbuf->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txtag, txbuf->map);
            m_freem(txbuf->m_head);
            txbuf->m_head = NULL;
        }
        /* Clear the EOP index */
        txbuf->eop_index = -1;
    }

    /* Set number of descriptors available */
    txr->tx_avail = adapter->num_tx_desc;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;

    for (int i = 0; i < adapter->num_queues; i++, txr++)
        ixv_setup_transmit_ring(txr);

    return (0);
}
2247 /*********************************************************************
2249 * Enable transmit unit.
2251 **********************************************************************/
2253 ixv_initialize_transmit_units(struct adapter *adapter)
2255 struct tx_ring *txr = adapter->tx_rings;
2256 struct ixgbe_hw *hw = &adapter->hw;
2259 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2260 u64 tdba = txr->txdma.dma_paddr;
2261 u32 txctrl, txdctl;
2263 /* Set WTHRESH to 8, burst writeback */
2264 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2265 txdctl |= (8 << 16);
2266 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2268 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2269 txdctl |= IXGBE_TXDCTL_ENABLE;
2270 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2272 /* Set the HW Tx Head and Tail indices */
2273 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2274 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2276 /* Setup Transmit Descriptor Cmd Settings */
2277 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2278 txr->watchdog_check = FALSE;
2280 /* Set Ring parameters */
2281 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2282 (tdba & 0x00000000ffffffffULL));
2283 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
2284 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2285 adapter->num_tx_desc *
2286 sizeof(struct ixgbe_legacy_tx_desc));
2287 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2288 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2289 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
2290 }
2295 }
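/*
** Editor's sketch: how the 64-bit ring base address is split across the
** 32-bit VFTDBAL/VFTDBAH register pair above. The helper name is
** hypothetical.
*/
#if 0	/* illustrative only */
static inline void
example_split_dma_addr(u64 tdba, u32 *bal, u32 *bah)
{
	*bal = (u32)(tdba & 0x00000000ffffffffULL);	/* low 32 bits */
	*bah = (u32)(tdba >> 32);			/* high 32 bits */
}
#endif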
2296 /*********************************************************************
2298 * Free all transmit rings.
2300 **********************************************************************/
2301 static void
2302 ixv_free_transmit_structures(struct adapter *adapter)
2303 {
2304 struct tx_ring *txr = adapter->tx_rings;
2306 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2307 	IXV_TX_LOCK(txr);
2308 	ixv_free_transmit_buffers(txr);
2309 	ixv_dma_free(adapter, &txr->txdma);
2310 	IXV_TX_UNLOCK(txr);
2311 	IXV_TX_LOCK_DESTROY(txr);
2312 }
2313 free(adapter->tx_rings, M_DEVBUF);
2314 }
2316 /*********************************************************************
2318 * Free transmit ring related data structures.
2320 **********************************************************************/
2321 static void
2322 ixv_free_transmit_buffers(struct tx_ring *txr)
2323 {
2324 struct adapter *adapter = txr->adapter;
2325 struct ixv_tx_buf *tx_buffer;
2326 int i;
2328 INIT_DEBUGOUT("free_transmit_ring: begin");
2330 if (txr->tx_buffers == NULL)
2331 	return;
2333 tx_buffer = txr->tx_buffers;
2334 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2335 if (tx_buffer->m_head != NULL) {
2336 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2337 BUS_DMASYNC_POSTWRITE);
2338 bus_dmamap_unload(txr->txtag,
2339     tx_buffer->map);
2340 m_freem(tx_buffer->m_head);
2341 tx_buffer->m_head = NULL;
2342 if (tx_buffer->map != NULL) {
2343 	bus_dmamap_destroy(txr->txtag,
2344 	    tx_buffer->map);
2345 	tx_buffer->map = NULL;
2346 }
2347 } else if (tx_buffer->map != NULL) {
2348 	bus_dmamap_unload(txr->txtag,
2349 	    tx_buffer->map);
2350 	bus_dmamap_destroy(txr->txtag,
2351 	    tx_buffer->map);
2352 	tx_buffer->map = NULL;
2353 }
2354 }
2355 #if __FreeBSD_version >= 800000
2356 if (txr->br != NULL)
2357 buf_ring_free(txr->br, M_DEVBUF);
2358 #endif
2359 if (txr->tx_buffers != NULL) {
2360 free(txr->tx_buffers, M_DEVBUF);
2361 txr->tx_buffers = NULL;
2363 if (txr->txtag != NULL) {
2364 	bus_dma_tag_destroy(txr->txtag);
2365 	txr->txtag = NULL;
2366 }
2368 return;
2369 }
2370 /*********************************************************************
2372 * Advanced Context Descriptor setup for VLAN or CSUM
2374 **********************************************************************/
2376 static bool
2377 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2378 {
2379 struct adapter *adapter = txr->adapter;
2380 struct ixgbe_adv_tx_context_desc *TXD;
2381 struct ixv_tx_buf *tx_buffer;
2382 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2383 struct ether_vlan_header *eh;
2384 struct ip *ip;
2385 struct ip6_hdr *ip6;
2386 int ehdrlen, ip_hlen = 0;
2387 u16 etype;
2388 u8 ipproto = 0;
2389 bool offload = TRUE;
2390 int ctxd = txr->next_avail_desc;
2391 u16 vtag = 0;
2394 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2395 	offload = FALSE;
2398 tx_buffer = &txr->tx_buffers[ctxd];
2399 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2401 /*
2402 ** In advanced descriptors the vlan tag must
2403 ** be placed into the descriptor itself.
2404 */
2405 if (mp->m_flags & M_VLANTAG) {
2406 vtag = htole16(mp->m_pkthdr.ether_vtag);
2407 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2408 } else if (offload == FALSE)
2409 	return FALSE;
2411 /*
2412 * Determine where frame payload starts.
2413 * Jump over vlan headers if already present,
2414 * helpful for QinQ too.
2415 */
2416 eh = mtod(mp, struct ether_vlan_header *);
2417 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2418 etype = ntohs(eh->evl_proto);
2419 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2420 } else {
2421 	etype = ntohs(eh->evl_encap_proto);
2422 	ehdrlen = ETHER_HDR_LEN;
2423 }
2425 /* Set the ether header length */
2426 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2428 switch (etype) {
2429 case ETHERTYPE_IP:
2430 	ip = (struct ip *)(mp->m_data + ehdrlen);
2431 ip_hlen = ip->ip_hl << 2;
2432 if (mp->m_len < ehdrlen + ip_hlen)
2433 	return FALSE;
2434 ipproto = ip->ip_p;
2435 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2436 break;
2437 case ETHERTYPE_IPV6:
2438 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2439 ip_hlen = sizeof(struct ip6_hdr);
2440 if (mp->m_len < ehdrlen + ip_hlen)
2441 	return FALSE;
2442 ipproto = ip6->ip6_nxt;
2443 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2444 break;
2445 default:
2446 	offload = FALSE;
2447 	break;
2448 }
2450 vlan_macip_lens |= ip_hlen;
2451 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2453 switch (ipproto) {
2454 case IPPROTO_TCP:
2455 	if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2456 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2457 	break;
2459 case IPPROTO_UDP:
2460 	if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2461 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2462 	break;
2464 #if __FreeBSD_version >= 800000
2465 case IPPROTO_SCTP:
2466 	if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
2467 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2468 	break;
2469 #endif
2470 default:
2471 	offload = FALSE;
2472 	break;
2473 }
2475 /* Now copy bits into descriptor */
2476 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2477 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2478 TXD->seqnum_seed = htole32(0);
2479 TXD->mss_l4len_idx = htole32(0);
2481 tx_buffer->m_head = NULL;
2482 tx_buffer->eop_index = -1;
2484 /* We've consumed the first desc, adjust counters */
2485 if (++ctxd == adapter->num_tx_desc)
2486 	ctxd = 0;
2487 txr->next_avail_desc = ctxd;
2489 return (offload);
2490 }
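/*
** Editor's sketch: the field packing performed above. The context
** descriptor's vlan_macip_lens word carries the VLAN tag, the MAC
** header length, and the IP header length in one 32-bit value.
** Hypothetical helper; the shift names are the driver's real constants.
*/
#if 0	/* illustrative only */
static inline u32
example_pack_vlan_macip_lens(u16 vtag, int ehdrlen, int ip_hlen)
{
	u32 v = 0;

	v |= ((u32)vtag << IXGBE_ADVTXD_VLAN_SHIFT);	  /* VLAN tag */
	v |= ((u32)ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT); /* MAC hdr len */
	v |= (u32)ip_hlen;				  /* IP hdr len */
	return (v);
}
#endif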
2493 /**********************************************************************
2495 * Setup work for hardware segmentation offload (TSO) on
2496 * adapters using advanced tx descriptors
2498 **********************************************************************/
2499 static bool
2500 ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2501 {
2502 struct adapter *adapter = txr->adapter;
2503 struct ixgbe_adv_tx_context_desc *TXD;
2504 struct ixv_tx_buf *tx_buffer;
2505 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2506 u32 mss_l4len_idx = 0;
2507 u16 vtag = 0;
2508 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2509 struct ether_vlan_header *eh;
2510 struct ip *ip;
2511 struct tcphdr *th;
2514 /*
2515 * Determine where frame payload starts.
2516 * Jump over vlan headers if already present
2517 */
2518 eh = mtod(mp, struct ether_vlan_header *);
2519 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2520 	ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2521 else
2522 	ehdrlen = ETHER_HDR_LEN;
2524 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2525 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2526 	return FALSE;
2528 ctxd = txr->next_avail_desc;
2529 tx_buffer = &txr->tx_buffers[ctxd];
2530 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2532 ip = (struct ip *)(mp->m_data + ehdrlen);
2533 if (ip->ip_p != IPPROTO_TCP)
2534 return FALSE; /* 0 */
2536 ip_hlen = ip->ip_hl << 2;
2537 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2538 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2539 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2540 tcp_hlen = th->th_off << 2;
2541 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2543 /* This is used in the transmit desc in encap */
2544 *paylen = mp->m_pkthdr.len - hdrlen;
2546 /* VLAN MACLEN IPLEN */
2547 if (mp->m_flags & M_VLANTAG) {
2548 vtag = htole16(mp->m_pkthdr.ether_vtag);
2549 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2550 }
2552 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2553 vlan_macip_lens |= ip_hlen;
2554 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2556 /* ADV DTYPE TUCMD */
2557 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2558 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2559 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2560 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2564 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2565 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2566 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2568 TXD->seqnum_seed = htole32(0);
2569 tx_buffer->m_head = NULL;
2570 tx_buffer->eop_index = -1;
2572 if (++ctxd == adapter->num_tx_desc)
2573 	ctxd = 0;
2576 txr->next_avail_desc = ctxd;
2578 return TRUE;
2579 }
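/*
** Editor's sketch: the TSO length word assembled above. The MSS and
** the TCP header length share one 32-bit field; e.g. a 1448-byte MSS
** with a 20-byte TCP header packs both into mss_l4len_idx using the
** driver's shift constants. Hypothetical helper.
*/
#if 0	/* illustrative only */
static inline u32
example_pack_mss_l4len(u32 mss, u32 tcp_hlen)
{
	return (mss << IXGBE_ADVTXD_MSS_SHIFT) |
	       (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
}
#endif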
2581 /**********************************************************************
2583 * Examine each tx_buffer in the used queue. If the hardware is done
2584 * processing the packet then free associated resources. The
2585 * tx_buffer is put back on the free queue.
2587 **********************************************************************/
2588 static bool
2589 ixv_txeof(struct tx_ring *txr)
2590 {
2591 struct adapter *adapter = txr->adapter;
2592 struct ifnet *ifp = adapter->ifp;
2593 u32 first, last, done;
2594 struct ixv_tx_buf *tx_buffer;
2595 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2597 mtx_assert(&txr->tx_mtx, MA_OWNED);
2599 if (txr->tx_avail == adapter->num_tx_desc)
2600 	return FALSE;
2602 first = txr->next_to_clean;
2603 tx_buffer = &txr->tx_buffers[first];
2604 /* For cleanup we just use legacy struct */
2605 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2606 last = tx_buffer->eop_index;
2607 if (last == -1)
2608 	return FALSE;
2609 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2611 /*
2612 ** Get the index of the first descriptor
2613 ** BEYOND the EOP and call that 'done';
2614 ** this keeps the comparison in the
2615 ** inner while loop below simple.
2616 */
2617 if (++last == adapter->num_tx_desc) last = 0;
2618 done = last;
2620 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2621 BUS_DMASYNC_POSTREAD);
2622 /*
2623 ** Only the EOP descriptor of a packet now has the DD
2624 ** bit set, this is what we look for...
2625 */
2626 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2627 /* We clean the range of the packet */
2628 while (first != done) {
2629 tx_desc->upper.data = 0;
2630 tx_desc->lower.data = 0;
2631 tx_desc->buffer_addr = 0;
2632 ++txr->tx_avail;
2634 if (tx_buffer->m_head) {
2635 bus_dmamap_sync(txr->txtag,
2636     tx_buffer->map,
2637     BUS_DMASYNC_POSTWRITE);
2638 bus_dmamap_unload(txr->txtag,
2639     tx_buffer->map);
2640 m_freem(tx_buffer->m_head);
2641 tx_buffer->m_head = NULL;
2642 tx_buffer->map = NULL;
2643 }
2644 tx_buffer->eop_index = -1;
2645 txr->watchdog_time = ticks;
2647 if (++first == adapter->num_tx_desc)
2648 	first = 0;
2650 tx_buffer = &txr->tx_buffers[first];
2651 tx_desc =
2652     (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2653 }
2654 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2655 /* See if there is more work now */
2656 last = tx_buffer->eop_index;
2657 if (last != -1) {
2658 	eop_desc =
2659 	    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2660 	/* Get next done point */
2661 	if (++last == adapter->num_tx_desc) last = 0;
2662 	done = last;
2663 } else
2664 	break;
2665 }
2666 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2667 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2669 txr->next_to_clean = first;
2672 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2673 * it is OK to send packets. If there are no pending descriptors,
2674 * clear the timeout. Otherwise, if some descriptors have been freed,
2675 * restart the timeout.
2677 if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2678 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2679 if (txr->tx_avail == adapter->num_tx_desc) {
2680 	txr->watchdog_check = FALSE;
2681 	return FALSE;
2682 }
2683 }
2685 return TRUE;
2686 }
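/*
** Editor's sketch: the cleanup walk above in outline. 'first' chases
** descriptors from next_to_clean up to 'done' (one past the EOP whose
** DD bit the hardware set), freeing mbufs and recycling entries;
** every index wraps modulo num_tx_desc. Hypothetical helper showing
** just the wrap step shared by 'first' and 'last'.
*/
#if 0	/* illustrative only */
static inline u32
example_next_index(u32 i, u32 num_desc)
{
	return (++i == num_desc) ? 0 : i;
}
#endif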
2688 /*********************************************************************
2690 * Refresh mbuf buffers for RX descriptor rings
2691 * - now keeps its own state, so discards due to resource
2692 *   exhaustion are unnecessary; if an mbuf cannot be obtained
2693 *   it just returns, keeping its placeholder, so it can simply
2694 *   be called again to try later.
2696 **********************************************************************/
2697 static void
2698 ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2699 {
2700 struct adapter *adapter = rxr->adapter;
2701 bus_dma_segment_t hseg[1];
2702 bus_dma_segment_t pseg[1];
2703 struct ixv_rx_buf *rxbuf;
2704 struct mbuf *mh, *mp;
2705 int i, j, nsegs, error;
2706 bool refreshed = FALSE;
2708 i = j = rxr->next_to_refresh;
2709 /* Get the control variable, one beyond refresh point */
2710 if (++j == adapter->num_rx_desc)
2711 	j = 0;
2712 while (j != limit) {
2713 rxbuf = &rxr->rx_buffers[i];
2714 if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2715 mh = m_gethdr(M_NOWAIT, MT_DATA);
2716 if (mh == NULL)
2717 	goto update;
2718 mh->m_pkthdr.len = mh->m_len = MHLEN;
2720 mh->m_flags |= M_PKTHDR;
2721 m_adj(mh, ETHER_ALIGN);
2722 /* Get the memory mapping */
2723 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2724 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
2725 if (error != 0) {
2726 	printf("GET BUF: dmamap load"
2727 	    " failure - %d\n", error);
2728 	m_free(mh);
2729 	goto update;
2730 }
2732 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2733 BUS_DMASYNC_PREREAD);
2734 rxr->rx_base[i].read.hdr_addr =
2735 htole64(hseg[0].ds_addr);
2736 rxbuf->m_head = mh;
2737 }
2738 if (rxbuf->m_pack == NULL) {
2739 mp = m_getjcl(M_NOWAIT, MT_DATA,
2740 M_PKTHDR, adapter->rx_mbuf_sz);
2741 if (mp == NULL)
2742 	goto update;
2746 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2747 /* Get the memory mapping */
2748 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2749 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
2750 if (error != 0) {
2751 	printf("GET BUF: dmamap load"
2752 	    " failure - %d\n", error);
2753 	m_free(mp);
2754 	rxbuf->m_pack = NULL;
2755 	goto update;
2756 }
2757 refreshed = TRUE;
2758 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2759 BUS_DMASYNC_PREREAD);
2760 rxr->rx_base[i].read.pkt_addr =
2761 htole64(pseg[0].ds_addr);
2762 rxbuf->m_pack = mp;
2763 }
2764 rxr->next_to_refresh = i = j;
2765 /* Calculate next index */
2766 if (++j == adapter->num_rx_desc)
2767 	j = 0;
2768 }
2769 update:
2770 if (refreshed) /* update tail index */
2771 IXGBE_WRITE_REG(&adapter->hw,
2772 IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
2773 return;
2774 }
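/*
** Editor's sketch: why the refresh loop above runs 'j' one slot ahead
** of 'i'. 'i' is the descriptor being refreshed and 'j' its
** precalculated successor; the loop stops when 'j' reaches 'limit',
** leaving next_to_refresh at the last slot actually replenished.
** Hypothetical helper for the successor calculation.
*/
#if 0	/* illustrative only */
static inline int
example_next_to_refresh(int i, int num_rx_desc)
{
	return (i + 1 == num_rx_desc) ? 0 : i + 1;
}
#endif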
2776 /*********************************************************************
2778 * Allocate memory for rx_buffer structures. Since we use one
2779 * rx_buffer per received packet, the maximum number of rx_buffer's
2780 * that we'll need is equal to the number of receive descriptors
2781 * that we've allocated.
2783 **********************************************************************/
2784 static int
2785 ixv_allocate_receive_buffers(struct rx_ring *rxr)
2786 {
2787 struct adapter *adapter = rxr->adapter;
2788 device_t dev = adapter->dev;
2789 struct ixv_rx_buf *rxbuf;
2790 int i, bsize, error;
2792 bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2793 if (!(rxr->rx_buffers =
2794 (struct ixv_rx_buf *) malloc(bsize,
2795 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2796 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2801 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2802     1, 0,              /* alignment, bounds */
2803     BUS_SPACE_MAXADDR, /* lowaddr */
2804     BUS_SPACE_MAXADDR, /* highaddr */
2805     NULL, NULL,        /* filter, filterarg */
2806     MSIZE,             /* maxsize */
2807     1,                 /* nsegments */
2808     MSIZE,             /* maxsegsize */
2809     0,                 /* flags */
2810     NULL,              /* lockfunc */
2811     NULL,              /* lockfuncarg */
2812     &rxr->htag))) {
2813     device_printf(dev, "Unable to create RX head DMA tag\n");
2814     goto fail;
2815 }
2817 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2818     1, 0,              /* alignment, bounds */
2819     BUS_SPACE_MAXADDR, /* lowaddr */
2820     BUS_SPACE_MAXADDR, /* highaddr */
2821     NULL, NULL,        /* filter, filterarg */
2822     MJUMPAGESIZE,      /* maxsize */
2823     1,                 /* nsegments */
2824     MJUMPAGESIZE,      /* maxsegsize */
2825     0,                 /* flags */
2826     NULL,              /* lockfunc */
2827     NULL,              /* lockfuncarg */
2828     &rxr->ptag))) {
2829     device_printf(dev, "Unable to create RX pkt DMA tag\n");
2830     goto fail;
2831 }
2833 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2834 	rxbuf = &rxr->rx_buffers[i];
2835 	error = bus_dmamap_create(rxr->htag,
2836 	    BUS_DMA_NOWAIT, &rxbuf->hmap);
2837 	if (error) {
2838 		device_printf(dev, "Unable to create RX head map\n");
2839 		goto fail;
2840 	}
2841 	error = bus_dmamap_create(rxr->ptag,
2842 	    BUS_DMA_NOWAIT, &rxbuf->pmap);
2843 	if (error) {
2844 		device_printf(dev, "Unable to create RX pkt map\n");
2845 		goto fail;
2846 	}
2847 }
2849 return (0);
2851 fail:
2852 /* Frees all, but can handle partial completion */
2853 ixv_free_receive_structures(adapter);
2854 return (error);
2855 }
2857 static void
2858 ixv_free_receive_ring(struct rx_ring *rxr)
2859 {
2860 struct adapter *adapter;
2861 struct ixv_rx_buf *rxbuf;
2862 int i;
2864 adapter = rxr->adapter;
2865 for (i = 0; i < adapter->num_rx_desc; i++) {
2866 rxbuf = &rxr->rx_buffers[i];
2867 if (rxbuf->m_head != NULL) {
2868 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2869 BUS_DMASYNC_POSTREAD);
2870 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
2871 rxbuf->m_head->m_flags |= M_PKTHDR;
2872 m_freem(rxbuf->m_head);
2873 }
2874 if (rxbuf->m_pack != NULL) {
2875 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2876 BUS_DMASYNC_POSTREAD);
2877 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
2878 rxbuf->m_pack->m_flags |= M_PKTHDR;
2879 m_freem(rxbuf->m_pack);
2880 }
2881 rxbuf->m_head = NULL;
2882 rxbuf->m_pack = NULL;
2883 }
2884 }
2887 /*********************************************************************
2889 * Initialize a receive ring and its buffers.
2891 **********************************************************************/
2892 static int
2893 ixv_setup_receive_ring(struct rx_ring *rxr)
2894 {
2895 struct adapter *adapter;
2896 struct ifnet *ifp;
2897 device_t dev;
2898 struct ixv_rx_buf *rxbuf;
2899 bus_dma_segment_t pseg[1], hseg[1];
2900 struct lro_ctrl *lro = &rxr->lro;
2901 int rsize, nsegs, error = 0;
2903 adapter = rxr->adapter;
2904 ifp = adapter->ifp;
2905 dev = adapter->dev;
2907 /* Clear the ring contents */
2909 rsize = roundup2(adapter->num_rx_desc *
2910 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2911 bzero((void *)rxr->rx_base, rsize);
2913 /* Free current RX buffer structs and their mbufs */
2914 ixv_free_receive_ring(rxr);
2916 /* Configure header split? */
2917 if (ixv_header_split)
2918 rxr->hdr_split = TRUE;
2920 /* Now replenish the mbufs */
2921 for (int j = 0; j != adapter->num_rx_desc; ++j) {
2922 struct mbuf *mh, *mp;
2924 rxbuf = &rxr->rx_buffers[j];
2925 /*
2926 ** Don't allocate mbufs if we're not
2927 ** doing header split; it's wasteful
2928 */
2929 if (rxr->hdr_split == FALSE)
2930 	goto skip_head;
2932 /* First the header */
2933 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
2934 if (rxbuf->m_head == NULL) {
2935 	error = ENOBUFS;
2936 	goto fail;
2937 }
2938 m_adj(rxbuf->m_head, ETHER_ALIGN);
2939 mh = rxbuf->m_head;
2940 mh->m_len = mh->m_pkthdr.len = MHLEN;
2941 mh->m_flags |= M_PKTHDR;
2942 /* Get the memory mapping */
2943 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2944 rxbuf->hmap, rxbuf->m_head, hseg,
2945 &nsegs, BUS_DMA_NOWAIT);
2946 if (error != 0) /* Nothing elegant to do here */
2947 	goto fail;
2948 bus_dmamap_sync(rxr->htag,
2949 rxbuf->hmap, BUS_DMASYNC_PREREAD);
2950 /* Update descriptor */
2951 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
2953 skip_head:
2954 /* Now the payload cluster */
2955 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
2956 M_PKTHDR, adapter->rx_mbuf_sz);
2957 if (rxbuf->m_pack == NULL) {
2958 	error = ENOBUFS;
2959 	goto fail;
2960 }
2961 mp = rxbuf->m_pack;
2962 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2963 /* Get the memory mapping */
2964 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2965 rxbuf->pmap, mp, pseg,
2966 &nsegs, BUS_DMA_NOWAIT);
2967 if (error != 0)
2968 	goto fail;
2969 bus_dmamap_sync(rxr->ptag,
2970 rxbuf->pmap, BUS_DMASYNC_PREREAD);
2971 /* Update descriptor */
2972 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
2973 }
2976 /* Setup our descriptor indices */
2977 rxr->next_to_check = 0;
2978 rxr->next_to_refresh = 0;
2979 rxr->lro_enabled = FALSE;
2980 rxr->rx_split_packets = 0;
2982 rxr->discard = FALSE;
2984 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2985 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2987 /*
2988 ** Now set up the LRO interface:
2989 */
2990 if (ifp->if_capenable & IFCAP_LRO) {
2991 int err = tcp_lro_init(lro);
2992 if (err) {
2993 	device_printf(dev, "LRO Initialization failed!\n");
2994 	goto fail;
2995 }
2996 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
2997 rxr->lro_enabled = TRUE;
2998 lro->ifp = adapter->ifp;
2999 }
3001 return (0);
3003 fail:
3005 ixv_free_receive_ring(rxr);
3006 return (error);
3007 }
3010 /*********************************************************************
3012 * Initialize all receive rings.
3014 **********************************************************************/
3015 static int
3016 ixv_setup_receive_structures(struct adapter *adapter)
3017 {
3018 struct rx_ring *rxr = adapter->rx_rings;
3019 int j;
3021 for (j = 0; j < adapter->num_queues; j++, rxr++)
3022 	if (ixv_setup_receive_ring(rxr))
3023 		goto fail;
3025 return (0);
3026 fail:
3027 /*
3028 * Free RX buffers allocated so far; we will only handle
3029 * the rings that completed, the failing case will have
3030 * cleaned up for itself. 'j' failed, so it is the terminus.
3031 */
3032 for (int i = 0; i < j; ++i) {
3033 	rxr = &adapter->rx_rings[i];
3034 	ixv_free_receive_ring(rxr);
3035 }
3037 return (ENOBUFS);
3038 }
3040 /*********************************************************************
3042 * Setup receive registers and features.
3044 **********************************************************************/
3045 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3047 static void
3048 ixv_initialize_receive_units(struct adapter *adapter)
3049 {
3050 struct rx_ring *rxr = adapter->rx_rings;
3051 struct ixgbe_hw *hw = &adapter->hw;
3052 struct ifnet *ifp = adapter->ifp;
3053 u32 bufsz, fctrl, rxcsum, hlreg;
3056 /* Enable broadcasts */
3057 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3058 fctrl |= IXGBE_FCTRL_BAM;
3059 fctrl |= IXGBE_FCTRL_DPF;
3060 fctrl |= IXGBE_FCTRL_PMCF;
3061 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3063 /* Set for Jumbo Frames? */
3064 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3065 if (ifp->if_mtu > ETHERMTU) {
3066 hlreg |= IXGBE_HLREG0_JUMBOEN;
3067 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3069 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3070 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3072 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3074 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3075 u64 rdba = rxr->rxdma.dma_paddr;
3076 u32 reg, rxdctl;
3078 /* Do the queue enabling first */
3079 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3080 rxdctl |= IXGBE_RXDCTL_ENABLE;
3081 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
3082 for (int k = 0; k < 10; k++) {
3083 	if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
3084 	    IXGBE_RXDCTL_ENABLE)
3085 		break;
3086 	else
3087 		msec_delay(1);
3088 }
3091 /* Setup the Base and Length of the Rx Descriptor Ring */
3092 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3093 (rdba & 0x00000000ffffffffULL));
3094 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3095     (rdba >> 32));
3096 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3097 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3099 /* Set up the SRRCTL register */
3100 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
3101 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3102 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3103 reg |= bufsz;
3104 if (rxr->hdr_split) {
3105 /* Use a standard mbuf for the header */
3106 reg |= ((IXV_RX_HDR <<
3107 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3108 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3109 reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3110 } else
3111 	reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3112 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
3114 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3115 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
3116 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
3117 adapter->num_rx_desc - 1);
3118 }
3120 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3122 if (ifp->if_capenable & IFCAP_RXCSUM)
3123 rxcsum |= IXGBE_RXCSUM_PCSD;
3125 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3126 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3128 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3130 return;
3131 }
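/*
** Editor's sketch: the buffer-size encoding used above. SRRCTL's
** BSIZEPKT field is expressed in 1KB units, which is why the byte
** counts are shifted right by IXGBE_SRRCTL_BSIZEPKT_SHIFT
** (2048 -> 2, 4096 -> 4). Hypothetical helper.
*/
#if 0	/* illustrative only */
static inline u32
example_srrctl_bsizepkt(u32 bytes)
{
	return (bytes >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);
}
#endif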
3133 /*********************************************************************
3135 * Free all receive rings.
3137 **********************************************************************/
3138 static void
3139 ixv_free_receive_structures(struct adapter *adapter)
3140 {
3141 struct rx_ring *rxr = adapter->rx_rings;
3143 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3144 struct lro_ctrl *lro = &rxr->lro;
3145 ixv_free_receive_buffers(rxr);
3146 /* Free LRO memory */
3147 tcp_lro_free(lro);
3148 /* Free the ring memory as well */
3149 ixv_dma_free(adapter, &rxr->rxdma);
3150 }
3152 free(adapter->rx_rings, M_DEVBUF);
3153 }
3156 /*********************************************************************
3158 * Free receive ring data structures
3160 **********************************************************************/
3161 static void
3162 ixv_free_receive_buffers(struct rx_ring *rxr)
3163 {
3164 struct adapter *adapter = rxr->adapter;
3165 struct ixv_rx_buf *rxbuf;
3167 INIT_DEBUGOUT("free_receive_structures: begin");
3169 /* Cleanup any existing buffers */
3170 if (rxr->rx_buffers != NULL) {
3171 for (int i = 0; i < adapter->num_rx_desc; i++) {
3172 rxbuf = &rxr->rx_buffers[i];
3173 if (rxbuf->m_head != NULL) {
3174 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3175 BUS_DMASYNC_POSTREAD);
3176 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3177 rxbuf->m_head->m_flags |= M_PKTHDR;
3178 m_freem(rxbuf->m_head);
3179 }
3180 if (rxbuf->m_pack != NULL) {
3181 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3182 BUS_DMASYNC_POSTREAD);
3183 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3184 rxbuf->m_pack->m_flags |= M_PKTHDR;
3185 m_freem(rxbuf->m_pack);
3186 }
3187 rxbuf->m_head = NULL;
3188 rxbuf->m_pack = NULL;
3189 if (rxbuf->hmap != NULL) {
3190 	bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3191 	rxbuf->hmap = NULL;
3192 }
3193 if (rxbuf->pmap != NULL) {
3194 	bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3195 	rxbuf->pmap = NULL;
3196 }
3197 }
3198 if (rxr->rx_buffers != NULL) {
3199 free(rxr->rx_buffers, M_DEVBUF);
3200 rxr->rx_buffers = NULL;
3204 if (rxr->htag != NULL) {
3205 	bus_dma_tag_destroy(rxr->htag);
3206 	rxr->htag = NULL;
3207 }
3208 if (rxr->ptag != NULL) {
3209 	bus_dma_tag_destroy(rxr->ptag);
3210 	rxr->ptag = NULL;
3211 }
3213 return;
3214 }
3216 static __inline void
3217 ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3218 {
3220 /*
3221  * ATM LRO is only for IPv4/TCP packets, and the TCP checksum of the
3222  * packet should be computed by hardware. Also it should not have a
3223  * VLAN tag in its ethernet header.
3224  */
3225 if (rxr->lro_enabled &&
3226 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3227 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3228 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3229 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3230 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3231 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3232 /*
3233 ** Send to the stack if:
3234 **  - LRO not enabled, or
3235 **  - no LRO resources, or
3236 **  - lro enqueue fails
3237 */
3238 if (rxr->lro.lro_cnt != 0)
3239 	if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3240 		return;
3241 }
3243 (*ifp->if_input)(ifp, m);
3244 }
3247 static __inline void
3248 ixv_rx_discard(struct rx_ring *rxr, int i)
3249 {
3250 struct ixv_rx_buf *rbuf;
3252 rbuf = &rxr->rx_buffers[i];
3254 if (rbuf->fmp != NULL) { /* Partial chain ? */
3255 	rbuf->fmp->m_flags |= M_PKTHDR;
3256 	m_freem(rbuf->fmp);
3257 	rbuf->fmp = NULL;
3258 }
3260 /*
3261 ** With advanced descriptors the writeback
3262 ** clobbers the buffer addresses, so it's easier
3263 ** to just free the existing mbufs and take
3264 ** the normal refresh path to get new buffers
3265 ** and mapping.
3266 */
3267 if (rbuf->m_head) {
3268 	m_free(rbuf->m_head);
3269 	rbuf->m_head = NULL;
3270 }
3272 if (rbuf->m_pack) {
3273 	m_free(rbuf->m_pack);
3274 	rbuf->m_pack = NULL;
3275 }
3277 return;
3278 }
3281 /*********************************************************************
3283 * This routine executes in interrupt context. It replenishes
3284 * the mbufs in the descriptor and sends data which has been
3285 * dma'ed into host memory to upper layer.
3287 * We loop at most count times if count is > 0, or until done if
3288 * count < 0.
3290 * Return TRUE for more work, FALSE for all clean.
3291 *********************************************************************/
3292 static bool
3293 ixv_rxeof(struct ix_queue *que, int count)
3294 {
3295 struct adapter *adapter = que->adapter;
3296 struct rx_ring *rxr = que->rxr;
3297 struct ifnet *ifp = adapter->ifp;
3298 struct lro_ctrl *lro = &rxr->lro;
3299 struct lro_entry *queued;
3300 int i, nextp, processed = 0;
3301 u32 staterr = 0;
3302 union ixgbe_adv_rx_desc *cur;
3303 struct ixv_rx_buf *rbuf, *nbuf;
3307 for (i = rxr->next_to_check; count != 0;) {
3308 struct mbuf *sendmp, *mh, *mp;
3310 u16 hlen, plen, hdr, vtag;
3311 u32 ptype;
3312 bool eop;
3313 /* Sync the ring. */
3314 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3315 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3317 cur = &rxr->rx_base[i];
3318 staterr = le32toh(cur->wb.upper.status_error);
3320 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
3321 	break;
3322 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3323 	break;
3325 count--;
3326 sendmp = NULL;
3327 nbuf = NULL;
3329 cur->wb.upper.status_error = 0;
3330 rbuf = &rxr->rx_buffers[i];
3334 plen = le16toh(cur->wb.upper.length);
3335 ptype = le32toh(cur->wb.lower.lo_dword.data) &
3336 IXGBE_RXDADV_PKTTYPE_MASK;
3337 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
3338 vtag = le16toh(cur->wb.upper.vlan);
3339 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3341 /* Make sure all parts of a bad packet are discarded */
3342 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
3343     (rxr->discard)) {
3344 	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
3345 	rxr->rx_discarded++;
3346 	if (!eop)
3347 		rxr->discard = TRUE;
3348 	else
3349 		rxr->discard = FALSE;
3350 	ixv_rx_discard(rxr, i);
3351 	goto next_desc;
3352 }
3354 if (!eop) {
3355 	nextp = i + 1;
3356 	if (nextp == adapter->num_rx_desc)
3357 		nextp = 0;
3358 	nbuf = &rxr->rx_buffers[nextp];
3359 }
3361 /*
3362 ** The header mbuf is ONLY used when header
3363 ** split is enabled; otherwise we get normal
3364 ** behavior, i.e., both header and payload
3365 ** are DMA'd into the payload buffer.
3366 **
3367 ** Rather than using the fmp/lmp global pointers
3368 ** we now keep the head of a packet chain in the
3369 ** buffer struct and pass this along from one
3370 ** descriptor to the next, until we get EOP.
3371 */
3372 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
3373 /* This must be an initial descriptor */
3374 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3375 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3376 if (hlen > IXV_RX_HDR)
3377 	hlen = IXV_RX_HDR;
3378 mh = rbuf->m_head;
3379 mh->m_flags |= M_PKTHDR;
3380 mh->m_len = hlen;
3381 mh->m_pkthdr.len = mh->m_len;
3382 /* Null buf pointer so it is refreshed */
3383 rbuf->m_head = NULL;
3384 /*
3385 ** Check the payload length, this
3386 ** could be zero if it's a small
3387 ** packet.
3388 */
3389 if (plen > 0) {
3390 	mp = rbuf->m_pack;
3391 	mp->m_len = plen;
3392 	mp->m_flags &= ~M_PKTHDR;
3393 	mh->m_next = mp;
3394 	mh->m_pkthdr.len += mp->m_len;
3395 /* Null buf pointer so it is refreshed */
3396 rbuf->m_pack = NULL;
3397 rxr->rx_split_packets++;
3398 }
3399 /*
3400 ** Now create the forward
3401 ** chain so when complete
3402 ** we won't have to.
3403 */
3404 if (eop == 0) {
3405 	/* stash the chain head */
3406 	nbuf->fmp = mh;
3407 	/* Make forward chain */
3408 	if (plen)
3409 		mp->m_next = nbuf->m_pack;
3410 	else
3411 		mh->m_next = nbuf->m_pack;
3412 } else {
3413 	/* Singlet, prepare to send */
3414 	sendmp = mh;
3415 	if ((adapter->num_vlans) &&
3416 (staterr & IXGBE_RXD_STAT_VP)) {
3417 sendmp->m_pkthdr.ether_vtag = vtag;
3418 sendmp->m_flags |= M_VLANTAG;
3419 }
3420 }
3421 } else {
3422 /*
3423 ** Either no header split, or a
3424 ** secondary piece of a fragmented
3425 ** packet.
3426 */
3427 mp = rbuf->m_pack;
3428 mp->m_len = plen;
3429 /* See if there is a stored head
3430 ** that determines what we are. */
3431 sendmp = rbuf->fmp;
3433 rbuf->m_pack = rbuf->fmp = NULL;
3435 if (sendmp != NULL) /* secondary frag */
3436 sendmp->m_pkthdr.len += mp->m_len;
3437 else {
3438 	/* first desc of a non-ps chain */
3439 	sendmp = mp;
3440 	sendmp->m_flags |= M_PKTHDR;
3441 sendmp->m_pkthdr.len = mp->m_len;
3442 if (staterr & IXGBE_RXD_STAT_VP) {
3443 sendmp->m_pkthdr.ether_vtag = vtag;
3444 sendmp->m_flags |= M_VLANTAG;
3445 }
3446 }
3447 /* Pass the head pointer on */
3448 if (eop == 0) {
3449 	nbuf->fmp = sendmp;
3450 	sendmp = NULL;
3451 	mp->m_next = nbuf->m_pack;
3452 }
3453 }
3454 ++processed;
3455 /* Sending this frame? */
3456 if (sendmp != NULL) {
3457 	sendmp->m_pkthdr.rcvif = ifp;
3458 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3460 /* capture data for AIM */
3461 rxr->bytes += sendmp->m_pkthdr.len;
3462 rxr->rx_bytes += sendmp->m_pkthdr.len;
3463 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3464 ixv_rx_checksum(staterr, sendmp, ptype);
3465 #if __FreeBSD_version >= 800000
3466 sendmp->m_pkthdr.flowid = que->msix;
3467 sendmp->m_flags |= M_FLOWID;
3468 #endif
3469 }
3470 next_desc:
3471 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3472 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3474 /* Advance our pointers to the next descriptor. */
3475 if (++i == adapter->num_rx_desc)
3476 	i = 0;
3478 /* Now send to the stack or do LRO */
3479 if (sendmp != NULL)
3480 	ixv_rx_input(rxr, ifp, sendmp, ptype);
3482 /* Every 8 descriptors we go to refresh mbufs */
3483 if (processed == 8) {
3484 	ixv_refresh_mbufs(rxr, i);
3485 	processed = 0;
3486 }
3487 }
3489 /* Refresh any remaining buf structs */
3490 if (ixv_rx_unrefreshed(rxr))
3491 ixv_refresh_mbufs(rxr, i);
3493 rxr->next_to_check = i;
3495 /*
3496 * Flush any outstanding LRO work
3497 */
3498 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
3499 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3500 tcp_lro_flush(lro, queued);
3505 /*
3506 ** We still have cleaning to do?
3507 ** Schedule another interrupt if so.
3508 */
3509 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
3510 ixv_rearm_queues(adapter, (u64)(1 << que->msix));
3511 return TRUE;
3512 }
3514 return FALSE;
3515 }
3518 /*********************************************************************
3520 * Verify that the hardware indicated that the checksum is valid.
3521 * Inform the stack about the status of checksum so that stack
3522 * doesn't spend time verifying the checksum.
3524 *********************************************************************/
3525 static void
3526 ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
3527 {
3528 u16 status = (u16) staterr;
3529 u8 errors = (u8) (staterr >> 24);
3530 bool sctp = FALSE;
3532 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3533     (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3534 	sctp = TRUE;
3536 if (status & IXGBE_RXD_STAT_IPCS) {
3537 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3538 /* IP Checksum Good */
3539 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3540 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3543 mp->m_pkthdr.csum_flags = 0;
3545 if (status & IXGBE_RXD_STAT_L4CS) {
3546 u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3547 #if __FreeBSD_version >= 800000
3548 if (sctp)
3549 	type = CSUM_SCTP_VALID;
3550 #endif
3551 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3552 	mp->m_pkthdr.csum_flags |= type;
3553 	if (!sctp)
3554 		mp->m_pkthdr.csum_data = htons(0xffff);
3555 }
3556 }
3557 return;
3558 }
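/*
** Editor's sketch: the staterr layout consumed above. The low 16 bits
** of the writeback status_error word are status flags (DD, EOP, VP,
** IPCS, L4CS...) and bits 31:24 are error flags (IPE, TCPE...), which
** is what the (u16) cast and the >> 24 extract. Hypothetical helper.
*/
#if 0	/* illustrative only */
static inline void
example_split_staterr(u32 staterr, u16 *status, u8 *errors)
{
	*status = (u16)staterr;		/* status bits 15:0 */
	*errors = (u8)(staterr >> 24);	/* error bits 31:24 */
}
#endif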
3560 static void
3561 ixv_setup_vlan_support(struct adapter *adapter)
3562 {
3563 struct ixgbe_hw *hw = &adapter->hw;
3564 u32 ctrl, vid, vfta, retry;
3567 /*
3568 ** We get here through init_locked, meaning
3569 ** a soft reset; it has already cleared
3570 ** the VFTA and other state, so if no
3571 ** vlans have been registered do nothing.
3572 */
3573 if (adapter->num_vlans == 0)
3574 	return;
3576 /* Enable the queues */
3577 for (int i = 0; i < adapter->num_queues; i++) {
3578 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3579 ctrl |= IXGBE_RXDCTL_VME;
3580 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3583 /*
3584 ** A soft reset zeroes out the VFTA, so
3585 ** we need to repopulate it now.
3586 */
3587 for (int i = 0; i < VFTA_SIZE; i++) {
3588 if (ixv_shadow_vfta[i] == 0)
3589 	continue;
3590 vfta = ixv_shadow_vfta[i];
3591 /*
3592 ** Reconstruct the vlan ids
3593 ** based on the bits set in each
3594 ** of the array ints.
3595 */
3596 for (int j = 0; j < 32; j++) {
3597 	retry = 0;
3598 	if ((vfta & (1 << j)) == 0)
3599 		continue;
3600 	vid = (i * 32) + j;
3601 	/* Call the shared code mailbox routine */
3602 	while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3603 		if (++retry > 5)
3604 			break;
3605 	}
3606 }
3607 }
3608 }
3610 /*
3611 ** This routine is run via a vlan config EVENT;
3612 ** it enables us to use the HW Filter table since
3613 ** we can get the vlan id. This just creates the
3614 ** entry in the soft version of the VFTA; init will
3615 ** repopulate the real table.
3616 */
3617 static void
3618 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3619 {
3620 struct adapter *adapter = ifp->if_softc;
3621 u16 index, bit;
3623 if (ifp->if_softc != arg) /* Not our event */
3624 	return;
3626 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3627 	return;
3629 IXV_CORE_LOCK(adapter);
3630 index = (vtag >> 5) & 0x7F;
3631 bit = vtag & 0x1F;
3632 ixv_shadow_vfta[index] |= (1 << bit);
3633 ++adapter->num_vlans;
3634 /* Re-init to load the changes */
3635 ixv_init_locked(adapter);
3636 IXV_CORE_UNLOCK(adapter);
3637 }
3639 /*
3640 ** This routine is run via a vlan
3641 ** unconfig EVENT; it removes our entry
3642 ** from the soft VFTA.
3643 */
3644 static void
3645 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3646 {
3647 struct adapter *adapter = ifp->if_softc;
3648 u16 index, bit;
3650 if (ifp->if_softc != arg)
3651 	return;
3653 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3654 	return;
3656 IXV_CORE_LOCK(adapter);
3657 index = (vtag >> 5) & 0x7F;
3658 bit = vtag & 0x1F;
3659 ixv_shadow_vfta[index] &= ~(1 << bit);
3660 --adapter->num_vlans;
3661 /* Re-init to load the changes */
3662 ixv_init_locked(adapter);
3663 IXV_CORE_UNLOCK(adapter);
3664 }
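/*
** Editor's sketch: the shadow-VFTA addressing used by the two event
** handlers above. A 12-bit vlan id selects one of 128 32-bit words
** (bits 11:5) and a bit within that word (bits 4:0). Hypothetical
** helper.
*/
#if 0	/* illustrative only */
static inline void
example_vfta_slot(u16 vtag, u16 *index, u16 *bit)
{
	*index = (vtag >> 5) & 0x7F;	/* which 32-bit word */
	*bit = vtag & 0x1F;		/* which bit in that word */
}
#endif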
3666 static void
3667 ixv_enable_intr(struct adapter *adapter)
3668 {
3669 struct ixgbe_hw *hw = &adapter->hw;
3670 struct ix_queue *que = adapter->queues;
3671 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3674 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3676 mask = IXGBE_EIMS_ENABLE_MASK;
3677 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3678 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3680 for (int i = 0; i < adapter->num_queues; i++, que++)
3681 ixv_enable_queue(adapter, que->msix);
3683 IXGBE_WRITE_FLUSH(hw);
3684 }
3688 static void
3689 ixv_disable_intr(struct adapter *adapter)
3690 {
3691 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3692 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3693 IXGBE_WRITE_FLUSH(&adapter->hw);
3694 return;
3695 }
3697 /*
3698 ** Setup the correct IVAR register for a particular MSIX interrupt
3699 ** - entry is the register array entry
3700 ** - vector is the MSIX vector for this queue
3701 ** - type is RX/TX/MISC
3702 */
3703 static void
3704 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3705 {
3706 struct ixgbe_hw *hw = &adapter->hw;
3707 u32 ivar, index;
3709 vector |= IXGBE_IVAR_ALLOC_VAL;
3711 if (type == -1) { /* MISC IVAR */
3712 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3713 ivar &= ~0xFF;
3714 ivar |= vector;
3715 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3716 } else { /* RX/TX IVARS */
3717 index = (16 * (entry & 1)) + (8 * type);
3718 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3719 ivar &= ~(0xFF << index);
3720 ivar |= (vector << index);
3721 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
3722 }
3723 }
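/*
** Editor's sketch: the IVAR addressing computed above. Each 32-bit
** VTIVAR register holds four 8-bit entries; 'entry >> 1' picks the
** register, and (16 * (entry & 1)) + (8 * type) picks the bit offset
** of the byte lane for the RX (type 0) or TX (type 1) vector.
** Hypothetical helper.
*/
#if 0	/* illustrative only */
static inline u32
example_ivar_bit_offset(u8 entry, s8 type)
{
	return (16 * (entry & 1)) + (8 * type);
}
#endif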
3725 static void
3726 ixv_configure_ivars(struct adapter *adapter)
3727 {
3728 struct ix_queue *que = adapter->queues;
3730 for (int i = 0; i < adapter->num_queues; i++, que++) {
3731 /* First the RX queue entry */
3732 ixv_set_ivar(adapter, i, que->msix, 0);
3733 /* ... and the TX */
3734 ixv_set_ivar(adapter, i, que->msix, 1);
3735 /* Set an initial value in EITR */
3736 IXGBE_WRITE_REG(&adapter->hw,
3737 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3738 }
3740 /* For the Link interrupt */
3741 ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3742 }
3745 /*
3746 ** Tasklet handler for MSIX MBX interrupts
3747 ** - done outside of the interrupt, since it might sleep
3748 */
3749 static void
3750 ixv_handle_mbx(void *context, int pending)
3751 {
3752 struct adapter *adapter = context;
3754 ixgbe_check_link(&adapter->hw,
3755 &adapter->link_speed, &adapter->link_up, 0);
3756 ixv_update_link_status(adapter);
3757 }
3759 /*
3760 ** The VF stats registers never have a truly virgin
3761 ** starting point, so this routine tries to make an
3762 ** artificial one, marking ground zero on attach as
3763 ** it rolls through time.
3764 */
3765 static void
3766 ixv_save_stats(struct adapter *adapter)
3767 {
3768 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3769 adapter->stats.saved_reset_vfgprc +=
3770 adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3771 adapter->stats.saved_reset_vfgptc +=
3772 adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3773 adapter->stats.saved_reset_vfgorc +=
3774 adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3775 adapter->stats.saved_reset_vfgotc +=
3776 adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3777 adapter->stats.saved_reset_vfmprc +=
3778 adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3779 }
3780 }
3782 static void
3783 ixv_init_stats(struct adapter *adapter)
3784 {
3785 struct ixgbe_hw *hw = &adapter->hw;
3787 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3788 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3789 adapter->stats.last_vfgorc |=
3790 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3792 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3793 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3794 adapter->stats.last_vfgotc |=
3795 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3797 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3799 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3800 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3801 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3802 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3803 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
3804 }
3806 #define UPDATE_STAT_32(reg, last, count)	\
3807 {						\
3808 	u32 current = IXGBE_READ_REG(hw, reg);	\
3809 	if (current < last)			\
3810 		count += 0x100000000LL;		\
3811 	last = current;				\
3812 	count &= 0xFFFFFFFF00000000LL;		\
3813 	count |= current;			\
3814 }
3816 #define UPDATE_STAT_36(lsb, msb, last, count)	\
3817 {						\
3818 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);	\
3819 	u64 cur_msb = IXGBE_READ_REG(hw, msb);	\
3820 	u64 current = ((cur_msb << 32) | cur_lsb); \
3821 	if (current < last)			\
3822 		count += 0x1000000000LL;	\
3823 	last = current;				\
3824 	count &= 0xFFFFFFF000000000LL;		\
3825 	count |= current;			\
3826 }
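/*
** Editor's sketch: what the UPDATE_STAT_36 macro above does, written
** out as a function. The hardware counter is only 36 bits wide; when
** the new reading is below the last one the counter has wrapped, so
** 2^36 is added to the running 64-bit total before merging in the
** latest low bits. Hypothetical helper.
*/
#if 0	/* illustrative only */
static inline void
example_update_stat_36(u64 current, u64 *last, u64 *count)
{
	if (current < *last)		/* 36-bit counter wrapped */
		*count += 0x1000000000LL;
	*last = current;
	*count &= 0xFFFFFFF000000000LL;	/* keep accumulated epochs */
	*count |= current;		/* merge in latest reading */
}
#endif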
3828 /*
3829 ** ixv_update_stats - Update the board statistics counters.
3830 */
3831 void
3832 ixv_update_stats(struct adapter *adapter)
3833 {
3834 struct ixgbe_hw *hw = &adapter->hw;
3836 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3837 adapter->stats.vfgprc);
3838 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3839 adapter->stats.vfgptc);
3840 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3841 adapter->stats.last_vfgorc, adapter->stats.vfgorc);
3842 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3843 adapter->stats.last_vfgotc, adapter->stats.vfgotc);
3844 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3845 adapter->stats.vfmprc);
3846 }
3848 /**********************************************************************
3850 * This routine is called only when ixgbe_display_debug_stats is enabled.
3851 * This routine provides a way to take a look at important statistics
3852 * maintained by the driver and hardware.
3854 **********************************************************************/
3855 static void
3856 ixv_print_hw_stats(struct adapter *adapter)
3857 {
3858 device_t dev = adapter->dev;
3860 device_printf(dev,"Std Mbuf Failed = %lu\n",
3861 adapter->mbuf_defrag_failed);
3862 device_printf(dev,"Driver dropped packets = %lu\n",
3863 adapter->dropped_pkts);
3864 device_printf(dev, "watchdog timeouts = %ld\n",
3865 adapter->watchdog_events);
3867 device_printf(dev,"Good Packets Rcvd = %llu\n",
3868 (long long)adapter->stats.vfgprc);
3869 device_printf(dev,"Good Packets Xmtd = %llu\n",
3870 (long long)adapter->stats.vfgptc);
3871 device_printf(dev,"TSO Transmissions = %lu\n",
3876 /**********************************************************************
3878 * This routine is called only when debugging is enabled via sysctl.
3879 * This routine provides a way to take a look at important statistics
3880 * maintained by the driver and hardware.
3882 **********************************************************************/
3883 static void
3884 ixv_print_debug_info(struct adapter *adapter)
3885 {
3886 device_t dev = adapter->dev;
3887 struct ixgbe_hw *hw = &adapter->hw;
3888 struct ix_queue *que = adapter->queues;
3889 struct rx_ring *rxr;
3890 struct tx_ring *txr;
3891 struct lro_ctrl *lro;
3893 device_printf(dev,"Error Byte Count = %u \n",
3894 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3896 for (int i = 0; i < adapter->num_queues; i++, que++) {
3897 	txr = que->txr;
3898 	rxr = que->rxr;
3899 	lro = &rxr->lro;
3900 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
3901 que->msix, (long)que->irqs);
3902 device_printf(dev,"RX(%d) Packets Received: %lld\n",
3903 rxr->me, (long long)rxr->rx_packets);
3904 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
3905 rxr->me, (long long)rxr->rx_split_packets);
3906 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3907 rxr->me, (long)rxr->rx_bytes);
3908 device_printf(dev,"RX(%d) LRO Queued= %d\n",
3909 rxr->me, lro->lro_queued);
3910 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3911 rxr->me, lro->lro_flushed);
3912 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3913 txr->me, (long)txr->total_packets);
3914 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3915 txr->me, (long)txr->no_desc_avail);
3916 }
3918 device_printf(dev,"MBX IRQ Handled: %lu\n",
3919 (long)adapter->mbx_irq);
3920 return;
3921 }
3923 static int
3924 ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
3925 {
3926 int error, result;
3928 struct adapter *adapter;
3930 result = -1;
3931 error = sysctl_handle_int(oidp, &result, 0, req);
3933 if (error || !req->newptr)
3934 	return (error);
3936 if (result == 1) {
3937 	adapter = (struct adapter *) arg1;
3938 	ixv_print_hw_stats(adapter);
3939 }
3940 return (error);
3941 }
3943 static int
3944 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
3945 {
3946 int error, result;
3947 struct adapter *adapter;
3949 result = -1;
3950 error = sysctl_handle_int(oidp, &result, 0, req);
3952 if (error || !req->newptr)
3953 	return (error);
3955 if (result == 1) {
3956 	adapter = (struct adapter *) arg1;
3957 	ixv_print_debug_info(adapter);
3958 }
3959 return (error);
3960 }
3962 /*
3963 ** Set flow control using sysctl:
3964 ** Flow control values:
3965 **   0 - off
3966 **   1 - rx pause
3967 **   2 - tx pause
3968 **   3 - full
3969 */
3970 static int
3971 ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
3972 {
3973 int error;
3974 struct adapter *adapter;
3976 error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
3978 if (error)
3979 	return (error);
3981 adapter = (struct adapter *) arg1;
3982 switch (ixv_flow_control) {
3983 case ixgbe_fc_rx_pause:
3984 case ixgbe_fc_tx_pause:
3985 case ixgbe_fc_full:
3986 	adapter->hw.fc.requested_mode = ixv_flow_control;
3987 	break;
3988 case ixgbe_fc_none:
3989 default:
3990 	adapter->hw.fc.requested_mode = ixgbe_fc_none;
3991 }
3993 ixgbe_fc_enable(&adapter->hw);
3994 return error;
3995 }
3997 static void
3998 ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
3999     const char *description, int *limit, int value)
4000 {
4001 *limit = value;
4002 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4003     SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4004     OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4005 }