/******************************************************************************

  Copyright (c) 2001-2011, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.0.1";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void	ixv_start(struct ifnet *);
static void	ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int	ixv_mq_start(struct ifnet *, struct mbuf *);
static int	ixv_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void	ixv_qflush(struct ifnet *);
#endif
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_allocate_queues(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static int	ixv_allocate_transmit_buffers(struct tx_ring *);
static int	ixv_setup_transmit_structures(struct adapter *);
static void	ixv_setup_transmit_ring(struct tx_ring *);
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_free_transmit_structures(struct adapter *);
static void	ixv_free_transmit_buffers(struct tx_ring *);

static int	ixv_allocate_receive_buffers(struct rx_ring *);
static int	ixv_setup_receive_structures(struct adapter *);
static int	ixv_setup_receive_ring(struct rx_ring *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_free_receive_structures(struct adapter *);
static void	ixv_free_receive_buffers(struct rx_ring *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static bool	ixv_txeof(struct tx_ring *);
static bool	ixv_rxeof(struct ix_queue *, int);
static void	ixv_rx_checksum(u32, struct mbuf *, u32);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static void	ixv_refresh_mbufs(struct rx_ring *, int);
static int	ixv_xmit(struct tx_ring *, struct mbuf **);
static int	ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int	ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixv_dma_malloc(struct adapter *, bus_size_t,
		    struct ixv_dma_alloc *, int);
static void	ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void	ixv_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static bool	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	{0, 0}
};

static driver_t ixv_driver = {
	"ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * which can be a performance win in some workloads,
 * but in others it actually hurts; it is off by default.
 */
static bool ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
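/*
** Usage sketch (added, not in the original source): since the knobs
** above are registered with TUNABLE_INT(), they are boot-time loader
** tunables and would normally be set in /boot/loader.conf before the
** module initializes, for example:
**
**	hw.ixv.enable_aim="1"
**	hw.ixv.rx_process_limit="256"
**	hw.ixv.txd="2048"
**
** The values shown are illustrative only, not recommendations.
*/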
/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
	ixv_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index],
			    ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	if (resource_disabled("ixgbe", device_get_unit(dev))) {
		device_printf(dev, "Disabled by device hint\n");
		return (ENXIO);
	}

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init */
	IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");
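	/*
	** Runtime access sketch (added): with the registrations above,
	** these nodes should appear under the device's sysctl tree and
	** be reachable with sysctl(8), e.g. "sysctl dev.ix.0.enable_aim=1"
	** (the "ix" prefix comes from the driver_t name; the unit number
	** is an assumed example).
	*/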
	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixv_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: at
	** this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXV_FC_PAUSE;
	hw->fc.low_water = IXV_FC_LO;
	hw->fc.high_water = IXV_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev, "Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixv_rx_process_limit);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANs are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);
	return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available stack is notified and
 *  the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
{
	struct mbuf *m_head;
	struct adapter *adapter = txr->adapter;

	IXV_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixv_xmit(txr, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}
	return;
}
/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXV_TX_LOCK(txr);
		ixv_start_locked(txr, ifp);
		IXV_TX_UNLOCK(txr);
	}
	return;
}
#else

/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter *adapter = ifp->if_softc;
	struct ix_queue *que;
	struct tx_ring *txr;
	int i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;
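	/*
	** Note (added): when M_FLOWID is set, m_pkthdr.flowid carries the
	** flow hash the stack computed for this mbuf, so the modulo keeps
	** each flow pinned to one TX ring; with the single VF queue pair
	** this reduces to queue 0.
	*/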
	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXV_TX_TRYLOCK(txr)) {
		err = ixv_mq_start_locked(ifp, txr, m);
		IXV_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		taskqueue_enqueue(que->tq, &que->que_task);
	}

	return (err);
}
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter *adapter = txr->adapter;
	struct mbuf *next;
	int enqueued, err = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixv_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}

	return (err);
}
/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;
	struct mbuf *m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXV_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}

#endif
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *) data;
	bool avoid_reset = FALSE;
#endif
	int error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXV_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXV_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXV_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixv_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixv_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXV_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
	return;
}
/*
** MSIX Interrupt Handlers and Tasklets
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 queue = 1 << vector;
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 queue = (u64)(1 << vector);
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
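/*
** Note (added): VTEICS is the VF "interrupt cause set" register, so
** the write above re-raises the interrupt for every queue named in
** the mask; ixv_local_timer() uses this to nudge queues that still
** have work outstanding. Register semantics assumed from the 82599
** datasheet, not stated in this file.
*/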
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;
	bool more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixv_rxeof(que, adapter->rx_process_limit);
		IXV_TX_LOCK(txr);
		ixv_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixv_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixv_start_locked(txr, ifp);
#endif
		IXV_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more_tx, more_rx;
	u32 newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	IXV_TX_LOCK(txr);
	more_tx = ixv_txeof(txr);
	IXV_TX_UNLOCK(txr);

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;

	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;
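	/*
	** Illustrative arithmetic (added, not in the original): an
	** interval averaging 1500 bytes/packet gives 1500 + 24 = 1524,
	** which survives the min(,3000) cap, falls outside the 300-1200
	** mid range, and is halved to 762; the result is mirrored into
	** the high 16 bits and saved for the next interrupt's VTEITR
	** write above.
	*/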
	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
static void
ixv_msix_mbx(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	++adapter->mbx_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXV_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXV_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXV_CORE_UNLOCK(adapter);

	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/
static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter *adapter = txr->adapter;
	u32 olinfo_status = 0, cmd_type_len;
	u32 paylen = 0;
	int i, j, error, nsegs;
	int first, last = 0;
	struct mbuf *m_head;
	bus_dma_segment_t segs[32];
	bus_dmamap_t map;
	struct ixv_tx_buf *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			adapter->no_tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		adapter->no_tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/*
	** Set up the appropriate offload context
	** this becomes the first descriptor of
	** a packet.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ixv_tso_setup(txr, m_head, &paylen)) {
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
		} else
			return (ENXIO);
	} else if (ixv_tx_ctx_setup(txr, m_head))
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	/* Record payload length */
	if (paylen == 0)
		olinfo_status |= m_head->m_pkthdr.len <<
		    IXGBE_ADVTXD_PAYLEN_SHIFT;

	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i; /* descriptor that will get completion IRQ */

		if (++i == adapter->num_tx_desc)
			i = 0;

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop_index = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

	return (0);

xmit_fail:
	bus_dmamap_unload(txr->txtag, txbuf->map);
	return (error);
}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8 *update_ptr;
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr);

	return;
}
/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * If the interface has been paused
	 * then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
		goto out;
	/*
	** Check for time since any descriptor was cleaned
	*/
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		if (txr->watchdog_check == FALSE) {
			IXV_TX_UNLOCK(txr);
			continue;
		}
		if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
			goto hung;
		IXV_TX_UNLOCK(txr);
	}
out:
	ixv_rearm_queues(adapter, adapter->que_mask);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

hung:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
	device_printf(dev, "TX(%d) desc avail = %d,"
	    " Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	IXV_TX_UNLOCK(txr);
	ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	device_t dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			for (int i = 0; i < adapter->num_queues;
			    i++, txr++)
				txr->watchdog_check = FALSE;
		}
	}

	return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	u16 pci_cmd_word;

	/*
	** Make sure BUSMASTER is set, on a VM under
	** KVM it may not be and will break things.
	*/
	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (pci_cmd_word & PCIM_CMD_MEMEN))) {
		INIT_DEBUGOUT("Memory Access and/or Bus Master "
		    "bits were not set!\n");
		pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	int error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->que_mask |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);

		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->mbxvec = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));

	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}
/*
 * Setup MSIX resources, note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, vectors, want = 2;

	/* First try MSI/X */
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!adapter->msix_mem) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	vectors = pci_msix_count(dev);
	if (vectors < 2) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
		goto out;
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	if (pci_alloc_msix(dev, &want) == 0) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
out:
	device_printf(adapter->dev, "MSIX config error\n");
	return (ENXIO);
}
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int rid;
	device_t dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	return (0);
}
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	device_t dev = adapter->dev;
	int rid, memrid;

	memrid = PCIR_BAR(MSIX_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->mbxvec) /* we are doing MSIX */
		rid = adapter->mbxvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1) : (rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixv_mq_start;
	ifp->if_qflush = ixv_qflush;
#else
	ifp->if_start = ixv_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWCSUM
			     |  IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg, err = 0;
	bool negotiate = TRUE;

	if (hw->mac.ops.check_link)
		err = hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
	if (err)
		goto out;

	if (hw->mac.ops.setup_link)
		err = hw->mac.ops.setup_link(hw, autoneg,
		    negotiate, adapter->link_up);
out:
	return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}
static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
    struct ixv_dma_alloc *dma, int mapflags)
{
	device_t dev = adapter->dev;
	int r;

	r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       DBA_ALIGN, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dma_tag_create failed; "
		    "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dmamem_alloc failed; "
		    "error %u\n", r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size,
	    ixv_dmamap_cb,
	    &dma->dma_paddr,
	    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dmamap_load failed; "
		    "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
	return;
}
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each, called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	int rsize, tsize, error = 0;
	int txconf = 0, rxconf = 0;

	/* First allocate the top level queue structs */
	if (!(adapter->queues =
	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* First allocate the TX ring struct memory */
	if (!(adapter->tx_rings =
	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		error = ENOMEM;
		goto tx_fail;
	}

	/* Next allocate the RX */
	if (!(adapter->rx_rings =
	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto rx_fail;
	}

	/* For the ring itself */
	tsize = roundup2(adapter->num_tx_desc *
	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

	/*
	 * Now set up the TX queues, txconf is needed to handle the
	 * possibility that things fail midcourse and we need to
	 * undo memory gracefully
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->me = i;

		/* Initialize the TX side lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), txr->me);
		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, tsize,
		    &txr->txdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

		/* Now allocate transmit buffers for the ring */
		if (ixv_allocate_transmit_buffers(txr)) {
			device_printf(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#if __FreeBSD_version >= 800000
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
		    M_WAITOK, &txr->tx_mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up buf ring\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#endif
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
		rxr->me = i;

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), rxr->me);
		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, rsize,
		    &rxr->rxdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate RxDescriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring */
		if (ixv_allocate_receive_buffers(rxr)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	** Finally set up the queue holding structs
	*/
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];
	}

	return (0);

err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixv_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixv_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(NULL,		/* parent */
			       1, 0,		/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       IXV_TSO_SIZE,	/* maxsize */
			       32,		/* nsegments */
			       PAGE_SIZE,	/* maxsegsize */
			       0,		/* flags */
			       NULL,		/* lockfunc */
			       NULL,		/* lockfuncarg */
			       &txr->txtag))) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
	ixv_free_transmit_structures(adapter);
	return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *txbuf;
	int i;

	/* Clear the old ring contents */
	IXV_TX_LOCK(txr);
	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		/* Clear the EOP index */
		txbuf->eop_index = -1;
	}

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixv_setup_transmit_ring(txr);

	return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;
    struct ixgbe_hw *hw = &adapter->hw;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        u64 tdba = txr->txdma.dma_paddr;
        u32 txctrl, txdctl;

        /* Set WTHRESH to 8, burst writeback */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        txdctl |= (8 << 16);
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
        /* Now enable */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

        /* Set the HW Tx Head and Tail indices */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

        /* Setup Transmit Descriptor Cmd Settings */
        txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
        txr->watchdog_check = FALSE;

        /* Set Ring parameters */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
            adapter->num_tx_desc *
            sizeof(struct ixgbe_legacy_tx_desc));
        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
        txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
    }

    return;
}
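/*
** WTHRESH occupies bits 16..22 of TXDCTL, so the (8 << 16) above
** asks the hardware to coalesce descriptor writebacks into bursts
** of eight rather than writing status back per completed
** descriptor, trading a little completion latency for less bus
** traffic.
*/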
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        ixv_free_transmit_buffers(txr);
        ixv_dma_free(adapter, &txr->txdma);
        IXV_TX_UNLOCK(txr);
        IXV_TX_LOCK_DESTROY(txr);
    }
    free(adapter->tx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    struct ixv_tx_buf *tx_buffer;
    int i;

    INIT_DEBUGOUT("free_transmit_ring: begin");

    if (txr->tx_buffers == NULL)
        return;

    tx_buffer = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
        if (tx_buffer->m_head != NULL) {
            bus_dmamap_sync(txr->txtag, tx_buffer->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txtag,
                tx_buffer->map);
            m_freem(tx_buffer->m_head);
            tx_buffer->m_head = NULL;
            if (tx_buffer->map != NULL) {
                bus_dmamap_destroy(txr->txtag,
                    tx_buffer->map);
                tx_buffer->map = NULL;
            }
        } else if (tx_buffer->map != NULL) {
            bus_dmamap_unload(txr->txtag,
                tx_buffer->map);
            bus_dmamap_destroy(txr->txtag,
                tx_buffer->map);
            tx_buffer->map = NULL;
        }
    }
#if __FreeBSD_version >= 800000
    if (txr->br != NULL)
        buf_ring_free(txr->br, M_DEVBUF);
#endif
    if (txr->tx_buffers != NULL) {
        free(txr->tx_buffers, M_DEVBUF);
        txr->tx_buffers = NULL;
    }
    if (txr->txtag != NULL) {
        bus_dma_tag_destroy(txr->txtag);
        txr->txtag = NULL;
    }
    return;
}
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static bool
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
    struct adapter *adapter = txr->adapter;
    struct ixgbe_adv_tx_context_desc *TXD;
    struct ixv_tx_buf *tx_buffer;
    u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
    struct ether_vlan_header *eh;
    struct ip *ip;
    struct ip6_hdr *ip6;
    int ehdrlen, ip_hlen = 0;
    u16 etype;
    u8 ipproto = 0;
    bool offload = TRUE;
    int ctxd = txr->next_avail_desc;
    u16 vtag = 0;

    if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
        offload = FALSE;

    tx_buffer = &txr->tx_buffers[ctxd];
    TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

    /*
    ** In advanced descriptors the vlan tag must
    ** be placed into the descriptor itself.
    */
    if (mp->m_flags & M_VLANTAG) {
        vtag = htole16(mp->m_pkthdr.ether_vtag);
        vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
    } else if (offload == FALSE)
        return FALSE;

    /*
     * Determine where frame payload starts.
     * Jump over vlan headers if already present,
     * helpful for QinQ too.
     */
    eh = mtod(mp, struct ether_vlan_header *);
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        etype = ntohs(eh->evl_proto);
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    } else {
        etype = ntohs(eh->evl_encap_proto);
        ehdrlen = ETHER_HDR_LEN;
    }

    /* Set the ether header length */
    vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

    switch (etype) {
        case ETHERTYPE_IP:
            ip = (struct ip *)(mp->m_data + ehdrlen);
            ip_hlen = ip->ip_hl << 2;
            if (mp->m_len < ehdrlen + ip_hlen)
                return FALSE;
            ipproto = ip->ip_p;
            type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
            break;
        case ETHERTYPE_IPV6:
            ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
            ip_hlen = sizeof(struct ip6_hdr);
            if (mp->m_len < ehdrlen + ip_hlen)
                return FALSE;
            ipproto = ip6->ip6_nxt;
            type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
            break;
        default:
            offload = FALSE;
            break;
    }

    vlan_macip_lens |= ip_hlen;
    type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

    switch (ipproto) {
        case IPPROTO_TCP:
            if (mp->m_pkthdr.csum_flags & CSUM_TCP)
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
            break;
        case IPPROTO_UDP:
            if (mp->m_pkthdr.csum_flags & CSUM_UDP)
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
            break;
#if __FreeBSD_version >= 800000
        case IPPROTO_SCTP:
            if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
            break;
#endif
        default:
            offload = FALSE;
            break;
    }

    /* Now copy bits into descriptor */
    TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
    TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
    TXD->seqnum_seed = htole32(0);
    TXD->mss_l4len_idx = htole32(0);

    tx_buffer->m_head = NULL;
    tx_buffer->eop_index = -1;

    /* We've consumed the first desc, adjust counters */
    if (++ctxd == adapter->num_tx_desc)
        ctxd = 0;
    txr->next_avail_desc = ctxd;
    --txr->tx_avail;

    return (offload);
}
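/*
** The context descriptor consumes a ring slot but carries no packet
** data: it only programs the offload fields that the following data
** descriptors refer to, which is why a slot is taken from tx_avail
** above with no mbuf attached to its tx_buffer.
*/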
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static bool
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
    struct adapter *adapter = txr->adapter;
    struct ixgbe_adv_tx_context_desc *TXD;
    struct ixv_tx_buf *tx_buffer;
    u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
    u32 mss_l4len_idx = 0;
    u16 vtag = 0;
    int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
    struct ether_vlan_header *eh;
    struct ip *ip;
    struct tcphdr *th;

    /*
     * Determine where frame payload starts.
     * Jump over vlan headers if already present
     */
    eh = mtod(mp, struct ether_vlan_header *);
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    else
        ehdrlen = ETHER_HDR_LEN;

    /* Ensure we have at least the IP+TCP header in the first mbuf. */
    if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
        return FALSE;

    ctxd = txr->next_avail_desc;
    tx_buffer = &txr->tx_buffers[ctxd];
    TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

    ip = (struct ip *)(mp->m_data + ehdrlen);
    if (ip->ip_p != IPPROTO_TCP)
        return FALSE;   /* 0 */
    ip->ip_sum = 0;
    ip_hlen = ip->ip_hl << 2;
    th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
    th->th_sum = in_pseudo(ip->ip_src.s_addr,
        ip->ip_dst.s_addr, htons(IPPROTO_TCP));
    tcp_hlen = th->th_off << 2;
    hdrlen = ehdrlen + ip_hlen + tcp_hlen;

    /* This is used in the transmit desc in encap */
    *paylen = mp->m_pkthdr.len - hdrlen;

    /* VLAN MACLEN IPLEN */
    if (mp->m_flags & M_VLANTAG) {
        vtag = htole16(mp->m_pkthdr.ether_vtag);
        vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
    }

    vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
    vlan_macip_lens |= ip_hlen;
    TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

    /* ADV DTYPE TUCMD */
    type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
    type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
    type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
    TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

    /* MSS L4LEN IDX */
    mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
    mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
    TXD->mss_l4len_idx = htole32(mss_l4len_idx);

    TXD->seqnum_seed = htole32(0);
    tx_buffer->m_head = NULL;
    tx_buffer->eop_index = -1;

    if (++ctxd == adapter->num_tx_desc)
        ctxd = 0;

    txr->tx_avail--;
    txr->next_avail_desc = ctxd;
    return TRUE;
}
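/*
** For TSO the stack's TCP checksum is replaced above with a
** pseudo-header sum over the addresses and protocol only (no
** length term), since the hardware fills in per-segment lengths
** as it carves the payload into MSS-sized frames.
*/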
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static bool
ixv_txeof(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    struct ifnet *ifp = adapter->ifp;
    u32 first, last, done;
    struct ixv_tx_buf *tx_buffer;
    struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

    mtx_assert(&txr->tx_mtx, MA_OWNED);

    if (txr->tx_avail == adapter->num_tx_desc)
        return FALSE;

    first = txr->next_to_clean;
    tx_buffer = &txr->tx_buffers[first];
    /* For cleanup we just use legacy struct */
    tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
    last = tx_buffer->eop_index;
    if (last == -1)
        return FALSE;
    eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

    /*
    ** Get the index of the first descriptor
    ** BEYOND the EOP and call that 'done'.
    ** I do this so the comparison in the
    ** inner while loop below can be simple
    */
    if (++last == adapter->num_tx_desc) last = 0;
    done = last;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_POSTREAD);
    /*
    ** Only the EOP descriptor of a packet now has the DD
    ** bit set, this is what we look for...
    */
    while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
        /* We clean the range of the packet */
        while (first != done) {
            tx_desc->upper.data = 0;
            tx_desc->lower.data = 0;
            tx_desc->buffer_addr = 0;
            ++txr->tx_avail;

            if (tx_buffer->m_head) {
                bus_dmamap_sync(txr->txtag,
                    tx_buffer->map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(txr->txtag,
                    tx_buffer->map);
                m_freem(tx_buffer->m_head);
                tx_buffer->m_head = NULL;
                tx_buffer->map = NULL;
            }
            tx_buffer->eop_index = -1;
            txr->watchdog_time = ticks;

            if (++first == adapter->num_tx_desc)
                first = 0;

            tx_buffer = &txr->tx_buffers[first];
            tx_desc =
                (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
        }
        ++ifp->if_opackets;
        /* See if there is more work now */
        last = tx_buffer->eop_index;
        if (last != -1) {
            eop_desc =
                (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
            /* Get next done point */
            if (++last == adapter->num_tx_desc) last = 0;
            done = last;
        } else
            break;
    }
    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    txr->next_to_clean = first;

    /*
     * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
     * it is OK to send packets. If there are no pending descriptors,
     * clear the timeout. Otherwise, if some descriptors have been freed,
     * restart the timeout.
     */
    if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        if (txr->tx_avail == adapter->num_tx_desc) {
            txr->watchdog_check = FALSE;
            return FALSE;
        }
    }

    return TRUE;
}
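/*
** Watchdog interplay: watchdog_time is refreshed for every cleaned
** descriptor above, and watchdog_check is only cleared once the
** ring is fully drained, so a queue that stops making progress
** while descriptors are still outstanding will eventually trip
** the watchdog timer.
*/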
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
    struct adapter *adapter = rxr->adapter;
    bus_dma_segment_t hseg[1];
    bus_dma_segment_t pseg[1];
    struct ixv_rx_buf *rxbuf;
    struct mbuf *mh, *mp;
    int i, nsegs, error, cleaned;

    i = rxr->next_to_refresh;
    cleaned = -1; /* Signify no completions */
    while (i != limit) {
        rxbuf = &rxr->rx_buffers[i];
        if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
            mh = m_gethdr(M_DONTWAIT, MT_DATA);
            if (mh == NULL)
                goto update;
            mh->m_pkthdr.len = mh->m_len = MHLEN;
            mh->m_flags |= M_PKTHDR;
            m_adj(mh, ETHER_ALIGN);
            /* Get the memory mapping */
            error = bus_dmamap_load_mbuf_sg(rxr->htag,
                rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
            if (error != 0) {
                printf("GET BUF: dmamap load"
                    " failure - %d\n", error);
                m_free(mh);
                goto update;
            }
            rxbuf->m_head = mh;
            bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                BUS_DMASYNC_PREREAD);
            rxr->rx_base[i].read.hdr_addr =
                htole64(hseg[0].ds_addr);
        }

        if (rxbuf->m_pack == NULL) {
            mp = m_getjcl(M_DONTWAIT, MT_DATA,
                M_PKTHDR, adapter->rx_mbuf_sz);
            if (mp == NULL)
                goto update;
            mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
            /* Get the memory mapping */
            error = bus_dmamap_load_mbuf_sg(rxr->ptag,
                rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
            if (error != 0) {
                printf("GET BUF: dmamap load"
                    " failure - %d\n", error);
                m_free(mp);
                goto update;
            }
            rxbuf->m_pack = mp;
            bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                BUS_DMASYNC_PREREAD);
            rxr->rx_base[i].read.pkt_addr =
                htole64(pseg[0].ds_addr);
        }

        cleaned = i;
        /* Calculate next index */
        if (++i == adapter->num_rx_desc)
            i = 0;
        /* This is the work marker for refresh */
        rxr->next_to_refresh = i;
    }
update:
    if (cleaned != -1) /* If we refreshed some, bump tail */
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VFRDT(rxr->me), cleaned);
    return;
}
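/*
** The tail register is bumped once, after the loop, with the index
** of the last descriptor actually refreshed; this hands all newly
** armed descriptors back to the hardware in a single MMIO write
** instead of one write per buffer.
*/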
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
    struct adapter *adapter = rxr->adapter;
    device_t dev = adapter->dev;
    struct ixv_rx_buf *rxbuf;
    int i, bsize, error;

    bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
    if (!(rxr->rx_buffers =
        (struct ixv_rx_buf *) malloc(bsize,
        M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate rx_buffer memory\n");
        error = ENOMEM;
        goto fail;
    }

    if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
                1, 0,                   /* alignment, bounds */
                BUS_SPACE_MAXADDR,      /* lowaddr */
                BUS_SPACE_MAXADDR,      /* highaddr */
                NULL, NULL,             /* filter, filterarg */
                MSIZE,                  /* maxsize */
                1,                      /* nsegments */
                MSIZE,                  /* maxsegsize */
                0,                      /* flags */
                NULL,                   /* lockfunc */
                NULL,                   /* lockfuncarg */
                &rxr->htag))) {
        device_printf(dev, "Unable to create RX DMA tag\n");
        goto fail;
    }

    if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
                1, 0,                   /* alignment, bounds */
                BUS_SPACE_MAXADDR,      /* lowaddr */
                BUS_SPACE_MAXADDR,      /* highaddr */
                NULL, NULL,             /* filter, filterarg */
                MJUMPAGESIZE,           /* maxsize */
                1,                      /* nsegments */
                MJUMPAGESIZE,           /* maxsegsize */
                0,                      /* flags */
                NULL,                   /* lockfunc */
                NULL,                   /* lockfuncarg */
                &rxr->ptag))) {
        device_printf(dev, "Unable to create RX DMA tag\n");
        goto fail;
    }

    for (i = 0; i < adapter->num_rx_desc; i++) {
        rxbuf = &rxr->rx_buffers[i];
        error = bus_dmamap_create(rxr->htag,
            BUS_DMA_NOWAIT, &rxbuf->hmap);
        if (error) {
            device_printf(dev, "Unable to create RX head map\n");
            goto fail;
        }
        error = bus_dmamap_create(rxr->ptag,
            BUS_DMA_NOWAIT, &rxbuf->pmap);
        if (error) {
            device_printf(dev, "Unable to create RX pkt map\n");
            goto fail;
        }
    }

    return (0);

fail:
    /* Frees all, but can handle partial completion */
    ixv_free_receive_structures(adapter);
    return (error);
}
static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
    struct adapter *adapter;
    struct ixv_rx_buf *rxbuf;
    int i;

    adapter = rxr->adapter;
    for (i = 0; i < adapter->num_rx_desc; i++) {
        rxbuf = &rxr->rx_buffers[i];
        if (rxbuf->m_head != NULL) {
            bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(rxr->htag, rxbuf->hmap);
            rxbuf->m_head->m_flags |= M_PKTHDR;
            m_freem(rxbuf->m_head);
        }
        if (rxbuf->m_pack != NULL) {
            bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
            rxbuf->m_pack->m_flags |= M_PKTHDR;
            m_freem(rxbuf->m_pack);
        }
        rxbuf->m_head = NULL;
        rxbuf->m_pack = NULL;
    }
}
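/*
** M_PKTHDR is restored before m_freem() above because buffers that
** were chained as packet fragments may have had the flag stripped;
** restoring it returns each mbuf to the packet-header form it was
** allocated with before it is released.
*/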
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
    struct adapter *adapter;
    struct ifnet *ifp;
    device_t dev;
    struct ixv_rx_buf *rxbuf;
    bus_dma_segment_t pseg[1], hseg[1];
    struct lro_ctrl *lro = &rxr->lro;
    int rsize, nsegs, error = 0;

    adapter = rxr->adapter;
    ifp = adapter->ifp;
    dev = adapter->dev;

    /* Clear the ring contents */
    IXV_RX_LOCK(rxr);
    rsize = roundup2(adapter->num_rx_desc *
        sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
    bzero((void *)rxr->rx_base, rsize);

    /* Free current RX buffer structs and their mbufs */
    ixv_free_receive_ring(rxr);

    /* Configure header split? */
    if (ixv_header_split)
        rxr->hdr_split = TRUE;

    /* Now replenish the mbufs */
    for (int j = 0; j != adapter->num_rx_desc; ++j) {
        struct mbuf *mh, *mp;

        rxbuf = &rxr->rx_buffers[j];
        /*
        ** Don't allocate mbufs if not
        ** doing header split, it's wasteful
        */
        if (rxr->hdr_split == FALSE)
            goto skip_head;

        /* First the header */
        rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
        if (rxbuf->m_head == NULL) {
            error = ENOBUFS;
            goto fail;
        }
        m_adj(rxbuf->m_head, ETHER_ALIGN);
        mh = rxbuf->m_head;
        mh->m_len = mh->m_pkthdr.len = MHLEN;
        mh->m_flags |= M_PKTHDR;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->htag,
            rxbuf->hmap, rxbuf->m_head, hseg,
            &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) /* Nothing elegant to do here */
            goto fail;
        bus_dmamap_sync(rxr->htag,
            rxbuf->hmap, BUS_DMASYNC_PREREAD);
        /* Update descriptor */
        rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
        /* Now the payload cluster */
        rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
            M_PKTHDR, adapter->rx_mbuf_sz);
        if (rxbuf->m_pack == NULL) {
            error = ENOBUFS;
            goto fail;
        }
        mp = rxbuf->m_pack;
        mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->ptag,
            rxbuf->pmap, mp, pseg,
            &nsegs, BUS_DMA_NOWAIT);
        if (error != 0)
            goto fail;
        bus_dmamap_sync(rxr->ptag,
            rxbuf->pmap, BUS_DMASYNC_PREREAD);
        /* Update descriptor */
        rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
    }

    /* Setup our descriptor indices */
    rxr->next_to_check = 0;
    rxr->next_to_refresh = 0;
    rxr->lro_enabled = FALSE;
    rxr->rx_split_packets = 0;
    rxr->rx_bytes = 0;

    bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /*
    ** Now set up the LRO interface:
    */
    if (ifp->if_capenable & IFCAP_LRO) {
        int err = tcp_lro_init(lro);
        if (err) {
            device_printf(dev, "LRO Initialization failed!\n");
            goto fail;
        }
        INIT_DEBUGOUT("RX Soft LRO Initialized\n");
        rxr->lro_enabled = TRUE;
        lro->ifp = adapter->ifp;
    }

    IXV_RX_UNLOCK(rxr);
    return (0);

fail:
    ixv_free_receive_ring(rxr);
    IXV_RX_UNLOCK(rxr);
    return (error);
}
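/*
** With header split disabled only the payload cluster is armed and
** the hardware DMAs the entire frame, headers included, into that
** single buffer; the header mbuf is skipped entirely above to
** avoid wasting an allocation per descriptor.
*/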
/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
    struct rx_ring *rxr = adapter->rx_rings;
    int j;

    for (j = 0; j < adapter->num_queues; j++, rxr++)
        if (ixv_setup_receive_ring(rxr))
            goto fail;

    return (0);
fail:
    /*
     * Free RX buffers allocated so far, we will only handle
     * the rings that completed, the failing case will have
     * cleaned up for itself. 'j' failed, so it's the terminus.
     */
    for (int i = 0; i < j; ++i) {
        rxr = &adapter->rx_rings[i];
        ixv_free_receive_ring(rxr);
    }

    return (ENOBUFS);
}
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
    struct rx_ring *rxr = adapter->rx_rings;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ifnet *ifp = adapter->ifp;
    u32 bufsz, fctrl, rxcsum, hlreg;

    /* Enable broadcasts */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    fctrl |= IXGBE_FCTRL_BAM;
    fctrl |= IXGBE_FCTRL_DPF;
    fctrl |= IXGBE_FCTRL_PMCF;
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    /* Set for Jumbo Frames? */
    hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    if (ifp->if_mtu > ETHERMTU) {
        hlreg |= IXGBE_HLREG0_JUMBOEN;
        bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    } else {
        hlreg &= ~IXGBE_HLREG0_JUMBOEN;
        bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    }
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

    for (int i = 0; i < adapter->num_queues; i++, rxr++) {
        u64 rdba = rxr->rxdma.dma_paddr;
        u32 reg, rxdctl;

        /* Do the queue enabling first */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        rxdctl |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
        for (int k = 0; k < 10; k++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                IXGBE_RXDCTL_ENABLE)
                break;
            else
                msec_delay(1);
        }
        wmb();

        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
            (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
            adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

        /* Set up the SRRCTL register */
        reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
        reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        reg |= bufsz;
        if (rxr->hdr_split) {
            /* Use a standard mbuf for the header */
            reg |= ((IXV_RX_HDR <<
                IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
                & IXGBE_SRRCTL_BSIZEHDR_MASK);
            reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        } else
            reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
            adapter->num_rx_desc - 1);
    }

    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

    if (ifp->if_capenable & IFCAP_RXCSUM)
        rxcsum |= IXGBE_RXCSUM_PCSD;

    if (!(rxcsum & IXGBE_RXCSUM_PCSD))
        rxcsum |= IXGBE_RXCSUM_IPPCSE;

    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

    return;
}
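/*
** SRRCTL sizing note: IXGBE_SRRCTL_BSIZEPKT_SHIFT converts bytes
** into the register's 1KB granularity, so the 2048 and 4096 byte
** buffer choices above are actually written as 2 and 4.
*/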
/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
    struct rx_ring *rxr = adapter->rx_rings;

    for (int i = 0; i < adapter->num_queues; i++, rxr++) {
        struct lro_ctrl *lro = &rxr->lro;
        ixv_free_receive_buffers(rxr);
        /* Free LRO memory */
        tcp_lro_free(lro);
        /* Free the ring memory as well */
        ixv_dma_free(adapter, &rxr->rxdma);
    }

    free(adapter->rx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
    struct adapter *adapter = rxr->adapter;
    struct ixv_rx_buf *rxbuf;

    INIT_DEBUGOUT("free_receive_structures: begin");

    /* Cleanup any existing buffers */
    if (rxr->rx_buffers != NULL) {
        for (int i = 0; i < adapter->num_rx_desc; i++) {
            rxbuf = &rxr->rx_buffers[i];
            if (rxbuf->m_head != NULL) {
                bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(rxr->htag, rxbuf->hmap);
                rxbuf->m_head->m_flags |= M_PKTHDR;
                m_freem(rxbuf->m_head);
            }
            if (rxbuf->m_pack != NULL) {
                bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
                rxbuf->m_pack->m_flags |= M_PKTHDR;
                m_freem(rxbuf->m_pack);
            }
            rxbuf->m_head = NULL;
            rxbuf->m_pack = NULL;
            if (rxbuf->hmap != NULL) {
                bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
                rxbuf->hmap = NULL;
            }
            if (rxbuf->pmap != NULL) {
                bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
                rxbuf->pmap = NULL;
            }
        }
        if (rxr->rx_buffers != NULL) {
            free(rxr->rx_buffers, M_DEVBUF);
            rxr->rx_buffers = NULL;
        }
    }

    if (rxr->htag != NULL) {
        bus_dma_tag_destroy(rxr->htag);
        rxr->htag = NULL;
    }
    if (rxr->ptag != NULL) {
        bus_dma_tag_destroy(rxr->ptag);
        rxr->ptag = NULL;
    }

    return;
}
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
    /*
     * At the moment LRO is only for IPv4/TCP packets and the TCP
     * checksum of the packet should be computed by hardware. Also it
     * should not have a VLAN tag in the ethernet header.
     */
    if (rxr->lro_enabled &&
        (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
        (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
        (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
        (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
        (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
        /*
        ** Send to the stack if:
        **  - LRO not enabled, or
        **  - no LRO resources, or
        **  - lro enqueue fails
        */
        if (rxr->lro.lro_cnt != 0)
            if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
                return;
    }
    (*ifp->if_input)(ifp, m);
}
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
    struct adapter *adapter = rxr->adapter;
    struct ixv_rx_buf *rbuf;
    struct mbuf *mh, *mp;

    rbuf = &rxr->rx_buffers[i];
    if (rbuf->fmp != NULL) { /* Partial chain ? */
        m_freem(rbuf->fmp);
        rbuf->fmp = NULL; /* don't leave a dangling chain head */
    }

    mh = rbuf->m_head;
    mp = rbuf->m_pack;

    /* Reuse loaded DMA maps and just update the mbuf chains */
    if (mh != NULL) { /* header mbuf only exists with header split */
        mh->m_len = MHLEN;
        mh->m_flags |= M_PKTHDR;
        mh->m_next = NULL;
    }

    mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
    mp->m_data = mp->m_ext.ext_buf;
    mp->m_next = NULL;
    return;
}
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
    struct adapter *adapter = que->adapter;
    struct rx_ring *rxr = que->rxr;
    struct ifnet *ifp = adapter->ifp;
    struct lro_ctrl *lro = &rxr->lro;
    struct lro_entry *queued;
    int i, nextp, processed = 0;
    u32 staterr = 0;
    union ixgbe_adv_rx_desc *cur;
    struct ixv_rx_buf *rbuf, *nbuf;

    IXV_RX_LOCK(rxr);

    for (i = rxr->next_to_check; count != 0;) {
        struct mbuf *sendmp, *mh, *mp;
        u32 ptype;
        u16 hlen, plen, hdr, vtag;
        bool eop;

        /* Sync the ring. */
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        cur = &rxr->rx_base[i];
        staterr = le32toh(cur->wb.upper.status_error);

        if ((staterr & IXGBE_RXD_STAT_DD) == 0)
            break;
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;

        count--;
        sendmp = NULL;
        nbuf = NULL;
        cur->wb.upper.status_error = 0;
        rbuf = &rxr->rx_buffers[i];
        mh = rbuf->m_head;
        mp = rbuf->m_pack;

        plen = le16toh(cur->wb.upper.length);
        ptype = le32toh(cur->wb.lower.lo_dword.data) &
            IXGBE_RXDADV_PKTTYPE_MASK;
        hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
        vtag = le16toh(cur->wb.upper.vlan);
        eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

        /* Make sure all parts of a bad packet are discarded */
        if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
            (rxr->discard)) {
            ifp->if_ierrors++;
            rxr->rx_discarded++;
            if (!eop)
                rxr->discard = TRUE;
            else
                rxr->discard = FALSE;
            ixv_rx_discard(rxr, i);
            goto next_desc;
        }

        if (!eop) {
            nextp = i + 1;
            if (nextp == adapter->num_rx_desc)
                nextp = 0;
            nbuf = &rxr->rx_buffers[nextp];
            prefetch(nbuf);
        }
        /*
        ** The header mbuf is ONLY used when header
        ** split is enabled, otherwise we get normal
        ** behavior, ie, both header and payload
        ** are DMA'd into the payload buffer.
        **
        ** Rather than using the fmp/lmp global pointers
        ** we now keep the head of a packet chain in the
        ** buffer struct and pass this along from one
        ** descriptor to the next, until we get EOP.
        */
        if (rxr->hdr_split && (rbuf->fmp == NULL)) {
            /* This must be an initial descriptor */
            hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                IXGBE_RXDADV_HDRBUFLEN_SHIFT;
            if (hlen > IXV_RX_HDR)
                hlen = MHLEN;
            mh->m_len = hlen;
            mh->m_flags |= M_PKTHDR;
            mh->m_next = NULL;
            mh->m_pkthdr.len = mh->m_len;
            /* Null buf pointer so it is refreshed */
            rbuf->m_head = NULL;
            /*
            ** Check the payload length, this
            ** could be zero if it's a small
            ** packet.
            */
            if (plen > 0) {
                mp->m_len = plen;
                mp->m_next = NULL;
                mp->m_flags &= ~M_PKTHDR;
                mh->m_next = mp;
                mh->m_pkthdr.len += mp->m_len;
                /* Null buf pointer so it is refreshed */
                rbuf->m_pack = NULL;
                rxr->rx_split_packets++;
            }
            /*
            ** Now create the forward
            ** chain so when complete
            ** we won't have to.
            */
            if (eop == 0) {
                /* stash the chain head */
                nbuf->fmp = mh;
                /* Make forward chain */
                if (plen)
                    mp->m_next = nbuf->m_pack;
                else
                    mh->m_next = nbuf->m_pack;
            } else {
                /* Singlet, prepare to send */
                sendmp = mh;
                if (staterr & IXGBE_RXD_STAT_VP) {
                    sendmp->m_pkthdr.ether_vtag = vtag;
                    sendmp->m_flags |= M_VLANTAG;
                }
            }
        } else {
            /*
            ** Either no header split, or a
            ** secondary piece of a fragmented
            ** packet.
            */
            mp->m_len = plen;
            /*
            ** See if there is a stored head
            ** that determines what we are
            */
            sendmp = rbuf->fmp;
            rbuf->m_pack = rbuf->fmp = NULL;

            if (sendmp != NULL) /* secondary frag */
                sendmp->m_pkthdr.len += mp->m_len;
            else {
                /* first desc of a non-ps chain */
                sendmp = mp;
                sendmp->m_flags |= M_PKTHDR;
                sendmp->m_pkthdr.len = mp->m_len;
                if (staterr & IXGBE_RXD_STAT_VP) {
                    sendmp->m_pkthdr.ether_vtag = vtag;
                    sendmp->m_flags |= M_VLANTAG;
                }
            }
            /* Pass the head pointer on */
            if (eop == 0) {
                nbuf->fmp = sendmp;
                sendmp = NULL;
                mp->m_next = nbuf->m_pack;
            }
        }
        ++processed;
        /* Sending this frame? */
        if (sendmp != NULL) {
            sendmp->m_pkthdr.rcvif = ifp;
            ifp->if_ipackets++;
            rxr->rx_packets++;
            /* capture data for AIM */
            rxr->bytes += sendmp->m_pkthdr.len;
            rxr->rx_bytes += sendmp->m_pkthdr.len;
            if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
            sendmp->m_pkthdr.flowid = que->msix;
            sendmp->m_flags |= M_FLOWID;
#endif
        }
next_desc:
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Advance our pointers to the next descriptor. */
        if (++i == adapter->num_rx_desc)
            i = 0;

        /* Now send to the stack or do LRO */
        if (sendmp != NULL)
            ixv_rx_input(rxr, ifp, sendmp, ptype);

        /* Every 8 descriptors we go to refresh mbufs */
        if (processed == 8) {
            ixv_refresh_mbufs(rxr, i);
            processed = 0;
        }
    }

    /* Refresh any remaining buf structs */
    if (processed != 0) {
        ixv_refresh_mbufs(rxr, i);
        processed = 0;
    }

    rxr->next_to_check = i;

    /*
     * Flush any outstanding LRO work
     */
    while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
        SLIST_REMOVE_HEAD(&lro->lro_active, next);
        tcp_lro_flush(lro, queued);
    }

    IXV_RX_UNLOCK(rxr);

    /*
    ** We still have cleaning to do?
    ** Schedule another interrupt if so.
    */
    if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
        ixv_rearm_queues(adapter, (u64)(1 << que->msix));
        return TRUE;
    }

    return FALSE;
}
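/*
** Mbuf refresh above is batched: descriptors are handed back to
** the hardware every eight completions inside the loop, with one
** final refresh for any remainder, so the tail register is not
** written once per received frame.
*/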
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
{
    u16 status = (u16) staterr;
    u8 errors = (u8) (staterr >> 24);
    bool sctp = FALSE;

    if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
        (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
        sctp = TRUE;

    if (status & IXGBE_RXD_STAT_IPCS) {
        if (!(errors & IXGBE_RXD_ERR_IPE)) {
            /* IP Checksum Good */
            mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
            mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
        } else
            mp->m_pkthdr.csum_flags = 0;
    }
    if (status & IXGBE_RXD_STAT_L4CS) {
        u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
        if (sctp)
            type = CSUM_SCTP_VALID;
#endif
        if (!(errors & IXGBE_RXD_ERR_TCPE)) {
            mp->m_pkthdr.csum_flags |= type;
            if (!sctp)
                mp->m_pkthdr.csum_data = htons(0xffff);
        }
    }
    return;
}
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 ctrl, vid, vfta, retry;

    /*
    ** We get here thru init_locked, meaning
    ** a soft reset, this has already cleared
    ** the VFTA and other state, so if there
    ** have been no vlan's registered do nothing.
    */
    if (adapter->num_vlans == 0)
        return;

    /* Enable the queues */
    for (int i = 0; i < adapter->num_queues; i++) {
        ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        ctrl |= IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
    }

    /*
    ** A soft reset zero's out the VFTA, so
    ** we need to repopulate it now.
    */
    for (int i = 0; i < VFTA_SIZE; i++) {
        if (ixv_shadow_vfta[i] == 0)
            continue;
        vfta = ixv_shadow_vfta[i];
        /*
        ** Reconstruct the vlan id's
        ** based on the bits set in each
        ** of the array ints.
        */
        for (int j = 0; j < 32; j++) {
            retry = 0;
            if ((vfta & (1 << j)) == 0)
                continue;
            vid = (i * 32) + j;
            /* Call the shared code mailbox routine */
            while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
                if (++retry > 5)
                    break;
            }
        }
    }
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct adapter *adapter = ifp->if_softc;
    u16 index, bit;

    if (ifp->if_softc != arg) /* Not our event */
        return;

    if ((vtag == 0) || (vtag > 4095)) /* Invalid */
        return;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] |= (1 << bit);
    ++adapter->num_vlans;
    /* Re-init to load the changes */
    ixv_init(adapter);
}
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct adapter *adapter = ifp->if_softc;
    u16 index, bit;

    if (ifp->if_softc != arg)
        return;

    if ((vtag == 0) || (vtag > 4095)) /* Invalid */
        return;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] &= ~(1 << bit);
    --adapter->num_vlans;
    /* Re-init to load the changes */
    ixv_init(adapter);
}
static void
ixv_enable_intr(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_queue *que = adapter->queues;
    u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

    mask = IXGBE_EIMS_ENABLE_MASK;
    mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

    for (int i = 0; i < adapter->num_queues; i++, que++)
        ixv_enable_queue(adapter, que->msix);

    IXGBE_WRITE_FLUSH(hw);

    return;
}
static void
ixv_disable_intr(struct adapter *adapter)
{
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
    IXGBE_WRITE_FLUSH(&adapter->hw);
    return;
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 ivar, index;

    vector |= IXGBE_IVAR_ALLOC_VAL;

    if (type == -1) { /* MISC IVAR */
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
        ivar &= ~0xFF;
        ivar |= vector;
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
    } else {          /* RX/TX IVARS */
        index = (16 * (entry & 1)) + (8 * type);
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
        ivar &= ~(0xFF << index);
        ivar |= (vector << index);
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
    }
}
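/*
** Layout note: each VTIVAR register packs four 8-bit cause entries,
** RX and TX for a pair of queues. (entry >> 1) selects the
** register, 16 * (entry & 1) selects the queue half, and 8 * type
** selects RX (0) versus TX (1) within it; IXGBE_IVAR_ALLOC_VAL
** marks the entry valid.
*/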
static void
ixv_configure_ivars(struct adapter *adapter)
{
    struct ix_queue *que = adapter->queues;

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        /* First the RX queue entry */
        ixv_set_ivar(adapter, i, que->msix, 0);
        /* ... and the TX */
        ixv_set_ivar(adapter, i, que->msix, 1);
        /* Set an initial value in EITR */
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
    }

    /* For the Link interrupt */
    ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
    struct adapter *adapter = context;

    ixgbe_check_link(&adapter->hw,
        &adapter->link_speed, &adapter->link_up, 0);
    ixv_update_link_status(adapter);
}
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
    if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
        adapter->stats.saved_reset_vfgprc +=
            adapter->stats.vfgprc - adapter->stats.base_vfgprc;
        adapter->stats.saved_reset_vfgptc +=
            adapter->stats.vfgptc - adapter->stats.base_vfgptc;
        adapter->stats.saved_reset_vfgorc +=
            adapter->stats.vfgorc - adapter->stats.base_vfgorc;
        adapter->stats.saved_reset_vfgotc +=
            adapter->stats.vfgotc - adapter->stats.base_vfgotc;
        adapter->stats.saved_reset_vfmprc +=
            adapter->stats.vfmprc - adapter->stats.base_vfmprc;
    }
}
static void
ixv_init_stats(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
    adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
    adapter->stats.last_vfgorc |=
        (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

    adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
    adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
    adapter->stats.last_vfgotc |=
        (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

    adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

    adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
    adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
    adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
    adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
    adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
    u32 current = IXGBE_READ_REG(hw, reg);              \
    if (current < last)                                 \
        count += 0x100000000LL;                         \
    last = current;                                     \
    count &= 0xFFFFFFFF00000000LL;                      \
    count |= current;                                   \
}

#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
    u64 cur_lsb = IXGBE_READ_REG(hw, lsb);              \
    u64 cur_msb = IXGBE_READ_REG(hw, msb);              \
    u64 current = ((cur_msb << 32) | cur_lsb);          \
    if (current < last)                                 \
        count += 0x1000000000LL;                        \
    last = current;                                     \
    count &= 0xFFFFFFF000000000LL;                      \
    count |= current;                                   \
}
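/*
** Rollover example for UPDATE_STAT_32: if 'last' was 0xFFFFFFF0 and
** the register now reads 0x00000010, current < last flags a 32-bit
** wrap, 2^32 is added to the running count, and the low word is
** then replaced with the new reading, a net advance of 0x20 rather
** than a backwards jump. UPDATE_STAT_36 does the same for the
** 36-bit octet counters split across LSB/MSB registers.
*/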
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
        adapter->stats.vfgprc);
    UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
        adapter->stats.vfgptc);
    UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
        adapter->stats.last_vfgorc, adapter->stats.vfgorc);
    UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
        adapter->stats.last_vfgotc, adapter->stats.vfgotc);
    UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
        adapter->stats.vfmprc);
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter * adapter)
{
    device_t dev = adapter->dev;

    device_printf(dev,"Std Mbuf Failed = %lu\n",
        adapter->mbuf_defrag_failed);
    device_printf(dev,"Driver dropped packets = %lu\n",
        adapter->dropped_pkts);
    device_printf(dev, "watchdog timeouts = %ld\n",
        adapter->watchdog_events);

    device_printf(dev,"Good Packets Rcvd = %llu\n",
        (long long)adapter->stats.vfgprc);
    device_printf(dev,"Good Packets Xmtd = %llu\n",
        (long long)adapter->stats.vfgptc);
    device_printf(dev,"TSO Transmissions = %lu\n",
        adapter->tso_tx);

    return;
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_queue *que = adapter->queues;
    struct rx_ring *rxr;
    struct tx_ring *txr;
    struct lro_ctrl *lro;

    device_printf(dev,"Error Byte Count = %u \n",
        IXGBE_READ_REG(hw, IXGBE_ERRBC));

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        txr = que->txr;
        rxr = que->rxr;
        lro = &rxr->lro;
        device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
            que->msix, (long)que->irqs);
        device_printf(dev,"RX(%d) Packets Received: %lld\n",
            rxr->me, (long long)rxr->rx_packets);
        device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
            rxr->me, (long long)rxr->rx_split_packets);
        device_printf(dev,"RX(%d) Bytes Received: %lu\n",
            rxr->me, (long)rxr->rx_bytes);
        device_printf(dev,"RX(%d) LRO Queued= %d\n",
            rxr->me, lro->lro_queued);
        device_printf(dev,"RX(%d) LRO Flushed= %d\n",
            rxr->me, lro->lro_flushed);
        device_printf(dev,"TX(%d) Packets Sent: %lu\n",
            txr->me, (long)txr->total_packets);
        device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
            txr->me, (long)txr->no_desc_avail);
    }

    device_printf(dev,"MBX IRQ Handled: %lu\n",
        (long)adapter->mbx_irq);
    return;
}
static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
    int error, result = -1;
    struct adapter *adapter;

    error = sysctl_handle_int(oidp, &result, 0, req);
    if (error || !req->newptr)
        return (error);

    if (result == 1) {
        adapter = (struct adapter *) arg1;
        ixv_print_hw_stats(adapter);
    }
    return (error);
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
    int error, result = -1;
    struct adapter *adapter;

    error = sysctl_handle_int(oidp, &result, 0, req);
    if (error || !req->newptr)
        return (error);

    if (result == 1) {
        adapter = (struct adapter *) arg1;
        ixv_print_debug_info(adapter);
    }
    return (error);
}
/*
** Set flow control using sysctl:
** Flow control values:
**     0 - off
**     1 - rx pause
**     2 - tx pause
**     3 - full
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
    int error;
    struct adapter *adapter;

    error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
    if (error)
        return (error);

    adapter = (struct adapter *) arg1;
    switch (ixv_flow_control) {
        case ixgbe_fc_rx_pause:
        case ixgbe_fc_tx_pause:
        case ixgbe_fc_full:
            adapter->hw.fc.requested_mode = ixv_flow_control;
            break;
        case ixgbe_fc_none:
        default:
            adapter->hw.fc.requested_mode = ixgbe_fc_none;
    }

    ixgbe_fc_enable(&adapter->hw, 0);
    return (error);
}
static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
    *limit = value;
    SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
        OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}