/******************************************************************************

  Copyright (c) 2001-2011, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.0.1";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void	ixv_start(struct ifnet *);
static void	ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int	ixv_mq_start(struct ifnet *, struct mbuf *);
static int	ixv_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void	ixv_qflush(struct ifnet *);
#endif
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_allocate_queues(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static int	ixv_allocate_transmit_buffers(struct tx_ring *);
static int	ixv_setup_transmit_structures(struct adapter *);
static void	ixv_setup_transmit_ring(struct tx_ring *);
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_free_transmit_structures(struct adapter *);
static void	ixv_free_transmit_buffers(struct tx_ring *);

static int	ixv_allocate_receive_buffers(struct rx_ring *);
static int	ixv_setup_receive_structures(struct adapter *);
static int	ixv_setup_receive_ring(struct rx_ring *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_free_receive_structures(struct adapter *);
static void	ixv_free_receive_buffers(struct rx_ring *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static bool	ixv_txeof(struct tx_ring *);
static bool	ixv_rxeof(struct ix_queue *, int);
static void	ixv_rx_checksum(u32, struct mbuf *, u32);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static void	ixv_refresh_mbufs(struct rx_ring *, int);
static int	ixv_xmit(struct tx_ring *, struct mbuf **);
static int	ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int	ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixv_dma_malloc(struct adapter *, bus_size_t,
		    struct ixv_dma_alloc *, int);
static void	ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void	ixv_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static bool	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	{0, 0}
};

static driver_t ixv_driver = {
	"ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload;
 * it can be a performance win in some workloads, but
 * in others it actually hurts, so it is off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
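
/*
 * The TUNABLE_INT() hooks above read these values from the kernel
 * environment at module load time.  A minimal sketch of how they can
 * be set from /boot/loader.conf (the values below are illustrative
 * only, and must stay within the MIN/MAX descriptor bounds checked
 * in ixv_attach()):
 *
 *   hw.ixv.enable_aim="1"
 *   hw.ixv.rx_process_limit="256"
 *   hw.ixv.txd="2048"
 */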
/*
** Shadow VFTA table; this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  an adapter based on the PCI vendor/device ID of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
	ixv_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index],
			    ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
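
/*
 * For example, the single entry in ixv_vendor_info_array matches the
 * 82599 virtual function: PCI vendor IXGBE_INTEL_VENDOR_ID (0x8086)
 * with device ID IXGBE_DEV_ID_82599_VF.  The zero subvendor/subdevice
 * fields act as wildcards in the comparison above.
 */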
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int		error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	if (resource_disabled("ixgbe", device_get_unit(dev))) {
		device_printf(dev, "Disabled by device hint\n");
		return (ENXIO);
	}

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init */
	IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixv_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code; at
	** this point the MAC type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXV_FC_PAUSE;
	hw->fc.low_water = IXV_FC_LO;
	hw->fc.high_water = IXV_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev, "Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixv_rx_process_limit);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);

}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);
	return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
{
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXV_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixv_xmit(txr, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;

	}
	return;
}
/*
 * Legacy TX start - called by the stack; this
 * always uses the first tx ring and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXV_TX_LOCK(txr);
		ixv_start_locked(txr, ifp);
		IXV_TX_UNLOCK(txr);
	}
	return;
}

#else

/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXV_TX_TRYLOCK(txr)) {
		err = ixv_mq_start_locked(ifp, txr, m);
		IXV_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		taskqueue_enqueue(que->tq, &que->que_task);
	}

	return (err);
}
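
/*
 * Note: when the TX lock is contended above, the frame is not dropped;
 * it is queued on the ring's buf_ring (drbr) and the queue tasklet is
 * scheduled, which later drains the backlog via ixv_mq_start_locked().
 */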
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued, err = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixv_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}

	return (err);
}
/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXV_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}

#endif
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXV_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXV_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXV_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to get
 *  to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixv_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixv_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXV_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
	return;
}
/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = 1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1 << vector);
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixv_rxeof(que, adapter->rx_process_limit);
		IXV_TX_LOCK(txr);
		ixv_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixv_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixv_start_locked(txr, ifp);
#endif
		IXV_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more_tx, more_rx;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	IXV_TX_LOCK(txr);
	more_tx = ixv_txeof(txr);
	IXV_TX_UNLOCK(txr);

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
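
/*
 * Worked example of the AIM calculation above (illustrative numbers):
 * with an average frame of 1500 bytes, newitr = 1500 + 24 = 1524,
 * which is clamped to min(1524, 3000) = 1524; since 1524 is outside
 * the (300, 1200) mid range it is halved to 762, and that value is
 * mirrored into the upper 16 bits before being written to VTEITR on
 * the next interrupt.
 */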
static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->mbx_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXV_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXV_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXV_CORE_UNLOCK(adapter);

	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter	*adapter = txr->adapter;
	u32		olinfo_status = 0, cmd_type_len;
	u32		paylen = 0;
	int		i, j, error, nsegs;
	int		first, last = 0;
	struct mbuf	*m_head;
	bus_dma_segment_t segs[32];
	bus_dmamap_t	map;
	struct ixv_tx_buf *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_DONTWAIT);
		if (m == NULL) {
			adapter->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			adapter->no_tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		adapter->no_tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/*
	** Set up the appropriate offload context;
	** this becomes the first descriptor of
	** a packet.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ixv_tso_setup(txr, m_head, &paylen)) {
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
			++adapter->tso_tx;
		} else
			return (ENXIO);
	} else if (ixv_tx_ctx_setup(txr, m_head))
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	/* Record payload length */
	if (paylen == 0)
		olinfo_status |= m_head->m_pkthdr.len <<
		    IXGBE_ADVTXD_PAYLEN_SHIFT;

	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i; /* descriptor that will get completion IRQ */

		if (++i == adapter->num_tx_desc)
			i = 0;

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop_index = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

	return (0);

xmit_fail:
	bus_dmamap_unload(txr->txtag, txbuf->map);
	return (error);

}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is
 *  updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr);

	return;
}
/*
 * This is an iterator function needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct tx_ring	*txr = adapter->tx_rings;
	int		i;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * If the interface has been paused
	 * then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
		goto out;
	/*
	** Check for time since any descriptor was cleaned
	*/
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		if (txr->watchdog_check == FALSE) {
			IXV_TX_UNLOCK(txr);
			continue;
		}
		if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
			goto hung;
		IXV_TX_UNLOCK(txr);
	}
out:
	ixv_rearm_queues(adapter, adapter->que_mask);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

hung:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
	device_printf(dev, "TX(%d) desc avail = %d, "
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	IXV_TX_UNLOCK(txr);
	ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	device_t	dev = adapter->dev;


	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			for (int i = 0; i < adapter->num_queues;
			    i++, txr++)
				txr->watchdog_check = FALSE;
		}
	}

	return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	u16		pci_cmd_word;

	/*
	** Make sure BUSMASTER is set, on a VM under
	** KVM it may not be and will break things.
	*/
	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (pci_cmd_word & PCIM_CMD_MEMEN))) {
		INIT_DEBUGOUT("Memory Access and/or Bus Master "
		    "bits were not set!\n");
		pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	int		error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->que_mask |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);

		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->mbxvec = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design, QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}
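
/*
 * Interrupt resource layout used above: each queue vector i is
 * allocated with SYS_RES_IRQ rid = i + 1, and the mailbox/link vector
 * follows the last queue vector.  With the single queue this driver
 * configures in ixv_allocate_pci_resources(), that means rid 1 for
 * the queue and rid 2 for the mailbox.
 */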
/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, vectors, want = 2;


	/* First try MSI/X */
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!adapter->msix_mem) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	vectors = pci_msix_count(dev);
	if (vectors < 2) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
		goto out;
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	if (pci_alloc_msix(dev, &want) == 0) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
out:
	device_printf(adapter->dev, "MSIX config error\n");
	return (ENXIO);
}
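
/*
 * pci_alloc_msix() is called with want = 2 and, on success, returns
 * the vector count actually granted through the same variable;
 * anything less than the queue vector plus the mailbox vector is
 * treated as a configuration error here, since the VF has no INTx or
 * MSI fallback.
 */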
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int		rid;
	device_t	dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below.
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->mbxvec) /* we are doing MSIX */
		rid = adapter->mbxvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1) : (rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixv_mq_start;
	ifp->if_qflush = ixv_qflush;
#else
	ifp->if_start = ixv_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg, err = 0;
	bool	negotiate = TRUE;

	if (hw->mac.ops.check_link)
		err = hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
	if (err)
		goto out;

	if (hw->mac.ops.setup_link)
		err = hw->mac.ops.setup_link(hw, autoneg,
		    negotiate, adapter->link_up);
out:
	return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}

static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
	       struct ixv_dma_alloc *dma, int mapflags)
{
	device_t dev = adapter->dev;
	int	 r;

	r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       DBA_ALIGN, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dma_tag_create failed; "
		    "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dmamem_alloc failed; "
		    "error %u\n", r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixv_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev, "ixv_dma_malloc: bus_dmamap_load failed; "
		    "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
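
/*
 * Typical use of the helper above (as seen later in
 * ixv_allocate_queues()): the descriptor ring size is rounded up to
 * DBA_ALIGN and then backed by a single contiguous DMA segment, e.g.:
 *
 *	tsize = roundup2(adapter->num_tx_desc *
 *	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
 *	if (ixv_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT))
 *		... handle ENOMEM ...
 *
 * On success txr->txdma.dma_vaddr / dma_paddr hold the kernel virtual
 * and bus addresses of the ring.
 */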
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
	return;
}
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each, called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int rsize, tsize, error = 0;
	int txconf = 0, rxconf = 0;

	/* First allocate the top level queue structs */
	if (!(adapter->queues =
	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* First allocate the TX ring struct memory */
	if (!(adapter->tx_rings =
	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		error = ENOMEM;
		goto tx_fail;
	}

	/* Next allocate the RX */
	if (!(adapter->rx_rings =
	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto rx_fail;
	}

	/* For the ring itself */
	tsize = roundup2(adapter->num_tx_desc *
	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

	/*
	 * Now set up the TX queues, txconf is needed to handle the
	 * possibility that things fail midcourse and we need to
	 * undo memory gracefully
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->me = i;

		/* Initialize the TX side lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), txr->me);
		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, tsize,
		    &txr->txdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

		/* Now allocate transmit buffers for the ring */
		if (ixv_allocate_transmit_buffers(txr)) {
			device_printf(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#if __FreeBSD_version >= 800000
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
		    M_WAITOK, &txr->tx_mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up buf ring\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#endif
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
		rxr->me = i;

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), rxr->me);
		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, rsize,
		    &rxr->rxdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate RxDescriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring */
		if (ixv_allocate_receive_buffers(rxr)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	** Finally set up the queue holding structs
	*/
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];
	}

	return (0);

err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixv_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixv_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(
			       bus_get_dma_tag(adapter->dev),	/* parent */
			       1, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       IXV_TSO_SIZE,	/* maxsize */
			       32,	/* nsegments */
			       PAGE_SIZE,	/* maxsegsize */
			       0,	/* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &txr->txtag))) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
	ixv_free_transmit_structures(adapter);
	return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *txbuf;
	int i;

	/* Clear the old ring contents */
	IXV_TX_LOCK(txr);
	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		/* Clear the EOP index */
		txbuf->eop_index = -1;
	}

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixv_setup_transmit_ring(txr);

	return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
        struct tx_ring  *txr = adapter->tx_rings;
        struct ixgbe_hw *hw = &adapter->hw;

        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                u64 tdba = txr->txdma.dma_paddr;
                u32 txctrl, txdctl;

                /* Set WTHRESH to 8, burst writeback */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
                /* Now enable */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

                /* Set the HW Tx Head and Tail indices */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

                /* Setup Transmit Descriptor Cmd Settings */
                txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
                txr->watchdog_check = FALSE;

                /* Set Ring parameters */
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
                    (tdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
                    adapter->num_tx_desc *
                    sizeof(struct ixgbe_legacy_tx_desc));
                txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
        }

        return;
}
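/*
** Illustrative sketch, not part of the driver: how a 64-bit descriptor
** ring base address is split across the 32-bit TDBAL/TDBAH register
** pair above, and how WTHRESH lands at bit 16 of TXDCTL, matching the
** (8 << 16) write. A standalone userland program under the assumption
** that the field layouts are exactly as the code above uses them; it
** is guarded out of the driver build and meant to be compiled alone.
*/
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint64_t tdba = 0x00000001234ff000ULL;  /* hypothetical ring phys addr */
        uint32_t tdbal = (uint32_t)(tdba & 0x00000000ffffffffULL);
        uint32_t tdbah = (uint32_t)(tdba >> 32);
        uint32_t txdctl = 0;

        txdctl |= (8 << 16);                    /* WTHRESH = 8 */
        printf("TDBAL=0x%08x TDBAH=0x%08x TXDCTL=0x%08x\n",
            tdbal, tdbah, txdctl);
        return (0);
}
#endif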
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
        struct tx_ring *txr = adapter->tx_rings;

        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                IXV_TX_LOCK(txr);
                ixv_free_transmit_buffers(txr);
                ixv_dma_free(adapter, &txr->txdma);
                IXV_TX_UNLOCK(txr);
                IXV_TX_LOCK_DESTROY(txr);
        }
        free(adapter->tx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        struct ixv_tx_buf *tx_buffer;
        int i;

        INIT_DEBUGOUT("free_transmit_ring: begin");

        if (txr->tx_buffers == NULL)
                return;

        tx_buffer = txr->tx_buffers;
        for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
                if (tx_buffer->m_head != NULL) {
                        bus_dmamap_sync(txr->txtag, tx_buffer->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(txr->txtag,
                            tx_buffer->map);
                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                        if (tx_buffer->map != NULL) {
                                bus_dmamap_destroy(txr->txtag,
                                    tx_buffer->map);
                                tx_buffer->map = NULL;
                        }
                } else if (tx_buffer->map != NULL) {
                        bus_dmamap_unload(txr->txtag,
                            tx_buffer->map);
                        bus_dmamap_destroy(txr->txtag,
                            tx_buffer->map);
                        tx_buffer->map = NULL;
                }
        }
#if __FreeBSD_version >= 800000
        if (txr->br != NULL)
                buf_ring_free(txr->br, M_DEVBUF);
#endif
        if (txr->tx_buffers != NULL) {
                free(txr->tx_buffers, M_DEVBUF);
                txr->tx_buffers = NULL;
        }
        if (txr->txtag != NULL) {
                bus_dma_tag_destroy(txr->txtag);
                txr->txtag = NULL;
        }
        return;
}
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static bool
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_adv_tx_context_desc *TXD;
        struct ixv_tx_buf *tx_buffer;
        u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
        struct ether_vlan_header *eh;
        struct ip *ip;
        struct ip6_hdr *ip6;
        int ehdrlen, ip_hlen = 0;
        u16 etype;
        u8 ipproto = 0;
        bool offload = TRUE;
        int ctxd = txr->next_avail_desc;
        u16 vtag = 0;

        if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
                offload = FALSE;

        tx_buffer = &txr->tx_buffers[ctxd];
        TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

        /*
        ** In advanced descriptors the vlan tag must
        ** be placed into the descriptor itself.
        */
        if (mp->m_flags & M_VLANTAG) {
                vtag = htole16(mp->m_pkthdr.ether_vtag);
                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
        } else if (offload == FALSE)
                return FALSE;

        /*
         * Determine where frame payload starts.
         * Jump over vlan headers if already present,
         * helpful for QinQ too.
         */
        eh = mtod(mp, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                etype = ntohs(eh->evl_proto);
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                etype = ntohs(eh->evl_encap_proto);
                ehdrlen = ETHER_HDR_LEN;
        }

        /* Set the ether header length */
        vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

        switch (etype) {
        case ETHERTYPE_IP:
                ip = (struct ip *)(mp->m_data + ehdrlen);
                ip_hlen = ip->ip_hl << 2;
                if (mp->m_len < ehdrlen + ip_hlen)
                        return FALSE;
                ipproto = ip->ip_p;
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                break;
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
                ip_hlen = sizeof(struct ip6_hdr);
                if (mp->m_len < ehdrlen + ip_hlen)
                        return FALSE;
                ipproto = ip6->ip6_nxt;
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
                break;
        default:
                offload = FALSE;
                break;
        }

        vlan_macip_lens |= ip_hlen;
        type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

        switch (ipproto) {
        case IPPROTO_TCP:
                if (mp->m_pkthdr.csum_flags & CSUM_TCP)
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                break;
        case IPPROTO_UDP:
                if (mp->m_pkthdr.csum_flags & CSUM_UDP)
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
                break;
#if __FreeBSD_version >= 800000
        case IPPROTO_SCTP:
                if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                break;
#endif
        default:
                offload = FALSE;
                break;
        }

        /* Now copy bits into descriptor */
        TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
        TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
        TXD->seqnum_seed = htole32(0);
        TXD->mss_l4len_idx = htole32(0);

        tx_buffer->m_head = NULL;
        tx_buffer->eop_index = -1;

        /* We've consumed the first desc, adjust counters */
        if (++ctxd == adapter->num_tx_desc)
                ctxd = 0;
        txr->next_avail_desc = ctxd;
        --txr->tx_avail;

        return (offload);
}
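/*
** Illustrative sketch, not part of the driver: the ether-header length
** logic above, reduced to a standalone program. A frame whose first
** ethertype is 802.1Q carries a 4-byte VLAN encapsulation, so the
** payload starts ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN (14 + 4) bytes
** in; otherwise it starts at 14. The constants are written out here
** because this sketch does not include the kernel headers. Guarded
** out of the build.
*/
#if 0
#include <stdio.h>
#include <stdint.h>

#define ETHERTYPE_VLAN          0x8100
#define ETHER_HDR_LEN           14
#define ETHER_VLAN_ENCAP_LEN    4

static int
ether_payload_offset(uint16_t first_ethertype)
{
        if (first_ethertype == ETHERTYPE_VLAN)
                return (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        return (ETHER_HDR_LEN);
}

int
main(void)
{
        printf("plain: %d tagged: %d\n",
            ether_payload_offset(0x0800),       /* IPv4 */
            ether_payload_offset(ETHERTYPE_VLAN));
        return (0);
}
#endif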
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static bool
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_adv_tx_context_desc *TXD;
        struct ixv_tx_buf *tx_buffer;
        u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
        u32 mss_l4len_idx = 0;
        u16 vtag = 0;
        int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
        struct ether_vlan_header *eh;
        struct ip *ip;
        struct tcphdr *th;

        /*
         * Determine where frame payload starts.
         * Jump over vlan headers if already present
         */
        eh = mtod(mp, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        else
                ehdrlen = ETHER_HDR_LEN;

        /* Ensure we have at least the IP+TCP header in the first mbuf. */
        if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
                return FALSE;

        ctxd = txr->next_avail_desc;
        tx_buffer = &txr->tx_buffers[ctxd];
        TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

        ip = (struct ip *)(mp->m_data + ehdrlen);
        if (ip->ip_p != IPPROTO_TCP)
                return FALSE;   /* 0 */
        ip->ip_sum = 0;
        ip_hlen = ip->ip_hl << 2;
        th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
        th->th_sum = in_pseudo(ip->ip_src.s_addr,
            ip->ip_dst.s_addr, htons(IPPROTO_TCP));
        tcp_hlen = th->th_off << 2;
        hdrlen = ehdrlen + ip_hlen + tcp_hlen;

        /* This is used in the transmit desc in encap */
        *paylen = mp->m_pkthdr.len - hdrlen;

        /* VLAN MACLEN IPLEN */
        if (mp->m_flags & M_VLANTAG) {
                vtag = htole16(mp->m_pkthdr.ether_vtag);
                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
        }

        vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= ip_hlen;
        TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

        /* ADV DTYPE TUCMD */
        type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
        TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

        /* MSS L4LEN IDX */
        mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
        mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
        TXD->mss_l4len_idx = htole32(mss_l4len_idx);

        TXD->seqnum_seed = htole32(0);
        tx_buffer->m_head = NULL;
        tx_buffer->eop_index = -1;

        if (++ctxd == adapter->num_tx_desc)
                ctxd = 0;

        txr->tx_avail--;
        txr->next_avail_desc = ctxd;
        return TRUE;
}
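/*
** Illustrative sketch, not part of the driver: packing MSS and TCP
** header length into the context descriptor's mss_l4len_idx word as
** the TSO setup above does. The shift values (MSS at bit 16, L4LEN at
** bit 8) are assumptions standing in for the IXGBE_ADVTXD_* macros
** this file relies on; check ixgbe_type.h before trusting them.
** Guarded out of the build.
*/
#if 0
#include <stdio.h>
#include <stdint.h>

#define ADVTXD_L4LEN_SHIFT      8       /* assumed IXGBE_ADVTXD_L4LEN_SHIFT */
#define ADVTXD_MSS_SHIFT        16      /* assumed IXGBE_ADVTXD_MSS_SHIFT */

int
main(void)
{
        uint32_t mss = 1460, tcp_hlen = 20, mss_l4len_idx = 0;

        mss_l4len_idx |= (mss << ADVTXD_MSS_SHIFT);
        mss_l4len_idx |= (tcp_hlen << ADVTXD_L4LEN_SHIFT);
        printf("mss_l4len_idx = 0x%08x\n", mss_l4len_idx);
        return (0);
}
#endif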
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static bool
ixv_txeof(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        struct ifnet *ifp = adapter->ifp;
        u32 first, last, done;
        struct ixv_tx_buf *tx_buffer;
        struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

        mtx_assert(&txr->tx_mtx, MA_OWNED);

        if (txr->tx_avail == adapter->num_tx_desc)
                return FALSE;

        first = txr->next_to_clean;
        tx_buffer = &txr->tx_buffers[first];
        /* For cleanup we just use legacy struct */
        tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
        last = tx_buffer->eop_index;
        if (last == -1)
                return FALSE;
        eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

        /*
        ** Get the index of the first descriptor
        ** BEYOND the EOP and call that 'done'.
        ** I do this so the comparison in the
        ** inner while loop below can be simple
        */
        if (++last == adapter->num_tx_desc) last = 0;
        done = last;

        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_POSTREAD);
        /*
        ** Only the EOP descriptor of a packet now has the DD
        ** bit set, this is what we look for...
        */
        while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
                /* We clean the range of the packet */
                while (first != done) {
                        tx_desc->upper.data = 0;
                        tx_desc->lower.data = 0;
                        tx_desc->buffer_addr = 0;
                        ++txr->tx_avail;

                        if (tx_buffer->m_head) {
                                bus_dmamap_sync(txr->txtag,
                                    tx_buffer->map,
                                    BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(txr->txtag,
                                    tx_buffer->map);
                                m_freem(tx_buffer->m_head);
                                tx_buffer->m_head = NULL;
                                tx_buffer->map = NULL;
                        }
                        tx_buffer->eop_index = -1;
                        txr->watchdog_time = ticks;

                        if (++first == adapter->num_tx_desc)
                                first = 0;

                        tx_buffer = &txr->tx_buffers[first];
                        tx_desc =
                            (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
                }
                ++ifp->if_opackets;
                /* See if there is more work now */
                last = tx_buffer->eop_index;
                if (last != -1) {
                        eop_desc =
                            (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
                        /* Get next done point */
                        if (++last == adapter->num_tx_desc) last = 0;
                        done = last;
                } else
                        break;
        }
        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        txr->next_to_clean = first;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
         * it is OK to send packets. If there are no pending descriptors,
         * clear the timeout. Otherwise, if some descriptors have been freed,
         * restart the timeout.
         */
        if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (txr->tx_avail == adapter->num_tx_desc) {
                        txr->watchdog_check = FALSE;
                        return FALSE;
                }
        }

        return TRUE;
}
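/*
** Illustrative sketch, not part of the driver: the 'done' index trick
** used by ixv_txeof() above. Advancing 'last' one slot past the EOP
** (with wraparound) lets the inner loop clean the packet's whole
** descriptor range with a simple first != done test. Standalone, with
** a toy 8-entry ring. Guarded out of the build.
*/
#if 0
#include <stdio.h>

#define NUM_DESC 8

int
main(void)
{
        int first = 6, eop = 1;         /* packet occupies slots 6,7,0,1 */
        int done = (eop + 1 == NUM_DESC) ? 0 : eop + 1;

        while (first != done) {
                printf("clean slot %d\n", first);
                if (++first == NUM_DESC)
                        first = 0;
        }
        return (0);
}
#endif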
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
        struct adapter *adapter = rxr->adapter;
        bus_dma_segment_t hseg[1];
        bus_dma_segment_t pseg[1];
        struct ixv_rx_buf *rxbuf;
        struct mbuf *mh, *mp;
        int i, nsegs, error, cleaned;

        i = rxr->next_to_refresh;
        cleaned = -1; /* Signify no completions */
        while (i != limit) {
                rxbuf = &rxr->rx_buffers[i];
                if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
                        mh = m_gethdr(M_DONTWAIT, MT_DATA);
                        if (mh == NULL)
                                goto update;
                        mh->m_pkthdr.len = mh->m_len = MHLEN;
                        mh->m_flags |= M_PKTHDR;
                        m_adj(mh, ETHER_ALIGN);
                        /* Get the memory mapping */
                        error = bus_dmamap_load_mbuf_sg(rxr->htag,
                            rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
                        if (error != 0) {
                                printf("GET BUF: dmamap load"
                                    " failure - %d\n", error);
                                m_free(mh);
                                goto update;
                        }
                        rxbuf->m_head = mh;
                        bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                            BUS_DMASYNC_PREREAD);
                        rxr->rx_base[i].read.hdr_addr =
                            htole64(hseg[0].ds_addr);
                }

                if (rxbuf->m_pack == NULL) {
                        mp = m_getjcl(M_DONTWAIT, MT_DATA,
                            M_PKTHDR, adapter->rx_mbuf_sz);
                        if (mp == NULL)
                                goto update;
                        mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
                        /* Get the memory mapping */
                        error = bus_dmamap_load_mbuf_sg(rxr->ptag,
                            rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
                        if (error != 0) {
                                printf("GET BUF: dmamap load"
                                    " failure - %d\n", error);
                                m_free(mp);
                                goto update;
                        }
                        rxbuf->m_pack = mp;
                        bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                            BUS_DMASYNC_PREREAD);
                        rxr->rx_base[i].read.pkt_addr =
                            htole64(pseg[0].ds_addr);
                }

                cleaned = i;
                /* Calculate next index */
                if (++i == adapter->num_rx_desc)
                        i = 0;
                /* This is the work marker for refresh */
                rxr->next_to_refresh = i;
        }
update:
        if (cleaned != -1) /* If we refreshed some, bump tail */
                IXGBE_WRITE_REG(&adapter->hw,
                    IXGBE_VFRDT(rxr->me), cleaned);
        return;
}
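/*
** Illustrative sketch, not part of the driver: the stateful refresh
** pattern described above. The loop walks from next_to_refresh toward
** 'limit'; on allocation failure it simply stops, leaving
** next_to_refresh where it is so a later call resumes at the same
** slot. alloc_ok() is a stand-in for the mbuf/DMA work. Guarded out
** of the build.
*/
#if 0
#include <stdio.h>

#define NUM_DESC 8

static int budget = 3;          /* pretend only 3 buffers are available */

static int
alloc_ok(void)
{
        return (budget-- > 0);
}

int
main(void)
{
        int next_to_refresh = 2, limit = 7, i = next_to_refresh;

        while (i != limit) {
                if (!alloc_ok())
                        break;                  /* keep state, retry later */
                if (++i == NUM_DESC)
                        i = 0;
                next_to_refresh = i;            /* work marker */
        }
        printf("stopped at %d, resume from %d\n", i, next_to_refresh);
        return (0);
}
#endif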
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
        struct adapter *adapter = rxr->adapter;
        device_t dev = adapter->dev;
        struct ixv_rx_buf *rxbuf;
        int i, bsize, error;

        bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
        if (!(rxr->rx_buffers =
            (struct ixv_rx_buf *) malloc(bsize,
            M_DEVBUF, M_NOWAIT | M_ZERO))) {
                device_printf(dev, "Unable to allocate rx_buffer memory\n");
                error = ENOMEM;
                goto fail;
        }

        if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
            1, 0,               /* alignment, bounds */
            BUS_SPACE_MAXADDR,  /* lowaddr */
            BUS_SPACE_MAXADDR,  /* highaddr */
            NULL, NULL,         /* filter, filterarg */
            MSIZE,              /* maxsize */
            1,                  /* nsegments */
            MSIZE,              /* maxsegsize */
            0,                  /* flags */
            NULL,               /* lockfunc */
            NULL,               /* lockfuncarg */
            &rxr->htag))) {
                device_printf(dev, "Unable to create RX head DMA tag\n");
                goto fail;
        }

        if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
            1, 0,               /* alignment, bounds */
            BUS_SPACE_MAXADDR,  /* lowaddr */
            BUS_SPACE_MAXADDR,  /* highaddr */
            NULL, NULL,         /* filter, filterarg */
            MJUMPAGESIZE,       /* maxsize */
            1,                  /* nsegments */
            MJUMPAGESIZE,       /* maxsegsize */
            0,                  /* flags */
            NULL,               /* lockfunc */
            NULL,               /* lockfuncarg */
            &rxr->ptag))) {
                device_printf(dev, "Unable to create RX pkt DMA tag\n");
                goto fail;
        }

        for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
                rxbuf = &rxr->rx_buffers[i];
                error = bus_dmamap_create(rxr->htag,
                    BUS_DMA_NOWAIT, &rxbuf->hmap);
                if (error) {
                        device_printf(dev, "Unable to create RX head map\n");
                        goto fail;
                }
                error = bus_dmamap_create(rxr->ptag,
                    BUS_DMA_NOWAIT, &rxbuf->pmap);
                if (error) {
                        device_printf(dev, "Unable to create RX pkt map\n");
                        goto fail;
                }
        }

        return (0);

fail:
        /* Frees all, but can handle partial completion */
        ixv_free_receive_structures(adapter);
        return (error);
}
static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
        struct adapter *adapter;
        struct ixv_rx_buf *rxbuf;
        int i;

        adapter = rxr->adapter;
        for (i = 0; i < adapter->num_rx_desc; i++) {
                rxbuf = &rxr->rx_buffers[i];
                if (rxbuf->m_head != NULL) {
                        bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(rxr->htag, rxbuf->hmap);
                        rxbuf->m_head->m_flags |= M_PKTHDR;
                        m_freem(rxbuf->m_head);
                }
                if (rxbuf->m_pack != NULL) {
                        bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
                        rxbuf->m_pack->m_flags |= M_PKTHDR;
                        m_freem(rxbuf->m_pack);
                }
                rxbuf->m_head = NULL;
                rxbuf->m_pack = NULL;
        }
}
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
        struct adapter *adapter;
        struct ifnet *ifp;
        device_t dev;
        struct ixv_rx_buf *rxbuf;
        bus_dma_segment_t pseg[1], hseg[1];
        struct lro_ctrl *lro = &rxr->lro;
        int rsize, nsegs, error = 0;

        adapter = rxr->adapter;
        ifp = adapter->ifp;
        dev = adapter->dev;

        /* Clear the ring contents */
        IXV_RX_LOCK(rxr);
        rsize = roundup2(adapter->num_rx_desc *
            sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
        bzero((void *)rxr->rx_base, rsize);

        /* Free current RX buffer structs and their mbufs */
        ixv_free_receive_ring(rxr);

        /* Configure header split? */
        if (ixv_header_split)
                rxr->hdr_split = TRUE;

        /* Now replenish the mbufs */
        for (int j = 0; j != adapter->num_rx_desc; ++j) {
                struct mbuf *mh, *mp;

                rxbuf = &rxr->rx_buffers[j];
                /*
                ** Don't allocate header mbufs if not
                ** doing header split, it's wasteful
                */
                if (rxr->hdr_split == FALSE)
                        goto skip_head;

                /* First the header */
                rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
                if (rxbuf->m_head == NULL) {
                        error = ENOBUFS;
                        goto fail;
                }
                m_adj(rxbuf->m_head, ETHER_ALIGN);
                mh = rxbuf->m_head;
                mh->m_len = mh->m_pkthdr.len = MHLEN;
                mh->m_flags |= M_PKTHDR;
                /* Get the memory mapping */
                error = bus_dmamap_load_mbuf_sg(rxr->htag,
                    rxbuf->hmap, rxbuf->m_head, hseg,
                    &nsegs, BUS_DMA_NOWAIT);
                if (error != 0) /* Nothing elegant to do here */
                        goto fail;
                bus_dmamap_sync(rxr->htag,
                    rxbuf->hmap, BUS_DMASYNC_PREREAD);
                /* Update descriptor */
                rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
                /* Now the payload cluster */
                rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
                    M_PKTHDR, adapter->rx_mbuf_sz);
                if (rxbuf->m_pack == NULL) {
                        error = ENOBUFS;
                        goto fail;
                }
                mp = rxbuf->m_pack;
                mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
                /* Get the memory mapping */
                error = bus_dmamap_load_mbuf_sg(rxr->ptag,
                    rxbuf->pmap, mp, pseg,
                    &nsegs, BUS_DMA_NOWAIT);
                if (error != 0)
                        goto fail;
                bus_dmamap_sync(rxr->ptag,
                    rxbuf->pmap, BUS_DMASYNC_PREREAD);
                /* Update descriptor */
                rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
        }

        /* Setup our descriptor indices */
        rxr->next_to_check = 0;
        rxr->next_to_refresh = 0;
        rxr->lro_enabled = FALSE;
        rxr->rx_split_packets = 0;
        rxr->rx_bytes = 0;

        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /*
        ** Now set up the LRO interface:
        */
        if (ifp->if_capenable & IFCAP_LRO) {
                int err = tcp_lro_init(lro);
                if (err) {
                        device_printf(dev, "LRO Initialization failed!\n");
                        goto fail;
                }
                INIT_DEBUGOUT("RX Soft LRO Initialized\n");
                rxr->lro_enabled = TRUE;
                lro->ifp = adapter->ifp;
        }

        IXV_RX_UNLOCK(rxr);
        return (0);

fail:
        ixv_free_receive_ring(rxr);
        IXV_RX_UNLOCK(rxr);
        return (error);
}
/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
        struct rx_ring *rxr = adapter->rx_rings;
        int j;

        for (j = 0; j < adapter->num_queues; j++, rxr++)
                if (ixv_setup_receive_ring(rxr))
                        goto fail;

        return (0);
fail:
        /*
         * Free RX buffers allocated so far, we will only handle
         * the rings that completed, the failing case will have
         * cleaned up for itself. 'j' failed, so it's the terminus.
         */
        for (int i = 0; i < j; ++i) {
                rxr = &adapter->rx_rings[i];
                ixv_free_receive_ring(rxr);
        }

        return (ENOBUFS);
}
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
        struct rx_ring *rxr = adapter->rx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ifnet *ifp = adapter->ifp;
        u32 bufsz, fctrl, rxcsum, hlreg;

        /* Enable broadcasts */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
        fctrl |= IXGBE_FCTRL_DPF;
        fctrl |= IXGBE_FCTRL_PMCF;
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        /* Set for Jumbo Frames? */
        hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        if (ifp->if_mtu > ETHERMTU) {
                hlreg |= IXGBE_HLREG0_JUMBOEN;
                bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        } else {
                hlreg &= ~IXGBE_HLREG0_JUMBOEN;
                bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

        for (int i = 0; i < adapter->num_queues; i++, rxr++) {
                u64 rdba = rxr->rxdma.dma_paddr;
                u32 reg, rxdctl;

                /* Do the queue enabling first */
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
                for (int k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                break;
                        else
                                msec_delay(1);
                }
                wmb();

                /* Setup the Base and Length of the Rx Descriptor Ring */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
                    (rdba & 0x00000000ffffffffULL));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
                    (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
                    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

                /* Set up the SRRCTL register */
                reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
                reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
                reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
                reg |= bufsz;
                if (rxr->hdr_split) {
                        /* Use a standard mbuf for the header */
                        reg |= ((IXV_RX_HDR <<
                            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
                            & IXGBE_SRRCTL_BSIZEHDR_MASK);
                        reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                } else
                        reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
                IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

                /* Setup the HW Rx Head and Tail Descriptor Pointers */
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                    adapter->num_rx_desc - 1);
        }

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        if (ifp->if_capenable & IFCAP_RXCSUM)
                rxcsum |= IXGBE_RXCSUM_PCSD;

        if (!(rxcsum & IXGBE_RXCSUM_PCSD))
                rxcsum |= IXGBE_RXCSUM_IPPCSE;

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

        return;
}
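/*
** Illustrative sketch, not part of the driver: SRRCTL expresses the
** packet buffer size in 1 KB units, which is why the code above
** computes bufsz as 2048 or 4096 shifted right by
** IXGBE_SRRCTL_BSIZEPKT_SHIFT. The shift value of 10 is an assumption
** standing in for the ixgbe register definition. Guarded out of the
** build.
*/
#if 0
#include <stdio.h>
#include <stdint.h>

#define SRRCTL_BSIZEPKT_SHIFT   10      /* assumed: 1 KB granularity */

int
main(void)
{
        uint32_t std = 2048 >> SRRCTL_BSIZEPKT_SHIFT;   /* -> 2 */
        uint32_t jumbo = 4096 >> SRRCTL_BSIZEPKT_SHIFT; /* -> 4 */

        printf("std=%u jumbo=%u (1KB units)\n", std, jumbo);
        return (0);
}
#endif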
/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
        struct rx_ring *rxr = adapter->rx_rings;

        for (int i = 0; i < adapter->num_queues; i++, rxr++) {
                struct lro_ctrl *lro = &rxr->lro;
                ixv_free_receive_buffers(rxr);
                /* Free LRO memory */
                tcp_lro_free(lro);
                /* Free the ring memory as well */
                ixv_dma_free(adapter, &rxr->rxdma);
        }

        free(adapter->rx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
        struct adapter *adapter = rxr->adapter;
        struct ixv_rx_buf *rxbuf;

        INIT_DEBUGOUT("free_receive_structures: begin");

        /* Cleanup any existing buffers */
        if (rxr->rx_buffers != NULL) {
                for (int i = 0; i < adapter->num_rx_desc; i++) {
                        rxbuf = &rxr->rx_buffers[i];
                        if (rxbuf->m_head != NULL) {
                                bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                                    BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(rxr->htag, rxbuf->hmap);
                                rxbuf->m_head->m_flags |= M_PKTHDR;
                                m_freem(rxbuf->m_head);
                        }
                        if (rxbuf->m_pack != NULL) {
                                bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                                    BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
                                rxbuf->m_pack->m_flags |= M_PKTHDR;
                                m_freem(rxbuf->m_pack);
                        }
                        rxbuf->m_head = NULL;
                        rxbuf->m_pack = NULL;
                        if (rxbuf->hmap != NULL) {
                                bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
                                rxbuf->hmap = NULL;
                        }
                        if (rxbuf->pmap != NULL) {
                                bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
                                rxbuf->pmap = NULL;
                        }
                }
                if (rxr->rx_buffers != NULL) {
                        free(rxr->rx_buffers, M_DEVBUF);
                        rxr->rx_buffers = NULL;
                }
        }

        if (rxr->htag != NULL) {
                bus_dma_tag_destroy(rxr->htag);
                rxr->htag = NULL;
        }
        if (rxr->ptag != NULL) {
                bus_dma_tag_destroy(rxr->ptag);
                rxr->ptag = NULL;
        }

        return;
}
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
        /*
         * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
         * should be computed by hardware. Also it should not have VLAN tag in
         * ethernet header.
         */
        if (rxr->lro_enabled &&
            (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
            (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
            (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
            (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
                /*
                 * Send to the stack if:
                 **  - LRO not enabled, or
                 **  - no LRO resources, or
                 **  - lro enqueue fails
                 */
                if (rxr->lro.lro_cnt != 0)
                        if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
                                return;
        }
        (*ifp->if_input)(ifp, m);
}
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
        struct adapter *adapter = rxr->adapter;
        struct ixv_rx_buf *rbuf;
        struct mbuf *mh, *mp;

        rbuf = &rxr->rx_buffers[i];
        if (rbuf->fmp != NULL) /* Partial chain ? */
                m_freem(rbuf->fmp);

        mh = rbuf->m_head;
        mp = rbuf->m_pack;

        /* Reuse loaded DMA map and just update mbuf chain */
        mh->m_len = MHLEN;
        mh->m_flags |= M_PKTHDR;
        mh->m_next = NULL;

        mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
        mp->m_data = mp->m_ext.ext_buf;
        mp->m_next = NULL;
        return;
}
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  DMA'ed into host memory to the upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
        struct adapter *adapter = que->adapter;
        struct rx_ring *rxr = que->rxr;
        struct ifnet *ifp = adapter->ifp;
        struct lro_ctrl *lro = &rxr->lro;
        struct lro_entry *queued;
        int i, nextp, processed = 0;
        u32 staterr = 0;
        union ixgbe_adv_rx_desc *cur;
        struct ixv_rx_buf *rbuf, *nbuf;

        IXV_RX_LOCK(rxr);

        for (i = rxr->next_to_check; count != 0;) {
                struct mbuf *sendmp, *mh, *mp;
                u32 ptype;
                u16 hlen, plen, hdr, vtag;
                bool eop;

                /* Sync the ring. */
                bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                cur = &rxr->rx_base[i];
                staterr = le32toh(cur->wb.upper.status_error);

                if ((staterr & IXGBE_RXD_STAT_DD) == 0)
                        break;
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        break;

                count--;
                sendmp = NULL;
                nbuf = NULL;
                cur->wb.upper.status_error = 0;
                rbuf = &rxr->rx_buffers[i];
                mh = rbuf->m_head;
                mp = rbuf->m_pack;

                plen = le16toh(cur->wb.upper.length);
                ptype = le32toh(cur->wb.lower.lo_dword.data) &
                    IXGBE_RXDADV_PKTTYPE_MASK;
                hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
                vtag = le16toh(cur->wb.upper.vlan);
                eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

                /* Make sure all parts of a bad packet are discarded */
                if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
                    (rxr->discard)) {
                        ifp->if_ierrors++;
                        rxr->rx_discarded++;
                        if (!eop)
                                rxr->discard = TRUE;
                        else
                                rxr->discard = FALSE;
                        ixv_rx_discard(rxr, i);
                        goto next_desc;
                }

                if (!eop) {
                        nextp = i + 1;
                        if (nextp == adapter->num_rx_desc)
                                nextp = 0;
                        nbuf = &rxr->rx_buffers[nextp];
                        prefetch(nbuf);
                }
                /*
                ** The header mbuf is ONLY used when header
                ** split is enabled, otherwise we get normal
                ** behavior, ie, both header and payload
                ** are DMA'd into the payload buffer.
                **
                ** Rather than using the fmp/lmp global pointers
                ** we now keep the head of a packet chain in the
                ** buffer struct and pass this along from one
                ** descriptor to the next, until we get EOP.
                */
                if (rxr->hdr_split && (rbuf->fmp == NULL)) {
                        /* This must be an initial descriptor */
                        hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                            IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hlen > IXV_RX_HDR)
                                hlen = IXV_RX_HDR;
                        mh->m_len = hlen;
                        mh->m_flags |= M_PKTHDR;
                        mh->m_next = NULL;
                        mh->m_pkthdr.len = mh->m_len;
                        /* Null buf pointer so it is refreshed */
                        rbuf->m_head = NULL;
                        /*
                        ** Check the payload length, this
                        ** could be zero if it's a small
                        ** packet.
                        */
                        if (plen > 0) {
                                mp->m_len = plen;
                                mp->m_next = NULL;
                                mp->m_flags &= ~M_PKTHDR;
                                mh->m_next = mp;
                                mh->m_pkthdr.len += mp->m_len;
                                /* Null buf pointer so it is refreshed */
                                rbuf->m_pack = NULL;
                                rxr->rx_split_packets++;
                        }
                        /*
                        ** Now create the forward
                        ** chain so when complete
                        ** we won't have to.
                        */
                        if (eop == 0) {
                                /* stash the chain head */
                                nbuf->fmp = mh;
                                /* Make forward chain */
                                if (plen)
                                        mp->m_next = nbuf->m_pack;
                                else
                                        mh->m_next = nbuf->m_pack;
                        } else {
                                /* Singlet, prepare to send */
                                sendmp = mh;
                                if (staterr & IXGBE_RXD_STAT_VP) {
                                        sendmp->m_pkthdr.ether_vtag = vtag;
                                        sendmp->m_flags |= M_VLANTAG;
                                }
                        }
                } else {
                        /*
                        ** Either no header split, or a
                        ** secondary piece of a fragmented
                        ** packet.
                        */
                        mp->m_len = plen;
                        /*
                        ** See if there is a stored head
                        ** that determines what we are
                        */
                        sendmp = rbuf->fmp;
                        rbuf->m_pack = rbuf->fmp = NULL;

                        if (sendmp != NULL) /* secondary frag */
                                sendmp->m_pkthdr.len += mp->m_len;
                        else {
                                /* first desc of a non-ps chain */
                                sendmp = mp;
                                sendmp->m_flags |= M_PKTHDR;
                                sendmp->m_pkthdr.len = mp->m_len;
                                if (staterr & IXGBE_RXD_STAT_VP) {
                                        sendmp->m_pkthdr.ether_vtag = vtag;
                                        sendmp->m_flags |= M_VLANTAG;
                                }
                        }
                        /* Pass the head pointer on */
                        if (eop == 0) {
                                nbuf->fmp = sendmp;
                                sendmp = NULL;
                                mp->m_next = nbuf->m_pack;
                        }
                }
                ++processed;
                /* Sending this frame? */
                if (eop) {
                        sendmp->m_pkthdr.rcvif = ifp;
                        ifp->if_ipackets++;
                        rxr->rx_packets++;
                        /* capture data for AIM */
                        rxr->bytes += sendmp->m_pkthdr.len;
                        rxr->rx_bytes += sendmp->m_pkthdr.len;
                        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                                ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
                        sendmp->m_pkthdr.flowid = que->msix;
                        sendmp->m_flags |= M_FLOWID;
#endif
                }
next_desc:
                bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

                /* Advance our pointers to the next descriptor. */
                if (++i == adapter->num_rx_desc)
                        i = 0;

                /* Now send to the stack or do LRO */
                if (sendmp != NULL)
                        ixv_rx_input(rxr, ifp, sendmp, ptype);

                /* Every 8 descriptors we go to refresh mbufs */
                if (processed == 8) {
                        ixv_refresh_mbufs(rxr, i);
                        processed = 0;
                }
        }

        /* Refresh any remaining buf structs */
        if (processed != 0) {
                ixv_refresh_mbufs(rxr, i);
                processed = 0;
        }

        rxr->next_to_check = i;

        /*
         * Flush any outstanding LRO work
         */
        while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, queued);
        }

        IXV_RX_UNLOCK(rxr);

        /*
        ** We still have cleaning to do?
        ** Schedule another interrupt if so.
        */
        if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
                ixv_rearm_queues(adapter, (u64)(1 << que->msix));
                return (TRUE);
        }

        return (FALSE);
}
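/*
** Illustrative sketch, not part of the driver: the chain-head passing
** scheme ixv_rxeof() uses instead of global fmp/lmp pointers. Each
** ring slot can stash the head of an in-progress packet chain in its
** buffer struct; the head rides from one descriptor's buffer to the
** next until EOP, when the whole chain is handed up. Toy types stand
** in for mbufs and rx buffers. Guarded out of the build.
*/
#if 0
#include <stdio.h>
#include <stddef.h>

struct buf { int len; struct buf *next; };
struct slot { struct buf *pack; struct buf *fmp; };

int
main(void)
{
        struct buf b0 = { 1000, NULL }, b1 = { 500, NULL };
        struct slot ring[2] = { { &b0, NULL }, { &b1, NULL } };
        int total;

        /* Descriptor 0: not EOP, pass the chain head forward */
        ring[1].fmp = ring[0].pack;
        ring[0].pack->next = ring[1].pack;

        /* Descriptor 1: EOP, recover the head and total the chain */
        total = 0;
        for (struct buf *m = ring[1].fmp; m != NULL; m = m->next)
                total += m->len;
        printf("packet length %d\n", total);
        return (0);
}
#endif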
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that the stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
{
        u16 status = (u16) staterr;
        u8 errors = (u8) (staterr >> 24);
        bool sctp = FALSE;

        if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
            (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
                sctp = TRUE;

        if (status & IXGBE_RXD_STAT_IPCS) {
                if (!(errors & IXGBE_RXD_ERR_IPE)) {
                        /* IP Checksum Good */
                        mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
                        mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                } else
                        mp->m_pkthdr.csum_flags = 0;
        }
        if (status & IXGBE_RXD_STAT_L4CS) {
                u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
                if (sctp)
                        type = CSUM_SCTP_VALID;
#endif
                if (!(errors & IXGBE_RXD_ERR_TCPE)) {
                        mp->m_pkthdr.csum_flags |= type;
                        if (!sctp)
                                mp->m_pkthdr.csum_data = htons(0xffff);
                }
        }
        return;
}
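/*
** Illustrative sketch, not part of the driver: how ixv_rx_checksum()
** splits the 32-bit staterr word, with the status flags in the low
** 16 bits and the error byte at bits 24-31, exactly as the two casts
** above encode. The sample value is hypothetical. Guarded out of the
** build.
*/
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint32_t staterr = 0x03000005;          /* hypothetical write-back */
        uint16_t status = (uint16_t)staterr;    /* low 16 bits */
        uint8_t errors = (uint8_t)(staterr >> 24);

        printf("status=0x%04x errors=0x%02x\n", status, errors);
        return (0);
}
#endif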
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 ctrl, vid, vfta, retry;

        /*
        ** We get here thru init_locked, meaning
        ** a soft reset, this has already cleared
        ** the VFTA and other state, so if there
        ** have been no vlan's registered do nothing.
        */
        if (adapter->num_vlans == 0)
                return;

        /* Enable the queues */
        for (int i = 0; i < adapter->num_queues; i++) {
                ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                ctrl |= IXGBE_RXDCTL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
        }

        /*
        ** A soft reset zeroes out the VFTA, so
        ** we need to repopulate it now.
        */
        for (int i = 0; i < VFTA_SIZE; i++) {
                if (ixv_shadow_vfta[i] == 0)
                        continue;
                vfta = ixv_shadow_vfta[i];
                /*
                ** Reconstruct the vlan id's
                ** based on the bits set in each
                ** of the array ints.
                */
                for (int j = 0; j < 32; j++) {
                        retry = 0;
                        if ((vfta & (1 << j)) == 0)
                                continue;
                        vid = (i * 32) + j;
                        /* Call the shared code mailbox routine */
                        while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
                                if (++retry > 5)
                                        break;
                        }
                }
        }
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
        struct adapter *adapter = ifp->if_softc;
        u16 index, bit;

        if (ifp->if_softc != arg)   /* Not our event */
                return;

        if ((vtag == 0) || (vtag > 4095))       /* Invalid */
                return;

        index = (vtag >> 5) & 0x7F;
        bit = vtag & 0x1F;
        ixv_shadow_vfta[index] |= (1 << bit);
        ++adapter->num_vlans;
        /* Re-init to load the changes */
        ixv_init(adapter);
}
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
        struct adapter *adapter = ifp->if_softc;
        u16 index, bit;

        if (ifp->if_softc != arg)
                return;

        if ((vtag == 0) || (vtag > 4095))       /* Invalid */
                return;

        index = (vtag >> 5) & 0x7F;
        bit = vtag & 0x1F;
        ixv_shadow_vfta[index] &= ~(1 << bit);
        --adapter->num_vlans;
        /* Re-init to load the changes */
        ixv_init(adapter);
}
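/*
** Illustrative sketch, not part of the driver: the shadow VFTA
** indexing used by the two handlers above. A 12-bit VLAN id selects
** one of 128 32-bit words (vtag >> 5) and one bit within that word
** (vtag & 0x1F); setup_vlan_support() inverts this with
** vid = i * 32 + j. Guarded out of the build.
*/
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint16_t vtag = 100;
        uint16_t index = (vtag >> 5) & 0x7F;    /* word 3 */
        uint16_t bit = vtag & 0x1F;             /* bit 4 */

        printf("vlan %u -> word %u bit %u -> vid %u\n",
            vtag, index, bit, (unsigned)(index * 32 + bit));
        return (0);
}
#endif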
static void
ixv_enable_intr(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ix_queue *que = adapter->queues;
        u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

        for (int i = 0; i < adapter->num_queues; i++, que++)
                ixv_enable_queue(adapter, que->msix);

        IXGBE_WRITE_FLUSH(hw);

        return;
}
static void
ixv_disable_intr(struct adapter *adapter)
{
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
        return;
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 ivar, index;

        vector |= IXGBE_IVAR_ALLOC_VAL;

        if (type == -1) { /* MISC IVAR */
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {        /* RX/TX IVARS */
                index = (16 * (entry & 1)) + (8 * type);
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
        }
}
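/*
** Illustrative sketch, not part of the driver: the IVAR byte-lane
** arithmetic in ixv_set_ivar() above. Each 32-bit IVAR register holds
** four 8-bit vector entries covering two queues (an RX and a TX entry
** each), so index = 16 * (entry & 1) + 8 * type picks the byte lane
** and entry >> 1 picks the register. Guarded out of the build.
*/
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        for (uint8_t entry = 0; entry < 2; entry++)
                for (int8_t type = 0; type <= 1; type++) {  /* 0=RX 1=TX */
                        uint32_t index = (16 * (entry & 1)) + (8 * type);
                        printf("queue %u %s -> IVAR[%u] bits %u-%u\n",
                            entry, type ? "TX" : "RX",
                            entry >> 1, index, index + 7);
                }
        return (0);
}
#endif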
static void
ixv_configure_ivars(struct adapter *adapter)
{
        struct ix_queue *que = adapter->queues;

        for (int i = 0; i < adapter->num_queues; i++, que++) {
                /* First the RX queue entry */
                ixv_set_ivar(adapter, i, que->msix, 0);
                /* ... and the TX */
                ixv_set_ivar(adapter, i, que->msix, 1);
                /* Set an initial value in EITR */
                IXGBE_WRITE_REG(&adapter->hw,
                    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
        }

        /* For the Link interrupt */
        ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
        struct adapter *adapter = context;

        ixgbe_check_link(&adapter->hw,
            &adapter->link_speed, &adapter->link_up, 0);
        ixv_update_link_status(adapter);
}
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
        if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
                adapter->stats.saved_reset_vfgprc +=
                    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
                adapter->stats.saved_reset_vfgptc +=
                    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
                adapter->stats.saved_reset_vfgorc +=
                    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
                adapter->stats.saved_reset_vfgotc +=
                    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
                adapter->stats.saved_reset_vfmprc +=
                    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
        }
}
static void
ixv_init_stats(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
        adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
        adapter->stats.last_vfgorc |=
            (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

        adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
        adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
        adapter->stats.last_vfgotc |=
            (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

        adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

        adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
        adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
        adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
        adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
        adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
        u32 current = IXGBE_READ_REG(hw, reg);          \
        if (current < last)                             \
                count += 0x100000000LL;                 \
        last = current;                                 \
        count &= 0xFFFFFFFF00000000LL;                  \
        count |= current;                               \
}

#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
        u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
        u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
        u64 current = ((cur_msb << 32) | cur_lsb);      \
        if (current < last)                             \
                count += 0x1000000000LL;                \
        last = current;                                 \
        count &= 0xFFFFFFF000000000LL;                  \
        count |= current;                               \
}
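/*
** Illustrative sketch, not part of the driver: the rollover handling
** in UPDATE_STAT_32 above. The VF counters are free-running 32-bit
** registers; when a new reading is below the previous one the
** register wrapped, so the 64-bit accumulator gains 2^32 before its
** low word is replaced with the current reading. Guarded out of the
** build.
*/
#if 0
#include <stdio.h>
#include <stdint.h>

static void
update_stat_32(uint32_t current, uint32_t *last, uint64_t *count)
{
        if (current < *last)
                *count += 0x100000000ULL;       /* register wrapped */
        *last = current;
        *count &= 0xFFFFFFFF00000000ULL;
        *count |= current;
}

int
main(void)
{
        uint64_t count = 0;
        uint32_t last = 0;

        update_stat_32(0xFFFFFFF0u, &last, &count);
        update_stat_32(0x00000010u, &last, &count);     /* wrapped */
        printf("count = 0x%llx\n", (unsigned long long)count);
        return (0);
}
#endif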
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
            adapter->stats.vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
            adapter->stats.vfgptc);
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
            adapter->stats.last_vfgorc, adapter->stats.vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
            adapter->stats.last_vfgotc, adapter->stats.vfgotc);
        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
            adapter->stats.vfmprc);
}
/**********************************************************************
 *
 *  This routine is called from the stats sysctl handler. It provides
 *  a way to take a look at important statistics maintained by the
 *  driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter *adapter)
{
        device_t dev = adapter->dev;

        device_printf(dev, "Std Mbuf Failed = %lu\n",
            adapter->mbuf_defrag_failed);
        device_printf(dev, "Driver dropped packets = %lu\n",
            adapter->dropped_pkts);
        device_printf(dev, "watchdog timeouts = %ld\n",
            adapter->watchdog_events);

        device_printf(dev, "Good Packets Rcvd = %llu\n",
            (long long)adapter->stats.vfgprc);
        device_printf(dev, "Good Packets Xmtd = %llu\n",
            (long long)adapter->stats.vfgptc);
        device_printf(dev, "TSO Transmissions = %lu\n",
            adapter->tso_tx);
}
/**********************************************************************
 *
 *  This routine is called from the debug sysctl handler. It provides
 *  a way to take a look at important statistics maintained by the
 *  driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ix_queue *que = adapter->queues;
        struct rx_ring *rxr;
        struct tx_ring *txr;
        struct lro_ctrl *lro;

        device_printf(dev, "Error Byte Count = %u \n",
            IXGBE_READ_REG(hw, IXGBE_ERRBC));

        for (int i = 0; i < adapter->num_queues; i++, que++) {
                txr = que->txr;
                rxr = que->rxr;
                lro = &rxr->lro;
                device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
                    que->msix, (long)que->irqs);
                device_printf(dev, "RX(%d) Packets Received: %lld\n",
                    rxr->me, (long long)rxr->rx_packets);
                device_printf(dev, "RX(%d) Split RX Packets: %lld\n",
                    rxr->me, (long long)rxr->rx_split_packets);
                device_printf(dev, "RX(%d) Bytes Received: %lu\n",
                    rxr->me, (long)rxr->rx_bytes);
                device_printf(dev, "RX(%d) LRO Queued= %d\n",
                    rxr->me, lro->lro_queued);
                device_printf(dev, "RX(%d) LRO Flushed= %d\n",
                    rxr->me, lro->lro_flushed);
                device_printf(dev, "TX(%d) Packets Sent: %lu\n",
                    txr->me, (long)txr->total_packets);
                device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
                    txr->me, (long)txr->no_desc_avail);
        }

        device_printf(dev, "MBX IRQ Handled: %lu\n",
            (long)adapter->mbx_irq);
        return;
}
static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
        int error;
        int result;
        struct adapter *adapter;

        result = 0;
        error = sysctl_handle_int(oidp, &result, 0, req);

        if (error || !req->newptr)
                return (error);

        if (result == 1) {
                adapter = (struct adapter *) arg1;
                ixv_print_hw_stats(adapter);
        }
        return error;
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
        int error, result;
        struct adapter *adapter;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);

        if (error || !req->newptr)
                return (error);

        if (result == 1) {
                adapter = (struct adapter *) arg1;
                ixv_print_debug_info(adapter);
        }
        return error;
}
/*
** Set flow control using sysctl:
** Flow control values:
**      0 - off
**      1 - rx pause
**      2 - tx pause
**      3 - full
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
        int error;
        struct adapter *adapter;

        error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);

        if (error)
                return (error);

        adapter = (struct adapter *) arg1;
        switch (ixv_flow_control) {
        case ixgbe_fc_rx_pause:
        case ixgbe_fc_tx_pause:
        case ixgbe_fc_full:
                adapter->hw.fc.requested_mode = ixv_flow_control;
                break;
        case ixgbe_fc_none:
        default:
                adapter->hw.fc.requested_mode = ixgbe_fc_none;
        }

        ixgbe_fc_enable(&adapter->hw, 0);
        return error;
}
static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
        const char *description, int *limit, int value)
{
        *limit = value;
        SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
            OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}