/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.4";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static ixv_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixv_probe(device_t);
static int	ixv_attach(device_t);
static int	ixv_detach(device_t);
static int	ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void	ixv_start(struct ifnet *);
static void	ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int	ixv_mq_start(struct ifnet *, struct mbuf *);
static int	ixv_mq_start_locked(struct ifnet *,
		    struct tx_ring *, struct mbuf *);
static void	ixv_qflush(struct ifnet *);
#endif
static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_stop(void *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static void	ixv_identify_hardware(struct adapter *);
static int	ixv_allocate_pci_resources(struct adapter *);
static int	ixv_allocate_msix(struct adapter *);
static int	ixv_allocate_queues(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_setup_interface(device_t, struct adapter *);
static void	ixv_config_link(struct adapter *);

static int	ixv_allocate_transmit_buffers(struct tx_ring *);
static int	ixv_setup_transmit_structures(struct adapter *);
static void	ixv_setup_transmit_ring(struct tx_ring *);
static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_free_transmit_structures(struct adapter *);
static void	ixv_free_transmit_buffers(struct tx_ring *);

static int	ixv_allocate_receive_buffers(struct rx_ring *);
static int	ixv_setup_receive_structures(struct adapter *);
static int	ixv_setup_receive_ring(struct rx_ring *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_free_receive_structures(struct adapter *);
static void	ixv_free_receive_buffers(struct rx_ring *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static bool	ixv_txeof(struct tx_ring *);
static bool	ixv_rxeof(struct ix_queue *, int);
static void	ixv_rx_checksum(u32, struct mbuf *, u32);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static void	ixv_refresh_mbufs(struct rx_ring *, int);
static int	ixv_xmit(struct tx_ring *, struct mbuf **);
static int	ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int	ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixv_dma_malloc(struct adapter *, bus_size_t,
		    struct ixv_dma_alloc *, int);
static void	ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void	ixv_add_rx_process_limit(struct adapter *, const char *,
		    const char *, int *, int);
static bool	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
		    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	{0, 0}
};

static driver_t ixv_driver = {
	"ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
** Header split: this causes the hardware to DMA
** the header into a separate mbuf from the payload,
** it can be a performance win in some workloads, but
** in others it actually hurts, so it is off by default.
*/
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
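
/*
** Illustrative only: TUNABLE_INT knobs are read at boot, so these
** would typically be set from /boot/loader.conf, e.g.:
**
**	hw.ixv.enable_aim=1
**	hw.ixv.rx_process_limit=256
**
** The values shown are example choices, not recommendations from
** this driver.
*/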
/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
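
/*
** Sketch of the assumed VFTA layout: the table is a 4096-bit map,
** one bit per VLAN ID, so a given vlan id selects word (vlan >> 5)
** and bit (vlan & 0x1F) within ixv_shadow_vfta; e.g. VLAN 100 lives
** in word 3, bit 4.
*/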
/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/
static int
ixv_probe(device_t dev)
{
	ixv_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
			    ixv_strings[ent->index],
			    ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_attach(device_t dev)
{
	struct adapter	*adapter;
	struct ixgbe_hw	*hw;
	int		error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init */
	IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_stats, "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT | CTLFLAG_RW,
	    &ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;
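
	/*
	** Worked example for the checks above (assuming the stock
	** ixv.h values, where a descriptor is 16 bytes and DBA_ALIGN
	** is 128): a ring of 1024 descriptors occupies 16384 bytes,
	** which is 128-byte aligned, so it passes; any count that is
	** not a multiple of 8 fails the modulo test and falls back to
	** the default.
	*/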
	/* Allocate our TX/RX Queues */
	if (ixv_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: it is
	** at this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	ixgbe_reset_hw(hw);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXV_FC_PAUSE;
	hw->fc.low_water[0] = IXV_FC_LO;
	hw->fc.high_water[0] = IXV_FC_HI;
	hw->fc.send_xon = TRUE;

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev,"Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixv_rx_process_limit);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);
	return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available stack is notified and
 *  the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
{
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXV_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixv_xmit(txr, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}
	return;
}
/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXV_TX_LOCK(txr);
		ixv_start_locked(txr, ifp);
		IXV_TX_UNLOCK(txr);
	}
	return;
}

#else
/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (IXV_TX_TRYLOCK(txr)) {
		err = ixv_mq_start_locked(ifp, txr, m);
		IXV_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		taskqueue_enqueue(que->tq, &que->que_task);
	}

	return (err);
}
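
/*
** Note on the pattern above: a contended TX lock does not drop the
** frame; it is pushed into the ring's buf_ring and the queue task
** is scheduled, so ixv_mq_start_locked() drains it later via
** drbr_peek(). Hashing on the flowid keeps each flow on one ring,
** preserving per-flow ordering.
*/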
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter	*adapter = txr->adapter;
	struct mbuf	*next;
	int		enqueued, err = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m != NULL) {
		err = drbr_enqueue(ifp, txr->br, m);
		if (err) {
			return (err);
		}
	}
	/* Process the queue */
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = ixv_xmit(txr, &next)) != 0) {
			if (next == NULL) {
				drbr_advance(ifp, txr->br);
			} else {
				drbr_putback(ifp, txr->br, next);
			}
			break;
		}
		drbr_advance(ifp, txr->br);
		enqueued++;
		ifp->if_obytes += next->m_pkthdr.len;
		if (next->m_flags & M_MCAST)
			ifp->if_omcasts++;
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}

	return (err);
}
/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXV_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}

#endif
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int		error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXV_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
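		/*
		** Worked example of the MTU arithmetic above (sketch):
		** a standard 1500 byte MTU gives max_frame_size =
		** 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518.
		*/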
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXV_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXV_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXV_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXV_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixv_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixgbe_reset_hw(hw);
	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixv_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXV_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
	return;
}
/*
** MSIX Interrupt Handlers and Tasklets
*/
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = 1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1 << vector);
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
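
/*
** Register sketch for the three helpers above: VTEIMS sets bits in
** the interrupt mask set register (unmasking a vector), VTEIMC
** clears them (masking), and VTEICS sets interrupt cause bits,
** which makes the hardware re-fire a vector whose work may have
** arrived while it was masked.
*/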
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet	*ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixv_rxeof(que, adapter->rx_process_limit);
		IXV_TX_LOCK(txr);
		ixv_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixv_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixv_start_locked(txr, ifp);
#endif
		IXV_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more_tx, more_rx;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	IXV_TX_LOCK(txr);
	more_tx = ixv_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#if __FreeBSD_version < 800000
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
#else
	if (!drbr_empty(adapter->ifp, txr->br))
#endif
		more_tx = 1;
	IXV_TX_UNLOCK(txr);

	more_rx = ixv_rxeof(que, adapter->rx_process_limit);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
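
/*
** Worked example of the AIM math above (illustrative numbers): an
** interval averaging 576 bytes per packet gives newitr = 576 + 24
** = 600; that falls in the 300-1200 mid range, so it is scaled to
** 200, then mirrored into the high half of the value by the
** "newitr |= newitr << 16" step before being written to VTEITR on
** the next interrupt.
*/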
static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->mbx_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXV_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXV_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_FDX;
		break;
	}

	IXV_CORE_UNLOCK(adapter);

	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *  	- return 0 on success, positive on failure
 *
 **********************************************************************/
static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter	*adapter = txr->adapter;
	u32		olinfo_status = 0, cmd_type_len;
	u32		paylen = 0;
	int		i, j, error, nsegs;
	int		first, last = 0;
	struct mbuf	*m_head;
	bus_dma_segment_t segs[32];
	bus_dmamap_t	map;
	struct ixv_tx_buf *txbuf, *txbuf_mapped;
	union ixgbe_adv_tx_desc *txd = NULL;

	m_head = *m_headp;

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	txbuf_mapped = txbuf;
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			adapter->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			adapter->no_tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			adapter->no_tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		adapter->no_tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		adapter->no_tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/*
	** Set up the appropriate offload context
	** this becomes the first descriptor of
	** a packet.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ixv_tso_setup(txr, m_head, &paylen)) {
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
			++adapter->tso_tx;
		} else
			return (ENXIO);
	} else if (ixv_tx_ctx_setup(txr, m_head))
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

	/* Record payload length */
	if (paylen == 0)
		olinfo_status |= m_head->m_pkthdr.len <<
		    IXGBE_ADVTXD_PAYLEN_SHIFT;

	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i; /* descriptor that will get completion IRQ */

		if (++i == adapter->num_tx_desc)
			i = 0;

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop_index = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

	return (0);

xmit_fail:
	bus_dmamap_unload(txr->txtag, txbuf->map);
	return (error);
}
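
/*
** Note on the descriptor flags used in ixv_xmit(): every segment
** descriptor carries the common cmd_type_len bits, but only the
** final one gets IXGBE_TXD_CMD_EOP (end of packet) and
** IXGBE_TXD_CMD_RS (report status); writeback on that single
** descriptor is what later lets ixv_txeof() reclaim the whole
** chain via the saved eop_index.
*/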
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16
static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet	*ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}
/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct tx_ring	*txr = adapter->tx_rings;
	int		i;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * If the interface has been paused
	 * then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
		goto out;
	/*
	** Check for time since any descriptor was cleaned
	*/
	for (i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		if (txr->watchdog_check == FALSE) {
			IXV_TX_UNLOCK(txr);
			continue;
		}
		if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
			goto hung;
		IXV_TX_UNLOCK(txr);
	}
out:
	ixv_rearm_queues(adapter, adapter->que_mask);
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

hung:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
	device_printf(dev,"TX(%d) desc avail = %d,"
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	IXV_TX_UNLOCK(txr);
	ixv_init_locked(adapter);
}
/*
** Note: this routine updates the OS on the link state
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	device_t	dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev,"Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128)? 10:1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			for (int i = 0; i < adapter->num_queues;
			    i++, txr++)
				txr->watchdog_check = FALSE;
		}
	}

	return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	u16		pci_cmd_word;

	/*
	** Make sure BUSMASTER is set, on a VM under
	** KVM it may not be and will break things.
	*/
	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	    (pci_cmd_word & PCIM_CMD_MEMEN))) {
		INIT_DEBUGOUT("Memory Access and/or Bus Master "
		    "bits were not set!\n");
		pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	return;
}
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct		ix_queue *que = adapter->queues;
	int		error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->que_mask |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);

		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->mbxvec = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}
/*
 * Setup MSIX resources, note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, vectors, want = 2;

	/* First try MSI/X */
	rid = PCIR_BAR(MSIX_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!adapter->msix_mem) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	vectors = pci_msix_count(dev);
	if (vectors < 2) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
		goto out;
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	if (pci_alloc_msix(dev, &want) == 0) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
out:
	device_printf(adapter->dev,"MSIX config error\n");
	return (ENXIO);
}
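
/*
** Vector layout sketch with the default single queue: MSIX vector 0
** carries the RX/TX queue interrupt and vector 1 the mailbox/link
** interrupt; the "rid = vector + 1" arithmetic in
** ixv_allocate_msix() reflects FreeBSD's convention that MSI-X
** SYS_RES_IRQ resource IDs start at 1.
*/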
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int		rid;
	device_t	dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev,"Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->mbxvec) /* we are doing MSIX */
		rid = adapter->mbxvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixv_mq_start;
	ifp->if_qflush = ixv_qflush;
#else
	ifp->if_start = ixv_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Don't enable LRO by default */
	ifp->if_capabilities |= IFCAP_LRO;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg, err = 0;
	bool	negotiate = TRUE;

	if (hw->mac.ops.check_link)
		err = hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
	if (err)
		goto out;

	if (hw->mac.ops.setup_link)
		err = hw->mac.ops.setup_link(hw, autoneg,
		    negotiate, adapter->link_up);
out:
	return;
}
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}
static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
		struct ixv_dma_alloc *dma, int mapflags)
{
	device_t dev = adapter->dev;
	int	r;

	r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       DBA_ALIGN, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
		    "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
		    "error %u\n", r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixv_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
		    "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
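
/*
** Usage sketch (mirrors what ixv_allocate_queues() does below): a
** descriptor ring is obtained with
**
**	ixv_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT);
**
** after which txdma.dma_vaddr is the CPU mapping of the ring and
** txdma.dma_paddr the bus address programmed into VFTDBAL/VFTDBAH.
*/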
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
	return;
}
/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each, called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int rsize, tsize, error = 0;
	int txconf = 0, rxconf = 0;

	/* First allocate the top level queue structs */
	if (!(adapter->queues =
	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* First allocate the TX ring struct memory */
	if (!(adapter->tx_rings =
	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		error = ENOMEM;
		goto tx_fail;
	}

	/* Next allocate the RX */
	if (!(adapter->rx_rings =
	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto rx_fail;
	}

	/* For the ring itself */
	tsize = roundup2(adapter->num_tx_desc *
	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

	/*
	 * Now set up the TX queues, txconf is needed to handle the
	 * possibility that things fail midcourse and we need to
	 * undo memory gracefully
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->me = i;

		/* Initialize the TX side lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), txr->me);
		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, tsize,
		    &txr->txdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

		/* Now allocate transmit buffers for the ring */
		if (ixv_allocate_transmit_buffers(txr)) {
			device_printf(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#if __FreeBSD_version >= 800000
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
		    M_WAITOK, &txr->tx_mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up buf ring\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#endif
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
		rxr->me = i;

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), rxr->me);
		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, rsize,
		    &rxr->rxdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate RxDescriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

		/* Allocate receive buffers for the ring*/
		if (ixv_allocate_receive_buffers(rxr)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	** Finally set up the queue holding structs
	*/
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];
	}

	return (0);

err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixv_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	device_t dev = adapter->dev;
	struct ixv_tx_buf *txbuf;
	int error, i;

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((error = bus_dma_tag_create(
			       bus_get_dma_tag(adapter->dev),	/* parent */
			       1, 0,		/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       IXV_TSO_SIZE,	/* maxsize */
			       32,		/* nsegments */
			       PAGE_SIZE,	/* maxsegsize */
			       0,		/* flags */
			       NULL,		/* lockfunc */
			       NULL,		/* lockfuncarg */
			       &txr->txtag))) {
		device_printf(dev,"Unable to allocate TX DMA tag\n");
		goto fail;
	}

	if (!(txr->tx_buffers =
	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
		if (error != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}

	return 0;
fail:
	/* We free all, it handles case where we are in the middle */
	ixv_free_transmit_structures(adapter);
	return (error);
}
/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *txbuf;
	int i;

	/* Clear the old ring contents */
	IXV_TX_LOCK(txr);
	bzero((void *)txr->tx_base,
	    (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		/* Clear the EOP index */
		txbuf->eop_index = -1;
	}

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXV_TX_UNLOCK(txr);
}
/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++)
		ixv_setup_transmit_ring(txr);

	return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Setup Transmit Descriptor Cmd Settings */
		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
		txr->watchdog_check = FALSE;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
	}

	return;
}
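/*
** Worked example: the ring base is a 64-bit bus address split across
** two 32-bit registers, so tdba = 0x0000000123456000 programs
** VFTDBAL = 0x23456000 and VFTDBAH = 0x00000001.
*/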
/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		ixv_free_transmit_buffers(txr);
		ixv_dma_free(adapter, &txr->txdma);
		IXV_TX_UNLOCK(txr);
		IXV_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#if __FreeBSD_version >= 800000
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static bool
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int  ehdrlen, ip_hlen = 0;
	u16	etype;
	u8	ipproto = 0;
	bool	offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u16 vtag = 0;

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the descriptor itself.
	*/
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		return FALSE;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen)
			return (FALSE);
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		if (mp->m_len < ehdrlen + ip_hlen)
			return (FALSE);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
	default:
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		break;
#if __FreeBSD_version >= 800000
	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		break;
#endif
	default:
		offload = FALSE;
		break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (offload);
}
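/*
** Note: a context descriptor carries no packet data; it parks the
** offload metadata (header lengths, VLAN tag, L3/L4 type) in the ring
** so that following data descriptors can use it. It still consumes a
** ring slot, which is why tx_avail is decremented above.
*/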
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static bool
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;   /* 0 */
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* This is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
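/*
** Note: for TSO the hardware rewrites the TCP checksum of every
** segment it generates, so th_sum above is seeded with only the
** pseudo-header sum (addresses and protocol, no length) via
** in_pseudo(); the device folds in per-segment length and payload.
*/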
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static bool
ixv_txeof(struct tx_ring *txr)
{
	struct adapter	*adapter = txr->adapter;
	struct ifnet	*ifp = adapter->ifp;
	u32	first, last, done;
	struct ixv_tx_buf *tx_buffer;
	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

	if (txr->tx_avail == adapter->num_tx_desc)
		return FALSE;

	first = txr->next_to_clean;
	tx_buffer = &txr->tx_buffers[first];
	/* For cleanup we just use legacy struct */
	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
	last = tx_buffer->eop_index;
	if (last == -1)
		return FALSE;
	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

	/*
	** Get the index of the first descriptor
	** BEYOND the EOP and call that 'done'.
	** I do this so the comparison in the
	** inner while loop below can be simple
	*/
	if (++last == adapter->num_tx_desc) last = 0;
	done = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);
	/*
	** Only the EOP descriptor of a packet now has the DD
	** bit set, this is what we look for...
	*/
	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;

			if (tx_buffer->m_head) {
				bus_dmamap_sync(txr->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
				tx_buffer->map = NULL;
			}
			tx_buffer->eop_index = -1;
			txr->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
		}
		++ifp->if_opackets;
		/* See if there is more work now */
		last = tx_buffer->eop_index;
		if (last != -1) {
			eop_desc =
			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
			/* Get next done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->watchdog_check = FALSE;
			return FALSE;
		}
	}

	return TRUE;
}
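/*
** Worked example of the 'done' arithmetic above: in an 8-descriptor
** ring a packet in slots 2..4 has eop_index = 4, so done = 5; the
** inner loop cleans 2, 3 and 4 and stops when first reaches 5, one
** past the EOP.
*/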
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
	struct adapter		*adapter = rxr->adapter;
	bus_dma_segment_t	hseg[1];
	bus_dma_segment_t	pseg[1];
	struct ixv_rx_buf	*rxbuf;
	struct mbuf		*mh, *mp;
	int			i, j, nsegs, error;
	bool			refreshed = FALSE;

	i = j = rxr->next_to_refresh;
	/* Get the control variable, one beyond refresh point */
	if (++j == adapter->num_rx_desc)
		j = 0;
	while (j != limit) {
		rxbuf = &rxr->rx_buffers[i];
		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL)
				goto update;
			mh->m_pkthdr.len = mh->m_len = MHLEN;
			mh->m_flags |= M_PKTHDR;
			m_adj(mh, ETHER_ALIGN);
			/* Get the memory mapping */
			error = bus_dmamap_load_mbuf_sg(rxr->htag,
			    rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("GET BUF: dmamap load"
				    " failure - %d\n", error);
				m_free(mh);
				goto update;
			}
			rxbuf->m_head = mh;
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_PREREAD);
			rxr->rx_base[i].read.hdr_addr =
			    htole64(hseg[0].ds_addr);
		}

		if (rxbuf->m_pack == NULL) {
			mp = m_getjcl(M_NOWAIT, MT_DATA,
			    M_PKTHDR, adapter->rx_mbuf_sz);
			if (mp == NULL)
				goto update;
			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
			/* Get the memory mapping */
			error = bus_dmamap_load_mbuf_sg(rxr->ptag,
			    rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("GET BUF: dmamap load"
				    " failure - %d\n", error);
				m_free(mp);
				rxbuf->m_pack = NULL;
				goto update;
			}
			rxbuf->m_pack = mp;
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_PREREAD);
			rxr->rx_base[i].read.pkt_addr =
			    htole64(pseg[0].ds_addr);
		}

		refreshed = TRUE;
		rxr->next_to_refresh = i = j;
		/* Calculate next index */
		if (++j == adapter->num_rx_desc)
			j = 0;
	}
update:
	if (refreshed) /* update tail index */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
	return;
}
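/*
** Note: writing next_to_refresh to VFRDT hands the refreshed
** descriptors back to the hardware; the tail is the software
** producer index, and the device stops filling when its head
** catches up with the tail.
*/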
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	device_t		dev = adapter->dev;
	struct ixv_rx_buf	*rxbuf;
	int			i, bsize, error;

	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
	if (!(rxr->rx_buffers =
	    (struct ixv_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MSIZE,			/* maxsize */
			1,			/* nsegments */
			MSIZE,			/* maxsegsize */
			0,			/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&rxr->htag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MJUMPAGESIZE,		/* maxsize */
			1,			/* nsegments */
			MJUMPAGESIZE,		/* maxsegsize */
			0,			/* flags */
			NULL,			/* lockfunc */
			NULL,			/* lockfuncarg */
			&rxr->ptag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &rxbuf->hmap);
		if (error) {
			device_printf(dev, "Unable to create RX head map\n");
			goto fail;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &rxbuf->pmap);
		if (error) {
			device_printf(dev, "Unable to create RX pkt map\n");
			goto fail;
		}
	}

	return (0);

fail:
	/* Frees all, but can handle partial completion */
	ixv_free_receive_structures(adapter);
	return (error);
}
static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ixv_rx_buf	*rxbuf;
	int			i;

	adapter = rxr->adapter;
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		if (rxbuf->m_head != NULL) {
			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
			rxbuf->m_head->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_head);
		}
		if (rxbuf->m_pack != NULL) {
			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
			rxbuf->m_pack->m_flags |= M_PKTHDR;
			m_freem(rxbuf->m_pack);
		}
		rxbuf->m_head = NULL;
		rxbuf->m_pack = NULL;
	}
}
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
	struct adapter		*adapter;
	struct ifnet		*ifp;
	device_t		dev;
	struct ixv_rx_buf	*rxbuf;
	bus_dma_segment_t	pseg[1], hseg[1];
	struct lro_ctrl		*lro = &rxr->lro;
	int			rsize, nsegs, error = 0;

	adapter = rxr->adapter;
	ifp = adapter->ifp;
	dev = adapter->dev;

	/* Clear the ring contents */
	IXV_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/* Free current RX buffer structs and their mbufs */
	ixv_free_receive_ring(rxr);

	/* Configure header split? */
	if (ixv_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the mbufs */
	for (int j = 0; j != adapter->num_rx_desc; ++j) {
		struct mbuf	*mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		/*
		** Dont allocate mbufs if not
		** doing header split, its wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;
	rxr->discard = FALSE;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IXV_RX_UNLOCK(rxr);
	return (0);

fail:
	ixv_free_receive_ring(rxr);
	IXV_RX_UNLOCK(rxr);
	return (error);
}
/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int j;

	for (j = 0; j < adapter->num_queues; j++, rxr++)
		if (ixv_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'j' failed, so it's the terminus.
	 */
	for (int i = 0; i < j; ++i) {
		rxr = &adapter->rx_rings[i];
		ixv_free_receive_ring(rxr);
	}
	return (ENOBUFS);
}
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Do the queue enabling first */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		if (rxr->hdr_split) {
			/* Use a standard mbuf for the header */
			reg |= ((IXV_RX_HDR <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
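/*
** Worked example: SRRCTL takes the packet buffer size in 1KB units
** (assuming the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10 from the
** shared header), so 2048 >> 10 = 2 for standard frames and
** 4096 >> 10 = 4 when jumbo frames are enabled.
*/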
/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl	*lro = &rxr->lro;
		ixv_free_receive_buffers(rxr);
		/* Free LRO memory */
		tcp_lro_free(lro);
		/* Free the ring memory as well */
		ixv_dma_free(adapter, &rxr->rxdma);
	}

	free(adapter->rx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	struct ixv_rx_buf	*rxbuf;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	return;
}
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
	/*
	 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
	 * should be computed by hardware. Also it should not have VLAN tag in
	 * ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * Send to the stack if:
		 **  - LRO not enabled, or
		 **  - no LRO resources, or
		 **  - lro enqueue fails
		 */
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
	(*ifp->if_input)(ifp, m);
}
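/*
** Note: every test above must pass before a frame is handed to
** software LRO: the hardware has fully validated the TCP checksum
** (CSUM_DATA_VALID | CSUM_PSEUDO_HDR) and the descriptor marks it as
** plain IPv4/TCP; anything else goes straight up via if_input.
*/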
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
	struct ixv_rx_buf	*rbuf;

	rbuf = &rxr->rx_buffers[i];
	if (rbuf->fmp != NULL) {/* Partial chain ? */
		rbuf->fmp->m_flags |= M_PKTHDR;
		m_freem(rbuf->fmp);
		rbuf->fmp = NULL;
	}

	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so its easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}

	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
	}

	return;
}
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
	int			i, nextp, processed = 0;
	u32			staterr = 0;
	union ixgbe_adv_rx_desc	*cur;
	struct ixv_rx_buf	*rbuf, *nbuf;

	IXV_RX_LOCK(rxr);

	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		ptype;
		u16		hlen, plen, hdr, vtag;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		vtag = le16toh(cur->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Make sure all parts of a bad packet are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded++;
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			ixv_rx_discard(rxr, i);
			goto next_desc;
		}

		if (!eop) {
			nextp = i + 1;
			if (nextp == adapter->num_rx_desc)
				nextp = 0;
			nbuf = &rxr->rx_buffers[nextp];
		}
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IXV_RX_HDR)
				hlen = MHLEN;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if its a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we wont have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				if ((adapter->num_vlans) &&
				    (staterr & IXGBE_RXD_STAT_VP)) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** split packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL)
			ixv_rx_input(rxr, ifp, sendmp, ptype);

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixv_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixv_rx_unrefreshed(rxr))
		ixv_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IXV_RX_UNLOCK(rxr);

	/*
	** Still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixv_rearm_queues(adapter, (u64)(1 << que->msix));
		return (TRUE);
	}

	return (FALSE);
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
{
	u16	status = (u16) staterr;
	u8	errors = (u8) (staterr >> 24);
	bool	sctp = FALSE;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}
	if (status & IXGBE_RXD_STAT_L4CS) {
		u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp)
			type = CSUM_SCTP_VALID;
#endif
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
			if (!sctp)
				mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}
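/*
** Note: csum_data = 0xffff together with CSUM_DATA_VALID |
** CSUM_PSEUDO_HDR tells the stack the L4 checksum is already fully
** verified, so it skips its own pass; SCTP uses CRC32c, hence the
** separate CSUM_SCTP_VALID with no csum_data.
*/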
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlans registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan ids
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
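/*
** Worked example: the shadow VFTA is an array of 32-bit words, one
** bit per vlan id. vtag 100 gives index = (100 >> 5) & 0x7F = 3 and
** bit = 100 & 0x1F = 4, so bit 4 of word 3 is set.
*/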
/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXV_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXV_CORE_UNLOCK(adapter);
}
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
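/*
** Worked example: each 32-bit VTIVAR register holds four 8-bit
** entries (RX and TX for two queues). For entry 3, type 1 (TX):
** index = 16 * (3 & 1) + 8 * 1 = 24, so the vector lands in bits
** 31:24 of VTIVAR(3 >> 1) = VTIVAR(1).
*/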
static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the Link interrupt */
	ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc +=
		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc +=
		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc +=
		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc +=
		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc +=
		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
	}
}
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
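/*
** Worked example: the VF counters are free-running registers that
** wrap. For UPDATE_STAT_32, last = 0xFFFFFFF0 and a new read of
** 0x00000010 means a wrap occurred, so 2^32 is added to the running
** count before its low 32 bits are replaced with the current value.
*/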
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
	    adapter->stats.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
	    adapter->stats.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
	    adapter->stats.vfmprc);
}
/**********************************************************************
 *
 *  This routine is called only when the stats sysctl (ixv_sysctl_stats)
 *  is triggered. It provides a way to take a look at important
 *  statistics maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev,"Std Mbuf Failed = %lu\n",
	    adapter->mbuf_defrag_failed);
	device_printf(dev,"Driver dropped packets = %lu\n",
	    adapter->dropped_pkts);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);

	device_printf(dev,"Good Packets Rcvd = %llu\n",
	    (long long)adapter->stats.vfgprc);
	device_printf(dev,"Good Packets Xmtd = %llu\n",
	    (long long)adapter->stats.vfgptc);
	device_printf(dev,"TSO Transmissions = %lu\n",
	    adapter->tso_tx);
}
/**********************************************************************
 *
 *  This routine is called only when the debug sysctl (ixv_sysctl_debug)
 *  is triggered. It provides a way to take a look at important
 *  statistics maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	struct rx_ring	*rxr;
	struct tx_ring	*txr;
	struct lro_ctrl	*lro;

	device_printf(dev,"Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev,"RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
		    rxr->me, (long long)rxr->rx_split_packets);
		device_printf(dev,"RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev,"RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev,"RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev,"TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);
	}

	device_printf(dev,"MBX IRQ Handled: %lu\n",
	    (long)adapter->mbx_irq);
	return;
}
static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int		error, result;
	struct adapter	*adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_hw_stats(adapter);
	}
	return error;
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	int		error, result;
	struct adapter	*adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_debug_info(adapter);
	}
	return error;
}
/*
** Set flow control using sysctl:
** Flow control values:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct adapter *adapter;

	error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);

	if (error)
		return (error);

	adapter = (struct adapter *) arg1;
	switch (ixv_flow_control) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = ixv_flow_control;
		break;
	case ixgbe_fc_none:
	default:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
	}

	ixgbe_fc_enable(&adapter->hw);
	return error;
}
static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}