/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixv.h"
/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.1.4";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixv_vendor_info_t ixv_vendor_info_array[] =
{
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
    {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
    /* required last entry */
    {0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixv_strings[] = {
    "Intel(R) PRO/10GbE Virtual Function Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixv_probe(device_t);
static int      ixv_attach(device_t);
static int      ixv_detach(device_t);
static int      ixv_shutdown(device_t);
#if __FreeBSD_version < 800000
static void     ixv_start(struct ifnet *);
static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
#else
static int      ixv_mq_start(struct ifnet *, struct mbuf *);
static int      ixv_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void     ixv_qflush(struct ifnet *);
#endif
static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixv_init(void *);
static void     ixv_init_locked(struct adapter *);
static void     ixv_stop(void *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
static void     ixv_identify_hardware(struct adapter *);
static int      ixv_allocate_pci_resources(struct adapter *);
static int      ixv_allocate_msix(struct adapter *);
static int      ixv_allocate_queues(struct adapter *);
static int      ixv_setup_msix(struct adapter *);
static void     ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_setup_interface(device_t, struct adapter *);
static void     ixv_config_link(struct adapter *);

static int      ixv_allocate_transmit_buffers(struct tx_ring *);
static int      ixv_setup_transmit_structures(struct adapter *);
static void     ixv_setup_transmit_ring(struct tx_ring *);
static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_free_transmit_structures(struct adapter *);
static void     ixv_free_transmit_buffers(struct tx_ring *);

static int      ixv_allocate_receive_buffers(struct rx_ring *);
static int      ixv_setup_receive_structures(struct adapter *);
static int      ixv_setup_receive_ring(struct rx_ring *);
static void     ixv_initialize_receive_units(struct adapter *);
static void     ixv_free_receive_structures(struct adapter *);
static void     ixv_free_receive_buffers(struct rx_ring *);

static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static bool     ixv_txeof(struct tx_ring *);
static bool     ixv_rxeof(struct ix_queue *, int);
static void     ixv_rx_checksum(u32, struct mbuf *, u32);
static void     ixv_set_multi(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static void     ixv_refresh_mbufs(struct rx_ring *, int);
static int      ixv_xmit(struct tx_ring *, struct mbuf **);
static int      ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int      ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixv_dma_malloc(struct adapter *, bus_size_t,
                    struct ixv_dma_alloc *, int);
static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
static void     ixv_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static bool     ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool     ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
static void     ixv_configure_ivars(struct adapter *);
static u8 *     ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void     ixv_setup_vlan_support(struct adapter *);
static void     ixv_register_vlan(void *, struct ifnet *, u16);
static void     ixv_unregister_vlan(void *, struct ifnet *, u16);

static void     ixv_save_stats(struct adapter *);
static void     ixv_init_stats(struct adapter *);
static void     ixv_update_stats(struct adapter *);

static __inline void ixv_rx_discard(struct rx_ring *, int);
static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
                    struct mbuf *, u32);

/* The MSI/X Interrupt handlers */
static void     ixv_msix_que(void *);
static void     ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void     ixv_handle_que(void *, int);
static void     ixv_handle_mbx(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, ixv_probe),
    DEVMETHOD(device_attach, ixv_attach),
    DEVMETHOD(device_detach, ixv_detach),
    DEVMETHOD(device_shutdown, ixv_shutdown),
    {0, 0}
};

static driver_t ixv_driver = {
    "ix", ixv_methods, sizeof(struct adapter),
};

extern devclass_t ixgbe_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
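
/*
 * Usage sketch (an assumption, not taken from this file): built as a
 * kernel module this driver would load with "kldload ixv"; because the
 * driver_t above registers the name "ix", attached VF interfaces show
 * up to ifconfig as ix0, ix1, and so on.
 */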
/*
** TUNABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 128;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
** Header split: this causes the hardware to DMA
** the header into a separate mbuf from the payload;
** it can be a performance win in some workloads, but
** in others it actually hurts. It's off by default.
*/
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
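
/*
 * Example (illustrative, not part of the driver): TUNABLE_INT() reads
 * these knobs from the kernel environment at module load, so they are
 * typically set in /boot/loader.conf, e.g.:
 *
 *   hw.ixv.enable_aim=1
 *   hw.ixv.rx_process_limit=256
 *   hw.ixv.txd=2048
 *
 * The accepted descriptor ranges depend on MIN_TXD/MAX_TXD and friends
 * from the header, which ixv_attach() below validates against.
 */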

/*
** Shadow VFTA table; this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[VFTA_SIZE];
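
/*
 * Replay sketch (hedged; the actual code lives in
 * ixv_setup_vlan_support(), which is not shown in this section): after
 * a soft reset each nonzero word of the shadow table would be written
 * back to the hardware VLAN filter table, e.g.:
 *
 *    for (int i = 0; i < VFTA_SIZE; i++)
 *        if (ixv_shadow_vfta[i] != 0)
 *            IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ixv_shadow_vfta[i]);
 */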

/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
    ixv_vendor_info_t *ent;

    u16  pci_vendor_id = 0;
    u16  pci_device_id = 0;
    u16  pci_subvendor_id = 0;
    u16  pci_subdevice_id = 0;
    char adapter_name[256];

    INIT_DEBUGOUT("ixv_probe: begin");

    pci_vendor_id = pci_get_vendor(dev);
    if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
        return (ENXIO);

    pci_device_id = pci_get_device(dev);
    pci_subvendor_id = pci_get_subvendor(dev);
    pci_subdevice_id = pci_get_subdevice(dev);

    ent = ixv_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id) &&
            ((pci_subvendor_id == ent->subvendor_id) ||
             (ent->subvendor_id == 0)) &&
            ((pci_subdevice_id == ent->subdevice_id) ||
             (ent->subdevice_id == 0))) {
            sprintf(adapter_name, "%s, Version - %s",
                ixv_strings[ent->index],
                ixv_driver_version);
            device_set_desc_copy(dev, adapter_name);
            return (BUS_PROBE_DEFAULT);
        }
        ent++;
    }
    return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
    struct adapter *adapter;
    struct ixgbe_hw *hw;
    int error = 0;

    INIT_DEBUGOUT("ixv_attach: begin");

    /* Allocate, clear, and link in our adapter structure */
    adapter = device_get_softc(dev);
    adapter->dev = adapter->osdep.dev = dev;
    hw = &adapter->hw;

    /* Core Lock Init */
    IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

    /* SYSCTL APIs */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_stats, "I", "Statistics");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
        adapter, 0, ixv_set_flowcntl, "I", "Flow Control");

    SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
        &ixv_enable_aim, 1, "Interrupt Moderation");

    /* Set up the timer callout */
    callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

    /* Determine hardware revision */
    ixv_identify_hardware(adapter);

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(adapter)) {
        device_printf(dev, "Allocation of PCI resources failed\n");
        error = ENXIO;
        goto err_out;
    }

    /* Do descriptor calc and sanity checks */
    if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
        ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
        device_printf(dev, "TXD config issue, using default!\n");
        adapter->num_tx_desc = DEFAULT_TXD;
    } else
        adapter->num_tx_desc = ixv_txd;

    if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
        ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
        device_printf(dev, "RXD config issue, using default!\n");
        adapter->num_rx_desc = DEFAULT_RXD;
    } else
        adapter->num_rx_desc = ixv_rxd;

    /* Allocate our TX/RX Queues */
    if (ixv_allocate_queues(adapter)) {
        error = ENOMEM;
        goto err_out;
    }

    /*
    ** Initialize the shared code: at this
    ** point the mac type is set.
    */
    error = ixgbe_init_shared_code(hw);
    if (error) {
        device_printf(dev,"Shared Code Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    /* Get Hardware Flow Control setting */
    hw->fc.requested_mode = ixgbe_fc_full;
    hw->fc.pause_time = IXV_FC_PAUSE;
    hw->fc.low_water[0] = IXV_FC_LO;
    hw->fc.high_water[0] = IXV_FC_HI;
    hw->fc.send_xon = TRUE;

    error = ixgbe_init_hw(hw);
    if (error) {
        device_printf(dev,"Hardware Initialization Failure\n");
        error = EIO;
        goto err_late;
    }

    error = ixv_allocate_msix(adapter);
    if (error)
        goto err_late;

    /* Setup OS specific network interface */
    ixv_setup_interface(dev, adapter);

    /* Sysctl for limiting the amount of work done in the taskqueue */
    ixv_add_rx_process_limit(adapter, "rx_processing_limit",
        "max number of rx packets to process", &adapter->rx_process_limit,
        ixv_rx_process_limit);

    /* Do the stats setup */
    ixv_save_stats(adapter);
    ixv_init_stats(adapter);

    /* Register for VLAN events */
    adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
        ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
    adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
        ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

    INIT_DEBUGOUT("ixv_attach: end");
    return (0);

err_late:
    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);
err_out:
    ixv_free_pci_resources(adapter);
    return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    struct ix_queue *que = adapter->queues;

    INIT_DEBUGOUT("ixv_detach: begin");

    /* Make sure VLANs are not using the driver */
    if (adapter->ifp->if_vlantrunk != NULL) {
        device_printf(dev,"Vlan in use, detach first\n");
        return (EBUSY);
    }

    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        if (que->tq) {
            taskqueue_drain(que->tq, &que->que_task);
            taskqueue_free(que->tq);
        }
    }

    /* Drain the Link queue */
    if (adapter->tq) {
        taskqueue_drain(adapter->tq, &adapter->mbx_task);
        taskqueue_free(adapter->tq);
    }

    /* Unregister VLAN events */
    if (adapter->vlan_attach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
    if (adapter->vlan_detach != NULL)
        EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

    ether_ifdetach(adapter->ifp);
    callout_drain(&adapter->timer);
    ixv_free_pci_resources(adapter);
    bus_generic_detach(dev);
    if_free(adapter->ifp);

    ixv_free_transmit_structures(adapter);
    ixv_free_receive_structures(adapter);

    IXV_CORE_LOCK_DESTROY(adapter);
    return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixv_shutdown(device_t dev)
{
    struct adapter *adapter = device_get_softc(dev);
    IXV_CORE_LOCK(adapter);
    ixv_stop(adapter);
    IXV_CORE_UNLOCK(adapter);
    return (0);
}
#if __FreeBSD_version < 800000
/*********************************************************************
 *  Transmit entry point
 *
 *  ixv_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/
static void
ixv_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
    struct mbuf    *m_head;
    struct adapter *adapter = txr->adapter;

    IXV_TX_LOCK_ASSERT(txr);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;
    if (!adapter->link_active)
        return;

    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (ixv_xmit(txr, &m_head)) {
            if (m_head == NULL)
                break;
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            break;
        }
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, m_head);

        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }
    return;
}

/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixv_start(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        IXV_TX_LOCK(txr);
        ixv_start_locked(txr, ifp);
        IXV_TX_UNLOCK(txr);
    }
    return;
}

#else
/*
** Multiqueue Transmit driver
**
*/
static int
ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
{
    struct adapter  *adapter = ifp->if_softc;
    struct ix_queue *que;
    struct tx_ring  *txr;
    int             i = 0, err = 0;

    /* Which queue to use */
    if ((m->m_flags & M_FLOWID) != 0)
        i = m->m_pkthdr.flowid % adapter->num_queues;

    txr = &adapter->tx_rings[i];
    que = &adapter->queues[i];

    if (IXV_TX_TRYLOCK(txr)) {
        err = ixv_mq_start_locked(ifp, txr, m);
        IXV_TX_UNLOCK(txr);
    } else {
        err = drbr_enqueue(ifp, txr->br, m);
        taskqueue_enqueue(que->tq, &que->que_task);
    }

    return (err);
}
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
    struct adapter *adapter = txr->adapter;
    struct mbuf    *next;
    int            enqueued, err = 0;

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || adapter->link_active == 0) {
        if (m != NULL)
            err = drbr_enqueue(ifp, txr->br, m);
        return (err);
    }

    /* Do a clean if descriptors are low */
    if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
        ixv_txeof(txr);

    enqueued = 0;
    if (m == NULL) {
        next = drbr_dequeue(ifp, txr->br);
    } else if (drbr_needs_enqueue(ifp, txr->br)) {
        if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
            return (err);
        next = drbr_dequeue(ifp, txr->br);
    } else
        next = m;

    /* Process the queue */
    while (next != NULL) {
        if ((err = ixv_xmit(txr, &next)) != 0) {
            if (next != NULL)
                err = drbr_enqueue(ifp, txr->br, next);
            break;
        }
        enqueued++;
        ifp->if_obytes += next->m_pkthdr.len;
        if (next->m_flags & M_MCAST)
            ifp->if_omcasts++;
        /* Send a copy of the frame to the BPF listener */
        ETHER_BPF_MTAP(ifp, next);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
        if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
        next = drbr_dequeue(ifp, txr->br);
    }

    if (enqueued > 0) {
        /* Set watchdog on */
        txr->watchdog_check = TRUE;
        txr->watchdog_time = ticks;
    }

    return (err);
}

/*
** Flush all ring buffers
*/
static void
ixv_qflush(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct tx_ring *txr = adapter->tx_rings;
    struct mbuf    *m;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
            m_freem(m);
        IXV_TX_UNLOCK(txr);
    }
    if_qflush(ifp);
}

#endif

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifreq   *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
    struct ifaddr  *ifa = (struct ifaddr *) data;
    bool           avoid_reset = FALSE;
#endif
    int            error = 0;

    switch (command) {

    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET)
            avoid_reset = TRUE;
#endif
#ifdef INET6
        if (ifa->ifa_addr->sa_family == AF_INET6)
            avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
        /*
        ** Calling init results in link renegotiation,
        ** so we avoid doing it when possible.
        */
        if (avoid_reset) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                ixv_init(adapter);
            if (!(ifp->if_flags & IFF_NOARP))
                arp_ifinit(ifp, ifa);
        } else
            error = ether_ioctl(ifp, command, data);
        break;
#endif
    case SIOCSIFMTU:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
        if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
            error = EINVAL;
        } else {
            IXV_CORE_LOCK(adapter);
            ifp->if_mtu = ifr->ifr_mtu;
            adapter->max_frame_size =
                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFFLAGS:
        IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
        IXV_CORE_LOCK(adapter);
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                ixv_init_locked(adapter);
        } else
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixv_stop(adapter);
        adapter->if_flags = ifp->if_flags;
        IXV_CORE_UNLOCK(adapter);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_disable_intr(adapter);
            ixv_set_multi(adapter);
            ixv_enable_intr(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
        break;
    case SIOCSIFCAP:
    {
        int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
        if (mask & IFCAP_HWCSUM)
            ifp->if_capenable ^= IFCAP_HWCSUM;
        if (mask & IFCAP_TSO4)
            ifp->if_capenable ^= IFCAP_TSO4;
        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;
        if (mask & IFCAP_VLAN_HWTAGGING)
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            IXV_CORE_LOCK(adapter);
            ixv_init_locked(adapter);
            IXV_CORE_UNLOCK(adapter);
        }
        VLAN_CAPABILITIES(ifp);
        break;
    }
    default:
        IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
    struct ifnet    *ifp = adapter->ifp;
    device_t        dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    u32             mhadd, gpie;

    INIT_DEBUGOUT("ixv_init: begin");
    mtx_assert(&adapter->core_mtx, MA_OWNED);
    hw->adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest mac address, User can use a LAA */
    bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
        IXGBE_ETH_LENGTH_OF_ADDRESS);
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
    hw->addr_ctrl.rar_used_count = 1;

    /* Prepare transmit descriptors and buffers */
    if (ixv_setup_transmit_structures(adapter)) {
        device_printf(dev,"Could not setup transmit structures\n");
        ixv_stop(adapter);
        return;
    }

    ixv_initialize_transmit_units(adapter);

    /* Setup Multicast table */
    ixv_set_multi(adapter);

    /*
    ** Determine the correct mbuf pool
    ** for doing jumbo/headersplit
    */
    if (ifp->if_mtu > ETHERMTU)
        adapter->rx_mbuf_sz = MJUMPAGESIZE;
    else
        adapter->rx_mbuf_sz = MCLBYTES;

    /* Prepare receive descriptors and buffers */
    if (ixv_setup_receive_structures(adapter)) {
        device_printf(dev,"Could not setup receive structures\n");
        ixv_stop(adapter);
        return;
    }

    /* Configure RX settings */
    ixv_initialize_receive_units(adapter);

    /* Enable Enhanced MSIX mode */
    gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
    gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
    gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* Set the various hardware offload abilities */
    ifp->if_hwassist = 0;
    if (ifp->if_capenable & IFCAP_TSO4)
        ifp->if_hwassist |= CSUM_TSO;
    if (ifp->if_capenable & IFCAP_TXCSUM) {
        ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
        ifp->if_hwassist |= CSUM_SCTP;
#endif
    }

    /* Set MTU size */
    if (ifp->if_mtu > ETHERMTU) {
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        mhadd &= ~IXGBE_MHADD_MFS_MASK;
        mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
    }
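
    /*
     * Worked example (illustrative numbers): with an MTU of 9000,
     * max_frame_size is 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4)
     * = 9018 bytes, so 9018 is shifted into the MFS field occupying
     * the upper 16 bits of the MHADD register.
     */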

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(adapter);

    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

    /* Set up MSI/X routing */
    ixv_configure_ivars(adapter);

    /* Set up auto-mask */
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);

    /* Stats init */
    ixv_init_stats(adapter);

    /* Config/Enable Link */
    ixv_config_link(adapter);

    /* And now turn on interrupts */
    ixv_enable_intr(adapter);

    /* Now inform the stack we're ready */
    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    return;
}

static void
ixv_init(void *arg)
{
    struct adapter *adapter = arg;

    IXV_CORE_LOCK(adapter);
    ixv_init_locked(adapter);
    IXV_CORE_UNLOCK(adapter);
    return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 queue = 1 << vector;
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u64 queue = (u64)(1 << vector);
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
    u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
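
/*
 * Worked example (illustrative): for MSIX vector 3 the queue bit is
 * 1 << 3 = 0x8; ANDing with IXGBE_EIMS_RTX_QUEUE keeps only the bits
 * that belong to RX/TX queues before the write to VTEIMS (enable),
 * VTEIMC (disable), or VTEICS (rearm/cause-set).
 */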

static void
ixv_handle_que(void *context, int pending)
{
    struct ix_queue *que = context;
    struct adapter  *adapter = que->adapter;
    struct tx_ring  *txr = que->txr;
    struct ifnet    *ifp = adapter->ifp;
    bool            more;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        more = ixv_rxeof(que, adapter->rx_process_limit);
        IXV_TX_LOCK(txr);
        ixv_txeof(txr);
#if __FreeBSD_version >= 800000
        if (!drbr_empty(ifp, txr->br))
            ixv_mq_start_locked(ifp, txr, NULL);
#else
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            ixv_start_locked(txr, ifp);
#endif
        IXV_TX_UNLOCK(txr);
        if (more) {
            taskqueue_enqueue(que->tq, &que->que_task);
            return;
        }
    }

    /* Reenable this interrupt */
    ixv_enable_queue(adapter, que->msix);
    return;
}

/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
    struct ix_queue *que = arg;
    struct adapter  *adapter = que->adapter;
    struct tx_ring  *txr = que->txr;
    struct rx_ring  *rxr = que->rxr;
    bool            more_tx, more_rx;
    u32             newitr = 0;

    ixv_disable_queue(adapter, que->msix);
    ++que->irqs;

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    IXV_TX_LOCK(txr);
    more_tx = ixv_txeof(txr);
    /*
    ** Make certain that if the stack
    ** has anything queued the task gets
    ** scheduled to handle it.
    */
#if __FreeBSD_version < 800000
    if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
#else
    if (!drbr_empty(adapter->ifp, txr->br))
#endif
        more_tx = 1;
    IXV_TX_UNLOCK(txr);

    more_rx = ixv_rxeof(que, adapter->rx_process_limit);

    /* Do AIM now? */
    if (ixv_enable_aim == FALSE)
        goto no_calc;
    /*
    ** Do Adaptive Interrupt Moderation:
    **  - Write out last calculated setting
    **  - Calculate based on average size over
    **    the last interval.
    */
    if (que->eitr_setting)
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VTEITR(que->msix),
            que->eitr_setting);

    que->eitr_setting = 0;

    /* Idle, do nothing */
    if ((txr->bytes == 0) && (rxr->bytes == 0))
        goto no_calc;

    if ((txr->bytes) && (txr->packets))
        newitr = txr->bytes/txr->packets;
    if ((rxr->bytes) && (rxr->packets))
        newitr = max(newitr,
            (rxr->bytes / rxr->packets));
    newitr += 24; /* account for hardware frame, crc */

    /* set an upper boundary */
    newitr = min(newitr, 3000);

    /* Be nice to the mid range */
    if ((newitr > 300) && (newitr < 1200))
        newitr = (newitr / 3);
    else
        newitr = (newitr / 2);

    newitr |= newitr << 16;

    /* save for next interrupt */
    que->eitr_setting = newitr;
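
    /*
     * Worked example (illustrative numbers): 64 packets totaling 96000
     * bytes in the last interval average 1500 bytes/packet; +24 for
     * frame/CRC overhead gives 1524. That falls outside the 300-1200
     * mid range, so the setting becomes 1524 / 2 = 762, mirrored into
     * the upper 16 bits for the next VTEITR write.
     */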

    /* Reset state */
    txr->bytes = 0;
    txr->packets = 0;
    rxr->bytes = 0;
    rxr->packets = 0;

no_calc:
    if (more_tx || more_rx)
        taskqueue_enqueue(que->tq, &que->que_task);
    else /* Reenable this interrupt */
        ixv_enable_queue(adapter, que->msix);
    return;
}

static void
ixv_msix_mbx(void *arg)
{
    struct adapter  *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    u32             reg;

    /* First get the cause */
    reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

    /* Link status change */
    if (reg & IXGBE_EICR_LSC)
        taskqueue_enqueue(adapter->tq, &adapter->mbx_task);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
    return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct adapter *adapter = ifp->if_softc;

    INIT_DEBUGOUT("ixv_media_status: begin");
    IXV_CORE_LOCK(adapter);
    ixv_update_link_status(adapter);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!adapter->link_active) {
        IXV_CORE_UNLOCK(adapter);
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (adapter->link_speed) {
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_FDX;
        break;
    }

    IXV_CORE_UNLOCK(adapter);

    return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
    struct adapter *adapter = ifp->if_softc;
    struct ifmedia *ifm = &adapter->media;

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(adapter->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *  - return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
    struct adapter    *adapter = txr->adapter;
    u32               olinfo_status = 0, cmd_type_len;
    u32               paylen = 0;
    int               i, j, error, nsegs;
    int               first, last = 0;
    struct mbuf       *m_head;
    bus_dma_segment_t segs[32];
    bus_dmamap_t      map;
    struct ixv_tx_buf *txbuf, *txbuf_mapped;
    union ixgbe_adv_tx_desc *txd = NULL;

    m_head = *m_headp;

    /* Basic descriptor defines */
    cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
        IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

    if (m_head->m_flags & M_VLANTAG)
        cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

    /*
     * Important to capture the first descriptor
     * used because it will contain the index of
     * the one we tell the hardware to report back
     */
    first = txr->next_avail_desc;
    txbuf = &txr->tx_buffers[first];
    txbuf_mapped = txbuf;
    map = txbuf->map;

    /*
     * Map the packet for DMA.
     */
    error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
        *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

    if (error == EFBIG) {
        struct mbuf *m;

        m = m_defrag(*m_headp, M_DONTWAIT);
        if (m == NULL) {
            adapter->mbuf_defrag_failed++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (ENOBUFS);
        }
        *m_headp = m;

        /* Try it again */
        error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
            *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

        if (error == ENOMEM) {
            adapter->no_tx_dma_setup++;
            return (error);
        } else if (error != 0) {
            adapter->no_tx_dma_setup++;
            m_freem(*m_headp);
            *m_headp = NULL;
            return (error);
        }
    } else if (error == ENOMEM) {
        adapter->no_tx_dma_setup++;
        return (error);
    } else if (error != 0) {
        adapter->no_tx_dma_setup++;
        m_freem(*m_headp);
        *m_headp = NULL;
        return (error);
    }

    /* Make certain there are enough descriptors */
    if (nsegs > txr->tx_avail - 2) {
        txr->no_desc_avail++;
        error = ENOBUFS;
        goto xmit_fail;
    }
    m_head = *m_headp;

    /*
    ** Set up the appropriate offload context;
    ** this becomes the first descriptor of
    ** the packet.
    */
    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        if (ixv_tso_setup(txr, m_head, &paylen)) {
            cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
            olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
            olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
            olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
        } else
            return (ENXIO);
    } else if (ixv_tx_ctx_setup(txr, m_head))
        olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

    /* Record payload length */
    if (paylen == 0)
        olinfo_status |= m_head->m_pkthdr.len <<
            IXGBE_ADVTXD_PAYLEN_SHIFT;

    i = txr->next_avail_desc;
    for (j = 0; j < nsegs; j++) {
        bus_size_t seglen;
        bus_addr_t segaddr;

        txbuf = &txr->tx_buffers[i];
        txd = &txr->tx_base[i];
        seglen = segs[j].ds_len;
        segaddr = htole64(segs[j].ds_addr);

        txd->read.buffer_addr = segaddr;
        txd->read.cmd_type_len = htole32(txr->txd_cmd |
            cmd_type_len | seglen);
        txd->read.olinfo_status = htole32(olinfo_status);
        last = i; /* descriptor that will get completion IRQ */

        if (++i == adapter->num_tx_desc)
            i = 0;

        txbuf->m_head = NULL;
        txbuf->eop_index = -1;
    }

    txd->read.cmd_type_len |=
        htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
    txr->tx_avail -= nsegs;
    txr->next_avail_desc = i;

    txbuf->m_head = m_head;
    txr->tx_buffers[first].map = txbuf->map;
    txbuf->map = map;
    bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

    /* Set the index of the descriptor that will be marked done */
    txbuf = &txr->tx_buffers[first];
    txbuf->eop_index = last;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /*
     * Advance the Transmit Descriptor Tail (Tdt), this tells the
     * hardware that this frame is available to transmit.
     */
    ++txr->total_packets;
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);

    return (0);

xmit_fail:
    bus_dmamap_unload(txr->txtag, txbuf->map);
    return (error);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
    u8  mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    u8  *update_ptr;
    struct ifmultiaddr *ifma;
    int mcnt = 0;
    struct ifnet *ifp = adapter->ifp;

    IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_maddr_rlock(ifp);
#endif
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
            &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
            IXGBE_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
    }
#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_maddr_runlock(ifp);
#endif

    update_ptr = mta;

    ixgbe_update_mc_addr_list(&adapter->hw,
        update_ptr, mcnt, ixv_mc_array_itr, TRUE);

    return;
}

/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixv_set_multi() one by one.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    u8 *addr = *update_ptr;
    u8 *newptr;

    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;
    return addr;
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
    struct adapter *adapter = arg;
    device_t       dev = adapter->dev;
    struct tx_ring *txr = adapter->tx_rings;
    int            i;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    ixv_update_link_status(adapter);

    /* Stats Update */
    ixv_update_stats(adapter);

    /*
     * If the interface has been paused
     * then don't do the watchdog check
     */
    if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
        goto out;
    /*
    ** Check for time since any descriptor was cleaned
    */
    for (i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        if (txr->watchdog_check == FALSE) {
            IXV_TX_UNLOCK(txr);
            continue;
        }
        if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
            goto hung;
        IXV_TX_UNLOCK(txr);
    }
out:
    ixv_rearm_queues(adapter, adapter->que_mask);
    callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
    return;

hung:
    device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
    device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
        IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
    device_printf(dev,"TX(%d) desc avail = %d,"
        " Next TX to Clean = %d\n",
        txr->me, txr->tx_avail, txr->next_to_clean);
    adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    adapter->watchdog_events++;
    IXV_TX_UNLOCK(txr);
    ixv_init_locked(adapter);
}

/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
    struct ifnet   *ifp = adapter->ifp;
    struct tx_ring *txr = adapter->tx_rings;
    device_t       dev = adapter->dev;

    if (adapter->link_up) {
        if (adapter->link_active == FALSE) {
            device_printf(dev,"Link is up %d Gbps %s \n",
                ((adapter->link_speed == 128)? 10:1),
                "Full Duplex");
            adapter->link_active = TRUE;
            if_link_state_change(ifp, LINK_STATE_UP);
        }
    } else { /* Link down */
        if (adapter->link_active == TRUE) {
            device_printf(dev,"Link is Down\n");
            if_link_state_change(ifp, LINK_STATE_DOWN);
            adapter->link_active = FALSE;
            for (int i = 0; i < adapter->num_queues;
                i++, txr++)
                txr->watchdog_check = FALSE;
        }
    }

    return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
    struct ifnet    *ifp;
    struct adapter  *adapter = arg;
    struct ixgbe_hw *hw = &adapter->hw;
    ifp = adapter->ifp;

    mtx_assert(&adapter->core_mtx, MA_OWNED);

    INIT_DEBUGOUT("ixv_stop: begin\n");
    ixv_disable_intr(adapter);

    /* Tell the stack that the interface is no longer active */
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    adapter->hw.adapter_stopped = FALSE;
    ixgbe_stop_adapter(hw);
    callout_stop(&adapter->timer);

    /* reprogram the RAR[0] in case user changed it. */
    ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    return;
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    u16      pci_cmd_word;

    /*
    ** Make sure BUSMASTER is set; on a VM under
    ** KVM it may not be, and that will break things.
    */
    pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
    if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
        (pci_cmd_word & PCIM_CMD_MEMEN))) {
        INIT_DEBUGOUT("Memory Access and/or Bus Master "
            "bits were not set!\n");
        pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
        pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
    }

    /* Save off the information about this board */
    adapter->hw.vendor_id = pci_get_vendor(dev);
    adapter->hw.device_id = pci_get_device(dev);
    adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
    adapter->hw.subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    adapter->hw.subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    return;
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
    device_t        dev = adapter->dev;
    struct ix_queue *que = adapter->queues;
    int             error, rid, vector = 0;

    for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
        rid = vector + 1;
        que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (que->res == NULL) {
            device_printf(dev,"Unable to allocate"
                " bus resource: que interrupt [%d]\n", vector);
            return (ENXIO);
        }
        /* Set the handler function */
        error = bus_setup_intr(dev, que->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL,
            ixv_msix_que, que, &que->tag);
        if (error) {
            que->res = NULL;
            device_printf(dev, "Failed to register QUE handler");
            return (error);
        }
#if __FreeBSD_version >= 800504
        bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
        que->msix = vector;
        adapter->que_mask |= (u64)(1 << que->msix);
        /*
        ** Bind the msix vector, and thus the
        ** ring, to the corresponding cpu.
        */
        if (adapter->num_queues > 1)
            bus_bind_intr(dev, que->res, i);

        TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
        que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
        taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
            device_get_nameunit(adapter->dev));
    }

    /* and Mailbox */
    rid = vector + 1;
    adapter->res = bus_alloc_resource_any(dev,
        SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
    if (!adapter->res) {
        device_printf(dev,"Unable to allocate"
            " bus resource: MBX interrupt [%d]\n", rid);
        return (ENXIO);
    }
    /* Set the mbx handler function */
    error = bus_setup_intr(dev, adapter->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL,
        ixv_msix_mbx, adapter, &adapter->tag);
    if (error) {
        adapter->res = NULL;
        device_printf(dev, "Failed to register LINK handler");
        return (error);
    }
#if __FreeBSD_version >= 800504
    bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
    adapter->mbxvec = vector;
    /* Tasklets for Mailbox */
    TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
    adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
        taskqueue_thread_enqueue, &adapter->tq);
    taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
        device_get_nameunit(adapter->dev));

    /*
    ** Due to a broken design QEMU will fail to properly
    ** enable the guest for MSIX unless the vectors in
    ** the table are all set up, so we must rewrite the
    ** ENABLE bit in the MSIX control register again at this
    ** point to cause it to successfully initialize us.
    */
    if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;
        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    return (0);
}

/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    int      rid, vectors, want = 2;

    /* First try MSI/X */
    rid = PCIR_BAR(MSIX_BAR);
    adapter->msix_mem = bus_alloc_resource_any(dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (!adapter->msix_mem) {
        device_printf(adapter->dev,
            "Unable to map MSIX table \n");
        goto out;
    }

    vectors = pci_msix_count(dev);
    if (vectors < 2) {
        bus_release_resource(dev, SYS_RES_MEMORY,
            rid, adapter->msix_mem);
        adapter->msix_mem = NULL;
        goto out;
    }

    /*
    ** Want two vectors: one for a queue,
    ** plus an additional for the mailbox.
    */
    if (pci_alloc_msix(dev, &want) == 0) {
        device_printf(adapter->dev,
            "Using MSIX interrupts with %d vectors\n", want);
        return (want);
    }
out:
    device_printf(adapter->dev,"MSIX config error\n");
    return (ENXIO);
}

static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
    int      rid;
    device_t dev = adapter->dev;

    rid = PCIR_BAR(0);
    adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(adapter->pci_mem)) {
        device_printf(dev,"Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    adapter->osdep.mem_bus_space_tag =
        rman_get_bustag(adapter->pci_mem);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->pci_mem);
    adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

    adapter->num_queues = 1;
    adapter->hw.back = &adapter->osdep;

    /*
    ** Now setup MSI/X, which should
    ** return us the number of
    ** configured vectors.
    */
    adapter->msix = ixv_setup_msix(adapter);
    if (adapter->msix == ENXIO)
        return (ENXIO);
    else
        return (0);
}

static void
ixv_free_pci_resources(struct adapter * adapter)
{
    struct ix_queue *que = adapter->queues;
    device_t        dev = adapter->dev;
    int             rid, memrid;

    memrid = PCIR_BAR(MSIX_BAR);

    /*
    ** There is a slight possibility of a failure mode
    ** in attach that will result in entering this function
    ** before interrupt resources have been initialized, and
    ** in that case we do not want to execute the loops below.
    ** We can detect this reliably by the state of the adapter
    ** res pointer.
    */
    if (adapter->res == NULL)
        goto mem;

    /*
    **  Release all msix queue resources:
    */
    for (int i = 0; i < adapter->num_queues; i++, que++) {
        rid = que->msix + 1;
        if (que->tag != NULL) {
            bus_teardown_intr(dev, que->res, que->tag);
            que->tag = NULL;
        }
        if (que->res != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
    }

    /* Clean the Legacy or Link interrupt last */
    if (adapter->mbxvec) /* we are doing MSIX */
        rid = adapter->mbxvec + 1;
    else
        (adapter->msix != 0) ? (rid = 1):(rid = 0);

    if (adapter->tag != NULL) {
        bus_teardown_intr(dev, adapter->res, adapter->tag);
        adapter->tag = NULL;
    }
    if (adapter->res != NULL)
        bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
    if (adapter->msix)
        pci_release_msi(dev);

    if (adapter->msix_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            memrid, adapter->msix_mem);

    if (adapter->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), adapter->pci_mem);

    return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
    struct ifnet *ifp;

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    ifp = adapter->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL)
        panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_baudrate = 1000000000;
    ifp->if_init = ixv_init;
    ifp->if_softc = adapter;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
    ifp->if_transmit = ixv_mq_start;
    ifp->if_qflush = ixv_qflush;
#else
    ifp->if_start = ixv_start;
#endif
    ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

    ether_ifattach(ifp, adapter->hw.mac.addr);

    adapter->max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /*
     * Tell the upper layer(s) we support long frames.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
                         |  IFCAP_VLAN_MTU;
    ifp->if_capenable = ifp->if_capabilities;

    /* Don't enable LRO by default */
    ifp->if_capabilities |= IFCAP_LRO;
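
    /*
     * Example (an illustration, assuming a unit named ix0): LRO is
     * advertised above but deliberately left out of if_capenable, so
     * an administrator would enable it per interface with
     * "ifconfig ix0 lro", which lands in the SIOCSIFCAP case of
     * ixv_ioctl().
     */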

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
        ixv_media_status);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
    ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

    return;
}

static void
ixv_config_link(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32  autoneg, err = 0;
    bool negotiate = TRUE;

    if (hw->mac.ops.check_link)
        err = hw->mac.ops.check_link(hw, &autoneg,
            &adapter->link_up, FALSE);
    if (err)
        goto out;

    if (hw->mac.ops.setup_link)
        err = hw->mac.ops.setup_link(hw, autoneg,
            negotiate, adapter->link_up);
out:
    return;
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
    if (error)
        return;
    *(bus_addr_t *) arg = segs->ds_addr;
    return;
}

static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
    struct ixv_dma_alloc *dma, int mapflags)
{
    device_t dev = adapter->dev;
    int      r;

    r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
            DBA_ALIGN, 0,          /* alignment, bounds */
            BUS_SPACE_MAXADDR,     /* lowaddr */
            BUS_SPACE_MAXADDR,     /* highaddr */
            NULL, NULL,            /* filter, filterarg */
            size,                  /* maxsize */
            1,                     /* nsegments */
            size,                  /* maxsegsize */
            BUS_DMA_ALLOCNOW,      /* flags */
            NULL,                  /* lockfunc */
            NULL,                  /* lockfuncarg */
            &dma->dma_tag);
    if (r != 0) {
        device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
            "error %u\n", r);
        goto fail_0;
    }
    r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
        BUS_DMA_NOWAIT, &dma->dma_map);
    if (r != 0) {
        device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
            "error %u\n", r);
        goto fail_1;
    }
    r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
        size,
        ixv_dmamap_cb,
        &dma->dma_paddr,
        mapflags | BUS_DMA_NOWAIT);
    if (r != 0) {
        device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
            "error %u\n", r);
        goto fail_2;
    }
    dma->dma_size = size;
    return (0);
fail_2:
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
    bus_dma_tag_destroy(dma->dma_tag);
fail_0:
    dma->dma_map = NULL;
    dma->dma_tag = NULL;
    return (r);
}

static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
    bus_dmamap_sync(dma->dma_tag, dma->dma_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(dma->dma_tag, dma->dma_map);
    bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
    bus_dma_tag_destroy(dma->dma_tag);
    return;
}

/*********************************************************************
 *
 *  Allocate memory for the transmit and receive rings, and then
 *  the descriptors associated with each; called only once at attach.
 *
 **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
    device_t        dev = adapter->dev;
    struct ix_queue *que;
    struct tx_ring  *txr;
    struct rx_ring  *rxr;
    int rsize, tsize, error = 0;
    int txconf = 0, rxconf = 0;

    /* First allocate the top level queue structs */
    if (!(adapter->queues =
        (struct ix_queue *) malloc(sizeof(struct ix_queue) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate queue memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* First allocate the TX ring struct memory */
    if (!(adapter->tx_rings =
        (struct tx_ring *) malloc(sizeof(struct tx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate TX ring memory\n");
        error = ENOMEM;
        goto tx_fail;
    }

    /* Next allocate the RX */
    if (!(adapter->rx_rings =
        (struct rx_ring *) malloc(sizeof(struct rx_ring) *
        adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate RX ring memory\n");
        error = ENOMEM;
        goto rx_fail;
    }

    /* For the ring itself */
    tsize = roundup2(adapter->num_tx_desc *
        sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
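
    /*
     * Worked example (illustrative numbers): a union ixgbe_adv_tx_desc
     * is 16 bytes, so 1024 TX descriptors need 16384 bytes; roundup2()
     * then pads that up to the next multiple of DBA_ALIGN (a power of
     * two), as the hardware requires for the descriptor ring base.
     */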

    /*
     * Now set up the TX queues; txconf is needed to handle the
     * possibility that things fail midcourse and we need to
     * undo memory gracefully
     */
    for (int i = 0; i < adapter->num_queues; i++, txconf++) {
        /* Set up some basics */
        txr = &adapter->tx_rings[i];
        txr->adapter = adapter;
        txr->me = i;

        /* Initialize the TX side lock */
        snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(dev), txr->me);
        mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, tsize,
            &txr->txdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate TX Descriptor memory\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
        txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
        bzero((void *)txr->tx_base, tsize);

        /* Now allocate transmit buffers for the ring */
        if (ixv_allocate_transmit_buffers(txr)) {
            device_printf(dev,
                "Critical Failure setting up transmit buffers\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#if __FreeBSD_version >= 800000
        /* Allocate a buf ring */
        txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
            M_WAITOK, &txr->tx_mtx);
        if (txr->br == NULL) {
            device_printf(dev,
                "Critical Failure setting up buf ring\n");
            error = ENOMEM;
            goto err_tx_desc;
        }
#endif
    }

    /*
     * Next the RX queues...
     */
    rsize = roundup2(adapter->num_rx_desc *
        sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
    for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
        rxr = &adapter->rx_rings[i];
        /* Set up some basics */
        rxr->adapter = adapter;
        rxr->me = i;

        /* Initialize the RX side lock */
        snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(dev), rxr->me);
        mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

        if (ixv_dma_malloc(adapter, rsize,
            &rxr->rxdma, BUS_DMA_NOWAIT)) {
            device_printf(dev,
                "Unable to allocate RxDescriptor memory\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
        rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
        bzero((void *)rxr->rx_base, rsize);

        /* Allocate receive buffers for the ring */
        if (ixv_allocate_receive_buffers(rxr)) {
            device_printf(dev,
                "Critical Failure setting up receive buffers\n");
            error = ENOMEM;
            goto err_rx_desc;
        }
    }

    /*
    ** Finally set up the queue holding structs
    */
    for (int i = 0; i < adapter->num_queues; i++) {
        que = &adapter->queues[i];
        que->adapter = adapter;
        que->txr = &adapter->tx_rings[i];
        que->rxr = &adapter->rx_rings[i];
    }

    return (0);

err_rx_desc:
    for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
        ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
    for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
        ixv_dma_free(adapter, &txr->txdma);
    free(adapter->rx_rings, M_DEVBUF);
rx_fail:
    free(adapter->tx_rings, M_DEVBUF);
tx_fail:
    free(adapter->queues, M_DEVBUF);
fail:
    return (error);
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach; setup is done every reset.
 *
 **********************************************************************/
static int
ixv_allocate_transmit_buffers(struct tx_ring *txr)
{
    struct adapter    *adapter = txr->adapter;
    device_t          dev = adapter->dev;
    struct ixv_tx_buf *txbuf;
    int               error, i;

    /*
     * Setup DMA descriptor areas.
     */
    if ((error = bus_dma_tag_create(
            bus_get_dma_tag(adapter->dev), /* parent */
            1, 0,                  /* alignment, bounds */
            BUS_SPACE_MAXADDR,     /* lowaddr */
            BUS_SPACE_MAXADDR,     /* highaddr */
            NULL, NULL,            /* filter, filterarg */
            IXV_TSO_SIZE,          /* maxsize */
            32,                    /* nsegments */
            PAGE_SIZE,             /* maxsegsize */
            0,                     /* flags */
            NULL,                  /* lockfunc */
            NULL,                  /* lockfuncarg */
            &txr->txtag))) {
        device_printf(dev,"Unable to allocate TX DMA tag\n");
        goto fail;
    }

    if (!(txr->tx_buffers =
        (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
        adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate tx_buffer memory\n");
        error = ENOMEM;
        goto fail;
    }

    /* Create the descriptor buffer dma maps */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
        if (error != 0) {
            device_printf(dev, "Unable to create TX DMA map\n");
            goto fail;
        }
    }

    return 0;
fail:
    /* We free all, it handles case where we are in the middle */
    ixv_free_transmit_structures(adapter);
    return (error);
}

/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
    struct adapter    *adapter = txr->adapter;
    struct ixv_tx_buf *txbuf;
    int               i;

    /* Clear the old ring contents */
    IXV_TX_LOCK(txr);
    bzero((void *)txr->tx_base,
        (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
    /* Reset indices */
    txr->next_avail_desc = 0;
    txr->next_to_clean = 0;

    /* Free any existing tx buffers. */
    txbuf = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
        if (txbuf->m_head != NULL) {
            bus_dmamap_sync(txr->txtag, txbuf->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txtag, txbuf->map);
            m_freem(txbuf->m_head);
            txbuf->m_head = NULL;
        }
        /* Clear the EOP index */
        txbuf->eop_index = -1;
    }

    /* Set number of descriptors available */
    txr->tx_avail = adapter->num_tx_desc;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    IXV_TX_UNLOCK(txr);
}

/*********************************************************************
 *
 *  Initialize all transmit rings.
 *
 **********************************************************************/
static int
ixv_setup_transmit_structures(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;

    for (int i = 0; i < adapter->num_queues; i++, txr++)
        ixv_setup_transmit_ring(txr);

    return (0);
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;
    struct ixgbe_hw *hw = &adapter->hw;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        u64 tdba = txr->txdma.dma_paddr;
        u32 txctrl, txdctl;

        /* Set WTHRESH to 8, burst writeback */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        txdctl |= (8 << 16);
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
        /* Now enable */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

        /* Set the HW Tx Head and Tail indices */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

        /* Setup Transmit Descriptor Cmd Settings */
        txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
        txr->watchdog_check = FALSE;

        /* Set Ring parameters */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
            adapter->num_tx_desc *
            sizeof(struct ixgbe_legacy_tx_desc));
        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
    }
}
/*********************************************************************
 *  Free all transmit rings.
 **********************************************************************/
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
    struct tx_ring *txr = adapter->tx_rings;

    for (int i = 0; i < adapter->num_queues; i++, txr++) {
        IXV_TX_LOCK(txr);
        ixv_free_transmit_buffers(txr);
        ixv_dma_free(adapter, &txr->txdma);
        IXV_TX_UNLOCK(txr);
        IXV_TX_LOCK_DESTROY(txr);
    }
    free(adapter->tx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    struct ixv_tx_buf *tx_buffer;
    int i;

    INIT_DEBUGOUT("free_transmit_ring: begin");

    if (txr->tx_buffers == NULL)
        return;

    tx_buffer = txr->tx_buffers;
    for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
        if (tx_buffer->m_head != NULL) {
            bus_dmamap_sync(txr->txtag, tx_buffer->map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(txr->txtag,
                tx_buffer->map);
            m_freem(tx_buffer->m_head);
            tx_buffer->m_head = NULL;
            if (tx_buffer->map != NULL) {
                bus_dmamap_destroy(txr->txtag,
                    tx_buffer->map);
                tx_buffer->map = NULL;
            }
        } else if (tx_buffer->map != NULL) {
            bus_dmamap_unload(txr->txtag,
                tx_buffer->map);
            bus_dmamap_destroy(txr->txtag,
                tx_buffer->map);
            tx_buffer->map = NULL;
        }
    }
#if __FreeBSD_version >= 800000
    if (txr->br != NULL)
        buf_ring_free(txr->br, M_DEVBUF);
#endif
    if (txr->tx_buffers != NULL) {
        free(txr->tx_buffers, M_DEVBUF);
        txr->tx_buffers = NULL;
    }
    if (txr->txtag != NULL) {
        bus_dma_tag_destroy(txr->txtag);
        txr->txtag = NULL;
    }
}
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 **********************************************************************/
static boolean_t
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
    struct adapter *adapter = txr->adapter;
    struct ixgbe_adv_tx_context_desc *TXD;
    struct ixv_tx_buf *tx_buffer;
    u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
    struct ether_vlan_header *eh;
    struct ip *ip;
    struct ip6_hdr *ip6;
    int ehdrlen, ip_hlen = 0;
    u16 etype;
    u8 ipproto = 0;
    bool offload = TRUE;
    int ctxd = txr->next_avail_desc;
    u16 vtag = 0;

    if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
        offload = FALSE;

    tx_buffer = &txr->tx_buffers[ctxd];
    TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

    /*
    ** In advanced descriptors the vlan tag must
    ** be placed into the descriptor itself.
    */
    if (mp->m_flags & M_VLANTAG) {
        vtag = htole16(mp->m_pkthdr.ether_vtag);
        vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
    } else if (offload == FALSE)
        return FALSE;

    /*
     * Determine where frame payload starts.
     * Jump over vlan headers if already present,
     * helpful for QinQ too.
     */
    eh = mtod(mp, struct ether_vlan_header *);
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        etype = ntohs(eh->evl_proto);
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    } else {
        etype = ntohs(eh->evl_encap_proto);
        ehdrlen = ETHER_HDR_LEN;
    }

    /* Set the ether header length */
    vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

    switch (etype) {
    case ETHERTYPE_IP:
        ip = (struct ip *)(mp->m_data + ehdrlen);
        ip_hlen = ip->ip_hl << 2;
        if (mp->m_len < ehdrlen + ip_hlen)
            return FALSE;
        ipproto = ip->ip_p;
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
        break;
    case ETHERTYPE_IPV6:
        ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
        ip_hlen = sizeof(struct ip6_hdr);
        if (mp->m_len < ehdrlen + ip_hlen)
            return FALSE;
        ipproto = ip6->ip6_nxt;
        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
        break;
    default:
        offload = FALSE;
        break;
    }

    vlan_macip_lens |= ip_hlen;
    type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

    switch (ipproto) {
    case IPPROTO_TCP:
        if (mp->m_pkthdr.csum_flags & CSUM_TCP)
            type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
        break;
    case IPPROTO_UDP:
        if (mp->m_pkthdr.csum_flags & CSUM_UDP)
            type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
        break;
#if __FreeBSD_version >= 800000
    case IPPROTO_SCTP:
        if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
            type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
        break;
#endif
    default:
        offload = FALSE;
        break;
    }

    /* Now copy bits into descriptor */
    TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
    TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
    TXD->seqnum_seed = htole32(0);
    TXD->mss_l4len_idx = htole32(0);

    tx_buffer->m_head = NULL;
    tx_buffer->eop_index = -1;

    /* We've consumed the first desc, adjust counters */
    if (++ctxd == adapter->num_tx_desc)
        ctxd = 0;
    txr->next_avail_desc = ctxd;
    --txr->tx_avail;

    return (offload);
}
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static boolean_t
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
    struct adapter *adapter = txr->adapter;
    struct ixgbe_adv_tx_context_desc *TXD;
    struct ixv_tx_buf *tx_buffer;
    u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
    u32 mss_l4len_idx = 0;
    u16 vtag = 0;
    int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
    struct ether_vlan_header *eh;
    struct ip *ip;
    struct tcphdr *th;

    /*
     * Determine where frame payload starts.
     * Jump over vlan headers if already present
     */
    eh = mtod(mp, struct ether_vlan_header *);
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    else
        ehdrlen = ETHER_HDR_LEN;

    /* Ensure we have at least the IP+TCP header in the first mbuf. */
    if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
        return FALSE;

    ctxd = txr->next_avail_desc;
    tx_buffer = &txr->tx_buffers[ctxd];
    TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

    ip = (struct ip *)(mp->m_data + ehdrlen);
    if (ip->ip_p != IPPROTO_TCP)
        return FALSE;   /* 0 */
    ip->ip_sum = 0;
    ip_hlen = ip->ip_hl << 2;
    th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
    th->th_sum = in_pseudo(ip->ip_src.s_addr,
        ip->ip_dst.s_addr, htons(IPPROTO_TCP));
    tcp_hlen = th->th_off << 2;
    hdrlen = ehdrlen + ip_hlen + tcp_hlen;

    /* This is used in the transmit desc in encap */
    *paylen = mp->m_pkthdr.len - hdrlen;

    /* VLAN MACLEN IPLEN */
    if (mp->m_flags & M_VLANTAG) {
        vtag = htole16(mp->m_pkthdr.ether_vtag);
        vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
    }

    vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
    vlan_macip_lens |= ip_hlen;
    TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

    /* ADV DTYPE TUCMD */
    type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
    type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
    type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
    TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

    /* MSS L4LEN IDX */
    mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
    mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
    TXD->mss_l4len_idx = htole32(mss_l4len_idx);

    TXD->seqnum_seed = htole32(0);
    tx_buffer->m_head = NULL;
    tx_buffer->eop_index = -1;

    if (++ctxd == adapter->num_tx_desc)
        ctxd = 0;

    txr->tx_avail--;
    txr->next_avail_desc = ctxd;
    return TRUE;
}
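/*
** A worked example of the TSO header math above (illustrative only,
** assuming an untagged frame with minimal IPv4 and TCP headers):
**   ehdrlen  = ETHER_HDR_LEN   = 14
**   ip_hlen  = ip->ip_hl << 2  = 5 << 2 = 20
**   tcp_hlen = th->th_off << 2 = 5 << 2 = 20
**   hdrlen   = 14 + 20 + 20    = 54
** so *paylen = m_pkthdr.len - 54 is the TSO payload the hardware
** will segment into tso_segsz-sized chunks.
*/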
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static boolean_t
ixv_txeof(struct tx_ring *txr)
{
    struct adapter *adapter = txr->adapter;
    struct ifnet *ifp = adapter->ifp;
    u32 first, last, done;
    struct ixv_tx_buf *tx_buffer;
    struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;

    mtx_assert(&txr->tx_mtx, MA_OWNED);

    if (txr->tx_avail == adapter->num_tx_desc)
        return FALSE;

    first = txr->next_to_clean;
    tx_buffer = &txr->tx_buffers[first];
    /* For cleanup we just use legacy struct */
    tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
    last = tx_buffer->eop_index;
    if (last == -1)
        return FALSE;
    eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];

    /*
    ** Get the index of the first descriptor
    ** BEYOND the EOP and call that 'done'.
    ** I do this so the comparison in the
    ** inner while loop below can be simple
    */
    if (++last == adapter->num_tx_desc) last = 0;
    done = last;

    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_POSTREAD);
    /*
    ** Only the EOP descriptor of a packet now has the DD
    ** bit set, this is what we look for...
    */
    while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
        /* We clean the range of the packet */
        while (first != done) {
            tx_desc->upper.data = 0;
            tx_desc->lower.data = 0;
            tx_desc->buffer_addr = 0;
            ++txr->tx_avail;

            if (tx_buffer->m_head) {
                bus_dmamap_sync(txr->txtag,
                    tx_buffer->map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(txr->txtag,
                    tx_buffer->map);
                m_freem(tx_buffer->m_head);
                tx_buffer->m_head = NULL;
                tx_buffer->map = NULL;
            }
            tx_buffer->eop_index = -1;
            txr->watchdog_time = ticks;

            if (++first == adapter->num_tx_desc)
                first = 0;

            tx_buffer = &txr->tx_buffers[first];
            tx_desc =
                (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
        }
        ++ifp->if_opackets;
        /* See if there is more work now */
        last = tx_buffer->eop_index;
        if (last != -1) {
            eop_desc =
                (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
            /* Get next done point */
            if (++last == adapter->num_tx_desc) last = 0;
            done = last;
        } else
            break;
    }
    bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    txr->next_to_clean = first;

    /*
     * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
     * it is OK to send packets. If there are no pending descriptors,
     * clear the timeout. Otherwise, if some descriptors have been freed,
     * restart the timeout.
     */
    if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        if (txr->tx_avail == adapter->num_tx_desc) {
            txr->watchdog_check = FALSE;
            return FALSE;
        }
    }

    return TRUE;
}
/*********************************************************************
 *
 *  Refresh mbuf buffers for RX descriptor rings
 *   - now keeps its own state so discards due to resource
 *     exhaustion are unnecessary, if an mbuf cannot be obtained
 *     it just returns, keeping its placeholder, thus it can simply
 *     be recalled to try again.
 *
 **********************************************************************/
static void
ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
{
    struct adapter *adapter = rxr->adapter;
    bus_dma_segment_t hseg[1];
    bus_dma_segment_t pseg[1];
    struct ixv_rx_buf *rxbuf;
    struct mbuf *mh, *mp;
    int i, j, nsegs, error;
    bool refreshed = FALSE;

    i = j = rxr->next_to_refresh;
    /* Get the control variable, one beyond refresh point */
    if (++j == adapter->num_rx_desc)
        j = 0;
    while (j != limit) {
        rxbuf = &rxr->rx_buffers[i];
        if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
            mh = m_gethdr(M_DONTWAIT, MT_DATA);
            if (mh == NULL)
                goto update;
            mh->m_pkthdr.len = mh->m_len = MHLEN;
            mh->m_flags |= M_PKTHDR;
            m_adj(mh, ETHER_ALIGN);
            /* Get the memory mapping */
            error = bus_dmamap_load_mbuf_sg(rxr->htag,
                rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
            if (error != 0) {
                printf("GET BUF: dmamap load"
                    " failure - %d\n", error);
                m_free(mh);
                goto update;
            }
            rxbuf->m_head = mh;
            bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                BUS_DMASYNC_PREREAD);
            rxr->rx_base[i].read.hdr_addr =
                htole64(hseg[0].ds_addr);
        }

        if (rxbuf->m_pack == NULL) {
            mp = m_getjcl(M_DONTWAIT, MT_DATA,
                M_PKTHDR, adapter->rx_mbuf_sz);
            if (mp == NULL)
                goto update;
        } else
            mp = rxbuf->m_pack;

        mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->ptag,
            rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
            printf("GET BUF: dmamap load"
                " failure - %d\n", error);
            m_free(mp);
            rxbuf->m_pack = NULL;
            goto update;
        }
        rxbuf->m_pack = mp;
        bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
            BUS_DMASYNC_PREREAD);
        rxr->rx_base[i].read.pkt_addr =
            htole64(pseg[0].ds_addr);

        refreshed = TRUE;
        rxr->next_to_refresh = i = j;
        /* Calculate next index */
        if (++j == adapter->num_rx_desc)
            j = 0;
    }
update:
    if (refreshed) /* update tail index */
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
    return;
}
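/*
** Illustrative note on the index walk above: 'i' is the slot being
** refreshed while 'j' runs one ahead as the loop control, stopping at
** 'limit' (the first descriptor the cleaner has not yet released). For
** example, with num_rx_desc = 4, next_to_refresh = 2 and limit = 1, the
** loop refreshes slots 2 and 3 and leaves next_to_refresh = 0, the next
** candidate slot.
*/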
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
ixv_allocate_receive_buffers(struct rx_ring *rxr)
{
    struct adapter *adapter = rxr->adapter;
    device_t dev = adapter->dev;
    struct ixv_rx_buf *rxbuf;
    int i, bsize, error;

    bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
    if (!(rxr->rx_buffers =
        (struct ixv_rx_buf *) malloc(bsize,
        M_DEVBUF, M_NOWAIT | M_ZERO))) {
        device_printf(dev, "Unable to allocate rx_buffer memory\n");
        error = ENOMEM;
        goto fail;
    }

    if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
            1, 0,               /* alignment, bounds */
            BUS_SPACE_MAXADDR,  /* lowaddr */
            BUS_SPACE_MAXADDR,  /* highaddr */
            NULL, NULL,         /* filter, filterarg */
            MSIZE,              /* maxsize */
            1,                  /* nsegments (assumed: one per header mbuf) */
            MSIZE,              /* maxsegsize */
            0,                  /* flags */
            NULL,               /* lockfunc */
            NULL,               /* lockfuncarg */
            &rxr->htag))) {
        device_printf(dev, "Unable to create RX head DMA tag\n");
        goto fail;
    }

    if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
            1, 0,               /* alignment, bounds */
            BUS_SPACE_MAXADDR,  /* lowaddr */
            BUS_SPACE_MAXADDR,  /* highaddr */
            NULL, NULL,         /* filter, filterarg */
            MJUMPAGESIZE,       /* maxsize */
            1,                  /* nsegments (assumed: one per cluster) */
            MJUMPAGESIZE,       /* maxsegsize */
            0,                  /* flags */
            NULL,               /* lockfunc */
            NULL,               /* lockfuncarg */
            &rxr->ptag))) {
        device_printf(dev, "Unable to create RX packet DMA tag\n");
        goto fail;
    }

    for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
        rxbuf = &rxr->rx_buffers[i];
        error = bus_dmamap_create(rxr->htag,
            BUS_DMA_NOWAIT, &rxbuf->hmap);
        if (error) {
            device_printf(dev, "Unable to create RX head map\n");
            goto fail;
        }
        error = bus_dmamap_create(rxr->ptag,
            BUS_DMA_NOWAIT, &rxbuf->pmap);
        if (error) {
            device_printf(dev, "Unable to create RX pkt map\n");
            goto fail;
        }
    }

    return (0);

fail:
    /* Frees all, but can handle partial completion */
    ixv_free_receive_structures(adapter);
    return (error);
}
static void
ixv_free_receive_ring(struct rx_ring *rxr)
{
    struct adapter *adapter;
    struct ixv_rx_buf *rxbuf;
    int i;

    adapter = rxr->adapter;
    for (i = 0; i < adapter->num_rx_desc; i++) {
        rxbuf = &rxr->rx_buffers[i];
        if (rxbuf->m_head != NULL) {
            bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(rxr->htag, rxbuf->hmap);
            rxbuf->m_head->m_flags |= M_PKTHDR;
            m_freem(rxbuf->m_head);
        }
        if (rxbuf->m_pack != NULL) {
            bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
            rxbuf->m_pack->m_flags |= M_PKTHDR;
            m_freem(rxbuf->m_pack);
        }
        rxbuf->m_head = NULL;
        rxbuf->m_pack = NULL;
    }
}
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
    struct adapter *adapter;
    struct ifnet *ifp;
    device_t dev;
    struct ixv_rx_buf *rxbuf;
    bus_dma_segment_t pseg[1], hseg[1];
    struct lro_ctrl *lro = &rxr->lro;
    int rsize, nsegs, error = 0;

    adapter = rxr->adapter;
    ifp = adapter->ifp;
    dev = adapter->dev;

    /* Clear the ring contents */
    IXV_RX_LOCK(rxr);
    rsize = roundup2(adapter->num_rx_desc *
        sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
    bzero((void *)rxr->rx_base, rsize);

    /* Free current RX buffer structs and their mbufs */
    ixv_free_receive_ring(rxr);

    /* Configure header split? */
    if (ixv_header_split)
        rxr->hdr_split = TRUE;

    /* Now replenish the mbufs */
    for (int j = 0; j != adapter->num_rx_desc; ++j) {
        struct mbuf *mh, *mp;

        rxbuf = &rxr->rx_buffers[j];
        /*
        ** Dont allocate mbufs if not
        ** doing header split, its wasteful
        */
        if (rxr->hdr_split == FALSE)
            goto skip_head;

        /* First the header */
        rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
        if (rxbuf->m_head == NULL) {
            error = ENOBUFS;
            goto fail;
        }
        m_adj(rxbuf->m_head, ETHER_ALIGN);
        mh = rxbuf->m_head;
        mh->m_len = mh->m_pkthdr.len = MHLEN;
        mh->m_flags |= M_PKTHDR;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->htag,
            rxbuf->hmap, rxbuf->m_head, hseg,
            &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) /* Nothing elegant to do here */
            goto fail;
        bus_dmamap_sync(rxr->htag,
            rxbuf->hmap, BUS_DMASYNC_PREREAD);
        /* Update descriptor */
        rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
        /* Now the payload cluster */
        rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
            M_PKTHDR, adapter->rx_mbuf_sz);
        if (rxbuf->m_pack == NULL) {
            error = ENOBUFS;
            goto fail;
        }
        mp = rxbuf->m_pack;
        mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
        /* Get the memory mapping */
        error = bus_dmamap_load_mbuf_sg(rxr->ptag,
            rxbuf->pmap, mp, pseg,
            &nsegs, BUS_DMA_NOWAIT);
        if (error != 0)
            goto fail;
        bus_dmamap_sync(rxr->ptag,
            rxbuf->pmap, BUS_DMASYNC_PREREAD);
        /* Update descriptor */
        rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
    }

    /* Setup our descriptor indices */
    rxr->next_to_check = 0;
    rxr->next_to_refresh = 0;
    rxr->lro_enabled = FALSE;
    rxr->rx_split_packets = 0;
    rxr->rx_bytes = 0;
    rxr->discard = FALSE;

    bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /*
    ** Now set up the LRO interface:
    */
    if (ifp->if_capenable & IFCAP_LRO) {
        int err = tcp_lro_init(lro);
        if (err) {
            device_printf(dev, "LRO Initialization failed!\n");
            goto fail;
        }
        INIT_DEBUGOUT("RX Soft LRO Initialized\n");
        rxr->lro_enabled = TRUE;
        lro->ifp = adapter->ifp;
    }

    IXV_RX_UNLOCK(rxr);
    return (0);

fail:
    ixv_free_receive_ring(rxr);
    IXV_RX_UNLOCK(rxr);
    return (error);
}
/*********************************************************************
 *  Initialize all receive rings.
 **********************************************************************/
static int
ixv_setup_receive_structures(struct adapter *adapter)
{
    struct rx_ring *rxr = adapter->rx_rings;
    int j;

    for (j = 0; j < adapter->num_queues; j++, rxr++)
        if (ixv_setup_receive_ring(rxr))
            goto fail;

    return (0);
fail:
    /*
     * Free RX buffers allocated so far, we will only handle
     * the rings that completed, the failing case will have
     * cleaned up for itself. 'j' failed, so its the terminus.
     */
    for (int i = 0; i < j; ++i) {
        rxr = &adapter->rx_rings[i];
        ixv_free_receive_ring(rxr);
    }
    return (ENOBUFS);
}
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
    struct rx_ring *rxr = adapter->rx_rings;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ifnet *ifp = adapter->ifp;
    u32 bufsz, fctrl, rxcsum, hlreg;

    /* Enable broadcasts */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    fctrl |= IXGBE_FCTRL_BAM;
    fctrl |= IXGBE_FCTRL_DPF;
    fctrl |= IXGBE_FCTRL_PMCF;
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    /* Set for Jumbo Frames? */
    hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    if (ifp->if_mtu > ETHERMTU) {
        hlreg |= IXGBE_HLREG0_JUMBOEN;
        bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    } else {
        hlreg &= ~IXGBE_HLREG0_JUMBOEN;
        bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    }
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

    for (int i = 0; i < adapter->num_queues; i++, rxr++) {
        u64 rdba = rxr->rxdma.dma_paddr;
        u32 reg, rxdctl;

        /* Do the queue enabling first */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        rxdctl |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
        for (int k = 0; k < 10; k++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
                IXGBE_RXDCTL_ENABLE)
                break;
            else
                msec_delay(1);
        }
        wmb();

        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
            (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
            adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

        /* Set up the SRRCTL register */
        reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
        reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        reg |= bufsz;
        if (rxr->hdr_split) {
            /* Use a standard mbuf for the header */
            reg |= ((IXV_RX_HDR <<
                IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
                & IXGBE_SRRCTL_BSIZEHDR_MASK);
            reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        } else
            reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
            adapter->num_rx_desc - 1);
    }

    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

    if (ifp->if_capenable & IFCAP_RXCSUM)
        rxcsum |= IXGBE_RXCSUM_PCSD;

    if (!(rxcsum & IXGBE_RXCSUM_PCSD))
        rxcsum |= IXGBE_RXCSUM_IPPCSE;

    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
/*********************************************************************
 *  Free all receive rings.
 **********************************************************************/
static void
ixv_free_receive_structures(struct adapter *adapter)
{
    struct rx_ring *rxr = adapter->rx_rings;

    for (int i = 0; i < adapter->num_queues; i++, rxr++) {
        struct lro_ctrl *lro = &rxr->lro;
        ixv_free_receive_buffers(rxr);
        /* Free LRO memory */
        tcp_lro_free(lro);
        /* Free the ring memory as well */
        ixv_dma_free(adapter, &rxr->rxdma);
    }

    free(adapter->rx_rings, M_DEVBUF);
}
/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
    struct adapter *adapter = rxr->adapter;
    struct ixv_rx_buf *rxbuf;

    INIT_DEBUGOUT("free_receive_structures: begin");

    /* Cleanup any existing buffers */
    if (rxr->rx_buffers != NULL) {
        for (int i = 0; i < adapter->num_rx_desc; i++) {
            rxbuf = &rxr->rx_buffers[i];
            if (rxbuf->m_head != NULL) {
                bus_dmamap_sync(rxr->htag, rxbuf->hmap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(rxr->htag, rxbuf->hmap);
                rxbuf->m_head->m_flags |= M_PKTHDR;
                m_freem(rxbuf->m_head);
            }
            if (rxbuf->m_pack != NULL) {
                bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
                rxbuf->m_pack->m_flags |= M_PKTHDR;
                m_freem(rxbuf->m_pack);
            }
            rxbuf->m_head = NULL;
            rxbuf->m_pack = NULL;
            if (rxbuf->hmap != NULL) {
                bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
                rxbuf->hmap = NULL;
            }
            if (rxbuf->pmap != NULL) {
                bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
                rxbuf->pmap = NULL;
            }
        }
        if (rxr->rx_buffers != NULL) {
            free(rxr->rx_buffers, M_DEVBUF);
            rxr->rx_buffers = NULL;
        }
    }

    if (rxr->htag != NULL) {
        bus_dma_tag_destroy(rxr->htag);
        rxr->htag = NULL;
    }
    if (rxr->ptag != NULL) {
        bus_dma_tag_destroy(rxr->ptag);
        rxr->ptag = NULL;
    }
}
static __inline void
ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
    /*
     * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
     * should be computed by hardware. Also it should not have VLAN tag in
     * ethernet header.
     */
    if (rxr->lro_enabled &&
        (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
        (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
        (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
        (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
        (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
        /*
         * If the LRO queue has resources, try to enqueue; on success
         * LRO consumes the mbuf. Otherwise fall through and hand the
         * frame to the stack directly.
         */
        if (rxr->lro.lro_cnt != 0)
            if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
                return;
    }
    (*ifp->if_input)(ifp, m);
}
static __inline void
ixv_rx_discard(struct rx_ring *rxr, int i)
{
    struct ixv_rx_buf *rbuf;

    rbuf = &rxr->rx_buffers[i];
    if (rbuf->fmp != NULL) {    /* Partial chain ? */
        rbuf->fmp->m_flags |= M_PKTHDR;
        m_freem(rbuf->fmp);
        rbuf->fmp = NULL;
    }

    /*
    ** With advanced descriptors the writeback
    ** clobbers the buffer addrs, so its easier
    ** to just free the existing mbufs and take
    ** the normal refresh path to get new buffers
    ** and mapping.
    */
    if (rbuf->m_head) {
        m_free(rbuf->m_head);
        rbuf->m_head = NULL;
    }
    if (rbuf->m_pack) {
        m_free(rbuf->m_pack);
        rbuf->m_pack = NULL;
    }
}
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
    struct adapter *adapter = que->adapter;
    struct rx_ring *rxr = que->rxr;
    struct ifnet *ifp = adapter->ifp;
    struct lro_ctrl *lro = &rxr->lro;
    struct lro_entry *queued;
    int i, nextp, processed = 0;
    u32 staterr = 0;
    union ixgbe_adv_rx_desc *cur;
    struct ixv_rx_buf *rbuf, *nbuf;

    IXV_RX_LOCK(rxr);

    for (i = rxr->next_to_check; count != 0;) {
        struct mbuf *sendmp, *mh, *mp;
        u32 ptype;
        u16 hlen, plen, hdr, vtag;
        bool eop;

        /* Sync the ring. */
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        cur = &rxr->rx_base[i];
        staterr = le32toh(cur->wb.upper.status_error);

        if ((staterr & IXGBE_RXD_STAT_DD) == 0)
            break;
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;

        count--;
        sendmp = NULL;
        nbuf = NULL;
        cur->wb.upper.status_error = 0;
        rbuf = &rxr->rx_buffers[i];
        mh = rbuf->m_head;
        mp = rbuf->m_pack;

        plen = le16toh(cur->wb.upper.length);
        ptype = le32toh(cur->wb.lower.lo_dword.data) &
            IXGBE_RXDADV_PKTTYPE_MASK;
        hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
        vtag = le16toh(cur->wb.upper.vlan);
        eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

        /* Make sure all parts of a bad packet are discarded */
        if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
            (rxr->discard)) {
            ifp->if_ierrors++;
            rxr->rx_discarded++;
            if (!eop)
                rxr->discard = TRUE;
            else
                rxr->discard = FALSE;
            ixv_rx_discard(rxr, i);
            goto next_desc;
        }

        if (!eop) {
            nextp = i + 1;
            if (nextp == adapter->num_rx_desc)
                nextp = 0;
            nbuf = &rxr->rx_buffers[nextp];
            prefetch(nbuf);
        }
        /*
        ** The header mbuf is ONLY used when header
        ** split is enabled, otherwise we get normal
        ** behavior, ie, both header and payload
        ** are DMA'd into the payload buffer.
        **
        ** Rather than using the fmp/lmp global pointers
        ** we now keep the head of a packet chain in the
        ** buffer struct and pass this along from one
        ** descriptor to the next, until we get EOP.
        */
        if (rxr->hdr_split && (rbuf->fmp == NULL)) {
            /* This must be an initial descriptor */
            hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                IXGBE_RXDADV_HDRBUFLEN_SHIFT;
            if (hlen > IXV_RX_HDR)
                hlen = MHLEN;
            mh->m_len = hlen;
            mh->m_flags |= M_PKTHDR;
            mh->m_next = NULL;
            mh->m_pkthdr.len = mh->m_len;
            /* Null buf pointer so it is refreshed */
            rbuf->m_head = NULL;
            /*
            ** Check the payload length, this
            ** could be zero if its a small
            ** packet.
            */
            if (plen > 0) {
                mp->m_len = plen;
                mp->m_next = NULL;
                mp->m_flags &= ~M_PKTHDR;
                mh->m_next = mp;
                mh->m_pkthdr.len += mp->m_len;
                /* Null buf pointer so it is refreshed */
                rbuf->m_pack = NULL;
                rxr->rx_split_packets++;
            }
            /*
            ** Now create the forward
            ** chain so when complete
            ** we wont have to.
            */
            if (eop == 0) {
                /* stash the chain head */
                nbuf->fmp = mh;
                /* Make forward chain */
                if (plen)
                    mp->m_next = nbuf->m_pack;
                else
                    mh->m_next = nbuf->m_pack;
            } else {
                /* Singlet, prepare to send */
                sendmp = mh;
                if ((adapter->num_vlans) &&
                    (staterr & IXGBE_RXD_STAT_VP)) {
                    sendmp->m_pkthdr.ether_vtag = vtag;
                    sendmp->m_flags |= M_VLANTAG;
                }
            }
        } else {
            /*
            ** Either no header split, or a
            ** secondary piece of a fragmented
            ** packet.
            */
            mp->m_len = plen;
            /*
            ** See if there is a stored head
            ** that determines what we are
            */
            sendmp = rbuf->fmp;
            rbuf->m_pack = rbuf->fmp = NULL;

            if (sendmp != NULL) /* secondary frag */
                sendmp->m_pkthdr.len += mp->m_len;
            else {
                /* first desc of a non-ps chain */
                sendmp = mp;
                sendmp->m_flags |= M_PKTHDR;
                sendmp->m_pkthdr.len = mp->m_len;
                if (staterr & IXGBE_RXD_STAT_VP) {
                    sendmp->m_pkthdr.ether_vtag = vtag;
                    sendmp->m_flags |= M_VLANTAG;
                }
            }
            /* Pass the head pointer on */
            if (eop == 0) {
                nbuf->fmp = sendmp;
                sendmp = NULL;
                mp->m_next = nbuf->m_pack;
            }
        }
        ++processed;
        /* Sending this frame? */
        if (eop) {
            sendmp->m_pkthdr.rcvif = ifp;
            ifp->if_ipackets++;
            rxr->rx_packets++;
            /* capture data for AIM */
            rxr->bytes += sendmp->m_pkthdr.len;
            rxr->rx_bytes += sendmp->m_pkthdr.len;
            if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
            sendmp->m_pkthdr.flowid = que->msix;
            sendmp->m_flags |= M_FLOWID;
#endif
        }
next_desc:
        bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Advance our pointers to the next descriptor. */
        if (++i == adapter->num_rx_desc)
            i = 0;

        /* Now send to the stack or do LRO */
        if (sendmp != NULL)
            ixv_rx_input(rxr, ifp, sendmp, ptype);

        /* Every 8 descriptors we go to refresh mbufs */
        if (processed == 8) {
            ixv_refresh_mbufs(rxr, i);
            processed = 0;
        }
    }

    /* Refresh any remaining buf structs */
    if (ixv_rx_unrefreshed(rxr))
        ixv_refresh_mbufs(rxr, i);

    rxr->next_to_check = i;

    /*
     * Flush any outstanding LRO work
     */
    while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
        SLIST_REMOVE_HEAD(&lro->lro_active, next);
        tcp_lro_flush(lro, queued);
    }

    IXV_RX_UNLOCK(rxr);

    /*
    ** We still have cleaning to do?
    ** Schedule another interrupt if so.
    */
    if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
        ixv_rearm_queues(adapter, (u64)(1 << que->msix));
        return (TRUE);
    }

    return (FALSE);
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixv_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
{
    u16 status = (u16) staterr;
    u8 errors = (u8) (staterr >> 24);
    bool sctp = FALSE;

    if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
        (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
        sctp = TRUE;

    if (status & IXGBE_RXD_STAT_IPCS) {
        if (!(errors & IXGBE_RXD_ERR_IPE)) {
            /* IP Checksum Good */
            mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
            mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
        } else
            mp->m_pkthdr.csum_flags = 0;
    }
    if (status & IXGBE_RXD_STAT_L4CS) {
        u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
        if (sctp)
            type = CSUM_SCTP_VALID;
#endif
        if (!(errors & IXGBE_RXD_ERR_TCPE)) {
            mp->m_pkthdr.csum_flags |= type;
            if (!sctp)
                mp->m_pkthdr.csum_data = htons(0xffff);
        }
    }
}
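/*
** Illustrative note on the casts above: the code treats the low 16 bits
** of the descriptor's status_error word as status (IXGBE_RXD_STAT_IPCS,
** IXGBE_RXD_STAT_L4CS, ...) and bits 24-31 as error flags
** (IXGBE_RXD_ERR_IPE, IXGBE_RXD_ERR_TCPE, ...):
**   status = (u16) staterr;          keeps bits 0..15
**   errors = (u8) (staterr >> 24);   keeps bits 24..31
*/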
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 ctrl, vid, vfta, retry;

    /*
    ** We get here thru init_locked, meaning
    ** a soft reset, this has already cleared
    ** the VFTA and other state, so if there
    ** have been no vlan's registered do nothing.
    */
    if (adapter->num_vlans == 0)
        return;

    /* Enable the queues */
    for (int i = 0; i < adapter->num_queues; i++) {
        ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        ctrl |= IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
    }

    /*
    ** A soft reset zero's out the VFTA, so
    ** we need to repopulate it now.
    */
    for (int i = 0; i < VFTA_SIZE; i++) {
        if (ixv_shadow_vfta[i] == 0)
            continue;
        vfta = ixv_shadow_vfta[i];
        /*
        ** Reconstruct the vlan id's
        ** based on the bits set in each
        ** of the array ints.
        */
        for (int j = 0; j < 32; j++) {
            retry = 0;
            if ((vfta & (1 << j)) == 0)
                continue;
            vid = (i * 32) + j;
            /* Call the shared code mailbox routine */
            while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
                if (++retry > 5)
                    break;
            }
        }
    }
}
/*
** This routine is run via a vlan config EVENT, it enables
** us to use the HW Filter table since we can get the vlan
** id. This just creates the entry in the soft version of
** the VFTA, init will repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct adapter *adapter = ifp->if_softc;
    u16 index, bit;

    if (ifp->if_softc != arg) /* Not our event */
        return;

    if ((vtag == 0) || (vtag > 4095)) /* Invalid */
        return;

    IXV_CORE_LOCK(adapter);
    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] |= (1 << bit);
    ++adapter->num_vlans;
    /* Re-init to load the changes */
    ixv_init_locked(adapter);
    IXV_CORE_UNLOCK(adapter);
}
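/*
** Illustrative example of the shadow VFTA math above: the table is an
** array of 32-bit words with one bit per VLAN ID. For vtag = 100:
**   index = (100 >> 5) & 0x7F = 3
**   bit   =  100 & 0x1F       = 4
** so ixv_shadow_vfta[3] |= (1 << 4) records VLAN 100.
*/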
/*
** This routine is run via a vlan unconfig EVENT,
** remove our entry in the soft vfta.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
    struct adapter *adapter = ifp->if_softc;
    u16 index, bit;

    if (ifp->if_softc != arg) /* Not our event */
        return;

    if ((vtag == 0) || (vtag > 4095)) /* Invalid */
        return;

    IXV_CORE_LOCK(adapter);
    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] &= ~(1 << bit);
    --adapter->num_vlans;
    /* Re-init to load the changes */
    ixv_init_locked(adapter);
    IXV_CORE_UNLOCK(adapter);
}
static void
ixv_enable_intr(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_queue *que = adapter->queues;
    u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

    mask = IXGBE_EIMS_ENABLE_MASK;
    mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

    for (int i = 0; i < adapter->num_queues; i++, que++)
        ixv_enable_queue(adapter, que->msix);

    IXGBE_WRITE_FLUSH(hw);
}
static void
ixv_disable_intr(struct adapter *adapter)
{
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
    IXGBE_WRITE_FLUSH(&adapter->hw);
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**   - entry is the register array entry
**   - vector is the MSIX vector for this queue
**   - type is RX/TX/MISC
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 ivar, index;

    vector |= IXGBE_IVAR_ALLOC_VAL;

    if (type == -1) { /* MISC IVAR */
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
        ivar &= ~0xFF;
        ivar |= vector;
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
    } else {          /* RX/TX IVARS */
        index = (16 * (entry & 1)) + (8 * type);
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
        ivar &= ~(0xFF << index);
        ivar |= (vector << index);
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
    }
}
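/*
** Illustrative example of the IVAR math above: each VTIVAR register
** holds four 8-bit vector entries covering two queues (an RX and a TX
** lane each). For entry = 3, type = 1 (TX):
**   register = VTIVAR(3 >> 1) = VTIVAR(1)
**   index    = 16 * (3 & 1) + 8 * 1 = 24
** so the vector (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 24..31.
*/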
static void
ixv_configure_ivars(struct adapter *adapter)
{
    struct ix_queue *que = adapter->queues;

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        /* First the RX queue entry */
        ixv_set_ivar(adapter, i, que->msix, 0);
        /* ... and the TX */
        ixv_set_ivar(adapter, i, que->msix, 1);
        /* Set an initial value in EITR */
        IXGBE_WRITE_REG(&adapter->hw,
            IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
    }

    /* For the Link interrupt */
    ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
}
/*
** Tasklet handler for MSIX MBX interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
    struct adapter *adapter = context;

    ixgbe_check_link(&adapter->hw,
        &adapter->link_speed, &adapter->link_up, 0);
    ixv_update_link_status(adapter);
}
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
    if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
        adapter->stats.saved_reset_vfgprc +=
            adapter->stats.vfgprc - adapter->stats.base_vfgprc;
        adapter->stats.saved_reset_vfgptc +=
            adapter->stats.vfgptc - adapter->stats.base_vfgptc;
        adapter->stats.saved_reset_vfgorc +=
            adapter->stats.vfgorc - adapter->stats.base_vfgorc;
        adapter->stats.saved_reset_vfgotc +=
            adapter->stats.vfgotc - adapter->stats.base_vfgotc;
        adapter->stats.saved_reset_vfmprc +=
            adapter->stats.vfmprc - adapter->stats.base_vfmprc;
    }
}
static void
ixv_init_stats(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
    adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
    adapter->stats.last_vfgorc |=
        (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

    adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
    adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
    adapter->stats.last_vfgotc |=
        (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

    adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

    adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
    adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
    adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
    adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
    adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
    u32 current = IXGBE_READ_REG(hw, reg);              \
    if (current < last)                                 \
        count += 0x100000000LL;                         \
    last = current;                                     \
    count &= 0xFFFFFFFF00000000LL;                      \
    count |= current;                                   \
}

#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
    u64 cur_lsb = IXGBE_READ_REG(hw, lsb);              \
    u64 cur_msb = IXGBE_READ_REG(hw, msb);              \
    u64 current = ((cur_msb << 32) | cur_lsb);          \
    if (current < last)                                 \
        count += 0x1000000000LL;                        \
    last = current;                                     \
    count &= 0xFFFFFFF000000000LL;                      \
    count |= current;                                   \
}
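/*
** Illustrative example of the rollover handling above: UPDATE_STAT_32
** extends a 32-bit hardware counter into a 64-bit software count. If
** last = 0xFFFFFFF0 and the register now reads current = 0x00000010,
** then current < last, so a carry of 0x100000000 is added before the
** low 32 bits are replaced with 'current', keeping the 64-bit count
** monotonic across wraparound.
*/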
/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
        adapter->stats.vfgprc);
    UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
        adapter->stats.vfgptc);
    UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
        adapter->stats.last_vfgorc, adapter->stats.vfgorc);
    UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
        adapter->stats.last_vfgotc, adapter->stats.vfgotc);
    UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
        adapter->stats.vfmprc);
}
/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_hw_stats(struct adapter *adapter)
{
    device_t dev = adapter->dev;

    device_printf(dev, "Std Mbuf Failed = %lu\n",
        adapter->mbuf_defrag_failed);
    device_printf(dev, "Driver dropped packets = %lu\n",
        adapter->dropped_pkts);
    device_printf(dev, "watchdog timeouts = %ld\n",
        adapter->watchdog_events);

    device_printf(dev, "Good Packets Rcvd = %llu\n",
        (long long)adapter->stats.vfgprc);
    device_printf(dev, "Good Packets Xmtd = %llu\n",
        (long long)adapter->stats.vfgptc);
    device_printf(dev, "TSO Transmissions = %lu\n",
        adapter->tso_tx);
}
/**********************************************************************
 *
 *  This routine is called only when em_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_queue *que = adapter->queues;
    struct rx_ring *rxr;
    struct tx_ring *txr;
    struct lro_ctrl *lro;

    device_printf(dev, "Error Byte Count = %u \n",
        IXGBE_READ_REG(hw, IXGBE_ERRBC));

    for (int i = 0; i < adapter->num_queues; i++, que++) {
        txr = que->txr;
        rxr = que->rxr;
        lro = &rxr->lro;
        device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
            que->msix, (long)que->irqs);
        device_printf(dev, "RX(%d) Packets Received: %lld\n",
            rxr->me, (long long)rxr->rx_packets);
        device_printf(dev, "RX(%d) Split RX Packets: %lld\n",
            rxr->me, (long long)rxr->rx_split_packets);
        device_printf(dev, "RX(%d) Bytes Received: %lu\n",
            rxr->me, (long)rxr->rx_bytes);
        device_printf(dev, "RX(%d) LRO Queued= %d\n",
            rxr->me, lro->lro_queued);
        device_printf(dev, "RX(%d) LRO Flushed= %d\n",
            rxr->me, lro->lro_flushed);
        device_printf(dev, "TX(%d) Packets Sent: %lu\n",
            txr->me, (long)txr->total_packets);
        device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
            txr->me, (long)txr->no_desc_avail);
    }

    device_printf(dev, "MBX IRQ Handled: %lu\n",
        (long)adapter->mbx_irq);
}
static int
ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
    int error, result = -1;

    error = sysctl_handle_int(oidp, &result, 0, req);
    if (error || !req->newptr)
        return (error);
    if (result == 1)
        ixv_print_hw_stats((struct adapter *) arg1);
    return (error);
}
static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
    int error, result = -1;

    error = sysctl_handle_int(oidp, &result, 0, req);
    if (error || !req->newptr)
        return (error);
    if (result == 1)
        ixv_print_debug_info((struct adapter *) arg1);
    return (error);
}
/*
** Set flow control using sysctl; values: 0 - off, 1 - rx pause,
** 2 - tx pause, 3 - full.
*/
static int
ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
    struct adapter *adapter;
    int error;

    error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
    if (error)
        return (error);
    adapter = (struct adapter *) arg1;
    switch (ixv_flow_control) {
    case ixgbe_fc_rx_pause:
    case ixgbe_fc_tx_pause:
    case ixgbe_fc_full:
        adapter->hw.fc.requested_mode = ixv_flow_control;
        break;
    case ixgbe_fc_none:
    default:
        adapter->hw.fc.requested_mode = ixgbe_fc_none;
    }
    ixgbe_fc_enable(&adapter->hw);
    return (error);
}
static void
ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
    *limit = value;
    SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
        OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}